prompt
large_stringlengths 70
991k
| completion
large_stringlengths 0
1.02k
|
---|---|
<|file_name|>server_run_options.go<|end_file_name|><|fim▁begin|>/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"net"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apiserver/pkg/server"
utilfeature "k8s.io/apiserver/pkg/util/feature"
// add the generic feature gates
_ "k8s.io/apiserver/pkg/features"
"github.com/spf13/pflag"
)
// ServerRunOptions contains the options while running a generic api server.
type ServerRunOptions struct {
AdvertiseAddress net.IP
CorsAllowedOriginList []string
ExternalHost string
MaxRequestsInFlight int
MaxMutatingRequestsInFlight int
RequestTimeout time.Duration
MinRequestTimeout int
TargetRAMMB int
}
func NewServerRunOptions() *ServerRunOptions {
defaults := server.NewConfig(serializer.CodecFactory{})
return &ServerRunOptions{
MaxRequestsInFlight: defaults.MaxRequestsInFlight,
MaxMutatingRequestsInFlight: defaults.MaxMutatingRequestsInFlight,
RequestTimeout: defaults.RequestTimeout,
MinRequestTimeout: defaults.MinRequestTimeout,
}
}
// ApplyOptions applies the run options to the method receiver and returns self
func (s *ServerRunOptions) ApplyTo(c *server.Config) error {
c.CorsAllowedOriginList = s.CorsAllowedOriginList
c.ExternalAddress = s.ExternalHost
c.MaxRequestsInFlight = s.MaxRequestsInFlight
c.MaxMutatingRequestsInFlight = s.MaxMutatingRequestsInFlight
c.RequestTimeout = s.RequestTimeout
c.MinRequestTimeout = s.MinRequestTimeout
c.PublicAddress = s.AdvertiseAddress
return nil
}
// DefaultAdvertiseAddress sets the field AdvertiseAddress if unset. The field will be set based on the SecureServingOptions.
func (s *ServerRunOptions) DefaultAdvertiseAddress(secure *SecureServingOptions) error {
if secure == nil {
return nil
}
if s.AdvertiseAddress == nil || s.AdvertiseAddress.IsUnspecified() {
hostIP, err := secure.DefaultExternalAddress()
if err != nil {
return fmt.Errorf("Unable to find suitable network address.error='%v'. "+
"Try to set the AdvertiseAddress directly or provide a valid BindAddress to fix this.", err)
}
s.AdvertiseAddress = hostIP
}
return nil
}
// Validate checks validation of ServerRunOptions
func (s *ServerRunOptions) Validate() []error {
errors := []error{}
if s.TargetRAMMB < 0 {
errors = append(errors, fmt.Errorf("--target-ram-mb can not be negative value"))
}<|fim▁hole|> errors = append(errors, fmt.Errorf("--max-requests-inflight can not be negative value"))
}
if s.MaxMutatingRequestsInFlight < 0 {
errors = append(errors, fmt.Errorf("--max-mutating-requests-inflight can not be negative value"))
}
if s.RequestTimeout.Nanoseconds() < 0 {
errors = append(errors, fmt.Errorf("--request-timeout can not be negative value"))
}
return errors
}
// AddFlags adds flags for a specific APIServer to the specified FlagSet
func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) {
// Note: the weird ""+ in below lines seems to be the only way to get gofmt to
// arrange these text blocks sensibly. Grrr.
fs.IPVar(&s.AdvertiseAddress, "advertise-address", s.AdvertiseAddress, ""+
"The IP address on which to advertise the apiserver to members of the cluster. This "+
"address must be reachable by the rest of the cluster. If blank, the --bind-address "+
"will be used. If --bind-address is unspecified, the host's default interface will "+
"be used.")
fs.StringSliceVar(&s.CorsAllowedOriginList, "cors-allowed-origins", s.CorsAllowedOriginList, ""+
"List of allowed origins for CORS, comma separated. An allowed origin can be a regular "+
"expression to support subdomain matching. If this list is empty CORS will not be enabled.")
fs.IntVar(&s.TargetRAMMB, "target-ram-mb", s.TargetRAMMB,
"Memory limit for apiserver in MB (used to configure sizes of caches, etc.)")
fs.StringVar(&s.ExternalHost, "external-hostname", s.ExternalHost,
"The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs).")
deprecatedMasterServiceNamespace := metav1.NamespaceDefault
fs.StringVar(&deprecatedMasterServiceNamespace, "master-service-namespace", deprecatedMasterServiceNamespace, ""+
"DEPRECATED: the namespace from which the kubernetes master services should be injected into pods.")
fs.IntVar(&s.MaxRequestsInFlight, "max-requests-inflight", s.MaxRequestsInFlight, ""+
"The maximum number of non-mutating requests in flight at a given time. When the server exceeds this, "+
"it rejects requests. Zero for no limit.")
fs.IntVar(&s.MaxMutatingRequestsInFlight, "max-mutating-requests-inflight", s.MaxMutatingRequestsInFlight, ""+
"The maximum number of mutating requests in flight at a given time. When the server exceeds this, "+
"it rejects requests. Zero for no limit.")
fs.DurationVar(&s.RequestTimeout, "request-timeout", s.RequestTimeout, ""+
"An optional field indicating the duration a handler must keep a request open before timing "+
"it out. This is the default request timeout for requests but may be overridden by flags such as "+
"--min-request-timeout for specific types of requests.")
fs.IntVar(&s.MinRequestTimeout, "min-request-timeout", s.MinRequestTimeout, ""+
"An optional field indicating the minimum number of seconds a handler must keep "+
"a request open before timing it out. Currently only honored by the watch request "+
"handler, which picks a randomized value above this number as the connection timeout, "+
"to spread out load.")
utilfeature.DefaultFeatureGate.AddFlag(fs)
}<|fim▁end|> | if s.MaxRequestsInFlight < 0 { |
<|file_name|>issue-2185.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.<|fim▁hole|>// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test FIXME #2263
// xfail-fast
// This test had to do with an outdated version of the iterable trait.
// However, the condition it was testing seemed complex enough to
// warrant still having a test, so I inlined the old definitions.
trait iterable<A> {
fn iter(blk: &fn(A));
}
impl<A> iterable<A> for @fn(&fn(A)) {
fn iter(blk: &fn(A)) { self(blk); }
}
impl iterable<uint> for @fn(&fn(uint)) {
fn iter(blk: &fn(&&v: uint)) { self( |i| blk(i) ) }
}
fn filter<A,IA:iterable<A>>(self: IA, prd: @fn(A) -> bool, blk: &fn(A)) {
do self.iter |a| {
if prd(a) { blk(a) }
}
}
fn foldl<A,B,IA:iterable<A>>(self: IA, b0: B, blk: &fn(B, A) -> B) -> B {
let mut b = b0;
do self.iter |a| {
b = blk(b, a);
}
b
}
fn range(lo: uint, hi: uint, it: &fn(uint)) {
let mut i = lo;
while i < hi {
it(i);
i += 1u;
}
}
pub fn main() {
let range: @fn(&fn(uint)) = |a| range(0u, 1000u, a);
let filt: @fn(&fn(v: uint)) = |a| filter(
range,
|&&n: uint| n % 3u != 0u && n % 5u != 0u,
a);
let sum = foldl(filt, 0u, |accum, &&n: uint| accum + n );
io::println(fmt!("%u", sum));
}<|fim▁end|> | // |
<|file_name|>common.rs<|end_file_name|><|fim▁begin|>use chrono::NaiveDate;
use std::fmt;
/// A common type for toornament dates.
pub type Date = NaiveDate;
macro_rules! enum_number {
($name:ident { $($variant:ident = $value:expr, )* }) => {
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub enum $name {
$($variant = $value,)*
}
impl serde::Serialize for $name {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer
{
// Serialize the enum as a u64.
serializer.serialize_u64(*self as u64)
}
}
impl<'de> serde::Deserialize<'de> for $name {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: serde::Deserializer<'de>
{
struct Visitor;
<|fim▁hole|> formatter.write_str("positive integer")
}
fn visit_u64<E>(self, value: u64) -> Result<$name, E>
where E: serde::de::Error
{
// Rust does not come with a simple way of converting a
// number to an enum, so use a big `match`.
match value {
$( $value => Ok($name::$variant), )*
_ => Err(E::custom(
format!("unknown {} value: {}",
stringify!($name), value))),
}
}
}
// Deserialize the enum from a u64.
deserializer.deserialize_u64(Visitor)
}
}
}
}
/// Team size bounds (minimum and maximum).
#[derive(
Clone, Debug, Default, Eq, Ord, PartialEq, PartialOrd, serde::Serialize, serde::Deserialize,
)]
pub struct TeamSize {
/// Minimum team size
pub min: i64,
/// Maximum team size
pub max: i64,
}
enum_number!(MatchResultSimple {
Win = 1,
Draw = 2,
Loss = 3,
});<|fim▁end|> | impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = $name;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { |
<|file_name|>ApplicationMenu.py<|end_file_name|><|fim▁begin|>##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import weakref
import IECore
import Gaffer
import GafferUI
def appendDefinitions( menuDefinition, prefix ) :
menuDefinition.append( prefix + "/About Gaffer...", { "command" : about } )
menuDefinition.append( prefix + "/Preferences...", { "command" : preferences } )
menuDefinition.append( prefix + "/Documentation...", { "command" : IECore.curry( GafferUI.showURL, os.path.expandvars( "$GAFFER_ROOT/doc/gaffer/html/index.html" ) ) } )
menuDefinition.append( prefix + "/Quit", { "command" : quit, "shortCut" : "Ctrl+Q" } )
def quit( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )<|fim▁hole|> application = scriptWindow.scriptNode().ancestor( Gaffer.ApplicationRoot )
unsavedNames = []
for script in application["scripts"].children() :
if script["unsavedChanges"].getValue() :
f = script["fileName"].getValue()
f = f.rpartition( "/" )[2] if f else "untitled"
unsavedNames.append( f )
if unsavedNames :
dialogue = GafferUI.ConfirmationDialogue(
"Discard Unsaved Changes?",
"The following files have unsaved changes : \n\n" +
"\n".join( [ " - " + n for n in unsavedNames ] ) +
"\n\nDo you want to discard the changes and quit?",
confirmLabel = "Discard and Quit"
)
if not dialogue.waitForConfirmation( parentWindow=scriptWindow ) :
return
# Defer the actual removal of scripts till an idle event - removing all
# the scripts will result in the removal of the window our menu item is
# parented to, which would cause a crash as it's deleted away from over us.
GafferUI.EventLoop.addIdleCallback( IECore.curry( __removeAllScripts, application ) )
def __removeAllScripts( application ) :
for script in application["scripts"].children() :
application["scripts"].removeChild( script )
return False # remove idle callback
__aboutWindow = None
def about( menu ) :
global __aboutWindow
if __aboutWindow is not None and __aboutWindow() :
window = __aboutWindow()
else :
window = GafferUI.AboutWindow( Gaffer.About )
__aboutWindow = weakref.ref( window )
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
scriptWindow.addChildWindow( window )
window.setVisible( True )
__preferencesWindows = weakref.WeakKeyDictionary()
def preferences( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
application = scriptWindow.scriptNode().ancestor( Gaffer.ApplicationRoot )
global __preferencesWindows
window = __preferencesWindows.get( application, None )
if window is not None and window() :
window = window()
else :
window = GafferUI.Dialogue( "Preferences" )
closeButton = window._addButton( "Close" )
window.__closeButtonConnection = closeButton.clickedSignal().connect( __closePreferences )
saveButton = window._addButton( "Save" )
window.__saveButtonConnection = saveButton.clickedSignal().connect( __savePreferences )
nodeUI = GafferUI.NodeUI.create( application["preferences"] )
window._setWidget( nodeUI )
__preferencesWindows[application] = weakref.ref( window )
scriptWindow.addChildWindow( window )
window.setVisible( True )
def __closePreferences( button ) :
button.ancestor( type=GafferUI.Window ).setVisible( False )
def __savePreferences( button ) :
scriptWindow = button.ancestor( GafferUI.ScriptWindow )
application = scriptWindow.scriptNode().ancestor( Gaffer.ApplicationRoot )
application.savePreferences()
button.ancestor( type=GafferUI.Window ).setVisible( False )<|fim▁end|> | |
<|file_name|>editor_plugin_src.js<|end_file_name|><|fim▁begin|>/**
* $Id: sites/all/libraries/tinymce/jscripts/tiny_mce/plugins/visualchars/editor_plugin_src.js 1.3 2010/02/18 14:48:59EST Linda M. Williams (WILLIAMSLM) Production $
*
* @author Moxiecode
* @copyright Copyright © 2004-2008, Moxiecode Systems AB, All rights reserved.
*/
(function() {
tinymce.create('tinymce.plugins.VisualChars', {
init : function(ed, url) {
var t = this;
t.editor = ed;
// Register commands
ed.addCommand('mceVisualChars', t._toggleVisualChars, t);
// Register buttons
ed.addButton('visualchars', {title : 'visualchars.desc', cmd : 'mceVisualChars'});
ed.onBeforeGetContent.add(function(ed, o) {
if (t.state) {
t.state = true;
t._toggleVisualChars();
}
});
},
getInfo : function() {
return {
longname : 'Visual characters',
author : 'Moxiecode Systems AB',
authorurl : 'http://tinymce.moxiecode.com',
infourl : 'http://wiki.moxiecode.com/index.php/TinyMCE:Plugins/visualchars',
version : tinymce.majorVersion + "." + tinymce.minorVersion
};
},
// Private methods
_toggleVisualChars : function() {
var t = this, ed = t.editor, nl, i, h, d = ed.getDoc(), b = ed.getBody(), nv, s = ed.selection, bo;
t.state = !t.state;
ed.controlManager.setActive('visualchars', t.state);
if (t.state) {
nl = [];
tinymce.walk(b, function(n) {
if (n.nodeType == 3 && n.nodeValue && n.nodeValue.indexOf('\u00a0') != -1)
nl.push(n);
}, 'childNodes');
for (i=0; i<nl.length; i++) {
nv = nl[i].nodeValue;
nv = nv.replace(/(\u00a0+)/g, '<span class="mceItemHidden mceVisualNbsp">$1</span>');
nv = nv.replace(/\u00a0/g, '\u00b7');
ed.dom.setOuterHTML(nl[i], nv, d);
}
} else {
nl = tinymce.grep(ed.dom.select('span', b), function(n) {
return ed.dom.hasClass(n, 'mceVisualNbsp');
});
for (i=0; i<nl.length; i++)
<|fim▁hole|> });
// Register plugin
tinymce.PluginManager.add('visualchars', tinymce.plugins.VisualChars);
})();<|fim▁end|> | ed.dom.setOuterHTML(nl[i], nl[i].innerHTML.replace(/(·|\u00b7)/g, ' '), d);
}
}
|
<|file_name|>test_lore.py<|end_file_name|><|fim▁begin|># Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# ++ single anchor added to individual output file
# ++ two anchors added to individual output file
# ++ anchors added to individual output files
# ++ entry added to index
# ++ index entry pointing to correct file and anchor
# ++ multiple entries added to index
# ++ multiple index entries pointing to correct files and anchors
# __ all of above for files in deep directory structure
#
# ++ group index entries by indexed term
# ++ sort index entries by indexed term
# __ hierarchical index entries (e.g. language!programming)
#
# ++ add parameter for what the index filename should be
# ++ add (default) ability to NOT index (if index not specified)
#
# ++ put actual index filename into INDEX link (if any) in the template
# __ make index links RELATIVE!
# __ make index pay attention to the outputdir!
#
# __ make index look nice
#
# ++ add section numbers to headers in lore output
# ++ make text of index entry links be chapter numbers
# ++ make text of index entry links be section numbers
#
# __ put all of our test files someplace neat and tidy
#
import os, shutil, errno, time
from StringIO import StringIO
from xml.dom import minidom as dom
from twisted.trial import unittest
from twisted.python.filepath import FilePath
from twisted.lore import tree, process, indexer, numberer, htmlbook, default
from twisted.lore.default import factory
from twisted.lore.latex import LatexSpitter
from twisted.python.util import sibpath
from twisted.lore.scripts import lore
from twisted.web import domhelpers
from twisted.test.testutils import XMLAssertionMixin
def sp(originalFileName):
return sibpath(__file__, originalFileName)
options = {"template" : sp("template.tpl"), 'baseurl': '%s', 'ext': '.xhtml'}
d = options
class RemoveBlanksTests(unittest.TestCase):
"""
Tests for L{tree._removeLeadingBlankLines} and
L{tree._removeLeadingTrailingBlankLines}.
"""
def setUp(self):
self.inputString = '\n\n\n\nfoo\nbar\n\n\n'
def test_removeLeadingBlankLines(self):
"""
L{tree._removeLeadingBlankLines} removes leading blank lines from a string and returns a list containing the remaining characters.
"""
result = tree._removeLeadingBlankLines(self.inputString)
self.assertEqual(result,
['f', 'o', 'o', '\n', 'b', 'a', 'r', '\n', '\n', '\n'])
def test_removeLeadingTrailingBlankLines(self):
"""
L{tree._removeLeadingTrailingBlankLines} removes leading and trailing
blank lines from a string and returns a string with all lines joined.
"""
result = tree._removeLeadingTrailingBlankLines(self.inputString)
self.assertEqual(result, 'foo\nbar\n')
class TestFactory(unittest.TestCase, XMLAssertionMixin):
file = sp('simple.html')
linkrel = ""
def assertEqualFiles1(self, exp, act):
if (exp == act): return True
fact = open(act)
self.assertEqualsFile(exp, fact.read())
def assertEqualFiles(self, exp, act):
if (exp == act): return True
fact = open(sp(act))
self.assertEqualsFile(exp, fact.read())
def assertEqualsFile(self, exp, act):
expected = open(sp(exp)).read()
self.assertEqual(expected, act)
def makeTemp(self, *filenames):
tmp = self.mktemp()
os.mkdir(tmp)
for filename in filenames:
tmpFile = os.path.join(tmp, filename)
shutil.copyfile(sp(filename), tmpFile)
return tmp
########################################
def setUp(self):
indexer.reset()
numberer.reset()
def testProcessingFunctionFactory(self):
base = FilePath(self.mktemp())
base.makedirs()
simple = base.child('simple.html')
FilePath(__file__).sibling('simple.html').copyTo(simple)
htmlGenerator = factory.generate_html(options)
htmlGenerator(simple.path, self.linkrel)
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: My Test Lore Input</title></head>
<body bgcolor="white">
<h1 class="title">My Test Lore Input</h1>
<div class="content">
<span/>
<p>A Body.</p>
</div>
<a href="index.xhtml">Index</a>
</body>
</html>""",
simple.sibling('simple.xhtml').getContent())
def testProcessingFunctionFactoryWithFilenameGenerator(self):
base = FilePath(self.mktemp())
base.makedirs()
def filenameGenerator(originalFileName, outputExtension):
name = os.path.splitext(FilePath(originalFileName).basename())[0]
return base.child(name + outputExtension).path
htmlGenerator = factory.generate_html(options, filenameGenerator)
htmlGenerator(self.file, self.linkrel)
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: My Test Lore Input</title></head>
<body bgcolor="white">
<h1 class="title">My Test Lore Input</h1>
<div class="content">
<span/>
<p>A Body.</p>
</div>
<a href="index.xhtml">Index</a>
</body>
</html>""",
base.child("simple.xhtml").getContent())
def test_doFile(self):
base = FilePath(self.mktemp())
base.makedirs()
simple = base.child('simple.html')
FilePath(__file__).sibling('simple.html').copyTo(simple)
templ = dom.parse(open(d['template']))
tree.doFile(simple.path, self.linkrel, d['ext'], d['baseurl'], templ, d)
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: My Test Lore Input</title></head>
<body bgcolor="white">
<h1 class="title">My Test Lore Input</h1>
<div class="content">
<span/>
<p>A Body.</p>
</div>
<a href="index.xhtml">Index</a>
</body>
</html>""",
base.child("simple.xhtml").getContent())
def test_doFile_withFilenameGenerator(self):
base = FilePath(self.mktemp())
base.makedirs()
def filenameGenerator(originalFileName, outputExtension):
name = os.path.splitext(FilePath(originalFileName).basename())[0]
return base.child(name + outputExtension).path
templ = dom.parse(open(d['template']))
tree.doFile(self.file, self.linkrel, d['ext'], d['baseurl'], templ, d, filenameGenerator)
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: My Test Lore Input</title></head>
<body bgcolor="white">
<h1 class="title">My Test Lore Input</h1>
<div class="content">
<span/>
<p>A Body.</p>
</div>
<a href="index.xhtml">Index</a>
</body>
</html>""",
base.child("simple.xhtml").getContent())
def test_munge(self):
indexer.setIndexFilename("lore_index_file.html")
doc = dom.parse(open(self.file))
node = dom.parse(open(d['template']))
tree.munge(doc, node, self.linkrel,
os.path.dirname(self.file),
self.file,
d['ext'], d['baseurl'], d)
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: My Test Lore Input</title></head>
<body bgcolor="white">
<h1 class="title">My Test Lore Input</h1>
<div class="content">
<span/>
<p>A Body.</p>
</div>
<a href="lore_index_file.html">Index</a>
</body>
</html>""",
node.toxml())
def test_mungeAuthors(self):
"""
If there is a node with a I{class} attribute set to C{"authors"},
L{tree.munge} adds anchors as children to it, taking the necessary
information from any I{link} nodes in the I{head} with their I{rel}
attribute set to C{"author"}.
"""
document = dom.parseString(
"""\
<html>
<head>
<title>munge authors</title>
<link rel="author" title="foo" href="bar"/>
<link rel="author" title="baz" href="quux"/>
<link rel="author" title="foobar" href="barbaz"/>
</head>
<body>
<h1>munge authors</h1>
</body>
</html>""")
template = dom.parseString(
"""\
<html xmlns="http://www.w3.org/1999/xhtml" lang="en">
<head>
<title />
</head>
<body>
<div class="body" />
<div class="authors" />
</body>
</html>
""")
tree.munge(
document, template, self.linkrel, os.path.dirname(self.file),
self.file, d['ext'], d['baseurl'], d)
self.assertXMLEqual(
template.toxml(),
"""\
<?xml version="1.0" ?><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>munge authors</title>
<link href="bar" rel="author" title="foo"/><link href="quux" rel="author" title="baz"/><link href="barbaz" rel="author" title="foobar"/></head>
<body>
<div class="content">
<span/>
</div>
<div class="authors"><span><a href="bar">foo</a>, <a href="quux">baz</a>, and <a href="barbaz">foobar</a></span></div>
</body>
</html>""")
def test_getProcessor(self):
base = FilePath(self.mktemp())
base.makedirs()
input = base.child("simple3.html")
FilePath(__file__).sibling("simple3.html").copyTo(input)
options = { 'template': sp('template.tpl'), 'ext': '.xhtml', 'baseurl': 'burl',
'filenameMapping': None }
p = process.getProcessor(default, "html", options)
p(input.path, self.linkrel)
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: My Test Lore Input</title></head>
<body bgcolor="white">
<h1 class="title">My Test Lore Input</h1>
<div class="content">
<span/>
<p>A Body.</p>
</div>
<a href="index.xhtml">Index</a>
</body>
</html>""",
base.child("simple3.xhtml").getContent())
def test_outputdirGenerator(self):
normp = os.path.normpath; join = os.path.join
inputdir = normp(join("/", 'home', 'joe'))
outputdir = normp(join("/", 'away', 'joseph'))
actual = process.outputdirGenerator(join("/", 'home', 'joe', "myfile.html"),
'.xhtml', inputdir, outputdir)
expected = normp(join("/", 'away', 'joseph', 'myfile.xhtml'))
self.assertEqual(expected, actual)
def test_outputdirGeneratorBadInput(self):
options = {'outputdir': '/away/joseph/', 'inputdir': '/home/joe/' }
self.assertRaises(ValueError, process.outputdirGenerator, '.html', '.xhtml', **options)
def test_makeSureDirectoryExists(self):
dirname = os.path.join("tmp", 'nonexistentdir')
if os.path.exists(dirname):
os.rmdir(dirname)
self.failIf(os.path.exists(dirname), "Hey: someone already created the dir")
filename = os.path.join(dirname, 'newfile')
tree.makeSureDirectoryExists(filename)
self.failUnless(os.path.exists(dirname), 'should have created dir')
os.rmdir(dirname)
def test_indexAnchorsAdded(self):
indexer.setIndexFilename('theIndexFile.html')
# generate the output file
templ = dom.parse(open(d['template']))
tmp = self.makeTemp('lore_index_test.xhtml')
tree.doFile(os.path.join(tmp, 'lore_index_test.xhtml'),
self.linkrel, '.html', d['baseurl'], templ, d)
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: The way of the program</title></head>
<body bgcolor="white">
<h1 class="title">The way of the program</h1>
<div class="content">
<span/>
<p>The first paragraph.</p>
<h2>The Python programming language<a name="auto0"/></h2>
<a name="index01"/>
<a name="index02"/>
<p>The second paragraph.</p>
</div>
<a href="theIndexFile.html">Index</a>
</body>
</html>""",
FilePath(tmp).child("lore_index_test.html").getContent())
def test_indexEntriesAdded(self):
indexer.addEntry('lore_index_test.html', 'index02', 'language of programming', '1.3')
indexer.addEntry('lore_index_test.html', 'index01', 'programming language', '1.2')
indexer.setIndexFilename("lore_index_file.html")
indexer.generateIndex()
self.assertEqualFiles1("lore_index_file_out.html", "lore_index_file.html")
def test_book(self):
tmp = self.makeTemp()
inputFilename = sp('lore_index_test.xhtml')
bookFilename = os.path.join(tmp, 'lore_test_book.book')
bf = open(bookFilename, 'w')
bf.write('Chapter(r"%s", None)\r\n' % inputFilename)
bf.close()
book = htmlbook.Book(bookFilename)
expected = {'indexFilename': None,
'chapters': [(inputFilename, None)],
}
dct = book.__dict__
for k in dct:
self.assertEqual(dct[k], expected[k])
def test_runningLore(self):
options = lore.Options()
tmp = self.makeTemp('lore_index_test.xhtml')
templateFilename = sp('template.tpl')
inputFilename = os.path.join(tmp, 'lore_index_test.xhtml')
indexFilename = 'theIndexFile'
bookFilename = os.path.join(tmp, 'lore_test_book.book')
bf = open(bookFilename, 'w')
bf.write('Chapter(r"%s", None)\n' % inputFilename)
bf.close()
options.parseOptions(['--null', '--book=%s' % bookFilename,
'--config', 'template=%s' % templateFilename,
'--index=%s' % indexFilename
])
result = lore.runGivenOptions(options)
self.assertEqual(None, result)
self.assertEqualFiles1("lore_index_file_unnumbered_out.html", indexFilename + ".html")
def test_runningLoreMultipleFiles(self):
tmp = self.makeTemp('lore_index_test.xhtml', 'lore_index_test2.xhtml')
templateFilename = sp('template.tpl')
inputFilename = os.path.join(tmp, 'lore_index_test.xhtml')
inputFilename2 = os.path.join(tmp, 'lore_index_test2.xhtml')
indexFilename = 'theIndexFile'
bookFilename = os.path.join(tmp, 'lore_test_book.book')
bf = open(bookFilename, 'w')
bf.write('Chapter(r"%s", None)\n' % inputFilename)
bf.write('Chapter(r"%s", None)\n' % inputFilename2)
bf.close()
options = lore.Options()
options.parseOptions(['--null', '--book=%s' % bookFilename,
'--config', 'template=%s' % templateFilename,
'--index=%s' % indexFilename
])
result = lore.runGivenOptions(options)
self.assertEqual(None, result)
self.assertEqual(
# XXX This doesn't seem like a very good index file.
"""\
aahz: <a href="lore_index_test2.html#index03">link</a><br />
aahz2: <a href="lore_index_test2.html#index02">link</a><br />
language of programming: <a href="lore_index_test.html#index02">link</a>, <a href="lore_index_test2.html#index01">link</a><br />
programming language: <a href="lore_index_test.html#index01">link</a><br />
""",
file(FilePath(indexFilename + ".html").path).read())
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: The way of the program</title></head>
<body bgcolor="white">
<h1 class="title">The way of the program</h1>
<div class="content">
<span/>
<p>The first paragraph.</p>
<h2>The Python programming language<a name="auto0"/></h2>
<a name="index01"/>
<a name="index02"/>
<p>The second paragraph.</p>
</div>
<a href="theIndexFile.html">Index</a>
</body>
</html>""",
FilePath(tmp).child("lore_index_test.html").getContent())
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: The second page to index</title></head>
<body bgcolor="white">
<h1 class="title">The second page to index</h1>
<div class="content">
<span/>
<p>The first paragraph of the second page.</p>
<h2>The Jython programming language<a name="auto0"/></h2>
<a name="index01"/>
<a name="index02"/>
<a name="index03"/>
<p>The second paragraph of the second page.</p>
</div>
<a href="theIndexFile.html">Index</a>
</body>
</html>""",
FilePath(tmp).child("lore_index_test2.html").getContent())
def XXXtest_NumberedSections(self):
# run two files through lore, with numbering turned on
# every h2 should be numbered:
# first file's h2s should be 1.1, 1.2
# second file's h2s should be 2.1, 2.2
templateFilename = sp('template.tpl')
inputFilename = sp('lore_numbering_test.xhtml')
inputFilename2 = sp('lore_numbering_test2.xhtml')
indexFilename = 'theIndexFile'
# you can number without a book:
options = lore.Options()
options.parseOptions(['--null',
'--index=%s' % indexFilename,
'--config', 'template=%s' % templateFilename,
'--config', 'ext=%s' % ".tns",
'--number',
inputFilename, inputFilename2])
result = lore.runGivenOptions(options)
self.assertEqual(None, result)
#self.assertEqualFiles1("lore_index_file_out_multiple.html", indexFilename + ".tns")
# VVV change to new, numbered files
self.assertEqualFiles("lore_numbering_test_out.html", "lore_numbering_test.tns")
self.assertEqualFiles("lore_numbering_test_out2.html", "lore_numbering_test2.tns")
def test_setTitle(self):
"""
L{tree.setTitle} inserts the given title into the first I{title}
element and the first element with the I{title} class in the given
template.
"""
parent = dom.Element('div')
firstTitle = dom.Element('title')
parent.appendChild(firstTitle)
secondTitle = dom.Element('span')
secondTitle.setAttribute('class', 'title')
parent.appendChild(secondTitle)
titleNodes = [dom.Text()]
# minidom has issues with cloning documentless-nodes. See Python issue
# 4851.
titleNodes[0].ownerDocument = dom.Document()
titleNodes[0].data = 'foo bar'
tree.setTitle(parent, titleNodes, None)
self.assertEqual(firstTitle.toxml(), '<title>foo bar</title>')
self.assertEqual(
secondTitle.toxml(), '<span class="title">foo bar</span>')
def test_setTitleWithChapter(self):
"""
L{tree.setTitle} includes a chapter number if it is passed one.
"""
document = dom.Document()
parent = dom.Element('div')
parent.ownerDocument = document
title = dom.Element('title')
parent.appendChild(title)
titleNodes = [dom.Text()]
titleNodes[0].ownerDocument = document
titleNodes[0].data = 'foo bar'
# Oh yea. The numberer has to agree to put the chapter number in, too.
numberer.setNumberSections(True)
tree.setTitle(parent, titleNodes, '13')
self.assertEqual(title.toxml(), '<title>13. foo bar</title>')
def test_setIndexLink(self):
"""
Tests to make sure that index links are processed when an index page
exists and removed when there is not.
"""
templ = dom.parse(open(d['template']))
indexFilename = 'theIndexFile'
numLinks = len(domhelpers.findElementsWithAttribute(templ,
"class",
"index-link"))
# if our testing template has no index-link nodes, complain about it
self.assertNotEquals(
[],
domhelpers.findElementsWithAttribute(templ,
"class",
"index-link"))
tree.setIndexLink(templ, indexFilename)
self.assertEqual(
[],
domhelpers.findElementsWithAttribute(templ,
"class",
"index-link"))
indexLinks = domhelpers.findElementsWithAttribute(templ,
"href",
indexFilename)
self.assertTrue(len(indexLinks) >= numLinks)
templ = dom.parse(open(d['template']))
self.assertNotEquals(
[],
domhelpers.findElementsWithAttribute(templ,
"class",
"index-link"))
indexFilename = None
tree.setIndexLink(templ, indexFilename)
self.assertEqual(
[],
domhelpers.findElementsWithAttribute(templ,
"class",
"index-link"))
def test_addMtime(self):
"""
L{tree.addMtime} inserts a text node giving the last modification time
of the specified file wherever it encounters an element with the
I{mtime} class.
"""
path = FilePath(self.mktemp())
path.setContent('')
when = time.ctime(path.getModificationTime())
parent = dom.Element('div')
mtime = dom.Element('span')
mtime.setAttribute('class', 'mtime')
parent.appendChild(mtime)
tree.addMtime(parent, path.path)
self.assertEqual(
mtime.toxml(), '<span class="mtime">' + when + '</span>')
def test_makeLineNumbers(self):
"""
L{tree._makeLineNumbers} takes an integer and returns a I{p} tag with
that number of line numbers in it.
"""
numbers = tree._makeLineNumbers(1)
self.assertEqual(numbers.tagName, 'p')
self.assertEqual(numbers.getAttribute('class'), 'py-linenumber')
self.assertIsInstance(numbers.firstChild, dom.Text)
self.assertEqual(numbers.firstChild.nodeValue, '1\n')
numbers = tree._makeLineNumbers(10)
self.assertEqual(numbers.tagName, 'p')
self.assertEqual(numbers.getAttribute('class'), 'py-linenumber')
self.assertIsInstance(numbers.firstChild, dom.Text)
self.assertEqual(
numbers.firstChild.nodeValue,
' 1\n 2\n 3\n 4\n 5\n'
' 6\n 7\n 8\n 9\n10\n')
def test_fontifyPythonNode(self):
"""
L{tree.fontifyPythonNode} accepts a text node and replaces it in its
parent with a syntax colored and line numbered version of the Python
source it contains.
"""
parent = dom.Element('div')
source = dom.Text()
source.data = 'def foo():\n pass\n'
parent.appendChild(source)
tree.fontifyPythonNode(source)
expected = """\
<div><pre class="python"><p class="py-linenumber">1
2
</p><span class="py-src-keyword">def</span> <span class="py-src-identifier">foo</span>():
<span class="py-src-keyword">pass</span>
</pre></div>"""
self.assertEqual(parent.toxml(), expected)
def test_addPyListings(self):
"""
L{tree.addPyListings} accepts a document with nodes with their I{class}
attribute set to I{py-listing} and replaces those nodes with Python
source listings from the file given by the node's I{href} attribute.
"""
listingPath = FilePath(self.mktemp())
listingPath.setContent('def foo():\n pass\n')
parent = dom.Element('div')
listing = dom.Element('a')
listing.setAttribute('href', listingPath.basename())
listing.setAttribute('class', 'py-listing')
parent.appendChild(listing)
tree.addPyListings(parent, listingPath.dirname())
expected = """\
<div><div class="py-listing"><pre><p class="py-linenumber">1
2
</p><span class="py-src-keyword">def</span> <span class="py-src-identifier">foo</span>():
<span class="py-src-keyword">pass</span>
</pre><div class="caption"> - <a href="temp"><span class="filename">temp</span></a></div></div></div>"""
self.assertEqual(parent.toxml(), expected)
def test_addPyListingsSkipLines(self):
"""
If a node with the I{py-listing} class also has a I{skipLines}
attribute, that number of lines from the beginning of the source
listing are omitted.
"""
listingPath = FilePath(self.mktemp())
listingPath.setContent('def foo():\n pass\n')
parent = dom.Element('div')
listing = dom.Element('a')
listing.setAttribute('href', listingPath.basename())
listing.setAttribute('class', 'py-listing')
listing.setAttribute('skipLines', 1)
parent.appendChild(listing)
tree.addPyListings(parent, listingPath.dirname())
expected = """\
<div><div class="py-listing"><pre><p class="py-linenumber">1
</p> <span class="py-src-keyword">pass</span>
</pre><div class="caption"> - <a href="temp"><span class="filename">temp</span></a></div></div></div>"""
self.assertEqual(parent.toxml(), expected)
def test_fixAPI(self):
"""
The element passed to L{tree.fixAPI} has all of its children with the
I{API} class rewritten to contain links to the API which is referred to
by the text they contain.
"""
parent = dom.Element('div')
link = dom.Element('span')
link.setAttribute('class', 'API')
text = dom.Text()
text.data = 'foo'
link.appendChild(text)<|fim▁hole|> parent.appendChild(link)
tree.fixAPI(parent, 'http://example.com/%s')
self.assertEqual(
parent.toxml(),
'<div><span class="API">'
'<a href="http://example.com/foo" title="foo">foo</a>'
'</span></div>')
def test_fixAPIBase(self):
"""
If a node with the I{API} class and a value for the I{base} attribute
is included in the DOM passed to L{tree.fixAPI}, the link added to that
node refers to the API formed by joining the value of the I{base}
attribute to the text contents of the node.
"""
parent = dom.Element('div')
link = dom.Element('span')
link.setAttribute('class', 'API')
link.setAttribute('base', 'bar')
text = dom.Text()
text.data = 'baz'
link.appendChild(text)
parent.appendChild(link)
tree.fixAPI(parent, 'http://example.com/%s')
self.assertEqual(
parent.toxml(),
'<div><span class="API">'
'<a href="http://example.com/bar.baz" title="bar.baz">baz</a>'
'</span></div>')
def test_fixLinks(self):
"""
Links in the nodes of the DOM passed to L{tree.fixLinks} have their
extensions rewritten to the given extension.
"""
parent = dom.Element('div')
link = dom.Element('a')
link.setAttribute('href', 'foo.html')
parent.appendChild(link)
tree.fixLinks(parent, '.xhtml')
self.assertEqual(parent.toxml(), '<div><a href="foo.xhtml"/></div>')
def test_setVersion(self):
"""
Nodes of the DOM passed to L{tree.setVersion} which have the I{version}
class have the given version added to them a child.
"""
parent = dom.Element('div')
version = dom.Element('span')
version.setAttribute('class', 'version')
parent.appendChild(version)
tree.setVersion(parent, '1.2.3')
self.assertEqual(
parent.toxml(), '<div><span class="version">1.2.3</span></div>')
def test_footnotes(self):
"""
L{tree.footnotes} finds all of the nodes with the I{footnote} class in
the DOM passed to it and adds a footnotes section to the end of the
I{body} element which includes them. It also inserts links to those
footnotes from the original definition location.
"""
parent = dom.Element('div')
body = dom.Element('body')
footnote = dom.Element('span')
footnote.setAttribute('class', 'footnote')
text = dom.Text()
text.data = 'this is the footnote'
footnote.appendChild(text)
body.appendChild(footnote)
body.appendChild(dom.Element('p'))
parent.appendChild(body)
tree.footnotes(parent)
self.assertEqual(
parent.toxml(),
'<div><body>'
'<a href="#footnote-1" title="this is the footnote">'
'<super>1</super>'
'</a>'
'<p/>'
'<h2>Footnotes</h2>'
'<ol><li><a name="footnote-1">'
'<span class="footnote">this is the footnote</span>'
'</a></li></ol>'
'</body></div>')
def test_generateTableOfContents(self):
"""
L{tree.generateToC} returns an element which contains a table of
contents generated from the headers in the document passed to it.
"""
parent = dom.Element('body')
header = dom.Element('h2')
text = dom.Text()
text.data = u'header & special character'
header.appendChild(text)
parent.appendChild(header)
subheader = dom.Element('h3')
text = dom.Text()
text.data = 'subheader'
subheader.appendChild(text)
parent.appendChild(subheader)
tableOfContents = tree.generateToC(parent)
self.assertEqual(
tableOfContents.toxml(),
'<ol><li><a href="#auto0">header & special character</a></li><ul><li><a href="#auto1">subheader</a></li></ul></ol>')
self.assertEqual(
header.toxml(),
'<h2>header & special character<a name="auto0"/></h2>')
self.assertEqual(
subheader.toxml(),
'<h3>subheader<a name="auto1"/></h3>')
def test_putInToC(self):
"""
L{tree.putInToC} replaces all of the children of the first node with
the I{toc} class with the given node representing a table of contents.
"""
parent = dom.Element('div')
toc = dom.Element('span')
toc.setAttribute('class', 'toc')
toc.appendChild(dom.Element('foo'))
parent.appendChild(toc)
tree.putInToC(parent, dom.Element('toc'))
self.assertEqual(toc.toxml(), '<span class="toc"><toc/></span>')
def test_invalidTableOfContents(self):
"""
If passed a document with I{h3} elements before any I{h2} element,
L{tree.generateToC} raises L{ValueError} explaining that this is not a
valid document.
"""
parent = dom.Element('body')
parent.appendChild(dom.Element('h3'))
err = self.assertRaises(ValueError, tree.generateToC, parent)
self.assertEqual(
str(err), "No H3 element is allowed until after an H2 element")
def test_notes(self):
"""
L{tree.notes} inserts some additional markup before the first child of
any node with the I{note} class.
"""
parent = dom.Element('div')
noteworthy = dom.Element('span')
noteworthy.setAttribute('class', 'note')
noteworthy.appendChild(dom.Element('foo'))
parent.appendChild(noteworthy)
tree.notes(parent)
self.assertEqual(
noteworthy.toxml(),
'<span class="note"><strong>Note: </strong><foo/></span>')
def test_findNodeJustBefore(self):
"""
L{tree.findNodeJustBefore} returns the previous sibling of the node it
is passed. The list of nodes passed in is ignored.
"""
parent = dom.Element('div')
result = dom.Element('foo')
target = dom.Element('bar')
parent.appendChild(result)
parent.appendChild(target)
self.assertIdentical(
tree.findNodeJustBefore(target, [parent, result]),
result)
# Also, support other configurations. This is a really not nice API.
newTarget = dom.Element('baz')
target.appendChild(newTarget)
self.assertIdentical(
tree.findNodeJustBefore(newTarget, [parent, result]),
result)
def test_getSectionNumber(self):
"""
L{tree.getSectionNumber} accepts an I{H2} element and returns its text
content.
"""
header = dom.Element('foo')
text = dom.Text()
text.data = 'foobar'
header.appendChild(text)
self.assertEqual(tree.getSectionNumber(header), 'foobar')
def test_numberDocument(self):
"""
L{tree.numberDocument} inserts section numbers into the text of each
header.
"""
parent = dom.Element('foo')
section = dom.Element('h2')
text = dom.Text()
text.data = 'foo'
section.appendChild(text)
parent.appendChild(section)
tree.numberDocument(parent, '7')
self.assertEqual(section.toxml(), '<h2>7.1 foo</h2>')
def test_parseFileAndReport(self):
"""
L{tree.parseFileAndReport} parses the contents of the filename passed
to it and returns the corresponding DOM.
"""
path = FilePath(self.mktemp())
path.setContent('<foo bar="baz">hello</foo>\n')
document = tree.parseFileAndReport(path.path)
self.assertXMLEqual(
document.toxml(),
'<?xml version="1.0" ?><foo bar="baz">hello</foo>')
def test_parseFileAndReportMismatchedTags(self):
"""
If the contents of the file passed to L{tree.parseFileAndReport}
contain a mismatched tag, L{process.ProcessingFailure} is raised
indicating the location of the open and close tags which were
mismatched.
"""
path = FilePath(self.mktemp())
path.setContent(' <foo>\n\n </bar>')
err = self.assertRaises(
process.ProcessingFailure, tree.parseFileAndReport, path.path)
self.assertEqual(
str(err),
"mismatched close tag at line 3, column 4; expected </foo> "
"(from line 1, column 2)")
# Test a case which requires involves proper close tag handling.
path.setContent('<foo><bar></bar>\n </baz>')
err = self.assertRaises(
process.ProcessingFailure, tree.parseFileAndReport, path.path)
self.assertEqual(
str(err),
"mismatched close tag at line 2, column 4; expected </foo> "
"(from line 1, column 0)")
def test_parseFileAndReportParseError(self):
"""
If the contents of the file passed to L{tree.parseFileAndReport} cannot
be parsed for a reason other than mismatched tags,
L{process.ProcessingFailure} is raised with a string describing the
parse error.
"""
path = FilePath(self.mktemp())
path.setContent('\n foo')
err = self.assertRaises(
process.ProcessingFailure, tree.parseFileAndReport, path.path)
self.assertEqual(str(err), 'syntax error at line 2, column 3')
def test_parseFileAndReportIOError(self):
"""
If an L{IOError} is raised while reading from the file specified to
L{tree.parseFileAndReport}, a L{process.ProcessingFailure} is raised
indicating what the error was. The file should be closed by the
time the exception is raised to the caller.
"""
class FakeFile:
_open = True
def read(self, bytes=None):
raise IOError(errno.ENOTCONN, 'socket not connected')
def close(self):
self._open = False
theFile = FakeFile()
def fakeOpen(filename):
return theFile
err = self.assertRaises(
process.ProcessingFailure, tree.parseFileAndReport, "foo", fakeOpen)
self.assertEqual(str(err), "socket not connected, filename was 'foo'")
self.assertFalse(theFile._open)
class XMLParsingTests(unittest.TestCase):
"""
Tests for various aspects of parsing a Lore XML input document using
L{tree.parseFileAndReport}.
"""
def _parseTest(self, xml):
path = FilePath(self.mktemp())
path.setContent(xml)
return tree.parseFileAndReport(path.path)
def test_withoutDocType(self):
"""
A Lore XML input document may omit a I{DOCTYPE} declaration. If it
does so, the XHTML1 Strict DTD is used.
"""
# Parsing should succeed.
document = self._parseTest("<foo>uses an xhtml entity: ©</foo>")
# But even more than that, the © entity should be turned into the
# appropriate unicode codepoint.
self.assertEqual(
domhelpers.gatherTextNodes(document.documentElement),
u"uses an xhtml entity: \N{COPYRIGHT SIGN}")
def test_withTransitionalDocType(self):
"""
A Lore XML input document may include a I{DOCTYPE} declaration
referring to the XHTML1 Transitional DTD.
"""
# Parsing should succeed.
document = self._parseTest("""\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<foo>uses an xhtml entity: ©</foo>
""")
# But even more than that, the © entity should be turned into the
# appropriate unicode codepoint.
self.assertEqual(
domhelpers.gatherTextNodes(document.documentElement),
u"uses an xhtml entity: \N{COPYRIGHT SIGN}")
def test_withStrictDocType(self):
"""
A Lore XML input document may include a I{DOCTYPE} declaration
referring to the XHTML1 Strict DTD.
"""
# Parsing should succeed.
document = self._parseTest("""\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<foo>uses an xhtml entity: ©</foo>
""")
# But even more than that, the © entity should be turned into the
# appropriate unicode codepoint.
self.assertEqual(
domhelpers.gatherTextNodes(document.documentElement),
u"uses an xhtml entity: \N{COPYRIGHT SIGN}")
def test_withDisallowedDocType(self):
"""
A Lore XML input document may not include a I{DOCTYPE} declaration
referring to any DTD other than XHTML1 Transitional or XHTML1 Strict.
"""
self.assertRaises(
process.ProcessingFailure,
self._parseTest,
"""\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">
<foo>uses an xhtml entity: ©</foo>
""")
class XMLSerializationTests(unittest.TestCase, XMLAssertionMixin):
"""
Tests for L{tree._writeDocument}.
"""
def test_nonASCIIData(self):
"""
A document which contains non-ascii characters is serialized to a
file using UTF-8.
"""
document = dom.Document()
parent = dom.Element('foo')
text = dom.Text()
text.data = u'\N{SNOWMAN}'
parent.appendChild(text)
document.appendChild(parent)
outFile = self.mktemp()
tree._writeDocument(outFile, document)
self.assertXMLEqual(
FilePath(outFile).getContent(),
u'<foo>\N{SNOWMAN}</foo>'.encode('utf-8'))
class LatexSpitterTestCase(unittest.TestCase):
"""
Tests for the Latex output plugin.
"""
def test_indexedSpan(self):
"""
Test processing of a span tag with an index class results in a latex
\\index directive the correct value.
"""
doc = dom.parseString('<span class="index" value="name" />').documentElement
out = StringIO()
spitter = LatexSpitter(out.write)
spitter.visitNode(doc)
self.assertEqual(out.getvalue(), u'\\index{name}\n')
class ScriptTests(unittest.TestCase):
"""
Tests for L{twisted.lore.scripts.lore}, the I{lore} command's
implementation,
"""
def test_getProcessor(self):
"""
L{lore.getProcessor} loads the specified output plugin from the
specified input plugin.
"""
processor = lore.getProcessor("lore", "html", options)
self.assertNotIdentical(processor, None)<|fim▁end|> | |
<|file_name|>day_09.rs<|end_file_name|><|fim▁begin|>use std::iter::{Enumerate, Peekable};
use std::str::{Chars, FromStr};
type Cursor<'c> = Peekable<Enumerate<Chars<'c>>>;
#[derive(Debug, PartialEq)]
pub enum Ast {
Num(f64),
Op(char, Box<Ast>, Box<Ast>),
}
impl Ast {
fn index(chars: &mut Cursor) -> usize {
match chars.peek() {
Some(&(index, _)) => index,
None => 0,
}
}
fn parse_num(chars: &mut Cursor) -> Result<Self, ParseAstError> {
let mut num = String::new();
let index = Self::index(chars.by_ref());
while let Some(&(_, char)) = chars.peek() {
match char {
'+' | '*' | '/' => break,
'-' if !num.is_empty() => break,
'0'..='9' | '-' => {
chars.next();
num.push(char);
}
_ => return Err(ParseAstError(Self::index(chars.by_ref()))),
}
}
num.as_str()
.parse()
.map(Ast::Num)
.map_err(|_| ParseAstError(index))
}
fn parse_high_priority_op(chars: &mut Cursor) -> Option<char> {
match chars.peek() {
Some(&(_, '*')) | Some(&(_, '/')) => chars.next().map(|(_, op)| op),
_ => None,
}
}
fn parse_term(chars: &mut Cursor) -> Result<Self, ParseAstError> {
let mut root = Self::parse_num(chars.by_ref());
if let Some(op) = Self::parse_high_priority_op(chars.by_ref()) {
root = match (root, Self::parse_num(chars.by_ref())) {
(Ok(left), Ok(right)) => Ok(Ast::Op(op, Box::new(left), Box::new(right))),
(Ok(_), Err(right)) => Err(right),
(Err(left), _) => Err(left),
}
}
root
}
fn parse_low_priority_op(chars: &mut Cursor) -> Option<char> {
match chars.peek() {
Some(&(_, '+')) | Some(&(_, '-')) => chars.next().map(|(_, op)| op),
_ => None,
}
}
fn parse_expression(chars: &mut Cursor) -> Result<Self, ParseAstError> {
let mut root = Self::parse_term(chars.by_ref());
while let Some(op) = Self::parse_low_priority_op(chars.by_ref()) {
root = match (root, Self::parse_term(chars.by_ref())) {
(Ok(left), Ok(right)) => Ok(Ast::Op(op, Box::new(left), Box::new(right))),
(Ok(_), Err(right)) => Err(right),
(Err(left), _) => Err(left),
}
}
root
}
}
impl FromStr for Ast {
type Err = ParseAstError;
fn from_str(source: &str) -> Result<Self, Self::Err> {
let mut chars = source.chars().enumerate().peekable();
Self::parse_expression(chars.by_ref())
}
}
#[derive(Debug, PartialEq)]
pub struct ParseAstError(usize);
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn number() {
assert_eq!("4".parse(), Ok(Ast::Num(4.0)));
}
#[test]
fn error() {
assert_eq!(Ast::from_str("abc"), Err(ParseAstError(0)))
}
#[test]
fn negative_number() {
assert_eq!("-6".parse(), Ok(Ast::Num(-6.0)))
}
#[test]
fn addition() {
assert_eq!(
"5+3".parse(),
Ok(Ast::Op(
'+',
Box::new(Ast::Num(5.0)),
Box::new(Ast::Num(3.0)),
))
)
}
#[test]
fn left_hand_side_error() {
assert_eq!(Ast::from_str("abc+3"), Err(ParseAstError(0)))
}
#[test]
fn right_hand_side_error() {
assert_eq!(Ast::from_str("5+abc"), Err(ParseAstError(2)))
}
#[test]
fn subtraction() {
assert_eq!(
"6-4".parse(),
Ok(Ast::Op(
'-',
Box::new(Ast::Num(6.0)),
Box::new(Ast::Num(4.0))
))
)
}
#[test]
fn multiplication() {
assert_eq!(
"5*8".parse(),
Ok(Ast::Op(
'*',
Box::new(Ast::Num(5.0)),
Box::new(Ast::Num(8.0))
))
)
}
#[test]
fn division() {
assert_eq!(
"9/3".parse(),
Ok(Ast::Op(
'/',
Box::new(Ast::Num(9.0)),
Box::new(Ast::Num(3.0))
))
)
}
#[test]
fn multiple_operation() {
assert_eq!(
"4+6/2-3*9".parse(),
Ok(Ast::Op(
'-',
Box::new(Ast::Op(
'+',
Box::new(Ast::Num(4.0)),
Box::new(Ast::Op(
'/',
Box::new(Ast::Num(6.0)),
Box::new(Ast::Num(2.0))
))
)),
Box::new(Ast::Op(
'*',
Box::new(Ast::Num(3.0)),
Box::new(Ast::Num(9.0))
))
))
)
}<|fim▁hole|> fn unknown_operator() {
assert_eq!(Ast::from_str("6&5"), Err(ParseAstError(1)))
}
}<|fim▁end|> |
#[test] |
<|file_name|>test_writing.py<|end_file_name|><|fim▁begin|>"""
(Future home of) Tests for program enrollment writing Python API.
Currently, we do not directly unit test the functions in api/writing.py.
This is okay for now because they are all used in
`rest_api.v1.views` and is thus tested through `rest_api.v1.tests.test_views`.
Eventually it would be good to directly test the Python API function and just use
mocks in the view tests.
This file serves as a placeholder and reminder to do that the next time there
is development on the program_enrollments writing API.<|fim▁hole|><|fim▁end|> | """
from __future__ import absolute_import, unicode_literals |
<|file_name|>gulpfile.babel.js<|end_file_name|><|fim▁begin|>'use strict';
import gulp from 'gulp';<|fim▁hole|>import webpack from 'webpack';
import path from 'path';
import sync from 'run-sequence';
import rename from 'gulp-rename';
import template from 'gulp-template';
import fs from 'fs';
import yargs from 'yargs';
import lodash from 'lodash';
import gutil from 'gulp-util';
import serve from 'browser-sync';
import del from 'del';
import webpackDevMiddleware from 'webpack-dev-middleware';
import webpackHotMiddleware from 'webpack-hot-middleware';
import colorsSupported from 'supports-color';
import historyApiFallback from 'connect-history-api-fallback';
let root = 'client';
// helper method for resolving paths
let resolveToApp = (glob = '') => {
return path.join(root, 'app', glob); // app/{glob}
};
let resolveToComponents = (glob = '') => {
return path.join(root, 'app/components', glob); // app/components/{glob}
};
// map of all paths
let paths = {
js: resolveToComponents('**/*!(.spec.js).js'), // exclude spec files
styl: resolveToApp('**/*.scss'), // stylesheets
html: [
resolveToApp('**/*.html'),
path.join(root, 'index.html')
],
entry: [
'babel-polyfill',
path.join(__dirname, root, 'app/app.js')
],
output: root,
blankTemplates: path.join(__dirname, 'generator', 'component/**/*.**'),
dest: path.join(__dirname, 'dist')
};
// use webpack.config.js to build modules
gulp.task('webpack', ['clean'], (cb) => {
const config = require('./webpack.dist.config');
config.entry.app = paths.entry;
webpack(config, (err, stats) => {
if(err) {
throw new gutil.PluginError("webpack", err);
}
gutil.log("[webpack]", stats.toString({
colors: colorsSupported,
chunks: false,
errorDetails: true
}));
cb();
});
});
gulp.task('serve', () => {
const config = require('./webpack.dev.config');
config.entry.app = [
// this modules required to make HRM working
// it responsible for all this webpack magic
'webpack-hot-middleware/client?reload=true',
// application entry point
].concat(paths.entry);
var compiler = webpack(config);
serve({
port: process.env.PORT || 3000,
open: false,
server: {baseDir: root},
middleware: [
historyApiFallback(),
webpackDevMiddleware(compiler, {
stats: {
colors: colorsSupported,
chunks: false,
modules: false
},
publicPath: config.output.publicPath
}),
webpackHotMiddleware(compiler)
]
});
});
gulp.task('watch', ['serve']);
gulp.task('component', () => {
const cap = (val) => {
return val.charAt(0).toUpperCase() + val.slice(1);
};
const name = yargs.argv.name;
const parentPath = yargs.argv.parent || '';
const destPath = path.join(resolveToComponents(), parentPath, name);
return gulp.src(paths.blankTemplates)
.pipe(template({
name: name,
upCaseName: cap(name)
}))
.pipe(rename((path) => {
path.basename = path.basename.replace('temp', name);
}))
.pipe(gulp.dest(destPath));
});
gulp.task('clean', (cb) => {
del([paths.dest]).then(function (paths) {
gutil.log("[clean]", paths);
cb();
})
});
gulp.task('default', ['watch']);<|fim▁end|> | |
<|file_name|>0015_datapoint_sorttimestamp.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
<|fim▁hole|>
class Migration(migrations.Migration):
dependencies = [
('agentex', '0014_remove_decision_datacollect'),
]
operations = [
migrations.AddField(
model_name='datapoint',
name='sorttimestamp',
field=models.DateTimeField(null=True, blank=True),
),
]<|fim▁end|> | from django.db import models, migrations
|
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>#![cfg_attr(all(test, feature = "nightly"), feature(test))] // we only need test feature when testing
#[macro_use] extern crate log;
extern crate syntex_syntax;
extern crate toml;
extern crate env_logger;
#[macro_use] extern crate clap;
extern crate racer;
#[cfg(not(test))]
use racer::core;
#[cfg(not(test))]
use racer::util;
#[cfg(not(test))]
use racer::core::Match;
#[cfg(not(test))]
use racer::util::getline;
#[cfg(not(test))]
use racer::nameres::{do_file_search, do_external_search, PATH_SEP};
#[cfg(not(test))]
use racer::scopes;
#[cfg(not(test))]
use std::path::{Path, PathBuf};
#[cfg(not(test))]
use std::io::{self, BufRead};
#[cfg(not(test))]
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
#[cfg(not(test))]
fn match_with_snippet_fn(m: Match, session: &core::Session, interface: Interface) {
let (linenum, charnum) = scopes::point_to_coords_from_file(&m.filepath, m.point, session).unwrap();
if m.matchstr == "" {
panic!("MATCHSTR is empty - waddup?");
}
let snippet = racer::snippets::snippet_for_match(&m, session);
match interface {
Interface::Text =>
println!("MATCH {};{};{};{};{};{:?};{};{}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr.replace(";", "\\;").split_whitespace().collect::<Vec<&str>>().join(" "),
format!("{:?}", m.docs).replace(";", "\\;")),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{}\t{:?}\t{}\t{:?}",
m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr.replace("\t", "\\t").split_whitespace().collect::<Vec<&str>>().join(" "),
m.docs),
}
}
#[cfg(not(test))]
fn match_fn(m: Match, session: &core::Session, interface: Interface) {
if let Some((linenum, charnum)) = scopes::point_to_coords_from_file(&m.filepath,
m.point,
session) {
match interface {
Interface::Text =>
println!("MATCH {},{},{},{},{:?},{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr.split_whitespace().collect::<Vec<&str>>().join(" ")),
Interface::TabText =>
println!("MATCH\t{}\t{}\t{}\t{}\t{:?}\t{}",
m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr.split_whitespace().collect::<Vec<&str>>().join(" ")),
}
} else {
error!("Could not resolve file coords for match {:?}", m);
}
}
#[cfg(not(test))]
fn complete(cfg: Config, print_type: CompletePrinter) {
if cfg.fqn.is_some() {
return external_complete(cfg, print_type);
}
complete_by_line_coords(cfg, print_type);
}
#[cfg(not(test))]
fn complete_by_line_coords(cfg: Config,
print_type: CompletePrinter) {
// input: linenum, colnum, fname
let tb = std::thread::Builder::new().name("searcher".to_owned());
// PD: this probably sucks for performance, but lots of plugins
// end up failing and leaving tmp files around if racer crashes,
// so catch the crash.
let res = tb.spawn(move || {
run_the_complete_fn(&cfg, print_type);
}).unwrap();
if let Err(e) = res.join() {
error!("Search thread paniced: {:?}", e);
}
println!("END");
}
#[cfg(not(test))]
#[derive(Debug)]
enum CompletePrinter {
Normal,
WithSnippets
}
#[cfg(not(test))]<|fim▁hole|>
let mut rawbytes = Vec::new();
stdin.lock().read_until(0x04, &mut rawbytes).unwrap();
let buf = String::from_utf8(rawbytes).unwrap();
cache.cache_file_contents(file, buf);
}
#[cfg(not(test))]
fn run_the_complete_fn(cfg: &Config, print_type: CompletePrinter) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let substitute_file = cfg.substitute_file.as_ref().unwrap_or(fn_path);
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, substitute_file);
if substitute_file.to_str() == Some("-") {
cache_file_contents_from_stdin(substitute_file, &cache);
}
let src = session.load_file(fn_path);
let line = &getline(substitute_file, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
let point = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
for m in core::complete_from_file(&src, fn_path, point, &session) {
match print_type {
CompletePrinter::Normal => match_fn(m, &session, cfg.interface),
CompletePrinter::WithSnippets => match_with_snippet_fn(m, &session, cfg.interface),
};
}
}
#[cfg(not(test))]
fn external_complete(cfg: Config, print_type: CompletePrinter) {
// input: a command line string passed in
let p: Vec<&str> = cfg.fqn.as_ref().unwrap().split("::").collect();
let cwd = Path::new(".");
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, cwd, cwd);
for m in do_file_search(p[0], cwd) {
if p.len() == 1 {
match print_type {
CompletePrinter::Normal => match_fn(m, &session, cfg.interface),
CompletePrinter::WithSnippets => match_with_snippet_fn(m, &session, cfg.interface),
}
} else {
for m in do_external_search(&p[1..], &m.filepath, m.point,
core::SearchType::StartsWith,
core::Namespace::BothNamespaces, &session) {
match print_type {
CompletePrinter::Normal => match_fn(m, &session, cfg.interface),
CompletePrinter::WithSnippets => match_with_snippet_fn(m, &session, cfg.interface),
}
}
}
}
}
#[cfg(not(test))]
fn prefix(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let substitute_file = cfg.substitute_file.as_ref().unwrap_or(fn_path);
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, substitute_file);
if substitute_file.to_str() == Some("-") {
cache_file_contents_from_stdin(substitute_file, &cache);
}
// print the start, end, and the identifier prefix being matched
let line = &getline(fn_path, cfg.linenum, &session);
let (start, pos) = util::expand_ident(line, cfg.charnum);
match cfg.interface {
Interface::Text =>
println!("PREFIX {},{},{}", start, pos, &line[start..pos]),
Interface::TabText =>
println!("PREFIX\t{}\t{}\t{}", start, pos, &line[start..pos]),
}
}
#[cfg(not(test))]
fn find_definition(cfg: Config) {
let fn_path = &*cfg.fn_name.as_ref().unwrap();
let substitute_file = cfg.substitute_file.as_ref().unwrap_or(fn_path);
let cache = core::FileCache::new();
let session = core::Session::from_path(&cache, fn_path, substitute_file);
if substitute_file.to_str() == Some("-") {
cache_file_contents_from_stdin(substitute_file, &cache);
}
let src = session.load_file(fn_path);
let pos = scopes::coords_to_point(&src, cfg.linenum, cfg.charnum);
core::find_definition(&src, fn_path, pos, &session).map(|m| match_fn(m, &session, cfg.interface));
println!("END");
}
#[cfg(not(test))]
fn check_rust_src_env_var() {
if let Ok(srcpaths) = std::env::var("RUST_SRC_PATH") {
let v = srcpaths.split(PATH_SEP).collect::<Vec<_>>();
if !v.is_empty() {
let f = Path::new(v[0]);
if !f.exists() {
println!("racer can't find the directory pointed to by the RUST_SRC_PATH variable \"{}\". Try using an absolute fully qualified path and make sure it points to the src directory of a rust checkout - e.g. \"/home/foouser/src/rust/src\".", srcpaths);
std::process::exit(1);
} else if !f.join("libstd").exists() {
println!("Unable to find libstd under RUST_SRC_PATH. N.B. RUST_SRC_PATH variable needs to point to the *src* directory inside a rust checkout e.g. \"/home/foouser/src/rust/src\". Current value \"{}\"", srcpaths);
std::process::exit(1);
}
}
} else {
let default_paths = [
"/usr/local/src/rust/src",
"/usr/src/rust/src",
];
for &path in &default_paths {
let f = Path::new(path);
if f.exists() {
std::env::set_var("RUST_SRC_PATH", path);
return;
}
}
println!("RUST_SRC_PATH environment variable must be set to point to the src directory of a rust checkout. E.g. \"/home/foouser/src/rust/src\"");
std::process::exit(1);
}
}
#[cfg(not(test))]
fn daemon(cfg: Config) {
let mut input = String::new();
while let Ok(n) = io::stdin().read_line(&mut input) {
// '\n' == 1
if n == 1 {
break;
}
// We add the setting NoBinaryName because in daemon mode we won't be passed the preceeding
// binary name
let cli = build_cli().setting(AppSettings::NoBinaryName);
let matches = match cfg.interface {
Interface::Text => cli.get_matches_from(input.trim_right().split_whitespace()),
Interface::TabText => cli.get_matches_from(input.trim_right().split('\t'))
};
run(matches, cfg.interface);
input.clear();
}
}
#[cfg(not(test))]
#[derive(Copy, Clone)]
enum Interface {
Text, // The original human-readable format.
TabText, // Machine-readable format. This is basically the same as Text, except that all field
// separators are replaced with tabs.
// In `deamon` mode tabs are also used to delimit command arguments.
}
#[cfg(not(test))]
impl Default for Interface {
fn default() -> Self { Interface::Text }
}
#[cfg(not(test))]
#[derive(Default)]
struct Config {
fqn: Option<String>,
linenum: usize,
charnum: usize,
fn_name: Option<PathBuf>,
substitute_file: Option<PathBuf>,
interface: Interface,
}
#[cfg(not(test))]
impl<'a> From<&'a ArgMatches<'a>> for Config {
fn from(m: &'a ArgMatches) -> Self {
// We check for charnum because it's the second argument, which means more than just
// an FQN was used (i.e. racer complete <linenum> <charnum> <fn_name> [substitute_file])
if m.is_present("charnum") {
let cfg = Config {
charnum: value_t_or_exit!(m.value_of("charnum"), usize),
fn_name: m.value_of("path").map(PathBuf::from),
substitute_file: m.value_of("substitute_file").map(PathBuf::from),
..Default::default()
};
if !m.is_present("linenum") {
// Becasue of the hack to allow fqn and linenum to share a single arg we set FQN
// to None and set the charnum correctly using the FQN arg so there's no
// hackery later
return Config {linenum: value_t_or_exit!(m.value_of("fqn"), usize), .. cfg };
}
return Config {linenum: value_t_or_exit!(m.value_of("linenum"), usize), .. cfg };
}
Config {fqn: m.value_of("fqn").map(ToOwned::to_owned), ..Default::default() }
}
}
#[cfg(not(test))]
fn build_cli<'a, 'b>() -> App<'a, 'b> {
// we use the more verbose "Builder Pattern" to create the CLI because it's a littel faster
// than the less verbose "Usage String" method...faster, meaning runtime speed since that's
// extremely important here
App::new("racer")
.version(env!("CARGO_PKG_VERSION"))
.author("Phil Dawes")
.about("A Rust code completion utility")
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequiredElseHelp])
.arg(Arg::with_name("interface")
.long("interface")
.short("i")
.takes_value(true)
.possible_value("text")
.possible_value("tab-text")
.value_name("mode")
.help("Interface mode"))
.subcommand(SubCommand::with_name("complete")
.about("performs completion and returns matches")
// We set an explicit usage string here, instead of letting `clap` write one due to
// using a single arg for multiple purposes
.usage("racer complete <fqn>\n\t\
racer complete <linenum> <charnum> <path> [substitute_file]")
// Next we make it an error to run without any args
.setting(AppSettings::ArgRequiredElseHelp)
// Because we want a single arg to play two roles and be compatible with previous
// racer releases, we have to be a little hacky here...
//
// We start by making 'fqn' the first positional arg, which will hold this dual value
// of either an FQN as it says, or secretly a line-number
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
// 'linenum' **MUST** be last (or have the highest index so that it's never actually
// used by the user, but still appears in the help text)
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.subcommand(SubCommand::with_name("daemon")
.about("start a process that receives the above commands via stdin"))
.subcommand(SubCommand::with_name("find-definition")
.about("finds the definition of a function")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for name to match")
.required(true))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file")))
.subcommand(SubCommand::with_name("prefix")
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")
.required(true))
.arg(Arg::with_name("charnum")
.help("The char number at which to find the match")
.required(true))
.arg(Arg::with_name("path")
.help("The path to search for the match to prefix")
.required(true)))
.subcommand(SubCommand::with_name("complete-with-snippet")
.about("performs completion and returns more detailed matches")
.usage("racer complete-with-snippet <fqn>\n\t\
racer complete-with-snippet <linenum> <charnum> <path> [substitute_file]")
.setting(AppSettings::ArgRequiredElseHelp)
.arg(Arg::with_name("fqn")
.help("complete with a fully-qualified-name (e.g. std::io::)"))
.arg(Arg::with_name("charnum")
.help("The char number to search for matches")
.requires("path"))
.arg(Arg::with_name("path")
.help("The path to search for name to match"))
.arg(Arg::with_name("substitute_file")
.help("An optional substitute file"))
.arg(Arg::with_name("linenum")
.help("The line number at which to find the match")))
.after_help("For more information about a specific command try 'racer <command> --help'")
}
#[cfg(not(test))]
fn main() {
env_logger::init().unwrap();
check_rust_src_env_var();
let matches = build_cli().get_matches();
let interface = match matches.value_of("interface") {
Some("tab-text") => Interface::TabText,
Some("text") | _ => Interface::Text
};
run(matches, interface);
}
#[cfg(not(test))]
fn run(m: ArgMatches, interface: Interface) {
use CompletePrinter::{Normal, WithSnippets};
// match raw subcommand, and get it's sub-matches "m"
if let (name, Some(sub_m)) = m.subcommand() {
let mut cfg = Config::from(sub_m);
cfg.interface = interface;
match name {
"daemon" => daemon(cfg),
"prefix" => prefix(cfg),
"complete" => complete(cfg, Normal),
"complete-with-snippet" => complete(cfg, WithSnippets),
"find-definition" => find_definition(cfg),
_ => unreachable!()
}
}
}<|fim▁end|> | fn cache_file_contents_from_stdin(file: &PathBuf, cache: &core::FileCache) {
let stdin = io::stdin(); |
<|file_name|>ticket2855.py<|end_file_name|><|fim▁begin|># Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test the style of toggle and radio buttons inside a palette. The buttons
contains only an icon and should be rendered similarly to the toolbar
controls. Ticket #2855.
"""
from gi.repository import Gtk
from sugar3.graphics.palette import Palette
from sugar3.graphics.icon import Icon
from sugar3.graphics import style
import common
test = common.TestPalette()
palette = Palette('Test radio and toggle')
test.set_palette(palette)<|fim▁hole|>
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
toggle = Gtk.ToggleButton()
icon = Icon(icon_name='go-previous', pixel_size=style.STANDARD_ICON_SIZE)
toggle.set_image(icon)
box.pack_start(toggle, False, False, 0)
toggle.show()
radio = Gtk.RadioButton()
icon = Icon(icon_name='go-next', pixel_size=style.STANDARD_ICON_SIZE)
radio.set_image(icon)
radio.set_mode(False)
box.pack_start(radio, False, False, 0)
radio.show()
palette.set_content(box)
box.show()
if __name__ == '__main__':
common.main(test)<|fim▁end|> | |
<|file_name|>guiInspector.cc<|end_file_name|><|fim▁begin|>//-----------------------------------------------------------------------------
// Copyright (c) 2013 GarageGames, LLC
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//-----------------------------------------------------------------------------
#include "gui/editor/guiInspector.h"
#include "gui/buttons/guiIconButtonCtrl.h"
#include "memory/frameAllocator.h"
//////////////////////////////////////////////////////////////////////////
// GuiInspector
//////////////////////////////////////////////////////////////////////////
// The GuiInspector Control houses the body of the inspector.
// It is not exposed as a conobject because it merely does the grunt work
// and is only meant to be used when housed by a scroll control. Therefore
// the GuiInspector control is a scroll control that creates it's own
// content. That content being of course, the GuiInspector control.
IMPLEMENT_CONOBJECT(GuiInspector);
GuiInspector::GuiInspector()
{
mGroups.clear();
mTarget = NULL;
mPadding = 1;
}
GuiInspector::~GuiInspector()
{
clearGroups();
}
bool GuiInspector::onAdd()
{
if( !Parent::onAdd() )
return false;
return true;
}
//////////////////////////////////////////////////////////////////////////
// Handle Parent Sizing (We constrain ourself to our parents width)
//////////////////////////////////////////////////////////////////////////
void GuiInspector::parentResized(const Point2I &oldParentExtent, const Point2I &newParentExtent)
{
GuiControl *parent = getParent();
if( parent && dynamic_cast<GuiScrollCtrl*>(parent) != NULL )
{
GuiScrollCtrl *scroll = dynamic_cast<GuiScrollCtrl*>(parent);
setWidth( ( newParentExtent.x - ( scroll->scrollBarThickness() + 4 ) ) );
}
else
Parent::parentResized(oldParentExtent,newParentExtent);
}
bool GuiInspector::findExistentGroup( StringTableEntry groupName )
{
// If we have no groups, it couldn't possibly exist
if( mGroups.empty() )
return false;
// Attempt to find it in the group list
Vector<GuiInspectorGroup*>::iterator i = mGroups.begin();
for( ; i != mGroups.end(); i++ )
{
if( dStricmp( (*i)->getGroupName(), groupName ) == 0 )
return true;
}
return false;
}
void GuiInspector::clearGroups()
{
// If we're clearing the groups, we want to clear our target too.
mTarget = NULL;
// If we have no groups, there's nothing to clear!
if( mGroups.empty() )
return;
// Attempt to find it in the group list
Vector<GuiInspectorGroup*>::iterator i = mGroups.begin();
for( ; i != mGroups.end(); i++ )
if( (*i)->isProperlyAdded() )
(*i)->deleteObject();
mGroups.clear();
}
void GuiInspector::inspectObject( SimObject *object )
{
GuiCanvas *guiCanvas = getRoot();
if( !guiCanvas )
return;
SimObjectPtr<GuiControl> currResponder = guiCanvas->getFirstResponder();
// If our target is the same as our current target, just update the groups.
if( mTarget == object )
{
Vector<GuiInspectorGroup*>::iterator i = mGroups.begin();
for ( ; i != mGroups.end(); i++ )
(*i)->inspectGroup();
// Don't steal first responder
if( !currResponder.isNull() )
guiCanvas->setFirstResponder( currResponder );
return;
}
// Clear our current groups
clearGroups();
// Set Target
mTarget = object;
// Always create the 'general' group (for un-grouped fields)
GuiInspectorGroup* general = new GuiInspectorGroup( mTarget, "General", this );
if( general != NULL )
{
general->registerObject();
mGroups.push_back( general );
addObject( general );
}
// Grab this objects field list
AbstractClassRep::FieldList &fieldList = mTarget->getModifiableFieldList();
AbstractClassRep::FieldList::iterator itr;
// Iterate through, identifying the groups and create necessary GuiInspectorGroups
for(itr = fieldList.begin(); itr != fieldList.end(); itr++)
{
if(itr->type == AbstractClassRep::StartGroupFieldType && !findExistentGroup( itr->pGroupname ) )
{
GuiInspectorGroup *group = new GuiInspectorGroup( mTarget, itr->pGroupname, this );
if( group != NULL )
{
group->registerObject();
mGroups.push_back( group );
addObject( group );
}
}
}
// Deal with dynamic fields
GuiInspectorGroup *dynGroup = new GuiInspectorDynamicGroup( mTarget, "Dynamic Fields", this);
if( dynGroup != NULL )
{
dynGroup->registerObject();
mGroups.push_back( dynGroup );
addObject( dynGroup );
}
// If the general group is still empty at this point, kill it.
for(S32 i=0; i<mGroups.size(); i++)
{
if(mGroups[i] == general && general->mStack->size() == 0)
{
mGroups.erase(i);
general->deleteObject();
updatePanes();
}
}
// Don't steal first responder
if( !currResponder.isNull() )
guiCanvas->setFirstResponder( currResponder );
}
ConsoleMethod( GuiInspector, inspect, void, 3, 3, "(obj) Goes through the object's fields and autogenerates editor boxes\n"
"@return No return value.")
{
SimObject * target = Sim::findObject(argv[2]);
if(!target)
{
if(dAtoi(argv[2]) > 0)
Con::warnf("%s::inspect(): invalid object: %s", argv[0], argv[2]);
object->clearGroups();
return;
}
object->inspectObject(target);
}
ConsoleMethod( GuiInspector, getInspectObject, const char*, 2, 2, "() - Returns currently inspected object\n"
"@return The Object's ID as a string.")
{
SimObject *pSimObject = object->getInspectObject();
if( pSimObject != NULL )
return pSimObject->getIdString();
return "";
}
void GuiInspector::setName( const char* newName )
{
if( mTarget == NULL )
return;
// Only assign a new name if we provide one
mTarget->assignName(newName);
}
ConsoleMethod( GuiInspector, setName, void, 3, 3, "(NewObjectName) Set object name.\n"
"@return No return value.")
{
object->setName(argv[2]);
}
//////////////////////////////////////////////////////////////////////////
// GuiInspectorField
//////////////////////////////////////////////////////////////////////////
// The GuiInspectorField control is a representation of a single abstract
// field for a given ConsoleObject derived object. It handles creation
// getting and setting of it's fields data and editing control.
//
// Creation of custom edit controls is done through this class and is
// dependent upon the dynamic console type, which may be defined to be
// custom for different types.
//
// Note : GuiInspectorField controls must have a GuiInspectorGroup as their
// parent.
IMPLEMENT_CONOBJECT(GuiInspectorField);
// Caption width is in percentage of total width
S32 GuiInspectorField::smCaptionWidth = 50;
GuiInspectorField::GuiInspectorField( GuiInspectorGroup* parent, SimObjectPtr<SimObject> target, AbstractClassRep::Field* field )
{
if( field != NULL )
mCaption = StringTable->insert( field->pFieldname );
else
mCaption = StringTable->EmptyString;
mParent = parent;
mTarget = target;
mField = field;
mCanSave = false;
mFieldArrayIndex = NULL;
mBounds.set(0,0,100,18);
}
GuiInspectorField::GuiInspectorField()
{
mCaption = StringTable->EmptyString;
mParent = NULL;
mTarget = NULL;
mField = NULL;
mFieldArrayIndex = NULL;
mBounds.set(0,0,100,18);
mCanSave = false;
}
GuiInspectorField::~GuiInspectorField()
{
}
//////////////////////////////////////////////////////////////////////////
// Get/Set Data Functions
//////////////////////////////////////////////////////////////////////////
void GuiInspectorField::setData( const char* data )
{
if( mField == NULL || mTarget == NULL )
return;
mTarget->inspectPreApply();
mTarget->setDataField( mField->pFieldname, mFieldArrayIndex, data );
// Force our edit to update
updateValue( data );
mTarget->inspectPostApply();
}
const char* GuiInspectorField::getData()
{
if( mField == NULL || mTarget == NULL )
return "";
return mTarget->getDataField( mField->pFieldname, mFieldArrayIndex );
}
void GuiInspectorField::setInspectorField( AbstractClassRep::Field *field, const char*arrayIndex )
{
mField = field;
if( arrayIndex != NULL )
{
mFieldArrayIndex = StringTable->insert( arrayIndex );
S32 frameTempSize = dStrlen( field->pFieldname ) + 32;
FrameTemp<char> valCopy( frameTempSize );
dSprintf( (char *)valCopy, frameTempSize, "%s%s", field->pFieldname, arrayIndex );
mCaption = StringTable->insert( valCopy );
}
else
mCaption = StringTable->insert( field->pFieldname );
}
StringTableEntry GuiInspectorField::getFieldName()
{
// Sanity
if ( mField == NULL )
return StringTable->EmptyString;
// Array element?
if( mFieldArrayIndex != NULL )
{
S32 frameTempSize = dStrlen( mField->pFieldname ) + 32;
FrameTemp<char> valCopy( frameTempSize );
dSprintf( (char *)valCopy, frameTempSize, "%s%s", mField->pFieldname, mFieldArrayIndex );
// Return formatted element
return StringTable->insert( valCopy );
}
// Plain ole field name.
return mField->pFieldname;
};
//////////////////////////////////////////////////////////////////////////
// Overrideables for custom edit fields
//////////////////////////////////////////////////////////////////////////
GuiControl* GuiInspectorField::constructEditControl()
{
GuiControl* retCtrl = new GuiTextEditCtrl();
// If we couldn't construct the control, bail!
if( retCtrl == NULL )
return retCtrl;
// Let's make it look pretty.
retCtrl->setField( "profile", "GuiInspectorTextEditProfile" );
// Don't forget to register ourselves
registerEditControl( retCtrl );
char szBuffer[512];
dSprintf( szBuffer, 512, "%d.apply(%d.getText());",getId(), retCtrl->getId() );
retCtrl->setField("AltCommand", szBuffer );
retCtrl->setField("Validate", szBuffer );
return retCtrl;
}
void GuiInspectorField::registerEditControl( GuiControl *ctrl )
{
if(!mTarget)
return;
char szName[512];
dSprintf( szName, 512, "IE_%s_%d_%s_Field", ctrl->getClassName(), mTarget->getId(),mCaption);
// Register the object
ctrl->registerObject( szName );
}
void GuiInspectorField::onRender(Point2I offset, const RectI &updateRect)
{
if(mCaption && mCaption[0])
{
// Calculate Caption Rect
RectI captionRect( offset , Point2I((S32) mFloor( mBounds.extent.x * (F32)( (F32)GuiInspectorField::smCaptionWidth / 100.0f ) ), (S32)mBounds.extent.y ) );
// Calculate Y Offset to center vertically the caption
U32 captionYOffset = (U32)mFloor( (F32)( captionRect.extent.y - mProfile->mFont->getHeight() ) / 2 );
RectI clipRect = dglGetClipRect();
if( clipRect.intersect( captionRect ) )
{
// Backup Bitmap Modulation
ColorI currColor;
dglGetBitmapModulation( &currColor );
dglSetBitmapModulation( mProfile->mFontColor );
dglSetClipRect( RectI( clipRect.point, Point2I( captionRect.extent.x, clipRect.extent.y ) ));
// Draw Caption ( Vertically Centered )
U32 textY = captionRect.point.y + captionYOffset;
U32 textX = captionRect.point.x + captionRect.extent.x - mProfile->mFont->getStrWidth(mCaption) - 6;
Point2I textPT(textX, textY);
dglDrawText( mProfile->mFont, textPT, mCaption, &mProfile->mFontColor );
dglSetBitmapModulation( currColor );
dglSetClipRect( clipRect );
}
}
Parent::onRender( offset, updateRect );
}
bool GuiInspectorField::onAdd()
{
if( !Parent::onAdd() )
return false;
if( !mTarget )
return false;
mEdit = constructEditControl();
if( mEdit == NULL )
return false;
// Add our edit as a child
addObject( mEdit );
// Calculate Caption Rect
RectI captionRect( mBounds.point , Point2I( (S32)mFloor( mBounds.extent.x * (F32)( (F32)GuiInspectorField::smCaptionWidth / 100.0 ) ), (S32)mBounds.extent.y ) );
// Calculate Edit Field Rect
RectI editFieldRect( Point2I( captionRect.extent.x + 1, 1 ) , Point2I( mBounds.extent.x - ( captionRect.extent.x + 5 ) , mBounds.extent.y - 1) );
// Resize to fit properly in allotted space
mEdit->resize( editFieldRect.point, editFieldRect.extent );
// Prefer GuiInspectorFieldProfile
setField( "profile", "GuiInspectorFieldProfile" );
// Force our editField to set it's value
updateValue( getData() );
return true;
}
void GuiInspectorField::updateValue( const char* newValue )
{
GuiTextEditCtrl *ctrl = dynamic_cast<GuiTextEditCtrl*>( mEdit );
if( ctrl != NULL )
ctrl->setText( newValue );
}
ConsoleMethod( GuiInspectorField, apply, void, 3,3, "(newValue) Applies the given value to the field\n"
"@return No return value." )
{
object->setData( argv[2] );
}
void GuiInspectorField::resize( const Point2I &newPosition, const Point2I &newExtent )
{
Parent::resize( newPosition, newExtent );
if( mEdit != NULL )
{
// Calculate Caption Rect
RectI captionRect( mBounds.point , Point2I( (S32)mFloor( mBounds.extent.x * (F32)( (F32)GuiInspectorField::smCaptionWidth / 100.0f ) ), (S32)mBounds.extent.y ) );
// Calculate Edit Field Rect
RectI editFieldRect( Point2I( captionRect.extent.x + 1, 1 ) , Point2I( mBounds.extent.x - ( captionRect.extent.x + 5 ) , mBounds.extent.y - 1) );
mEdit->resize( editFieldRect.point, editFieldRect.extent );
}
}
//////////////////////////////////////////////////////////////////////////
// GuiInspectorGroup
//////////////////////////////////////////////////////////////////////////
//
// The GuiInspectorGroup control is a helper control that the inspector
// makes use of which houses a collapsible pane type control for separating
// inspected objects fields into groups. The content of the inspector is
// made up of zero or more GuiInspectorGroup controls inside of a GuiStackControl
//
//
//
IMPLEMENT_CONOBJECT(GuiInspectorGroup);
GuiInspectorGroup::GuiInspectorGroup()
{
mBounds.set(0,0,200,20);
mChildren.clear();
mTarget = NULL;
mParent = NULL;
mCanSave = false;
// Make sure we receive our ticks.
setProcessTicks();
}
GuiInspectorGroup::GuiInspectorGroup( SimObjectPtr<SimObject> target, StringTableEntry groupName, SimObjectPtr<GuiInspector> parent )
{
mBounds.set(0,0,200,20);
mChildren.clear();
mCaption = StringTable->insert(groupName);
mTarget = target;
mParent = parent;
mCanSave = false;
}
GuiInspectorGroup::~GuiInspectorGroup()
{
if( !mChildren.empty() )
{
Vector<GuiInspectorField*>::iterator i = mChildren.begin();
for( ; i != mChildren.end(); i++ );
}
}
//////////////////////////////////////////////////////////////////////////
// Scene Events
//////////////////////////////////////////////////////////////////////////
bool GuiInspectorGroup::onAdd()
{
setField( "profile", "GuiInspectorGroupProfile" );
if( !Parent::onAdd() )
return false;
// Create our inner controls. Allow subclasses to provide other content.
if(!createContent())
return false;
inspectGroup();
return true;
}
bool GuiInspectorGroup::createContent()
{
// Create our field stack control
mStack = new GuiStackControl();
if( !mStack )
return false;
// Prefer GuiTransperantProfile for the stack.
mStack->setField( "profile", "GuiTransparentProfile" );
mStack->registerObject();
addObject( mStack );
mStack->setField( "padding", "0" );
return true;
}
//////////////////////////////////////////////////////////////////////////
// Control Sizing Animation Functions
//////////////////////////////////////////////////////////////////////////
void GuiInspectorGroup::animateToContents()
{
calculateHeights();
if(size() > 0)
animateTo( mExpanded.extent.y );
else
animateTo( mHeader.extent.y );
}
GuiInspectorField* GuiInspectorGroup::constructField( S32 fieldType )
{
ConsoleBaseType *cbt = ConsoleBaseType::getType(fieldType);
AssertFatal(cbt, "GuiInspectorGroup::constructField - could not resolve field type!");
// Alright, is it a datablock?
if(cbt->isDatablock())
{
// This is fairly straightforward to deal with.
GuiInspectorDatablockField *dbFieldClass = new GuiInspectorDatablockField( cbt->getTypeClassName() );
if( dbFieldClass != NULL )
{
// return our new datablock field with correct datablock type enumeration info
return dbFieldClass;
}
}
// Nope, not a datablock. So maybe it has a valid inspector field override we can use?
if(!cbt->getInspectorFieldType())
// Nothing, so bail.
return NULL;
// Otherwise try to make it!
ConsoleObject *co = create(cbt->getInspectorFieldType());
GuiInspectorField *gif = dynamic_cast<GuiInspectorField*>(co);
if(!gif)
{
// Wasn't appropriate type, bail.
delete co;
return NULL;
}
return gif;
}
GuiInspectorField *GuiInspectorGroup::findField( StringTableEntry fieldName )
{
// If we don't have any field children we can't very well find one then can we?
if( mChildren.empty() )
return NULL;
Vector<GuiInspectorField*>::iterator i = mChildren.begin();
for( ; i != mChildren.end(); i++ )
{
if( (*i)->getFieldName() != NULL && dStricmp( (*i)->getFieldName(), fieldName ) == 0 )
return (*i);
}
return NULL;
}
bool GuiInspectorGroup::inspectGroup()
{
// We can't inspect a group without a target!
if( !mTarget )
return false;
// to prevent crazy resizing, we'll just freeze our stack for a sec..
mStack->freeze(true);
bool bNoGroup = false;
// Un-grouped fields are all sorted into the 'general' group
if ( dStricmp( mCaption, "General" ) == 0 )
bNoGroup = true;
AbstractClassRep::FieldList &fieldList = mTarget->getModifiableFieldList();
AbstractClassRep::FieldList::iterator itr;
bool bGrabItems = false;
bool bNewItems = false;
for(itr = fieldList.begin(); itr != fieldList.end(); itr++)
{
if( itr->type == AbstractClassRep::StartGroupFieldType )
{
// If we're dealing with general fields, always set grabItems to true (to skip them)
if( bNoGroup == true )
bGrabItems = true;
else if( itr->pGroupname != NULL && dStricmp( itr->pGroupname, mCaption ) == 0 )
bGrabItems = true;
continue;
}
else if ( itr->type == AbstractClassRep::EndGroupFieldType )
{
// If we're dealing with general fields, always set grabItems to false (to grab them)
if( bNoGroup == true )
bGrabItems = false;
else if( itr->pGroupname != NULL && dStricmp( itr->pGroupname, mCaption ) == 0 )
bGrabItems = false;
continue;
}
if( ( bGrabItems == true || ( bNoGroup == true && bGrabItems == false ) ) && itr->type != AbstractClassRep::DepricatedFieldType )
{
if( bNoGroup == true && bGrabItems == true )
continue;
// This is weird, but it should work for now. - JDD
// We are going to check to see if this item is an array
// if so, we're going to construct a field for each array element
if( itr->elementCount > 1 )
{
for(S32 nI = 0; nI < itr->elementCount; nI++)
{
FrameTemp<char> intToStr( 64 );
dSprintf( intToStr, 64, "%d", nI );
const char *val = mTarget->getDataField( itr->pFieldname, intToStr );
if (!val)
val = StringTable->EmptyString;
// Copy Val and construct proper ValueName[nI] format
// which is "ValueName0" for index 0, etc.
S32 frameTempSize = dStrlen( val ) + 32;
FrameTemp<char> valCopy( frameTempSize );
dSprintf( (char *)valCopy, frameTempSize, "%s%d", itr->pFieldname, nI );
// If the field already exists, just update it
GuiInspectorField *field = findField( valCopy );
if( field != NULL )
{
field->updateValue( field->getData() );
continue;
}
bNewItems = true;
field = constructField( itr->type );
if( field == NULL )
{
field = new GuiInspectorField( this, mTarget, itr );
field->setInspectorField( itr, intToStr );
}
else
{
field->setTarget( mTarget );
field->setParent( this );
field->setInspectorField( itr, intToStr );
}
field->registerObject();
mChildren.push_back( field );
mStack->addObject( field );
}
}
else
{
// If the field already exists, just update it
GuiInspectorField *field = findField( itr->pFieldname );
if( field != NULL )
{
field->updateValue( field->getData() );
continue;
}
bNewItems = true;
field = constructField( itr->type );
if( field == NULL )
field = new GuiInspectorField( this, mTarget, itr );
else
{
field->setTarget( mTarget );
field->setParent( this );
field->setInspectorField( itr );
}
field->registerObject();
mChildren.push_back( field );
mStack->addObject( field );
}
}
<|fim▁hole|> // If we've no new items, there's no need to resize anything!
if( bNewItems == false && !mChildren.empty() )
return true;
sizeToContents();
setUpdate();
return true;
}
IMPLEMENT_CONOBJECT(GuiInspectorDynamicGroup);
//////////////////////////////////////////////////////////////////////////
// GuiInspectorDynamicGroup - add custom controls
//////////////////////////////////////////////////////////////////////////
bool GuiInspectorDynamicGroup::createContent()
{
if(!Parent::createContent())
return false;
// add a button that lets us add new dynamic fields.
GuiIconButtonCtrl* addFieldBtn = new GuiIconButtonCtrl();
{
addFieldBtn->setBitmap("tools/gui/images/iconAdd");
SimObject* profilePtr = Sim::findObject("EditorButton");
if( profilePtr != NULL )
addFieldBtn->setControlProfile( dynamic_cast<GuiControlProfile*>(profilePtr) );
char commandBuf[64];
dSprintf(commandBuf, 64, "%d.addDynamicField();", this->getId());
addFieldBtn->setField("command", commandBuf);
addFieldBtn->setSizing(horizResizeLeft,vertResizeCenter);
//addFieldBtn->setField("buttonMargin", "2 2");
addFieldBtn->resize(Point2I(mBounds.extent.x - 20,2), Point2I(16, 16));
addFieldBtn->registerObject("zAddButton");
}
// encapsulate the button in a dummy control.
GuiControl* shell = new GuiControl();
shell->setField( "profile", "GuiTransparentProfile" );
shell->registerObject();
shell->resize(Point2I(0,0), Point2I(mBounds.extent.x, 28));
shell->addObject(addFieldBtn);
// save off the shell control, so we can push it to the bottom of the stack in inspectGroup()
mAddCtrl = shell;
mStack->addObject(shell);
return true;
}
static S32 QSORT_CALLBACK compareEntries(const void* a,const void* b)
{
SimFieldDictionary::Entry *fa = *((SimFieldDictionary::Entry **)a);
SimFieldDictionary::Entry *fb = *((SimFieldDictionary::Entry **)b);
return dStricmp(fa->slotName, fb->slotName);
}
//////////////////////////////////////////////////////////////////////////
// GuiInspectorDynamicGroup - inspectGroup override
//////////////////////////////////////////////////////////////////////////
bool GuiInspectorDynamicGroup::inspectGroup()
{
// We can't inspect a group without a target!
if( !mTarget )
return false;
// Clearing the fields and recreating them will more than likely be more
// efficient than looking up existent fields, updating them, and then iterating
// over existent fields and making sure they still exist, if not, deleting them.
clearFields();
// Create a vector of the fields
Vector<SimFieldDictionary::Entry *> flist;
// Then populate with fields
SimFieldDictionary * fieldDictionary = mTarget->getFieldDictionary();
for(SimFieldDictionaryIterator ditr(fieldDictionary); *ditr; ++ditr)
{
flist.push_back(*ditr);
}
dQsort(flist.address(),flist.size(),sizeof(SimFieldDictionary::Entry *),compareEntries);
for(U32 i = 0; i < (U32)flist.size(); i++)
{
SimFieldDictionary::Entry * entry = flist[i];
GuiInspectorField *field = new GuiInspectorDynamicField( this, mTarget, entry );
if( field != NULL )
{
field->registerObject();
mChildren.push_back( field );
mStack->addObject( field );
}
}
mStack->pushObjectToBack(mAddCtrl);
setUpdate();
return true;
}
ConsoleMethod(GuiInspectorDynamicGroup, inspectGroup, bool, 2, 2, "() Refreshes the dynamic fields in the inspector.\n"
"@return Returns true on success.")
{
return object->inspectGroup();
}
void GuiInspectorDynamicGroup::clearFields()
{
// save mAddCtrl
Sim::getGuiGroup()->addObject(mAddCtrl);
// delete everything else
mStack->clear();
// clear the mChildren list.
mChildren.clear();
// and restore.
mStack->addObject(mAddCtrl);
}
SimFieldDictionary::Entry* GuiInspectorDynamicGroup::findDynamicFieldInDictionary( StringTableEntry fieldName )
{
if( !mTarget )
return NULL;
SimFieldDictionary * fieldDictionary = mTarget->getFieldDictionary();
for(SimFieldDictionaryIterator ditr(fieldDictionary); *ditr; ++ditr)
{
SimFieldDictionary::Entry * entry = (*ditr);
if( dStricmp( entry->slotName, fieldName ) == 0 )
return entry;
}
return NULL;
}
void GuiInspectorDynamicGroup::addDynamicField()
{
// We can't add a field without a target
if( !mTarget || !mStack )
{
Con::warnf("GuiInspectorDynamicGroup::addDynamicField - no target SimObject to add a dynamic field to.");
return;
}
// find a field name that is not in use.
// But we wont try more than 100 times to find an available field.
U32 uid = 1;
char buf[64] = "dynamicField";
SimFieldDictionary::Entry* entry = findDynamicFieldInDictionary(buf);
while(entry != NULL && uid < 100)
{
dSprintf(buf, sizeof(buf), "dynamicField%03d", uid++);
entry = findDynamicFieldInDictionary(buf);
}
//Con::evaluatef( "%d.%s = \"defaultValue\";", mTarget->getId(), buf );
mTarget->setDataField(StringTable->insert(buf), NULL, "defaultValue");
// now we simply re-inspect the object, to see the new field.
this->inspectGroup();
animateToContents();
}
ConsoleMethod( GuiInspectorDynamicGroup, addDynamicField, void, 2, 2, "obj.addDynamicField();" )
{
object->addDynamicField();
}
//////////////////////////////////////////////////////////////////////////
// GuiInspectorDynamicField - Child class of GuiInspectorField
//////////////////////////////////////////////////////////////////////////
IMPLEMENT_CONOBJECT(GuiInspectorDynamicField);
GuiInspectorDynamicField::GuiInspectorDynamicField( GuiInspectorGroup* parent, SimObjectPtr<SimObject> target, SimFieldDictionary::Entry* field )
{
mCaption = NULL;
mParent = parent;
mTarget = target;
mDynField = field;
mBounds.set(0,0,100,20);
mRenameCtrl = NULL;
}
void GuiInspectorDynamicField::setData( const char* data )
{
if( mTarget == NULL || mDynField == NULL )
return;
char buf[1024];
const char * newValue = mEdit->getScriptValue();
dStrcpy( buf, newValue ? newValue : "" );
collapseEscape(buf);
mTarget->getFieldDictionary()->setFieldValue(mDynField->slotName, buf);
// Force our edit to update
updateValue( data );
}
const char* GuiInspectorDynamicField::getData()
{
if( mTarget == NULL || mDynField == NULL )
return "";
return mTarget->getFieldDictionary()->getFieldValue( mDynField->slotName );
}
void GuiInspectorDynamicField::renameField( StringTableEntry newFieldName )
{
if( mTarget == NULL || mDynField == NULL || mParent == NULL || mEdit == NULL )
{
Con::warnf("GuiInspectorDynamicField::renameField - No target object or dynamic field data found!" );
return;
}
if( !newFieldName )
{
Con::warnf("GuiInspectorDynamicField::renameField - Invalid field name specified!" );
return;
}
// Only proceed if the name has changed
if( dStricmp( newFieldName, getFieldName() ) == 0 )
return;
// Grab a pointer to our parent and cast it to GuiInspectorDynamicGroup
GuiInspectorDynamicGroup *group = dynamic_cast<GuiInspectorDynamicGroup*>(mParent);
if( group == NULL )
{
Con::warnf("GuiInspectorDynamicField::renameField - Unable to locate GuiInspectorDynamicGroup parent!" );
return;
}
// Grab our current dynamic field value
const char* currentValue = getData();
// Create our new field with the value of our old field and the new fields name!
mTarget->setDataField( newFieldName, NULL, currentValue );
// Configure our field to grab data from the new dynamic field
SimFieldDictionary::Entry *newEntry = group->findDynamicFieldInDictionary( newFieldName );
if( newEntry == NULL )
{
Con::warnf("GuiInspectorDynamicField::renameField - Unable to find new field!" );
return;
}
// Set our old fields data to "" (which will effectively erase the field)
mTarget->setDataField( getFieldName(), NULL, "" );
// Assign our dynamic field pointer (where we retrieve field information from) to our new field pointer
mDynField = newEntry;
// Lastly we need to reassign our Command and AltCommand fields for our value edit control
char szBuffer[512];
dSprintf( szBuffer, 512, "%d.%s = %d.getText();",mTarget->getId(), getFieldName(), mEdit->getId() );
mEdit->setField("AltCommand", szBuffer );
mEdit->setField("Validate", szBuffer );
}
ConsoleMethod( GuiInspectorDynamicField, renameField, void, 3,3, "field.renameField(newDynamicFieldName);" )
{
object->renameField( StringTable->insert(argv[2]) );
}
bool GuiInspectorDynamicField::onAdd()
{
if( !Parent::onAdd() )
return false;
mRenameCtrl = constructRenameControl();
pushObjectToBack(mEdit);
return true;
}
GuiControl* GuiInspectorDynamicField::constructRenameControl()
{
// Create our renaming field
GuiControl* retCtrl = new GuiTextEditCtrl();
// If we couldn't construct the control, bail!
if( retCtrl == NULL )
return retCtrl;
// Let's make it look pretty.
retCtrl->setField( "profile", "GuiInspectorTextEditRightProfile" );
// Don't forget to register ourselves
char szName[512];
dSprintf( szName, 512, "IE_%s_%d_%s_Rename", retCtrl->getClassName(), mTarget->getId(), getFieldName() );
retCtrl->registerObject( szName );
// Our command will evaluate to :
//
// if( (editCtrl).getText() !$= "" )
// (field).renameField((editCtrl).getText());
//
char szBuffer[512];
dSprintf( szBuffer, 512, "if( %d.getText() !$= \"\" ) %d.renameField(%d.getText());",retCtrl->getId(), getId(), retCtrl->getId() );
dynamic_cast<GuiTextEditCtrl*>(retCtrl)->setText( getFieldName() );
retCtrl->setField("AltCommand", szBuffer );
retCtrl->setField("Validate", szBuffer );
// Calculate Caption Rect (Adjust for 16 pixel wide delete button)
RectI captionRect( Point2I(mBounds.point.x,0) , Point2I( (S32)mFloor( mBounds.extent.x * (F32)( (F32)GuiInspectorField::smCaptionWidth / 100.0f ) ), (S32)mBounds.extent.y ) );
RectI valueRect(mEdit->mBounds.point, mEdit->mBounds.extent - Point2I(20, 0));
RectI deleteRect( Point2I( mBounds.point.x + mBounds.extent.x - 20,2), Point2I( 16, mBounds.extent.y - 4));
addObject( retCtrl );
// Resize the name control to fit in our caption rect (tricksy!)
retCtrl->resize( captionRect.point, captionRect.extent );
// resize the value control to leave space for the delete button
mEdit->resize(valueRect.point, valueRect.extent);
// Finally, add a delete button for this field
GuiIconButtonCtrl * delButt = new GuiIconButtonCtrl();
if( delButt != NULL )
{
dSprintf(szBuffer, 512, "%d.%s = \"\";%d.inspectGroup();", mTarget->getId(), getFieldName(), mParent->getId());
delButt->setField("Bitmap", "^modules/gui/images/iconDelete");
delButt->setField("Text", "X");
delButt->setField("Command", szBuffer);
delButt->setSizing(horizResizeLeft,vertResizeCenter);
delButt->registerObject();
delButt->resize( deleteRect.point,deleteRect.extent);
addObject(delButt);
}
return retCtrl;
}
void GuiInspectorDynamicField::resize( const Point2I &newPosition, const Point2I &newExtent )
{
Parent::resize( newPosition, newExtent );
// If we don't have a field rename control, bail!
if( mRenameCtrl == NULL )
return;
// Calculate Caption Rect
RectI captionRect( Point2I(mBounds.point.x,0) , Point2I( (S32)mFloor( mBounds.extent.x * (F32)( (F32)GuiInspectorField::smCaptionWidth / 100.0f ) ), (S32)mBounds.extent.y ) );
RectI valueRect(mEdit->mBounds.point, mEdit->mBounds.extent - Point2I(20, 0));
// Resize the edit control to fit in our caption rect (tricksy!)
mRenameCtrl->resize( captionRect.point, captionRect.extent );
mEdit->resize( valueRect.point, valueRect.extent);
}
//////////////////////////////////////////////////////////////////////////
// GuiInspectorDatablockField
// Field construction for datablock types
//////////////////////////////////////////////////////////////////////////
IMPLEMENT_CONOBJECT(GuiInspectorDatablockField);
static S32 QSORT_CALLBACK stringCompare(const void *a,const void *b)
{
StringTableEntry sa = *(StringTableEntry*)a;
StringTableEntry sb = *(StringTableEntry*)b;
return(dStricmp(sb, sa));
}
GuiInspectorDatablockField::GuiInspectorDatablockField( StringTableEntry className )
{
setClassName(className);
};
void GuiInspectorDatablockField::setClassName( StringTableEntry className )
{
// Walk the ACR list and find a matching class if any.
AbstractClassRep *walk = AbstractClassRep::getClassList();
while(walk)
{
if(!dStricmp(walk->getClassName(), className))
{
// Match!
mDesiredClass = walk;
return;
}
walk = walk->getNextClass();
}
// No dice.
Con::warnf("GuiInspectorDatablockField::setClassName - no class '%s' found!", className);
return;
}
GuiControl* GuiInspectorDatablockField::constructEditControl()
{
GuiControl* retCtrl = new GuiPopUpMenuCtrl();
// If we couldn't construct the control, bail!
if( retCtrl == NULL )
return retCtrl;
GuiPopUpMenuCtrl *menu = dynamic_cast<GuiPopUpMenuCtrl*>(retCtrl);
// Let's make it look pretty.
retCtrl->setField( "profile", "InspectorTypeEnumProfile" );
menu->setField("text", getData());
registerEditControl( retCtrl );
// Configure it to update our value when the popup is closed
char szBuffer[512];
dSprintf( szBuffer, 512, "%d.%s = %d.getText();%d.inspect(%d);",mTarget->getId(), mField->pFieldname, menu->getId(), mParent->mParent->getId(), mTarget->getId() );
menu->setField("Command", szBuffer );
Vector<StringTableEntry> entries;
SimDataBlockGroup * grp = Sim::getDataBlockGroup();
for(SimDataBlockGroup::iterator i = grp->begin(); i != grp->end(); i++)
{
SimDataBlock * datablock = dynamic_cast<SimDataBlock*>(*i);
// Skip non-datablocks if we somehow encounter them.
if(!datablock)
continue;
// Ok, now we have to figure inheritance info.
if( datablock && datablock->getClassRep()->isClass(mDesiredClass) )
entries.push_back(datablock->getName());
}
// sort the entries
dQsort(entries.address(), entries.size(), sizeof(StringTableEntry), stringCompare);
// add them to our enum
for(U32 j = 0; j < (U32)entries.size(); j++)
menu->addEntry(entries[j], 0);
return retCtrl;
}<|fim▁end|> | }
mStack->freeze(false);
mStack->updatePanes();
|
<|file_name|>node-check_test.go<|end_file_name|><|fim▁begin|>// Copyright 2020, Square, Inc.
package spec_test
import (
"fmt"
"testing"
. "github.com/square/spincycle/v2/request-manager/spec"
)
func TestFailHasCategoryNodeCheck(t *testing.T) {
check := HasCategoryNodeCheck{}
node := Node{
Name: nodeA,
Category: nil,
}
expectedErr := MissingValueError{
Node: &nodeA,
Field: "category",
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted node with no category, expected error")
}
func TestFailValidCategoryNodeCheck(t *testing.T) {
check := ValidCategoryNodeCheck{}
node := Node{
Name: nodeA,
Category: &testVal,
}
expectedErr := InvalidValueError{
Node: &nodeA,
Field: "category",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted category of value other than 'job', 'sequence', or 'conditional', expected error")
}
func TestFailValidEachNodeCheck(t *testing.T) {
check := ValidEachNodeCheck{}
node := Node{
Name: nodeA,
Each: []string{testVal},
}
expectedErr := InvalidValueError{
Node: &nodeA,
Field: "each",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted each not in format 'list:element', expected error")
}
func TestFailEachElementUniqueNodeCheck(t *testing.T) {
check := EachElementUniqueNodeCheck{}
node := Node{
Name: nodeA,
Each: []string{"a:element", "b:element"},
}
expectedErr := DuplicateValueError{
Node: &nodeA,
Field: "each",
Values: []string{"element"},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted duplicated each element, expected error")
}
func TestFailEachNotRenamedTwiceNodeCheck(t *testing.T) {
check := EachNotRenamedTwiceNodeCheck{}
node := Node{
Name: nodeA,
Each: []string{"list:a", "list:b"},
}
expectedErr := DuplicateValueError{
Node: &nodeA,
Field: "each",
Values: []string{"list"},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted duplicated each list, expected error")
}
func TestFailArgsNotNilNodeCheck(t *testing.T) {
check := ArgsNotNilNodeCheck{}
node := Node{
Name: nodeA,
Args: []*NodeArg{nil},
}
expectedErr := InvalidValueError{
Node: &nodeA,
Field: "args",
Values: []string{"nil"},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted nil node args, expected error")
}
func TestFailArgsAreNamedNodeCheck(t *testing.T) {
check := ArgsAreNamedNodeCheck{}
node := Node{
Name: nodeA,
Args: []*NodeArg{
&NodeArg{
Given: &testVal,
},
},
}
expectedErr := MissingValueError{
Node: &nodeA,
Field: "args.expected",
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted node arg without 'expected' field, expected error")
}
func TestFailArgsExpectedUniqueNodeCheck1(t *testing.T) {
check := ArgsExpectedUniqueNodeCheck{}
node := Node{
Name: nodeA,
Args: []*NodeArg{
&NodeArg{
Expected: &testVal,
},
&NodeArg{
Expected: &testVal,
},
},
}
expectedErr := DuplicateValueError{
Node: &nodeA,
Field: "args.expected",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted duplicated args, expected error")
}
func TestFailArgsExpectedUniqueNodeCheck2(t *testing.T) {
check := ArgsExpectedUniqueNodeCheck{}
given1 := "given1"
given2 := "given2"
node := Node{
Name: nodeA,
Args: []*NodeArg{
&NodeArg{
Expected: &testVal,
Given: &given1,
},
&NodeArg{
Expected: &testVal,
Given: &given2,
},
},
}
expectedErr := DuplicateValueError{
Node: &nodeA,
Field: "args.expected",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted duplicated args, expected error")
}
func TestFailArgsExpectedUniqueNodeCheck3(t *testing.T) {
check := ArgsExpectedUniqueNodeCheck{}
given1 := "given1"
node := Node{
Name: nodeA,
Args: []*NodeArg{
&NodeArg{
Expected: &testVal,
Given: &given1,
},
&NodeArg{
Expected: &testVal,
},
},
}
expectedErr := DuplicateValueError{
Node: &nodeA,
Field: "args.expected",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted duplicated args, expected error")
}
func TestFailArgsNotRenamedTwiceNodeCheck(t *testing.T) {
check := ArgsNotRenamedTwiceNodeCheck{}
expected1 := "expected1"
expected2 := "expected2"
node := Node{
Name: nodeA,
Args: []*NodeArg{
&NodeArg{
Expected: &expected1,
Given: &testVal,
},
&NodeArg{
Expected: &expected2,
Given: &testVal,
},
},
}
expectedErr := DuplicateValueError{
Node: &nodeA,
Field: "args.given",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "arg renamed differently with args.given, expected error")
}
func TestFailEachElementDoesNotDuplicateArgsExpectedNodeCheck(t *testing.T) {
check := EachElementDoesNotDuplicateArgsExpectedNodeCheck{}
given := "given"
node := Node{
Name: nodeA,
Each: []string{"list:" + testVal},
Args: []*NodeArg{
&NodeArg{
Expected: &testVal,
Given: &given,
},
},
}
expectedErr := DuplicateValueError{
Node: &nodeA,
Field: "each",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, fmt.Sprintf("allowed two lists to be renamed %s, expected error", testVal))
}
func TestFailEachListDoesNotDuplicateArgsGivenNodeCheck1(t *testing.T) {
check := EachListDoesNotDuplicateArgsGivenNodeCheck{}
expected := "expected"
node := Node{
Name: nodeA,
Each: []string{testVal + ":element"},
Args: []*NodeArg{
&NodeArg{
Expected: &expected,
Given: &testVal,
},
},
}
expectedErr := DuplicateValueError{
Node: &nodeA,
Field: "each",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, fmt.Sprintf("allowed %s to be renamed twice, expected error", testVal))
}
func TestFailEachListDoesNotDuplicateArgsGivenNodeCheck2(t *testing.T) {
check := EachListDoesNotDuplicateArgsGivenNodeCheck{}
node := Node{
Name: nodeA,
Each: []string{testVal + ":element"},
Args: []*NodeArg{
&NodeArg{
Expected: &testVal,
Given: &testVal,
},
},
}
expectedErr := DuplicateValueError{
Node: &nodeA,
Field: "each",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, fmt.Sprintf("allowed %s to be renamed twice, expected error", testVal))
}
func TestEachListDoesNotDuplicateArgsGivenNodeCheck(t *testing.T) {
check := EachListDoesNotDuplicateArgsGivenNodeCheck{}
node := Node{
Name: nodeA,
Each: []string{testVal + ":element"},
Args: []*NodeArg{
&NodeArg{
Expected: &testVal,
},
},
}
err := check.CheckNode(node)
if err != nil {
t.Errorf("unexpected error: %s", err.Error())
}
}
func TestFailSetsNotNilNodeCheck(t *testing.T) {
check := SetsNotNilNodeCheck{}
node := Node{
Name: nodeA,
Sets: []*NodeSet{nil},
}
expectedErr := InvalidValueError{
Node: &nodeA,
Field: "sets",
Values: []string{"nil"},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted nil node sets, expected error")
}
func TestFailSetsAreNamedNodeCheck(t *testing.T) {
check := SetsAreNamedNodeCheck{}
node := Node{
Name: nodeA,
Sets: []*NodeSet{
&NodeSet{
As: &testVal,
},
},
}
expectedErr := MissingValueError{
Node: &nodeA,
Field: "sets.arg",
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted node sets without 'arg' field, expected error")
}
func TestFailSetsAsUniqueNodeCheck1(t *testing.T) {
check := SetsAsUniqueNodeCheck{}
node := Node{
Name: nodeA,
Sets: []*NodeSet{
&NodeSet{
Arg: &testVal,
As: &testVal,
},
&NodeSet{
Arg: &testVal,
As: &testVal,
},
},
}
expectedErr := DuplicateValueError{
Node: &nodeA,
Field: "sets.as",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted duplicated sets, expected error")
}
func TestFailSetsAsUniqueNodeCheck2(t *testing.T) {
check := SetsAsUniqueNodeCheck{}
arg1 := "arg1"
arg2 := "arg2"
node := Node{
Name: nodeA,
Sets: []*NodeSet{
&NodeSet{
Arg: &arg1,
As: &testVal,
},
&NodeSet{
Arg: &arg2,
As: &testVal,
},
},
}
expectedErr := DuplicateValueError{
Node: &nodeA,
Field: "sets.as",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted duplicated sets, expected error")
}
func TestFailSetsAsUniqueNodeCheck3(t *testing.T) {
check := SetsAsUniqueNodeCheck{}
arg1 := "arg1"
node := Node{
Name: nodeA,
Sets: []*NodeSet{
&NodeSet{
Arg: &arg1,
As: &testVal,
},
&NodeSet{
Arg: &testVal,
As: &testVal,
},
},
}
expectedErr := DuplicateValueError{
Node: &nodeA,
Field: "sets.as",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted duplicated sets, expected error")
}
func TestFailSetsNotRenamedTwiceNodeCheck(t *testing.T) {
check := SetsNotRenamedTwiceNodeCheck{}
as1 := "as1"
as2 := "as2"
node := Node{
Name: nodeA,
Sets: []*NodeSet{
&NodeSet{
Arg: &testVal,
As: &as1,
},
&NodeSet{
Arg: &testVal,
As: &as2,
},
},
}
expectedErr := DuplicateValueError{
Node: &nodeA,
Field: "sets.arg",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "arg renamed differently with sets.as, expected error")
}
func TestFailEachIfParallelNodeCheck(t *testing.T) {
check := EachIfParallelNodeCheck{}
var parallel uint = 5
node := Node{
Name: nodeA,
Parallel: ¶llel,
}
expectedErr := MissingValueError{
Node: &nodeA,
Field: "each",
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted node with 'parallel' field with empty 'each' field, expected error")
}
func TestFailValidParallelNodeCheck(t *testing.T) {
check := ValidParallelNodeCheck{}
var parallel uint = 0
node := Node{
Name: nodeA,
Parallel: ¶llel,
}
expectedErr := InvalidValueError{
Node: &nodeA,
Field: "parallel",
Values: []string{"0"},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted parallel = 0, expected error")
}
func TestFailConditionalNoTypeNodeCheck(t *testing.T) {
check := ConditionalNoTypeNodeCheck{}
conditional := "conditional"
node := Node{
Name: nodeA,
Category: &conditional,
NodeType: &testVal,
}
expectedErr := InvalidValueError{
Node: &nodeA,
Field: "type",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted conditional node specifying a type, expected error")
}
func TestFailConditionalHasIfNodeCheck(t *testing.T) {
check := ConditionalHasIfNodeCheck{}
conditional := "conditional"
node := Node{
Name: nodeA,
Category: &conditional,
}
expectedErr := MissingValueError{
Node: &nodeA,
Field: "if",
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted conditional sequence without 'if' field, expected error")
}
func TestFailConditionalHasEqNodeCheck(t *testing.T) {
check := ConditionalHasEqNodeCheck{}
conditional := "conditional"
node := Node{
Name: nodeA,
Category: &conditional,
}
expectedErr := MissingValueError{
Node: &nodeA,
Field: "eq",
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted conditional sequence without 'eq' field, expected error")
}
func TestFailNonconditionalHasTypeNodeCheck(t *testing.T) {
check := NonconditionalHasTypeNodeCheck{}
node := Node{
Name: nodeA,
}
expectedErr := MissingValueError{
Node: &nodeA,
Field: "type",
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted node with no type, expected error")
}
func TestFailNonconditionalNoIfNodeCheck(t *testing.T) {
check := NonconditionalNoIfNodeCheck{}
node := Node{
Name: nodeA,
If: &testVal,
}
expectedErr := InvalidValueError{
Node: &nodeA,
Field: "if",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted nonconditional sequence with 'if' field, expected error")
}
func TestFailNonconditionalNoEqNodeCheck(t *testing.T) {
check := NonconditionalNoEqNodeCheck{}
eq := map[string]string{"yes": "noop", "default": "noop"}
node := Node{
Name: nodeA,
Eq: eq,
}
expectedErr := InvalidValueError{
Node: &nodeA,
Field: "eq",
Values: []string{fmt.Sprint(eq)},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted nonconditional sequence with 'eq' field, expected error")
}
func TestFailRetryIfRetryWaitNodeCheck(t *testing.T) {
check := RetryIfRetryWaitNodeCheck{}
node := Node{
Name: nodeA,
RetryWait: testVal,
}
expectedErr := MissingValueError{
Node: &nodeA,
Field: "retry",
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted node with 'retryWait' field with retry: 0, expected error")
}
func TestFailValidRetryWaitNodeCheck(t *testing.T) {
check := ValidRetryWaitNodeCheck{}
node := Node{
Name: nodeA,
RetryWait: testVal,
}
expectedErr := InvalidValueError{
Node: &nodeA,
Field: "retryWait",
Values: []string{testVal},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "accepted bad retryWait: duration, expected error")
}
func TestFailRequiredArgsProvidedNodeCheck1(t *testing.T) {
seqa := "seq-a"
specs := Specs{
Sequences: map[string]*Sequence{
seqa: &Sequence{
Name: seqa,
Args: SequenceArgs{
Required: []*Arg{
&Arg{Name: &testVal},
},
},
},
},
}
check := RequiredArgsProvidedNodeCheck{specs}
sequence := "sequence" // Test sequence node
node := Node{
Name: nodeA,
Category: &sequence,
NodeType: &seqa,
}
expectedErr := MissingValueError{
Node: &nodeA,
Field: "args",
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "not all required args to sequence node listed in 'args', expected error")
}
func TestFailRequiredArgsProvidedNodeCheck2(t *testing.T) {
seqa := "seq-a"
seqb := "seq-b"
specs := Specs{
Sequences: map[string]*Sequence{
seqa: &Sequence{
Name: seqa,
Args: SequenceArgs{
Required: []*Arg{
&Arg{Name: &testVal},
},
},
},
seqb: &Sequence{
Name: seqb,
},
},
}
check := RequiredArgsProvidedNodeCheck{specs}
conditional := "conditional" // Test conditional node
node := Node{
Name: nodeA,
Category: &conditional,
Eq: map[string]string{
seqa: seqa,
seqb: seqb,
},
}
expectedErr := MissingValueError{
Node: &nodeA,
Field: "args",
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "not all required args to conditional sequence node listed in 'args', expected error")
}
func TestFailRequiredArgsProvidedNodeCheck3(t *testing.T) {
seqa := "seq-a"
reqa := "req-a"
reqb := "req-b"
reqc := "req-c"
reqd := "req-d"
specs := Specs{
Sequences: map[string]*Sequence{
seqa: &Sequence{
Name: seqa,
Args: SequenceArgs{
Required: []*Arg{
&Arg{Name: &reqa},
&Arg{Name: &reqb},
&Arg{Name: &reqc},
&Arg{Name: &reqd}, // This is missing
},
},
},
},
}
check := RequiredArgsProvidedNodeCheck{specs}
sequence := "sequence" // Test expanded sequence node
node := Node{
Name: nodeA,
Category: &sequence,
NodeType: &seqa,
Each: []string{
fmt.Sprintf("%ss:%s", reqa, reqa), // Specify in 'each' and 'args'
fmt.Sprintf("%ss:%s", reqc, reqc), // Specify only here
},
Args: []*NodeArg{
&NodeArg{Expected: &reqa, Given: &reqa}, // Specified above in 'each'
&NodeArg{Expected: &reqb, Given: &reqb}, // Specify only here
},
}
expectedErr := MissingValueError{
Node: &nodeA,
Field: "args",
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "not all required args to expanded sequence node listed in 'args', expected error")
}
func TestNoExtraSequenceArgsProvidedNodeCheck1(t *testing.T) {
argA := "arg-a"
argB := "arg-b"
specs := Specs{
Sequences: map[string]*Sequence{
seqA: &Sequence{
Name: seqA,
Args: SequenceArgs{
Required: []*Arg{
&Arg{Name: &argA},
},
Optional: []*Arg{
&Arg{Name: &argB},
},
},
},
},
}
check := NoExtraSequenceArgsProvidedNodeCheck{specs}
sequence := "sequence" // Test sequence node
node := Node{
Name: nodeA,
Category: &sequence,
NodeType: &seqA,
Each: []string{
fmt.Sprintf("%ss:%s", argA, argA),
},
Args: []*NodeArg{
&NodeArg{Expected: &argB, Given: &argB},
},
}
err := check.CheckNode(node)
if err != nil {
t.Errorf("NoExtraSequenceArgsProvidedNodeCheck failed, expected pass")
}
}
func TestNoExtraSequenceArgsProvidedNodeCheck2(t *testing.T) {
seqB := "seq-b"
argA := "arg-a"
argB := "arg-b"
argC := "arg-c"
specs := Specs{
Sequences: map[string]*Sequence{
seqA: &Sequence{
Name: seqA,
Args: SequenceArgs{
Optional: []*Arg{
&Arg{Name: &argB},
},
},
},
seqB: &Sequence{
Name: seqB,
Args: SequenceArgs{
Required: []*Arg{
&Arg{Name: &argA},
&Arg{Name: &argC},
},
},
},
},
}
check := NoExtraSequenceArgsProvidedNodeCheck{specs}
conditional := "conditional" // Test conditional node
node := Node{
Name: nodeA,
Category: &conditional,
Eq: map[string]string{
seqA: seqA,
seqB: seqB,
},
Each: []string{
fmt.Sprintf("%ss:%s", argA, argA),
},
Args: []*NodeArg{
&NodeArg{Expected: &argB, Given: &argB},
&NodeArg{Expected: &argC, Given: &argC},
},
}
err := check.CheckNode(node)
if err != nil {
t.Errorf("NoExtraSequenceArgsProvidedNodeCheck failed, expected pass")
}
}
func TestNoExtraSequenceArgsProvidedNodeCheck3(t *testing.T) {
argA := "arg-a"
argB := "arg-b"
specs := Specs{
Sequences: map[string]*Sequence{
seqA: &Sequence{
Name: seqA,
Args: SequenceArgs{
Required: []*Arg{
&Arg{Name: &argA},
},
Optional: []*Arg{
&Arg{Name: &argB},
},
},
},
},
}
check := NoExtraSequenceArgsProvidedNodeCheck{specs}
sequence := "sequence" // Test expanded sequence node
node := Node{
Name: nodeA,
Category: &sequence,
NodeType: &seqA,
Each: []string{
fmt.Sprintf("%ss:%s", argA, argA),
},
Args: []*NodeArg{
&NodeArg{Expected: &argB, Given: &argB},
},
}
err := check.CheckNode(node)
if err != nil {
t.Errorf("NoExtraSequenceArgsProvidedNodeCheck failed, expected pass")
}
}
func TestFailNoExtraSequenceArgsProvidedNodeCheck1(t *testing.T) {
argA := "arg-a"
argB := "arg-b"
argC := "arg-c"
specs := Specs{
Sequences: map[string]*Sequence{
seqA: &Sequence{
Name: seqA,
Args: SequenceArgs{
Required: []*Arg{
&Arg{Name: &argA},
},
Optional: []*Arg{
&Arg{Name: &argB},
},
},
},
},
}
check := NoExtraSequenceArgsProvidedNodeCheck{specs}
sequence := "sequence" // Test sequence node
node := Node{
Name: nodeA,
Category: &sequence,
NodeType: &seqA,
Each: []string{
fmt.Sprintf("%ss:%s", argA, argA),
},
Args: []*NodeArg{
&NodeArg{Expected: &argB, Given: &argB},
&NodeArg{Expected: &argC, Given: &argC}, // Unnecessary arg
},
}
expectedErr := InvalidValueError{
Node: &nodeA,
Field: "args', 'each.element",
Values: []string{argC},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "sequence node provides subsequence with unnecessary, expected error")
}
func TestFailNoExtraSequenceArgsProvidedNodeCheck2(t *testing.T) {
seqB := "seq-b"
argA := "arg-a"
argB := "arg-b"
argC := "arg-c"
specs := Specs{<|fim▁hole|> Sequences: map[string]*Sequence{
seqA: &Sequence{
Name: seqA,
Args: SequenceArgs{
Optional: []*Arg{
&Arg{Name: &argB},
},
},
},
seqB: &Sequence{
Name: seqB,
Args: SequenceArgs{
Required: []*Arg{
&Arg{Name: &argA},
},
},
},
},
}
check := NoExtraSequenceArgsProvidedNodeCheck{specs}
conditional := "conditional" // Test conditional node
node := Node{
Name: nodeA,
Category: &conditional,
Eq: map[string]string{
seqA: seqA,
seqB: seqB,
},
Each: []string{
fmt.Sprintf("%ss:%s", argA, argA),
},
Args: []*NodeArg{
&NodeArg{Expected: &argB, Given: &argB},
&NodeArg{Expected: &argC, Given: &argC}, // Unnecessary arg
},
}
expectedErr := InvalidValueError{
Node: &nodeA,
Field: "args', 'each.element",
Values: []string{argC},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "sequence node provides subsequence with unnecessary, expected error")
}
func TestFailNoExtraSequenceArgsProvidedNodeCheck3(t *testing.T) {
argA := "arg-a"
argB := "arg-b"
argC := "arg-c"
specs := Specs{
Sequences: map[string]*Sequence{
seqA: &Sequence{
Name: seqA,
Args: SequenceArgs{
Required: []*Arg{
&Arg{Name: &argA},
},
Optional: []*Arg{
&Arg{Name: &argB},
},
},
},
},
}
check := NoExtraSequenceArgsProvidedNodeCheck{specs}
sequence := "sequence" // Test expanded sequence node
node := Node{
Name: nodeA,
Category: &sequence,
NodeType: &seqA,
Each: []string{
fmt.Sprintf("%ss:%s", argA, argA),
fmt.Sprintf("%ss:%s", argC, argC), // Unnecessary arg
},
Args: []*NodeArg{
&NodeArg{Expected: &argB, Given: &argB},
},
}
expectedErr := InvalidValueError{
Node: &nodeA,
Field: "args', 'each.element",
Values: []string{argC},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "sequence node provides subsequence with unnecessary, expected error")
}
func TestFailSubsequencesExistNodeCheck1(t *testing.T) {
seqB := "seq-b"
specs := Specs{
Sequences: map[string]*Sequence{
seqA: &Sequence{
Name: seqA,
},
},
}
check := SubsequencesExistNodeCheck{specs}
conditional := "conditional" // Test conditional node
node := Node{
Name: nodeA,
Category: &conditional,
Eq: map[string]string{
seqA: seqA,
"default": seqB,
},
}
expectedErr := InvalidValueError{
Node: &nodeA,
Field: "eq",
Values: []string{seqB},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "node calls seq that does not exist in specs, expected error")
}
func TestFailSubsequencesExistNodeCheck2(t *testing.T) {
specs := Specs{
Sequences: map[string]*Sequence{},
}
check := SubsequencesExistNodeCheck{specs}
sequence := "sequence" // Test sequence node
node := Node{
Name: nodeA,
Category: &sequence,
NodeType: &seqA,
}
expectedErr := InvalidValueError{
Node: &nodeA,
Field: "type",
Values: []string{seqA},
}
err := check.CheckNode(node)
compareError(t, err, expectedErr, "node calls seq that does not exist in specs, expected error")
}<|fim▁end|> | |
<|file_name|>16.d.ts<|end_file_name|><|fim▁begin|>import { TextLinkAnalysis16 } from "../../";<|fim▁hole|>export = TextLinkAnalysis16;<|fim▁end|> | |
<|file_name|>module_creator.py<|end_file_name|><|fim▁begin|>import os
import shutil
import logging
from getpass import getuser
from dls_ade import vcs_git, Server
from dls_ade.exceptions import (RemoteRepoError, VerificationError,<|fim▁hole|> ArgumentError)
class ModuleCreator(object):
"""Abstract base class for the management of the creation of new modules.
Attributes:
_area: The 'area' of the module to be created.
_cwd: The current working directory upon initialisation.
_module_name: The base name of the module path.
_module_path: The relative module path.
Used in messages and exceptions for user-friendliness.
abs_module_path: The absolute module path.
Used for system and git commands.
_server_repo_path: The git repository server path for module.
_module_template: Object that handles file and user message creation.
_remote_repo_valid(bool): True if conflicting paths exist on server.
This flag is separated as the user needs to check this towards
the beginning to avoid unnecessary file creation.
_can_create_local_module(bool): True if can run create_local_module.
_can_push_repo_to_remote(bool): True if can run push_repo_to_remote.
Raises:
:class:`~dls_ade.exceptions.ModuleCreatorError`: Base class for this \
module's exceptions
"""
def __init__(self, module_path, area, module_template_cls,
**kwargs):
"""Default initialisation of all object attributes.
Args:
module_path: The relative module path.
Used in messages and exceptions for user-friendliness.
area: The development area of the module to be created.
In particular, this specifies the exact template files to be
created as well as affecting the repository server path.
module_template_cls: Class for module_template object.
Must be a non-abstract subclass of ModuleTemplate.
kwargs: Additional arguments for module creation.
"""
self._usermsg = logging.getLogger("usermessages")
self._area = area
self._cwd = os.getcwd()
self._module_path = module_path
self._module_name = os.path.basename(os.path.normpath(
self._module_path))
self.server = Server()
self.abs_module_path = os.path.join(self._cwd, self._module_path)
self._server_repo_path = self.server.dev_module_path(self._module_path,
self._area)
template_args = {'module_name': self._module_name,
'module_path': self._module_path,
'user_login': getuser()}
if kwargs:
template_args.update(kwargs)
self._module_template = module_template_cls(template_args)
self._remote_repo_valid = False
# These boolean values allow us to call the methods in any order
self._can_create_local_module = False
self._can_push_repo_to_remote = False
def verify_remote_repo(self):
"""Verifies there are no name conflicts with the remote repository.
This checks whether or not there are any name conflicts between the
intended module name and the modules that already exist on the remote
repository.
Sets the `_remote_repo_valid` boolean value to True if there are no
conflicts.
Raises:
:class:`~dls_ade.exceptions.VerificationError`: If there is a \
name conflict with the server.
"""
if self._remote_repo_valid:
return
if self.server.is_server_repo(self._server_repo_path):
err_message = ("The path {dir:s} already exists on server,"
" cannot continue")
raise VerificationError(
err_message.format(dir=self._server_repo_path)
)
self._remote_repo_valid = True
def verify_can_create_local_module(self):
"""Verifies that conditions are suitable for creating a local module.
When :meth:`create_local_module` is called, if the boolean value
`_can_create_local_module` is False, this method is run to make sure
that :meth:`create_local_module` can operate completely.
This method also sets the `_can_create_local_module` attribute to True
so it can be run separately before :meth:`create_local_module`.
This method will fail (raise a VerificationError) if:
- The intended local directory for creation already exists
- The user is currently inside a git repository
Raises:
:class:`~dls_ade.exceptions.VerificationError`: Local module \
cannot be created.
"""
if self._can_create_local_module:
return
err_list = []
if os.path.exists(self.abs_module_path):
err_list.append("Directory {dir:s} already exists, "
"please move elsewhere and try again.")
if vcs_git.is_in_local_repo(self._cwd):
err_list.append("Currently in a git repository, "
"please move elsewhere and try again.")
if err_list:
err_message = "\n".join(err_list).format(dir=self._module_path)
self._can_create_local_module = False
raise VerificationError(err_message)
self._can_create_local_module = True
def verify_can_push_repo_to_remote(self):
"""Verifies that one can push the local module to the remote server.
When :meth:`push_repo_to_remote` is called, if the boolean value
`_can_push_repo_to_remote` is False, this method is run to make sure
that :meth:`push_repo_to_remote` can operate completely.
This method also sets the `_can_push_repo_to_remote` attribute to True
so it can be run separately before :meth:`push_repo_to_remote`.
This method will fail (raise a VerificationError) if:
- The local module does not exist
- The local module is not a git repository
- There is a naming conflict with the remote server
Raises:
:class:`~dls_ade.exceptions.VerificationError`: Local repository \
cannot be pushed to remote.
"""
if self._can_push_repo_to_remote:
return
self._can_push_repo_to_remote = True
err_list = []
if not os.path.exists(self.abs_module_path):
err_list.append("Directory {dir:s} does not exist.")
else:
mod_dir_is_repo = vcs_git.is_local_repo_root(self.abs_module_path)
if not mod_dir_is_repo:
err_list.append("Directory {dir:s} is not a git repository. "
"Unable to push to remote repository.")
err_list = [err.format(dir=self._module_path) for err in err_list]
# This allows us to retain the remote_repo_valid error message
if not self._remote_repo_valid:
try:
self.verify_remote_repo()
except VerificationError as e:
err_list.append(str(e))
if err_list:
self._can_push_repo_to_remote = False
raise VerificationError("\n".join(err_list))
def create_local_module(self):
"""Creates the folder structure and files in a new git repository.
This will use the file creation specified in
:meth:`~dls_ade.module_template.ModuleTemplate.create_files`. It will
also stage and commit these files to a git repository located in the
same directory
Note:
This will set `_can_create_local_module` False in order to prevent
the user calling this method twice in succession.
Raises:
:class:`~dls_ade.exceptions.VerificationError`: Local module \
cannot be created.
OSError: The abs_module_path already exists (outside interference).
"""
self.verify_can_create_local_module()
self._can_create_local_module = False
self._usermsg.info("Making clean directory structure for %s",
self._module_path)
os.makedirs(self.abs_module_path)
# The reason why we have to change directory into the folder where the
# files are created is in order to remain compatible with
# makeBaseApp.pl, used for IOC and Support modules
os.chdir(self.abs_module_path)
self._module_template.create_files()
os.chdir(self._cwd)
repo = vcs_git.init_repo(self.abs_module_path)
vcs_git.stage_all_files_and_commit(repo, "Initial commit")
def get_print_message(self):
"""Prints a message to detail the user's next steps."""
return self._module_template.get_print_message()
def push_repo_to_remote(self):
"""Pushes the local repo to the remote server.
Note:
This will set `_can_push_repo_to_remote` and `_remote_repo_valid`
False in order to prevent the user calling this method twice in
succession.
Raises:
:class:`~dls_ade.exceptions.VerificationError`: Local repository \
cannot be pushed to remote.
:class:`~dls_ade.exceptions.VCSGitError`: If issue with adding a \
new remote and pushing.
"""
self.verify_can_push_repo_to_remote()
self._can_push_repo_to_remote = False
self._remote_repo_valid = False
vcs = self.server.create_new_local_repo(self._module_name, self._area,
self.abs_module_path)
vcs.add_new_remote_and_push(self._server_repo_path)
class ModuleCreatorWithApps(ModuleCreator):
"""Abstract class for the management of the creation of app-based modules.
Attributes:
_app_name: The name of the app for the new module.
This is a separate folder in each git repository, corresponding to
the newly created module.
Raises:
:class:`~dls_ade.exceptions.ArgumentError`: If 'app_name' not given \
as a keyword argument
"""
def __init__(self, module_path, area, module_template, **kwargs):
"""Initialise variables.
Args:
kwargs: Must include app_name.
"""
if 'app_name' not in kwargs:
raise ArgumentError("'app_name' must be provided as keyword "
"argument.")
super(ModuleCreatorWithApps, self).__init__(
module_path,
area,
module_template,
**kwargs
)
self._app_name = kwargs['app_name']
class ModuleCreatorAddAppToModule(ModuleCreatorWithApps):
"""Class for the management of adding a new App to an existing IOC module.
In an old-style module, a single module repository contains multiple IOC
apps. To maintain compatibility, this class exists for the creation of new
apps inside existing modules.
Note:
While the script is called dls_start_new_module, the original svn
script similarly created the new 'app_nameApp' folders in existing
svn 'modules'.
In keeping with the rest of the :class:`ModuleCreator` code, I
continue to use the word 'module' to refer to the git repository (local
or remote) in the documentation, and the 'app' to be the new IOC folder
'app_nameApp' created inside.
From the point of view of the user, however, the 'app_nameApp' folder
itself was considered the 'module', hence the confusing use of eg.
dls_start_new_module for the main script's name.
"""
def verify_remote_repo(self):
"""Verifies there are no name conflicts with the remote repository.
This checks whether or not there are any name conflicts between the
intended module and app names, and the modules that already exist on
the remote repository.
Sets the `_remote_repo_valid` boolean value to True if there are no
conflicts.
This method will fail (raise a VerificationError) if:
- There is no remote repository to clone from
- There is an app_name conflict with one of the remote
paths
Raises:
:class:`~dls_ade.exceptions.VerificationError`: If there is an \
issue with the remote repository.
:class:`~dls_ade.exceptions.RemoteRepoError`: If the given server \
path does not exist.
This should never be raised. There is a bug if it is!
"""
if self._remote_repo_valid:
return
if not self.server.is_server_repo(self._server_repo_path):
err_message = ("The path {path:s} does not exist on server, so "
"cannot clone from it")
err_message = err_message.format(path=self._server_repo_path)
raise VerificationError(err_message)
conflicting_path = self._check_if_remote_repo_has_app(
self._server_repo_path
)
if conflicting_path:
err_message = ("The repository {path:s} has an app that conflicts "
"with app name: {app_name:s}")
err_message = err_message.format(
path=self._server_repo_path,
app_name=self._app_name
)
raise VerificationError(err_message)
self._remote_repo_valid = True
def _check_if_remote_repo_has_app(self, remote_repo_path):
"""Checks if the remote repository contains an app_nameApp folder.
This checks whether or not there is already a folder with the name
"app_nameApp" on the remote repository with the given server
repository path.
Sets the `_remote_repo_valid` boolean value to True if there are no
conflicts.
Returns:
bool: True if app exists, False otherwise.
Raises:
:class:`~dls_ade.exceptions.RemoteRepoError`: If given repo path \
does not exist on server.
This should never be raised. There is a bug if it is!
:class:`~dls_ade.exceptions.VCSGitError`: Issue with the vcs_git \
function calls.
"""
if not self.server.is_server_repo(remote_repo_path):
# This should never get raised!
err_message = ("Remote repo {repo:s} does not exist. Cannot "
"clone to determine if there is an app_name "
"conflict with {app_name:s}")
err_message = err_message.format(repo=remote_repo_path,
app_name=self._app_name)
raise RemoteRepoError(err_message)
temp_dir = ""
exists = False
try:
repo = self.server.temp_clone(remote_repo_path).repo
temp_dir = repo.working_tree_dir
if os.path.exists(os.path.join(temp_dir, self._app_name + "App")):
exists = True
finally:
try:
if temp_dir:
shutil.rmtree(temp_dir)
except OSError:
pass
return exists
def create_local_module(self):
"""Creates the folder structure and files in a cloned git repository.
This will use the file creation specified in
:meth:`~dls_ade.module_template.ModuleTemplate.create_files`.
Raises:
:class:`~dls_ade.exceptions.ArgumentError`: From \
:meth:`~dls_ade.module_template.ModuleTemplate.create_files`
OSError: From \
:meth:`~dls_ade.module_template.ModuleTemplate.create_files`
:class:`~dls_ade.exceptions.VCSGitError`: From \
:func:`~dls_ade.vcs_git.stage_all_files_and_commit`
"""
self.verify_can_create_local_module()
self._can_create_local_module = False
self._usermsg.info("Cloning module to {}".format(self._module_path))
vcs = self.server.clone(self._server_repo_path, self.abs_module_path)
os.chdir(self.abs_module_path)
self._module_template.create_files()
os.chdir(self._cwd)
commit_message = ("Added app, {app_name:s}, to module.".format(
app_name=self._app_name
))
vcs_git.stage_all_files_and_commit(vcs.repo, commit_message)
def push_repo_to_remote(self):
"""Pushes the local repo to the remote server using remote 'origin'.
:class:`~dls_ade.exceptions.VCSGitError`
This will push the master branch of the local repository to the remote
server it was cloned from.
Raises:
:class:`~dls_ade.exceptions.VerificationError`: From \
:meth:`.verify_can_push_repo_to_remote`.
:class:`~dls_ade.exceptions.VCSGitError`: From \
:func:`~dls_ade.vcs_git.push_to_remote`
"""
self.verify_can_push_repo_to_remote()
self._can_push_repo_to_remote = False
vcs = self.server.create_new_local_repo(self._module_name, self._area,
self.abs_module_path)
vcs.push_to_remote()<|fim▁end|> | |
<|file_name|>extract_images_to_s3.py<|end_file_name|><|fim▁begin|>import os
import posixpath
import random
import string
import logging
import tempfile
import time
from .extract_images import ExtractImages
logger = logging.getLogger(__name__)
class ExtractImagesToS3(ExtractImages):
'''
This KnowledgePostProcessor subclass extracts images from posts to S3. It
is designed to be used upon addition to a knowledge repository, which can
reduce the size of repositories. It replaces local images with remote urls
based on `http_image_root`.
`s3_image_root` should be the root of the image folder on an S3 remote, such
as "s3://my_bucket/images".
`http_image_root` should be the root of the server where the images will be
accessible after uploading.
Note: This requires that user AWS credentials are set up appropriately and
that they have installed the aws cli packages.
'''
_registry_keys = ['extract_images_to_s3']
def __init__(self, s3_image_root, http_image_root):
self.s3_image_root = s3_image_root
self.http_image_root = http_image_root
def copy_image(self, kp, img_path, is_ref=False, repo_name='knowledge'):
# Copy image data to new file
if is_ref:
_, tmp_path = tempfile.mkstemp()
with open(tmp_path, 'wb') as f:
f.write(kp._read_ref(img_path))
else:
tmp_path = img_path
<|fim▁hole|> # Make random filename for image
random_string = ''.join(random.choice(string.ascii_lowercase) for i in range(6))
fname_img = '{repo_name}_{time}_{random_string}{ext}'.format(
repo_name=repo_name,
time=int(round(time.time() * 100)),
random_string=random_string,
ext=img_ext).strip().replace(' ', '-')
# Copy image to accessible folder on S3
fname_s3 = posixpath.join(self.s3_image_root, repo_name, fname_img)
# Note: The following command may need to be prefixed with a login agent;
# for example, to handle multi-factor authentication.
cmd = "aws s3 cp '{0}' {1}".format(tmp_path, fname_s3)
logger.info("Uploading images to S3: {cmd}".format(cmd=cmd))
retval = os.system(cmd)
if retval != 0:
raise Exception('Problem uploading images to s3')
finally:
# Clean up temporary file
if is_ref:
os.remove(tmp_path)
# return uploaded path of file
return posixpath.join(self.http_image_root, repo_name, fname_img)
def skip_image(self, kp, image):
import re
if re.match('http[s]?://', image['src']):
return True
return False
def cleanup(self, kp):
if kp._has_ref('images'):
kp._drop_ref('images')<|fim▁end|> | try:
# Get image type
img_ext = posixpath.splitext(img_path)[1]
|
<|file_name|>templating.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
flask.templating
~~~~~~~~~~~~~~~~
Implements the bridge to Jinja2.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import posixpath
from jinja2 import BaseLoader, Environment as BaseEnvironment, \
TemplateNotFound
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import template_rendered
from .module import blueprint_is_module
from ._compat import itervalues, iteritems
def _default_template_ctx_processor():
"""Default template context processor. Injects `request`,
`session` and `g`.
"""
reqctx = _request_ctx_stack.top
appctx = _app_ctx_stack.top
rv = {}
if appctx is not None:
rv['g'] = appctx.g
if reqctx is not None:
rv['request'] = reqctx.request
rv['session'] = reqctx.session
return rv
class Environment(BaseEnvironment):
"""Works like a regular Jinja2 environment but has some additional
knowledge of how Flask's blueprint works so that it can prepend the
name of the blueprint to referenced templates if necessary.
"""
def __init__(self, app, **options):
if 'loader' not in options:
options['loader'] = app.create_global_jinja_loader()
BaseEnvironment.__init__(self, **options)
self.app = app
class DispatchingJinjaLoader(BaseLoader):
"""A loader that looks for templates in the application and all
the blueprint folders.
"""
def __init__(self, app):
self.app = app
def get_source(self, environment, template):
for loader, local_name in self._iter_loaders(template):
try:
return loader.get_source(environment, local_name)<|fim▁hole|> except TemplateNotFound:
pass
raise TemplateNotFound(template)
def _iter_loaders(self, template):
loader = self.app.jinja_loader
if loader is not None:
yield loader, template
# old style module based loaders in case we are dealing with a
# blueprint that is an old style module
try:
module, local_name = posixpath.normpath(template).split('/', 1)
blueprint = self.app.blueprints[module]
if blueprint_is_module(blueprint):
loader = blueprint.jinja_loader
if loader is not None:
yield loader, local_name
except (ValueError, KeyError):
pass
for blueprint in itervalues(self.app.blueprints):
if blueprint_is_module(blueprint):
continue
loader = blueprint.jinja_loader
if loader is not None:
yield loader, template
def list_templates(self):
result = set()
loader = self.app.jinja_loader
if loader is not None:
result.update(loader.list_templates())
for name, blueprint in iteritems(self.app.blueprints):
loader = blueprint.jinja_loader
if loader is not None:
for template in loader.list_templates():
prefix = ''
if blueprint_is_module(blueprint):
prefix = name + '/'
result.add(prefix + template)
return list(result)
def _render(template, context, app):
"""Renders the template and fires the signal"""
rv = template.render(context)
template_rendered.send(app, template=template, context=context)
return rv
def render_template(template_name_or_list, **context):
"""Renders a template from the template folder with the given
context.
:param template_name_or_list: the name of the template to be
rendered, or an iterable with template names
the first one existing will be rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
context, ctx.app)
def render_template_string(source, **context):
"""Renders a template from the given template source string
with the given context.
:param source: the sourcecode of the template to be
rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.from_string(source),
context, ctx.app)<|fim▁end|> | |
<|file_name|>clean.rs<|end_file_name|><|fim▁begin|>// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of `make clean` in rustbuild.
//!
//! Responsible for cleaning out a build directory of all old and stale
//! artifacts to prepare for a fresh build. Currently doesn't remove the
//! `build/cache` directory (download cache) or the `build/$target/llvm`
//! directory as we want that cached between builds.
use std::fs;
use std::io::{self, ErrorKind};
use std::path::Path;
use Build;
pub fn clean(build: &Build) {<|fim▁hole|> for host in build.config.host.iter() {
let entries = match build.out.join(host).read_dir() {
Ok(iter) => iter,
Err(_) => continue,
};
for entry in entries {
let entry = t!(entry);
if entry.file_name().to_str() == Some("llvm") {
continue
}
let path = t!(entry.path().canonicalize());
rm_rf(&path);
}
}
}
fn rm_rf(path: &Path) {
match path.symlink_metadata() {
Err(e) => {
if e.kind() == ErrorKind::NotFound {
return;
}
panic!("failed to get metadata for file {}: {}", path.display(), e);
},
Ok(metadata) => {
do_op(path, "remove dir", |p| fs::remove_dir(p));
if metadata.file_type().is_file() || metadata.file_type().is_symlink() {
do_op(path, "remove file", |p| fs::remove_file(p));
return;
}
for file in t!(fs::read_dir(path)) {
rm_rf(&t!(file).path());
}
do_op(path, "remove dir", |p| fs::remove_dir(p));
},
};
}
fn do_op<F>(path: &Path, desc: &str, mut f: F)
where F: FnMut(&Path) -> io::Result<()>
{
match f(path) {
Ok(()) => {}
// On windows we can't remove a readonly file, and git will often clone files as readonly.
// As a result, we have some special logic to remove readonly files on windows.
// This is also the reason that we can't use things like fs::remove_dir_all().
Err(ref e) if cfg!(windows) &&
e.kind() == ErrorKind::PermissionDenied => {
let mut p = t!(path.symlink_metadata()).permissions();
p.set_readonly(false);
t!(fs::set_permissions(path, p));
f(path).unwrap_or_else(|e| {
panic!("failed to {} {}: {}", desc, path.display(), e);
})
}
Err(e) => {
panic!("failed to {} {}: {}", desc, path.display(), e);
}
}
}<|fim▁end|> | rm_rf("tmp".as_ref());
rm_rf(&build.out.join("tmp"));
rm_rf(&build.out.join("dist"));
|
<|file_name|>boot_wym.js<|end_file_name|><|fim▁begin|>onOpenDialog = function(dialog) {
$('.ui-dialog').corner('6px').find('.ui-dialog-titlebar').corner('1px top');
$(document.body).addClass('hide-overflow');
}
onCloseDialog = function(dialog) {
$(document.body).removeClass('hide-overflow');
}
var wymeditor_inputs = [];
var wymeditors_loaded = 0;
// supply custom_wymeditor_boot_options if you want to override anything here.
if (typeof(custom_wymeditor_boot_options) == "undefined") { custom_wymeditor_boot_options = {}; }
var form_actions =
"<div id='dialog-form-actions' class='form-actions'>"
+ "<div class='form-actions-left'>"
+ "<input id='submit_button' class='wym_submit button' type='submit' value='{Insert}' class='button' />"
+ "<a href='' class='wym_cancel close_dialog button'>{Cancel}</a>"
+ "</div>"
+ "</div>";
var wymeditor_boot_options = $.extend({
skin: 'refinery'
, basePath: "/"
, wymPath: "/javascripts/wymeditor/jquery.refinery.wymeditor.js"
, cssSkinPath: "/stylesheets/wymeditor/skins/"
, jsSkinPath: "/javascripts/wymeditor/skins/"
, langPath: "/javascripts/wymeditor/lang/"
, iframeBasePath: '/'
, classesItems: [
{name: 'text-align', rules:['left', 'center', 'right', 'justify'], join: '-'}
, {name: 'image-align', rules:['left', 'right'], join: '-'}
, {name: 'font-size', rules:['small', 'normal', 'large'], join: '-'}
]
, containersItems: [
{'name': 'h1', 'title':'Heading_1', 'css':'wym_containers_h1'}
, {'name': 'h2', 'title':'Heading_2', 'css':'wym_containers_h2'}
, {'name': 'h3', 'title':'Heading_3', 'css':'wym_containers_h3'}
, {'name': 'p', 'title':'Paragraph', 'css':'wym_containers_p'}
]
, toolsItems: [
{'name': 'Bold', 'title': 'Bold', 'css': 'wym_tools_strong'}
,{'name': 'Italic', 'title': 'Emphasis', 'css': 'wym_tools_emphasis'}
,{'name': 'InsertOrderedList', 'title': 'Ordered_List', 'css': 'wym_tools_ordered_list'}
,{'name': 'InsertUnorderedList', 'title': 'Unordered_List', 'css': 'wym_tools_unordered_list'}
/*,{'name': 'Indent', 'title': 'Indent', 'css': 'wym_tools_indent'}
,{'name': 'Outdent', 'title': 'Outdent', 'css': 'wym_tools_outdent'}
,{'name': 'Undo', 'title': 'Undo', 'css': 'wym_tools_undo'}
,{'name': 'Redo', 'title': 'Redo', 'css': 'wym_tools_redo'}*/
,{'name': 'CreateLink', 'title': 'Link', 'css': 'wym_tools_link'}<|fim▁hole|> ,{'name': 'InsertTable', 'title': 'Table', 'css': 'wym_tools_table'}
//,{'name': 'Paste', 'title': 'Paste_From_Word', 'css': 'wym_tools_paste'}
,{'name': 'ToggleHtml', 'title': 'HTML', 'css': 'wym_tools_html'}
]
,toolsHtml: "<ul class='wym_tools wym_section wym_buttons'>" + WYMeditor.TOOLS_ITEMS + WYMeditor.CLASSES + "</ul>"
,toolsItemHtml:
"<li class='" + WYMeditor.TOOL_CLASS + "'>"
+ "<a href='#' name='" + WYMeditor.TOOL_NAME + "' title='" + WYMeditor.TOOL_TITLE + "'>"
+ WYMeditor.TOOL_TITLE
+ "</a>"
+ "</li>"
, classesHtml: "<li class='wym_tools_class'>"
+ "<a href='#' name='" + WYMeditor.APPLY_CLASS + "' title='"+ titleize(WYMeditor.APPLY_CLASS) +"'></a>"
+ "<ul class='wym_classes wym_classes_hidden'>" + WYMeditor.CLASSES_ITEMS + "</ul>"
+ "</li>"
, classesItemHtml: "<li><a href='#' name='"+ WYMeditor.CLASS_NAME + "'>"+ WYMeditor.CLASS_TITLE+ "</a></li>"
, classesItemHtmlMultiple: "<li class='wym_tools_class_multiple_rules'>"
+ "<span>" + WYMeditor.CLASS_TITLE + "</span>"
+ "<ul>{classesItemHtml}</ul>"
+"</li>"
, containersHtml: "<ul class='wym_containers wym_section'>" + WYMeditor.CONTAINERS_ITEMS + "</ul>"
, containersItemHtml:
"<li class='" + WYMeditor.CONTAINER_CLASS + "'>"
+ "<a href='#' name='" + WYMeditor.CONTAINER_NAME + "' title='" + WYMeditor.CONTAINER_TITLE + "'></a>"
+ "</li>"
, boxHtml:
"<div class='wym_box'>"
+ "<div class='wym_area_top clearfix'>"
+ WYMeditor.CONTAINERS
+ WYMeditor.TOOLS
+ "</div>"
+ "<div class='wym_area_main'>"
+ WYMeditor.HTML
+ WYMeditor.IFRAME
+ WYMeditor.STATUS
+ "</div>"
+ "</div>"
, iframeHtml:
"<div class='wym_iframe wym_section'>"
+ "<iframe id='WYMeditor_" + WYMeditor.INDEX + "' src='" + WYMeditor.IFRAME_BASE_PATH + "wymiframe' frameborder='0'"
+ " onload='this.contentWindow.parent.WYMeditor.INSTANCES[" + WYMeditor.INDEX + "].initIframe(this);'></iframe>"
+"</div>"
, dialogImageHtml: ""
, dialogLinkHtml: ""
, dialogTableHtml:
"<div class='wym_dialog wym_dialog_table'>"
+ "<form>"
+ "<input type='hidden' id='wym_dialog_type' class='wym_dialog_type' value='"+ WYMeditor.DIALOG_TABLE + "' />"
+ "<div class='field'>"
+ "<label for='wym_caption'>{Caption}</label>"
+ "<input type='text' id='wym_caption' class='wym_caption' value='' size='40' />"
+ "</div>"
+ "<div class='field'>"
+ "<label for='wym_rows'>{Number_Of_Rows}</label>"
+ "<input type='text' id='wym_rows' class='wym_rows' value='3' size='3' />"
+ "</div>"
+ "<div class='field'>"
+ "<label for='wym_cols'>{Number_Of_Cols}</label>"
+ "<input type='text' id='wym_cols' class='wym_cols' value='2' size='3' />"
+ "</div>"
+ form_actions
+ "</form>"
+ "</div>"
, dialogPasteHtml:
"<div class='wym_dialog wym_dialog_paste'>"
+ "<form>"
+ "<input type='hidden' id='wym_dialog_type' class='wym_dialog_type' value='" + WYMeditor.DIALOG_PASTE + "' />"
+ "<div class='field'>"
+ "<textarea class='wym_text' rows='10' cols='50'></textarea>"
+ "</div>"
+ form_actions
+ "</form>"
+ "</div>"
, dialogPath: "/refinery/dialogs/"
, dialogFeatures: {
width: 866
, height: 455
, modal: true
, draggable: true
, resizable: false
, autoOpen: true
, open: onOpenDialog
, close: onCloseDialog
}
, dialogInlineFeatures: {
width: 600
, height: 485
, modal: true
, draggable: true
, resizable: false
, autoOpen: true
, open: onOpenDialog
, close: onCloseDialog
}
, dialogId: 'editor_dialog'
, dialogHtml:
"<!DOCTYPE html>"
+ "<html dir='" + WYMeditor.DIRECTION + "'>"
+ "<head>"
+ "<link rel='stylesheet' type='text/css' media='screen' href='" + WYMeditor.CSS_PATH + "' />"
+ "<title>" + WYMeditor.DIALOG_TITLE + "</title>"
+ "<script type='text/javascript' src='" + WYMeditor.JQUERY_PATH + "'></script>"
+ "<script type='text/javascript' src='" + WYMeditor.WYM_PATH + "'></script>"
+ "</head>"
+ "<body>"
+ "<div id='page'>" + WYMeditor.DIALOG_BODY + "</div>"
+ "</body>"
+ "</html>"
, postInit: function(wym)
{
// register loaded
wymeditors_loaded += 1;
// fire loaded if all editors loaded
if(WYMeditor.INSTANCES.length == wymeditors_loaded){
$('.wym_loading_overlay').remove();
WYMeditor.loaded();
}
$('.field.hide-overflow').removeClass('hide-overflow').css('height', 'auto');
}
, lang: (typeof(I18n.locale) != "undefined" ? I18n.locale : 'en')
}, custom_wymeditor_boot_options);
// custom function added by us to hook into when all wymeditor instances on the page have finally loaded:
WYMeditor.loaded = function(){};
$(function()
{
wymeditor_inputs = $('.wymeditor');
wymeditor_inputs.each(function(input) {
if ((containing_field = $(this).parents('.field')).get(0).style.height == '') {
containing_field.addClass('hide-overflow').css('height', $(this).outerHeight() - containing_field.offset().top + $(this).offset().top + 45);
}
$(this).hide();
});
wymeditor_inputs.wymeditor(wymeditor_boot_options);
});<|fim▁end|> | ,{'name': 'Unlink', 'title': 'Unlink', 'css': 'wym_tools_unlink'}
,{'name': 'InsertImage', 'title': 'Image', 'css': 'wym_tools_image'} |
<|file_name|>testStream.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: testStream.py
# Purpose: tests for stream.py
#
# Authors: Michael Scott Cuthbert
# Christopher Ariza
#
# Copyright: Copyright © 2009-2014 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
import random
import unittest
import copy
from music21.stream import Stream
from music21.stream import Voice
from music21.stream import Measure
from music21.stream import Score
from music21.stream import Part
from music21 import bar
from music21 import chord
from music21 import clef
from music21 import common
from music21 import duration
from music21 import interval
from music21 import key
from music21 import metadata
from music21 import meter
from music21 import note
from music21 import pitch
from music21.musicxml import m21ToXml
from music21.midi import translate as midiTranslate
from music21 import environment
_MOD = "testStream.py"
environLocal = environment.Environment(_MOD)
#-------------------------------------------------------------------------------
class TestExternal(unittest.TestCase):
def runTest(self):
pass
def testLilySimple(self):
a = Stream()
ts = meter.TimeSignature("3/4")
b = Stream()
q = note.Note(type='quarter')
q.octave = 5
b.repeatInsert(q, [0,1,2,3])
bestC = b.bestClef(allowTreble8vb = True)
a.insert(0, bestC)
a.insert(0, ts)
a.insert(0, b)
a.show('lily.png')
def testLilySemiComplex(self):
a = Stream()
ts = meter.TimeSignature("3/8")
b = Stream()
q = note.Note(type='eighth')
dur1 = duration.Duration()
dur1.type = "eighth"
tup1 = duration.Tuplet()
tup1.tupletActual = [5, dur1]
tup1.tupletNormal = [3, dur1]
q.octave = 2
q.duration.appendTuplet(tup1)
for i in range(0,5):
b.append(copy.deepcopy(q))
b.elements[i].accidental = pitch.Accidental(i - 2)
b.elements[0].duration.tuplets[0].type = "start"
b.elements[-1].duration.tuplets[0].type = "stop"
b.elements[2].editorial.comment.text = "a real C"
bestC = b.bestClef(allowTreble8vb = True)
a.insert(0, bestC)
a.insert(0, ts)
a.insert(0, b)
a.show('lily.png')
def testScoreLily(self):
'''
Test the lilypond output of various score operations.
'''
c = note.Note("C4")
d = note.Note("D4")
ts = meter.TimeSignature("2/4")
s1 = Part()
s1.append(copy.deepcopy(c))
s1.append(copy.deepcopy(d))
s2 = Part()
s2.append(copy.deepcopy(d))
s2.append(copy.deepcopy(c))
score1 = Score()
score1.insert(ts)
score1.insert(s1)
score1.insert(s2)
score1.show('lily.png')
def testMXOutput(self):
'''A simple test of adding notes to measures in a stream.
'''
c = Stream()
for dummy in range(4):
b = Measure()
for p in ['a', 'g', 'c#', 'a#']:
a = note.Note(p)
b.append(a)
c.append(b)
c.show()
def testMxMeasures(self):
'''A test of the automatic partitioning of notes in a measure and the creation of ties.
'''
n = note.Note()
n.quarterLength = 3
a = Stream()
a.repeatInsert(n, list(range(0,120,3)))
#a.show() # default time signature used
a.insert( 0, meter.TimeSignature("5/4") )
a.insert(10, meter.TimeSignature("2/4") )
a.insert( 3, meter.TimeSignature("3/16") )
a.insert(20, meter.TimeSignature("9/8") )
a.insert(40, meter.TimeSignature("10/4") )
a.show()
def testMultipartStreams(self):
'''Test the creation of multi-part streams by simply having streams within streams.
'''
q = Stream()
r = Stream()
for x in ['c3','a3','g#4','d2'] * 10:
n = note.Note(x)
n.quarterLength = .25
q.append(n)
m = note.Note(x)
m.quarterLength = 1.125
r.append(m)
s = Stream() # container
s.insert(q)
s.insert(r)
s.insert(0, meter.TimeSignature("3/4") )
s.insert(3, meter.TimeSignature("5/4") )
s.insert(8, meter.TimeSignature("3/4") )
s.show()
def testMultipartMeasures(self):
'''This demonstrates obtaining slices from a stream and layering
them into individual parts.
OMIT_FROM_DOCS
TODO: this should show instruments
this is presently not showing instruments
probably b/c when appending to s Stream activeSite is set to that stream
'''
from music21 import corpus, converter
a = converter.parse(corpus.getWork(['mozart', 'k155','movement2.xml']))
b = a[8][4:8]
c = a[8][8:12]
d = a[8][12:16]
s = Stream()
s.insert(b)
s.insert(c)
s.insert(d)
s.show()
def testCanons(self):
'''
A test of creating a canon with shifted presentations of a source melody.
This also demonstrates
the addition of rests to parts that start late or end early.
The addition of rests happens with makeRests(), which is called in
musicxml generation of a Stream.
'''
a = ['c', 'g#', 'd-', 'f#', 'e', 'f' ] * 4
s = Stream()
partOffsetShift = 1.25
partOffset = 0
for junk in range(6):
p = Stream()
for pitchName in a:
n = note.Note(pitchName)
n.quarterLength = 1.5
p.append(n)
p.offset = partOffset
s.insert(p)
partOffset += partOffsetShift
s.show()
def testBeamsPartial(self):
'''This demonstrates a partial beam; a beam that is not connected between more than one note.
'''
q = Stream()
for x in [.125, .25, .25, .125, .125, .125] * 30:
n = note.Note('c')
n.quarterLength = x
q.append(n)
s = Stream() # container
s.insert(q)
s.insert(0, meter.TimeSignature("3/4") )
s.insert(3, meter.TimeSignature("5/4") )
s.insert(8, meter.TimeSignature("4/4") )
s.show()
def testBeamsStream(self):
'''A test of beams applied to different time signatures.
'''
q = Stream()
r = Stream()
p = Stream()
for x in ['c3','a3','c#4','d3'] * 30:
n = note.Note(x)
#n.quarterLength = random.choice([.25, .125, .5])
n.quarterLength = random.choice([.25])
q.append(n)
m = note.Note(x)
m.quarterLength = .5
r.append(m)
o = note.Note(x)
o.quarterLength = .125
p.append(o)
s = Stream() # container
s.append(q)
s.append(r)
s.append(p)
s.insert(0, meter.TimeSignature("3/4") )
s.insert(3, meter.TimeSignature("5/4") )
s.insert(8, meter.TimeSignature("4/4") )
self.assertEqual(len(s.flat.notes), 360)
s.show()
def testBeamsMeasure(self):
aMeasure = Measure()
aMeasure.timeSignature = meter.TimeSignature('4/4')
aNote = note.Note()
aNote.quarterLength = .25
aMeasure.repeatAppend(aNote,16)
bMeasure = aMeasure.makeBeams()
bMeasure.show()
#-------------------------------------------------------------------------------
class Test(unittest.TestCase):
def runTest(self):
pass
def testAdd(self):
import music21 # needed to do fully-qualified isinstance name checking
a = Stream()
for dummy in range(5):
a.insert(0, music21.Music21Object())
self.assertTrue(a.isFlat)
a[2] = note.Note("C#")
self.assertTrue(a.isFlat)
a[3] = Stream()
self.assertFalse(a.isFlat)
def testSort(self):
s = Stream()
s.repeatInsert(note.Note("C#"), [0.0, 2.0, 4.0])
s.repeatInsert(note.Note("D-"), [1.0, 3.0, 5.0])
self.assertFalse(s.isSorted)
y = s.sorted
self.assertTrue(y.isSorted)
g = ""
for myElement in y:
g += "%s: %s; " % (myElement.offset, myElement.name)
self.assertEqual(g, '0.0: C#; 1.0: D-; 2.0: C#; 3.0: D-; 4.0: C#; 5.0: D-; ')
def testFlatSimple(self):
s1 = Score()
s1.id = "s1"
p1 = Part()
p1.id = "p1"
p2 = Part()
p2.id = "p2"
n1 = note.Note('C', type='half')
n2 = note.Note('D', type='quarter')
n3 = note.Note('E', type='quarter')
n4 = note.Note('F', type='half')
n1.id = "n1"
n2.id = "n2"
n3.id = "n3"
n4.id = "n4"
p1.append(n1)
p1.append(n2)
p2.append(n3)
p2.append(n4)
p2.offset = 20.0
s1.insert(p1)
s1.insert(p2)
sf1 = s1.flat
sf1.id = "flat s1"
# for site in n4.sites.getSites():
# print site.id,
# print n4.sites.getOffsetBySite(site)
self.assertEqual(len(sf1), 4)
assert(sf1[1] is n2)
def testActiveSiteCopiedStreams(self):
srcStream = Stream()
srcStream.insert(3, note.Note())
# the note's activeSite is srcStream now
self.assertEqual(srcStream[0].activeSite, srcStream)
midStream = Stream()
for x in range(2):
srcNew = copy.deepcopy(srcStream)
# for n in srcNew:
# offset = n.getOffsetBySite(srcStream)
#got = srcNew[0].getOffsetBySite(srcStream)
#for n in srcNew: pass
srcNew.offset = x * 10
midStream.insert(srcNew)
self.assertEqual(srcNew.offset, x * 10)
# no offset is set yet
self.assertEqual(midStream.offset, 0)
# component streams have offsets
self.assertEqual(midStream[0].getOffsetBySite(midStream), 0)
self.assertEqual(midStream[1].getOffsetBySite(midStream), 10.0)
# component notes still have a location set to srcStream
#self.assertEqual(midStream[1][0].getOffsetBySite(srcStream), 3.0)
# component notes still have a location set to midStream[1]
self.assertEqual(midStream[1][0].getOffsetBySite(midStream[1]), 3.0)
# one location in midstream
self.assertEqual(len(midStream.sites), 1)
#environLocal.printDebug(['srcStream', srcStream])
#environLocal.printDebug(['midStream', midStream])
x = midStream.flat
def testSimpleRecurse(self):
st1 = Stream()
st2 = Stream()
n1 = note.Note()
st2.insert(10, n1)
st1.insert(12, st2)
self.assertTrue(st1.flat.sorted[0] is n1)
self.assertEqual(st1.flat.sorted[0].offset, 22.0)
def testStreamRecursion(self):
srcStream = Stream()
for x in range(6):
n = note.Note('G#')
n.duration = duration.Duration('quarter')
n.offset = x * 1
srcStream.insert(n)
self.assertEqual(len(srcStream), 6)
self.assertEqual(len(srcStream.flat), 6)
self.assertEqual(srcStream.flat[1].offset, 1.0)
# self.assertEqual(len(srcStream.getOverlaps()), 0)
midStream = Stream()
for x in range(4):
srcNew = copy.deepcopy(srcStream)
srcNew.offset = x * 10
midStream.insert(srcNew)
self.assertEqual(len(midStream), 4)
#environLocal.printDebug(['pre flat of mid stream'])
self.assertEqual(len(midStream.flat), 24)
# self.assertEqual(len(midStream.getOverlaps()), 0)
mfs = midStream.flat.sorted
self.assertEqual(mfs[7].getOffsetBySite(mfs), 11.0)
farStream = Stream()
for x in range(7):
midNew = copy.deepcopy(midStream)
midNew.offset = x * 100
farStream.insert(midNew)
self.assertEqual(len(farStream), 7)
self.assertEqual(len(farStream.flat), 168)
# self.assertEqual(len(farStream.getOverlaps()), 0)
#
# get just offset times
# elementsSorted returns offset, dur, element
offsets = [a.offset for a in farStream.flat]
# create what we epxect to be the offsets
offsetsMatch = list(range(0, 6))
offsetsMatch += [x + 10 for x in range(0, 6)]
offsetsMatch += [x + 20 for x in range(0, 6)]
offsetsMatch += [x + 30 for x in range(0, 6)]
offsetsMatch += [x + 100 for x in range(0, 6)]
offsetsMatch += [x + 110 for x in range(0, 6)]
self.assertEqual(offsets[:len(offsetsMatch)], offsetsMatch)
def testStreamSortRecursion(self):
farStream = Stream()
for x in range(4):
midStream = Stream()
for y in range(4):
nearStream = Stream()
for z in range(4):
n = note.Note("G#")
n.duration = duration.Duration('quarter')
nearStream.insert(z * 2, n) # 0, 2, 4, 6
midStream.insert(y * 5, nearStream) # 0, 5, 10, 15
farStream.insert(x * 13, midStream) # 0, 13, 26, 39
# get just offset times
# elementsSorted returns offset, dur, element
fsfs = farStream.flat.sorted
offsets = [a.offset for a in fsfs] # safer is a.getOffsetBySite(fsfs)
offsetsBrief = offsets[:20]
self.assertEquals(offsetsBrief, [0, 2, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 15, 16, 17, 17, 18, 19, 19])
def testOverlapsA(self):
a = Stream()
# here, the third item overlaps with the first
for offset, dur in [(0,12), (3,2), (11,3)]:
n = note.Note('G#')
n.duration = duration.Duration()
n.duration.quarterLength = dur
n.offset = offset
a.insert(n)
includeDurationless = True
includeEndBoundary = False
simultaneityMap, overlapMap = a._findLayering(a.flat,
includeDurationless, includeEndBoundary)
self.assertEqual(simultaneityMap, [[], [], []])
self.assertEqual(overlapMap, [[1,2], [0], [0]])
dummy = a._consolidateLayering(a.flat, overlapMap)
# print dummy
#found = a.getOverlaps(includeDurationless, includeEndBoundary)
# there should be one overlap group
#self.assertEqual(len(found.keys()), 1)
# there should be three items in this overlap group
#self.assertEqual(len(found[0]), 3)
a = Stream()
# here, the thir item overlaps with the first
for offset, dur in [(0,1), (1,2), (2,3)]:
n = note.Note('G#')
n.duration = duration.Duration()
n.duration.quarterLength = dur
n.offset = offset
a.insert(n)
includeDurationless = True
includeEndBoundary = True
simultaneityMap, overlapMap = a._findLayering(a.flat,
includeDurationless, includeEndBoundary)
self.assertEqual(simultaneityMap, [[], [], []])
self.assertEqual(overlapMap, [[1], [0,2], [1]])
dummy = a._consolidateLayering(a.flat, overlapMap)
def testOverlapsB(self):
a = Stream()
for x in range(4):
n = note.Note('G#')
n.duration = duration.Duration('quarter')
n.offset = x * 1
a.insert(n)
d = a.getOverlaps(True, False)
# no overlaps
self.assertEqual(len(d), 0)
# including coincident boundaries
d = a.getOverlaps(includeDurationless=True, includeEndBoundary=True)
environLocal.printDebug(['testOverlapsB', d])
# return one dictionary that has a reference to each note that
# is in the same overlap group
self.assertEqual(len(d), 1)
self.assertEqual(len(d[0]), 4)
# a = Stream()
# for x in [0,0,0,0,13,13,13]:
# n = note.Note('G#')
# n.duration = duration.Duration('half')
# n.offset = x
# a.insert(n)
# d = a.getOverlaps()
# len(d[0])
# 4
# len(d[13])
# 3
# a = Stream()
# for x in [0,0,0,0,3,3,3]:
# n = note.Note('G#')
# n.duration = duration.Duration('whole')
# n.offset = x
# a.insert(n)
#
# # default is to not include coincident boundaries
# d = a.getOverlaps()
# len(d[0])
# 7
def testStreamDuration(self):
a = Stream()
q = note.Note(type='quarter')
a.repeatInsert(q, [0,1,2,3])
self.assertEqual(a.highestOffset, 3)
self.assertEqual(a.highestTime, 4)
self.assertEqual(a.duration.quarterLength, 4.0)
newDuration = duration.Duration("half")
self.assertEqual(newDuration.quarterLength, 2.0)
a.duration = newDuration
self.assertEqual(a.duration.quarterLength, 2.0)
self.assertEqual(a.highestTime, 4)
def testMeasureStream(self):
'''An approach to setting TimeSignature measures in offsets and durations
'''
a = meter.TimeSignature('3/4')
b = meter.TimeSignature('5/4')
c = meter.TimeSignature('2/4')
a.duration = duration.Duration()
b.duration = duration.Duration()
c.duration = duration.Duration()
# 20 measures of 3/4
a.duration.quarterLength = 20 * a.barDuration.quarterLength
# 10 measures of 5/4
b.duration.quarterLength = 10 * b.barDuration.quarterLength
# 5 measures of 2/4
c.duration.quarterLength = 5 * c.barDuration.quarterLength
m = Stream()
m.append(a)
m.append(b)
m.append(c)
self.assertEqual(m[1].offset, (20 * a.barDuration.quarterLength))
self.assertEqual(m[2].offset, ((20 * a.barDuration.quarterLength) +
(10 * b.barDuration.quarterLength)))
def testMultipartStream(self):
'''Test the creation of streams with multiple parts. See versions
of this tests in TestExternal for more details
'''
q = Stream()
r = Stream()
for x in ['c3','a3','g#4','d2'] * 10:
n = note.Note(x)
n.quarterLength = .25
q.append(n)
m = note.Note(x)
m.quarterLength = 1
r.append(m)
s = Stream() # container
s.insert(q)
s.insert(r)
s.insert(0, meter.TimeSignature("3/4") )
s.insert(3, meter.TimeSignature("5/4") )
s.insert(8, meter.TimeSignature("3/4") )
self.assertEqual(len(s.flat.notes), 80)
from music21 import corpus, converter
thisWork = corpus.getWork('corelli/opus3no1/1grave')
a = converter.parse(thisWork)
b = a[7][5:10]
environLocal.printDebug(['b', b, b.sites.getSiteIds()])
c = a[7][10:15]
environLocal.printDebug(['c', c, c.sites.getSiteIds()])
d = a[7][15:20]
environLocal.printDebug(['d', d, d.sites.getSiteIds()])
s2 = Stream()
environLocal.printDebug(['s2', s2, id(s2)])
s2.insert(b)
s2.insert(c)
s2.insert(d)
def testActiveSites(self):
'''Test activeSite relationships.
Note that here we see why sometimes qualified class names are needed.
This test passes fine with class names Part and Measure when run interactively,
creating a Test instance. When run from the command line
Part and Measure do not match, and instead music21.stream.Part has to be
employed instead.
'''
import music21.stream # needed to do fully-qualified isinstance name checking
from music21 import corpus
a = corpus.parse('corelli/opus3no1/1grave')
# test basic activeSite relationships
b = a[8]
self.assertEqual(isinstance(b, music21.stream.Part), True)
self.assertEqual(b.activeSite, a)
# this, if called, actively destroys the activeSite relationship!
# on the measures (as new Elements are not created)
#m = b.getElementsByClass('Measure')[5]
#self.assertEqual(isinstance(m, Measure), True)
# this false b/c, when getting the measures, activeSites are lost
#self.assertEqual(m.activeSite, b) #measures activeSite should be part
# NOTE: this is dependent on raw element order, and might change
# due to importing changes
#b.show('t')
self.assertEqual(isinstance(b[15], music21.stream.Measure), True)
self.assertEqual(b[8].activeSite, b) #measures activeSite should be part
# a different test derived from a TestExternal
q = Stream()
r = Stream()
for x in ['c3','a3','c#4','d3'] * 30:
n = note.Note(x)
n.quarterLength = random.choice([.25])
q.append(n)
m = note.Note(x)
m.quarterLength = .5
r.append(m)
s = Stream() # container
s.insert(q)
s.insert(r)
self.assertEqual(q.activeSite, s)
self.assertEqual(r.activeSite, s)
def testActiveSitesMultiple(self):
'''Test an object having multiple activeSites.
'''
a = Stream()
b = Stream()
n = note.Note("G#")
n.offset = 10
a.insert(n)
b.insert(n)
# the objects elements has been transfered to each activeSite
# stream in the same way
self.assertEqual(n.getOffsetBySite(a), n.getOffsetBySite(b))
self.assertEqual(n.getOffsetBySite(a), 10)
def testExtractedNoteAssignLyric(self):
from music21 import converter, corpus, text
a = converter.parse(corpus.getWork('corelli/opus3no1/1grave'))
b = a.parts[1]
c = b.flat
for thisNote in c.getElementsByClass('Note'):
thisNote.lyric = thisNote.name
textStr = text.assembleLyrics(b)
self.assertEqual(textStr.startswith('A A G F E'),
True)
def testGetInstrumentFromMxl(self):
'''Test getting an instrument from an mxl file
'''
from music21 import corpus, converter
# manually set activeSite to associate
a = converter.parse(corpus.getWork(['corelli', 'opus3no1',
'1grave.xml']))
b = a.parts[2]
# by calling the .part property, we create a new stream; thus, the
# activeSite of b is no longer a
# self.assertEqual(b.activeSite, None)
instObj = b.getInstrument()
self.assertEqual(instObj.partName, u'Violone e Organo')
def testGetInstrumentManual(self):
from music21 import defaults
#import pdb; pdb.set_trace()
# search activeSite from a measure within
# a different test derived from a TestExternal
q = Stream()
r = Stream()
for x in ['c3','a3','c#4','d3'] * 15:
n = note.Note(x)
n.quarterLength = random.choice([.25])
q.append(n)
m = note.Note(x)
m.quarterLength = .5
r.append(m)
s = Stream() # container
s.insert(q)
s.insert(r)
instObj = q.getInstrument()
self.assertEqual(instObj.partName, defaults.partName)
instObj = r.getInstrument()
self.assertEqual(instObj.partName, defaults.partName)
instObj = s.getInstrument()
self.assertEqual(instObj.partName, defaults.partName)
# test mx generation of parts
GEX = m21ToXml.GeneralObjectExporter()
unused_mx = GEX.parse(q).decode('utf-8')
unused_mx = GEX.parse(r).decode('utf-8')
# test mx generation of score
unused_mx = GEX.parse(s).decode('utf-8')
def testMeasureAndTieCreation(self):
'''A test of the automatic partitioning of notes in a measure and the creation of ties.
'''
n = note.Note()
n.quarterLength = 3
a = Stream()
a.repeatInsert(n, list(range(0,120,3)))
a.insert( 0, meter.TimeSignature("5/4") )
a.insert(10, meter.TimeSignature("2/4") )
a.insert( 3, meter.TimeSignature("3/16") )
a.insert(20, meter.TimeSignature("9/8") )
a.insert(40, meter.TimeSignature("10/4") )
GEX = m21ToXml.GeneralObjectExporter()
unused_mx = GEX.parse(a).decode('utf-8')
def testStreamCopy(self):
'''Test copying a stream
'''
#import pdb; pdb.set_trace()
# search activeSite from a measure within
# a different test derived from a TestExternal
q = Stream()
r = Stream()
for x in ['c3','a3','c#4','d3'] * 30:
n = note.Note(x)
n.quarterLength = random.choice([.25])
q.append(n)
m = note.Note(x)
m.quarterLength = .5
r.append(m)
s = Stream() # container
s.insert(q)
s.insert(r)
# copying the whole: this works
unused_w = copy.deepcopy(s)
post = Stream()
# copying while looping: this gets increasingly slow
for aElement in s:
environLocal.printDebug(['copying and inserting an element',
aElement, len(aElement.sites)])
bElement = copy.deepcopy(aElement)
post.insert(aElement.offset, bElement)
def testIteration(self):
'''This test was designed to illustrate a past problem with stream
Iterations.
'''
q = Stream()
r = Stream()
for x in ['c3','a3','c#4','d3'] * 5:
n = note.Note(x)
n.quarterLength = random.choice([.25])
q.append(n)
m = note.Note(x)
m.quarterLength = .5
r.append(m)
src = Stream() # container
src.insert(q)
src.insert(r)
a = Stream()
for obj in src.getElementsByClass('Stream'):
a.insert(obj)
environLocal.printDebug(['expected length', len(a)])
counter = 0
for x in a:
if counter >= 4:
environLocal.printDebug(['infinite loop', counter])
break
environLocal.printDebug([x])
junk = x.getInstrument(searchActiveSite=True)
del junk
counter += 1
def testGetTimeSignatures(self):
#getTimeSignatures
n = note.Note()
n.quarterLength = 3
a = Stream()
a.autoSort = False
a.insert( 0, meter.TimeSignature("5/4") )
a.insert(10, meter.TimeSignature("2/4") )
a.insert( 3, meter.TimeSignature("3/16") )
a.insert(20, meter.TimeSignature("9/8") )
a.insert(40, meter.TimeSignature("10/4") )
offsets = [x.offset for x in a]
self.assertEqual(offsets, [0.0, 10.0, 3.0, 20.0, 40.0])
# fill with notes
a.repeatInsert(n, list(range(0,120,3)))
b = a.getTimeSignatures(sortByCreationTime=False)
self.assertEqual(len(b), 5)
self.assertEqual(b[0].numerator, 5)
self.assertEqual(b[4].numerator, 10)
self.assertEqual(b[4].activeSite, b)
# none of the offsets are being copied
offsets = [x.offset for x in b]
# with autoSort is passed on from elements search
#self.assertEqual(offsets, [0.0, 3.0, 10.0, 20.0, 40.0])
self.assertEqual(offsets, [0.0, 10.0, 3.0, 20.0, 40.0])
def testElements(self):
'''Test basic Elements wrapping non music21 objects
'''
import music21 # needed to do fully-qualified isinstance name checking
a = Stream()
a.insert(50, music21.Music21Object())
self.assertEqual(len(a), 1)
# there are two locations, default and the one just added
self.assertEqual(len(a[0].sites), 2)
# this works
# self.assertEqual(a[0].sites.getOffsetByIndex(-1), 50.0)
# self.assertEqual(a[0].sites.getSiteByIndex(-1), a)
self.assertEqual(a[0].getOffsetBySite(a), 50.0)
self.assertEqual(a[0].offset, 50.0)
def testClefs(self):
s = Stream()
for x in ['c3','a3','c#4','d3'] * 5:
n = note.Note(x)
s.append(n)
clefObj = s.bestClef()
self.assertEqual(clefObj.sign, 'F')
measureStream = s.makeMeasures()
clefObj = measureStream[0].clef
self.assertEqual(clefObj.sign, 'F')
def testFindConsecutiveNotes(self):
s = Stream()
n1 = note.Note("c3")
n1.quarterLength = 1
n2 = chord.Chord(["c4", "e4", "g4"])
n2.quarterLength = 4
s.insert(0, n1)
s.insert(1, n2)
l1 = s.findConsecutiveNotes()
self.assertTrue(l1[0] is n1)
self.assertTrue(l1[1] is n2)
l2 = s.findConsecutiveNotes(skipChords = True)
self.assertTrue(len(l2) == 1)
self.assertTrue(l2[0] is n1)
r1 = note.Rest()
s2 = Stream()
s2.insert([0.0, n1,
1.0, r1,
2.0, n2])
l3 = s2.findConsecutiveNotes()
self.assertTrue(l3[1] is None)
l4 = s2.findConsecutiveNotes(skipRests = True)
self.assertTrue(len(l4) == 2)
s3 = Stream()
s3.insert([0.0, n1,
1.0, r1,
10.0, n2])
l5 = s3.findConsecutiveNotes(skipRests = False)
self.assertTrue(len(l5) == 3) # not 4 because two Nones allowed in a row!
l6 = s3.findConsecutiveNotes(skipRests = True, skipGaps = True)
self.assertTrue(len(l6) == 2)
n1.quarterLength = 10
n3 = note.Note("B-")
s4 = Stream()
s4.insert([0.0, n1,
1.0, n2,
10.0, n3])
l7 = s4.findConsecutiveNotes()
self.assertTrue(len(l7) == 2) # n2 is hidden because it is in an overlap
l8 = s4.findConsecutiveNotes(getOverlaps = True)
self.assertTrue(len(l8) == 3)
self.assertTrue(l8[1] is n2)
l9 = s4.findConsecutiveNotes(getOverlaps = True, skipChords = True)
self.assertTrue(len(l9) == 3)
self.assertTrue(l9[1] is None)
n4 = note.Note("A#")
n1.quarterLength = 1
n2.quarterLength = 1
s5 = Stream()
s5.insert([0.0, n1,
1.0, n2,
2.0, n3,
3.0, n4])
l10 = s5.findConsecutiveNotes()
self.assertTrue(len(l10) == 4)
l11 = s5.findConsecutiveNotes(skipUnisons = True)
self.assertTrue(len(l11) == 3)
self.assertTrue(l11[2] is n3)
n5 = note.Note("c4")
s6 = Stream()
s6.insert([0.0, n1,
1.0, n5,
2.0, n2])
l12 = s6.findConsecutiveNotes(noNone = True)
self.assertTrue(len(l12) == 3)
l13 = s6.findConsecutiveNotes(noNone = True, skipUnisons = True)
self.assertTrue(len(l13) == 3)
l14 = s6.findConsecutiveNotes(noNone = True, skipOctaves = True)
self.assertTrue(len(l14) == 2)
self.assertTrue(l14[0] is n1)
self.assertTrue(l14[1] is n2)
def testMelodicIntervals(self):
c4 = note.Note("C4")
d5 = note.Note("D5")
r1 = note.Rest()
b4 = note.Note("B4")
s1 = Stream()
s1.append([c4, d5, r1, b4])
intS1 = s1.melodicIntervals(skipRests=True)
self.assertTrue(len(intS1) == 2)
M9 = intS1[0]
self.assertEqual(M9.niceName, "Major Ninth")
## TODO: Many more tests
def testStripTiesBuiltA(self):
s1 = Stream()
n1 = note.Note("D#2")
n1.quarterLength = 6
s1.append(n1)
self.assertEqual(len(s1.notes), 1)
s1 = s1.makeMeasures()
s1.makeTies() # makes ties but no end tie positions!
# flat version has 2 notes
self.assertEqual(len(s1.flat.notes), 2)
sUntied = s1.stripTies()
self.assertEqual(len(sUntied.notes), 1)
self.assertEqual(sUntied.notes[0].quarterLength, 6)
n = note.Note()
n.quarterLength = 3
a = Stream()
a.repeatInsert(n, list(range(0,120,3)))
self.assertEqual(len(a), 40)
a.insert( 0, meter.TimeSignature("5/4") )
a.insert(10, meter.TimeSignature("2/4") )
a.insert( 3, meter.TimeSignature("3/16") )
a.insert(20, meter.TimeSignature("9/8") )
a.insert(40, meter.TimeSignature("10/4") )
b = a.makeMeasures()
b.makeTies()
# we now have 65 notes, as ties have been created
self.assertEqual(len(b.flat.notes), 65)
c = b.stripTies() # gets flat, removes measures
self.assertEqual(len(c.notes), 40)
def testStripTiesImportedA(self):
from music21 import converter
from music21.musicxml import testPrimitive
a = converter.parse(testPrimitive.multiMeasureTies)
p1 = a.parts[0]
self.assertEqual(len(p1.flat.notesAndRests), 16)
p1.stripTies(inPlace=True, retainContainers=True)
self.assertEqual(len(p1.flat.notesAndRests), 6)
p2 = a.parts[1]
self.assertEqual(len(p2.flat.notesAndRests), 16)
p2Stripped = p2.stripTies(inPlace=False, retainContainers=True)
self.assertEqual(len(p2Stripped.flat.notesAndRests), 5)
# original part should not be changed
self.assertEqual(len(p2.flat.notesAndRests), 16)
p3 = a.parts[2]
self.assertEqual(len(p3.flat.notesAndRests), 16)
p3.stripTies(inPlace=True, retainContainers=True)
self.assertEqual(len(p3.flat.notesAndRests), 3)
p4 = a.parts[3]
self.assertEqual(len(p4.flat.notesAndRests), 16)
p4Notes = p4.stripTies(retainContainers=False)
# original should be unchanged
self.assertEqual(len(p4.flat.notesAndRests), 16)
# lesser notes
self.assertEqual(len(p4Notes.notesAndRests), 10)
def testGetElementsByOffsetZeroLength(self):
'''
Testing multiple zero-length elements with mustBeginInSpan:
'''
c = clef.TrebleClef()
ts = meter.TimeSignature('4/4')
ks = key.KeySignature(2)
s = Stream()
s.insert(0.0, c)
s.insert(0.0, ts)
s.insert(0.0, ks)
l1 = len(s.getElementsByOffset(0.0, mustBeginInSpan=True))
l2 = len(s.getElementsByOffset(0.0, mustBeginInSpan=False))
self.assertEqual(l1, 3)
self.assertEqual(l2, 3)
def testStripTiesScore(self):
'''Test stripTies using the Score method
'''
from music21 import corpus, converter
from music21.musicxml import testPrimitive
# This score has 4 parts, each with eight measures, and 2 half-notes
# per measure, equaling 16 half notes, but with differing tie type.
# 1: . .~|~. .~|~.~~.~|~. .~|~. .~|~.~~. | .~~.~|~.~~. ||
# 2: .~~.~|~.~~. | .~~.~|~.~~. | .~~.~|~.~~. | .~~.~|~. . ||
# 3: .~~.~|~. .~|~.~~. | .~~.~|~.~~.~|~.~~.~|~.~~.~|~.~~. ||
# 4: . . | .~~. | . .~|~.~~. | . .~|~. .~|~. .~|~. . ||
s = converter.parse(testPrimitive.multiMeasureTies)
self.assertEqual(len(s.parts), 4)
self.assertEqual(len(s.parts[0].flat.notesAndRests), 16)
self.assertEqual(len(s.parts[1].flat.notesAndRests), 16)
self.assertEqual(len(s.parts[2].flat.notesAndRests), 16)
self.assertEqual(len(s.parts[3].flat.notesAndRests), 16)
# first, in place false
sPost = s.stripTies(inPlace=False)
self.assertEqual(len(sPost.parts[0].flat.notesAndRests), 6)
self.assertEqual(len(sPost.parts[1].flat.notesAndRests), 5)
self.assertEqual(len(sPost.parts[2].flat.notesAndRests), 3)
self.assertEqual(len(sPost.parts[3].flat.notesAndRests), 10)
# make sure original is unchchanged
self.assertEqual(len(s.parts[0].flat.notesAndRests), 16)
self.assertEqual(len(s.parts[1].flat.notesAndRests), 16)
self.assertEqual(len(s.parts[2].flat.notesAndRests), 16)
self.assertEqual(len(s.parts[3].flat.notesAndRests), 16)
# second, in place true
sPost = s.stripTies(inPlace=True)
self.assertEqual(len(s.parts[0].flat.notesAndRests), 6)
self.assertEqual(len(s.parts[1].flat.notesAndRests), 5)
self.assertEqual(len(s.parts[2].flat.notesAndRests), 3)
self.assertEqual(len(s.parts[3].flat.notesAndRests), 10)
# just two ties here
s = corpus.parse('bach/bwv66.6')
self.assertEqual(len(s.parts), 4)
self.assertEqual(len(s.parts[0].flat.notesAndRests), 37)
self.assertEqual(len(s.parts[1].flat.notesAndRests), 42)
self.assertEqual(len(s.parts[2].flat.notesAndRests), 45)
self.assertEqual(len(s.parts[3].flat.notesAndRests), 41)
# perform strip ties in place
s.stripTies(inPlace=True)
self.assertEqual(len(s.parts[0].flat.notesAndRests), 36)
self.assertEqual(len(s.parts[1].flat.notesAndRests), 42)
self.assertEqual(len(s.parts[2].flat.notesAndRests), 44)
self.assertEqual(len(s.parts[3].flat.notesAndRests), 41)
def testTwoStreamMethods(self):
from music21.note import Note
(n11,n12,n13,n14) = (Note(), Note(), Note(), Note())
(n21,n22,n23,n24) = (Note(), Note(), Note(), Note())
n11.step = "C"
n12.step = "D"
n13.step = "E"
n14.step = "F"
n21.step = "G"
n22.step = "A"
n23.step = "B"
n24.step = "C"
n24.octave = 5
n11.duration.type = "half"
n12.duration.type = "whole"
n13.duration.type = "eighth"
n14.duration.type = "half"
n21.duration.type = "half"
n22.duration.type = "eighth"
n23.duration.type = "whole"
n24.duration.type = "eighth"
stream1 = Stream()
stream1.append([n11,n12,n13,n14])
stream2 = Stream()
stream2.append([n21,n22,n23,n24])
attackedTogether = stream1.simultaneousAttacks(stream2)
self.assertEqual(len(attackedTogether), 3) # nx1, nx2, nx4
thisNote = stream2.getElementsByOffset(attackedTogether[1])[0]
self.assertTrue(thisNote is n22)
playingWhenAttacked = stream1.playingWhenAttacked(n23)
self.assertTrue(playingWhenAttacked is n12)
allPlayingWhileSounding = stream2.allPlayingWhileSounding(n14)
self.assertEqual(len(allPlayingWhileSounding), 1)
self.assertTrue(allPlayingWhileSounding[0] is n24)
# trimPlayingWhileSounding = \
# stream2.trimPlayingWhileSounding(n12)
# assert trimPlayingWhileSounding[0] == n22
# assert trimPlayingWhileSounding[1].duration.quarterLength == 3.5
def testMeasureRange(self):
from music21 import corpus
a = corpus.parse('bach/bwv324.xml')
b = a.parts[3].measures(4,6)
self.assertEqual(len(b.getElementsByClass('Measure')), 3)
#b.show('t')
# first measure now has keu sig
unused_bMeasureFirst = b.getElementsByClass('Measure')[0]
self.assertEqual(len(b.flat.getElementsByClass(
key.KeySignature)), 1)
# first measure now has meter
self.assertEqual(len(b.flat.getElementsByClass(
meter.TimeSignature)), 1)
# first measure now has clef
self.assertEqual(len(b.flat.getElementsByClass(clef.Clef)), 1)
#b.show()
# get first part
p1 = a.parts[0]
# get measure by class; this will not manipulate the measure
mExRaw = p1.getElementsByClass('Measure')[5]
self.assertEqual(str([n for n in mExRaw.notes]), '[<music21.note.Note B>, <music21.note.Note D>]')
self.assertEqual(len(mExRaw.flat), 3)
# get measure by using method; this will add elements
mEx = p1.measure(6)
self.assertEqual(str([n for n in mEx.notes]), '[<music21.note.Note B>, <music21.note.Note D>]')
self.assertEqual(len(mEx.flat), 3)
# make sure source has not chnaged
mExRaw = p1.getElementsByClass('Measure')[5]
self.assertEqual(str([n for n in mExRaw.notes]), '[<music21.note.Note B>, <music21.note.Note D>]')
self.assertEqual(len(mExRaw.flat), 3)
# test measures with no measure numbesr
c = Stream()
for dummy in range(4):
m = Measure()
n = note.Note()
m.repeatAppend(n, 4)
c.append(m)
#c.show()
d = c.measures(2,3)
self.assertEqual(len(d), 2)
#d.show()
# try the score method
a = corpus.parse('bach/bwv324.xml')
b = a.measures(2,4)
self.assertEqual(len(b[0].flat.getElementsByClass(clef.Clef)), 1)
self.assertEqual(len(b[1].flat.getElementsByClass(clef.Clef)), 1)
self.assertEqual(len(b[2].flat.getElementsByClass(clef.Clef)), 1)
self.assertEqual(len(b[3].flat.getElementsByClass(clef.Clef)), 1)
self.assertEqual(len(b[0].flat.getElementsByClass(key.KeySignature)), 1)
self.assertEqual(len(b[1].flat.getElementsByClass(key.KeySignature)), 1)
self.assertEqual(len(b[2].flat.getElementsByClass(key.KeySignature)), 1)
self.assertEqual(len(b[3].flat.getElementsByClass(key.KeySignature)), 1)
#b.show()
def testMeasureOffsetMap(self):
from music21 import corpus
a = corpus.parse('bach/bwv324.xml')
mOffsetMap = a.parts[0].measureOffsetMap()
self.assertEqual(sorted(list(mOffsetMap.keys())),
[0.0, 4.0, 8.0, 12.0, 16.0, 20.0, 24.0, 34.0, 38.0] )
# try on a complete score
a = corpus.parse('bach/bwv324.xml')
mOffsetMap = a.measureOffsetMap()
#environLocal.printDebug([mOffsetMap])
self.assertEqual(sorted(list(mOffsetMap.keys())),
[0.0, 4.0, 8.0, 12.0, 16.0, 20.0, 24.0, 34.0, 38.0] )
for unused_key, value in mOffsetMap.items():
# each key contains 4 measures, one for each part
self.assertEqual(len(value), 4)
# we can get this information from Notes too!
a = corpus.parse('bach/bwv324.xml')
# get notes from one measure
mOffsetMap = a.parts[0].flat.measureOffsetMap(note.Note)
self.assertEqual(sorted(list(mOffsetMap.keys())), [0.0, 4.0, 8.0, 12.0, 16.0, 20.0, 24.0, 34.0, 38.0] )
self.assertEqual(str(mOffsetMap[0.0]), '[<music21.stream.Measure 1 offset=0.0>]')
self.assertEqual(str(mOffsetMap[4.0]), '[<music21.stream.Measure 2 offset=4.0>]')
# TODO: getting inconsistent results with these
# instead of storing a time value for locations, use an index
# count
m1 = a.parts[0].getElementsByClass('Measure')[1]
#m1.show('text')
mOffsetMap = m1.measureOffsetMap(note.Note)
# offset here is that of measure that originally contained this note
#environLocal.printDebug(['m1', m1, 'mOffsetMap', mOffsetMap])
self.assertEqual(sorted(list(mOffsetMap.keys())), [4.0] )
m2 = a.parts[0].getElementsByClass('Measure')[2]
mOffsetMap = m2.measureOffsetMap(note.Note)
# offset here is that of measure that originally contained this note
self.assertEqual(sorted(list(mOffsetMap.keys())), [8.0] )
# this should work but does not yet
# it seems that the flat score does not work as the flat part
# mOffsetMap = a.flat.measureOffsetMap('Note')
# self.assertEqual(sorted(mOffsetMap.keys()), [0.0, 4.0, 8.0, 12.0, 16.0, 20.0, 24.0, 28.0, 32.0] )
def testMeasureOffsetMapPostTie(self):
from music21 import corpus, stream
a = corpus.parse('bach/bwv4.8.xml')
# alto line syncopated/tied notes across bars
#a.show()
alto = a.parts[1]
self.assertEqual(len(alto.flat.notesAndRests), 73)
# offset map for measures looking at the part's Measures
# note that pickup bar is taken into account
post = alto.measureOffsetMap()
self.assertEqual(sorted(list(post.keys())), [0.0, 1.0, 5.0, 9.0, 13.0, 17.0, 21.0, 25.0, 29.0, 33.0, 37.0, 41.0, 45.0, 49.0, 53.0, 57.0, 61.0] )
# looking at Measure and Notes: no problem
post = alto.flat.measureOffsetMap([Measure, note.Note])
self.assertEqual(sorted(list(post.keys())), [0.0, 1.0, 5.0, 9.0, 13.0, 17.0, 21.0, 25.0, 29.0, 33.0, 37.0, 41.0, 45.0, 49.0, 53.0, 57.0, 61.0] )
# after stripping ties, we have a stream with fewer notes
altoPostTie = a.parts[1].stripTies()
# we can get the length of this directly b/c we just of a stream of
# notes, no Measures
self.assertEqual(len(altoPostTie.notesAndRests), 69)
# we can still get measure numbers:
mNo = altoPostTie.notesAndRests[3].getContextByClass(stream.Measure).number
self.assertEqual(mNo, 1)
mNo = altoPostTie.notesAndRests[8].getContextByClass(stream.Measure).number
self.assertEqual(mNo, 2)
mNo = altoPostTie.notesAndRests[15].getContextByClass(stream.Measure).number
self.assertEqual(mNo, 4)
# can we get an offset Measure map by looking for measures
post = altoPostTie.measureOffsetMap(stream.Measure)
# nothing: no Measures:
self.assertEqual(list(post.keys()), [])
# but, we can get an offset Measure map by looking at Notes
post = altoPostTie.measureOffsetMap(note.Note)
# nothing: no Measures:
self.assertEqual(sorted(list(post.keys())), [0.0, 1.0, 5.0, 9.0, 13.0, 17.0, 21.0, 25.0, 29.0, 33.0, 37.0, 41.0, 45.0, 49.0, 53.0, 57.0, 61.0])
#from music21 import graph
#graph.plotStream(altoPostTie, 'scatter', values=['pitchclass','offset'])
def testMusicXMLGenerationViaPropertyA(self):
'''Test output tests above just by calling the musicxml attribute
'''
a = ['c', 'g#', 'd-', 'f#', 'e', 'f' ] * 4
partOffset = 7.5
p = Stream()
for pitchName in a:
n = note.Note(pitchName)
n.quarterLength = 1.5
p.append(n)
p.offset = partOffset
p.transferOffsetToElements()
junk = p.getTimeSignatures(searchContext=True, sortByCreationTime=True)
p.makeRests(refStreamOrTimeRange=[0, 100],
inPlace=True)
self.assertEqual(p.lowestOffset, 0)
self.assertEqual(p.highestTime, 100.0)
GEX = m21ToXml.GeneralObjectExporter()
unused_mx = GEX.parse(p).decode('utf-8')
# can only recreate problem in the context of two Streams
s = Stream()
partOffsetShift = 1.25
partOffset = 7.5
for unused_x in range(2):
p = Stream()
for pitchName in a:
n = note.Note(pitchName)
n.quarterLength = 1.5
p.append(n)
p.offset = partOffset
s.insert(p)
partOffset += partOffsetShift
#s.show()
unused_mx = GEX.parse(p).decode('utf-8')
def testMusicXMLGenerationViaPropertyB(self):
'''Test output tests above just by calling the musicxml attribute
'''
n = note.Note()
n.quarterLength = 3
a = Stream()
a.repeatInsert(n, list(range(0,120,3)))
#a.show() # default time signature used
a.insert( 0, meter.TimeSignature("5/4") )
a.insert(10, meter.TimeSignature("2/4") )
a.insert( 3, meter.TimeSignature("3/16") )
a.insert(20, meter.TimeSignature("9/8") )
a.insert(40, meter.TimeSignature("10/4") )
GEX = m21ToXml.GeneralObjectExporter()
unused_mx = GEX.parse(a).decode('utf-8')
def testMusicXMLGenerationViaPropertyC(self):
'''Test output tests above just by calling the musicxml attribute
'''
a = ['c', 'g#', 'd-', 'f#', 'e', 'f' ] * 4
s = Stream()
partOffsetShift = 1.25
partOffset = 0
for unused_part in range(6):
p = Stream()
for pitchName in a:
n = note.Note(pitchName)
n.quarterLength = 1.5
p.append(n)
p.offset = partOffset
s.insert(p)
partOffset += partOffsetShift
#s.show()
GEX = m21ToXml.GeneralObjectExporter()
unused_mx = GEX.parse(p).decode('utf-8')
def testContextNestedA(self):
'''Testing getting clefs from higher-level streams
'''
s1 = Stream()
s2 = Stream()
n1 = note.Note()
c1 = clef.AltoClef()
s1.append(n1) # this is the model of a stream with a single part
s2.append(s1)
s2.insert(0, c1)
# from the lower level stream, we should be able to get to the
# higher level clef
post = s1.getContextByClass(clef.Clef)
self.assertEqual(isinstance(post, clef.AltoClef), True)
# we can also use getClefs to get this from s1 or s2
post = s1.getClefs()[0]
self.assertEqual(isinstance(post, clef.AltoClef), True)
post = s2.getClefs()[0]
self.assertEqual(isinstance(post, clef.AltoClef), True)
#environLocal.printDebug(['sites.get() of s1', s1.sites.get()])
# attempting to move the substream into a new stream
s3 = Stream()
s3.insert(s1) # insert at same offset as s2
# we cannot get the alto clef from s3; this makes sense
post = s3.getClefs()[0]
self.assertEqual(isinstance(post, clef.TrebleClef), True)
# s1 has both streams as sites
self.assertEqual(s1.hasSite(s3), True)
self.assertEqual(s1.hasSite(s2), True)
# but if we search s1, shuold not it find an alto clef?
post = s1.getClefs()
#environLocal.printDebug(['should not be treble clef:', post])
self.assertEqual(isinstance(post[0], clef.AltoClef), True)
# this all works fine
sMeasures = s2.makeMeasures(finalBarline='regular')
self.assertEqual(len(sMeasures), 1)
self.assertEqual(len(sMeasures.getElementsByClass('Measure')), 1) # one measure
self.assertEqual(len(sMeasures[0]), 3)
# first is clef
self.assertEqual(isinstance(sMeasures[0][0], clef.AltoClef), True)
# second is sig
self.assertEqual(str(sMeasures[0][1]), '<music21.meter.TimeSignature 4/4>')
#environLocal.printDebug(['here', sMeasures[0][2]])
#sMeasures.show('t')
# the third element is a Note; we get it from flattening during
# makeMeasures
self.assertEqual(isinstance(sMeasures[0][2], note.Note), True)
# this shows the proper outpt withs the proper clef.
#sMeasures.show()
# we cannot get clefs from sMeasures b/c that is the topmost
# stream container; there are no clefs here, only at a lower leve
post = sMeasures.getElementsByClass(clef.Clef)
self.assertEqual(len(post), 0)
def testContextNestedB(self):
'''Testing getting clefs from higher-level streams
'''
sInner = Stream()
sInner.id = 'innerStream'
n1 = note.Note()
sInner.append(n1) # this is the model of a stream with a single part
sOuter = Stream()
sOuter.id = 'outerStream'
sOuter.append(sInner)
c1 = clef.AltoClef()
sOuter.insert(0, c1)
# this works fine
post = sInner.getContextByClass(clef.Clef)
self.assertEqual(isinstance(post, clef.AltoClef), True)
# if we flatten sInner, we cannot still get the clef: why?
sInnerFlat = sInner.flat
sInnerFlat.id = 'sInnerFlat'
# # but it has sOuter has a context
# self.assertEqual(sInnerFlat.hasSite(sOuter), True)
# #environLocal.printDebug(['sites.get() of sInnerFlat', sInnerFlat.sites.get()])
# #environLocal.printDebug(['sites.siteDict of sInnerFlat', sInnerFlat.sites.siteDict])
#
#
# self.assertEqual(sInnerFlat.hasSite(sOuter), True)
#
# # this returns the proper dictionary entry
# #environLocal.printDebug(
# # ['sInnerFlat.sites.siteDict[id(sInner)', sInnerFlat.sites.siteDict[id(sOuter)]])
# # we can extract out the same reference
# unused_sOuterOut = sInnerFlat.sites.getById(id(sOuter))
# this works
post = sInnerFlat.getContextByClass(clef.Clef)
self.assertEqual(isinstance(post, clef.AltoClef), True, "post %r is not an AltoClef" % post)
# 2014 April -- timeSpans version -- not needed...
## this will only work if the callerFirst is manually set to sInnerFlat
## otherwise, this interprets the DefinedContext object as the first
## caller
#post = sInnerFlat.sites.getObjByClass(clef.Clef, callerFirst=sInnerFlat)
#self.assertEqual(isinstance(post, clef.AltoClef), True)
def testContextNestedC(self):
'''Testing getting clefs from higher-level streams
'''
from music21 import sites
s1 = Stream()
s1.id = 's1'
s2 = Stream()
s2.id = 's2'
n1 = note.Note()
c1 = clef.AltoClef()
s1.append(n1) # this is the model of a stream with a single part
s2.append(s1)
s2.insert(0, c1)
# this works fine
post = s1.getContextByClass(clef.Clef)
self.assertEqual(isinstance(post, clef.AltoClef), True)
# this is a key tool of the serial reverse search
post = s2.getElementAtOrBefore(0, [clef.Clef])
self.assertEqual(isinstance(post, clef.AltoClef), True)
# this is a key tool of the serial reverse search
post = s2.flat.getElementAtOrBefore(0, [clef.Clef])
self.assertEqual(isinstance(post, clef.AltoClef), True)
# s1 is in s2; but s1.flat is not in s2! -- not true if isFlat is true
self.assertEqual(s2.elementOffset(s1), 0.0)
self.assertRaises(sites.SitesException, s2.elementOffset, s1.flat)
# this did not work before; the clef is in s2; its not in a context of s2
post = s2.getContextByClass(clef.Clef)
self.assertEqual(isinstance(post, clef.AltoClef), True)
# we can find the clef from the flat version of 21
post = s1.flat.getContextByClass(clef.Clef)
self.assertEqual(isinstance(post, clef.AltoClef), True)
def testContextNestedD(self):
'''
Testing getting clefs from higher-level streams
'''
n1 = note.Note()
n2 = note.Note()
s1 = Part()
s1.id = 's1'
s2 = Part()
s2.id = 's2'
sOuter = Score()
sOuter.id = 'sOuter'
s1.append(n1)
s2.append(n2)
sOuter.insert(0, s1)
sOuter.insert(0, s2)
self.assertEqual(s1.activeSite, sOuter)
ac = clef.AltoClef()
ac.priority = -1
sOuter.insert(0, ac)
# both output parts have alto clefs
# get clef form higher level stream; only option
self.assertEqual(s1.activeSite, sOuter)
post = s1.getClefs()[0]
self.assertTrue(isinstance(post, clef.AltoClef))
self.assertEqual(s1.activeSite, sOuter)
post = s2.getClefs()[0]
self.assertTrue(isinstance(post, clef.AltoClef))
# now we in sort a clef in s2; s2 will get this clef first
s2.insert(0, clef.TenorClef())
# only second part should have tenor clef
post = s2.getClefs()[0]
self.assertTrue(isinstance(post, clef.TenorClef))
# but stream s1 should get the alto clef still
#print list(s1.contextSites())
post = s1.getContextByClass('Clef')
#print post
self.assertTrue(isinstance(post, clef.AltoClef))
# s2 flat gets the tenor clef; it was inserted in it
post = s2.flat.getClefs()[0]
self.assertTrue(isinstance(post, clef.TenorClef))
# a copy copies the clef; so we still get the same clef
s2FlatCopy = copy.deepcopy(s2.flat)
post = s2FlatCopy.getClefs()[0]
self.assertTrue(isinstance(post, clef.TenorClef))
# s1 flat will get the alto clef; it still has a pathway
post = s1.flat.getClefs()[0]
self.assertTrue(isinstance(post, clef.AltoClef))
# once we create a deepcopy of s1, it is no longer connected to
# its parent if we purge orphans and it is not in sOuter
s1Flat = s1.flat
s1Flat.id = 's1Flat'
s1FlatCopy = copy.deepcopy(s1Flat)
s1FlatCopy.id = 's1FlatCopy'
self.assertEqual(len(s1FlatCopy.getClefs(returnDefault=False)), 1)
post = s1FlatCopy.getClefs(returnDefault=False)[0]
self.assertTrue(isinstance(post, clef.AltoClef), "post %r is not an AltoClef" % post)
post = s1Flat.getClefs()[0]
self.assertTrue(isinstance(post, clef.AltoClef), post)
#environLocal.printDebug(['s1.activeSite', s1.activeSite])
self.assertTrue(sOuter in s1.sites.getSites())
s1Measures = s1.makeMeasures()
#print s1Measures[0].clef
# this used to be True, but I think it's better as False now...
#self.assertTrue(isinstance(s1Measures[0].clef, clef.AltoClef), s1Measures[0].clef)
self.assertTrue(isinstance(s1Measures[0].clef, clef.TrebleClef), s1Measures[0].clef)
s2Measures = s2.makeMeasures()
self.assertTrue(isinstance(s2Measures[0].clef, clef.TenorClef))
# try making a deep copy of s3
s3copy = copy.deepcopy(sOuter)
#s1Measures = s3copy[0].makeMeasures()
# TODO: had to comment out with changes to getElementAtOrBefore
# problem is sort order of found elements at or before
# if two elements of the same class are found at the same offset
# they cannot be distinguished
# perhaps need to return more than one;
# or getElementAtOrBefore needs to return a list
s2Measures = s3copy.getElementsByClass('Stream')[1].makeMeasures()
self.assertEqual(isinstance(s2Measures[0].clef, clef.TenorClef), True)
#s2Measures.show() # this shows the proper clef
#TODO: this still returns tenor clef for both parts
# need to examine
# now we in sert a clef in s2; s2 will get this clef first
s1.insert(0, clef.BassClef())
post = s1.getClefs()[0]
self.assertEqual(isinstance(post, clef.BassClef), True)
#s3.show()
def testMakeRestsA(self):
a = ['c', 'g#', 'd-', 'f#', 'e', 'f' ] * 4
partOffsetShift = 1.25
partOffset = 2 # start at non zero
for unused_part in range(6):
p = Stream()
for pitchName in a:
n = note.Note(pitchName)
n.quarterLength = 1.5
p.append(n)
p.offset = partOffset
self.assertEqual(p.lowestOffset, 0)
p.transferOffsetToElements()
self.assertEqual(p.lowestOffset, partOffset)
p.makeRests()
#environLocal.printDebug(['first element', p[0], p[0].duration])
# by default, initial rest should be made
sub = p.getElementsByClass(note.Rest)
self.assertEqual(len(sub), 1)
self.assertEqual(sub.duration.quarterLength, partOffset)
# first element should have offset of first dur
self.assertEqual(p[1].offset, sub.duration.quarterLength)
partOffset += partOffsetShift
def testMakeRestsB(self):
# test makeRests fillGaps
from music21 import stream
s = stream.Stream()
m1 = stream.Measure()
m1.timeSignature = meter.TimeSignature('4/4')
m1.insert(2, note.Note())
m2 = stream.Measure()
m2.insert(1, note.Note())
self.assertEqual(m2.isSorted, True)
s.insert(0, m1)
s.insert(4, m2)
# must connect Measures to Streams before filling gaps
m1.makeRests(fillGaps=True, timeRangeFromBarDuration=True)
m2.makeRests(fillGaps=True, timeRangeFromBarDuration=True)
self.assertEqual(m2.isSorted, True)
#m2.sort()
match = str([(n.offset, n, n.duration) for n in m2.flat.notesAndRests])
self.assertEqual(match, '[(0.0, <music21.note.Rest rest>, <music21.duration.Duration 1.0>), (1.0, <music21.note.Note C>, <music21.duration.Duration 1.0>), (2.0, <music21.note.Rest rest>, <music21.duration.Duration 2.0>)]')
match = str([(n.offset, n, n.duration) for n in m2.flat])
self.assertEqual(match, '[(0.0, <music21.note.Rest rest>, <music21.duration.Duration 1.0>), (1.0, <music21.note.Note C>, <music21.duration.Duration 1.0>), (2.0, <music21.note.Rest rest>, <music21.duration.Duration 2.0>)]')
#m2.show()
match = str([n for n in s.flat.notesAndRests])
self.assertEqual(match, '[<music21.note.Rest rest>, <music21.note.Note C>, <music21.note.Rest rest>, <music21.note.Rest rest>, <music21.note.Note C>, <music21.note.Rest rest>]')
match = str([(n, n.duration) for n in s.flat.notesAndRests])
self.assertEqual(match, '[(<music21.note.Rest rest>, <music21.duration.Duration 2.0>), (<music21.note.Note C>, <music21.duration.Duration 1.0>), (<music21.note.Rest rest>, <music21.duration.Duration 1.0>), (<music21.note.Rest rest>, <music21.duration.Duration 1.0>), (<music21.note.Note C>, <music21.duration.Duration 1.0>), (<music21.note.Rest rest>, <music21.duration.Duration 2.0>)]')
GEX = m21ToXml.GeneralObjectExporter()
unused_mx = GEX.parse(s).decode('utf-8')
#s.show('text')
#s.show()
def testMakeMeasuresInPlace(self):
sScr = Stream()
sScr.insert(0, clef.TrebleClef())
sScr.insert(0, meter.TimeSignature('3/4'))
sScr.append(note.Note('C4', quarterLength = 3.0))
sScr.append(note.Note('D4', quarterLength = 3.0))
sScr.makeMeasures(inPlace = True)
self.assertEqual(len(sScr.getElementsByClass('Measure')), 2)
self.assertEqual(sScr.measure(1).notes[0].name, 'C')
self.assertEqual(sScr.measure(2).notes[0].name, 'D')
def testMakeMeasuresMeterStream(self):
'''Testing making measures of various sizes with a supplied single element meter stream. This illustrates an approach to partitioning elements by various sized windows.
'''
from music21 import corpus
sBach = corpus.parse('bach/bwv324.xml')
meterStream = Stream()
meterStream.insert(0, meter.TimeSignature('2/4'))
# need to call make ties to allocate notes
sPartitioned = sBach.flat.makeMeasures(meterStream).makeTies(
inPlace=False)
self.assertEqual(len(sPartitioned.getElementsByClass('Measure')), 21)
meterStream = Stream()
meterStream.insert(0, meter.TimeSignature('1/4'))
# need to call make ties to allocate notes
sPartitioned = sBach.flat.makeMeasures(meterStream).makeTies(
inPlace=False)
self.assertEqual(len(sPartitioned.getElementsByClass('Measure')), 42)
meterStream = Stream()
meterStream.insert(0, meter.TimeSignature('3/4'))
# need to call make ties to allocate notes
sPartitioned = sBach.flat.makeMeasures(meterStream).makeTies(
inPlace=False)
self.assertEqual(len(sPartitioned.getElementsByClass('Measure')), 14)
meterStream = Stream()
meterStream.insert(0, meter.TimeSignature('12/4'))
# need to call make ties to allocate notes
sPartitioned = sBach.flat.makeMeasures(meterStream).makeTies(
inPlace=False)
self.assertEqual(len(sPartitioned.getElementsByClass('Measure')), 4)
meterStream = Stream()
meterStream.insert(0, meter.TimeSignature('48/4'))
# need to call make ties to allocate notes
sPartitioned = sBach.flat.makeMeasures(meterStream).makeTies(
inPlace=False)
self.assertEqual(len(sPartitioned.getElementsByClass('Measure')), 1)
def testMakeMeasuresWithBarlines(self):
'''Test makeMeasures with optional barline parameters.
'''
from music21 import stream
s = stream.Stream()
s.repeatAppend(note.Note(quarterLength=.5), 20)
s.insert(0, meter.TimeSignature('5/8'))
# default is no normal barlines, but a final barline
barred1 = s.makeMeasures()
self.assertEqual(
str(barred1.getElementsByClass('Measure')[-1].rightBarline),
'<music21.bar.Barline style=final>')
#barred1.show()
barred2 = s.makeMeasures(innerBarline='dashed', finalBarline='double')
match = [str(m.rightBarline) for m in
barred2.getElementsByClass('Measure')]
self.assertEqual(match, ['<music21.bar.Barline style=dashed>', '<music21.bar.Barline style=dashed>', '<music21.bar.Barline style=dashed>', '<music21.bar.Barline style=double>'])
#barred2.show()
# try using bar objects
bar1 = bar.Barline('none')
bar2 = bar.Barline('short')
barred3 = s.makeMeasures(innerBarline=bar1, finalBarline=bar2)
#barred3.show()
match = [str(m.rightBarline) for m in
barred3.getElementsByClass('Measure')]
self.assertEqual(match, ['<music21.bar.Barline style=none>', '<music21.bar.Barline style=none>', '<music21.bar.Barline style=none>', '<music21.bar.Barline style=short>'])
# setting to None will not set a barline object at all
barred4 = s.makeMeasures(innerBarline=None, finalBarline=None)
match = [str(m.rightBarline) for m in
barred4.getElementsByClass('Measure')]
self.assertEqual(match, ['None', 'None', 'None', 'None'] )
def testRemove(self):
'''Test removing components from a Stream.
'''
s = Stream()
n1 = note.Note('g')
n2 = note.Note('g#')
n3 = note.Note('a')
s.insert(0, n1)
s.insert(10, n3)
s.insert(5, n2)
self.assertEqual(len(s), 3)
self.assertEqual(n1.activeSite, s)
s.remove(n1)
self.assertEqual(len(s), 2)
# activeSite is Now sent to None
self.assertEqual(n1.activeSite, None)
def testRemoveByClass(self):
from music21 import stream
s = stream.Stream()
s.repeatAppend(clef.BassClef(), 2)
s.repeatAppend(note.Note(), 2)
s.repeatAppend(clef.TrebleClef(), 2)
self.assertEqual(len(s), 6)
s.removeByClass('BassClef')
self.assertEqual(len(s), 4)
self.assertEqual(len(s.notes), 2)
s.removeByClass(clef.Clef)
self.assertEqual(len(s), 2)
self.assertEqual(len(s.notes), 2)
s.removeByClass(['Music21Object'])
self.assertEqual(len(s.notes), 0)
def testReplace(self):
'''Test replacing components from a Stream.
'''
s = Stream()
n1 = note.Note('g')
n2 = note.Note('g#')
n3 = note.Note('a')
n4 = note.Note('c')
s.insert(0, n1)
s.insert(5, n2)
self.assertEqual(len(s), 2)
s.replace(n1, n3)
self.assertEqual([s[0], s[1]], [n3, n2])
s.replace(n2, n4)
self.assertEqual([s[0], s[1]], [n3, n4])
s.replace(n4, n1)
self.assertEqual([s[0], s[1]], [n3, n1])
def testReplaceA1(self):
from music21 import corpus
sBach = corpus.parse('bach/bwv324.xml')
partSoprano = sBach.parts[0]
c1 = partSoprano.flat.getElementsByClass('Clef')[0]
self.assertEqual(isinstance(c1, clef.TrebleClef), True)
# now, replace with a different clef
c2 = clef.AltoClef()
partSoprano.flat.replace(c1, c2, allDerived=True)
# all views of the Stream have been updated
cTest = sBach.parts[0].flat.getElementsByClass('Clef')[0]
self.assertEqual(isinstance(cTest, clef.AltoClef), True)
def testReplaceB(self):
n1 = note.Note('g')
n2 = note.Note('g#')
s0 = Stream()
s1 = copy.deepcopy(s0)
s2 = copy.deepcopy(s1)
s3 = Stream()
s0.insert( 0, n1)
s1.insert(10, n1)
s2.insert(20, n1)
s3.insert(30, n1)
s1.replace(n1, n2, allDerived=True)
# s1 is derived from s0 so n1 is replaced
self.assertIs(s0[0], n2)
self.assertEqual(s0[0].getOffsetBySite(s0), 0)
# s1 was the replacement stream, so definitely n1 becomes n2
self.assertIs(s1[0], n2)
self.assertEqual(s1[0].getOffsetBySite(s1), 10)
# s2 was derived from s0, not vice versa, so n1 is left alone.
self.assertIs(s2[0], n1)
self.assertEqual(s2[0].getOffsetBySite(s2), 20)
# s3 is completely out of any derivation chain, so left alone
self.assertIs(s3[0], n1)
self.assertEqual(s3[0].getOffsetBySite(s3), 30)
def testReplaceDerivated(self):
from music21 import corpus
qj = corpus.parse('ciconia/quod_jactatur').parts[0].measures(1,2)
qj.id = 'measureExcerpt'
qjflat = qj.flat
dc = list(qjflat.derivation.chain())
self.assertIs(dc[0], qj)
k1 = qjflat.getElementsByClass(key.KeySignature)[0]
self.assertEqual(k1.sharps, -1)
k3flats = key.KeySignature(-3)
# put k1 in an unrelated site:
mUnrelated = Measure()
mUnrelated.insert(0, k1)
# here's the big one
qjflat.replace(k1, k3flats, allDerived=True)
kWhich = qjflat.getElementsByClass(key.KeySignature)[0]
self.assertIs(kWhich, k3flats)
self.assertEqual(kWhich.sharps, -3)
kWhich2 = qj.recurse().getElementsByClass(key.KeySignature)[0]
self.assertIs(kWhich2, k3flats)
self.assertEqual(kWhich2.sharps, -3)
# check that unrelated is untouched
self.assertIs(mUnrelated[0], k1)
def testDoubleStreamPlacement(self):
n1 = note.Note()
s1 = Stream()
s1.insert(n1)
#environLocal.printDebug(['n1.siteIds after one insertion', n1, n1.getSites(), n1.sites.getSiteIds()])
s2 = Stream()
s2.insert(s1)
#environLocal.printDebug(['n1.siteIds after container insertion', n1, n1.getSites(), n1.sites.getSiteIds()])
s2Flat = s2.flat
#environLocal.printDebug(['s1', s1, id(s1)])
#environLocal.printDebug(['s2', s2, id(s2)])
#environLocal.printDebug(['s2flat', s2Flat, id(s2Flat)])
#environLocal.printDebug(['n1.siteIds', n1, n1.getSites(), n1.sites.getSiteIds()])
# previously, one of these raised an error
unused_s3 = copy.deepcopy(s2Flat)
s3 = copy.deepcopy(s2.flat)
unused_s3Measures = s3.makeMeasures()
def testBestTimeSignature(self):
'''Get a time signature based on components in a measure.
'''
m = Measure()
for ql in [2,3,2]:
n = note.Note()
n.quarterLength = ql
m.append(n)
ts = m.bestTimeSignature()
self.assertEqual(ts.numerator, 7)
self.assertEqual(ts.denominator, 4)
m = Measure()
for ql in [1.5, 1.5]:
n = note.Note()
n.quarterLength = ql
m.append(n)
ts = m.bestTimeSignature()
self.assertEqual(ts.numerator, 6)
self.assertEqual(ts.denominator, 8)
m = Measure()
for ql in [.25, 1.5]:
n = note.Note()
n.quarterLength = ql
m.append(n)
ts = m.bestTimeSignature()
self.assertEqual(ts.numerator, 7)
self.assertEqual(ts.denominator, 16)
def testGetKeySignatures(self):
'''Searching contexts for key signatures
'''
s = Stream()
ks1 = key.KeySignature(3)
ks2 = key.KeySignature(-3)
s.append(ks1)
s.append(ks2)
post = s.getKeySignatures()
self.assertEqual(post[0], ks1)
self.assertEqual(post[1], ks2)
# try creating a key signature in one of two measures
# try to get last active key signature
ks1 = key.KeySignature(3)
m1 = Measure()
n1 = note.Note()
n1.quarterLength = 4
m1.append(n1)
m1.keySignature = ks1 # assign to measure via property
m2 = Measure()
n2 = note.Note()
n2.quarterLength = 4
m2.append(n2)
s = Stream()
s.append(m1)
s.append(m2)
# can get from measure
post = m1.getKeySignatures()
self.assertEqual(post[0], ks1)
# we can get from the Stream by flattening
post = s.flat.getKeySignatures()
self.assertEqual(post[0], ks1)
# we can get the key signature in m1 from m2
post = m2.getKeySignatures()
self.assertEqual(post[0], ks1)
def testGetKeySignaturesThreeMeasures(self):
'''Searching contexts for key signatures
'''
ks1 = key.KeySignature(3)
ks3 = key.KeySignature(5)
m1 = Measure()
n1 = note.Note()
n1.quarterLength = 4
m1.append(n1)
m1.keySignature = ks1 # assign to measure via property
m2 = Measure()
n2 = note.Note()
n2.quarterLength = 4
m2.append(n2)
m3 = Measure()
n3 = note.Note()
n3.quarterLength = 4
m3.append(n3)
m3.keySignature = ks3 # assign to measure via property
s = Stream()
s.append(m1)
s.append(m2)
s.append(m3)
# can get from measure
post = m1.getKeySignatures()
self.assertEqual(post[0], ks1)
# we can get the key signature in m1 from m2
post = m2.getKeySignatures()
self.assertEqual(post[0], ks1)
# if we search m3, we get the key signature in m3
post = m3.getKeySignatures()
self.assertEqual(post[0], ks3)
def testMakeAccidentalsA(self):
'''Test accidental display setting
'''
s = Stream()
n1 = note.Note('a#')
n2 = note.Note('a4')
r1 = note.Rest()
c1 = chord.Chord(['a#2', 'a4', 'a5'])
n3 = note.Note('a4')
s.append(n1)
s.append(r1)
s.append(n2)
s.append(c1)
s.append(n3)
s.makeAccidentals()
self.assertEqual(n2.pitch.accidental.displayStatus, True)
# both a's in the chord now have naturals but are hidden
self.assertEqual(c1.pitches[1].accidental, None)
#self.assertEqual(c1.pitches[2].accidental.displayStatus, True)
# not getting a natural here because of chord tones
#self.assertEqual(n3.pitch.accidental.displayStatus, True)
#self.assertEqual(n3.pitch.accidental, None)
#s.show()
s = Stream()
n1 = note.Note('a#')
n2 = note.Note('a')
r1 = note.Rest()
c1 = chord.Chord(['a#2', 'a4', 'a5'])
s.append(n1)
s.append(r1)
s.append(n2)
s.append(c1)
s.makeAccidentals(cautionaryPitchClass=False)
# a's in the chord do not have naturals
self.assertEqual(c1.pitches[1].accidental, None)
self.assertEqual(c1.pitches[2].accidental, None)
def testMakeAccidentalsB(self):
from music21 import corpus
s = corpus.parse('monteverdi/madrigal.5.3.rntxt')
m34 = s.parts[0].getElementsByClass('Measure')[33]
c = m34.getElementsByClass('Chord')
# assuming not showing accidental b/c of key
self.assertEqual(str(c[1].pitches), '(<music21.pitch.Pitch B-4>, <music21.pitch.Pitch D5>, <music21.pitch.Pitch F5>)')
# because of key
self.assertEqual(str(c[1].pitches[0].accidental.displayStatus), 'False')
s = corpus.parse('monteverdi/madrigal.5.4.rntxt')
m74 = s.parts[0].getElementsByClass('Measure')[73]
c = m74.getElementsByClass('Chord')
# has correct pitches but natural not showing on C
self.assertEqual(str(c[0].pitches), '(<music21.pitch.Pitch C5>, <music21.pitch.Pitch E5>, <music21.pitch.Pitch G5>)')
self.assertEqual(str(c[0].pitches[0].accidental), 'None')
def testMakeAccidentalsC(self):
from music21 import stream
# this isolates the case where a new measure uses an accidental
# that was used in a past measure
m1 = stream.Measure()
m1.repeatAppend(note.Note('f4'), 2)
m1.repeatAppend(note.Note('f#4'), 2)
m2 = stream.Measure()
m2.repeatAppend(note.Note('f#4'), 4)
ex = stream.Part()
ex.append([m1, m2])
# without applying make accidentals, all sharps are shown
self.assertEqual(len(ex.flat.notes), 8)
self.assertEqual(len(ex.flat.notes[2:]), 6)
#ex.flat.notes[2:].show()
# all sharps, unknown display status (displayStatus == None)
acc = [str(n.pitch.accidental) for n in ex.flat.notes[2:]]
self.assertEqual(acc, ['<accidental sharp>', '<accidental sharp>', '<accidental sharp>', '<accidental sharp>', '<accidental sharp>', '<accidental sharp>'])
display = [n.pitch.accidental.displayStatus for n in ex.flat.notes[2:]]
self.assertEqual(display, [None, None, None, None, None, None])
# call make accidentals
# cautionaryNotImmediateRepeat=True is default
# cautionaryPitchClass=True is default
ex.makeAccidentals(inPlace=True)
display = [n.pitch.accidental.displayStatus for n in ex.flat.notes[2:]]
# need the second true b/c it is the start of a new measure
self.assertEqual(display, [True, False, True, False, False, False])
p = stream.Part()
p.insert(0, meter.TimeSignature('2/4'))
tuplet1 = note.Note("E-4", quarterLength=1.0/3.0)
tuplet2 = note.Note("F#4", quarterLength=2.0/3.0)
p.repeatAppend(tuplet1, 10)
p.repeatAppend(tuplet2, 7)
ex = p.makeNotation()
#ex.show('text')
display = [n.pitch.accidental.displayStatus for n in ex.flat.notes]
self.assertEqual(display, [1,0,0, 0,0,0, 1,0,0, 0, 1, 1, 0, 0, 1, 0, 0])
def testMakeAccidentalsD(self):
from music21 import stream
p1 = stream.Part()
m1 = stream.Measure()
m1.append(meter.TimeSignature('4/4'))
m1.append(note.Note('C#', type='half'))
m1.append(note.Note('C#', type='half'))
m1.rightBarline = 'final'
p1.append(m1)
p1.makeNotation(inPlace=True)
match = [p.accidental.displayStatus for p in p1.pitches]
self.assertEqual(match, [True, False])
m = p1.measure(1)
self.assertEqual(str(m.rightBarline), '<music21.bar.Barline style=final>')
def testMakeAccidentalsWithKeysInMeasures(self):
scale1 = ['c4', 'd4', 'e4', 'f4', 'g4', 'a4', 'b4', 'c5']
scale2 = ['c', 'd', 'e-', 'f', 'g', 'a-', 'b-', 'c5']
scale3 = ['c#', 'd#', 'e#', 'f#', 'g#', 'a#', 'b#', 'c#5']
s = Stream()
for scale in [scale1, scale2, scale3]:
for ks in [key.KeySignature(0), key.KeySignature(2),
key.KeySignature(4), key.KeySignature(7), key.KeySignature(-1),
key.KeySignature(-3)]:
m = Measure()
m.timeSignature = meter.TimeSignature('4/4')
m.keySignature = ks
for p in scale*2:
n = note.Note(p)
n.quarterLength = .25
n.addLyric(n.pitch.name)
m.append(n)
m.makeBeams(inPlace=True)
m.makeAccidentals(inPlace=True)
s.append(m)
# TODO: add tests
#s.show()
def testMakeAccidentalsTies(self):
'''
tests to make sure that Accidental display status is correct after a tie.
'''
from music21 import converter
bm = converter.parse(
"tinynotation: 4/4 c#'2 b-2~ b-8 c#'8~ c#'8 b-8 c#'8 b-8~ b-8~ b-8",
makeNotation=False)
bm.makeNotation(inPlace = True, cautionaryNotImmediateRepeat = False)
allNotes = bm.flat.notes
# 0C# 1B-~ | 2B- 3C#~ 4C# 6B- 7C# 8B-~ 9B-~ 10B-
ds = [True, True, False, True, False, True, False, False, False, False]
for i in range(len(allNotes)):
self.assertEqual(allNotes[i].pitch.accidental.displayStatus,
ds[i],
"%d failed, %s != %s" %
(i, allNotes[i].pitch.accidental.displayStatus, ds[i]))
# add another B-flat just after the tied one...
bm = converter.parse(
"tinynotation: 4/4 c#'2 b-2~ b-8 b-8 c#'8~ c#'8 b-8 c#'8 b-8~ b-8~ b-8",
makeNotation=False)
bm.makeNotation(inPlace = True, cautionaryNotImmediateRepeat = False)
allNotes = bm.flat.notes
# 0C# 1B-~ | 2B- 3B- 4C#~ 5C# 6B- 7C# 8B-~ 9B-~ | 10B-
ds = [True, True, False, True, True, False, False, False, False, False, False]
for i in range(len(allNotes)):
self.assertEqual(allNotes[i].pitch.accidental.displayStatus,
ds[i],
"%d failed, %s != %s" %
(i, allNotes[i].pitch.accidental.displayStatus, ds[i]))
def testMakeAccidentalsOctaveKS(self):
s = Stream()
k = key.KeySignature(-3)
s.append(k)
s.append(note.Note('B-2'))
s.append(note.Note('B-1'))
for n in s.notes:
self.assertEqual(n.pitch.accidental.displayStatus, None)
s.makeAccidentals(inPlace = True)
for n in s.notes:
self.assertEqual(n.pitch.accidental.displayStatus, False)
def testScaleOffsetsBasic(self):
'''
'''
from music21 import stream
def procCompare(s, scalar, match):
oListSrc = [e.offset for e in s]
oListSrc.sort()
sNew = s.scaleOffsets(scalar, inPlace=False)
oListPost = [e.offset for e in sNew]
oListPost.sort()
#environLocal.printDebug(['scaleOffsets', oListSrc, '\npost scaled by:', scalar, oListPost])
self.assertEqual(oListPost[:len(match)], match)
# test equally spaced half notes starting at zero
n = note.Note()
n.quarterLength = 2
s = stream.Stream()
s.repeatAppend(n, 10)
# provide start of resulting values
# half not spacing becomes whole note spacing
procCompare(s, 2, [0.0, 4.0, 8.0])
procCompare(s, 4, [0.0, 8.0, 16.0, 24.0])
procCompare(s, 3, [0.0, 6.0, 12.0, 18.0])
procCompare(s, .5, [0.0, 1.0, 2.0, 3.0])
procCompare(s, .25, [0.0, 0.5, 1.0, 1.5])
# test equally spaced quarter notes start at non-zero
n = note.Note()
n.quarterLength = 1
s = stream.Stream()
s.repeatInsert(n, list(range(100, 110)))
procCompare(s, 1, [100, 101, 102, 103])
procCompare(s, 2, [100, 102, 104, 106])
procCompare(s, 4, [100, 104, 108, 112])
procCompare(s, 1.5, [100, 101.5, 103.0, 104.5])
procCompare(s, .5, [100, 100.5, 101.0, 101.5])
procCompare(s, .25, [100, 100.25, 100.5, 100.75])
# test non equally spaced notes starting at zero
s = stream.Stream()
n1 = note.Note()
n1.quarterLength = 1
s.repeatInsert(n, list(range(0, 30, 3)))
n2 = note.Note()
n2.quarterLength = 2
s.repeatInsert(n, list(range(1, 30, 3)))
# procCompare will sort offsets; this test non sorted operation
procCompare(s, 1, [0.0, 1.0, 3.0, 4.0, 6.0, 7.0])
procCompare(s, .5, [0.0, 0.5, 1.5, 2.0, 3.0, 3.5])
procCompare(s, 2, [0.0, 2.0, 6.0, 8.0, 12.0, 14.0])
# test non equally spaced notes starting at non-zero
s = stream.Stream()
n1 = note.Note()
n1.quarterLength = 1
s.repeatInsert(n, list(range(100, 130, 3)))
n2 = note.Note()
n2.quarterLength = 2
s.repeatInsert(n, list(range(101, 130, 3)))
# procCompare will sort offsets; this test non sorted operation
procCompare(s, 1, [100.0, 101.0, 103.0, 104.0, 106.0, 107.0])
procCompare(s, .5, [100.0, 100.5, 101.5, 102.0, 103.0, 103.5])
procCompare(s, 2, [100.0, 102.0, 106.0, 108.0, 112.0, 114.0])
procCompare(s, 6, [100.0, 106.0, 118.0, 124.0, 136.0, 142.0])
def testScaleOffsetsBasicInPlaceA(self):
'''
'''
from music21 import stream
def procCompare(s, scalar, match):
# test equally spaced half notes starting at zero
n = note.Note()
n.quarterLength = 2
s = stream.Stream()
s.repeatAppend(n, 10)
oListSrc = [e.offset for e in s]
oListSrc.sort()
s.scaleOffsets(scalar, inPlace=True)
oListPost = [e.offset for e in s]
oListPost.sort()
#environLocal.printDebug(['scaleOffsets', oListSrc, '\npost scaled by:', scalar, oListPost])
self.assertEqual(oListPost[:len(match)], match)
s = None # placeholder
# provide start of resulting values
# half not spacing becomes whole note spacing
procCompare(s, 2, [0.0, 4.0, 8.0])
procCompare(s, 4, [0.0, 8.0, 16.0, 24.0])
procCompare(s, 3, [0.0, 6.0, 12.0, 18.0])
procCompare(s, .5, [0.0, 1.0, 2.0, 3.0])
procCompare(s, .25, [0.0, 0.5, 1.0, 1.5])
def testScaleOffsetsBasicInPlaceB(self):
'''
'''
from music21 import stream
def procCompare(s, scalar, match):
# test equally spaced quarter notes start at non-zero
n = note.Note()
n.quarterLength = 1
s = stream.Stream()
s.repeatInsert(n, list(range(100, 110)))
oListSrc = [e.offset for e in s]
oListSrc.sort()
s.scaleOffsets(scalar, inPlace=True)
oListPost = [e.offset for e in s]
oListPost.sort()
#environLocal.printDebug(['scaleOffsets', oListSrc, '\npost scaled by:', scalar, oListPost])
self.assertEqual(oListPost[:len(match)], match)
s = None # placeholder
procCompare(s, 1, [100, 101, 102, 103])
procCompare(s, 2, [100, 102, 104, 106])
procCompare(s, 4, [100, 104, 108, 112])
procCompare(s, 1.5, [100, 101.5, 103.0, 104.5])
procCompare(s, .5, [100, 100.5, 101.0, 101.5])
procCompare(s, .25, [100, 100.25, 100.5, 100.75])
def testScaleOffsetsBasicInPlaceC(self):
'''
'''
from music21 import stream
def procCompare(s, scalar, match):
# test non equally spaced notes starting at zero
s = stream.Stream()
n1 = note.Note()
n1.quarterLength = 1
s.repeatInsert(n1, list(range(0, 30, 3)))
n2 = note.Note()
n2.quarterLength = 2
s.repeatInsert(n2, list(range(1, 30, 3)))
oListSrc = [e.offset for e in s]
oListSrc.sort()
s.scaleOffsets(scalar, inPlace=True)
oListPost = [e.offset for e in s]
oListPost.sort()
#environLocal.printDebug(['scaleOffsets', oListSrc, '\npost scaled by:', scalar, oListPost])
self.assertEqual(oListPost[:len(match)], match)
# procCompare will sort offsets; this test non sorted operation
s = None # placeholder
procCompare(s, 1, [0.0, 1.0, 3.0, 4.0, 6.0, 7.0])
procCompare(s, .5, [0.0, 0.5, 1.5, 2.0, 3.0, 3.5])
procCompare(s, 2, [0.0, 2.0, 6.0, 8.0, 12.0, 14.0])
def testScaleOffsetsBasicInPlaceD(self):
'''
'''
from music21 import stream
def procCompare(s, scalar, match):
# test non equally spaced notes starting at non-zero
s = stream.Stream()
n1 = note.Note()
n1.quarterLength = 1
s.repeatInsert(n1, list(range(100, 130, 3)))
n2 = note.Note()
n2.quarterLength = 2
s.repeatInsert(n2, list(range(101, 130, 3)))
oListSrc = [e.offset for e in s]
oListSrc.sort()
s.scaleOffsets(scalar, inPlace=True)
oListPost = [e.offset for e in s]
oListPost.sort()
#environLocal.printDebug(['scaleOffsets', oListSrc, '\npost scaled by:', scalar, oListPost])
self.assertEqual(oListPost[:len(match)], match)
# procCompare will sort offsets; this test non sorted operation
s = None # placeholder
procCompare(s, 1, [100.0, 101.0, 103.0, 104.0, 106.0, 107.0])
procCompare(s, .5, [100.0, 100.5, 101.5, 102.0, 103.0, 103.5])
procCompare(s, 2, [100.0, 102.0, 106.0, 108.0, 112.0, 114.0])
procCompare(s, 6, [100.0, 106.0, 118.0, 124.0, 136.0, 142.0])
def testScaleOffsetsNested(self):
'''
'''
from music21 import stream
def offsetMap(s): # lists of offsets, with lists of lists
post = []
for e in s:
sub = []
sub.append(e.offset)
#if hasattr(e, 'elements'):
if e.isStream:
sub.append(offsetMap(e))
post.append(sub)
return post
def procCompare(s, scalar, anchorZeroRecurse, match):
oListSrc = offsetMap(s)
oListSrc.sort()
sNew = s.scaleOffsets(scalar, anchorZeroRecurse=anchorZeroRecurse,
inPlace=False)
oListPost = offsetMap(sNew)
oListPost.sort()
#environLocal.printDebug(['scaleOffsets', oListSrc, '\npost scaled by:', scalar, oListPost])
self.assertEqual(oListPost[:len(match)], match)
# test equally spaced half notes starting at zero
n1 = note.Note()
n1.quarterLength = 2
s1 = stream.Stream()
s1.repeatAppend(n1, 4)
n2 = note.Note()
n2.quarterLength = .5
s2 = stream.Stream()
s2.repeatAppend(n2, 4)
s1.append(s2)
# offset map gives us a nested list presentation of all offsets
# usefulfor testing
self.assertEquals(offsetMap(s1),
[[0.0], [2.0], [4.0], [6.0], [8.0, [[0.0], [0.5], [1.0], [1.5]]]])
# provide start of resulting values
# half not spacing becomes whole note spacing
procCompare(s1, 2, 'lowest',
[[0.0], [4.0], [8.0], [12.0], [16.0, [[0.0], [1.0], [2.0], [3.0]]]]
)
procCompare(s1, 4, 'lowest',
[[0.0], [8.0], [16.0], [24.0], [32.0, [[0.0], [2.0], [4.0], [6.0]]]]
)
procCompare(s1, .25, 'lowest',
[[0.0], [0.5], [1.0], [1.5], [2.0, [[0.0], [0.125], [0.25], [0.375]]]]
)
# test unequally spaced notes starting at non-zero
n1 = note.Note()
n1.quarterLength = 1
s1 = stream.Stream()
s1.repeatInsert(n1, [10,14,15,17])
n2 = note.Note()
n2.quarterLength = .5
s2 = stream.Stream()
s2.repeatInsert(n2, [40,40.5,41,41.5])
s1.append(s2)
s1.append(copy.deepcopy(s2))
s1.append(copy.deepcopy(s2))
# note that, with these nested streams,
# the first value of an embeded stream stays in the same
# position relative to that stream.
# it might be necessary, in this case, to scale the start
# time of the first elemen
# that is, it should have no shift
# provide anchorZeroRecurse value
self.assertEquals(offsetMap(s1),
[[10.0], [14.0], [15.0], [17.0],
[18.0, [[40.0], [40.5], [41.0], [41.5]]],
[60.0, [[40.0], [40.5], [41.0], [41.5]]],
[102.0, [[40.0], [40.5], [41.0], [41.5]]]]
)
procCompare(s1, 2, 'lowest',
[[10.0], [18.0], [20.0], [24.0],
[26.0, [[40.0], [41.0], [42.0], [43.0]]],
[110.0, [[40.0], [41.0], [42.0], [43.0]]],
[194.0, [[40.0], [41.0], [42.0], [43.0]]]]
)
# if anchorZeroRecurse is None, embedded stream that do not
# start at zero are scaled proportionally
procCompare(s1, 2, None,
[[10.0], [18.0], [20.0], [24.0],
[26.0, [[80.0], [81.0], [82.0], [83.0]]],
[110.0, [[80.0], [81.0], [82.0], [83.0]]],
[194.0, [[80.0], [81.0], [82.0], [83.0]]]]
)
procCompare(s1, .25, 'lowest',
[[10.0], [11.0], [11.25], [11.75],
[12.0, [[40.0], [40.125], [40.25], [40.375]]],
[22.5, [[40.0], [40.125], [40.25], [40.375]]],
[33.0, [[40.0], [40.125], [40.25], [40.375]]]]
)
# if anchorZeroRecurse is None, embedded stream that do not
# start at zero are scaled proportionally
procCompare(s1, .25, None,
[[10.0], [11.0], [11.25], [11.75],
[12.0, [[10.0], [10.125], [10.25], [10.375]]],
[22.5, [[10.0], [10.125], [10.25], [10.375]]],
[33.0, [[10.0], [10.125], [10.25], [10.375]]]]
)
def testScaleDurationsBasic(self):
'''Scale some durations, independent of offsets.
'''
def procCompare(s, scalar, match):
#oListSrc = [e.quarterLength for e in s]
sNew = s.scaleDurations(scalar, inPlace=False)
oListPost = [e.quarterLength for e in sNew]
self.assertEqual(oListPost[:len(match)], match)
n1 = note.Note()
n1.quarterLength = .5
s1 = Stream()
s1.repeatInsert(n1, list(range(6)))
# test inPlace v/ not inPlace
sNew = s1.scaleDurations(2, inPlace=False)
self.assertEqual([e.duration.quarterLength for e in s1], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5])
self.assertEqual([e.duration.quarterLength for e in sNew], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
# basic test
procCompare(s1, .5, [0.25, 0.25, 0.25])
procCompare(s1, 3, [1.5, 1.5, 1.5])
# a sequence of Durations of different values
s1 = Stream()
for ql in [.5, 1.5, 2, 3, .25, .25, .5]:
n = note.Note('g')
n.quarterLength = ql
s1.append(n)
procCompare(s1, .5, [0.25, 0.75, 1.0, 1.5, 0.125, 0.125, 0.25] )
procCompare(s1, .25, [0.125, 0.375, 0.5, 0.75, 0.0625, 0.0625, 0.125] )
procCompare(s1, 4, [2.0, 6.0, 8, 12, 1.0, 1.0, 2.0])
def testAugmentOrDiminishBasic(self):
def procCompare(s, scalar, matchOffset, matchDuration):
#oListSrc = [e.offset for e in s]
#qlListSrc = [e.quarterLength for e in s]
sNew = s.augmentOrDiminish(scalar, inPlace=False)
oListPost = [e.offset for e in sNew]
qlListPost = [e.quarterLength for e in sNew]
self.assertEqual(oListPost[:len(matchOffset)], matchOffset)
self.assertEqual(qlListPost[:len(matchDuration)], matchDuration)
# test that the last offset is the highest offset
self.assertEqual(matchOffset[-1], sNew.highestOffset)
self.assertEqual(matchOffset[-1]+matchDuration[-1],
sNew.highestTime)
# test making measures on this
unused_post = sNew.makeMeasures()
#sNew.show()
# a sequence of Durations of different values
s1 = Stream()
for ql in [.5, 1.5, 2, 3, .25, .25, .5]:
n = note.Note('g')
n.quarterLength = ql
s1.append(n)
# provide offsets, then durations
procCompare(s1, .5,
[0.0, 0.25, 1.0, 2.0, 3.5, 3.625, 3.75] ,
[0.25, 0.75, 1.0, 1.5, 0.125, 0.125, 0.25] )
procCompare(s1, 1.5,
[0.0, 0.75, 3.0, 6.0, 10.5, 10.875, 11.25] ,
[0.75, 2.25, 3.0, 4.5, 0.375, 0.375, 0.75] )
procCompare(s1, 3,
[0.0, 1.5, 6.0, 12.0, 21.0, 21.75, 22.5] ,
[1.5, 4.5, 6, 9, 0.75, 0.75, 1.5] )
def testAugmentOrDiminishHighestTimes(self):
'''Need to make sure that highest offset and time are properly updated
'''
from music21 import corpus
src = corpus.parse('bach/bwv324.xml')
# get some measures of the soprano; just get the notes
ex = src.parts[0].flat.notesAndRests[0:30]
self.assertEqual(ex.highestOffset, 38.0)
self.assertEqual(ex.highestTime, 42.0)
# try first when doing this not in place
newEx = ex.augmentOrDiminish(2, inPlace=False)
self.assertEqual(newEx.notesAndRests[0].offset, 0.0)
self.assertEqual(newEx.notesAndRests[1].offset, 4.0)
self.assertEqual(newEx.highestOffset, 76.0)
self.assertEqual(newEx.highestTime, 84.0)
# try in place
ex.augmentOrDiminish(2, inPlace=True)
self.assertEqual(ex.notesAndRests[1].getOffsetBySite(ex), 4.0)
self.assertEqual(ex.notesAndRests[1].offset, 4.0)
self.assertEqual(ex.highestOffset, 76.0)
self.assertEqual(ex.highestTime, 84.0)
<|fim▁hole|> '''
from music21 import corpus
# first method: iterating through notes
src = corpus.parse('bach/bwv324.xml')
# get some measures of the soprano; just get the notes
#environLocal.printDebug(['testAugmentOrDiminishCorpus()', 'extracting notes:'])
ex = src.parts[0].flat.notesAndRests[0:30]
# attach a couple of transformations
s = Score()
for scalar in [.5, 1.5, 2, .25]:
#n = note.Note()
part = Part()
#environLocal.printDebug(['testAugmentOrDiminishCorpus()', 'pre augment or diminish', 'ex', ex, 'id(ex)', id(ex)])
for n in ex.augmentOrDiminish(scalar, inPlace=False):
part.append(n)
s.insert(0, part)
GEX = m21ToXml.GeneralObjectExporter()
unused_mx = GEX.parse(s).decode('utf-8')
# second method: getting flattened stream
src = corpus.parse('bach/bwv323.xml')
# get notes from one part
ex = src.parts[0].flat.notesAndRests
s = Score()
for scalar in [1, 2, .5, 1.5]:
part = ex.augmentOrDiminish(scalar, inPlace=False)
s.insert(0, part)
unused_mx = GEX.parse(s).decode('utf-8')
#s.show()
def testMeasureBarDurationProportion(self):
from fractions import Fraction
from music21 import stream
m = stream.Measure()
m.timeSignature = meter.TimeSignature('3/4')
n = note.Note("B--2")
n.quarterLength = 1
m.append(copy.deepcopy(n))
self.assertEqual(m.notes[0].offset, 0)
self.assertEqual(m.barDurationProportion(), Fraction(1, 3), 4)
self.assertEqual(m.barDuration.quarterLength, 3, 4)
# temporarily commented out
# m.shiftElementsAsAnacrusis()
# self.assertEqual(m.notesAndRests[0].hasSite(m), True)
# self.assertEqual(m.notesAndRests[0].offset, 2.0)
# # now the duration is full
# self.assertAlmostEqual(m.barDurationProportion(), 1.0, 4)
# self.assertAlmostEqual(m.highestOffset, 2.0, 4)
m = stream.Measure()
m.timeSignature = meter.TimeSignature('5/4')
n1 = note.Note()
n1.quarterLength = .5
n2 = note.Note()
n2.quarterLength = 1.5
m.append(n1)
m.append(n2)
self.assertEqual(m.barDurationProportion(), Fraction(2, 5), 4)
self.assertEqual(m.barDuration.quarterLength, 5.0)
# m.shiftElementsAsAnacrusis()
# self.assertEqual(m.notesAndRests[0].offset, 3.0)
# self.assertEqual(n1.offset, 3.0)
# self.assertEqual(n2.offset, 3.5)
# self.assertAlmostEqual(m.barDurationProportion(), 1.0, 4)
def testInsertAndShiftBasic(self):
offsets = [0, 2, 4, 6, 8, 10, 12]
n = note.Note()
n.quarterLength = 2
s = Stream()
s.repeatInsert(n, offsets)
# qL, insertOffset, newHighOffset, newHighTime
data = [
(.25, 0, 12.25, 14.25),
(3, 0, 15, 17),
(6.5, 0, 18.5, 20.5),
# shifting at a positing where another element starts
(.25, 4, 12.25, 14.25),
(3, 4, 15, 17),
(6.5, 4, 18.5, 20.5),
# shift the same duration at different insert points
(1, 2, 13, 15),
(2, 2, 14, 16),
# this is overlapping element at 2 by 1, ending at 4
# results in no change in new high values
(1, 3, 12, 14),
# since duration is here 2, extend new starts to 5
(2, 3, 13, 15),
(1, 4, 13, 15),
(2, 4, 14, 16),
# here, we do not shift the element at 4, only event at 6
(2, 4.5, 12.5, 14.5),
# here, we insert the start of an element and can shift it
(2.5, 4, 14.5, 16.5),
]
for qL, insertOffset, newHighOffset, newHighTime in data:
sProc = copy.deepcopy(s)
self.assertEqual(sProc.highestOffset, 12)
self.assertEqual(sProc.highestTime, 14)
nAlter = note.Note()
nAlter.quarterLength = qL
sProc.insertAndShift(insertOffset, nAlter)
self.assertEqual(sProc.highestOffset, newHighOffset)
self.assertEqual(sProc.highestTime, newHighTime)
self.assertEqual(len(sProc), len(s)+1)
# try the same with scrambled elements
sProc = copy.deepcopy(s)
random.shuffle(sProc._elements)
sProc.elementsChanged()
self.assertEqual(sProc.highestOffset, 12)
self.assertEqual(sProc.highestTime, 14)
nAlter = note.Note()
nAlter.quarterLength = qL
sProc.insertAndShift(insertOffset, nAlter)
self.assertEqual(sProc.highestOffset, newHighOffset)
self.assertEqual(sProc.highestTime, newHighTime)
self.assertEqual(len(sProc), len(s)+1)
def testInsertAndShiftNoDuration(self):
offsets = [0, 2, 4, 6, 8, 10, 12]
n = note.Note()
n.quarterLength = 2
s = Stream()
s.repeatInsert(n, offsets)
# qL, insertOffset, newHighOffset, newHighTime
data = [
(0, 12, 14),
(0, 12, 14),
(0, 12, 14),
(4, 12, 14),
(4, 12, 14),
(4, 12, 14),
(2, 12, 14),
(2, 12, 14),
(3, 12, 14),
]
for insertOffset, newHighOffset, newHighTime in data:
sProc = copy.deepcopy(s)
self.assertEqual(sProc.highestOffset, 12)
self.assertEqual(sProc.highestTime, 14)
c = clef.Clef()
sProc.insertAndShift(insertOffset, c)
self.assertEqual(sProc.highestOffset, newHighOffset)
self.assertEqual(sProc.highestTime, newHighTime)
self.assertEqual(len(sProc), len(s)+1)
def testInsertAndShiftMultipleElements(self):
offsets = [0, 2, 4, 6, 8, 10, 12]
n = note.Note()
n.quarterLength = 2
s = Stream()
s.repeatInsert(n, offsets)
# qL, insertOffset, newHighOffset, newHighTime
data = [
(.25, 0, 12.25, 14.25),
(3, 0, 15, 17),
(6.5, 0, 18.5, 20.5),
# shifting at a positing where another element starts
(.25, 4, 12.25, 14.25),
(3, 4, 15, 17),
(6.5, 4, 18.5, 20.5),
# shift the same duration at different insert points
(1, 2, 13, 15),
(2, 2, 14, 16),
# this is overlapping element at 2 by 1, ending at 4
# results in no change in new high values
(1, 3, 12, 14),
# since duration is here 2, extend new starts to 5
(2, 3, 13, 15),
(1, 4, 13, 15),
(2, 4, 14, 16),
# here, we do not shift the element at 4, only event at 6
(2, 4.5, 12.5, 14.5),
# here, we insert the start of an element and can shift it
(2.5, 4, 14.5, 16.5),
]
for qL, insertOffset, newHighOffset, newHighTime in data:
sProc = copy.deepcopy(s)
self.assertEqual(sProc.highestOffset, 12)
self.assertEqual(sProc.highestTime, 14)
# fill with sixteenth notes
nAlter = note.Note()
nAlter.quarterLength = .25
itemList = []
o = insertOffset
while o < insertOffset + qL:
itemList.append(o)
itemList.append(copy.deepcopy(nAlter))
o += .25
#environLocal.printDebug(['itemList', itemList])
sProc.insertAndShift(itemList)
self.assertEqual(sProc.highestOffset, newHighOffset)
self.assertEqual(sProc.highestTime, newHighTime)
self.assertEqual(len(sProc), len(s)+len(itemList) / 2)
def testMetadataOnStream(self):
s = Stream()
n1 = note.Note()
s.append(n1)
s.metadata = metadata.Metadata()
s.metadata.composer = 'Frank the Composer'
s.metadata.title = 'work title' # will get as movement name if not set
#s.metadata.movementName = 'movement name'
GEX = m21ToXml.GeneralObjectExporter()
unused_mx = GEX.parse(s).decode('utf-8')
#s.show()
def testMeasureBarline(self):
m1 = Measure()
m1.timeSignature = meter.TimeSignature('3/4')
self.assertEqual(len(m1), 1)
b1 = bar.Barline('heavy')
# this adds to elements list
m1.leftBarline = b1
self.assertEqual(len(m1), 2)
self.assertEqual(m1[0], b1) # this is on elements
self.assertEqual(m1.rightBarline, None) # this is on elements
b2 = bar.Barline('heavy')
self.assertEqual(m1.barDuration.quarterLength, 3.0)
m1.rightBarline = b2
# now have barline, ts, and barline
self.assertEqual(len(m1), 3)
b3 = bar.Barline('double')
b4 = bar.Barline('heavy')
m1.leftBarline = b3
# length should be the same, as we replaced
self.assertEqual(len(m1), 3)
self.assertEqual(m1.leftBarline, b3)
m1.rightBarline = b4
self.assertEqual(len(m1), 3)
self.assertEqual(m1.rightBarline, b4)
p = Part()
p.append(copy.deepcopy(m1))
p.append(copy.deepcopy(m1))
#p.show()
# add right barline first, w/o a time signature
m2 = Measure()
self.assertEqual(len(m2), 0)
m2.rightBarline = b4
self.assertEqual(len(m2), 1)
self.assertEqual(m2.leftBarline, None) # this is on elements
self.assertEqual(m2.rightBarline, b4) # this is on elements
def testMeasureLayout(self):
# test both system layout and measure width
# Note: Measure.layoutWidth is not currently read by musicxml
from music21 import layout
s = Stream()
for i in range(1,10):
n = note.Note()
m = Measure()
m.append(n)
m.layoutWidth = i*100
if i % 2 == 0:
sl = layout.SystemLayout(isNew=True)
m.insert(0, sl)
s.append(m)
#s.show()
GEX = m21ToXml.GeneralObjectExporter()
unused_mx = GEX.parse(s).decode('utf-8')
def testYieldContainers(self):
from music21 import stream
n1 = note.Note()
n1.id = 'n(1a)'
n2 = note.Note()
n2.id = 'n2(2b)'
n3 = note.Note()
n3.id = 'n3(3b)'
n4 = note.Note()
n4.id = 'n4(3b)'
s1 = stream.Stream()
s1.id = '1a'
s1.append(n1)
s2 = stream.Stream()
s2.id = '2a'
s3 = stream.Stream()
s3.id = '2b'
s3.append(n2)
s4 = stream.Stream()
s4.id = '2c'
s5 = stream.Stream()
s5.id = '3a'
s6 = stream.Stream()
s6.id = '3b'
s6.append(n3)
s6.append(n4)
s7 = stream.Stream()
s7.id = '3c'
s8 = stream.Stream()
s8.id = '3d'
s9 = stream.Stream()
s9.id = '3e'
s10 = stream.Stream()
s10.id = '3f'
#environLocal.printDebug(['s1, s2, s3, s4', s1, s2, s3, s4])
s2.append(s5)
s2.append(s6)
s2.append(s7)
s3.append(s8)
s3.append(s9)
s4.append(s10)
s1.append(s2)
s1.append(s3)
s1.append(s4)
#environLocal.printDebug(['downward:'])
match = []
for x in s1.recurse(streamsOnly=True):
match.append(x.id)
#environLocal.printDebug([x, x.id, 'activeSite', x.activeSite])
self.assertEqual(match, ['1a', '2a', '3a', '3b', '3c', '2b', '3d', '3e', '2c', '3f'])
#environLocal.printDebug(['downward with elements:'])
match = []
for x in s1.recurse(streamsOnly=False):
match.append(x.id)
#environLocal.printDebug([x, x.id, 'activeSite', x.activeSite])
self.assertEqual(match, ['1a', 'n(1a)', '2a', '3a', '3b', 'n3(3b)', 'n4(3b)', '3c', '2b', 'n2(2b)', '3d', '3e', '2c', '3f'])
#environLocal.printDebug(['downward from non-topmost element:'])
match = []
for x in s2.recurse(streamsOnly=False):
match.append(x.id)
#environLocal.printDebug([x, x.id, 'activeSite', x.activeSite])
# test downward
self.assertEqual(match, ['2a', '3a', '3b', 'n3(3b)', 'n4(3b)', '3c'])
#environLocal.printDebug(['upward, with skipDuplicates:'])
match = []
# must provide empty list for memo
for x in s7._yieldReverseUpwardsSearch([], streamsOnly=True, skipDuplicates=True):
match.append(x.id)
#environLocal.printDebug([x, x.id, 'activeSite', x.activeSite])
self.assertEqual(match, ['3c', '2a', '1a', '2b', '2c', '3a', '3b'] )
#environLocal.printDebug(['upward from a single node, with skipDuplicates'])
match = []
for x in s10._yieldReverseUpwardsSearch([], streamsOnly=True):
match.append(x.id)
#environLocal.printDebug([x, x.id, 'activeSite', x.activeSite])
self.assertEqual(match, ['3f', '2c', '1a', '2a', '2b'] )
#environLocal.printDebug(['upward with skipDuplicates=False:'])
match = []
for x in s10._yieldReverseUpwardsSearch([], streamsOnly=True, skipDuplicates=False):
match.append(x.id)
#environLocal.printDebug([x, x.id, 'activeSite', x.activeSite])
self.assertEqual(match, ['3f', '2c', '1a', '2a', '1a', '2b', '1a'] )
#environLocal.printDebug(['upward, with skipDuplicates, streamsOnly=False:'])
match = []
# must provide empty list for memo
for x in s8._yieldReverseUpwardsSearch([], streamsOnly=False,
skipDuplicates=True):
match.append(x.id)
environLocal.printDebug([x, x.id, 'activeSite', x.activeSite])
self.assertEqual(match, ['3d', 'n2(2b)', '2b', 'n(1a)', '1a', '2a', '2c', '3e'] )
#environLocal.printDebug(['upward, with skipDuplicates, streamsOnly=False:'])
match = []
# must provide empty list for memo
for x in s4._yieldReverseUpwardsSearch([], streamsOnly=False,
skipDuplicates=True):
match.append(x.id)
#environLocal.printDebug([x, x.id, 'activeSite', x.activeSite])
# notice that this does not get the nonConatainers for 2b
self.assertEqual(match, ['2c', 'n(1a)', '1a', '2a', '2b'] )
def testMidiEventsBuilt(self):
def procCompare(mf, match):
triples = []
for i in range(0, len(mf.tracks[0].events), 2):
d = mf.tracks[0].events[i] # delta
e = mf.tracks[0].events[i+1] # events
triples.append((d.time, e.type, e.pitch))
# TODO: temporary removed
#self.assertEqual(triples, match)
s = Stream()
n = note.Note('g#3')
n.quarterLength = .5
s.repeatAppend(n, 6)
#post = s.midiTracks # get a lost
post = midiTranslate.streamHierarchyToMidiTracks(s)
self.assertEqual(len(post[0].events), 30)
# must be an even number
self.assertEqual(len(post[0].events) % 2, 0)
mf = midiTranslate.streamToMidiFile(s)
match = [(0, 'SEQUENCE_TRACK_NAME', None), (0, 'PITCH_BEND', None), (0, 'NOTE_ON', 56), (512, 'NOTE_OFF', 56), (0, 'NOTE_ON', 56), (512, 'NOTE_OFF', 56), (0, 'NOTE_ON', 56), (512, 'NOTE_OFF', 56), (0, 'NOTE_ON', 56), (512, 'NOTE_OFF', 56), (0, 'NOTE_ON', 56), (512, 'NOTE_OFF', 56), (0, 'NOTE_ON', 56), (512, 'NOTE_OFF', 56), (0, 'END_OF_TRACK', None)]
procCompare(mf, match)
s = Stream()
n = note.Note('g#3')
n.quarterLength = 1.5
s.repeatAppend(n, 3)
mf = midiTranslate.streamToMidiFile(s)
match = [(0, 'SEQUENCE_TRACK_NAME', None), (0, 'PITCH_BEND', None), (0, 'NOTE_ON', 56), (1536, 'NOTE_OFF', 56), (0, 'NOTE_ON', 56), (1536, 'NOTE_OFF', 56), (0, 'NOTE_ON', 56), (1536, 'NOTE_OFF', 56), (0, 'END_OF_TRACK', None)]
procCompare(mf, match)
# combinations of different pitches and durs
s = Stream()
data = [('c2', .25), ('c#3', .5), ('g#3', 1.5), ('a#2', 1), ('a4', 2)]
for p, d in data:
n = note.Note(p)
n.quarterLength = d
s.append(n)
mf = midiTranslate.streamToMidiFile(s)
match = [(0, 'SEQUENCE_TRACK_NAME', None), (0, 'NOTE_ON', 36), (256, 'NOTE_OFF', 36), (0, 'NOTE_ON', 49), (512, 'NOTE_OFF', 49), (0, 'NOTE_ON', 56), (1536, 'NOTE_OFF', 56), (0, 'NOTE_ON', 46), (1024, 'NOTE_OFF', 46), (0, 'NOTE_ON', 69), (2048, 'NOTE_OFF', 69), (0, 'END_OF_TRACK', None)]
procCompare(mf, match)
# rests, basic
#environLocal.printDebug(['rests'])
s = Stream()
data = [('c2', 1), (None, .5), ('c#3', 1), (None, .5), ('a#2', 1), (None, .5), ('a4', 1)]
for p, d in data:
if p == None:
n = note.Rest()
else:
n = note.Note(p)
n.quarterLength = d
s.append(n)
#s.show('midi')
mf = midiTranslate.streamToMidiFile(s)
match = [(0, 'SEQUENCE_TRACK_NAME', None),
(0, 'NOTE_ON', 36), (1024, 'NOTE_OFF', 36),
(512, 'NOTE_ON', 49), (1024, 'NOTE_OFF', 49),
(512, 'NOTE_ON', 46), (1024, 'NOTE_OFF', 46),
(512, 'NOTE_ON', 69), (1024, 'NOTE_OFF', 69),
(0, 'END_OF_TRACK', None)]
procCompare(mf, match)
#environLocal.printDebug(['rests, varied sizes'])
s = Stream()
data = [('c2', 1), (None, .25), ('c#3', 1), (None, 1.5), ('a#2', 1), (None, 2), ('a4', 1)]
for p, d in data:
if p == None:
n = note.Rest()
else:
n = note.Note(p)
n.quarterLength = d
s.append(n)
#s.show('midi')
mf = midiTranslate.streamToMidiFile(s)
match = [(0, 'SEQUENCE_TRACK_NAME', None),
(0, 'NOTE_ON', 36), (1024, 'NOTE_OFF', 36),
(256, 'NOTE_ON', 49), (1024, 'NOTE_OFF', 49),
(1536, 'NOTE_ON', 46), (1024, 'NOTE_OFF', 46),
(2048, 'NOTE_ON', 69), (1024, 'NOTE_OFF', 69),
(0, 'END_OF_TRACK', None)]
procCompare(mf, match)
#environLocal.printDebug(['rests, multiple in a row'])
s = Stream()
data = [('c2', 1), (None, 1), (None, 1), ('c#3', 1), ('c#3', 1), (None, .5), (None, .5), (None, .5), (None, .5), ('a#2', 1), (None, 2), ('a4', 1)]
for p, d in data:
if p == None:
n = note.Rest()
else:
n = note.Note(p)
n.quarterLength = d
s.append(n)
#s.show('midi')
mf = midiTranslate.streamToMidiFile(s)
match = [(0, 'SEQUENCE_TRACK_NAME', None),
(0, 'NOTE_ON', 36), (1024, 'NOTE_OFF', 36),
(2048, 'NOTE_ON', 49), (1024, 'NOTE_OFF', 49),
(0, 'NOTE_ON', 49), (1024, 'NOTE_OFF', 49),
(2048, 'NOTE_ON', 46), (1024, 'NOTE_OFF', 46),
(2048, 'NOTE_ON', 69), (1024, 'NOTE_OFF', 69),
(0, 'END_OF_TRACK', None)]
procCompare(mf, match)
#environLocal.printDebug(['w/ chords'])
s = Stream()
data = [('c2', 1), (None, 1), (['f3', 'a-4', 'c5'], 1), (None, .5), ('a#2', 1), (None, 2), (['d2', 'a4'], .5), (['d-2', 'a#3', 'g#6'], .5), (None, 1), (['f#3', 'a4', 'c#5'], 4)]
for p, d in data:
if p == None:
n = note.Rest()
elif isinstance(p, list):
n = chord.Chord(p)
else:
n = note.Note(p)
n.quarterLength = d
s.append(n)
#s.show('midi')
mf = midiTranslate.streamToMidiFile(s)
match = [(0, 'SEQUENCE_TRACK_NAME', None), (0, 'NOTE_ON', 36), (1024, 'NOTE_OFF', 36), (1024, 'NOTE_ON', 53), (0, 'NOTE_ON', 68), (0, 'NOTE_ON', 72), (1024, 'NOTE_OFF', 53), (0, 'NOTE_OFF', 68), (0, 'NOTE_OFF', 72), (512, 'NOTE_ON', 46), (1024, 'NOTE_OFF', 46), (2048, 'NOTE_ON', 38), (0, 'NOTE_ON', 69), (512, 'NOTE_OFF', 38), (0, 'NOTE_OFF', 69), (0, 'NOTE_ON', 37), (0, 'NOTE_ON', 58), (0, 'NOTE_ON', 92), (512, 'NOTE_OFF', 37), (0, 'NOTE_OFF', 58), (0, 'NOTE_OFF', 92), (1024, 'NOTE_ON', 54), (0, 'NOTE_ON', 69), (0, 'NOTE_ON', 73), (4096, 'NOTE_OFF', 54), (0, 'NOTE_OFF', 69), (0, 'NOTE_OFF', 73), (0, 'END_OF_TRACK', None)]
procCompare(mf, match)
def testMidiEventsImported(self):
from music21 import corpus
def procCompare(mf, match):
triples = []
for i in range(0, len(mf.tracks[0].events), 2):
d = mf.tracks[0].events[i] # delta
e = mf.tracks[0].events[i+1] # events
triples.append((d.time, e.type, e.pitch))
self.assertEqual(triples, match)
s = corpus.parse('bach/bwv66.6')
part = s.parts[0].measures(6,9) # last meausres
#part.show('musicxml')
#part.show('midi')
mf = midiTranslate.streamToMidiFile(part)
match = [(0, 'SEQUENCE_TRACK_NAME', None), (0, 'PROGRAM_CHANGE', None), (0, 'PITCH_BEND', None), (0, 'PROGRAM_CHANGE', None), (0, 'KEY_SIGNATURE', None), (0, 'TIME_SIGNATURE', None), (0, 'NOTE_ON', 69), (1024, 'NOTE_OFF', 69), (0, 'NOTE_ON', 71), (1024, 'NOTE_OFF', 71), (0, 'NOTE_ON', 73), (1024, 'NOTE_OFF', 73), (0, 'NOTE_ON', 69), (1024, 'NOTE_OFF', 69), (0, 'NOTE_ON', 68), (1024, 'NOTE_OFF', 68), (0, 'NOTE_ON', 66), (1024, 'NOTE_OFF', 66), (0, 'NOTE_ON', 68), (2048, 'NOTE_OFF', 68), (0, 'NOTE_ON', 66), (2048, 'NOTE_OFF', 66), (0, 'NOTE_ON', 66), (1024, 'NOTE_OFF', 66), (0, 'NOTE_ON', 66), (2048, 'NOTE_OFF', 66), (0, 'NOTE_ON', 66), (512, 'NOTE_OFF', 66), (0, 'NOTE_ON', 65), (512, 'NOTE_OFF', 65), (0, 'NOTE_ON', 66), (1024, 'NOTE_OFF', 66), (0, 'END_OF_TRACK', None)]
procCompare(mf, match)
def testFindGaps(self):
s = Stream()
n = note.Note()
s.repeatInsert(n, [0, 1.5, 2.5, 4, 8])
post = s.findGaps()
test = [(e.offset, e.offset+e.duration.quarterLength) for e in post]
match = [(1.0, 1.5), (3.5, 4.0), (5.0, 8.0)]
self.assertEqual(test, match)
self.assertEqual(len(s), 5)
s.makeRests(fillGaps=True)
self.assertEqual(len(s), 8)
self.assertEqual(len(s.getElementsByClass(note.Rest)), 3)
def testQuantize(self):
def procCompare(srcOffset, srcDur, dstOffset, dstDur, divList):
s = Stream()
for i in range(len(srcDur)):
n = note.Note()
n.quarterLength = srcDur[i]
s.insert(srcOffset[i], n)
s.quantize(divList, processOffsets=True, processDurations=True, inPlace=True)
targetOffset = [e.offset for e in s]
targetDur = [e.duration.quarterLength for e in s]
self.assertEqual(targetOffset, dstOffset)
self.assertEqual(targetDur, dstDur)
#environLocal.printDebug(['quantization results:', targetOffset, targetDur])
from fractions import Fraction as F
procCompare([0.01, .24, .57, .78], [0.25, 0.25, 0.25, 0.25],
[0.0, .25, .5, .75], [0.25, 0.25, 0.25, 0.25],
[4]) # snap to .25
procCompare([0.01, .24, .52, .78], [0.25, 0.25, 0.25, 0.25],
[0.0, .25, .5, .75], [0.25, 0.25, 0.25, 0.25],
[8]) # snap to .125
procCompare([0.01, .345, .597, 1.02, 1.22],
[0.31, 0.32, 0.33, 0.25, 0.25],
[0.0, F('1/3'), F('2/3'), 1.0, 1.25],
[F('1/3'), F('1/3'), F('1/3'), 0.25, 0.25],
[4, 3]) # snap to .125 and .3333
procCompare([0.01, .345, .687, 0.99, 1.28],
[0.31, 0.32, 0.33, 0.22, 0.21],
[0.0, F('1/3'), F('2/3'), 1.0, 1.25],
[F('1/3'), F('1/3'), F('1/3'), 0.25, 0.25],
[8, 3]) # snap to .125 and .3333
procCompare([0.03, .335, .677, 1.02, 1.28],
[0.32, 0.35, 0.33, 0.22, 0.21],
[0.0, F('1/3'), F('2/3'), 1.0, 1.25],
[F('1/3'), F('1/3'), F('1/3'), 0.25, 0.25],
[8, 6]) # snap to .125 and .1666666
def testAnalyze(self):
from music21 import corpus
s = corpus.parse('bach/bwv66.6')
sub = [s.parts[0], s.parts[1], s.measures(4,5),
s.parts[2].measures(4,5)]
matchAmbitus = [interval.Interval(12),
interval.Interval(15),
interval.Interval(26),
interval.Interval(10)]
for i in range(len(sub)):
sTest = sub[i]
post = sTest.analyze('ambitus')
self.assertEqual(str(post), str(matchAmbitus[i]))
# match values for different analysis strings
for idStr in ['range', 'ambitus', 'span']:
for i in range(len(sub)):
sTest = sub[i]
post = sTest.analyze(idStr)
self.assertEqual(str(post), str(matchAmbitus[i]))
# only match first two values
matchKrumhansl = [(pitch.Pitch('F#'), 'minor'),
(pitch.Pitch('C#'), 'minor'),
(pitch.Pitch('E'), 'major') ,
(pitch.Pitch('E'), 'major') ]
for i in range(len(sub)):
sTest = sub[i]
post = sTest.analyze('KrumhanslSchmuckler')
# returns three values; match 2
self.assertEqual(post.tonic.name, matchKrumhansl[i][0].name)
self.assertEqual(post.mode, matchKrumhansl[i][1])
# match values under different strings provided to analyze
for idStr in ['krumhansl']:
for i in range(len(sub)):
sTest = sub[i]
post = sTest.analyze(idStr)
# returns three values; match 2
self.assertEqual(post.tonic.name, matchKrumhansl[i][0].name)
self.assertEqual(post.mode, matchKrumhansl[i][1])
matchArden = [(pitch.Pitch('F#'), 'minor'),
(pitch.Pitch('C#'), 'minor'),
(pitch.Pitch('F#'), 'minor') ,
(pitch.Pitch('E'), 'major') ]
for idStr in ['arden']:
for i in range(len(sub)):
sTest = sub[i]
post = sTest.analyze(idStr)
# returns three values; match 2
self.assertEqual(post.tonic.name, matchArden[i][0].name)
self.assertEqual(post.mode, matchArden[i][1])
def testMakeTupletBracketsA(self):
'''Creating brackets
'''
from music21.stream import makeNotation
def collectType(s):
post = []
for e in s:
if e.duration.tuplets:
post.append(e.duration.tuplets[0].type)
else:
post.append(None)
return post
def collectBracket(s):
post = []
for e in s:
if e.duration.tuplets:
post.append(e.duration.tuplets[0].bracket)
else:
post.append(None)
return post
# case of incomplete, single tuplet ending the Stream
# remove bracket
s = Stream()
qlList = [1, 2, .5, 1/6.]
for ql in qlList:
n = note.Note()
n.quarterLength = ql
s.append(n)
makeNotation.makeTupletBrackets(s, inPlace = True)
self.assertEqual(collectType(s), [None, None, None, 'startStop'])
self.assertEqual(collectBracket(s), [None, None, None, False])
#s.show()
def testMakeTupletBracketsB(self):
'''Creating brackets
'''
from music21.stream import makeNotation
def collectType(s):
post = []
for e in s:
if e.duration.tuplets:
post.append(e.duration.tuplets[0].type)
else:
post.append(None)
return post
def collectBracket(s):
post = []
for e in s:
if e.duration.tuplets:
post.append(e.duration.tuplets[0].bracket)
else:
post.append(None)
return post
s = Stream()
qlList = [1, 1/3., 1/3., 1/3., 1, 1]
for ql in qlList:
n = note.Note()
n.quarterLength = ql
s.append(n)
makeNotation.makeTupletBrackets(s, inPlace = True)
self.assertEqual(collectType(s), [None, 'start', None, 'stop', None, None])
#s.show()
s = Stream()
qlList = [1, 1/6., 1/6., 1/6., 1/6., 1/6., 1/6., 1, 1]
for ql in qlList:
n = note.Note()
n.quarterLength = ql
s.append(n)
makeNotation.makeTupletBrackets(s, inPlace = True)
# this is the correct type settings but this displays by dividing
# into two brackets
self.assertEqual(collectType(s), [None, 'start', None, 'stop', 'start', None, 'stop', None, None] )
#s.show()
# case of tuplet ending the Stream
s = Stream()
qlList = [1, 2, .5, 1/6., 1/6., 1/6., ]
for ql in qlList:
n = note.Note()
n.quarterLength = ql
s.append(n)
makeNotation.makeTupletBrackets(s, inPlace = True)
self.assertEqual(collectType(s), [None, None, None, 'start', None, 'stop'] )
#s.show()
# case of incomplete, single tuplets in the middle of a Strem
s = Stream()
qlList = [1, 1/3., 1, 1/3., 1, 1/3.]
for ql in qlList:
n = note.Note()
n.quarterLength = ql
s.append(n)
makeNotation.makeTupletBrackets(s, inPlace = True)
self.assertEqual(collectType(s), [None, 'startStop', None, 'startStop', None, 'startStop'])
self.assertEqual(collectBracket(s), [None, False, None, False, None, False])
#s.show()
# diverse groups that sum to a whole
s = Stream()
qlList = [1, 1/3., 2/3., 2/3., 1/6., 1/6., 1]
for ql in qlList:
n = note.Note()
n.quarterLength = ql
s.append(n)
makeNotation.makeTupletBrackets(s, inPlace = True)
self.assertEqual(collectType(s), [None, 'start', 'stop','start', None, 'stop', None])
#s.show()
# diverse groups that sum to a whole
s = Stream()
qlList = [1, 1/3., 2/3., 1, 1/6., 1/3., 1/3., 1/6. ]
for ql in qlList:
n = note.Note()
n.quarterLength = ql
s.append(n)
makeNotation.makeTupletBrackets(s, inPlace = True)
self.assertEqual(collectType(s), [None, 'start', 'stop', None, 'start', 'stop', 'start', 'stop'] )
self.assertEqual(collectBracket(s), [None, True, True, None, True, True, True, True])
#s.show()
# quintuplets
s = Stream()
qlList = [1, 1/5., 1/5., 1/10., 1/10., 1/5., 1/5., 2. ]
for ql in qlList:
n = note.Note()
n.quarterLength = ql
s.append(n)
makeNotation.makeTupletBrackets(s, inPlace = True)
self.assertEqual(collectType(s), [None, 'start', None, None, None, None, 'stop', None] )
self.assertEqual(collectBracket(s), [None, True, True, True, True, True, True, None] )
#s.show()
def testMakeNotationA(self):
'''This is a test of many make procedures
'''
def collectTupletType(s):
post = []
for e in s:
if e.duration.tuplets:
post.append(e.duration.tuplets[0].type)
else:
post.append(None)
return post
def collectTupletBracket(s):
post = []
for e in s:
if e.duration.tuplets:
post.append(e.duration.tuplets[0].bracket)
else:
post.append(None)
return post
# s = Stream()
# qlList = [1, 1/3., 1/3., 1/3., 1, 1, 1/3., 1/3., 1/3., 1, 1]
# for ql in qlList:
# n = note.Note()
# n.quarterLength = ql
# s.append(n)
# postMake = s.makeNotation()
# self.assertEqual(collectTupletType(postMake.flat.notesAndRests), [None, 'start', None, 'stop', None, None, 'start', None, 'stop', None, None])
# #s.show()
s = Stream()
qlList = [1/3.,]
for ql in qlList:
n = note.Note()
n.quarterLength = ql
s.append(n)
postMake = s.makeNotation()
self.assertEqual(collectTupletType(postMake.flat.notes), ['startStop'])
self.assertEqual(collectTupletBracket(postMake.flat.notes), [False])
#s.show()
def testMakeNotationB(self):
'''Testing voices making routines within make notation
'''
from music21 import stream
s = stream.Stream()
s.insert(0, note.Note('C4', quarterLength=8))
s.repeatInsert(note.Note('b-4', quarterLength=.5), [x*.5 for x in range(0,16)])
s.repeatInsert(note.Note('f#5', quarterLength=2), [0, 2, 4, 6])
sPost = s.makeNotation()
#sPost.show()
# make sure original is not changed
self.assertEqual(len(s.voices), 0)
self.assertEqual(len(s.notes), 21)
# we have generated measures, beams, and voices
self.assertEqual(len(sPost.getElementsByClass('Measure')), 2)
self.assertEqual(len(sPost.getElementsByClass('Measure')[0].voices), 3)
self.assertEqual(len(sPost.getElementsByClass('Measure')[1].voices), 3)
# check beaming
for m in sPost.getElementsByClass('Measure'):
for n in m.voices[1].notes: # middle voice has beams
self.assertEqual(len(n.beams) > 0, True)
def testMakeNotationC(self):
'''Test creating diverse, overlapping durations and notes
'''
# TODO: the output of this is missing a tie to the last dotted half
from music21 import stream
s = stream.Stream()
for duration in [.5, 1.5, 3]:
for offset in [0, 1.5, 4, 6]:
# create a midi pitch value from duration
s.insert(offset, note.Note(50+(duration*2)+(offset*2),
quarterLength=duration))
#s.show()
sPost = s.makeNotation()
self.assertEqual(len(sPost.getElementsByClass('Measure')), 3)
self.assertEqual(len(sPost.getElementsByClass('Measure')[0].voices), 4)
self.assertEqual(len(sPost.getElementsByClass('Measure')[1].voices), 4)
def testMakeNotationScoreA(self):
'''Test makeNotation on Score objects
'''
from music21 import stream
s = stream.Score()
p1 = stream.Stream()
p2 = stream.Stream()
for p in [p1, p2]:
p.repeatAppend(note.Note(), 12)
s.insert(0, p)
# this is true as the sub-stream contain notes
self.assertEqual(s.hasPartLikeStreams(), True)
self.assertEqual(s.getElementsByClass('Stream')[0].hasMeasures(), False)
self.assertEqual(s.getElementsByClass('Stream')[1].hasMeasures(), False)
post = s.makeNotation(inPlace=False)
self.assertEqual(post.hasPartLikeStreams(), True)
# three measures are made by default
self.assertEqual(len(post.getElementsByClass(
'Stream')[0].getElementsByClass('Measure')), 3)
self.assertEqual(len(post.getElementsByClass(
'Stream')[1].getElementsByClass('Measure')), 3)
self.assertEqual(len(post.flat.getElementsByClass('TimeSignature')), 2)
self.assertEqual(len(post.flat.getElementsByClass('Clef')), 2)
def testMakeNotationScoreB(self):
'''Test makeNotation on Score objects
'''
from music21 import stream
s = stream.Score()
p1 = stream.Stream()
p2 = stream.Stream()
for p in [p1, p2]:
p.repeatAppend(note.Note(), 12)
s.insert(0, p)
# this is true as the sub-stream contain notes
self.assertEqual(s.hasPartLikeStreams(), True)
self.assertEqual(s.getElementsByClass('Stream')[0].hasMeasures(), False)
self.assertEqual(s.getElementsByClass('Stream')[1].hasMeasures(), False)
# supply a meter stream
post = s.makeNotation(inPlace=False, meterStream=stream.Stream(
[meter.TimeSignature('3/4')]))
self.assertEqual(post.hasPartLikeStreams(), True)
# four measures are made due to passed-in time signature
self.assertEqual(len(post.getElementsByClass(
'Stream')[0].getElementsByClass('Measure')), 4)
self.assertEqual(len(post.getElementsByClass(
'Stream')[1].getElementsByClass('Measure')), 4)
self.assertEqual(len(post.flat.getElementsByClass('TimeSignature')), 2)
self.assertEqual(len(post.flat.getElementsByClass('Clef')), 2)
def testMakeNotationScoreC(self):
'''Test makeNotation on Score objects
'''
from music21 import stream
s = stream.Score()
p1 = stream.Stream()
p2 = stream.Stream()
for p in [p1, p2]:
p.repeatAppend(note.Note(), 12)
s.insert(0, p)
# create measures in the first part
s.getElementsByClass('Stream')[0].makeNotation(inPlace=True,
meterStream=stream.Stream([meter.TimeSignature('3/4')]))
self.assertEqual(s.getElementsByClass('Stream')[0].hasMeasures(), True)
self.assertEqual(s.getElementsByClass('Stream')[1].hasMeasures(), False)
post = s.makeNotation(inPlace=False)
self.assertEqual(len(post.getElementsByClass(
'Stream')[0].getElementsByClass('Measure')), 4)
self.assertEqual(len(post.getElementsByClass(
'Stream')[1].getElementsByClass('Measure')), 3)
self.assertEqual(len(post.flat.getElementsByClass('TimeSignature')), 2)
self.assertEqual(len(post.flat.getElementsByClass('Clef')), 2)
def testMakeTies(self):
from music21 import corpus
def collectAccidentalDisplayStatus(s):
post = []
for e in s.flat.notesAndRests:
if e.pitch.accidental != None:
post.append((e.pitch.name, e.pitch.accidental.displayStatus))
else: # mark as not having an accidental
post.append('x')
return post
s = corpus.parse('bach/bwv66.6')
# this has accidentals in measures 2 and 6
sSub = s.parts[3].measures(2,6)
#sSub.show()
# only notes that deviate from key signature are True
self.assertEqual(collectAccidentalDisplayStatus(sSub), ['x', (u'C#', False), 'x', 'x', (u'E#', True), (u'F#', False), 'x', (u'C#', False), (u'F#', False), (u'F#', False), (u'G#', False), (u'F#', False), (u'G#', False), 'x', 'x', 'x', (u'C#', False), (u'F#', False), (u'G#', False), 'x', 'x', 'x', 'x', (u'E#', True), (u'F#', False)] )
# this removes key signature
sSub = sSub.flat.notesAndRests
self.assertEqual(len(sSub), 25)
sSub.insert(0, meter.TimeSignature('3/8'))
sSub.augmentOrDiminish(2, inPlace=True)
# explicitly call make measures and make ties
mStream = sSub.makeMeasures(finalBarline=None)
mStream.makeTies(inPlace=True)
self.assertEqual(len(mStream.flat), 45)
#mStream.show()
# this as expected: the only True accidental display status is those
# that were in the orignal. in Finale display, however, sharps are
# displayed when the should not be.
self.assertEqual(collectAccidentalDisplayStatus(mStream), ['x', (u'C#', False), (u'C#', False), 'x', 'x', 'x', 'x', (u'E#', True), (u'E#', False), (u'F#', False), 'x', (u'C#', False), (u'C#', False), (u'F#', False), (u'F#', False), (u'F#', False), (u'F#', False), (u'G#', False), (u'G#', False), (u'F#', False), (u'G#', False), 'x', 'x', 'x', 'x', (u'C#', False), (u'C#', False), (u'F#', False), (u'F#', False), (u'G#', False), (u'G#', False), 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', (u'E#', True), (u'E#', False), (u'F#', False), (u'F#', False)]
)
# transposing should reset all transposed accidentals
mStream.flat.transpose('p5', inPlace=True)
#mStream.show()
# after transposition all accidentals are reset
# note: last d# is not showing in Finale, but this seems to be a
# finale error, as the musicxml is the same in all D# cases
self.assertEqual(collectAccidentalDisplayStatus(mStream), ['x', ('G#', None), ('G#', None), 'x', 'x', 'x', 'x', ('B#', None), ('B#', None), ('C#', None), ('F#', None), ('G#', None), ('G#', None), ('C#', None), ('C#', None), ('C#', None), ('C#', None), ('D#', None), ('D#', None), ('C#', None), ('D#', None), 'x', 'x', ('F#', None), ('F#', None), ('G#', None), ('G#', None), ('C#', None), ('C#', None), ('D#', None), ('D#', None), 'x', 'x', 'x', 'x', 'x', 'x', ('F#', None), ('F#', None), ('B#', None), ('B#', None), ('C#', None), ('C#', None)]
)
def testMeasuresAndMakeMeasures(self):
from music21 import converter
s = converter.parse('tinynotation: 2/8 g8 e f g e f g a')
sSub = s.measures(3,3)
self.assertEqual(str(sSub.pitches), "[<music21.pitch.Pitch E4>, <music21.pitch.Pitch F4>]")
#sSub.show()
def testSortAndAutoSort(self):
s = Stream()
s.autoSort = False
n1 = note.Note('A')
n2 = note.Note('B')
s.insert(100, n2) # add 'b' first
s.insert(0, n1) # now n1 has a higher index than n2
self.assertEqual([x.name for x in s], ['B', 'A'])
# try getting sorted
sSorted = s.sorted
# original unchanged
self.assertEqual([x.name for x in s], ['B', 'A'])
# new is chnaged
self.assertEqual([x.name for x in sSorted], ['A', 'B'])
# sort in place
s.sort()
self.assertEqual([x.name for x in s], ['A', 'B'])
# test getElements sorting through .notesAndRests w/ autoSort
s = Stream()
s.autoSort = True
n1 = note.Note('A')
n2 = note.Note('B')
s.insert(100, n2) # add 'b' first
s.insert(0, n1) # now n1 (A) has a higher index than n2 (B)
# if we get .notesAndRests, we are getting elements by class, and thus getting
# sorted version
self.assertEqual([x.name for x in s.notesAndRests], ['A', 'B'])
# test getElements sorting through .notesAndRests w/o autoSort
s = Stream()
s.autoSort = False
n1 = note.Note('a')
n2 = note.Note('b')
s.insert(100, n2) # add 'b' first
s.insert(0, n1) # now n1 (A) has a higher index than n2 (B)
self.assertEqual([x.name for x in s.notesAndRests], ['B', 'A'])
# test __getitem__ calls w/ autoSort
s = Stream()
s.autoSort = False
n1 = note.Note('A')
n2 = note.Note('B')
s.insert(100, n2) # add 'b' first
s.insert(0, n1) # now n1 (A) has a higher index than n2 (B)
self.assertEqual(s[0].name, 'B')
self.assertEqual(s[1].name, 'A')
# test __getitem__ calls w autoSort
s = Stream()
s.autoSort = True
n1 = note.Note('a')
n2 = note.Note('b')
s.insert(100, n2) # add 'b' first
s.insert(0, n1) # now n1 (A) has a higher index than n2 (B)
self.assertEqual(s[0].name, 'A')
self.assertEqual(s[1].name, 'B')
# test .elements calls w/ autoSort
s = Stream()
s.autoSort = False
n1 = note.Note('a')
n2 = note.Note('b')
s.insert(100, n2) # add 'b' first
s.insert(0, n1) # now n1 (A) has a higher index than n2 (B)
self.assertEqual(s.elements[0].name, 'B')
self.assertEqual(s.elements[1].name, 'A')
# test .elements calls w autoSort
s = Stream()
s.autoSort = True
n1 = note.Note('a')
n2 = note.Note('b')
s.insert(100, n2) # add 'b' first
s.insert(0, n1) # now n1 (A) has a higher index than n2 (B)
self.assertEqual(s.elements[0].name, 'A')
self.assertEqual(s.elements[1].name, 'B')
# test possible problematic casses of overlapping parts
# store start time, dur
pairs = [(20, 2), (15, 10), (22,1), (10, 2), (5, 25), (8, 10), (0, 2), (0, 30)]
# with autoSort false
s = Stream()
s.autoSort = False
for o, d in pairs:
n = note.Note()
n.quarterLength = d
s.insert(o, n)
match = []
for n in s.notesAndRests:
match.append((n.offset, n.quarterLength))
self.assertEqual(pairs, match)
# with autoSort True
s = Stream()
s.autoSort = True
for o, d in pairs:
n = note.Note()
n.quarterLength = d
s.insert(o, n)
match = []
for n in s.notesAndRests:
match.append((n.offset, n.quarterLength))
self.assertEqual([(0.0, 2), (0.0, 30), (5.0, 25), (8.0, 10), (10.0, 2), (15.0, 10), (20.0, 2), (22.0, 1.0)], match)
def testMakeChordsBuiltA(self):
from music21 import stream
# test with equal durations
pitchCol = [('A2', 'C2'),
('A#1', 'C-3', 'G5'),
('D3', 'B-1', 'C4', 'D#2')]
# try with different duration assignments; should always get
# the same results
for durCol in [[1, 1, 1], [.5, 2, 3], [.25, .25, .5], [6, 6, 8]]:
s = stream.Stream()
o = 0
for i in range(len(pitchCol)):
ql = durCol[i]
for pStr in pitchCol[i]:
n = note.Note(pStr)
n.quarterLength = ql
s.insert(o, n)
o += ql
self.assertEqual(len(s), 9)
self.assertEqual(len(s.getElementsByClass('Chord')), 0)
# do both in place and not in place, compare results
sMod = s.makeChords(inPlace=False)
s.makeChords(inPlace=True)
for sEval in [s, sMod]:
self.assertEqual(len(sEval.getElementsByClass('Chord')), 3)
# make sure we have all the original pitches
for i in range(len(pitchCol)):
match = [p.nameWithOctave for p in
sEval.getElementsByClass('Chord')[i].pitches]
self.assertEqual(match, list(pitchCol[i]))
# print 'post makeChords'
# s.show('t')
#sMod.show('t')
#s.show()
def testMakeChordsBuiltB(self):
from music21 import stream
n1 = note.Note('c2')
n1.quarterLength = 2
n2 = note.Note('d3')
n2.quarterLength = .5
n3 = note.Note('e4')
n3.quarterLength = 2
n4 = note.Note('f5')
n4.quarterLength = .5
s = stream.Stream()
s.insert(0, n1)
s.insert(1, n2) # overlapping, starting after n1 but finishing before
s.insert(2, n3)
s.insert(3, n4) # overlapping, starting after n3 but finishing before
self.assertEqual([e.offset for e in s], [0.0, 1.0, 2.0, 3.0])
# this results in two chords; n2 and n4 are effectively shifted
# to the start of n1 and n3
sMod = s.makeChords(inPlace=False)
s.makeChords(inPlace=True)
for sEval in [s, sMod]:
self.assertEqual(len(sEval.getElementsByClass('Chord')), 2)
self.assertEqual([c.offset for c in sEval], [0.0, 2.0])
# do the same, but reverse the short/long duration relation
# because the default min window is .25, the first and last
# notes are not gathered into chords
# into a chord
n1 = note.Note('c2')
n1.quarterLength = .5
n2 = note.Note('d3')
n2.quarterLength = 1.5
n3 = note.Note('e4')
n3.quarterLength = .5
n4 = note.Note('f5')
n4.quarterLength = 1.5
s = stream.Stream()
s.insert(0, n1)
s.insert(1, n2) # overlapping, starting after n1 but finishing before
s.insert(2, n3)
s.insert(3, n4) # overlapping, starting after n3 but finishing before
#s.makeRests(fillGaps=True)
# this results in two chords; n2 and n4 are effectively shifted
# to the start of n1 and n3
sMod = s.makeChords(inPlace=False)
#sMod.show()
s.makeChords(inPlace=True)
for sEval in [s, sMod]:
# have three chords, even though 1 only has more than 1 pitch
# might change this?
self.assertEqual(len(sEval.getElementsByClass('Chord')), 3)
self.assertEqual([c.offset for c in sEval], [0.0, 0.5, 1.0, 2.5, 3.0] )
def testMakeChordsBuiltC(self):
# test removal of redundant pitches
from music21 import stream
n1 = note.Note('c2')
n1.quarterLength = .5
n2 = note.Note('c2')
n2.quarterLength = .5
n3 = note.Note('g2')
n3.quarterLength = .5
n4 = note.Note('e4')
n4.quarterLength = .5
n5 = note.Note('e4')
n5.quarterLength = .5
n6 = note.Note('f#4')
n6.quarterLength = .5
s1 = stream.Stream()
s1.insert(0, n1)
s1.insert(0, n2)
s1.insert(0, n3)
s1.insert(.5, n4)
s1.insert(.5, n5)
s1.insert(.5, n6)
sMod = s1.makeChords(inPlace=False, removeRedundantPitches=True)
self.assertEquals([p.nameWithOctave for p in sMod.getElementsByClass('Chord')[0].pitches], ['C2', 'G2'])
self.assertEquals([p.nameWithOctave for p in sMod.getElementsByClass('Chord')[1].pitches], ['E4', 'F#4'])
# without redundant pitch gathering
sMod = s1.makeChords(inPlace=False, removeRedundantPitches=False)
self.assertEquals([p.nameWithOctave for p in sMod.getElementsByClass('Chord')[0].pitches], ['C2', 'C2', 'G2'])
self.assertEquals([p.nameWithOctave for p in sMod.getElementsByClass('Chord')[1].pitches], ['E4', 'E4', 'F#4'] )
def testMakeChordsBuiltD(self):
# attempt to isolate case
from music21 import stream
p1 = stream.Part()
p1.append([note.Note('G4', quarterLength=2),
note.Note('B4', quarterLength=2),
note.Note('C4', quarterLength=4),
note.Rest(quarterLength=1),
note.Note('C4', quarterLength=1),
note.Note('B4', quarterLength=1),
note.Note('A4', quarterLength=1),
])
p2 = stream.Part()
p2.append([note.Note('A3', quarterLength=4),
note.Note('F3', quarterLength=4),])
p3 = stream.Part()
p3.append([note.Rest(quarterLength=8),
note.Rest(quarterLength=4),
])
s = stream.Score()
s.insert([0, p1])
s.insert([0, p2])
s.insert([0, p3])
post = s.flat.makeChords()
#post.show('t')
self.assertEqual(len(post.getElementsByClass('Rest')), 1)
self.assertEqual(len(post.getElementsByClass('Chord')), 5)
#post.show()
def testMakeChordsImported(self):
from music21 import corpus
s = corpus.parse('bach/bwv66.6')
#s.show()
# using in place to get the stored flat version
sMod = s.flat.makeChords(includePostWindow=False)
self.assertEqual(len(sMod.getElementsByClass('Chord')), 35)
#sMod.show()
self.assertEqual(
[len(c.pitches) for c in sMod.getElementsByClass('Chord')],
[3, 4, 4, 3, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 4, 3, 4, 3, 4, 3, 4, 4, 4, 4, 3, 4, 4, 2, 4, 3, 4, 4])
# when we include post-window, we get more tones, per chord
# but the same number of chords
sMod = s.flat.makeChords(includePostWindow=True)
self.assertEqual(len(sMod.getElementsByClass('Chord')), 35)
self.assertEqual(
[len(c.pitches) for c in sMod.getElementsByClass('Chord')],
[6, 4, 4, 3, 4, 5, 5, 4, 4, 4, 4, 5, 4, 4, 5, 5, 5, 4, 5, 5, 3, 4, 3, 4, 4, 4, 7, 5, 4, 6, 2, 6, 4, 5, 4] )
#sMod.show()
def testGetElementAtOrBeforeBarline(self):
'''
problems with getting elements at or before
when triplets are involved...
'''
from music21 import converter
import os
bugtestFile = os.path.join(common.getSourceFilePath(), 'stream', 'tripletOffsetBugtest.xml')
s = converter.parse(bugtestFile)
p = s.parts[0]
m = p.getElementAtOrBefore(2)
self.assertEqual(m.number, 2)
def testElementsHighestTimeA(self):
'''Test adding elements at the highest time position
'''
n1 = note.Note()
n1.quarterLength = 30
n2 = note.Note()
n2.quarterLength = 20
b1 = bar.Barline()
s = Stream()
s.append(n1)
self.assertEqual(s.highestTime, 30)
self.assertEqual(len(s), 1)
self.assertEqual(s[0], n1)
self.assertEqual(s.index(n1), 0)
self.assertEqual(s[0].activeSite, s)
# insert bar in highest time position
s.storeAtEnd(b1)
self.assertEqual(len(s), 2)
self.assertEqual(s[1], b1)
self.assertEqual(s.index(b1), 1)
self.assertEqual(s[1].activeSite, s)
# offset of b1 is at the highest time
self.assertEqual([e.offset for e in s], [0.0, 30.0])
s.append(n2)
self.assertEqual(len(s), 3)
self.assertEqual(s[1], n2)
self.assertEqual(s.index(n2), 1)
self.assertEqual(s[2], b1)
self.assertEqual(s.index(b1), 2)
self.assertEqual(s.highestTime, 50)
# there are now three elements, and the third is the bar
self.assertEqual([e.offset for e in s], [0.0, 30, 50.0])
# get offset by elements
self.assertEqual(s.elementOffset(n1), 0.0)
self.assertEqual(s.elementOffset(b1), 50)
# get elements by offset
found1 = s.getElementsByOffset(0, 40)
self.assertEqual(len(found1.notesAndRests), 2)
# check within the maximum range
found2 = s.getElementsByOffset(40, 60)
self.assertEqual(len(found2.notesAndRests), 0)
# found the barline
self.assertEqual(found2[0], b1)
# should get the barline
self.assertEqual(s.getElementAtOrBefore(50), b1)
self.assertEqual(s.getElementAtOrBefore(49), n2)
# can get element after element
self.assertEqual(s.getElementAfterElement(n1), n2)
self.assertEqual(s.getElementAfterElement(n2), b1)
# try to get elements by class
sub1 = s.getElementsByClass('Barline')
self.assertEqual(len(sub1), 1)
# only found item is barline
self.assertEqual(sub1[0], b1)
self.assertEqual([e.offset for e in sub1], [0.0])
# if we append a new element, the old barline should report
# an offset at the last element
n3 = note.Note()
n3.quarterLength = 10
sub1.append(n3) # places this before barline
self.assertEqual(sub1[sub1.index(b1)].offset, 10.0)
self.assertEqual([e.offset for e in sub1], [0.0, 10.0])
# try to get elements not of class; only have notes
sub2 = s.getElementsNotOfClass(bar.Barline)
self.assertEqual(len(sub2), 2)
self.assertEqual(len(sub2.notesAndRests), 2)
sub3 = s.getElementsNotOfClass(note.Note)
self.assertEqual(len(sub3), 1)
self.assertEqual(len(sub3.notesAndRests), 0)
# make a copy:
sCopy = copy.deepcopy(s)
self.assertEqual([e.offset for e in sCopy], [0.0, 30, 50.0])
# not equal b/c a deepcopy was made
self.assertEqual(id(sCopy[2]) == id(b1), False)
# can still match class
self.assertEqual(isinstance(sCopy[2], bar.Barline), True)
# create another barline and try to replace
b2 = bar.Barline()
s.replace(b1, b2)
self.assertEqual(id(s[2]), id(b2))
# try to remove elements; the second index is the barline
self.assertEqual(s.pop(2), b2)
self.assertEqual(len(s), 2)
self.assertEqual([e.offset for e in s], [0.0, 30])
# add back again.
s.storeAtEnd(b1)
self.assertEqual([e.offset for e in s], [0.0, 30, 50.0])
# try to remove intermediary elements
self.assertEqual(s.pop(1), n2)
# offset of highest time element has shifted
self.assertEqual([e.offset for e in s], [0.0, 30.0])
# index is now 1
self.assertEqual(s.index(b1), 1)
def testElementsHighestTimeB(self):
'''Test adding elements at the highest time position
'''
n1 = note.Note()
n1.quarterLength = 30
n2 = note.Note()
n2.quarterLength = 20
b1 = bar.Barline()
s = Stream()
s.append(n1)
s.append(n2)
s.storeAtEnd(b1)
self.assertEqual([e.offset for e in s], [0.0, 30.0, 50.0])
# can shift elements, altering all, but only really shifting
# standard elements
s.shiftElements(5)
self.assertEqual([e.offset for e in s], [5.0, 35.0, 55.0])
# got all
found1 = s.extractContext(n2, 30)
self.assertEqual([e.offset for e in found1], [5.0, 35.0, 55.0])
# just after, none before
found1 = s.extractContext(n2, 0, 30)
self.assertEqual([e.offset for e in found1], [35.0, 55.0])
def testElementsHighestTimeC(self):
n1 = note.Note()
n1.quarterLength = 30
n2 = note.Note()
n2.quarterLength = 20
ts1 = meter.TimeSignature('6/8')
b1 = bar.Barline()
c1 = clef.Treble8vaClef()
s = Stream()
s.append(n1)
self.assertEqual([e.offset for e in s], [0.0])
s.storeAtEnd(b1)
s.storeAtEnd(c1)
s.storeAtEnd(ts1)
self.assertEqual([e.offset for e in s], [0.0, 30.0, 30.0, 30.0] )
s.append(n2)
self.assertEqual([e.offset for e in s], [0.0, 30.0, 50.0, 50.0, 50.0] )
# sorting of objects is by class
self.assertEqual([e.classes[0] for e in s], ['Note', 'Note', 'Barline', 'Treble8vaClef', 'TimeSignature'] )
b2 = bar.Barline()
s.storeAtEnd(b2)
self.assertEqual([e.classes[0] for e in s], ['Note', 'Note', 'Barline', 'Barline', 'Treble8vaClef', 'TimeSignature'] )
def testSliceByQuarterLengthsBuilt(self):
from music21 import stream
s = Stream()
n1 = note.Note()
n1.quarterLength = 1
n2 = note.Note()
n2.quarterLength = 2
n3 = note.Note()
n3.quarterLength = .5
n4 = note.Note()
n4.quarterLength = 1.5
for n in [n1,n2,n3,n4]:
s.append(n)
post = s.sliceByQuarterLengths(.125, inPlace=False)
self.assertEqual([n.tie.type for n in post.notesAndRests], ['start', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'stop', 'start', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'stop', 'start', 'continue', 'continue', 'stop', 'start', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'stop'] )
post = s.sliceByQuarterLengths(.25, inPlace=False)
self.assertEqual([n.tie.type for n in post.notesAndRests], ['start', 'continue', 'continue', 'stop', 'start', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'stop', 'start', 'stop', 'start', 'continue', 'continue', 'continue', 'continue', 'stop'] )
post = s.sliceByQuarterLengths(.5, inPlace=False)
self.assertEqual([n.tie == None for n in post.notesAndRests], [False, False, False, False, False, False, True, False, False, False] )
# cannot map .3333 into .5, so this raises an exception
self.assertRaises(stream.StreamException, lambda: s.sliceByQuarterLengths(1/3., inPlace=False))
post = s.sliceByQuarterLengths(1/6., inPlace=False)
self.assertEqual([n.tie.type for n in post.notesAndRests], ['start', 'continue', 'continue', 'continue', 'continue', 'stop', 'start', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'stop', 'start', 'continue', 'stop', 'start', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'stop'])
#post.show()
# try to slice just a target
post = s.sliceByQuarterLengths(.125, target=n2, inPlace=False)
self.assertEqual([n.tie == None for n in post.notesAndRests], [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, True] )
#post.show()
# test case where we have an existing tied note in a multi Measure structure that we do not want to break
s = Stream()
n1 = note.Note()
n1.quarterLength = 8
n2 = note.Note()
n2.quarterLength = 8
n3 = note.Note()
n3.quarterLength = 8
s.append(n1)
s.append(n2)
s.append(n3)
self.assertEqual(s.highestTime, 24)
sMeasures = s.makeMeasures()
sMeasures.makeTies(inPlace=True)
self.assertEquals([n.tie.type for n in sMeasures.flat.notesAndRests],
['start', 'stop', 'start', 'stop', 'start', 'stop'] )
# this shows that the previous ties across the bar line are maintained
# even after slicing
sMeasures.sliceByQuarterLengths([.5], inPlace=True)
self.assertEquals([n.tie.type for n in sMeasures.flat.notesAndRests],
['start', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'stop', 'start', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'stop', 'start', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'stop'] )
#sMeasures.show()
s = Stream()
n1 = note.Note('c#')
n1.quarterLength = 1
n2 = note.Note('d-')
n2.quarterLength = 2
n3 = note.Note('f#')
n3.quarterLength = .5
n4 = note.Note('g#')
n4.quarterLength = 1.5
for n in [n1,n2,n3,n4]:
s.append(n)
post = s.sliceByQuarterLengths(.125, inPlace=False)
#post.show()
self.assertEqual([n.tie == None for n in post.notesAndRests], [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False])
s = Stream()
n1 = note.Note()
n1.quarterLength = .25
n2 = note.Note()
n2.quarterLength = .5
n3 = note.Note()
n3.quarterLength = 1
n4 = note.Note()
n4.quarterLength = 1.5
for n in [n1,n2,n3,n4]:
s.append(n)
post = s.sliceByQuarterLengths(.5, inPlace=False)
self.assertEqual([n.tie == None for n in post.notesAndRests], [True, True, False, False, False, False, False])
def testSliceByQuarterLengthsImported(self):
from music21 import corpus
sSrc = corpus.parse('bwv66.6')
s = copy.deepcopy(sSrc)
for p in s.parts:
p.sliceByQuarterLengths(.5, inPlace=True, addTies=False)
p.makeBeams(inPlace=True)
self.assertEqual(len(s.parts[0].flat.notesAndRests), 72)
self.assertEqual(len(s.parts[1].flat.notesAndRests), 72)
self.assertEqual(len(s.parts[2].flat.notesAndRests), 72)
self.assertEqual(len(s.parts[3].flat.notesAndRests), 72)
s = copy.deepcopy(sSrc)
for p in s.parts:
p.sliceByQuarterLengths(.25, inPlace=True, addTies=False)
p.makeBeams(inPlace=True)
self.assertEqual(len(s.parts[0].flat.notesAndRests), 144)
self.assertEqual(len(s.parts[1].flat.notesAndRests), 144)
self.assertEqual(len(s.parts[2].flat.notesAndRests), 144)
self.assertEqual(len(s.parts[3].flat.notesAndRests), 144)
# test applying to a complete score; works fine
s = copy.deepcopy(sSrc)
s.sliceByQuarterLengths(.5, inPlace=True, addTies=False)
#s.show()
self.assertEqual(len(s.parts[0].flat.notesAndRests), 72)
self.assertEqual(len(s.parts[1].flat.notesAndRests), 72)
self.assertEqual(len(s.parts[2].flat.notesAndRests), 72)
self.assertEqual(len(s.parts[3].flat.notesAndRests), 72)
def testSliceByGreatestDivisorBuilt(self):
s = Stream()
n1 = note.Note()
n1.quarterLength = 1.75
n2 = note.Note()
n2.quarterLength = 2
n3 = note.Note()
n3.quarterLength = .5
n4 = note.Note()
n4.quarterLength = 1.5
for n in [n1,n2,n3,n4]:
s.append(n)
post = s.sliceByGreatestDivisor(inPlace=False)
self.assertEqual(len(post.flat.notesAndRests), 23)
self.assertEqual([n.tie.type for n in post.notesAndRests], ['start', 'continue', 'continue', 'continue', 'continue', 'continue', 'stop', 'start', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'stop', 'start', 'stop', 'start', 'continue', 'continue', 'continue', 'continue', 'stop'])
s = Stream()
n1 = note.Note()
n1.quarterLength = 2
n2 = note.Note()
n2.quarterLength = 1/3.
n3 = note.Note()
n3.quarterLength = .5
n4 = note.Note()
n4.quarterLength = 1.5
for n in [n1,n2,n3,n4]:
s.append(n)
post = s.sliceByGreatestDivisor(inPlace=False)
self.assertEqual(len(post.flat.notesAndRests), 26)
self.assertEqual([n.tie.type for n in post.notesAndRests], ['start', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'stop', 'start', 'stop', 'start', 'continue', 'stop', 'start', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'continue', 'stop'] )
def testSliceByGreatestDivisorImported(self):
from music21 import corpus
sSrc = corpus.parse('bwv66.6')
s = copy.deepcopy(sSrc)
for p in s.parts:
p.sliceByGreatestDivisor(inPlace=True, addTies=True)
#p.makeBeams(inPlace=True) # uncomment when debugging, otherwise just slows down the test
#s.show()
# parts have different numbers of notes, as splitting is done on
# a note per note basis
self.assertEqual(len(s.parts[0].flat.notesAndRests), 44)
self.assertEqual(len(s.parts[1].flat.notesAndRests), 59)
self.assertEqual(len(s.parts[2].flat.notesAndRests), 61)
self.assertEqual(len(s.parts[3].flat.notesAndRests), 53)
s = copy.deepcopy(sSrc)
s.sliceByGreatestDivisor(inPlace=True, addTies=True)
#s.flat.makeChords().show()
#s.show()
def testSliceAtOffsetsSimple(self):
s = Stream()
n = note.Note()
n.quarterLength = 4
s.append(n)
unused_post = s.sliceAtOffsets([1, 2, 3], inPlace=True)
a = [(e.offset, e.quarterLength) for e in s]
b = [(0.0, 1.0), (1.0, 1.0), (2.0, 1.0), (3.0, 1.0)]
self.assertEqual(a, b)
def testSliceAtOffsetsBuilt(self):
from music21 import stream
s = stream.Stream()
for p, ql in [('d2',4)]:
n = note.Note(p)
n.quarterLength = ql
s.append(n)
self.assertEqual([e.offset for e in s], [0.0])
s1 = s.sliceAtOffsets([0.5, 1, 1.5, 2, 2.5, 3, 3.5], inPlace=False)
self.assertEqual([(e.offset, e.quarterLength) for e in s1], [(0.0, 0.5), (0.5, 0.5), (1.0, 0.5), (1.5, 0.5), (2.0, 0.5), (2.5, 0.5), (3.0, 0.5), (3.5, 0.5)] )
s1 = s.sliceAtOffsets([.5], inPlace=False)
self.assertEqual([(e.offset, e.quarterLength) for e in s1], [(0.0, 0.5), (0.5, 3.5)])
s = stream.Stream()
for p, ql in [('a2',1.5), ('a2',1.5), ('a2',1.5)]:
n = note.Note(p)
n.quarterLength = ql
s.append(n)
self.assertEqual([e.offset for e in s], [0.0, 1.5, 3.0])
s1 = s.sliceAtOffsets([.5], inPlace=False)
self.assertEqual([e.offset for e in s1], [0.0, 0.5, 1.5, 3.0])
s1.sliceAtOffsets([1.0, 2.5], inPlace=True)
self.assertEqual([e.offset for e in s1], [0.0, 0.5, 1.0, 1.5, 2.5, 3.0])
s1.sliceAtOffsets([3.0, 2.0, 3.5, 4.0], inPlace=True)
self.assertEqual([e.offset for e in s1], [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0])
self.assertEqual([e.quarterLength for e in s1], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5])
def testSliceAtOffsetsImported(self):
from music21 import corpus
sSrc = corpus.parse('bwv66.6')
post = sSrc.parts[0].flat.sliceAtOffsets([.25, 1.25, 3.25])
self.assertEqual([e.offset for e in post], [0.0, 0.0, 0.0, 0.0, 0.0, 0.25, 0.5, 1.0, 1.25, 2.0, 3.0, 3.25, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 9.0, 9.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 29.0, 31.0, 32.0, 33.0, 34.0, 34.5, 35.0, 36.0] )
# will also work on measured part
post = sSrc.parts[0].sliceAtOffsets([.25, 1.25, 3.25, 35.125])
self.assertEqual([e.offset for e in
post.getElementsByClass('Measure')[0].notesAndRests], [0.0, 0.25, 0.5])
self.assertEqual([e.offset for e in
post.getElementsByClass('Measure')[1].notesAndRests], [0.0, 0.25, 1.0, 2.0, 2.25, 3.0])
# check for alteration in last measure
self.assertEqual([e.offset for e in
post.getElementsByClass('Measure')[-1].notesAndRests], [0.0, 1.0, 1.5, 2.0, 2.125] )
def testSliceByBeatBuilt(self):
from music21 import stream
s = stream.Stream()
ts1 = meter.TimeSignature('3/4')
s.insert(0, ts1)
for p, ql in [('d2',3)]:
n = note.Note(p)
n.quarterLength = ql
s.append(n)
# have time signature and one note
self.assertEqual([e.offset for e in s], [0.0, 0.0])
s1 = s.sliceByBeat()
self.assertEqual([(e.offset, e.quarterLength) for e in s1.notesAndRests], [(0.0, 1.0), (1.0, 1.0), (2.0, 1.0)] )
# replace old ts with a new
s.remove(ts1)
ts2 = meter.TimeSignature('6/8')
s.insert(0, ts2)
s1 = s.sliceByBeat()
self.assertEqual([(e.offset, e.quarterLength) for e in s1.notesAndRests], [(0.0, 1.5), (1.5, 1.5)] )
def testSliceByBeatImported(self):
from music21 import corpus
sSrc = corpus.parse('bwv66.6')
post = sSrc.parts[0].sliceByBeat()
self.assertEqual([e.offset for e in post.flat.notesAndRests], [0.0, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 9.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 34.5, 35.0])
#post.show()
def testChordifyImported(self):
from music21 import corpus
s = corpus.parse('luca/gloria')
#s.show()
post = s.measures(0, 20, gatherSpanners=False)
# somehow, this is doubling measures
#post.show()
self.assertEqual([e.offset for e in post.parts[0].flat.notesAndRests], [0.0, 3.0, 3.5, 4.5, 5.0, 6.0, 6.5, 7.5, 8.5, 9.0, 10.5, 12.0, 15.0, 16.5, 17.5, 18.0, 18.5, 19.0, 19.5, 20.0, 20.5, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 30.0, 33.0, 34.5, 35.5, 36.0, 37.5, 38.0, 39.0, 40.0, 41.0, 42.0, 43.5, 45.0, 45.5, 46.5, 47.0, 48.0, 49.5, 51.0, 51.5, 52.0, 52.5, 53.0, 53.5, 54.0, 55.5, 57.0, 58.5])
post = post.chordify()
#post.show('t')
#post.show()
self.assertEqual([e.offset for e in post.flat.notes], [0.0, 3.0, 3.5, 4.5, 5.0, 5.5, 6.0, 6.5, 7.5, 8.5, 9.0, 10.5, 12.0, 15.0, 16.5, 17.5, 18.0, 18.5, 19.0, 19.5, 20.0, 20.5, 21.0, 21.5, 22.0, 22.5, 23.0, 23.5, 24.0, 24.5, 25.0, 25.5, 26.0, 26.5, 27.0, 30.0, 33.0, 34.5, 35.5, 36.0, 37.5, 38.0, 39.0, 40.0, 40.5, 41.0, 42.0, 43.5, 45.0, 45.5, 46.0, 46.5, 47.0, 47.5, 48.0, 49.5, 51.0, 51.5, 52.0, 52.5, 53.0, 53.5, 54.0, 54.5, 55.0, 55.5, 56.0, 56.5, 57.0, 58.5, 59.5])
self.assertEqual(len(post.flat.getElementsByClass('Chord')), 71) # Careful! one version of the caching is screwing up m. 20 which definitely should not have rests in it -- was creating 69 notes, not 71.
def testChordifyRests(self):
# test that chordify does not choke on rests
from music21 import stream
p1 = stream.Part()
for p, ql in [(None, 2), ('d2',2), (None, 2), ('e3',2), ('f3', 2)]:
if p == None:
n = note.Rest()
else:
n = note.Note(p)
n.quarterLength = ql
p1.append(n)
p2 = stream.Part()
for p, ql in [(None, 2), ('c#3',1), ('d#3',1), (None, 2), ('e-5',2), (None, 2)]:
if p == None:
n = note.Rest()
else:
n = note.Note(p)
n.quarterLength = ql
p2.append(n)
self.assertEqual([e.offset for e in p1], [0.0, 2.0, 4.0, 6.0, 8.0])
self.assertEqual([e.offset for e in p2], [0.0, 2.0, 3.0, 4.0, 6.0, 8.0])
score = stream.Score()
score.insert(0, p1)
score.insert(0, p2)
# parts retain their characteristics
# rests are recast
scoreChords = score.makeChords()
#scoreChords.show()
self.assertEqual(len(scoreChords.parts[0].flat), 5)
self.assertEqual(len(scoreChords.parts[0].flat.getElementsByClass(
'Chord')), 3)
self.assertEqual(len(scoreChords.parts[0].flat.getElementsByClass(
'Rest')), 2)
self.assertEqual(len(scoreChords.parts[1].flat), 6)
self.assertEqual(len(scoreChords.parts[1].flat.getElementsByClass(
'Chord')), 3)
self.assertEqual(len(scoreChords.parts[1].flat.getElementsByClass(
'Rest')), 3)
# calling this on a flattened version
scoreFlat = score.flat
scoreChords = scoreFlat.makeChords()
self.assertEqual(len(scoreChords.flat.getElementsByClass(
'Chord')), 3)
self.assertEqual(len(scoreChords.flat.getElementsByClass(
'Rest')), 2)
scoreChordify = score.chordify()
self.assertEqual(len(scoreChordify.flat.getElementsByClass(
'Chord')), 4)
self.assertEqual(len(scoreChordify.flat.getElementsByClass(
'Rest')), 2)
self.assertEqual(str(scoreChordify.getElementsByClass(
'Chord')[0].pitches), '(<music21.pitch.Pitch D2>, <music21.pitch.Pitch C#3>)')
self.assertEqual(str(scoreChordify.getElementsByClass(
'Chord')[1].pitches), '(<music21.pitch.Pitch D2>, <music21.pitch.Pitch D#3>)')
def testChordifyA(self):
from music21 import stream, expressions
p1 = stream.Part()
p1.insert(0, note.Note(quarterLength=12.0))
p1.insert(0.25, expressions.TextExpression('test'))
self.assertEqual(p1.highestTime, 12.0)
p2 = stream.Part()
p2.repeatAppend(note.Note('g4'), 12)
s = stream.Score()
s.insert(0, p1)
s.insert(0, p2)
post = s.chordify()
self.assertEqual(len(post.getElementsByClass('Chord')), 12)
self.assertEqual(str(post.getElementsByClass('Chord')[0].pitches),
'(<music21.pitch.Pitch C4>, <music21.pitch.Pitch G4>)')
p1 = stream.Part()
p1.insert(0, note.Note(quarterLength=12.0))
p1.insert(0.25, expressions.TextExpression('test'))
self.assertEqual(p1.highestTime, 12.0)
p2 = stream.Part()
p2.repeatAppend(note.Note('g4', quarterLength=6.0), 2)
#p2.repeatAppend(note.Note('g4'), 12)
s = stream.Score()
s.insert(0, p1)
s.insert(0, p2)
post = s.chordify()
self.assertEqual(len(post.getElementsByClass('Chord')), 2)
self.assertEqual(str(post.getElementsByClass('Chord')[0].pitches),
'(<music21.pitch.Pitch C4>, <music21.pitch.Pitch G4>)')
#post.show()
#s.show()
def testChordifyB(self):
from music21 import stream
p1 = stream.Part()
m1a = stream.Measure()
m1a.timeSignature = meter.TimeSignature('4/4')
m1a.insert(0, note.Note())
m1a.padAsAnacrusis()
self.assertEqual(m1a.paddingLeft, 3.0)
#m1a.paddingLeft = 3.0 # a quarter pickup
m2a = stream.Measure()
m2a.repeatAppend(note.Note(), 4)
p1.append([m1a, m2a])
p2 = stream.Part()
m1b = stream.Measure()
m1b.timeSignature = meter.TimeSignature('4/4')
m1b.repeatAppend(note.Rest(), 1)
m1b.padAsAnacrusis()
self.assertEqual(m1b.paddingLeft, 3.0)
m2b = stream.Measure()
m2b.repeatAppend(note.Note('g4'), 4)
p2.append([m1b, m2b])
s = stream.Score()
s.insert(0, p1)
s.insert(0, p2)
#s.show()
post = s.chordify()
self.assertEqual(len(post.getElementsByClass('Measure')), 2)
m1 = post.getElementsByClass('Measure')[0]
# test that padding has been maintained
self.assertEqual(m1.paddingLeft, 3.0)
#post.show()
def testChordifyC(self):
from music21 import corpus
s = corpus.parse('schoenberg/opus19/movement6')
#s.show()
m1 = s.parts[0].getElementsByClass('Measure')[0]
self.assertEqual(m1.highestTime, 1.0)
self.assertEqual(m1.paddingLeft, 3.0)
self.assertEqual(m1.duration.quarterLength, 1.0)
self.assertEqual([e.offset for e in m1.notes], [0.0])
#s.parts[0].show()
post = s.chordify()
self.assertEqual(post.getElementsByClass('Measure')[0].paddingLeft, 3.0)
#self.assertEqual(len(post.flat), 3)
#post.show()
# make sure we do not have any voices after chordifying
match = []
for m in post.getElementsByClass('Measure'):
self.assertEqual(m.hasVoices(), False)
match.append(len(m.pitches))
self.assertEqual(match, [3, 9, 9, 25, 25, 21, 12, 6, 21, 29])
self.assertEqual(len(post.flat.getElementsByClass('Rest')), 4)
def testChordifyD(self):
from music21 import stream
# test on a Stream of Streams.
s1 = stream.Stream()
s1.repeatAppend(note.Note(quarterLength=3), 4)
s2 = stream.Stream()
s2.repeatAppend(note.Note('g4', quarterLength=2), 6)
s3 = stream.Stream()
s3.insert(0, s1)
s3.insert(0, s2)
post = s3.chordify()
self.assertEqual(len(post.getElementsByClass('Chord')), 8)
def testChordifyE(self):
from music21 import stream
s1 = stream.Stream()
m1 = stream.Measure()
v1 = stream.Voice()
v1.repeatAppend(note.Note('g4', quarterLength=1.5), 3)
v2 = stream.Voice()
v2.repeatAppend(note.Note(quarterLength=1), 6)
m1.insert(0, v1)
m1.insert(0, v2)
#m1.timeSignature = m1.flat.bestTimeSignature()
#self.assertEqual(str(m1.timeSignature), '')
s1.append(m1)
#s1.show()
post = s1.chordify()
#post.show()
self.assertEqual(len(post.flat.getElementsByClass('Chord')), 8)
def testOpusSearch(self):
from music21 import corpus
import re
o = corpus.parse('essenFolksong/erk5')
s = o.getScoreByTitle('blauen')
self.assertEqual(s.metadata.title, 'Ich sach mir einen blauen Storchen')
s = o.getScoreByTitle('pfal.gr.f')
self.assertEqual(s.metadata.title, 'Es fuhr sich ein Pfalzgraf')
s = o.getScoreByTitle(re.compile('Pfal(.*)'))
self.assertEqual(s.metadata.title, 'Es fuhr sich ein Pfalzgraf')
def testActiveSiteMangling(self):
s1 = Stream()
s2 = Stream()
s2.append(s1)
self.assertEqual(s1.activeSite, s2)
junk = s1.semiFlat
self.assertEqual(s1.activeSite, s2)
junk = s1.flat # the order of these two calls ensures that _getFlatFromSemiflat is called
self.assertEqual(s1.activeSite, s2)
# this works fine
junk = s2.flat
self.assertEqual(s1.activeSite, s2)
# this was the key problem: getting the semiFlat of the activeSite
# looses the activeSite of the sub-stream; this is fixed by the inserting
# of the sub-Stream with setActiveSite False
junk = s2.semiFlat
self.assertEqual(s1.activeSite, s2)
# these test prove that getting a semiFlat stream does not change the
# activeSite
junk = s1.sites.getObjByClass(meter.TimeSignature)
self.assertEqual(s1.activeSite, s2)
junk = s1.sites.getObjByClass(clef.Clef)
self.assertEqual(s1.activeSite, s2)
junk = s1.getContextByClass('Clef')
self.assertEqual(s1.activeSite, s2)
def testGetElementsByContextStream(self):
from music21 import corpus
s = corpus.parse('bwv66.6')
for p in s.parts:
for m in p.getElementsByClass('Measure'):
post = m.getContextByClass(clef.Clef)
self.assertEqual(isinstance(post, clef.Clef), True)
post = m.getContextByClass(meter.TimeSignature)
self.assertEqual(isinstance(post, meter.TimeSignature), True)
post = m.getContextByClass(key.KeySignature)
self.assertEqual(isinstance(post, key.KeySignature), True)
def testVoicesA(self):
v1 = Voice()
n1 = note.Note('d5')
n1.quarterLength = .5
v1.repeatAppend(n1, 8)
v2 = Voice()
n2 = note.Note('c4')
n2.quarterLength = 1
v2.repeatAppend(n2, 4)
s = Measure()
s.insert(0, v1)
s.insert(0, v2)
# test allocating streams and assigning indices
oMap = s.offsetMap
oMapStr = "[\n" # construct string from dict in fixed order...
for ob in oMap:
oMapStr += "{'voiceIndex': " + str(ob.voiceIndex) + ", 'element': " + str(ob.element) + ", 'endTime': " + str(ob.endTime) + ", 'offset': " + str(ob.offset) + "},\n"
oMapStr += "]\n"
#print oMapStr
self.assertEqual(oMapStr,
'''[
{'voiceIndex': 0, 'element': <music21.note.Note D>, 'endTime': 0.5, 'offset': 0.0},
{'voiceIndex': 0, 'element': <music21.note.Note D>, 'endTime': 1.0, 'offset': 0.5},
{'voiceIndex': 0, 'element': <music21.note.Note D>, 'endTime': 1.5, 'offset': 1.0},
{'voiceIndex': 0, 'element': <music21.note.Note D>, 'endTime': 2.0, 'offset': 1.5},
{'voiceIndex': 0, 'element': <music21.note.Note D>, 'endTime': 2.5, 'offset': 2.0},
{'voiceIndex': 0, 'element': <music21.note.Note D>, 'endTime': 3.0, 'offset': 2.5},
{'voiceIndex': 0, 'element': <music21.note.Note D>, 'endTime': 3.5, 'offset': 3.0},
{'voiceIndex': 0, 'element': <music21.note.Note D>, 'endTime': 4.0, 'offset': 3.5},
{'voiceIndex': 1, 'element': <music21.note.Note C>, 'endTime': 1.0, 'offset': 0.0},
{'voiceIndex': 1, 'element': <music21.note.Note C>, 'endTime': 2.0, 'offset': 1.0},
{'voiceIndex': 1, 'element': <music21.note.Note C>, 'endTime': 3.0, 'offset': 2.0},
{'voiceIndex': 1, 'element': <music21.note.Note C>, 'endTime': 4.0, 'offset': 3.0},
]
''')
oMeasures = Part()
oMeasures.insert(0, s)
self.assertEqual(len(oMeasures[0].voices), 2)
self.assertEqual([e.offset for e in oMeasures[0].voices[0]], [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5])
self.assertEqual([e.offset for e in oMeasures[0].voices[1]], [0.0, 1.0, 2.0, 3.0])
GEX = m21ToXml.GeneralObjectExporter()
unused_mx = GEX.parse(s).decode('utf-8')
# try version longer than 1 measure, more than 2 voices
v1 = Voice()
n1 = note.Note('c5')
n1.quarterLength = .5
v1.repeatAppend(n1, 32)
v2 = Voice()
n2 = note.Note('c4')
n2.quarterLength = 1
v2.repeatAppend(n2, 16)
v3 = Voice()
n3 = note.Note('c3')
n3.quarterLength = .25
v3.repeatAppend(n3, 64)
v4 = Voice()
n4 = note.Note('c2')
n4.quarterLength = 4
v4.repeatAppend(n4, 4)
s = Part()
s.insert(0, v1)
s.insert(0, v2)
s.insert(0, v3)
s.insert(0, v4)
oMeasures = s.makeMeasures()
# each measures has the same number of voices
for i in range(3):
self.assertEqual(len(oMeasures[i].voices), 4)
# each measures has the same total number of voices
for i in range(3):
self.assertEqual(len(oMeasures[i].flat.notesAndRests), 29)
# each measures has the same number of notes for each voices
for i in range(3):
self.assertEqual(len(oMeasures[i].voices[0].notesAndRests), 8)
self.assertEqual(len(oMeasures[i].voices[1].notesAndRests), 4)
self.assertEqual(len(oMeasures[i].voices[2].notesAndRests), 16)
self.assertEqual(len(oMeasures[i].voices[3].notesAndRests), 1)
GEX = m21ToXml.GeneralObjectExporter()
unused_mx = GEX.parse(oMeasures).decode('utf-8')
#s.show()
def testVoicesB(self):
# make sure strip ties works
from music21 import stream
v1 = stream.Voice()
n1 = note.Note('c5')
n1.quarterLength = .5
v1.repeatAppend(n1, 27)
v2 = stream.Voice()
n2 = note.Note('c4')
n2.quarterLength = 3
v2.repeatAppend(n2, 6)
v3 = stream.Voice()
n3 = note.Note('c3')
n3.quarterLength = 8
v3.repeatAppend(n3, 4)
s = stream.Stream()
s.insert(0, v1)
s.insert(0, v2)
s.insert(0, v3)
sPost = s.makeNotation()
# voices are retained for all measures after make notation
self.assertEqual(len(sPost.getElementsByClass('Measure')), 8)
self.assertEqual(len(sPost.getElementsByClass('Measure')[0].voices), 3)
self.assertEqual(len(sPost.getElementsByClass('Measure')[1].voices), 3)
self.assertEqual(len(sPost.getElementsByClass('Measure')[5].voices), 3)
self.assertEqual(len(sPost.getElementsByClass('Measure')[7].voices), 3)
#s.show()
def testVoicesC(self):
from music21 import stream
v1 = stream.Voice()
n1 = note.Note('c5')
n1.quarterLength = .25
v1.repeatInsert(n1, [2, 4.5, 7.25, 11.75])
v2 = stream.Voice()
n2 = note.Note('c4')
n2.quarterLength = .25
v2.repeatInsert(n2, [.25, 3.75, 5.5, 13.75])
s = stream.Stream()
s.insert(0, v1)
s.insert(0, v2)
sPost = s.makeRests(fillGaps=True, inPlace=False)
self.assertEqual(str([n for n in sPost.voices[0].notesAndRests]), '[<music21.note.Rest rest>, <music21.note.Note C>, <music21.note.Rest rest>, <music21.note.Note C>, <music21.note.Rest rest>, <music21.note.Note C>, <music21.note.Rest rest>, <music21.note.Note C>, <music21.note.Rest rest>]')
self.assertEqual(str([n for n in sPost.voices[1].notesAndRests]), '[<music21.note.Rest rest>, <music21.note.Note C>, <music21.note.Rest rest>, <music21.note.Note C>, <music21.note.Rest rest>, <music21.note.Note C>, <music21.note.Rest rest>, <music21.note.Note C>]')
#sPost.show()
def testPartsToVoicesA(self):
from music21 import corpus
s0 = corpus.parse('bwv66.6')
#s.show()
s1 = s0.partsToVoices(2)
#s1.show()
#s1.show('t')
self.assertEqual(len(s1.parts), 2)
p1 = s1.parts[0]
self.assertEqual(len(p1.flat.getElementsByClass('Clef')), 1)
#p1.show('t')
# look at individual measure; check counts; these should not
# change after measure extraction
m1Raw = p1.getElementsByClass('Measure')[1]
#environLocal.printDebug(['m1Raw', m1Raw])
self.assertEqual(len(m1Raw.flat), 8)
#m1Raw.show('t')
m2Raw = p1.getElementsByClass('Measure')[2]
#environLocal.printDebug(['m2Raw', m2Raw])
self.assertEqual(len(m2Raw.flat), 9)
# get a measure from this part
# NOTE: we no longer get Clef here, as we return clefs in the
# Part outside of a Measure when using measures()
#m1 = p1.measure(2)
#self.assertEqual(len(m1.flat.getElementsByClass('Clef')), 1)
# look at individual measure; check counts; these should not
# change after measure extraction
m1Raw = p1.getElementsByClass('Measure')[1]
#environLocal.printDebug(['m1Raw', m1Raw])
self.assertEqual(len(m1Raw.flat), 8)
#m1Raw.show('t')
m2Raw = p1.getElementsByClass('Measure')[2]
#environLocal.printDebug(['m2Raw', m2Raw])
self.assertEqual(len(m2Raw.flat), 9)
#m2Raw.show('t')
#self.assertEqual(len(m1.flat.getElementsByClass('Clef')), 1)
ex1 = p1.measures(1,3)
self.assertEqual(len(ex1.flat.getElementsByClass('Clef')), 1)
#ex1.show()
for p in s1.parts:
# need to look in measures to get at voices
self.assertEqual(len(p.getElementsByClass('Measure')[0].voices), 2)
self.assertEqual(len(p.measure(2).voices), 2)
self.assertEqual(len(p.measures(
1,3).getElementsByClass('Measure')[2].voices), 2)
#s1.show()
#p1.show()
def testPartsToVoicesB(self):
from music21 import corpus
# this work has five parts: results in e parts
s0 = corpus.parse('corelli/opus3no1/1grave')
self.assertEqual(len(s0.parts), 3)
s1 = s0.partsToVoices(2, permitOneVoicePerPart=True)
self.assertEqual(len(s1.parts), 2)
self.assertEqual(len(s1.parts[0].getElementsByClass(
'Measure')[0].voices), 2)
self.assertEqual(len(s1.parts[1].getElementsByClass(
'Measure')[0].voices), 1)
#s1.show()
# s0 = corpus.parse('hwv56', '1-05')
# # can use index values
# s2 = s0.partsToVoices(([0,1], [2,4], 3), permitOneVoicePerPart=True)
# self.assertEqual(len(s2.parts), 3)
# self.assertEqual(len(s2.parts[0].getElementsByClass(
# 'Measure')[0].voices), 2)
# self.assertEqual(len(s2.parts[1].getElementsByClass(
# 'Measure')[0].voices), 2)
# self.assertEqual(len(s2.parts[2].getElementsByClass(
# 'Measure')[0].voices), 1)
#
# s2 = s0.partsToVoices((['Violino I','Violino II'], ['Viola','Bassi'], ['Basso']), permitOneVoicePerPart=True)
# self.assertEqual(len(s2.parts), 3)
# self.assertEqual(len(s2.parts[0].getElementsByClass(
# 'Measure')[0].voices), 2)
# self.assertEqual(len(s2.parts[1].getElementsByClass(
# 'Measure')[0].voices), 2)
# self.assertEqual(len(s2.parts[2].getElementsByClass(
# 'Measure')[0].voices), 1)
#
#
# # this will keep the voice part unaltered
# s2 = s0.partsToVoices((['Violino I','Violino II'], ['Viola','Bassi'], 'Basso'), permitOneVoicePerPart=False)
# self.assertEqual(len(s2.parts), 3)
# self.assertEqual(len(s2.parts[0].getElementsByClass(
# 'Measure')[0].voices), 2)
# self.assertEqual(len(s2.parts[1].getElementsByClass(
# 'Measure')[0].voices), 2)
# self.assertEqual(s2.parts[2].getElementsByClass(
# 'Measure')[0].hasVoices(), False)
#
#
# # mm 16-19 are a good examples
# s1 = corpus.parse('hwv56', '1-05').measures(16, 19)
# s2 = s1.partsToVoices((['Violino I','Violino II'], ['Viola','Bassi'], 'Basso'))
# #s2.show()
#
# self.assertEqual(len(s2.parts), 3)
# self.assertEqual(len(s2.parts[0].getElementsByClass(
# 'Measure')[0].voices), 2)
# self.assertEqual(len(s2.parts[1].getElementsByClass(
# 'Measure')[0].voices), 2)
# self.assertEqual(s2.parts[2].getElementsByClass(
# 'Measure')[0].hasVoices(), False)
def testVoicesToPartsA(self):
from music21 import corpus
s0 = corpus.parse('bwv66.6')
#s.show()
s1 = s0.partsToVoices(2) # produce two parts each with two voices
s2 = s1.parts[0].voicesToParts()
# now a two part score
self.assertEqual(len(s2.parts), 2)
# makes sure we have what we started with
self.assertEqual(len(s2.parts[0].flat.notesAndRests), len(s0.parts[0].flat.notesAndRests))
s1 = s0.partsToVoices(4) # create one staff with all parts
self.assertEqual(s1.classes[0], 'Score') # we get a Score back
# we have a Score with one part and measures, each with 4 voices
self.assertEqual(len(s1.parts[0].getElementsByClass(
'Measure')[0].voices), 4)
# need to access part
s2 = s1.voicesToParts() # return to four parts in a score;
# make sure we have what we started with
self.assertEqual(len(s2.parts[0].flat.notesAndRests),
len(s0.parts[0].flat.notesAndRests))
self.assertEqual(str([n for n in s2.parts[0].flat.notesAndRests]),
str([n for n in s0.parts[0].flat.notesAndRests]))
self.assertEqual(len(s2.parts[1].flat.notesAndRests),
len(s0.parts[1].flat.notesAndRests))
self.assertEqual(str([n for n in s2.parts[1].flat.notesAndRests]),
str([n for n in s0.parts[1].flat.notesAndRests]))
self.assertEqual(len(s2.parts[2].flat.notesAndRests),
len(s0.parts[2].flat.notesAndRests))
self.assertEqual(str([n for n in s2.parts[2].flat.notesAndRests]),
str([n for n in s0.parts[2].flat.notesAndRests]))
self.assertEqual(len(s2.parts[3].flat.notesAndRests),
len(s0.parts[3].flat.notesAndRests))
self.assertEqual(str([n for n in s2.parts[3].flat.notesAndRests]),
str([n for n in s0.parts[3].flat.notesAndRests]))
# try on a built Stream that has no Measures
# build a stream
s0 = Stream()
v1 = Voice()
v1.repeatAppend(note.Note('c3'), 4)
v2 = Voice()
v2.repeatAppend(note.Note('g4'), 4)
v3 = Voice()
v3.repeatAppend(note.Note('b5'), 4)
s0.insert(0, v1)
s0.insert(0, v2)
s0.insert(0, v3)
#s2.show()
s1 = s0.voicesToParts()
self.assertEqual(len(s1.parts), 3)
#self.assertEqual(len(s1.parts[0].flat), len(v1.flat))
self.assertEqual([e for e in s1.parts[0].flat], [e for e in v1.flat])
self.assertEqual(len(s1.parts[1].flat), len(v2.flat))
self.assertEqual([e for e in s1.parts[1].flat], [e for e in v2.flat])
self.assertEqual(len(s1.parts[2].flat), len(v3.flat))
self.assertEqual([e for e in s1.parts[2].flat], [e for e in v3.flat])
#s1.show()
def testMergeElements(self):
from music21 import stream
s1 = stream.Stream()
s2 = stream.Stream()
s3 = stream.Stream()
n1 = note.Note('f#')
n2 = note.Note('g')
s1.append(n1)
s1.append(n2)
s2.mergeElements(s1)
self.assertEqual(len(s2), 2)
self.assertEqual(id(s1[0]) == id(s2[0]), True)
self.assertEqual(id(s1[1]) == id(s2[1]), True)
s3.mergeElements(s1, classFilterList=['Rest'])
self.assertEqual(len(s3), 0)
s3.mergeElements(s1, classFilterList=['GeneralNote'])
self.assertEqual(len(s3), 2)
def testInternalize(self):
s = Stream()
n1 = note.Note()
s.repeatAppend(n1, 4)
self.assertEqual(len(s), 4)
s.internalize()
# now have one component
self.assertEqual(len(s), 1)
self.assertEqual(s[0].classes[0], 'Voice') # default is a Voice
self.assertEqual(len(s[0]), 4)
self.assertEqual(str([n for n in s.voices[0].notesAndRests]), '[<music21.note.Note C>, <music21.note.Note C>, <music21.note.Note C>, <music21.note.Note C>]')
def testDeepcopySpanners(self):
from music21 import spanner, stream
n1 = note.Note()
n2 = note.Note('a4')
n3 = note.Note('g#4')
n3.quarterLength = .25
su1 = spanner.Slur(n1, n2)
s1 = stream.Stream()
s1.append(n1)
s1.repeatAppend(n3, 4)
s1.append(n2)
s1.insert(su1)
self.assertEqual(s1.notesAndRests[0] in s1.spanners[0].getSpannedElements(), True)
self.assertEqual(s1.notesAndRests[-1] in s1.spanners[0].getSpannedElements(), True)
s2 = copy.deepcopy(s1)
# old relations are still valid
self.assertEqual(len(s1.spanners), 1)
self.assertEqual(s1.notesAndRests[0] in s1.spanners[0].getSpannedElements(), True)
self.assertEqual(s1.notesAndRests[-1] in s1.spanners[0].getSpannedElements(), True)
# new relations exist in new stream.
self.assertEqual(len(s2.spanners), 1)
self.assertEqual(s2.notesAndRests[0] in s2.spanners[0].getSpannedElements(), True)
self.assertEqual(s2.notesAndRests[-1] in s2.spanners[0].getSpannedElements(), True)
self.assertEqual(s2.spanners[0].getSpannedElements(), [s2.notesAndRests[0], s2.notesAndRests[-1]])
GEX = m21ToXml.GeneralObjectExporter()
unused_mx = GEX.parse(s2).decode('utf-8')
#s2.show('t')
#s2.show()
def testAddSlurByMelisma(self):
from music21 import corpus, spanner
s = corpus.parse('luca/gloria')
ex = s.parts[0]
nStart = None
nEnd = None
exFlatNotes = ex.flat.notesAndRests
nLast = exFlatNotes[-1]
for i, n in enumerate(exFlatNotes):
if i < len(exFlatNotes) - 1:
nNext = exFlatNotes[i+1]
else:
continue
if n.lyrics:
nStart = n
# if next is a begin, then this is an end
elif nStart is not None and nNext.lyrics and n.tie is None:
nEnd = n
elif nNext is nLast:
nEnd = n
if nStart is not None and nEnd is not None:
# insert in top-most container
ex.insert(spanner.Slur(nStart, nEnd))
nStart = None
nEnd = None
#ex.show()
exFlat = ex.flat
melismaByBeat = {}
for sp in ex.spanners:
n = sp.getFirst()
oMin, oMax = sp.getDurationSpanBySite(exFlat)
dur = oMax - oMin
beatStr = n.beatStr
if beatStr not in melismaByBeat:
melismaByBeat[beatStr] = []
melismaByBeat[beatStr].append(dur)
#environLocal.printDebug(['start note:', n, 'beat:', beatStr, 'slured duration:', dur])
for beatStr in sorted(list(melismaByBeat.keys())):
unused_avg = sum(melismaByBeat[beatStr]) / len(melismaByBeat[beatStr])
#environLocal.printDebug(['melisma beat:', beatStr.ljust(6), 'average duration:', avg])
def testTwoZeroOffset(self):
from music21 import stream
p = stream.Part()
#p.append(instrument.Voice())
p.append(note.Note("D#4"))
#environLocal.printDebug([p.offsetMap])
def testStripTiesBuiltB(self):
from music21 import stream
s1 = stream.Stream()
s1.append(meter.TimeSignature('4/4'))
s1.append(note.Note(type='quarter'))
s1.append(note.Note(type='half'))
s1.append(note.Note(type='half'))
s1.append(note.Note(type='half'))
s1.append(note.Note(type='quarter'))
s2 = s1.makeNotation()
self.assertEqual(len(s2.flat.notesAndRests), 6)
self.assertEqual(str([n.tie for n in s2.flat.notesAndRests]), '[None, None, <music21.tie.Tie start>, <music21.tie.Tie stop>, None, None]')
self.assertEqual([n.quarterLength for n in s2.flat.notesAndRests], [1.0, 2.0, 1.0, 1.0, 2.0, 1.0])
s3 = s2.stripTies(retainContainers=True)
self.assertEqual(str([n.tie for n in s3.flat.notesAndRests]), '[None, None, None, None, None]')
self.assertEqual([n.quarterLength for n in s3.flat.notesAndRests], [1.0, 2.0, 2.0, 2.0, 1.0])
self.assertEqual([n.offset for n in s3.getElementsByClass('Measure')[0].notesAndRests], [0.0, 1.0, 3.0])
self.assertEqual([n.quarterLength for n in s3.getElementsByClass('Measure')[0].notesAndRests], [1.0, 2.0, 2.0])
self.assertEqual([n.beatStr for n in s3.getElementsByClass('Measure')[0].notesAndRests], ['1', '2', '4'])
self.assertEqual([n.offset for n in s3.getElementsByClass('Measure')[1].notesAndRests], [1.0, 3.0])
self.assertEqual([n.quarterLength for n in s3.getElementsByClass('Measure')[1].notesAndRests], [2.0, 1.0])
self.assertEqual([n.beatStr for n in s3.getElementsByClass('Measure')[1].notesAndRests], ['2', '4'])
#s3.show()
def testStripTiesImportedB(self):
from music21 import corpus
# this file was imported by sibelius and does not have completeing ties
sMonte = corpus.parse('monteverdi/madrigal.4.2.xml')
s1 = sMonte.parts['Alto']
mStream = s1.getElementsByClass('Measure')
self.assertEqual([n.offset for n in mStream[3].notesAndRests], [0.0])
self.assertEqual(str([n.tie for n in mStream[3].notesAndRests]), '[<music21.tie.Tie start>]')
self.assertEqual([n.offset for n in mStream[4].notesAndRests], [0.0, 2.0])
self.assertEqual(str([n.tie for n in mStream[4].notesAndRests]), '[None, None]')
# post strip ties; must use matchByPitch
s2 = s1.stripTies(retainContainers=True, matchByPitch=True)
mStream = s2.getElementsByClass('Measure')
self.assertEqual([n.offset for n in mStream[3].notesAndRests], [0.0])
self.assertEqual(str([n.tie for n in mStream[3].notesAndRests]), '[None]')
self.assertEqual([n.offset for n in mStream[4].notesAndRests], [2.0])
self.assertEqual(str([n.tie for n in mStream[4].notesAndRests]), '[None]')
self.assertEqual([n.offset for n in mStream[5].notesAndRests], [0.0, 0.5, 1.0, 1.5, 2.0, 3.0])
def testDerivationA(self):
from music21 import stream, corpus
s1 = stream.Stream()
s1.repeatAppend(note.Note(), 10)
s1.repeatAppend(chord.Chord(), 10)
# for testing against
s2 = stream.Stream()
s3 = s1.getElementsByClass('GeneralNote')
self.assertEqual(len(s3), 20)
#environLocal.printDebug(['s3.derivation.origin', s3.derivation.origin])
self.assertEqual(s3.derivation.origin is s1, True)
self.assertEqual(s3.derivation.origin is not s2, True)
s4 = s3.getElementsByClass('Chord')
self.assertEqual(len(s4), 10)
self.assertEqual(s4.derivation.origin is s3, True)
# test imported and flat
s = corpus.parse('bach/bwv66.6')
p1 = s.parts[0]
# the part is not derived from anything yet
self.assertEqual(p1.derivation.origin, None)
p1Flat = p1.flat
self.assertEqual(p1.flat.derivation.origin is p1, True)
self.assertEqual(p1.flat.derivation.origin is s, False)
p1FlatNotes = p1Flat.notesAndRests
self.assertEqual(p1FlatNotes.derivation.origin is p1Flat, True)
self.assertEqual(p1FlatNotes.derivation.origin is p1, False)
self.assertEqual(list(p1FlatNotes.derivation.chain()), [p1Flat, p1])
# we cannot do this, as each call to flat produces a new Stream
self.assertEqual(p1.flat.notesAndRests.derivation.origin is p1.flat, False)
# chained calls to .derives from can be used
self.assertEqual(p1.flat.notesAndRests.derivation.origin.derivation.origin is p1, True)
# can use rootDerivation to get there faster
self.assertEqual(p1.flat.notesAndRests.derivation.rootDerivation is p1, True)
# this does not work because are taking an item via in index
# value, and this Measure is not derived from a Part
self.assertEqual(p1.getElementsByClass(
'Measure')[3].flat.notesAndRests.derivation.rootDerivation is p1, False)
# the root here is the Measure
self.assertEqual(p1.getElementsByClass(
'Measure')[3].flat.notesAndRests.derivation.rootDerivation is p1.getElementsByClass(
'Measure')[3], True)
m4 = p1.measure(4)
self.assertTrue(m4.flat.notesAndRests.derivation.rootDerivation is m4, list(m4.flat.notesAndRests.derivation.chain()))
# part is the root derivation of a measures() call
mRange = p1.measures(4, 6)
self.assertEqual(mRange.derivation.rootDerivation, p1)
self.assertEqual(mRange.flat.notesAndRests.derivation.rootDerivation, p1)
self.assertEqual(s.flat.getElementsByClass(
'Rest').derivation.rootDerivation is s, True)
# we cannot use the activeSite to get the Part from the Measure, as
# the activeSite was set when doing the getElementsByClass operation
self.assertEqual(p1.getElementsByClass(
'Measure')[3].activeSite is p1, False)
def testDerivationB(self):
from music21 import stream
s1 = stream.Stream()
s1.repeatAppend(note.Note(), 10)
s1Flat = s1.flat
self.assertEqual(s1Flat.derivation.origin is s1, True)
# check what the derivation object thinks its container is
self.assertEqual(s1Flat._derivation.client is s1Flat, True)
s2 = copy.deepcopy(s1Flat)
self.assertEqual(s2.derivation.origin is s1Flat, True)
self.assertEqual(s2.derivation.origin.derivation.origin is s1, True)
# check low level attrbiutes
self.assertEqual(s2._derivation.client is s2, True)
def testDerivationC(self):
from music21 import corpus
s = corpus.parse('bach/bwv66.6')
p1 = s.parts['Soprano']
pMeasures = p1.measures(3, 10)
pMeasuresFlat = pMeasures.flat
pMeasuresFlatNotes = pMeasuresFlat.notesAndRests
self.assertEqual(list(pMeasuresFlatNotes.derivation.chain()), [pMeasuresFlat, pMeasures, p1])
def testDerivationMethodA(self):
from music21 import stream, converter
s1 = stream.Stream()
s1.repeatAppend(note.Note(), 10)
s1Flat = s1.flat
self.assertEqual(s1Flat.derivation.origin is s1, True)
self.assertEqual(s1Flat.derivation.method is 'flat', True)
s1Elements = s1Flat.getElementsByClass('Note')
self.assertEqual(s1Elements.derivation.method is 'getElementsByClass', True)
s1 = converter.parse("tinyNotation: 4/4 C2 D2")
s1m = s1.makeMeasures()
self.assertEqual(s1m.derivation.method, 'makeMeasures')
s1m1 = s1m.measure(1)
self.assertEqual(s1m1.derivation.origin, None)
def testcontainerHierarchyA(self):
from music21 import corpus
s = corpus.parse('bach/bwv66.6')
# the part is not derived from anything yet
self.assertEqual([str(e.__class__) for e in s[1][2][3].containerHierarchy], ["<class 'music21.stream.Measure'>", "<class 'music21.stream.Part'>", "<class 'music21.stream.Score'>"])
# after extraction and changing activeSite, cannot find
n = s.flat.notesAndRests[0]
self.assertEqual([common.classToClassStr(e.__class__) for e in n.containerHierarchy], ['Score', 'Score'] )
# still cannot get hierarchy
#self.assertEqual([str(e.__class__) for e in s.parts[0].containerHierarchy], [])
def testMakeMeasuresTimeSignatures(self):
from music21 import stream
sSrc = stream.Stream()
sSrc.append(note.Note('C4', type='quarter'))
sSrc.append(note.Note('D4', type='quarter'))
sSrc.append(note.Note('E4', type='quarter'))
sMeasures = sSrc.makeMeasures()
# added 4/4 here as default
self.assertEqual(str(sMeasures[0].timeSignature), '<music21.meter.TimeSignature 4/4>')
# no time signature are in the source
self.assertEqual(len(sSrc.flat.getElementsByClass('TimeSignature')), 0)
# we add one time signature
sSrc.insert(0.0, meter.TimeSignature('2/4'))
self.assertEqual(len(sSrc.flat.getElementsByClass('TimeSignature')), 1)
sMeasuresTwoFour = sSrc.makeMeasures()
self.assertEqual(str(sMeasuresTwoFour[0].timeSignature), '<music21.meter.TimeSignature 2/4>')
self.assertEqual(sMeasuresTwoFour.isSorted, True)
# check how many time signature we have:
# we should have 1
self.assertEqual(len(
sMeasuresTwoFour.flat.getElementsByClass('TimeSignature')), 1)
def testDeepcopyActiveSite(self):
# test that active sites make sense after deepcopying
from music21 import stream, corpus
s = stream.Stream()
n = note.Note()
s.append(n)
self.assertEqual(id(n.activeSite), id(s))
# test that elements in stream get their active site properly copied
s1 = copy.deepcopy(s)
n1 = s1[0]
self.assertEqual(id(n1.activeSite), id(s1))
s = stream.Stream()
m = stream.Measure()
n = note.Note()
m.append(n)
s.append(m)
self.assertEqual(id(n.activeSite), id(m))
self.assertEqual(id(m.activeSite), id(s))
s1 = copy.deepcopy(s)
m1 = s1[0]
n1 = m1[0]
self.assertEqual(id(n1.activeSite), id(m1))
self.assertEqual(id(m1.activeSite), id(s1))
# try imported
s = corpus.parse('madrigal.5.8.rntxt')
p = s[1] # for test, not .parts
m = p[2] # for test, not .getElementsByClass('Measure')
rn = m[2]
self.assertEqual(id(rn.activeSite), id(m))
self.assertEqual(id(m.activeSite), id(p))
self.assertEqual(id(p.activeSite), id(s))
s1 = copy.deepcopy(s)
p1 = s1[1]
m1 = p1[2]
rn1 = m1[2]
self.assertEqual(id(rn1.activeSite), id(m1))
self.assertEqual(id(m1.activeSite), id(p1))
self.assertEqual(id(p1.activeSite), id(s1))
def testRecurseA(self):
from music21 import corpus
s = corpus.parse('bwv66.6')
# default
rElements = list(s.recurse()) # NOTE: list(s.recurse())
# removes self, while [x for x in s.recurse()] does not.
self.assertTrue(s in rElements)
self.assertEqual(len(rElements), 240)
return
rElements = list(s.recurse(streamsOnly=True))
self.assertEqual(len(rElements), 45)
p1 = rElements[1]
m1 = rElements[2]
#m2 = rElements[3]
m2 = rElements[4]
self.assertIs(p1.activeSite, s)
self.assertIs(m1.activeSite, p1)
self.assertIs(m2.activeSite, p1)
rElements = list(s.recurse(classFilter='KeySignature'))
self.assertEqual(len(rElements), 4)
# the first elements active site is the measure
self.assertEqual(id(rElements[0].activeSite), id(m1))
rElements = list(s.recurse(classFilter=['TimeSignature']))
self.assertEqual(len(rElements), 4)
# s = corpus.parse('bwv66.6')
# m1 = s[2][1] # cannot use parts here as breaks active site
# rElements = list(m1.recurse(direction='upward'))
# self.assertEqual([str(e.classes[0]) for e in rElements], ['Measure',
# 'Instrument',
# 'Part',
# 'Metadata',
# 'Part',
# 'Score',
# 'Part',
# 'Part',
# 'StaffGroup',
# 'Measure',
# 'Measure',
# 'Measure',
# 'Measure',
# 'Measure',
# 'Measure',
# 'Measure',
# 'Measure',
# 'Measure'])
# self.assertEqual(len(rElements), 18)
def testRecurseB(self):
from music21 import corpus
s = corpus.parse('madrigal.5.8.rntxt')
self.assertEqual(len(s.flat.getElementsByClass('KeySignature')), 1)
for e in s.recurse(classFilter='KeySignature'):
e.activeSite.remove(e)
self.assertEqual(len(s.flat.getElementsByClass('KeySignature')), 0)
def testTransposeScore(self):
from music21 import corpus
s = corpus.parse('bwv66.6')
p1 = s.parts[0]
pitch1 = p1.flat.notesAndRests[0]
pitch2 = pitch1.transpose('P4', inPlace=False)
self.assertEqual(str(pitch1), '<music21.note.Note C#>')
self.assertEqual(str(pitch2), '<music21.note.Note F#>')
# can now transpose a part alone as is recursive
p2 = p1.transpose('P4', inPlace=False)
self.assertEqual(str(p1.flat.notesAndRests[0]), '<music21.note.Note C#>')
self.assertEqual(str(p2.flat.notesAndRests[0]), '<music21.note.Note F#>')
p2 = p1.flat.transpose('P4', inPlace=False)
self.assertEqual(str(p1.flat.notesAndRests[0]), '<music21.note.Note C#>')
self.assertEqual(str(p2.flat.notesAndRests[0]), '<music21.note.Note F#>')
def testExtendDurationA(self):
# spanners in this were causing some problems
from music21.musicxml import testFiles
from music21 import converter
# testing a file a file with dynamics
a = converter.parse(testFiles.schumannOp48No1) # @UndefinedVariable
unused_b = a.flat
#b = a.flat.extendDuration(dynamics.Dynamic)
def testSpannerTransferA(self):
from music21 import corpus
# test getting spanners after .measures extraction
s = corpus.parse('corelli/opus3no1/1grave')
post = s.parts[0].measures(5, 10)
# two per part
rbSpanners = post.getElementsByClass('Slur')
self.assertEqual(len(rbSpanners), 6)
#post.parts[0].show()
unused_firstSpannedElementIds = [id(x) for x in rbSpanners[0].getSpannedElements()]
unused_secondSpannedElementIds = [id(x) for x in rbSpanners[1].getSpannedElements()]
#self.assertEqual()
# TODO: compare ids of new measures
def testMeasureGrouping(self):
from music21 import corpus
def parseMeasures(piece):
#The measures of the piece, for a unique extraction
voicesMeasures = []
for part in piece.parts:
# not all things in a Part are Measure objects; you might
# also find Instruments and Spanners, for example.
# thus, filter by Measure first to get the highest measure number
mMax = part.getElementsByClass('Measure')[-1].number
# the measures() method returns more than just measures;
# it the Part it returns includes Slurs, that may reside at the
# Part level
voicesMeasures.append(part.measures(0, mMax))
#The problem itself : print a measure to check if len(notes) == 0
for voice in voicesMeasures:
# only get the Measures, not everything in the Part
for meas in voice.getElementsByClass('Measure'):
# some Measures contain Voices, some do not
# do get all notes regardless of Voices, take a flat measure
self.assertEqual(len(meas.flat.notesAndRests) != 0, True)
piece = corpus.parse('corelli/opus3no1/1grave')
parseMeasures(piece)
piece = corpus.parse('bach/bwv7.7')
parseMeasures(piece)
def testMakeNotationByMeasuresA(self):
from music21 import stream
m = stream.Measure()
m.repeatAppend(note.Note('c#', quarterLength=.5), 4)
m.repeatAppend(note.Note('c', quarterLength=1/3.), 6)
# calls makeAccidentals, makeBeams, makeTuplets
m.makeNotation(inPlace=True)
# after running, there should only be two displayed accidentals
self.assertEqual([str(n.pitch.accidental) for n in m.notes],
['<accidental sharp>', '<accidental sharp>', '<accidental sharp>', '<accidental sharp>', '<accidental natural>', 'None', 'None', 'None', 'None', 'None'])
self.assertEqual([n.pitch.accidental.displayStatus for n in m.notes[:5]], [True, False, False, False, True])
GEX = m21ToXml.GeneralObjectExporter()
raw = GEX.parse(m).decode('utf-8')
self.assertTrue(raw.find('<tuplet bracket="yes" placement="above"') > 0, raw)
self.assertTrue(raw.find('<beam number="1">begin</beam>') > 0, raw)
def testMakeNotationByMeasuresB(self):
from music21 import stream
m = stream.Measure()
m.repeatAppend(note.Note('c#', quarterLength=.5), 4)
m.repeatAppend(note.Note('c', quarterLength=1/3.), 6)
GEX = m21ToXml.GeneralObjectExporter()
raw = GEX.parse(m).decode('utf-8')
self.assertEqual(raw.find('<beam number="1">begin</beam>') > 0, True)
self.assertEqual(raw.find('<tuplet bracket="yes" placement="above"') > 0, True)
def testHaveAccidentalsBeenMadeA(self):
from music21 import stream
m = stream.Measure()
m.append(note.Note('c#'))
m.append(note.Note('c'))
m.append(note.Note('c#'))
m.append(note.Note('c'))
#m.show() on musicxml output, accidentals will be made
self.assertEqual(m.haveAccidentalsBeenMade(), False)
m.makeAccidentals()
self.assertEqual(m.haveAccidentalsBeenMade(), True)
def testHaveAccidentalsBeenMadeB(self):
from music21 import stream
m1 = stream.Measure()
m1.repeatAppend(note.Note('c#'), 4)
m2 = stream.Measure()
m2.repeatAppend(note.Note('c'), 4)
p = stream.Part()
p.append([m1, m2])
#p.show()
# test result of xml output to make sure a natural has been hadded
GEX = m21ToXml.GeneralObjectExporter()
raw = GEX.parse(p).decode('utf-8')
self.assertEqual(raw.find('<accidental>natural</accidental>') > 0, True)
# make sure original is not chagned
self.assertEqual(p.haveAccidentalsBeenMade(), False)
def testHaveBeamsBeenMadeA(self):
from music21 import stream
m1 = stream.Measure()
m1.timeSignature = meter.TimeSignature('4/4')
m1.repeatAppend(note.Note('c#', quarterLength=.5), 8)
m2 = stream.Measure()
m2.repeatAppend(note.Note('c', quarterLength=.5), 8)
p = stream.Part()
p.append([m1, m2])
self.assertEqual(p.streamStatus.haveBeamsBeenMade(), False)
p.makeBeams(inPlace=True)
self.assertEqual(p.streamStatus.haveBeamsBeenMade(), True)
def testHaveBeamsBeenMadeB(self):
from music21 import stream
m1 = stream.Measure()
m1.timeSignature = meter.TimeSignature('4/4')
m1.repeatAppend(note.Note('c#', quarterLength=.5), 8)
m2 = stream.Measure()
m2.repeatAppend(note.Note('c', quarterLength=.5), 8)
p = stream.Part()
p.append([m1, m2])
self.assertEqual(p.streamStatus.haveBeamsBeenMade(), False)
GEX = m21ToXml.GeneralObjectExporter()
raw = GEX.parse(p).decode('utf-8')
# after getting musicxml, make sure that we have not changed the source
#p.show()
self.assertEqual(p.streamStatus.haveBeamsBeenMade(), False)
self.assertEqual(raw.find('<beam number="1">end</beam>') > 0, True)
def testFlatCachingA(self):
from music21 import corpus
s = corpus.parse('bwv66.6')
flat1 = s.flat
flat2 = s.flat
self.assertEqual(id(flat1), id(flat2))
flat1.insert(0, note.Note('g'))
self.assertNotEqual(id(flat1), s.flat)
def testFlatCachingB(self):
from music21 import corpus
sSrc = corpus.parse('bach/bwv13.6.xml')
sPart = sSrc.getElementById('Alto')
ts = meter.TimeSignature('6/8')
# for n in sPart.flat.notesAndRests:
# bs = n.beatStr
#environLocal.printDebug(['calling makeMeasures'])
sPartFlat = sPart.flat
unused_notesAndRests = sPartFlat.notesAndRests
# test cache...
sMeasures = sPart.flat.notesAndRests.makeMeasures(ts)
target = []
for n in sMeasures.flat.notesAndRests:
target.append(n.beatStr)
self.assertEqual(target, ['1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '2 2/3', '1', '1 1/3', '1 2/3', '2', '2 1/3', '1', '1 2/3', '1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1', '1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1 2/3', '2 1/3', '1', '1 1/3', '1 2/3', '2', '2 1/3', '2 2/3', '1', '1 1/3', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1', '1 1/3', '1 2/3', '2 1/3', '2 2/3', '1', '1 2/3', '2', '2 1/3'])
def testFlatCachingC(self):
from music21 import corpus, stream
qj = corpus.parse('ciconia/quod_jactatur').parts[0]
unused_idFlat1 = id(qj.flat)
#environLocal.printDebug(['idFlat1', idFlat1])
k1 = qj.flat.getElementsByClass(key.KeySignature)[0]
qj.flat.replace(k1, key.KeySignature(-3))
unused_idFlat2 = id(qj.flat)
#environLocal.printDebug(['idFlat2', idFlat2])
unused_m1 = qj.getElementsByClass(stream.Measure)[1]
#m1.show('t')
#m1.insert(0, key.KeySignature(5))
qj[1].insert(0, key.KeySignature(5))
#qj.elementsChanged()
unused_keySigSearch = qj.flat.getElementsByClass(key.KeySignature)
for n in qj.flat.notes:
junk = n.getContextByClass(key.KeySignature)
#print junk
unused_qj2 = qj.invertDiatonic(note.Note('F4'), inPlace = False)
#qj2.measures(1,2).show('text')
def testSemiFlatCachingA(self):
from music21 import corpus
s = corpus.parse('bwv66.6')
ssf1 = s.semiFlat
ssf2 = s.semiFlat
self.assertEqual(id(ssf1), id(ssf2))
ts = s.parts[0].getElementsByClass(
'Measure')[3].getContextByClass('TimeSignature')
self.assertEqual(str(ts), '<music21.meter.TimeSignature 4/4>')
#environLocal.printDebug(['ts', ts])
beatStr = s.parts[0].getElementsByClass(
'Measure')[3].notes[3].beatStr
self.assertEqual(beatStr, '3')
#environLocal.printDebug(['beatStr', beatStr])
# def testDeepCopyLocations(self):
# from music21 import stream, note
# s1 = stream.Stream()
# n1 = note.Note()
# s1.append(n1)
# print [id(x) for x in n1.getSites()]
# s2 = copy.deepcopy(s1)
# #print s2[0].getSites()
# print [id(x) for x in s2[0].getSites()]
def testFlattenUnnecessaryVoicesA(self):
from music21 import stream
s = stream.Stream()
v1 = stream.Voice()
v2 = stream.Voice()
s.insert(0, v1)
s.insert(0, v2)
self.assertEqual(len(s.voices), 2)
s.flattenUnnecessaryVoices(inPlace=True)
# as empty, are removed
self.assertEqual(len(s.voices), 0)
# next case: one voice empty, other with notes
s = stream.Stream()
v1 = stream.Voice()
v2 = stream.Voice()
n1 = note.Note()
n2 = note.Note()
v1.insert(10, n1)
v1.insert(20, n2)
s.insert(50, v1) # need to test inclusion of this offset
s.insert(50, v2)
self.assertEqual(len(s.voices), 2)
s.flattenUnnecessaryVoices(inPlace=True)
# as empty, are removed
self.assertEqual(len(s.voices), 0)
self.assertEqual(len(s.notes), 2)
self.assertEqual(n1.getOffsetBySite(s), 60)
self.assertEqual(n2.getOffsetBySite(s), 70)
# last case: two voices with notes
s = stream.Stream()
v1 = stream.Voice()
v2 = stream.Voice()
n1 = note.Note()
n2 = note.Note()
n3 = note.Note()
v1.insert(10, n1)
v1.insert(20, n2)
v2.insert(20, n3)
s.insert(50, v1) # need to test inclusion of this offset
s.insert(50, v2)
self.assertEqual(len(s.voices), 2)
s.flattenUnnecessaryVoices(inPlace=True)
# none are removed by default
self.assertEqual(len(s.voices), 2)
# can force
s.flattenUnnecessaryVoices(force=True, inPlace=True)
self.assertEqual(len(s.voices), 0)
self.assertEqual(len(s.notes), 3)
def testGetElementBeforeOffsetA(self):
from music21 import stream
s = stream.Stream()
n1 = note.Note()
n2 = note.Note()
n3 = note.Note()
s.insert(0, n1)
s.insert(3, n2)
s.insert(5, n3)
self.assertEqual(s.getElementBeforeOffset(5), n2)
self.assertEqual(s.getElementBeforeOffset(5.1), n3)
self.assertEqual(s.getElementBeforeOffset(3), n1)
self.assertEqual(s.getElementBeforeOffset(3.2), n2)
self.assertEqual(s.getElementBeforeOffset(0), None)
self.assertEqual(s.getElementBeforeOffset(0.3), n1)
self.assertEqual(s.getElementBeforeOffset(5, ['Note']), n2)
self.assertEqual(s.getElementBeforeOffset(0.3, ['GeneralNote']), n1)
def testGetElementBeforeOffsetB(self):
from music21 import stream
s = stream.Stream()
# fill with clefs to test class matching
n1 = note.Note()
n2 = note.Note()
n3 = note.Note()
s.insert(0, n1)
s.insert(0, clef.SopranoClef())
s.insert(2, clef.BassClef())
s.insert(3, n2)
s.insert(3, clef.TrebleClef())
s.insert(3.1, clef.TenorClef())
s.insert(5, n3)
self.assertEqual(s.getElementBeforeOffset(5, ['Note']), n2)
self.assertEqual(s.getElementBeforeOffset(5.1, ['Note']), n3)
self.assertEqual(s.getElementBeforeOffset(3, ['Note']), n1)
self.assertEqual(s.getElementBeforeOffset(3.2, ['Note']), n2)
self.assertEqual(s.getElementBeforeOffset(0, ['Note']), None)
self.assertEqual(s.getElementBeforeOffset(0.3, ['Note']), n1)
def testFinalBarlinePropertyA(self):
from music21 import stream
s = stream.Stream()
m1 = stream.Measure()
m1.repeatAppend(note.Note(quarterLength=2.0), 2)
m2 = stream.Measure()
m2.repeatAppend(note.Note(quarterLength=2.0), 2)
s.append([m1, m2])
s.finalBarline = 'dotted'
self.assertEqual(str(s.getElementsByClass('Measure')[-1].rightBarline), '<music21.bar.Barline style=dotted>')
self.assertEqual(str(s.finalBarline), '<music21.bar.Barline style=dotted>')
s.finalBarline = 'final'
self.assertEqual(str(s.getElementsByClass('Measure')[-1].rightBarline), '<music21.bar.Barline style=final>')
self.assertEqual(str(s.finalBarline), '<music21.bar.Barline style=final>')
#s.show()
def testFinalBarlinePropertyB(self):
from music21 import corpus
s = corpus.parse('bwv66.6')
sop = s.parts[0]
self.assertEqual(str(sop.finalBarline), '<music21.bar.Barline style=final>')
sop.finalBarline = 'double'
self.assertEqual(str(sop.finalBarline), '<music21.bar.Barline style=double>')
# process entire Score
s.finalBarline = 'tick'
self.assertEqual(str(s.finalBarline), '[<music21.bar.Barline style=tick>, <music21.bar.Barline style=tick>, <music21.bar.Barline style=tick>, <music21.bar.Barline style=tick>]')
# can set a heterogenous final barlines
s.finalBarline = ['final', 'none']
self.assertEqual(str(s.finalBarline), '[<music21.bar.Barline style=final>, <music21.bar.Barline style=none>, <music21.bar.Barline style=final>, <music21.bar.Barline style=none>]')
# def testGraceNoteSortingA(self):
# from music21 import stream
#
# n1 = note.Note('C', type='16th')
# n2 = note.Note('D', type='16th')
# n3 = note.Note('E', type='16th')
# n4 = note.Note('F', type='16th')
# n5 = note.Note('G', type='16th')
#
# s = stream.Stream()
#
# n1.makeGrace()
# s.append(n1)
# n2.makeGrace()
# s.append(n2)
#
# s.append(n3)
#
# n4.makeGrace()
# s.append(n4)
# s.append(n5)
#
# self.assertEqual(s._getGracesAtOffset(0), [n1, n2])
# self.assertEqual(s._getGracesAtOffset(.25), [n4])
#
# match = [(n.name, n.offset, n.quarterLength, n.priority) for n in s]
# self.assertEqual(match,
# [('C', 0.0, 0.0, -100),
# ('D', 0.0, 0.0, -99),
# ('E', 0.0, 0.25, 0),
# ('F', 0.25, 0.0, -100),
# ('G', 0.25, 0.25, 0)])
# def testGraceNoteSortingB(self):
# from music21 import stream
#
# n1 = note.Note('C', type='16th')
# n2 = note.Note('D', type='16th')
# n3 = note.Note('E', type='16th')
# n4 = note.Note('F', type='16th')
# n5 = note.Note('G', type='16th')
# s = stream.Stream()
#
# n1.makeGrace()
# s.append(n1)
# n2.makeGrace()
# s.append(n2)
# n3.makeGrace()
# s.append(n3)
#
# s.append(n4)
# n5.makeGrace() # grace at end
# s.append(n5)
#
# #s.show('t')
#
# self.assertEqual(s._getGracesAtOffset(0), [n1, n2, n3])
# self.assertEqual(s._getGracesAtOffset(.25), [n5])
#
# match = [(n.name, n.offset, n.quarterLength, n.priority) for n in s]
# self.assertEqual(match,
# [('C', 0.0, 0.0, -100),
# ('D', 0.0, 0.0, -99),
# ('E', 0.0, 0.0, -98),
# ('F', 0.0, 0.25, 0),
# ('G', 0.25, 0.0, -100)])
# add a clef; test sorting
# problem: this sorts priority before class
# c1 = clef.AltoClef()
# s.insert(0, c1)
# s.show('t')
# self.assertEqual(c1, s[0]) # should be first
def testStreamElementsComparison(self):
from music21 import stream
s1 = stream.Stream()
s1.repeatAppend(note.Note(), 7)
n1 = note.Note()
s1.append(n1)
s2 = stream.Stream()
s2.elements = s1
match = []
for e in s2.elements:
match.append(e.getOffsetBySite(s2))
self.assertEqual(match, [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
# have the same object in each stream
self.assertEqual(id(s2[-1]), id(s1[-1]))
s3 = stream.Stream()
s4 = stream.Stream()
s4.insert(25, n1) # active site is now changed
s3.elements = s1.elements
match = []
for e in s3.elements:
match.append(e.getOffsetBySite(s3))
# this is not desirable but results from setting of last active site
# before elements assignment
self.assertEqual(match, [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 25.0])
#s3.elements = s1
s3 = s1[:]
match = []
for e in s3.elements:
match.append(e.getOffsetBySite(s3))
self.assertEqual(match, [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
# this resets active site, so we get the right offsets on element
# assignment
s3.elements = s1
match = []
for e in s3.elements:
match.append(e.getOffsetBySite(s3))
self.assertEqual(match, [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
s5 = stream.Stream()
s5.elements = s1
match = []
for e in s5.elements:
match.append(e.getOffsetBySite(s5))
self.assertEqual(match, [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
def testSecondsPropertyA(self):
from music21 import stream, tempo
# simple case of one tempo
s = stream.Stream()
s.insert(0, tempo.MetronomeMark(number=60))
s.repeatAppend(note.Note(), 60)
self.assertEqual(s.seconds, 60.0)
s = stream.Stream()
s.insert(0, tempo.MetronomeMark(number=90))
s.repeatAppend(note.Note(), 60)
self.assertEqual(s.seconds, 40.0)
s = stream.Stream()
s.insert(0, tempo.MetronomeMark(number=120))
s.repeatAppend(note.Note(), 60)
self.assertEqual(s.seconds, 30.0)
# changing tempo mid-stream
s = stream.Stream()
s.insert(0, tempo.MetronomeMark(number=60))
s.repeatAppend(note.Note(), 60)
s.insert(30, tempo.MetronomeMark(number=120))
# 30 notes at 60, 30 notes at 120
self.assertEqual(s.seconds, 30.0 + 15.0)
s = stream.Stream()
s.insert(0, tempo.MetronomeMark(number=60))
s.repeatAppend(note.Note(), 60)
s.insert(15, tempo.MetronomeMark(number=120))
s.insert(30, tempo.MetronomeMark(number=240))
s.insert(45, tempo.MetronomeMark(number=480))
# 15 notes at 60, 15 notes at 120, 15 at 240, 15 at 480
self.assertEqual(s.seconds, 15.0 + 7.5 + 3.75 + 1.875)
def testSecondsPropertyB(self):
from music21 import corpus, tempo
s = corpus.parse('bwv66.6')
sFlat = s.flat
# we have not tempo
self.assertEqual(len(sFlat.getElementsByClass('TempoIndication')), 0)
sFlat.insert(0, tempo.MetronomeMark('adagio'))
self.assertAlmostEquals(sFlat.seconds, 38.57142857)
sFlat.removeByClass('TempoIndication')
sFlat.insert(0, tempo.MetronomeMark('presto'))
self.assertAlmostEquals(sFlat.seconds, 11.73913043)
sFlat.removeByClass('TempoIndication')
sFlat.insert(0, tempo.MetronomeMark('prestissimo'))
self.assertAlmostEquals(sFlat.seconds, 10.38461538)
def testSecondsPropertyC(self):
from music21 import stream, tempo
s = stream.Stream()
m1 = stream.Measure()
m1.timeSignature = meter.TimeSignature('3/4')
mm = tempo.MetronomeMark(number=60)
m1.insert(0, mm)
m1.insert(note.Note(quarterLength=3))
s.append(m1)
m2 = stream.Measure()
m2.timeSignature = meter.TimeSignature('5/4')
m2.insert(note.Note(quarterLength=5))
s.append(m2)
m3 = stream.Measure()
m3.timeSignature = meter.TimeSignature('2/4')
m3.insert(note.Note(quarterLength=2))
s.append(m3)
self.assertEqual([m.seconds for m in s.getElementsByClass('Measure')], [3.0, 5.0, 2.0])
mm.number = 120
self.assertEqual([m.seconds for m in s.getElementsByClass('Measure')], [1.5, 2.5, 1.0])
mm.number = 30
self.assertEqual([m.seconds for m in s.getElementsByClass('Measure')], [6.0, 10.0, 4.0])
# TODO: New piece with Metronome Mark Boundaries
# def testMetronomeMarkBoundaries(self):
# from music21 import corpus
# s = corpus.parse('hwv56/movement2-09.md')
# mmBoundaries = s.metronomeMarkBoundaries()
# self.assertEqual(str(mmBoundaries), '[(0.0, 20.0, <music21.tempo.MetronomeMark Largo e piano Quarter=46>)]')
def testAccumulatedTimeA(self):
from music21 import stream, tempo
s = stream.Stream()
s.repeatAppend(note.Note(), 8)
s.insert([0, tempo.MetronomeMark(number=60)])
mmBoundaries = s.metronomeMarkBoundaries()
self.assertEqual(s._accumulatedSeconds(mmBoundaries, 0, 1), 1.0)
self.assertEqual(s._accumulatedSeconds(mmBoundaries, 0, 2), 2.0)
self.assertEqual(s._accumulatedSeconds(mmBoundaries, 0, 8), 8.0)
# changing in the middle of boundary
s = stream.Stream()
s.repeatAppend(note.Note(), 8)
s.insert([0, tempo.MetronomeMark(number=60),
4, tempo.MetronomeMark(number=120)])
mmBoundaries = s.metronomeMarkBoundaries()
self.assertEqual(s._accumulatedSeconds(mmBoundaries, 0, 4), 4.0)
self.assertEqual(s._accumulatedSeconds(mmBoundaries, 4, 8), 2.0)
self.assertEqual(s._accumulatedSeconds(mmBoundaries, 0, 8), 6.0)
def testAccumulatedTimeB(self):
from music21 import stream, tempo
# changing in the middle of boundary
s = stream.Stream()
s.repeatAppend(note.Note(), 8)
s.insert([0, tempo.MetronomeMark(number=60),
4, tempo.MetronomeMark(number=120),
6, tempo.MetronomeMark(number=240)])
mmBoundaries = s.metronomeMarkBoundaries()
self.assertEqual(s._accumulatedSeconds(mmBoundaries, 0, 4), 4.0)
self.assertEqual(s._accumulatedSeconds(mmBoundaries, 4, 6), 1.0)
self.assertEqual(s._accumulatedSeconds(mmBoundaries, 6, 8), 0.5)
self.assertEqual(s._accumulatedSeconds(mmBoundaries, 0, 8), 5.5)
def testSecondsMapA(self):
from music21 import stream, tempo
s = stream.Stream()
s.repeatAppend(note.Note(), 8)
s.insert([0, tempo.MetronomeMark(number=90),
4, tempo.MetronomeMark(number=120),
6, tempo.MetronomeMark(number=240)])
self.assertEqual(str(s.metronomeMarkBoundaries()), '[(0.0, 4.0, <music21.tempo.MetronomeMark maestoso Quarter=90>), (4.0, 6.0, <music21.tempo.MetronomeMark animato Quarter=120>), (6.0, 8.0, <music21.tempo.MetronomeMark Quarter=240>)]')
# not starting
s = stream.Stream()
s.repeatAppend(note.Note(), 8)
s.insert([4, tempo.MetronomeMark(number=120),
6, tempo.MetronomeMark(number=240)])
self.assertEqual(str(s.metronomeMarkBoundaries()), '[(0.0, 4.0, <music21.tempo.MetronomeMark animato Quarter=120>), (4.0, 6.0, <music21.tempo.MetronomeMark animato Quarter=120>), (6.0, 8.0, <music21.tempo.MetronomeMark Quarter=240>)]')
# none
s = stream.Stream()
s.repeatAppend(note.Note(), 8)
self.assertEqual(str(s.metronomeMarkBoundaries()), '[(0.0, 8.0, <music21.tempo.MetronomeMark animato Quarter=120>)]')
# ont mid stream
s = stream.Stream()
s.repeatAppend(note.Note(), 8)
s.insert([6, tempo.MetronomeMark(number=240)])
self.assertEqual(str(s.metronomeMarkBoundaries()), '[(0.0, 6.0, <music21.tempo.MetronomeMark animato Quarter=120>), (6.0, 8.0, <music21.tempo.MetronomeMark Quarter=240>)]')
# one start stream
s = stream.Stream()
s.repeatAppend(note.Note(), 8)
s.insert([0, tempo.MetronomeMark(number=240)])
self.assertEqual(str(s.metronomeMarkBoundaries()), '[(0.0, 8.0, <music21.tempo.MetronomeMark Quarter=240>)]')
def testSecondsMapB(self):
from music21 import stream, tempo
# one start stream
s = stream.Stream()
s.repeatAppend(note.Note(), 2)
s.insert([0, tempo.MetronomeMark(number=60)])
sMap = s._getSecondsMap()
sMapStr = "[" # construct string from dict in fixed order...
for ob in sMap:
sMapStr += "{'durationSeconds': " + str(ob['durationSeconds']) + ", 'voiceIndex': " + str(ob['voiceIndex']) + ", 'element': " + str(ob['element']) + ", 'offsetSeconds': " + str(ob['offsetSeconds']) + ", 'endTimeSeconds': " + str(ob['endTimeSeconds']) + "}, "
sMapStr = sMapStr[0:-2]
sMapStr += "]"
self.assertEqual(sMapStr, """[{'durationSeconds': 0.0, 'voiceIndex': None, 'element': <music21.tempo.MetronomeMark larghetto Quarter=60>, 'offsetSeconds': 0.0, 'endTimeSeconds': 0.0}, {'durationSeconds': 1.0, 'voiceIndex': None, 'element': <music21.note.Note C>, 'offsetSeconds': 0.0, 'endTimeSeconds': 1.0}, {'durationSeconds': 1.0, 'voiceIndex': None, 'element': <music21.note.Note C>, 'offsetSeconds': 1.0, 'endTimeSeconds': 2.0}]""")
s = stream.Stream()
s.repeatAppend(note.Note(), 2)
s.insert([0, tempo.MetronomeMark(number=15)])
sMap = s._getSecondsMap()
sMapStr = "[" # construct string from dict in fixed order...
for ob in sMap:
sMapStr += "{'durationSeconds': " + str(ob['durationSeconds']) + ", 'voiceIndex': " + str(ob['voiceIndex']) + ", 'element': " + str(ob['element']) + ", 'offsetSeconds': " + str(ob['offsetSeconds']) + ", 'endTimeSeconds': " + str(ob['endTimeSeconds']) + "}, "
sMapStr = sMapStr[0:-2]
sMapStr += "]"
self.assertEqual(str(sMapStr), """[{'durationSeconds': 0.0, 'voiceIndex': None, 'element': <music21.tempo.MetronomeMark larghissimo Quarter=15>, 'offsetSeconds': 0.0, 'endTimeSeconds': 0.0}, {'durationSeconds': 4.0, 'voiceIndex': None, 'element': <music21.note.Note C>, 'offsetSeconds': 0.0, 'endTimeSeconds': 4.0}, {'durationSeconds': 4.0, 'voiceIndex': None, 'element': <music21.note.Note C>, 'offsetSeconds': 4.0, 'endTimeSeconds': 8.0}]""")
s = stream.Stream()
s.repeatAppend(note.Note(), 2)
s.insert([0, tempo.MetronomeMark(number=15),
1, tempo.MetronomeMark(number=60)])
sMap = s._getSecondsMap()
sMapStr = "[" # construct string from dict in fixed order...
for ob in sMap:
sMapStr += "{'durationSeconds': " + str(ob['durationSeconds']) + ", 'voiceIndex': " + str(ob['voiceIndex']) + ", 'element': " + str(ob['element']) + ", 'offsetSeconds': " + str(ob['offsetSeconds']) + ", 'endTimeSeconds': " + str(ob['endTimeSeconds']) + "}, "
sMapStr = sMapStr[0:-2]
sMapStr += "]"
self.assertEqual(sMapStr, """[{'durationSeconds': 0.0, 'voiceIndex': None, 'element': <music21.tempo.MetronomeMark larghissimo Quarter=15>, 'offsetSeconds': 0.0, 'endTimeSeconds': 0.0}, {'durationSeconds': 4.0, 'voiceIndex': None, 'element': <music21.note.Note C>, 'offsetSeconds': 0.0, 'endTimeSeconds': 4.0}, {'durationSeconds': 0.0, 'voiceIndex': None, 'element': <music21.tempo.MetronomeMark larghetto Quarter=60>, 'offsetSeconds': 4.0, 'endTimeSeconds': 4.0}, {'durationSeconds': 1.0, 'voiceIndex': None, 'element': <music21.note.Note C>, 'offsetSeconds': 4.0, 'endTimeSeconds': 5.0}]""")
s = stream.Stream()
s.repeatAppend(note.Note(quarterLength=2.0), 1)
s.insert([0, tempo.MetronomeMark(number=15),
1, tempo.MetronomeMark(number=60)])
sMap = s._getSecondsMap()
sMapStr = "[" # construct string from dict in fixed order...
for ob in sMap:
sMapStr += "{'durationSeconds': " + str(ob['durationSeconds']) + ", 'voiceIndex': " + str(ob['voiceIndex']) + ", 'element': " + str(ob['element']) + ", 'offsetSeconds': " + str(ob['offsetSeconds']) + ", 'endTimeSeconds': " + str(ob['endTimeSeconds']) + "}, "
sMapStr = sMapStr[0:-2]
sMapStr += "]"
self.assertEqual(sMapStr, """[{'durationSeconds': 0.0, 'voiceIndex': None, 'element': <music21.tempo.MetronomeMark larghissimo Quarter=15>, 'offsetSeconds': 0.0, 'endTimeSeconds': 0.0}, {'durationSeconds': 5.0, 'voiceIndex': None, 'element': <music21.note.Note C>, 'offsetSeconds': 0.0, 'endTimeSeconds': 5.0}, {'durationSeconds': 0.0, 'voiceIndex': None, 'element': <music21.tempo.MetronomeMark larghetto Quarter=60>, 'offsetSeconds': 4.0, 'endTimeSeconds': 4.0}]""")
def testPartDurationA(self):
from music21 import stream
#s = corpus.parse('bach/bwv7.7')
p1 = stream.Part()
p1.append(note.Note(quarterLength=72))
p2 = stream.Part()
p2.append(note.Note(quarterLength=72))
sNew = stream.Score()
sNew.append(p1)
self.assertEqual(str(sNew.duration), '<music21.duration.Duration 72.0>')
self.assertEqual(sNew.duration.quarterLength, 72.0)
sNew.append(p2)
self.assertEqual(sNew.duration.quarterLength, 144.0)
#sPost = sNew.chordify()
#sPost.show()
def testPartDurationB(self):
from music21 import stream, corpus
s = corpus.parse('bach/bwv66.6')
sNew = stream.Score()
sNew.append(s.parts[0])
self.assertEqual(str(s.parts[0].duration), '<music21.duration.Duration 36.0>')
self.assertEqual(str(sNew.duration), '<music21.duration.Duration 36.0>')
self.assertEqual(sNew.duration.quarterLength, 36.0)
sNew.append(s.parts[1])
self.assertEqual(sNew.duration.quarterLength, 72.0)
def testChordifyTagPartA(self):
from music21 import stream
p1 = stream.Stream()
p1.id = 'a'
p1.repeatAppend(note.Note('g4', quarterLength=2), 6)
p2 = stream.Stream()
p2.repeatAppend(note.Note('c4', quarterLength=3), 4)
p2.id = 'b'
s = stream.Score()
s.insert(0, p1)
s.insert(0, p2)
post = s.chordify(addPartIdAsGroup=True, removeRedundantPitches=False)
self.assertEqual(len(post.flat.notes), 8)
# test that each note has its original group
idA = []
idB = []
for c in post.flat.getElementsByClass('Chord'):
for p in c.pitches:
if 'a' in p.groups:
idA.append(p.name)
if 'b' in p.groups:
idB.append(p.name)
self.assertEqual(idA, ['G', 'G', 'G', 'G', 'G', 'G', 'G', 'G'])
self.assertEqual(idB, ['C', 'C', 'C', 'C', 'C', 'C', 'C', 'C'])
def testChordifyTagPartB(self):
from music21 import corpus
s = corpus.parse('bwv66.6')
idSoprano = []
idAlto = []
idTenor = []
idBass = []
post = s.chordify(addPartIdAsGroup=True, removeRedundantPitches=False)
for c in post.flat.getElementsByClass('Chord'):
for p in c.pitches:
if 'Soprano' in p.groups:
idSoprano.append(p.name)
if 'Alto' in p.groups:
idAlto.append(p.name)
if 'Tenor' in p.groups:
idTenor.append(p.name)
if 'Bass' in p.groups:
idBass.append(p.name)
self.assertEqual(idSoprano, [u'C#', u'B', u'A', u'B', u'C#', u'E', u'C#', u'C#', u'B', u'B', u'A', u'C#', u'A', u'B', u'G#', u'G#', u'F#', u'A', u'B', u'B', u'B', u'B', u'F#', u'F#', u'E', u'A', u'A', u'B', u'B', u'C#', u'C#', u'A', u'B', u'C#', u'A', u'G#', u'G#', u'F#', u'F#', u'G#', u'F#', u'F#', u'F#', u'F#', u'F#', u'F#', u'F#', u'F#', u'F#', u'E#', u'F#'])
self.assertEqual(idAlto, [u'E', u'E', u'F#', u'E', u'E', u'E', u'E', u'A', u'G#', u'G#', u'E', u'G#', u'F#', u'G#', u'E#', u'E#', u'C#', u'F#', u'F#', u'F#', u'E', u'E', u'D#', u'D#', u'C#', u'C#', u'F#', u'E', u'E', u'E', u'A', u'F#', u'F#', u'G#', u'F#', u'F#', u'E#', u'F#', u'F#', u'C#', u'C#', u'D', u'E', u'E', u'D', u'C#', u'B', u'C#', u'D', u'D', u'C#'])
# length should be the same
self.assertEqual(len(idSoprano), len(idAlto))
def testTransposeByPitchA(self):
from music21 import stream, instrument
i1 = instrument.EnglishHorn() # -p5
i2 = instrument.Clarinet() # -M2
p1 = stream.Part()
p1.repeatAppend(note.Note('C4'), 4)
p1.insert(0, i1)
p1.insert(2, i2)
p2 = stream.Part()
p2.repeatAppend(note.Note('C4'), 4)
p2.insert(0, i2)
s = stream.Score()
s.insert(0, p1)
s.insert(0, p2)
self.assertEqual([str(p) for p in p1.pitches], ['C4', 'C4', 'C4', 'C4'])
test = p1._transposeByInstrument(inPlace=False)
self.assertEqual([str(p) for p in test.pitches], ['F3', 'F3', 'B-3', 'B-3'])
test = p1._transposeByInstrument(inPlace=False, reverse=True)
self.assertEqual([str(p) for p in test.pitches], ['G4', 'G4', 'D4', 'D4'])
# declare that at written pitch
p1.atSoundingPitch = False
test = p1.toSoundingPitch(inPlace=False)
# all transpositions should be downward
self.assertEqual([str(p) for p in test.pitches], ['F3', 'F3', 'B-3', 'B-3'])
# declare that at written pitch
p1.atSoundingPitch = False
test = p1.toWrittenPitch(inPlace=False)
# no change; already at written
self.assertEqual([str(p) for p in test.pitches], ['C4', 'C4', 'C4', 'C4'])
# declare that at sounding pitch
p1.atSoundingPitch = True
# no change happens
test = p1.toSoundingPitch(inPlace=False)
self.assertEqual([str(p) for p in test.pitches], ['C4', 'C4', 'C4', 'C4'])
# declare at sounding pitch
p1.atSoundingPitch = True
# reverse intervals; app pitches should be upward
test = p1.toWrittenPitch(inPlace=False)
self.assertEqual([str(p) for p in test.pitches], ['G4', 'G4', 'D4', 'D4'])
# test on a complete score
s.parts[0].atSoundingPitch = False
s.parts[1].atSoundingPitch = False
test = s.toSoundingPitch(inPlace=False)
self.assertEqual([str(p) for p in test.parts[0].pitches], ['F3', 'F3', 'B-3', 'B-3'])
self.assertEqual([str(p) for p in test.parts[1].pitches], ['B-3', 'B-3', 'B-3', 'B-3'])
# test same in place
s.parts[0].atSoundingPitch = False
s.parts[1].atSoundingPitch = False
s.toSoundingPitch(inPlace=True)
self.assertEqual([str(p) for p in s.parts[0].pitches], ['F3', 'F3', 'B-3', 'B-3'])
self.assertEqual([str(p) for p in test.parts[1].pitches], ['B-3', 'B-3', 'B-3', 'B-3'])
def testTransposeByPitchB(self):
from music21.musicxml import testPrimitive
from music21 import converter
s = converter.parse(testPrimitive.transposingInstruments72a)
self.assertEqual(s.parts[0].atSoundingPitch, False)
self.assertEqual(s.parts[1].atSoundingPitch, False)
self.assertEqual(str(s.parts[0].getElementsByClass(
'Instrument')[0].transposition), '<music21.interval.Interval M-2>')
self.assertEqual(str(s.parts[1].getElementsByClass(
'Instrument')[0].transposition), '<music21.interval.Interval M-6>')
self.assertEqual([str(p) for p in s.parts[0].pitches], ['D4', 'E4', 'F#4', 'G4', 'A4', 'B4', 'C#5', 'D5'])
self.assertEqual([str(p) for p in s.parts[1].pitches], ['A4', 'B4', 'C#5', 'D5', 'E5', 'F#5', 'G#5', 'A5'])
s.toSoundingPitch(inPlace=True)
self.assertEqual([str(p) for p in s.parts[0].pitches], ['C4', 'D4', 'E4', 'F4', 'G4', 'A4', 'B4', 'C5'] )
self.assertEqual([str(p) for p in s.parts[1].pitches], ['C4', 'D4', 'E4', 'F4', 'G4', 'A4', 'B4', 'C5'] )
def testExtendTiesA(self):
from music21 import stream
s = stream.Stream()
s.append(note.Note('g4'))
s.append(chord.Chord(['c3', 'g4', 'a5']))
s.append(note.Note('a5'))
s.append(chord.Chord(['c4', 'a5']))
s.extendTies()
post = []
for n in s.flat.getElementsByClass('GeneralNote'):
if 'Chord' in n.classes:
post.append([q.tie for q in n])
else:
post.append(n.tie)
self.assertEqual(str(post), '[<music21.tie.Tie start>, [None, <music21.tie.Tie stop>, <music21.tie.Tie start>], <music21.tie.Tie continue>, [None, <music21.tie.Tie stop>]]')
def testExtendTiesB(self):
from music21 import corpus
s = corpus.parse('bwv66.6')
sChords = s.measures(9, 9).chordify()
#sChords = s.chordify()
sChords.extendTies()
post = []
for chord in sChords.flat.getElementsByClass('Chord'):
post.append([n.tie for n in chord])
self.assertEqual(str(post), '[[<music21.tie.Tie continue>, <music21.tie.Tie start>, <music21.tie.Tie start>], [<music21.tie.Tie continue>, None, <music21.tie.Tie continue>, <music21.tie.Tie stop>], [<music21.tie.Tie stop>, <music21.tie.Tie start>, <music21.tie.Tie continue>, <music21.tie.Tie start>], [None, <music21.tie.Tie stop>, <music21.tie.Tie stop>, <music21.tie.Tie stop>], [None, None, None, None]]')
#sChords.show()
def testInsertIntoNoteOrChordA(self):
from music21 import stream
s = stream.Stream()
s.repeatAppend(note.Note('d4'), 8)
s.insertIntoNoteOrChord(3, note.Note('g4'))
self.assertEqual(str([e for e in s]), '[<music21.note.Note D>, <music21.note.Note D>, <music21.note.Note D>, <music21.chord.Chord D4 G4>, <music21.note.Note D>, <music21.note.Note D>, <music21.note.Note D>, <music21.note.Note D>]')
s.insertIntoNoteOrChord(3, note.Note('b4'))
self.assertEqual(str([e for e in s]), '[<music21.note.Note D>, <music21.note.Note D>, <music21.note.Note D>, <music21.chord.Chord D4 G4 B4>, <music21.note.Note D>, <music21.note.Note D>, <music21.note.Note D>, <music21.note.Note D>]')
s.insertIntoNoteOrChord(5, note.Note('b4'))
self.assertEqual(str([e for e in s]), '[<music21.note.Note D>, <music21.note.Note D>, <music21.note.Note D>, <music21.chord.Chord D4 G4 B4>, <music21.note.Note D>, <music21.chord.Chord D4 B4>, <music21.note.Note D>, <music21.note.Note D>]')
s.insertIntoNoteOrChord(5, chord.Chord(['c5', 'e-5']))
self.assertEqual(str([e for e in s]), '[<music21.note.Note D>, <music21.note.Note D>, <music21.note.Note D>, <music21.chord.Chord D4 G4 B4>, <music21.note.Note D>, <music21.chord.Chord D4 B4 C5 E-5>, <music21.note.Note D>, <music21.note.Note D>]')
#s.show('text')
def testInsertIntoNoteOrChordB(self):
from music21 import stream
s = stream.Stream()
s.repeatAppend(chord.Chord(['c4', 'e4', 'g4']), 8)
s.insertIntoNoteOrChord(5, note.Note('b4'))
s.insertIntoNoteOrChord(3, note.Note('b4'))
s.insertIntoNoteOrChord(6, chord.Chord(['d5', 'e-5', 'b-5']))
self.assertEqual(str([e for e in s]), '[<music21.chord.Chord C4 E4 G4>, <music21.chord.Chord C4 E4 G4>, <music21.chord.Chord C4 E4 G4>, <music21.chord.Chord C4 E4 G4 B4>, <music21.chord.Chord C4 E4 G4>, <music21.chord.Chord C4 E4 G4 B4>, <music21.chord.Chord C4 E4 G4 D5 E-5 B-5>, <music21.chord.Chord C4 E4 G4>]')
def testSortingAfterInsertA(self):
from music21 import corpus
import math
s = corpus.parse('bwv66.6')
#s.show()
p = s.parts[0]
for m in p.getElementsByClass('Measure'):
for n in m.notes:
targetOffset = n.getOffsetBySite(m)
if targetOffset != math.floor(targetOffset):
## remove all offbeats
r = note.Rest(quarterLength=n.quarterLength)
m.remove(n)
m.insert(targetOffset, r)
# if we iterate, we get a sorted version
#self.assertEqual([str(n) for n in p.flat.notesAndRests], [])
# when we just call show(), we were not geting a sorted version;
# this was due to making the stream immutable before sorting
# this is now fixed
# m. 3
match = """ <note>
<pitch>
<step>A</step>
<octave>4</octave>
</pitch>
<duration>5040</duration>
<type>eighth</type>
<stem>up</stem>
</note>
<note>
<rest/>
<duration>5040</duration>
<type>eighth</type>
</note>
<note>
<pitch>
<step>G</step>
<alter>1</alter>
<octave>4</octave>
</pitch>
<duration>10080</duration>
<type>quarter</type>
<stem>up</stem>
</note>
<note>"""
GEX = m21ToXml.GeneralObjectExporter()
originalRaw = GEX.parse(p).decode('utf-8')
match = match.replace(' ', '')
match = match.replace('\n', '')
raw = originalRaw.replace(' ', '')
raw = raw.replace('\n', '')
self.assertEqual(raw.find(match) > 0, True, (match, originalRaw))
def testInvertDiatonicA(self):
# TODO: Check results
from music21 import corpus, stream
qj = corpus.parse('ciconia/quod_jactatur').parts[0]
k1 = qj.flat.getElementsByClass(key.KeySignature)[0]
qj.flat.replace(k1, key.KeySignature(-3))
qj.getElementsByClass(stream.Measure)[1].insert(0, key.KeySignature(5))
unused_qj2 = qj.invertDiatonic(note.Note('F4'), inPlace = False)
def testMeasuresA(self):
from music21 import corpus
s = corpus.parse('bwv66.6')
ex = s.parts[0].measures(3,6)
self.assertEqual(str(ex.flat.getElementsByClass('Clef')[0]), '<music21.clef.TrebleClef>')
self.assertEqual(str(ex.flat.getElementsByClass('Instrument')[0]), 'P1: Soprano: Instrument 1')
# check that we have the exact same Measure instance
mTarget = s.parts[0].getElementsByClass('Measure')[3]
self.assertEqual(id(ex.getElementsByClass('Measure')[0]), id(mTarget))
for m in ex.getElementsByClass('Measure'):
for n in m.notes:
if n.name == 'B':
o = n.getOffsetBySite(m)
m.remove(n)
r = note.Rest(quarterLength=n.quarterLength)
m.insert(o, r)
#s.parts[0].show()
self.assertEqual(len(ex.flat.getElementsByClass('Rest')), 5)
def testMeasuresB(self):
from music21 import corpus
s = corpus.parse('luca/gloria')
y = s.measures(50,90)
self.assertEqual(len(
y.parts[0].flat.getElementsByClass('TimeSignature')), 2)
# make sure that ts is being found in musicxml score generation
# as it is in the Part, and not the Measure, this req an extra check
GEX = m21ToXml.GeneralObjectExporter()
raw = GEX.parse(y.parts[0]).decode('utf-8')
match = """ <time>
<beats>2</beats>
<beat-type>4</beat-type>
</time>
"""
raw = raw.replace(' ', '')
raw = raw.replace('\n', '')
match = match.replace(' ', '')
match = match.replace('\n', '')
self.assertEqual(raw.find(match)>0, True)
def testMeasuresC(self):
from music21 import corpus
s = corpus.parse('bwv66.6')
ex = s.parts[0].measures(3,6)
for n in list(ex.recurse(classFilter=['Note'])):
if n.name == 'B': # should do a list(recurse()) because manipulating
o = n.offset # the stream while iterating.
site = n.activeSite
n.activeSite.remove(n)
r = note.Rest(quarterLength=n.quarterLength)
site.insert(o, r)
self.assertEqual(len(ex.flat.getElementsByClass('Rest')), 5)
#ex.show()
def testChordifyF(self):
# testing chordify handling of triplets
from music21.musicxml import testPrimitive
from music21 import converter
# TODO: there are still errors in this chordify output
s = converter.parse(testPrimitive.triplets01)
#s.parts[0].show()
self.maxDiff = None
self.assertMultiLineEqual(s.parts[0].getElementsByClass('Measure')[0]._reprText(addEndTimes=True, useMixedNumerals=True),
'''{0 - 0} <music21.layout.SystemLayout>
{0 - 0} <music21.clef.TrebleClef>
{0 - 0} <music21.key.KeySignature of 2 flats, mode major>
{0 - 0} <music21.meter.TimeSignature 4/4>
{0 - 2/3} <music21.note.Note B->
{2/3 - 1 1/3} <music21.note.Note C>
{1 1/3 - 2} <music21.note.Note B->
{2 - 4} <music21.note.Note A>''')
self.assertMultiLineEqual(s.parts[1].getElementsByClass('Measure')[0]._reprText(addEndTimes=True),
'''{0.0 - 0.0} <music21.clef.BassClef>
{0.0 - 0.0} <music21.key.KeySignature of 2 flats, mode major>
{0.0 - 0.0} <music21.meter.TimeSignature 4/4>
{0.0 - 4.0} <music21.note.Note B->''')
chords = s.chordify()
m1 = chords.getElementsByClass('Measure')[0]
self.assertMultiLineEqual(m1._reprText(addEndTimes=True, useMixedNumerals=True),
'''{0 - 0} <music21.layout.SystemLayout>
{0 - 0} <music21.clef.TrebleClef>
{0 - 0} <music21.key.KeySignature of 2 flats, mode major>
{0 - 0} <music21.meter.TimeSignature 4/4>
{0 - 2/3} <music21.chord.Chord B-4 B-2>
{2/3 - 1 1/3} <music21.chord.Chord C5 B-2>
{1 1/3 - 2} <music21.chord.Chord B-4 B-2>
{2 - 4} <music21.chord.Chord A4 B-2>''')
match = [([str(p) for p in n.pitches], str(round(float(n.offset), 2)), str(round(float(n.quarterLength), 3))) for n in m1.notes]
self.assertEqual(str(match), "[(['B-4', 'B-2'], '0.0', '0.667'), " + \
"(['C5', 'B-2'], '0.67', '0.667'), " + \
"(['B-4', 'B-2'], '1.33', '0.667'), " + \
"(['A4', 'B-2'], '2.0', '2.0')]")
#chords.show()
GEX = m21ToXml.GeneralObjectExporter()
raw = GEX.parse(m1).decode('utf-8')
# there should only be 2 tuplet indications in the produced chords: start and stop...
self.assertEqual(raw.count('<tuplet'), 2, raw)
# pitch grouping in measure index 1 was not allocated properly
#for c in chords.getElementsByClass('Chord'):
# self.assertEqual(len(c), 2)
def testChordifyG(self):
from music21 import stream
# testing a problem in triplets in makeChords
s = stream.Stream()
s.repeatAppend(note.Note('G4', quarterLength=1/3.), 6)
s.insert(0, note.Note('C4', quarterLength=2))
chords = s.chordify()
#s.chordify().show('t')
for c in chords.getElementsByClass('Chord'):
self.assertEqual(len(c), 2)
# try with small divisions
s = stream.Stream()
s.repeatAppend(note.Note('G4', quarterLength=1/6.), 12)
s.insert(0, note.Note('C4', quarterLength=2))
chords = s.chordify()
#s.chordify().show('t')
for c in chords.getElementsByClass('Chord'):
self.assertEqual(len(c), 2)
s = stream.Stream()
s.repeatAppend(note.Note('G4', quarterLength=1/12.), 24)
s.insert(0, note.Note('C4', quarterLength=2))
chords = s.chordify()
#s.chordify().show('t')
for c in chords.getElementsByClass('Chord'):
self.assertEqual(len(c), 2)
s = stream.Stream()
s.repeatAppend(note.Note('G4', quarterLength=1/24.), 48)
s.insert(0, note.Note('C4', quarterLength=2))
chords = s.chordify()
#s.chordify().show('t')
for c in chords.getElementsByClass('Chord'):
self.assertEqual(len(c), 2)
def testMakeVoicesA(self):
from music21 import stream
s = stream.Stream()
s.repeatAppend(note.Note('d-4', quarterLength=1), 8)
s.insert(0, note.Note('C4', quarterLength=8))
s.makeVoices(inPlace=True)
self.assertEqual(len(s.voices), 2)
self.assertEqual(len(s.voices[0]), 8)
self.assertEqual(len(s.voices[1]), 1)
#s.show()
def testMakeVoicesB(self):
from music21 import corpus
s = corpus.parse('bwv66.6')
#s.measures(6,7).show()
sMeasures = s.measures(6,7)
sFlatVoiced = sMeasures.flat.makeVoices(inPlace=False)
#sFlatVoiced.show('t')
#sFlatVoiced.show()
self.assertEqual(len(sMeasures.flat.notes), len(sFlatVoiced.flat.notes))
self.assertEqual(sMeasures.flat.highestTime,
sFlatVoiced.flat.notes.highestTime)
self.assertEqual(len(sFlatVoiced.voices), 4)
def testSplitAtQuarterLengthA(self):
from music21 import stream
s = stream.Measure()
s.append(note.Note('a', quarterLength=1))
s.append(note.Note('b', quarterLength=2))
s.append(note.Note('c', quarterLength=1))
l, r = s.splitAtQuarterLength(2, retainOrigin=True)
# if retain origin is true, l is the original
self.assertEqual(l, s)
self.assertEqual(l.highestTime, 2)
self.assertEqual(len(l.notes), 2)
self.assertEqual(r.highestTime, 2)
self.assertEqual(len(r.notes), 2)
sPost = stream.Stream()
sPost.append(l)
sPost.append(r)
def testSplitAtQuarterLengthB(self):
'''Test if recursive calls work over voices in a Measure
'''
from music21 import stream
m1 = stream.Measure()
v1 = stream.Voice()
v1.repeatAppend(note.Note('g4', quarterLength=2), 3)
v2 = stream.Voice()
v2.repeatAppend(note.Note(quarterLength=6), 1)
m1.insert(0, v1)
m1.insert(0, v2)
#m1.show()
mLeft, mRight = m1.splitAtQuarterLength(3)
self.assertEqual(len(mLeft.flat.notes), 3)
self.assertEqual(len(mLeft.voices), 2)
self.assertEqual(len(mRight.flat.notes), 3)
self.assertEqual(len(mRight.voices), 2)
sPost = stream.Stream()
sPost.append(mLeft)
sPost.append(mRight)
#sPost.show()
def testSplitAtQuarterLengthC(self):
'''Test splitting a Score
'''
from music21 import corpus
s = corpus.parse('bwv66.6')
sLeft, sRight = s.splitAtQuarterLength(6)
self.assertEqual(len(sLeft.parts), 4)
self.assertEqual(len(sRight.parts), 4)
for i in range(4):
self.assertEqual(
str(sLeft.parts[i].getElementsByClass('Measure')[0].timeSignature), str(sRight.parts[i].getElementsByClass('Measure')[0].timeSignature))
for i in range(4):
self.assertEqual(
str(sLeft.parts[i].getElementsByClass('Measure')[0].clef), str(sRight.parts[i].getElementsByClass('Measure')[0].clef))
for i in range(4):
self.assertEqual(
str(sLeft.parts[i].getElementsByClass('Measure')[0].keySignature), str(sRight.parts[i].getElementsByClass('Measure')[0].keySignature))
#sLeft.show()
#sRight.show()
# def testGraceStreamA(self):
#
# from music21 import stream, spanner
#
# # the GraceStream transforms generic notes into Notes w/ grace
# # durations; otherwise it is not necssary
# gs = stream.GraceStream()
# # the notes here are copies of the created notes
# gs.append(note.Note('c4', quarterLength=.25))
# gs.append(note.Note('d#4', quarterLength=.25))
# gs.append(note.Note('g#4', quarterLength=.5))
#
# #gs.show('t')
# #gs.show()
#
# # the total duration of the
# self.assertEqual(gs.duration.quarterLength, 0.0)
#
# s = stream.Measure()
# s.append(note.Note('G3'))
# s.append(gs)
# s.append(note.Note('A4'))
#
# sp = spanner.Slur(gs[0], s[-1])
# s.append(sp)
#
# match = [str(x) for x in s.pitches]
# self.assertEqual(match, ['G3', 'C4', 'D#4', 'G#4', 'A4'])
#s.show('text')
# p = stream.Part()
# p.append(s)
# p.show()
def testGraceStreamB(self):
'''testing a graces w/o a grace stream'''
from music21 import stream, duration, dynamics
s = stream.Measure()
s.append(note.Note('G3'))
self.assertEqual(s.highestTime, 1.0)
# shows up in the same position as the following note, not the grace
s.append(dynamics.Dynamic('mp'))
gn1 = note.Note('d#4', quarterLength=.5)
# could create a NoteRest method to get a GraceNote from a Note
gn1.duration = gn1.duration.getGraceDuration()
self.assertEqual(gn1.duration.quarterLength, 0.0)
s.append(gn1)
# highest time is the same after adding the gracenote
self.assertEqual(s.highestTime, 1.0)
s.append(note.Note('A4'))
self.assertEqual(s.highestTime, 2.0)
# this works just fine
#s.show()
match = [str(e.pitch) for e in s.notes]
self.assertEqual(match, ['G3', 'D#4', 'A4'])
#s.sort()
# this insert and shift creates an ambiguous situation
# the grace note seems to move with the note itself
s.insertAndShift(1, note.Note('c4'))
match = [str(e) for e in s.pitches]
self.assertEqual(match, ['G3', 'C4', 'D#4', 'A4'])
#s.show('t')
#s.show()
# inserting and shifting this results in it appearing before
# the note at offset 2
gn2 = note.Note('c#4', quarterLength=.25).getGrace()
gn2.duration.slash = False
s.insertAndShift(1, gn2)
#s.show('t')
#s.show()
match = [str(e) for e in s.pitches]
self.assertEqual(match, ['G3', 'C#4', 'C4', 'D#4', 'A4'])
def testGraceStreamC(self):
from music21 import stream
s = stream.Measure()
s.append(chord.Chord(['G3', 'd4']))
gc1 = chord.Chord(['d#4', 'a#4'], quarterLength=.5)
gc1.duration = gc1.duration.getGraceDuration()
s.append(gc1)
gc2 = chord.Chord(['e4', 'b4'], quarterLength=.5)
gc2.duration = gc2.duration.getGraceDuration()
s.append(gc2)
s.append(chord.Chord(['f4', 'c5'], quarterLength=2))
gc3 = chord.Chord(['f#4', 'c#5'], quarterLength=.5)
gc3.duration = gc3.duration.getGraceDuration()
s.append(gc3)
s.append(chord.Chord(['e4', 'b4'], quarterLength=1))
#s.show()
def testScoreShowA(self):
# this checks the specific handling of Score.makeNotation()
from music21 import stream
s = stream.Stream()
s.append(key.Key('G'))
GEX = m21ToXml.GeneralObjectExporter()
raw = GEX.parse(s).decode('utf-8')
self.assertTrue(raw.find('<fifths>1</fifths>') > 0, raw)
def testGetVariantsA(self):
from music21 import stream, variant
s = stream.Stream()
v1 = variant.Variant()
v2 = variant.Variant()
s.append(v1)
s.append(v2)
self.assertEqual(len(s.variants), 2)
def testActivateVariantsA(self):
'''This tests a single-measure variant
'''
from music21 import stream, variant
s = stream.Stream()
s.repeatAppend(note.Note('d2'), 12)
s.makeMeasures(inPlace=True)
v1 = variant.Variant()
m2Alt = stream.Measure()
m2Alt.repeatAppend(note.Note('G#4'), 4)
v1.append(m2Alt) # embed a complete Measure in v1
# insert the variant at the desired location
s.insert(4, v1)
self.assertEqual(str([p.name for p in s.pitches]), "['D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.variants), 1)
s.activateVariants(matchBySpan=False, inPlace=True)
self.assertEqual(str([p.name for p in s.pitches]), "['D', 'D', 'D', 'D', 'G#', 'G#', 'G#', 'G#', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.variants), 1)
# activating again will restore the previous
s.activateVariants(matchBySpan=False, inPlace=True)
self.assertEqual(str([p.name for p in s.pitches]), "['D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.variants), 1)
def testActivateVariantsB(self):
'''This tests two variants with different groups, each a single measure
'''
from music21 import stream, variant
s = stream.Stream()
s.repeatAppend(note.Note('d2'), 12)
s.makeMeasures(inPlace=True)
v1 = variant.Variant()
m2Alt = stream.Measure()
m2Alt.repeatAppend(note.Note('a#4'), 4)
v1.append(m2Alt) # embed a complete Measure in v1
v1.groups.append('m2-a')
v2 = variant.Variant()
m2Alt = stream.Measure()
m2Alt.repeatAppend(note.Note('b-4'), 4)
v2.append(m2Alt) # embed a complete Measure in v1
v2.groups.append('m2-b')
# insert the variant at the desired location
s.insert(4, v1)
s.insert(4, v2)
self.assertEqual(str([p.name for p in s.pitches]), "['D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.variants), 2)
s.activateVariants(group='m2-a', matchBySpan=False, inPlace=True)
self.assertEqual(str([p.name for p in s.pitches]), "['D', 'D', 'D', 'D', 'A#', 'A#', 'A#', 'A#', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.variants), 2)
# if we try the same group twice, it is now not active, so there is no change
s.activateVariants(group='m2-a', matchBySpan=False, inPlace=True)
self.assertEqual(str([p.name for p in s.pitches]), "['D', 'D', 'D', 'D', 'A#', 'A#', 'A#', 'A#', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.variants), 2)
# activate a different variant
s.activateVariants('m2-b', matchBySpan=False, inPlace=True)
self.assertEqual(str([p.name for p in s.pitches]), "['D', 'D', 'D', 'D', 'B-', 'B-', 'B-', 'B-', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.variants), 2)
# TODO: keep groups
# we now have 2 variants that have been stripped of their groups
match = [e.groups for e in s.variants]
self.assertEqual(str(match), "[['default'], ['default']]")
def testActivateVariantsC(self):
'''This tests a two-measure variant
'''
from music21 import stream, variant
s = stream.Stream()
s.repeatAppend(note.Note('d2'), 12)
s.makeMeasures(inPlace=True)
v1 = variant.Variant()
m2Alt = stream.Measure()
m2Alt.repeatAppend(note.Note('G#4'), 4)
v1.append(m2Alt) # embed a complete Measure in v1
m3Alt = stream.Measure()
m3Alt.repeatAppend(note.Note('A#4'), 4)
v1.append(m3Alt) # embed a complete Measure in v1
# insert the variant at the desired location
s.insert(4, v1)
self.assertEqual(str([p.name for p in s.pitches]), "['D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.variants), 1)
s.activateVariants(matchBySpan=False, inPlace=True)
self.assertEqual(str([p.name for p in s.pitches]), "['D', 'D', 'D', 'D', 'G#', 'G#', 'G#', 'G#', 'A#', 'A#', 'A#', 'A#']")
self.assertEqual(len(s.variants), 1)
#s.show('t')
# can restore the removed two measures
s.activateVariants(matchBySpan=False, inPlace=True)
self.assertEqual(str([p.name for p in s.pitches]), "['D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.variants), 1)
def testActivateVariantsD(self):
'''This tests a note-level variant
'''
from music21 import stream, variant
s = stream.Stream()
s.repeatAppend(note.Note('d2'), 12)
v = variant.Variant()
v.append(note.Note('G#4'))
v.append(note.Note('a#4'))
v.append(note.Note('c#5'))
s.insert(5, v)
self.assertEqual(str([p.name for p in s.pitches]), "['D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.notes), 12)
self.assertEqual(len(s.variants), 1)
s.activateVariants(matchBySpan=False, inPlace=True)
self.assertEqual(str([p.name for p in s.pitches]), "['D', 'D', 'D', 'D', 'D', 'G#', 'A#', 'C#', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.notes), 12)
self.assertEqual(len(s.variants), 1)
#s.show('t')
s.activateVariants(matchBySpan=False, inPlace=True)
self.assertEqual(str([p.name for p in s.pitches]), "['D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.notes), 12)
self.assertEqual(len(s.variants), 1)
# note that if the start times of each component do not match, the
# variant part will not be matched
def testActivateVariantsE(self):
'''This tests a note-level variant with miss-matched rhythms
'''
from music21 import stream, variant
s = stream.Stream()
s.repeatAppend(note.Note('d2'), 12)
v = variant.Variant()
v.append(note.Note('G#4', quarterLength=.5))
v.append(note.Note('a#4', quarterLength=1.5))
v.append(note.Note('c#5', quarterLength=1))
s.insert(5, v)
self.assertEqual(str([p.name for p in s.pitches]), "['D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.notes), 12)
self.assertEqual(len(s.variants), 1)
s.activateVariants(matchBySpan=False, inPlace=True)
# TODO
# this only matches the Notes that start at the same position
self.assertEqual(str([p.name for p in s.pitches]), "['D', 'D', 'D', 'D', 'D', 'G#', 'D', 'C#', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.notes), 12)
self.assertEqual(len(s.variants), 1)
self.assertEqual(str([p for p in s.variants[0].elements]), "[<music21.note.Note D>, <music21.note.Note D>]")
def testActivateVariantsBySpanA(self):
# this tests replacing 1 note with a 3-note variant
from music21 import stream, variant, dynamics
s = stream.Stream()
s.repeatAppend(note.Note('d2'), 12)
v = variant.Variant()
v.insert(0, dynamics.Dynamic('ff'))
v.append(note.Note('G#4', quarterLength=.5))
v.append(note.Note('a#4', quarterLength=.25))
v.append(note.Note('c#5', quarterLength=.25))
s.insert(5, v)
# pre-check
self.assertEqual(len(s.flat.notes), 12)
self.assertEqual(str([p.name for p in s.pitches]),
"['D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.getElementsByClass('Dynamic')), 0)
s.activateVariants(matchBySpan=True, inPlace=True)
self.assertEqual(len(s.flat.notes), 14) # replace 1 w/ 3, for +2
self.assertEqual(str([p.name for p in s.pitches]),
"['D', 'D', 'D', 'D', 'D', 'G#', 'A#', 'C#', 'D', 'D', 'D', 'D', 'D', 'D']")
self.assertEqual(len(s.getElementsByClass('Dynamic')), 1)
s.activateVariants(matchBySpan=True, inPlace=True)
self.assertEqual(len(s.flat.notes), 12)
self.assertEqual(str([p.name for p in s.pitches]),
"['D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D']")
# TODO: as we are presently matching removal by classes in the Variant
# the variant now has no dynamics, and thus leaves the dyn from the
# old variant here
self.assertEqual(len(s.getElementsByClass('Dynamic')), 1)
#s.show()
def testActivateVariantsBySpanB(self):
# this tests replacing 2 measures by a longer single measure
from music21 import stream, variant
s = stream.Stream()
s.repeatAppend(note.Note('d2'), 16)
s.makeMeasures(inPlace=True)
v1 = variant.Variant()
m2Alt = stream.Measure()
m2Alt.repeatAppend(note.Note('a#4'), 8)
m2Alt.timeSignature = meter.TimeSignature('8/4')
v1.append(m2Alt) # embed a complete Measure in v1
v1.groups.append('m2-a')
# insert the variant at the desired location
s.insert(4, v1)
self.assertEqual(len(s.flat.notes), 16)
self.assertEqual(len(s.getElementsByClass('Measure')), 4)
self.assertEqual(str([p.name for p in s.pitches]),
"['D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D']")
# replace 2 measures for 1
s.activateVariants(matchBySpan=True, inPlace=True)
self.assertEqual(len(s.flat.notes), 16)
self.assertEqual(len(s.getElementsByClass('Measure')), 3)
self.assertEqual(str([p.name for p in s.pitches]),
"['D', 'D', 'D', 'D', 'A#', 'A#', 'A#', 'A#', 'A#', 'A#', 'A#', 'A#', 'D', 'D', 'D', 'D']")
# replace the one for two
s.activateVariants("default", matchBySpan=True, inPlace=True)
self.assertEqual(len(s.getElementsByClass('Measure')), 4)
self.assertEqual(str([p.name for p in s.pitches]),
"['D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D']")
#s.show()
def testMeasureTemplateAll(self):
from music21 import corpus
b = corpus.parse('bwv66.6')
bass = b.parts[3]
bassEmpty = bass.measureTemplate(fillWithRests=False, customRemove=True)
for x in bassEmpty:
if 'Measure' in x.classes:
self.assertEqual(len(x), 0)
def testSetElements(self):
from music21 import dynamics
s = Stream()
s.append(note.Note('C', type='half'))
s.append(note.Note('D', type='half'))
s.append(note.Note('E', type='half'))
s.append(note.Note('F', type='half'))
n1 = s.notes[0]
n2 = s.notes[len(s.notes) // 2]
n3 = s.notes[-1]
sp1 = dynamics.Diminuendo(n1, n2)
sp2 = dynamics.Crescendo(n2, n3)
s.append(sp1)
s.append(sp2)
s2 = Stream()
s2.elements = s # do not set elements to s.elements, use s instead.
for el in s2:
self.assertEqual(el.getOffsetBySite(s2),
el.getOffsetBySite(s))
def testGetElementAfterElement(self):
n1 = note.Note('A3')
n2 = note.Note('B3')
n2.id = 'firstB'
n3 = note.Note('B3')
n3.id = 'secondB'
n4 = note.Note('D4')
m1 = note.Note('E4')
m2 = note.Note('F4')
m3 = note.Note('G4')
m4 = note.Note('A-5')
bass = Stream()
bass.append([n1, n2, n3, n4])
sop = Stream()
sop.append([m1, m2, m3, m4])
for i in range(len(bass.notes)-1):
note1 = bass.notes[i]
note2 = bass.getElementAfterElement(note1, ['Note'])
note3 = sop.playingWhenAttacked(note1)
note4 = sop.playingWhenAttacked(note2)
#print(note1, note2, note3, note4)
#print(note1.id, note2.id, note3.id, note4.id)
# TEST???
#------------------------------------------------------------------------------
if __name__ == "__main__":
import music21
music21.mainTest(Test, 'verbose') #, runTest='testGetElementAfterElement')
#------------------------------------------------------------------------------
# eof<|fim▁end|> |
def testAugmentOrDiminishCorpus(self):
'''Extract phrases from the corpus and use for testing |
<|file_name|>plugin_utils.py<|end_file_name|><|fim▁begin|>import json
import datetime
import threading
from base_plugin import *
import base_plugin
#=============================================Messaging===================================
def send_message(recipient, message, mtype='chat'):
'''
Send a message to recipient.
:param recipient: The To field of your message.
:param message: the message string to send.
:para mtype: The message type to send, supports public/private and xmpp style chat/groupchat.
'''
if mtype == 'private':
mtype = 'chat'
if mtype == 'public':
mtype = 'groupchat'
base_plugin.PluginContext.client.send_message(mto=recipient, mbody=message, mtype=mtype)
#=============================================FILTERS=====================================
#FIXME: this seems broken.
def self_message(event, plugin):
'''
filter for self generated events.
:param event: the event being filtered
:param plugin: the plugin hosting the filter
returns - true if not self generated event, false otherwise.
'''
if msg.From_Nick != plugin.client.nick and plugin.client.nick in msg.Body:
return True
return False
def on_message(event, plugin):
'''
filter for group chat events.
:param event: the event being filtered
:param plugin: the plugin hosting the filter
returns - true if a group chat event, false otherwise.
'''
if event.Type in ["groupchat"]:
return True
return False
def on_private_message(event, plugin):
'''
filter for private message events.
:param event: the event being filtered
:param plugin: the plugin hosting the filter
returns - true if a private message event, false otherwise.
'''
if not event.Room:
return True
return False
def on_presence(event, plugin):
'''
filter for join/part type events.
:param event: the event being filtered
:param plugin: the plugin hosting the filter
returns - true if a presence event, false otherwise.
'''
if event.Type in ["available", "unavailable"]:
return True
return False
#=============================================FILE OPERATORS=====================================
def put_object_to_file(item, path):
'''
Syntactic sugar, write jsonified object to file.
:param item: Any json-able item.
:param path: path to log file.
'''
with open(path, 'w+') as f:
f.write(json.dumps(item))
def get_object_from_file(path):
'''
Syntactic sugar, read jsonified object from file.
:param path: path to log file where item is stored.
Returns - json expanded item from log file.
'''
with open(path, 'r') as f:
item_str = f.read()
return json.loads(item_str)
def append_to_file(string, path):
'''
Syntactic sugar, append string to file.
:param item: Any json-able item.
:param path: path to log file.
'''
with open(path, 'a') as f:
f.write(string)
def write_to_file(string, path):
'''
Syntactic sugar, write string to file.
:param item: Any json-able item.
:param path: path to log file.
'''
with open(path, 'w+') as f:
f.write(string)
def read_from_file(path):
'''
Syntactic sugar, read from file.
:param path: path to log file where item is stored.
Returns - string contents of log file.
'''
with open(path, 'r') as f:
return f.read()
def read_lines_from_file(path):
'''
Read lines from file, as seperated by newline/enter.
:param path: path to log file
Returns - list of lines
'''
return read_from_file(path).split('\n')
#===========================================TIMED EVENTS=====================================
def schedule_event_by_delay(delay, event, args=[]):
'''
Schedule an event by a delay in seconds.
:param delay: number of seconds until event triggers.
:param event: the action to be triggered.
:param args: the arguments to pass when the event is called. (default [])
'''
threading.Timer(delay, call_function_with_variable_arguments, [event, args]).start()
def schedule_event(time, event, args=[]):
'''
Schedule an event by an absolute time
:param time: the datetime object representing the trigger time.
:param event: the action to be triggered.
:param args: the arguments to pass when the event is called. (default [])
'''
delta = time - datetime.datetime.now()
threading.Timer(delta.total_seconds(), call_function_with_variable_arguments, [event, args]).start()
def schedule_event(year, month, day, hour, minute, second, event, args=[]):
'''
Schedule an event by an absolute time<|fim▁hole|> :param month: month of the event
:param day: day of the event
:param hour: hour of the event
:param minute: minute of the event
:param second: second of the event
:param event: the action to be triggered.
:param args: the arguments to pass when the event is called. (default [])
'''
time = datetime.datetime(year, month, day, hour, minute, second)
delta = time - datetime.datetime.now()
threading.Timer(delta.total_seconds(), call_function_with_variable_arguments, [event, args]).start()
#==========================================HERE THERE BE DRAGONS=================================================
def call_function_with_variable_arguments(function, arguments):
'''
Takes functions, takes arguments, makes it fit.
:param function: The function to call
:param arguments: The argument list to make fit.
'''
iterator = len(arguments)
while True:
real_exception = None
try:
function(*(arguments[:iterator]))
return
except Exception as e:
if not real_exception or "takes exactly" not in str(e) or "arguments" not in str(e):
real_exception = e
iterator -= 1
if iterator < 0:
raise real_exception<|fim▁end|> |
:param year: year of the event |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// The MIT License (MIT)
// Copyright (c) 2015 Y. T. Chung <[email protected]><|fim▁hole|>// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
pub use self::processor::Processor;
pub mod processor;<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright(c) 2016-2020 Jonas Sjöberg <[email protected]>
# Source repository: https://github.com/jonasjberg/autonameow
#
# This file is part of autonameow.
#
# autonameow is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# autonameow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with autonameow. If not, see <http://www.gnu.org/licenses/>.
from .extractor_crossplatform import CrossPlatformFileSystemExtractor
from .extractor_epub import EpubMetadataExtractor
from .extractor_exiftool import ExiftoolMetadataExtractor<|fim▁hole|><|fim▁end|> | from .extractor_filetags import FiletagsMetadataExtractor
from .extractor_guessit import GuessitMetadataExtractor
from .extractor_jpeginfo import JpeginfoMetadataExtractor
from .extractor_pandoc import PandocMetadataExtractor |
<|file_name|>remote.go<|end_file_name|><|fim▁begin|>package remote
//go:generate mockery -name Remote -output mock -case=underscore
import (
"net/http"
"time"
"github.com/drone/drone/model"
"golang.org/x/net/context"
)
type Remote interface {
// Login authenticates the session and returns the
// remote user details.
Login(w http.ResponseWriter, r *http.Request) (*model.User, error)
// Auth authenticates the session and returns the remote user
// login for the given token and secret
Auth(token, secret string) (string, error)
// Teams fetches a list of team memberships from the remote system.
Teams(u *model.User) ([]*model.Team, error)
// TeamPerm fetches the named organization permissions from
// the remote system for the specified user.
TeamPerm(u *model.User, org string) (*model.Perm, error)
// Repo fetches the named repository from the remote system.
Repo(u *model.User, owner, repo string) (*model.Repo, error)
// Repos fetches a list of repos from the remote system.
Repos(u *model.User) ([]*model.RepoLite, error)
// Perm fetches the named repository permissions from
// the remote system for the specified user.
Perm(u *model.User, owner, repo string) (*model.Perm, error)
// File fetches a file from the remote repository and returns in string
// format.
File(u *model.User, r *model.Repo, b *model.Build, f string) ([]byte, error)
// Status sends the commit status to the remote system.
// An example would be the GitHub pull request status.
Status(u *model.User, r *model.Repo, b *model.Build, link string) error
// Netrc returns a .netrc file that can be used to clone
// private repositories from a remote system.
Netrc(u *model.User, r *model.Repo) (*model.Netrc, error)
// Activate activates a repository by creating the post-commit hook.
Activate(u *model.User, r *model.Repo, link string) error
// Deactivate deactivates a repository by removing all previously created
// post-commit hooks matching the given link.
Deactivate(u *model.User, r *model.Repo, link string) error
// Hook parses the post-commit hook from the Request body and returns the
// required data in a standard format.
Hook(r *http.Request) (*model.Repo, *model.Build, error)
}
// Refresher refreshes an oauth token and expiration for the given user. It
// returns true if the token was refreshed, false if the token was not refreshed,
// and error if it failed to refersh.
type Refresher interface {
Refresh(*model.User) (bool, error)
}
// Login authenticates the session and returns the
// remote user details.
func Login(c context.Context, w http.ResponseWriter, r *http.Request) (*model.User, error) {
return FromContext(c).Login(w, r)
}
// Auth authenticates the session and returns the remote user
// login for the given token and secret
func Auth(c context.Context, token, secret string) (string, error) {
return FromContext(c).Auth(token, secret)
}
// Teams fetches a list of team memberships from the remote system.
func Teams(c context.Context, u *model.User) ([]*model.Team, error) {
return FromContext(c).Teams(u)
}
// TeamPerm fetches the named organization permissions from
// the remote system for the specified user.
func TeamPerm(c context.Context, u *model.User, org string) (*model.Perm, error) {
return FromContext(c).TeamPerm(u, org)
}
// Repo fetches the named repository from the remote system.
func Repo(c context.Context, u *model.User, owner, repo string) (*model.Repo, error) {
return FromContext(c).Repo(u, owner, repo)
}
// Repos fetches a list of repos from the remote system.
func Repos(c context.Context, u *model.User) ([]*model.RepoLite, error) {
return FromContext(c).Repos(u)
}
// Perm fetches the named repository permissions from
// the remote system for the specified user.
func Perm(c context.Context, u *model.User, owner, repo string) (*model.Perm, error) {
return FromContext(c).Perm(u, owner, repo)
}
// File fetches a file from the remote repository and returns in string format.
func File(c context.Context, u *model.User, r *model.Repo, b *model.Build, f string) (out []byte, err error) {
for i:=0;i<5;i++ {
out, err = FromContext(c).File(u, r, b, f)
if err == nil {
return
}
time.Sleep(1*time.Second)
}
return
}
// Status sends the commit status to the remote system.
// An example would be the GitHub pull request status.
func Status(c context.Context, u *model.User, r *model.Repo, b *model.Build, link string) error {
return FromContext(c).Status(u, r, b, link)
}
// Netrc returns a .netrc file that can be used to clone
// private repositories from a remote system.
func Netrc(c context.Context, u *model.User, r *model.Repo) (*model.Netrc, error) {
return FromContext(c).Netrc(u, r)
}
// Activate activates a repository by creating the post-commit hook and
// adding the SSH deploy key, if applicable.
func Activate(c context.Context, u *model.User, r *model.Repo, link string) error {
return FromContext(c).Activate(u, r, link)
}
// Deactivate removes a repository by removing all the post-commit hooks
// which are equal to link and removing the SSH deploy key.
func Deactivate(c context.Context, u *model.User, r *model.Repo, link string) error {
return FromContext(c).Deactivate(u, r, link)
}
// Hook parses the post-commit hook from the Request body
// and returns the required data in a standard format.
func Hook(c context.Context, r *http.Request) (*model.Repo, *model.Build, error) {
return FromContext(c).Hook(r)
}
<|fim▁hole|> remote := FromContext(c)
refresher, ok := remote.(Refresher)
if !ok {
return false, nil
}
return refresher.Refresh(u)
}<|fim▁end|> | // Refresh refreshes an oauth token and expiration for the given
// user. It returns true if the token was refreshed, false if the
// token was not refreshed, and error if it failed to refersh.
func Refresh(c context.Context, u *model.User) (bool, error) { |
<|file_name|>HiveSourceTableLoader.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kylin.metadata.tool;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.kylin.common.KylinConfig;
import org.apache.kylin.common.util.HadoopUtil;
import org.apache.kylin.common.util.HiveClient;
import org.apache.kylin.metadata.MetadataConstants;
import org.apache.kylin.metadata.MetadataManager;
import org.apache.kylin.metadata.model.ColumnDesc;
import org.apache.kylin.metadata.model.TableDesc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.*;
/**
* Management class to sync hive table metadata with command See main method for
* how to use the class
*
* @author jianliu
*/
public class HiveSourceTableLoader {
@SuppressWarnings("unused")
private static final Logger logger = LoggerFactory.getLogger(HiveSourceTableLoader.class);
public static final String OUTPUT_SURFIX = "json";
public static final String TABLE_FOLDER_NAME = "table";
public static final String TABLE_EXD_FOLDER_NAME = "table_exd";
public static Set<String> reloadHiveTables(String[] hiveTables, KylinConfig config) throws IOException {
Map<String, Set<String>> db2tables = Maps.newHashMap();
for (String table : hiveTables) {
String[] parts = HadoopUtil.parseHiveTableName(table);
Set<String> set = db2tables.get(parts[0]);
if (set == null) {
set = Sets.newHashSet();
db2tables.put(parts[0], set);
}
set.add(parts[1]);
}
// extract from hive
Set<String> loadedTables = Sets.newHashSet();
for (String database : db2tables.keySet()) {
List<String> loaded = extractHiveTables(database, db2tables.get(database), config);
loadedTables.addAll(loaded);
}
return loadedTables;
}
private static List<String> extractHiveTables(String database, Set<String> tables, KylinConfig config) throws IOException {
List<String> loadedTables = Lists.newArrayList();
MetadataManager metaMgr = MetadataManager.getInstance(KylinConfig.getInstanceFromEnv());
for (String tableName : tables) {
Table table = null;
HiveClient hiveClient = new HiveClient();
List<FieldSchema> partitionFields = null;
List<FieldSchema> fields = null;
try {
table = hiveClient.getHiveTable(database, tableName);
partitionFields = table.getPartitionKeys();
fields = hiveClient.getHiveTableFields(database, tableName);
} catch (Exception e) {
e.printStackTrace();
throw new IOException(e);
}
if (fields != null && partitionFields != null && partitionFields.size() > 0) {
fields.addAll(partitionFields);
}
long tableSize = hiveClient.getFileSizeForTable(table);
long tableFileNum = hiveClient.getFileNumberForTable(table);
TableDesc tableDesc = metaMgr.getTableDesc(database + "." + tableName);
if (tableDesc == null) {
tableDesc = new TableDesc();<|fim▁hole|> tableDesc.setDatabase(database.toUpperCase());
tableDesc.setName(tableName.toUpperCase());
tableDesc.setUuid(UUID.randomUUID().toString());
tableDesc.setLastModified(0);
}
int columnNumber = fields.size();
List<ColumnDesc> columns = new ArrayList<ColumnDesc>(columnNumber);
for (int i = 0; i < columnNumber; i++) {
FieldSchema field = fields.get(i);
ColumnDesc cdesc = new ColumnDesc();
cdesc.setName(field.getName().toUpperCase());
cdesc.setDatatype(field.getType());
cdesc.setId(String.valueOf(i + 1));
columns.add(cdesc);
}
tableDesc.setColumns(columns.toArray(new ColumnDesc[columnNumber]));
StringBuffer partitionColumnString = new StringBuffer();
for (int i = 0, n = partitionFields.size(); i < n; i++) {
if (i > 0)
partitionColumnString.append(", ");
partitionColumnString.append(partitionFields.get(i).getName().toUpperCase());
}
Map<String, String> map = metaMgr.getTableDescExd(tableDesc.getIdentity());
if (map == null) {
map = Maps.newHashMap();
}
map.put(MetadataConstants.TABLE_EXD_TABLENAME, table.getTableName());
map.put(MetadataConstants.TABLE_EXD_LOCATION, table.getSd().getLocation());
map.put(MetadataConstants.TABLE_EXD_IF, table.getSd().getInputFormat());
map.put(MetadataConstants.TABLE_EXD_OF, table.getSd().getOutputFormat());
map.put(MetadataConstants.TABLE_EXD_OWNER, table.getOwner());
map.put(MetadataConstants.TABLE_EXD_LAT, String.valueOf(table.getLastAccessTime()));
map.put(MetadataConstants.TABLE_EXD_PC, partitionColumnString.toString());
map.put(MetadataConstants.TABLE_EXD_TFS, String.valueOf(tableSize));
map.put(MetadataConstants.TABLE_EXD_TNF, String.valueOf(tableFileNum));
map.put(MetadataConstants.TABLE_EXD_PARTITIONED, Boolean.valueOf(partitionFields != null && partitionFields.size() > 0).toString());
metaMgr.saveSourceTable(tableDesc);
metaMgr.saveTableExd(tableDesc.getIdentity(), map);
loadedTables.add(tableDesc.getIdentity());
}
return loadedTables;
}
}<|fim▁end|> | |
<|file_name|>GameClient.py<|end_file_name|><|fim▁begin|>import Network
from time import sleep
from threading import Thread
CALL_ROOMLIST = 0
CALL_WEAPLIST = 1
CALL_PLAYERLIST = 2
CALL_NEWPLAYER = 3
CALL_PLAYERLEFT = 4
CALL_CHAT = 5
CALL_PLAYERDAT = 6
CALL_ROOMSTAT = 7
CALL_LEAVEROOM = 8
CALL_SHOOT = 9
CALL_SCORE = 10
class GameClient(Network.Client):
CONNECTING = 0
JOINING_ROOM = 1
LEAVING_ROOM = 2
rooms = []
players = []
weapList= []
scores = {}
response = {}
currRoomInfo = None
main = None
status = -1
charId = 0
roomState = -1
roomId = 0
roomName = ""
stateDict = {
"WAITING":0,
"PLAYING":1,
"DEAD":99
}
invStateDict = {
0:"WAITING",
1:"PLAYING",
99:"DEAD"
}
winnerId = -1
def __init__(self, main):
super(GameClient, self).__init__()
self.main = main
self.rooms = []
self.scores = {}
self.players =[]
self.weapList = []
self.response = {}
def connect(self, name, addr, evt=False): #Blocks
self.status = self.CONNECTING
super(GameClient, self).connect(name, addr)
if evt:
self.onConnect(self.complete(self.CONNECTING))
else:
return self.complete(self.CONNECTING)
def connect_async(self, name, addr): #Doesn't block
t = Thread(target=self.connect, args=[name, addr, True])
t.start()
# NETWORK FUNCTIONS
def complete(self, event, timeout = 2):
waited = 0
while event == self.status and waited <= timeout:
sleep(.1)
waited += .1
if waited >= timeout:
return False
return self.response[event]
def done(self, event, response):
self.response[event] = response
self.status = -1
def playerById(self, pId):
low = 0
high = len(self.players) - 1
while low <= high:
mid = (low + high) >> 1
midId = self.players[mid][0]
if midId < pId:
low = mid + 1
elif midId > pId:
high = mid - 1
else:
return mid
return None
def getPlayers(self):
return self.players
def getRooms(self):
return self.rooms
def clearScores(self):
self.scores = {}
# EVENT FUNCTIONS
def onConnect(self, result):
self.main.onConnect(result)
def onRoomList(self, data):
self.rooms = data
self.main.handleNetworkCall(CALL_ROOMLIST, (self.rooms,))
def onWeapList(self, data):
self.weapList = data
self.main.handleNetworkCall(CALL_WEAPLIST, (self.weapList,))
def onPlayerList(self, playerList, roomId, roomState, yourId):
self.players = playerList
self.playerId = yourId
self.players.sort()
self.roomId = roomId
self.roomState = roomState
if self.status in [self.CONNECTING, self.JOINING_ROOM, self.LEAVING_ROOM]:
self.done(self.status, True)
self.main.handleNetworkCall(CALL_PLAYERLIST, (self.players,))
def onNewPlayer(self, player):
#playername = player[0][:player[0].find('\00')]
self.players.append(player)
self.players.sort()
self.main.handleNetworkCall(CALL_NEWPLAYER, (player,))
def onPlayerLeft(self, data):
playerPos = self.playerById(data[0])
player = self.players[playerPos]
del self.players[playerPos]
if data[2] != -1:
self.players[self.playerById(data[2])] = self.changeTuple(self.players[self.playerById(data[2])], 4, True)
self.main.handleNetworkCall(CALL_PLAYERLEFT, (player,))
def changeTuple(self, tup, key, value):
flist = list(tup)
flist[key] = value
return tuple(flist)
def onChat(self, data):
self.main.handleNetworkCall(CALL_CHAT, (data,))
def onPlayerData(self, data):
self.main.handleNetworkCall(CALL_PLAYERDAT, (data,))
def onRoomStat(self, data):
self.winnerId = data[1]
self.main.handleNetworkCall(CALL_ROOMSTAT, (data,))
#if data[0] == 0:
# self.main.endGame()
#elif data[0] == 1:
# print "starting game"
# self.main.startGame()
def onRoomSwitch(self, action, result):<|fim▁hole|> return result
def onLeaveRoom(self):
if self.status in [self.JOINING_ROOM]:
self.done(self.status, False)
def onShoot(self, bulletdata):
self.main.handleNetworkCall(CALL_SHOOT, (bulletdata,))
def onScore(self, score):
self.scores[score[0]] = score[1], score[2]
self.scores[score[3]] = score[4], score[5]
self.main.handleNetworkCall(CALL_SCORE, (score,))
def onChangeChar(self, charId, playerId):
playerPos = self.playerById(playerId)
player = self.players[playerPos]
self.players[playerPos] = self.changeTuple(self.players[playerPos], 3, charId)
def onDisconnect(self):
self.main.onDisconnect()
## SENDING FUNCTIONS
def joinRoom(self, roomid, roomName, block=False):
if block:
self.status = self.JOINING_ROOM
self.sendDataReliable(Network.Structs.joinRoom.dataType, Network.Structs.joinRoom.pack(roomid)).join()
# This function blocks...
return self.onRoomSwitch(self.JOINING_ROOM, self.complete(self.JOINING_ROOM))
else:
self.winnerId = -1
self.roomName = roomName
Thread(target=self.joinRoom, args=[roomid, roomName, True]).start()
def makeRoom(self, roomName, block=False):
if block:
self.status = self.JOINING_ROOM
self.sendDataReliable(Network.Structs.makeRoom.dataType, Network.Structs.makeRoom.pack(len(roomName))+roomName)
return self.onRoomSwitch(self.JOINING_ROOM, self.complete(self.JOINING_ROOM))
else:
self.winnerId = -1
self.roomName = roomName
Thread(target=self.makeRoom, args=[roomName, True]).start()
def leaveRoom(self, block=False):
if block:
self.status = self.LEAVING_ROOM
self.sendDataReliable(Network.Structs.leaveRoom.dataType, Network.Structs.leaveRoom.pack())
return self.onRoomSwitch(self.LEAVING_ROOM, self.complete(self.LEAVING_ROOM))
else:
self.winnerId = -1
Thread(target=self.leaveRoom, args=[True]).start()
def startGame(self):
self.sendDataReliable(Network.Structs.startGame.dataType, Network.Structs.startGame.pack(0))
def sendGameData(self, gameData):
self.sendData(Network.Structs.playerDat.dataType, gameData)
def sendShoot(self, bullet):
self.sendDataReliable(Network.Structs.shoot.dataType, Network.Structs.shoot.pack(-1, bullet.x, bullet.y, bullet.angle, bullet.type))
def setCharacter(self, charId):
self.sendDataReliable(Network.Structs.setCharacter.dataType, Network.Structs.setCharacter.pack(charId, 0))
self.charId = charId
def sendDeath(self, killerid):
self.sendDataReliable(Network.Structs.onDeath.dataType, Network.Structs.onDeath.pack(killerid))
def sendPicked(self, serverId):
self.sendDataReliable(Network.Structs.takeWeap.dataType, Network.Structs.takeWeap.pack(serverId))
def sendChat(self, data):
self.sendDataReliable(Network.Structs.preChat.dataType, Network.Structs.preChat.pack(len(data)) + data)
def __del__(self):
super(GameClient, self).__del__()<|fim▁end|> | self.main.onRoomSwitch(action, result) |
<|file_name|>app.component.ts<|end_file_name|><|fim▁begin|>// ==================================================================================================================
// ,::i BBB
// BBBBBi EBBB
// MBBNBBU BBB,
// BBB. BBB BBB,BBBBM BBB UBBB MBB, LBBBBBO, :BBG,BBB :BBB .BBBU kBBBBBF
// BBB, BBB 7BBBBS2BBBO BBB iBBBB YBBJ :BBBMYNBBB: FBBBBBB: OBB: 5BBB, BBBi ,M,
// MBBY BBB. 8BBB :BBB BBB .BBUBB BB1 BBBi kBBB BBBM BBBjBBBr BBB1
// BBBBBBBBBBBu BBB FBBP MBM BB. BB BBM 7BBB MBBY .BBB 7BBGkBB1 JBBBBi
// PBBBFE0GkBBBB 7BBX uBBB MBBMBu .BBOBB rBBB kBBB ZBBq BBB: BBBJ . iBBB
//BBBB iBBB BBBBBBBBBE EBBBB ,BBBB MBBBBBBBM BBB, iBBB .BBB2 :BBBBBBB7
//vr7 777 BBBu8O5: .77r Lr7 .7EZk; L77 .Y7r irLY JNMMF:
// LBBj
//
// Apworks Application Development Framework
// Copyright (C) 2009-2017 by daxnet.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ==================================================================================================================
import { Component } from '@angular/core';
@Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.css']
})
<|fim▁hole|><|fim▁end|> | export class AppComponent {
title = 'app works!';
} |
<|file_name|>webclient.py<|end_file_name|><|fim▁begin|>import logging
import requests
import slacksocket.errors as errors
log = logging.getLogger('slacksocket')
class WebClient(requests.Session):
""" Minimal client for connecting to Slack web API """
def __init__(self, token):
self._token = token
super(WebClient, self).__init__()
def get(self, url, method='GET', max_attempts=3, **params):
if max_attempts == 0:
raise errors.SlackAPIError('Max retries exceeded')
elif max_attempts < 0:
message = 'Expected max_attempts >= 0, got {0}'\
.format(max_attempts)
raise ValueError(message)
params['token'] = self._token
res = self.request(method, url, params=params)
try:
res.raise_for_status()
except requests.exceptions.HTTPError as e:
raise errors.SlackAPIError(e)
rj = res.json()
if rj['ok']:
return rj
# process error
if rj['error'] == 'migration_in_progress':
log.info('socket in migration state, retrying')
time.sleep(2)
return self.get(url,
method=method,<|fim▁hole|> **params)
else:
raise errors.SlackAPIError('Error from slack api:\n%s' % res.text)<|fim▁end|> | max_attempts=max_attempts - 1, |
<|file_name|>migrated_0115.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | a enum; |
<|file_name|>gruntfile.js<|end_file_name|><|fim▁begin|>/***********************************************
gruntfile.js for jquery-bootstrap
https://github.com/fcoo/jquery-bootstrap
***********************************************/
module.exports = function(grunt) {
"use strict";
//***********************************************
grunt.initConfig({
"fcoo_grunt_plugin":{
default: {
"haveJavaScript": true, //true if the packages have js-files
"haveStyleSheet": true, //true if the packages have css and/or scss-files<|fim▁hole|> "haveGhPages" : true, //true if there is a branch "gh-pages" used for demos
"beforeProdCmd": "", //Cmd to be run at the start of prod-task. Multi cmd can be seperated by "&"
"beforeDevCmd" : "", //Cmd to be run at the start of dev-task
"afterProdCmd" : "", //Cmd to be run at the end of prod-task
"afterDevCmd" : "", //Cmd to be run at the end of dev-task
"DEBUG" : false //if true different debugging is on and the tempoary files are not deleted
}
}
});
//****************************************************************
//Load grunt-packages
grunt.loadNpmTasks('grunt-fcoo-grunt-plugin');
};<|fim▁end|> | |
<|file_name|>std_hash.hpp<|end_file_name|><|fim▁begin|>#ifndef STD_HASH_HPP
#define STD_HASH_HPP
#include <functional>
// this is largely inspired by boost's hash combine as can be found in
// "The C++ Standard Library" 2nd Edition. Nicolai M. Josuttis. 2012.
<|fim▁hole|>template <typename T> void hash_combine(std::size_t &seed, const T &val)
{
seed ^= std::hash<T>()(val) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}
template <typename T> void hash_val(std::size_t &seed, const T &val) { hash_combine(seed, val); }
template <typename T, typename... Types>
void hash_val(std::size_t &seed, const T &val, const Types &... args)
{
hash_combine(seed, val);
hash_val(seed, args...);
}
template <typename... Types> std::size_t hash_val(const Types &... args)
{
std::size_t seed = 0;
hash_val(seed, args...);
return seed;
}
namespace std
{
template <typename T1, typename T2> struct hash<std::pair<T1, T2>>
{
size_t operator()(const std::pair<T1, T2> &pair) const
{
return hash_val(pair.first, pair.second);
}
};
}
#endif // STD_HASH_HPP<|fim▁end|> | |
<|file_name|>issue-3099-a.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.<|fim▁hole|>//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum a { b, c }
enum a { d, e } //~ ERROR duplicate definition of type `a`
fn main() {}<|fim▁end|> | |
<|file_name|>common.py<|end_file_name|><|fim▁begin|># This is modified from a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
import array
import struct
import zlib
from enum import Enum
from pkg_resources import parse_version
from kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
if parse_version(ks_version) < parse_version('0.7'):
raise Exception(
"Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
from .cfg_2 import Cfg2
from .header import Header
from .data import Data
from .cfg_3 import Cfg3
from .command import Command
def _kaitai_repr(self):
_repr_list = []
for item in vars(self):
if not item.startswith('_'):
_r = getattr(self, item)
if type(_r) in (int, float, str, bytes, bool):
_repr_list.append("=".join((item, _r.__repr__())))
else:
_repr_list.append(item)
return "<" + self.__class__.__name__ + " |" + ", ".join(_repr_list) + ">"
def _enum_repr(self):
_repr_list = []
for item in ("name", "value"):
_r = getattr(self, item)
_repr_list.append("=".join((item, _r.__repr__())))
return "<" + self.__class__.__name__[:-4] + " |" + ", ".join(_repr_list) + ">"
def _kaitai_show(self, parent_path=' '):
if type(self) in (int, float, str, bytes, bool):
print(" == ".join((parent_path, self.__repr__())))
elif type(self) == list:
for i, item in enumerate(self):
try:
item.show('{}[{}]'.format(parent_path,i))
except:
_kaitai_show(item,'{}[{}]'.format(parent_path,i))
else:
for item in sorted(vars(self)):
if not item.startswith('_'):
_r = getattr(self, item)
try:
_r.show(parent_path+'.'+item)
except:
_kaitai_show(_r,parent_path+'.'+item)
def _enum_show(self, parent_path=' '):
for item in ("name", "value"):
_r = getattr(self, item)
print(parent_path+'.'+item+' == '+_r.__repr__())
KaitaiStruct.__repr__ = _kaitai_repr
Enum.__repr__ = _enum_repr
KaitaiStruct.show = _kaitai_show
Enum.show = _enum_show
#msg.show()
class PhasorMessage(KaitaiStruct):
def __repr__(self):
_repr_list = [
"time=" + str(self.time)] if self.fracsec.fraction_of_second else []
for item in vars(self):
if not item.startswith('_'):
_r = getattr(self, item)
if type(_r) in (int, float, str, bytes):
_repr_list.append("=".join((item, _r.__repr__())))
else:
_repr_list.append(item)
return "<" + self.__class__.__name__ + " |" + ", ".join(_repr_list) + ">"
def show(self, parent_path=' '):
if self.fracsec.fraction_of_second:
print(parent_path+'.time == '+str(self.time))
_kaitai_show(self, parent_path)
def __init__(self, _io, _parent=None, _root=None, _mini_cfgs=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._pkt_pos = self._io.pos()
self.sync = self._root.SyncWord(self._io, self, self._root)
self.framesize = self._io.read_u2be()
self.idcode = self._io.read_u2be()
self._mini_cfg = _mini_cfgs.mini_cfg[self.idcode]
self.soc = self._io.read_u4be()
self.fracsec = self._root.Fracsec(self._io, self, self._root,
self._mini_cfg.time_base.time_base if self._mini_cfg else None)
_on = self.sync.frame_type.value
if _on == 0:
if self._mini_cfg:
self.data = Data(self._io, _mini_cfg=self._mini_cfg)
else:
self.data = self._io.read_bytes((self.framesize - 16))
elif _on == 3:
self._raw_data = self._io.read_bytes((self.framesize - 16))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Cfg2(io)
_mini_cfgs.add_cfg(self.idcode, self.data)
elif _on == 4:
self._raw_data = self._io.read_bytes((self.framesize - 16))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Command(io)
elif _on == 5:
_mini_cfgs.add_cfg(self.raw_pkt)
self._raw_data = self._io.read_bytes((self.framesize - 16))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Cfg3(io)
elif _on == 2:
self._raw_data = self._io.read_bytes((self.framesize - 16))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Cfg2(io)
elif _on == 1:
self._raw_data = self._io.read_bytes((self.framesize - 16))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Header(io)
self.chk = self._io.read_u2be()
class SyncWord(KaitaiStruct):
class FrameTypeEnum(Enum):
data = 0
header = 1
cfg1 = 2
cfg2 = 3
cmd = 4
cfg3 = 5
<|fim▁hole|> c_37_118_2005 = 1
c_37_118_2_2011 = 2
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.magic = self._io.ensure_fixed_contents(struct.pack('1b', -86))
self.reserved = self._io.read_bits_int(1) != 0
self.frame_type = self._root.SyncWord.FrameTypeEnum(
self._io.read_bits_int(3))
self.version_number = self._root.SyncWord.VersionNumberEnum(
self._io.read_bits_int(4))
class Fracsec(KaitaiStruct):
def __repr__(self):
_repr_list = ["fraction_of_second=" +
str(self.fraction_of_second)] if self.fraction_of_second else []
for item in vars(self):
if not item.startswith('_'):
_r = getattr(self, item)
if type(_r) in (int, float, str):
_repr_list.append("=".join((item, _r.__repr__())))
else:
_repr_list.append(item)
return "<" + self.__class__.__name__ + " |" + ", ".join(_repr_list) + ">"
def show(self, parent_path):
if self.fraction_of_second:
print(parent_path+'.fraction_of_second == ' + str(self.fraction_of_second))
_kaitai_show(self, parent_path)
class LeapSecondDirectionEnum(Enum):
add = 0
delete = 1
class MsgTqEnum(Enum):
normal_operation_clock_locked_to_utc_traceable_source = 0
time_within_10_to_9_s_of_utc = 1
time_within_10_to_8_s_of_utc = 2
time_within_10_to_7_s_of_utc = 3
time_within_10_to_6_s_of_utc = 4
time_within_10_to_5_s_of_utc = 5
time_within_10_to_4_s_of_utc = 6
time_within_10_to_3_s_of_utc = 7
time_within_10_to_2_s_of_utc = 8
time_within_10_to_1_s_of_utc = 9
time_within_1_s_of_utc = 10
time_within_10_s_of_utc = 11
fault_clock_failure_time_not_reliable = 15
def __init__(self, _io, _parent=None, _root=None, _time_base=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._time_base = _time_base
self.reserved = self._io.read_bits_int(1) != 0
self.leap_second_direction = self._root.Fracsec.LeapSecondDirectionEnum(
self._io.read_bits_int(1))
self.leap_second_occurred = self._io.read_bits_int(1) != 0
self.leap_second_pending = self._io.read_bits_int(1) != 0
self.time_quality = self._root.Fracsec.MsgTqEnum(
self._io.read_bits_int(4))
self.raw_fraction_of_second = self._io.read_bits_int(24)
@property
def fraction_of_second(self):
if hasattr(self, '_m_fraction_of_second'):
return self._m_fraction_of_second if hasattr(self, '_m_fraction_of_second') else None
if self._time_base:
self._m_fraction_of_second = self.raw_fraction_of_second / self._time_base
return self._m_fraction_of_second if hasattr(self, '_m_fraction_of_second') else None
@property
def time(self):
if hasattr(self, '_m_time'):
return self._m_time if hasattr(self, '_m_time') else None
self._m_time = self.soc + self.fracsec.fraction_of_second
return self._m_time if hasattr(self, '_m_time') else None
@property
def chk_body(self):
if hasattr(self, '_m_chk_body'):
return self._m_chk_body if hasattr(self, '_m_chk_body') else None
_pos = self._io.pos()
self._io.seek(0)
self._m_chk_body = self._io.read_bytes((self.framesize - 2))
self._io.seek(_pos)
return self._m_chk_body if hasattr(self, '_m_chk_body') else None
@property
def raw_pkt(self):
if hasattr(self, '_m_pkt'):
return self._m_pkt if hasattr(self, '_m_pkt') else None
_pos = self._io.pos()
self._io.seek(self._pkt_pos)
self._m_pkt = self._io.read_bytes(self.framesize)
self._io.seek(_pos)
return self._m_pkt if hasattr(self, '_m_pkt') else None<|fim▁end|> | class VersionNumberEnum(Enum): |
<|file_name|>sw_config_backup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:set sw=4 ts=4 et:
import sys
import pexpect
import threading
import os
import shutil
import logging
import time
import configparser
import ast
import subprocess
def backup(switch, server):
if switch['type'].lower() == '3com':
return backup_3com(switch, server)
elif switch['type'].lower() == 'hp':
return backup_hp(switch, server)
else:
logging.error("Unsupported type of switch (type: %s)" % (switch['type']))
return 4
def backup_3com(switch, server):
try:
ssh=pexpect.spawn('ssh -o StrictHostKeyChecking=no %s@%s' % (switch['username'], switch['ip']))
logging.debug('%s: connecting to ip: %s' % (switch['name'], switch['ip']))
ssh.expect('password')
except:
logging.error("Connection failed(%s)\n \t%s" % (switch['name'], ssh.before))
return 1
try:
ssh.sendline('%s' % switch['password'])
logging.debug('%s: authenticating username: %s' % (switch['name'], switch['username']))
ssh.expect('login')
except:
logging.error("Authorization failed(%s)\n \tusername: %s" % (switch['name'], switch['username']))
return 2
try:
ssh.sendline("backup fabric current-configuration to %s %s.cfg" % (server, switch['name']))
logging.debug('%s: backuping to server: %s' % (switch['name'], server))
ssh.expect('finished!\s+<.*>',timeout=30)
ssh.sendline('quit')
except:
logging.error("Backup failed(%s)\n \t%s" % (switch['name'], ssh.before))
return 3
logging.info("Configuration from %s uploaded to tftp server %s" % (switch['name'], server))
return 0
def backup_hp(switch, server):
try:
ssh=pexpect.spawn('ssh -o StrictHostKeyChecking=no %s@%s' % (switch['username'], switch['ip']))
logging.debug('%s: connecting to ip: %s' % (switch['name'], switch['ip']))
ssh.expect('password')
except:
logging.error("Connection failed(%s)\n \t%s" % (switch['name'], ssh.before))
return 1
try:
ssh.sendline('%s' % switch['password'])
logging.debug('%s: authenticating username: %s' % (switch['name'], switch['username']))
ssh.expect('>')
except:
logging.error("Authorization failed(%s)\n \tusername: %s" % (switch['name'], switch['username']))
return 2
try:
ssh.sendline("backup startup-configuration to %s %s.cfg" % (server, switch['name']))
logging.debug('%s: backuping to server: %s' % (switch['name'], server))
ssh.expect('finished!\s+<.*>',timeout=30)
ssh.sendline('quit')
except:
logging.error("Backup failed(%s)\n \t%s" % (switch['name'], ssh.before))
return 3
logging.info("Configuration from %s uploaded to tftp server %s" % (switch['name'], server))
return 0
def sws_cfg_check(sws_cfg):
keys = {'username', 'password', 'name', 'ip', 'units', 'type'}
for section in sws_cfg:
for key in keys:
if not key in sws_cfg[section]:
raise Exception("Key \'%s\' in switches configuration in section \'%s\' is missing" % (key, section))
def load_switches_cfg():
sws_cfg = configparser.ConfigParser()
sws_cfg.read("%s/conf/switches.cfg" % (sys.path[0]))
retval = dict()
for section in sws_cfg.sections():
retval[section] = dict(sws_cfg.items(section))
sws_cfg_check(retval)
return retval
def app_cfg_check(app_cfg):
keys = {'backup_dir_path', 'backup_server', 'file_expiration_timeout', 'tftp_dir_path', 'log_file', 'git_autocommit'}
for key in keys:
if not key in app_cfg:
raise Exception("Key \'%s\' in application configuration file is missing" % (key))
def load_app_cfg():
app_cfg = configparser.ConfigParser()
app_cfg.read("%s/conf/app.cfg" % (sys.path[0]))
retval = dict(app_cfg.items('APP'))
app_cfg_check(retval)
retval['git_autocommit'] = retval['git_autocommit'].lower() in ['true', '1', 'yes', 'y']
return retval
def git_autocommit(app_cfg):
command = "cd %s; git add -A; git commit -a -m 'autocommit on change'" % (app_cfg['backup_dir_path'])
subprocess.Popen(command,stdout=subprocess.PIPE, shell=True)<|fim▁hole|> logging.basicConfig(filename=app_cfg['log_file'], level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
switches_cfg = load_switches_cfg()
threads = []
for switch in switches_cfg:
t = threading.Thread(target = backup, args = (switches_cfg[switch], app_cfg['backup_server']))
t.start()
threads.append(t)
for t in threads:
t.join()
end_time = time.time()
file_expiration_timeout = int(app_cfg['file_expiration_timeout'])
for section in switches_cfg:
switch = switches_cfg[section]
if switch['type'].lower() == '3com':
units = ast.literal_eval(switch['units'])
for unit in units:
tmp_file_path = "%s/%s_%d.cfg" % (app_cfg['tftp_dir_path'],switch['name'],unit)
if not os.access(tmp_file_path, os.R_OK):
logging.warning("Fail to read %s unit %d, expected file %s" % (switch['name'],unit,tmp_file_path))
elif (end_time - os.stat(tmp_file_path).st_mtime) > file_expiration_timeout:
logging.error("Configuration of %s unit %d, file %s is older than %d s, file will be ignored" % (switch['name'],unit,tmp_file_path, file_expiration_timeout))
else:
shutil.copy2(tmp_file_path, app_cfg['backup_dir_path'])
logging.info("Saved %s unit %d configuration" % (switch['name'],unit))
elif switch['type'].lower() == 'hp':
tmp_file_path = "%s/%s.cfg" % (app_cfg['tftp_dir_path'],switch['name'])
if not os.access(tmp_file_path, os.R_OK):
logging.warning("Fail to read %s, expected file %s" % (switch['name'],tmp_file_path))
elif (end_time - os.stat(tmp_file_path).st_mtime) > file_expiration_timeout:
logging.error("Configuration of %s, file %s is older than %d s, file will be ignored" % (switch['name'],tmp_file_path, file_expiration_timeout))
else:
shutil.copy2(tmp_file_path, app_cfg['backup_dir_path'])
logging.info("Saved %s configuration" % (switch['name']))
if app_cfg['git_autocommit'] is True:
git_autocommit(app_cfg)
return 0
if __name__ == '__main__':
main()<|fim▁end|> |
def main():
app_cfg = load_app_cfg() |
<|file_name|>roundd.py<|end_file_name|><|fim▁begin|># (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
# -*- coding: utf-8 -*-
"""
Round raster to set decimals.
"""
from os.path import dirname, exists
import argparse
import os
from osgeo import gdal
import numpy as np
from raster_tools import datasets
# output driver and optinos
DRIVER = gdal.GetDriverByName('gtiff')
OPTIONS = ['compress=deflate', 'tiled=yes']
progress = True
class Exchange(object):
def __init__(self, source_path, target_path):
"""
Read source, create target array.
"""
dataset = gdal.Open(source_path)
band = dataset.GetRasterBand(1)
self.source = band.ReadAsArray()
self.no_data_value = band.GetNoDataValue()
self.shape = self.source.shape
self.kwargs = {
'no_data_value': self.no_data_value,
'projection': dataset.GetProjection(),
'geo_transform': dataset.GetGeoTransform(),
}
self.target_path = target_path
self.target = np.full_like(self.source, self.no_data_value)
def round(self, decimals):
""" Round target. """
active = self.source != self.no_data_value
self.target[active] = self.source[active].round(decimals)
def save(self):
""" Save. """
# prepare dirs
subdir = dirname(self.target_path)
if subdir:
os.makedirs(subdir, exist_ok=True)
# write tiff
array = self.target[np.newaxis]
with datasets.Dataset(array, **self.kwargs) as dataset:
DRIVER.CreateCopy(self.target_path, dataset, options=OPTIONS)
def roundd(source_path, target_path, decimals):
""" Round decimals. """
# skip existing
if exists(target_path):
print('{} skipped.'.format(target_path))
return
# skip when missing sources
if not exists(source_path):
print('Raster source "{}" not found.'.format(source_path))
return
# read
exchange = Exchange(source_path, target_path)
if decimals:
exchange.round(decimals)
# save
exchange.save()
def get_parser():
""" Return argument parser. """
parser = argparse.ArgumentParser(
description=__doc__,
)
# positional arguments
parser.add_argument(
'source_path',
metavar='SOURCE',
)
parser.add_argument(
'target_path',
metavar='TARGET',<|fim▁hole|> dest='decimals',
help='Round the result to this number of decimals.',
)
return parser
def main():
""" Call command with args from parser. """
roundd(**vars(get_parser().parse_args()))<|fim▁end|> | )
parser.add_argument(
'-r', '--round',
type=int, |
<|file_name|>minhash.go<|end_file_name|><|fim▁begin|>// Package minhash implements a probabilistic data structure for computing the
// similarity between datasets.
//
// The original MinHash paper:
// http://cs.brown.edu/courses/cs253/papers/nearduplicate.pdf
//
// The b-Bit MinWise Hashing:
// http://research.microsoft.com/pubs/120078/wfc0398-lips.pdf
// paper explains the idea behind the one-bit MinHash and storage space saving.
package minhash
import (
"encoding/binary"
"errors"
"math"
"math/rand"
)
// Hash32 is a relaxed version of hash.Hash32
type Hash32 interface {
Sum32() uint32
}
const (
mersennePrime = (1 << 61) - 1
)
// http://en.wikipedia.org/wiki/Universal_hashing
type permutation func(uint32) uint32
func createPermutation(a, b uint32, p uint64, m int) permutation {
return func(x uint32) uint32 {
return uint32((uint(a*x+b) % uint(p)) % uint(m))
}
}
// The MinHash signagure
type MinHash struct {
Permutations []permutation
HashValues []uint32
Seed int64
}<|fim▁hole|>// `numPerm` number of permuation functions will
// be generated.
// Higher number of permutations results in better estimation,
// but reduces performance. 128 is a good number to start.
func New(numPerm int, seed int64) (*MinHash, error) {
if numPerm <= 0 {
return nil, errors.New("Cannot have non-positive number of permutations")
}
s := new(MinHash)
s.HashValues = make([]uint32, numPerm)
s.Permutations = make([]permutation, numPerm)
s.Seed = seed
rand.Seed(s.Seed)
var a uint32
for i := 0; i < numPerm; i++ {
s.HashValues[i] = math.MaxUint32
for {
a = rand.Uint32()
if a != 0 {
break
}
}
s.Permutations[i] = createPermutation(a,
rand.Uint32(), mersennePrime, (1 << 32))
}
return s, nil
}
// Clear sets the MinHash back to initial state
func (sig *MinHash) Clear() {
for i := range sig.HashValues {
sig.HashValues[i] = math.MaxUint32
}
}
// Digest consumes a 32-bit hash and then computes all permutations and retains
// the minimum value for each permutations.
// Using a good hash function is decisive in estimation accuracy. See
// http://programmers.stackexchange.com/a/145633.
// You can use the murmur3 hash function in /hashfunc/murmur3 directory.
func (sig *MinHash) Digest(item Hash32) {
hv := item.Sum32()
var phv uint32
for i := range sig.Permutations {
phv = (sig.Permutations[i])(hv)
if phv < sig.HashValues[i] {
sig.HashValues[i] = phv
}
}
}
// Merge takes another MinHash and combines it with MinHash sig,
// making sig the union of both.
func (sig *MinHash) Merge(other *MinHash) error {
if sig.Seed != other.Seed {
return errors.New("Cannot merge MinHashs with different seed.")
}
for i, v := range other.HashValues {
if v < sig.HashValues[i] {
sig.HashValues[i] = v
}
}
return nil
}
// ByteSize returns the size of the serialized object.
func (sig *MinHash) ByteSize() int {
return 8 + 4 + 4*len(sig.HashValues)
}
// Serialize the MinHash signature to bytes stored in buffer
func (sig *MinHash) Serialize(buffer []byte) error {
if len(buffer) < sig.ByteSize() {
return errors.New("The buffer does not have enough space to " +
"hold the MinHash signature.")
}
b := binary.LittleEndian
b.PutUint64(buffer, uint64(sig.Seed))
b.PutUint32(buffer[8:], uint32(len(sig.HashValues)))
offset := 8 + 4
for _, v := range sig.HashValues {
b.PutUint32(buffer[offset:], v)
offset += 4
}
return nil
}
// Deserialize reconstructs a MinHash signature from the buffer
func Deserialize(buffer []byte) (*MinHash, error) {
if len(buffer) < 12 {
return nil, errors.New("The buffer does not contain enough bytes to " +
"reconstruct a MinHash.")
}
b := binary.LittleEndian
seed := int64(b.Uint64(buffer))
numPerm := int(b.Uint32(buffer[8:]))
offset := 12
if len(buffer[offset:]) < numPerm {
return nil, errors.New("The buffer does not contain enough bytes to " +
"reconstruct a MinHash.")
}
m, err := New(numPerm, seed)
if err != nil {
return nil, err
}
for i := range m.HashValues {
m.HashValues[i] = b.Uint32(buffer[offset:])
offset += 4
}
return m, nil
}
// Jaccard computes the estimation of Jaccard Similarity among
// MinHash signatures.
func Jaccard(sigs ...*MinHash) (float64, error) {
if sigs == nil || len(sigs) < 2 {
return 0.0, errors.New("Less than 2 MinHash signatures were given")
}
numPerm := len(sigs[0].Permutations)
for _, sig := range sigs[1:] {
if sigs[0].Seed != sig.Seed {
return 0.0, errors.New("Cannot compare MinHash signatures with " +
"different seed")
}
if numPerm != len(sig.Permutations) {
return 0.0, errors.New("Cannot compare MinHash signatures with " +
"different numbers of permutations")
}
}
intersection := 0
var currRowAgree int
for i := 0; i < numPerm; i++ {
currRowAgree = 1
for _, sig := range sigs[1:] {
if sigs[0].HashValues[i] != sig.HashValues[i] {
currRowAgree = 0
break
}
}
intersection += currRowAgree
}
return float64(intersection) / float64(numPerm), nil
}<|fim▁end|> |
// New creates a new MinHash signature.
// `seed` is used to generate random permutation functions. |
<|file_name|>portpicker_test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unittests for the portpicker module."""
from __future__ import print_function
import errno
import os
import random
import socket
import sys
import unittest
from contextlib import ExitStack
if sys.platform == 'win32':
import _winapi
else:
_winapi = None
try:
# pylint: disable=no-name-in-module
from unittest import mock # Python >= 3.3.
except ImportError:
import mock # https://pypi.python.org/pypi/mock
import portpicker
class PickUnusedPortTest(unittest.TestCase):
def IsUnusedTCPPort(self, port):
return self._bind(port, socket.SOCK_STREAM, socket.IPPROTO_TCP)
def IsUnusedUDPPort(self, port):
return self._bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
def setUp(self):
# So we can Bind even if portpicker.bind is stubbed out.
self._bind = portpicker.bind
portpicker._owned_ports.clear()
portpicker._free_ports.clear()
portpicker._random_ports.clear()
def testPickUnusedPortActuallyWorks(self):
"""This test can be flaky."""
for _ in range(10):
port = portpicker.pick_unused_port()
self.assertTrue(self.IsUnusedTCPPort(port))
self.assertTrue(self.IsUnusedUDPPort(port))
@unittest.skipIf('PORTSERVER_ADDRESS' not in os.environ,
'no port server to test against')
def testPickUnusedCanSuccessfullyUsePortServer(self):
with mock.patch.object(portpicker, '_pick_unused_port_without_server'):
portpicker._pick_unused_port_without_server.side_effect = (
Exception('eek!')
)
# Since _PickUnusedPortWithoutServer() raises an exception, if we
# can successfully obtain a port, the portserver must be working.
port = portpicker.pick_unused_port()
self.assertTrue(self.IsUnusedTCPPort(port))
self.assertTrue(self.IsUnusedUDPPort(port))
@unittest.skipIf('PORTSERVER_ADDRESS' not in os.environ,
'no port server to test against')
def testPickUnusedCanSuccessfullyUsePortServerAddressKwarg(self):
with mock.patch.object(portpicker, '_pick_unused_port_without_server'):
portpicker._pick_unused_port_without_server.side_effect = (
Exception('eek!')
)
# Since _PickUnusedPortWithoutServer() raises an exception, and
# we've temporarily removed PORTSERVER_ADDRESS from os.environ, if
# we can successfully obtain a port, the portserver must be working.
addr = os.environ.pop('PORTSERVER_ADDRESS')
try:
port = portpicker.pick_unused_port(portserver_address=addr)
self.assertTrue(self.IsUnusedTCPPort(port))
self.assertTrue(self.IsUnusedUDPPort(port))
finally:
os.environ['PORTSERVER_ADDRESS'] = addr
@unittest.skipIf('PORTSERVER_ADDRESS' not in os.environ,
'no port server to test against')
def testGetPortFromPortServer(self):
"""Exercise the get_port_from_port_server() helper function."""
for _ in range(10):
port = portpicker.get_port_from_port_server(
os.environ['PORTSERVER_ADDRESS'])
self.assertTrue(self.IsUnusedTCPPort(port))
self.assertTrue(self.IsUnusedUDPPort(port))
def testSendsPidToPortServer(self):
with ExitStack() as stack:
if _winapi:
create_file_mock = mock.Mock()
create_file_mock.return_value = 0
read_file_mock = mock.Mock()
write_file_mock = mock.Mock()
read_file_mock.return_value = (b'42768\n', 0)
stack.enter_context(
mock.patch('_winapi.CreateFile', new=create_file_mock))
stack.enter_context(
mock.patch('_winapi.WriteFile', new=write_file_mock))
stack.enter_context(
mock.patch('_winapi.ReadFile', new=read_file_mock))
port = portpicker.get_port_from_port_server(
'portserver', pid=1234)
write_file_mock.assert_called_once_with(0, b'1234\n')
else:
server = mock.Mock()
server.recv.return_value = b'42768\n'
stack.enter_context(
mock.patch.object(socket, 'socket', return_value=server))
port = portpicker.get_port_from_port_server(
'portserver', pid=1234)
server.sendall.assert_called_once_with(b'1234\n')
self.assertEqual(port, 42768)
def testPidDefaultsToOwnPid(self):
with ExitStack() as stack:
stack.enter_context(
mock.patch.object(os, 'getpid', return_value=9876))
if _winapi:
create_file_mock = mock.Mock()
create_file_mock.return_value = 0
read_file_mock = mock.Mock()
write_file_mock = mock.Mock()
read_file_mock.return_value = (b'52768\n', 0)
stack.enter_context(
mock.patch('_winapi.CreateFile', new=create_file_mock))
stack.enter_context(
mock.patch('_winapi.WriteFile', new=write_file_mock))
stack.enter_context(
mock.patch('_winapi.ReadFile', new=read_file_mock))
port = portpicker.get_port_from_port_server('portserver')
write_file_mock.assert_called_once_with(0, b'9876\n')
else:
server = mock.Mock()
server.recv.return_value = b'52768\n'
stack.enter_context(
mock.patch.object(socket, 'socket', return_value=server))
port = portpicker.get_port_from_port_server('portserver')
server.sendall.assert_called_once_with(b'9876\n')
self.assertEqual(port, 52768)
@mock.patch.dict(os.environ,{'PORTSERVER_ADDRESS': 'portserver'})
def testReusesPortServerPorts(self):
with ExitStack() as stack:
if _winapi:
read_file_mock = mock.Mock()
read_file_mock.side_effect = [
(b'12345\n', 0),
(b'23456\n', 0),
(b'34567\n', 0),
]
stack.enter_context(mock.patch('_winapi.CreateFile'))
stack.enter_context(mock.patch('_winapi.WriteFile'))
stack.enter_context(
mock.patch('_winapi.ReadFile', new=read_file_mock))
else:
server = mock.Mock()
server.recv.side_effect = [b'12345\n', b'23456\n', b'34567\n']
stack.enter_context(
mock.patch.object(socket, 'socket', return_value=server))
self.assertEqual(portpicker.pick_unused_port(), 12345)
self.assertEqual(portpicker.pick_unused_port(), 23456)
portpicker.return_port(12345)
self.assertEqual(portpicker.pick_unused_port(), 12345)
@mock.patch.dict(os.environ,{'PORTSERVER_ADDRESS': ''})
def testDoesntReuseRandomPorts(self):
ports = set()
for _ in range(10):
try:
port = portpicker.pick_unused_port()
except portpicker.NoFreePortFoundError:
# This sometimes happens when not using portserver. Just
# skip to the next attempt.
continue
ports.add(port)<|fim▁hole|> portpicker.return_port(port)
self.assertGreater(len(ports), 5) # Allow some random reuse.
def testReturnsReservedPorts(self):
with mock.patch.object(portpicker, '_pick_unused_port_without_server'):
portpicker._pick_unused_port_without_server.side_effect = (
Exception('eek!'))
# Arbitrary port. In practice you should get this from somewhere
# that assigns ports.
reserved_port = 28465
portpicker.add_reserved_port(reserved_port)
ports = set()
for _ in range(10):
port = portpicker.pick_unused_port()
ports.add(port)
portpicker.return_port(port)
self.assertEqual(len(ports), 1)
self.assertEqual(ports.pop(), reserved_port)
@mock.patch.dict(os.environ,{'PORTSERVER_ADDRESS': ''})
def testFallsBackToRandomAfterRunningOutOfReservedPorts(self):
# Arbitrary port. In practice you should get this from somewhere
# that assigns ports.
reserved_port = 23456
portpicker.add_reserved_port(reserved_port)
self.assertEqual(portpicker.pick_unused_port(), reserved_port)
self.assertNotEqual(portpicker.pick_unused_port(), reserved_port)
def testRandomlyChosenPorts(self):
# Unless this box is under an overwhelming socket load, this test
# will heavily exercise the "pick a port randomly" part of the
# port picking code, but may never hit the "OS assigns a port"
# code.
ports = 0
for _ in range(100):
try:
port = portpicker._pick_unused_port_without_server()
except portpicker.NoFreePortFoundError:
# Without the portserver, pick_unused_port can sometimes fail
# to find a free port. Check that it passes most of the time.
continue
self.assertTrue(self.IsUnusedTCPPort(port))
self.assertTrue(self.IsUnusedUDPPort(port))
ports += 1
# Getting a port shouldn't have failed very often, even on machines
# with a heavy socket load.
self.assertGreater(ports, 95)
def testOSAssignedPorts(self):
self.last_assigned_port = None
def error_for_explicit_ports(port, socket_type, socket_proto):
# Only successfully return a port if an OS-assigned port is
# requested, or if we're checking that the last OS-assigned port
# is unused on the other protocol.
if port == 0 or port == self.last_assigned_port:
self.last_assigned_port = self._bind(port, socket_type,
socket_proto)
return self.last_assigned_port
else:
return None
with mock.patch.object(portpicker, 'bind', error_for_explicit_ports):
# Without server, this can be little flaky, so check that it
# passes most of the time.
ports = 0
for _ in range(100):
try:
port = portpicker._pick_unused_port_without_server()
except portpicker.NoFreePortFoundError:
continue
self.assertTrue(self.IsUnusedTCPPort(port))
self.assertTrue(self.IsUnusedUDPPort(port))
ports += 1
self.assertGreater(ports, 70)
def pickUnusedPortWithoutServer(self):
# Try a few times to pick a port, to avoid flakiness and to make sure
# the code path we want was exercised.
for _ in range(5):
try:
port = portpicker._pick_unused_port_without_server()
except portpicker.NoFreePortFoundError:
continue
else:
self.assertTrue(self.IsUnusedTCPPort(port))
self.assertTrue(self.IsUnusedUDPPort(port))
return
self.fail("Failed to find a free port")
def testPickPortsWithoutServer(self):
# Test the first part of _pick_unused_port_without_server, which
# tries a few random ports and checks is_port_free.
self.pickUnusedPortWithoutServer()
# Now test the second part, the fallback from above, which asks the
# OS for a port.
def mock_port_free(port):
return False
with mock.patch.object(portpicker, 'is_port_free', mock_port_free):
self.pickUnusedPortWithoutServer()
def checkIsPortFree(self):
"""This might be flaky unless this test is run with a portserver."""
# The port should be free initially.
port = portpicker.pick_unused_port()
self.assertTrue(portpicker.is_port_free(port))
cases = [
(socket.AF_INET, socket.SOCK_STREAM, None),
(socket.AF_INET6, socket.SOCK_STREAM, 1),
(socket.AF_INET, socket.SOCK_DGRAM, None),
(socket.AF_INET6, socket.SOCK_DGRAM, 1),
]
# Using v6only=0 on Windows doesn't result in collisions
if not _winapi:
cases.extend([
(socket.AF_INET6, socket.SOCK_STREAM, 0),
(socket.AF_INET6, socket.SOCK_DGRAM, 0),
])
for (sock_family, sock_type, v6only) in cases:
# Occupy the port on a subset of possible protocols.
try:
sock = socket.socket(sock_family, sock_type, 0)
except socket.error:
print('Kernel does not support sock_family=%d' % sock_family,
file=sys.stderr)
# Skip this case, since we cannot occupy a port.
continue
if not hasattr(socket, 'IPPROTO_IPV6'):
v6only = None
if v6only is not None:
try:
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY,
v6only)
except socket.error:
print('Kernel does not support IPV6_V6ONLY=%d' % v6only,
file=sys.stderr)
# Don't care; just proceed with the default.
# Socket may have been taken in the mean time, so catch the
# socket.error with errno set to EADDRINUSE and skip this
# attempt.
try:
sock.bind(('', port))
except socket.error as e:
if e.errno == errno.EADDRINUSE:
raise portpicker.NoFreePortFoundError
raise
# The port should be busy.
self.assertFalse(portpicker.is_port_free(port))
sock.close()
# Now it's free again.
self.assertTrue(portpicker.is_port_free(port))
def testIsPortFree(self):
# This can be quite flaky on a busy host, so try a few times.
for _ in range(10):
try:
self.checkIsPortFree()
except portpicker.NoFreePortFoundError:
pass
else:
return
self.fail("checkPortIsFree failed every time.")
def testIsPortFreeException(self):
port = portpicker.pick_unused_port()
with mock.patch.object(socket, 'socket') as mock_sock:
mock_sock.side_effect = socket.error('fake socket error', 0)
self.assertFalse(portpicker.is_port_free(port))
def testThatLegacyCapWordsAPIsExist(self):
"""The original APIs were CapWords style, 1.1 added PEP8 names."""
self.assertEqual(portpicker.bind, portpicker.Bind)
self.assertEqual(portpicker.is_port_free, portpicker.IsPortFree)
self.assertEqual(portpicker.pick_unused_port, portpicker.PickUnusedPort)
self.assertEqual(portpicker.get_port_from_port_server,
portpicker.GetPortFromPortServer)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | |
<|file_name|>main.go<|end_file_name|><|fim▁begin|>/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"strings"
)
var (
yamlPaths = flag.String("yaml", "", "comma-separated list of input YAML files")
printText = flag.Bool("print-text", false, "print generated proto in text format to stdout")
outputPath = flag.String("output", "", "output path to save generated protobuf data")
)
func errExit(format string, a ...interface{}) {
fmt.Fprintf(os.Stderr, format, a...)
os.Exit(1)
}
func main() {
flag.Parse()
yamlFiles := strings.Split(*yamlPaths, ",")
if len(yamlFiles) == 0 || yamlFiles[0] == "" {
errExit("Must specify one or more YAML files with --yaml\n")
}
if !*printText && *outputPath == "" {
errExit("Must set --print-text or --output\n")
}
if *printText && *outputPath != "" {
errExit("Cannot set both --print-text and --output\n")
}
var c Config
for _, file := range yamlFiles {
b, err := ioutil.ReadFile(file)
if err != nil {
errExit("IO Error : Cannot Read File %s : %v\n", file, err)
}
if err = c.Update(b); err != nil {
errExit("Error parsing file %s : %v\n", file, err)
}
}
if *printText {
if err := c.MarshalText(os.Stdout); err != nil {
errExit("err printing proto: %v", err)
}<|fim▁hole|> if err != nil {
errExit("err encoding proto: %v", err)
}
if err = ioutil.WriteFile(*outputPath, b, 0644); err != nil {
errExit("IO Error : Cannot Write File %v\n", outputPath)
}
}
}<|fim▁end|> | } else {
b, err := c.MarshalBytes() |
<|file_name|>laplace2d-pa.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
import time
import numpy as np
from numba import jit, stencil
@stencil
def jacobi_kernel(A):
return 0.25 * (A[0,1] + A[0,-1] + A[-1,0] + A[1,0])
@jit(parallel=True)
def jacobi_relax_core(A, Anew):
error = 0.0
n = A.shape[0]
m = A.shape[1]
Anew = jacobi_kernel(A)
error = np.max(np.abs(Anew - A))
return error
def main():
NN = 3000
NM = 3000
A = np.zeros((NN, NM), dtype=np.float64)
Anew = np.zeros((NN, NM), dtype=np.float64)
n = NN
m = NM
iter_max = 1000
tol = 1.0e-6
error = 1.0
for j in range(n):
A[j, 0] = 1.0
Anew[j, 0] = 1.0
print("Jacobi relaxation Calculation: %d x %d mesh" % (n, m))
timer = time.time()
iter = 0
while error > tol and iter < iter_max:
error = jacobi_relax_core(A, Anew)
# swap A and Anew
tmp = A
A = Anew
Anew = tmp
if iter % 100 == 0:
print("%5d, %0.6f (elapsed: %f s)" % (iter, error, time.time()-timer))
iter += 1
runtime = time.time() - timer
print(" total: %f s" % runtime)
if __name__ == '__main__':
main()<|fim▁end|> | #!/usr/bin/env python
from __future__ import print_function |
<|file_name|>control_get_firmware.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
"""
# control_get_firmware.py: get firmware version of Gemalto readers
# Copyright (C) 2009-2012 Ludovic Rousseau
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#<|fim▁hole|>
from smartcard.System import readers
from smartcard.pcsc.PCSCPart10 import (SCARD_SHARE_DIRECT,
SCARD_LEAVE_CARD, SCARD_CTL_CODE, getTlvProperties)
for reader in readers():
cardConnection = reader.createConnection()
cardConnection.connect(mode=SCARD_SHARE_DIRECT,
disposition=SCARD_LEAVE_CARD)
print "Reader:", reader
# properties returned by IOCTL_FEATURE_GET_TLV_PROPERTIES
properties = getTlvProperties(cardConnection)
# Gemalto devices supports a control code to get firmware
if properties['PCSCv2_PART10_PROPERTY_wIdVendor'] == 0x08E6:
get_firmware = [0x02]
IOCTL_SMARTCARD_VENDOR_IFD_EXCHANGE = SCARD_CTL_CODE(1)
res = cardConnection.control(IOCTL_SMARTCARD_VENDOR_IFD_EXCHANGE,
get_firmware)
print " Firmware:", "".join([chr(x) for x in res])
else:
print " Not a Gemalto reader"
try:
res = properties['PCSCv2_PART10_PROPERTY_sFirmwareID']
print " Firmware:", frimware
except KeyError:
print " PCSCv2_PART10_PROPERTY_sFirmwareID not supported"<|fim▁end|> | # You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>. |
<|file_name|>account.js<|end_file_name|><|fim▁begin|>var passport = require('passport');
var LocalStrategy = require('passport-local').Strategy;
var bodyParser = require('body-parser')
var chip = require('../../chip.js');
var Syndicate = require('../../model/syndicate');
var Account = require('../../model/account');
var Group = require('../../model/group');
var Engine = require('../../model/engine');
var util = require('../../lib/util');
module.exports = function(face, rootPath) {
face.post('/api/register', bodyParser.json(), bodyParser.urlencoded(), register);
face.get('/groups/:token', function(req, res) {
res.sendFile(rootPath + 'assets/static/groups.html');
});
face.get('/api/groups/:token', function(req, res) {
Account.getByToken(req.params.token).then(function(account) {
return Engine.get(account.eid);
}).then(function(engine) {
var e = chip.getEngine(engine.sid);
// I dunno what's going on here, but sometimes this gets weird
e.loadGroups().then(function() { res.json({ synd: e.synd, groups: e.groups }); });
});
});
face.route('/api/group/:gid')
.get(getGroup)
.post(bodyParser.json(), bodyParser.urlencoded(), updateGroup, getGroup);
face.put('/api/group', createGroup);
};
function register(req, res) {
return Syndicate.getByName(req.body.synd).then(function(synd) {
if (synd) {
return Engine.getBySynd(synd.sid).then(function(engine) {
if (engine) {
res.json({ error: 'Syndicate already registered' });
} else {
var e = Engine.create({
sid: synd.sid
});
var token = Math.random().toString(36).slice(2);
return e.save().then(function() {
return chip.db.queryAsync('INSERT INTO account SET ?', {
email: req.body.email,
eid: e.eid,
token: token
});
}).then(function(result) {
chip.addEngine(e);
res.json({ token: token });
});
}
});
} else {
res.json({ error: 'Syndicate not found' });
}
})
};<|fim▁hole|> res.json(g);
})
}
function createGroup(req, res) {
var g = Group.create({
sid: req.body.sid,
groupme_id: req.body.groupme_id,
bot_key: req.body.bot_key,
output: req.body.output == 'true',
scout: req.body.scout == 'true',
debug: req.body.debug == 'true'
});
return g.save().then(function() {
req.params.gid = g.gid;
chip.addGroup(g);
res.json(g);
});
}
function updateGroup(req, res, next) {
return Group.get(req.params.gid).then(function(group) {
var g = chip.getGroup(group.groupme_id);
if (g) chip.removeGroup(g.groupme_id);
else g = group;
g.groupme_id = req.body.groupme_id;
g.bot_key = req.body.bot_key;
g.bot_id = null;
g.output = req.body.output == 'true';
g.scout = req.body.scout == 'true';
g.debug = req.body.debug == 'true';
return g.save().then(function() {
delete g.bot_id;
g.getEngine().loadGroups().then(function() { res.json(g); });
});
});
}
passport.use(new LocalStrategy(
function(key, password, done) {
//chip.db.queryAsync(
/*
connection.query("SELECT * FROM player p WHERE sid IS NOT NULL AND name='"+username+"'",
function(err, result) {
if (err) throw err;
if (result.length > 0 && password == 'oredic37') {
var player = User.createPlayer(result[0]);
done(null, player);
} else {
done(null, null, { message: 'Invalid login' });
}
});
*/
})
);
passport.serializeUser(function(player, done) {
done(null, player.pid);
});
passport.deserializeUser(function(pid, done) {
User.getUser(pid).then(function(user) {
done(null, user);
});
});
function loggedIn(req, res, next) {
if (req.isAuthenticated()) {
next();
} else {
req.session.loginRedirect = req.url;
res.redirect('/');
}
}<|fim▁end|> |
function getGroup(req, res) {
return Group.get(req.params.gid).then(function(group) {
var g = chip.getGroup(group.groupme_id); |
<|file_name|>same-origin-test.js<|end_file_name|><|fim▁begin|>// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
function waitUntilIdle() {
return new Promise(resolve=>{
window.requestIdleCallback(()=>resolve());
});
}
(async function() {
TestRunner.addResult(`Tests V8 code cache for javascript resources\n`);
await TestRunner.loadLegacyModule('timeline'); await TestRunner.loadTestModule('performance_test_runner');
await TestRunner.showPanel('timeline');
// Clear browser cache to avoid any existing entries for the fetched
// scripts in the cache.
SDK.multitargetNetworkManager.clearBrowserCache();
// There are two scripts:
// [A] http://127.0.0.1:8000/devtools/resources/v8-cache-script.cgi
// [B] http://localhost:8000/devtools/resources/v8-cache-script.cgi
// An iframe that loads [A].
// The script is executed as a parser-inserted script,
// to keep the ScriptResource on the MemoryCache.
// ScriptResources for dynamically-inserted <script>s can be
// garbage-collected and thus removed from MemoryCache after its execution.
const scope = 'resources/same-origin-script.html';
// An iframe that loads [B].
const scopeCrossOrigin = 'resources/cross-origin-script.html';
TestRunner.addResult('--- Trace events related to code caches ------');
await PerformanceTestRunner.startTimeline();
async function stopAndPrintTimeline() {
await PerformanceTestRunner.stopTimeline();
await PerformanceTestRunner.printTimelineRecordsWithDetails(
TimelineModel.TimelineModel.RecordType.CompileScript,
TimelineModel.TimelineModel.RecordType.CacheScript);
}
async function expectationComment(msg) {
await stopAndPrintTimeline();
TestRunner.addResult(msg);
await PerformanceTestRunner.startTimeline();
}
// Load [A] thrice. With the current V8 heuristics (defined in
// third_party/blink/renderer/bindings/core/v8/v8_code_cache.cc) we produce
// cache on second fetch and consume it in the third fetch. This tests these
// heuristics.
// Note that addIframe() waits for iframe's load event, which waits for the
// <script> loading.
await expectationComment('Load [A] 1st time. Produce timestamp. -->');
await TestRunner.addIframe(scope);
await expectationComment('Load [A] 2nd time. Produce code cache. -->');
await TestRunner.addIframe(scope);
await waitUntilIdle();
await expectationComment('Load [A] 3rd time. Consume code cache. -->');
await TestRunner.addIframe(scope);
await expectationComment('Load [B]. Should not use the cached code. -->');
await TestRunner.addIframe(scopeCrossOrigin);
await expectationComment('Load [A] again from MemoryCache. ' +
'Should use the cached code. -->');
await TestRunner.addIframe(scope);
await expectationComment('Clear [A] from MemoryCache. -->');
// Blink evicts previous Resource when a new request to the same URL but with
// different resource type is started. We fetch() to the URL of [A], and thus
// evicts the previous ScriptResource of [A].
await TestRunner.evaluateInPageAsync(
`fetch('/devtools/resources/v8-cache-script.cgi')`);<|fim▁hole|> // hits Disk Cache.
await TestRunner.addIframe(scope);
await stopAndPrintTimeline();
TestRunner.addResult('-----------------------------------------------');
TestRunner.completeTest();
})();<|fim▁end|> |
await expectationComment('Load [A] from Disk Cache. -->');
// As we cleared [A] from MemoryCache, this doesn't hit MemoryCache, but still |
<|file_name|>profitbricks.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: profitbricks
short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine.
description:
- Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0
version_added: "2.0"
options:
auto_increment:
description:
- Whether or not to increment a single number in the name for created virtual machines.
default: yes
choices: ["yes", "no"]
name:
description:
- The name of the virtual machine.
required: true
image:
description:
- The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8.
required: true
image_password:
description:
- Password set for the administrative user.
required: false
version_added: '2.2'
ssh_keys:
description:
- Public SSH keys allowing access to the virtual machine.
required: false
version_added: '2.2'
datacenter:
description:
- The datacenter to provision this virtual machine.
required: false
default: null
cores:
description:
- The number of CPU cores to allocate to the virtual machine.
required: false
default: 2
ram:
description:
- The amount of memory to allocate to the virtual machine.
required: false
default: 2048
cpu_family:
description:
- The CPU family type to allocate to the virtual machine.
required: false
default: AMD_OPTERON
choices: [ "AMD_OPTERON", "INTEL_XEON" ]
version_added: '2.2'
volume_size:
description:
- The size in GB of the boot volume.
required: false
default: 10
bus:
description:
- The bus type for the volume.
required: false
default: VIRTIO
choices: [ "IDE", "VIRTIO"]
instance_ids:
description:
- list of instance ids, currently only used when state='absent' to remove instances.
required: false
count:
description:
- The number of virtual machines to create.
required: false
default: 1
location:
description:
- The datacenter location. Use only if you want to create the Datacenter or else this value is ignored.
required: false
default: us/las
choices: [ "us/las", "de/fra", "de/fkb" ]
assign_public_ip:
description:
- This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created.
required: false
default: false
lan:
description:
- The ID of the LAN you wish to add the servers to.
required: false
default: 1
subscription_user:
description:
- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable.
required: false
default: null
subscription_password:
description:
- THe ProfitBricks password. Overrides the PB_PASSWORD environement variable.
required: false
default: null
wait:
description:
- wait for the instance to be in state 'running' before returning
required: false
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
remove_boot_volume:
description:
- remove the bootVolume of the virtual machine you're destroying.
required: false
default: "yes"
choices: ["yes", "no"]
state:
description:
- create or terminate instances
required: false
default: 'present'
choices: [ "running", "stopped", "absent", "present" ]
requirements:
- "profitbricks"
- "python >= 2.6"
author: Matt Baldwin ([email protected])
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Provisioning example. This will create three servers and enumerate their names.
- profitbricks:
datacenter: Tardis One
name: web%02d.stackpointcloud.com
cores: 4
ram: 2048
volume_size: 50
cpu_family: INTEL_XEON
image: a3eae284-a2fe-11e4-b187-5f1f641608c8
location: us/las
count: 3
assign_public_ip: true
# Removing Virtual machines
- profitbricks:
datacenter: Tardis One
instance_ids:
- 'web001.stackpointcloud.com'
- 'web002.stackpointcloud.com'
- 'web003.stackpointcloud.com'
wait_timeout: 500
state: absent
# Starting Virtual Machines.
- profitbricks:
datacenter: Tardis One
instance_ids:
- 'web001.stackpointcloud.com'
- 'web002.stackpointcloud.com'
- 'web003.stackpointcloud.com'
wait_timeout: 500
state: running
# Stopping Virtual Machines
- profitbricks:
datacenter: Tardis One
instance_ids:
- 'web001.stackpointcloud.com'
- 'web002.stackpointcloud.com'
- 'web003.stackpointcloud.com'
wait_timeout: 500
state: stopped
'''
import re
import uuid
import time
HAS_PB_SDK = True
try:
from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN
except ImportError:
HAS_PB_SDK = False
LOCATIONS = ['us/las',
'de/fra',
'de/fkb']
uuid_match = re.compile(
'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
if not promise: return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
time.sleep(5)
operation_result = profitbricks.get_request(
request_id=promise['requestId'],
status=True)
if operation_result['metadata']['status'] == "DONE":
return
elif operation_result['metadata']['status'] == "FAILED":
raise Exception(
'Request failed to complete ' + msg + ' "' + str(
promise['requestId']) + '" to complete.')
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
) + '" to complete.')
def _create_machine(module, profitbricks, datacenter, name):
cores = module.params.get('cores')
ram = module.params.get('ram')
cpu_family = module.params.get('cpu_family')
volume_size = module.params.get('volume_size')
disk_type = module.params.get('disk_type')
image_password = module.params.get('image_password')
ssh_keys = module.params.get('ssh_keys')
bus = module.params.get('bus')
lan = module.params.get('lan')
assign_public_ip = module.params.get('assign_public_ip')
subscription_user = module.params.get('subscription_user')
subscription_password = module.params.get('subscription_password')
location = module.params.get('location')
image = module.params.get('image')
assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
if assign_public_ip:
public_found = False
lans = profitbricks.list_lans(datacenter)
for lan in lans['items']:
if lan['properties']['public']:
public_found = True
lan = lan['id']
if not public_found:
i = LAN(
name='public',
public=True)
lan_response = profitbricks.create_lan(datacenter, i)
_wait_for_completion(profitbricks, lan_response,
wait_timeout, "_create_machine")
lan = lan_response['id']
v = Volume(
name=str(uuid.uuid4()).replace('-', '')[:10],
size=volume_size,
image=image,
image_password=image_password,
ssh_keys=ssh_keys,
disk_type=disk_type,
bus=bus)
n = NIC(
lan=int(lan)
)
s = Server(
name=name,
ram=ram,
cores=cores,
cpu_family=cpu_family,
create_volumes=[v],
nics=[n],
)
try:
create_server_response = profitbricks.create_server(
datacenter_id=datacenter, server=s)
_wait_for_completion(profitbricks, create_server_response,
wait_timeout, "create_virtual_machine")
server_response = profitbricks.get_server(
datacenter_id=datacenter,
server_id=create_server_response['id'],
depth=3
)
except Exception as e:
module.fail_json(msg="failed to create the new server: %s" % str(e))
else:
return server_response
def _startstop_machine(module, profitbricks, datacenter_id, server_id):<|fim▁hole|> profitbricks.start_server(datacenter_id, server_id)
else:
profitbricks.stop_server(datacenter_id, server_id)
return True
except Exception as e:
module.fail_json(msg="failed to start or stop the virtual machine %s: %s" % (name, str(e)))
def _create_datacenter(module, profitbricks):
datacenter = module.params.get('datacenter')
location = module.params.get('location')
wait_timeout = module.params.get('wait_timeout')
i = Datacenter(
name=datacenter,
location=location
)
try:
datacenter_response = profitbricks.create_datacenter(datacenter=i)
_wait_for_completion(profitbricks, datacenter_response,
wait_timeout, "_create_datacenter")
return datacenter_response
except Exception as e:
module.fail_json(msg="failed to create the new server(s): %s" % str(e))
def create_virtual_machine(module, profitbricks):
"""
Create new virtual machine
module : AnsibleModule object
profitbricks: authenticated profitbricks object
Returns:
True if a new virtual machine was created, false otherwise
"""
datacenter = module.params.get('datacenter')
name = module.params.get('name')
auto_increment = module.params.get('auto_increment')
count = module.params.get('count')
lan = module.params.get('lan')
wait_timeout = module.params.get('wait_timeout')
failed = True
datacenter_found = False
virtual_machines = []
virtual_machine_ids = []
# Locate UUID for datacenter if referenced by name.
datacenter_list = profitbricks.list_datacenters()
datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
if datacenter_id:
datacenter_found = True
if not datacenter_found:
datacenter_response = _create_datacenter(module, profitbricks)
datacenter_id = datacenter_response['id']
_wait_for_completion(profitbricks, datacenter_response,
wait_timeout, "create_virtual_machine")
if auto_increment:
numbers = set()
count_offset = 1
try:
name % 0
except TypeError, e:
if e.message.startswith('not all'):
name = '%s%%d' % name
else:
module.fail_json(msg=e.message)
number_range = xrange(count_offset, count_offset + count + len(numbers))
available_numbers = list(set(number_range).difference(numbers))
names = []
numbers_to_use = available_numbers[:count]
for number in numbers_to_use:
names.append(name % number)
else:
names = [name]
# Prefetch a list of servers for later comparison.
server_list = profitbricks.list_servers(datacenter_id)
for name in names:
# Skip server creation if the server already exists.
if _get_server_id(server_list, name):
continue
create_response = _create_machine(module, profitbricks, str(datacenter_id), name)
nics = profitbricks.list_nics(datacenter_id, create_response['id'])
for n in nics['items']:
if lan == n['properties']['lan']:
create_response.update({'public_ip': n['properties']['ips'][0]})
virtual_machines.append(create_response)
failed = False
results = {
'failed': failed,
'machines': virtual_machines,
'action': 'create',
'instance_ids': {
'instances': [i['id'] for i in virtual_machines],
}
}
return results
def remove_virtual_machine(module, profitbricks):
"""
Removes a virtual machine.
This will remove the virtual machine along with the bootVolume.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Not yet supported: handle deletion of attached data disks.
Returns:
True if a new virtual server was deleted, false otherwise
"""
datacenter = module.params.get('datacenter')
instance_ids = module.params.get('instance_ids')
remove_boot_volume = module.params.get('remove_boot_volume')
changed = False
if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
# Locate UUID for datacenter if referenced by name.
datacenter_list = profitbricks.list_datacenters()
datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
if not datacenter_id:
module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
# Prefetch server list for later comparison.
server_list = profitbricks.list_servers(datacenter_id)
for instance in instance_ids:
# Locate UUID for server if referenced by name.
server_id = _get_server_id(server_list, instance)
if server_id:
# Remove the server's boot volume
if remove_boot_volume:
_remove_boot_volume(module, profitbricks, datacenter_id, server_id)
# Remove the server
try:
server_response = profitbricks.delete_server(datacenter_id, server_id)
except Exception as e:
module.fail_json(msg="failed to terminate the virtual server: %s" % str(e))
else:
changed = True
return changed
def _remove_boot_volume(module, profitbricks, datacenter_id, server_id):
"""
Remove the boot volume from the server
"""
try:
server = profitbricks.get_server(datacenter_id, server_id)
volume_id = server['properties']['bootVolume']['id']
volume_response = profitbricks.delete_volume(datacenter_id, volume_id)
except Exception as e:
module.fail_json(msg="failed to remove the server's boot volume: %s" % str(e))
def startstop_machine(module, profitbricks, state):
"""
Starts or Stops a virtual machine.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True when the servers process the action successfully, false otherwise.
"""
if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
changed = False
datacenter = module.params.get('datacenter')
instance_ids = module.params.get('instance_ids')
# Locate UUID for datacenter if referenced by name.
datacenter_list = profitbricks.list_datacenters()
datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
if not datacenter_id:
module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
# Prefetch server list for later comparison.
server_list = profitbricks.list_servers(datacenter_id)
for instance in instance_ids:
# Locate UUID of server if referenced by name.
server_id = _get_server_id(server_list, instance)
if server_id:
_startstop_machine(module, profitbricks, datacenter_id, server_id)
changed = True
if wait:
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
matched_instances = []
for res in profitbricks.list_servers(datacenter_id)['items']:
if state == 'running':
if res['properties']['vmState'].lower() == state:
matched_instances.append(res)
elif state == 'stopped':
if res['properties']['vmState'].lower() == 'shutoff':
matched_instances.append(res)
if len(matched_instances) < len(instance_ids):
time.sleep(5)
else:
break
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime())
return (changed)
def _get_datacenter_id(datacenters, identity):
"""
Fetch and return datacenter UUID by datacenter name if found.
"""
for datacenter in datacenters['items']:
if identity in (datacenter['properties']['name'], datacenter['id']):
return datacenter['id']
return None
def _get_server_id(servers, identity):
"""
Fetch and return server UUID by server name if found.
"""
for server in servers['items']:
if identity in (server['properties']['name'], server['id']):
return server['id']
return None
def main():
module = AnsibleModule(
argument_spec=dict(
datacenter=dict(),
name=dict(),
image=dict(),
cores=dict(type='int', default=2),
ram=dict(type='int', default=2048),
cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'],
default='AMD_OPTERON'),
volume_size=dict(type='int', default=10),
disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
image_password=dict(default=None),
ssh_keys=dict(type='list', default=[]),
bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
lan=dict(type='int', default=1),
count=dict(type='int', default=1),
auto_increment=dict(type='bool', default=True),
instance_ids=dict(type='list', default=[]),
subscription_user=dict(),
subscription_password=dict(),
location=dict(choices=LOCATIONS, default='us/las'),
assign_public_ip=dict(type='bool', default=False),
wait=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=600),
remove_boot_volume=dict(type='bool', default=True),
state=dict(default='present'),
)
)
if not HAS_PB_SDK:
module.fail_json(msg='profitbricks required for this module')
subscription_user = module.params.get('subscription_user')
subscription_password = module.params.get('subscription_password')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
profitbricks = ProfitBricksService(
username=subscription_user,
password=subscription_password)
state = module.params.get('state')
if state == 'absent':
if not module.params.get('datacenter'):
module.fail_json(msg='datacenter parameter is required ' +
'for running or stopping machines.')
try:
(changed) = remove_virtual_machine(module, profitbricks)
module.exit_json(changed=changed)
except Exception as e:
module.fail_json(msg='failed to set instance state: %s' % str(e))
elif state in ('running', 'stopped'):
if not module.params.get('datacenter'):
module.fail_json(msg='datacenter parameter is required for ' +
'running or stopping machines.')
try:
(changed) = startstop_machine(module, profitbricks, state)
module.exit_json(changed=changed)
except Exception as e:
module.fail_json(msg='failed to set instance state: %s' % str(e))
elif state == 'present':
if not module.params.get('name'):
module.fail_json(msg='name parameter is required for new instance')
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
if not module.params.get('subscription_user'):
module.fail_json(msg='subscription_user parameter is ' +
'required for new instance')
if not module.params.get('subscription_password'):
module.fail_json(msg='subscription_password parameter is ' +
'required for new instance')
try:
(machine_dict_array) = create_virtual_machine(module, profitbricks)
module.exit_json(**machine_dict_array)
except Exception as e:
module.fail_json(msg='failed to set instance state: %s' % str(e))
from ansible.module_utils.basic import *
main()<|fim▁end|> | state = module.params.get('state')
try:
if state == 'running': |
<|file_name|>vote-taker.componet.ts<|end_file_name|><|fim▁begin|>import {Component} from '@angular/core';
import { HEROES } from './mock-heroes';
@Component({
selector: 'vote-taker',
template: `<|fim▁hole|> [name]="voter.name"
(onVoted)="onVoted($event)">
</my-voter>
`
})
export class VoteTakerComponent {
public agreed = 0;
public disagreed = 0;
// public voters = ['Mr. IQ', 'Ms. Universe aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaal', 'Bombasto'];
public heroes = HEROES;
public onVoted(agreed: boolean) {
agreed ? this.agreed++ : this.disagreed++;
}
}<|fim▁end|> | <h2>Should mankind colonize the Universe?</h2>
<h3>Agree: {{agreed}}, Disagree: {{disagreed}}</h3>
<my-voter *ngFor="let voter of heroes"
length=5 |
<|file_name|>StopMojo.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.<|fim▁hole|>
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.commons.io.IOUtils;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugins.annotations.LifecyclePhase;
import org.apache.maven.plugins.annotations.Mojo;
/**
* Stops the running launchpad instances.
*
*/
@Mojo(
name = "stop",
defaultPhase = LifecyclePhase.POST_INTEGRATION_TEST,
threadSafe = true
)
public class StopMojo extends StartMojo {
@Override
public void execute() throws MojoExecutionException {
if (this.skipLaunchpad) {
this.getLog().info("Executing of the stop-multiple launchpad mojo is disabled by configuration.");
return;
}
// read configurations
final Properties launchpadConfigProps = new Properties();
Reader reader = null;
try {
reader = new FileReader(this.systemPropertiesFile);
launchpadConfigProps.load(reader);
} catch ( final IOException ioe) {
throw new MojoExecutionException("Unable to read launchpad runner configuration properties.", ioe);
} finally {
IOUtils.closeQuietly(reader);
}
final int instances = Integer.valueOf(launchpadConfigProps.getProperty("launchpad.instances"));
final List<ProcessDescription> configurations = new ArrayList<ProcessDescription>();
for(int i=1;i<=instances;i++) {
final String id = launchpadConfigProps.getProperty("launchpad.instance.id." + String.valueOf(i));
final ProcessDescription config = ProcessDescriptionProvider.getInstance().getRunConfiguration(id);
if ( config == null ) {
getLog().warn("No launchpad configuration found for instance " + id);
} else {
configurations.add(config);
}
}
if (configurations.size() > 0) {
getLog().info(new StringBuilder("Stopping ").append(configurations.size()).append(" Launchpad instances").toString());
for (final ProcessDescription cfg : configurations) {
try {
LauncherCallable.stop(this.getLog(), cfg);
ProcessDescriptionProvider.getInstance().removeRunConfiguration(cfg.getId());
} catch (Exception e) {
throw new MojoExecutionException("Could not stop launchpad " + cfg.getId(), e);
}
}
} else {
getLog().warn("No stored configuration file was found at " + this.systemPropertiesFile + " - no Launchapd will be stopped");
}
}
}<|fim▁end|> | * See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.sling.maven.slingstart.run; |
<|file_name|>test_get_query_entities.py<|end_file_name|><|fim▁begin|>import sqlalchemy as sa
from sqlalchemy_utils import get_query_entities
from tests import TestCase
class TestGetQueryEntities(TestCase):
def create_models(self):
class TextItem(self.Base):
__tablename__ = 'text_item'
id = sa.Column(sa.Integer, primary_key=True)
type = sa.Column(sa.Unicode(255))
__mapper_args__ = {
'polymorphic_on': type,
}
class Article(TextItem):
__tablename__ = 'article'
id = sa.Column(
sa.Integer, sa.ForeignKey(TextItem.id), primary_key=True
)
category = sa.Column(sa.Unicode(255))
__mapper_args__ = {
'polymorphic_identity': u'article'
}
class BlogPost(TextItem):
__tablename__ = 'blog_post'
id = sa.Column(
sa.Integer, sa.ForeignKey(TextItem.id), primary_key=True
)
__mapper_args__ = {
'polymorphic_identity': u'blog_post'
}
self.TextItem = TextItem
self.Article = Article
self.BlogPost = BlogPost
def test_mapper(self):
query = self.session.query(sa.inspect(self.TextItem))
assert list(get_query_entities(query)) == [self.TextItem]
def test_entity(self):
query = self.session.query(self.TextItem)
assert list(get_query_entities(query)) == [self.TextItem]
def test_instrumented_attribute(self):
query = self.session.query(self.TextItem.id)
assert list(get_query_entities(query)) == [self.TextItem]
def test_column(self):
query = self.session.query(self.TextItem.__table__.c.id)
assert list(get_query_entities(query)) == [self.TextItem.__table__]
def test_aliased_selectable(self):
selectable = sa.orm.with_polymorphic(self.TextItem, [self.BlogPost])
query = self.session.query(selectable)<|fim▁hole|> def test_joined_entity(self):
query = self.session.query(self.TextItem).join(
self.BlogPost, self.BlogPost.id == self.TextItem.id
)
assert list(get_query_entities(query)) == [
self.TextItem, self.BlogPost
]
def test_joined_aliased_entity(self):
alias = sa.orm.aliased(self.BlogPost)
query = self.session.query(self.TextItem).join(
alias, alias.id == self.TextItem.id
)
assert list(get_query_entities(query)) == [self.TextItem, alias]
def test_column_entity_with_label(self):
query = self.session.query(self.Article.id.label('id'))
assert list(get_query_entities(query)) == [sa.inspect(self.Article)]
def test_with_subquery(self):
number_of_articles = (
sa.select(
[sa.func.count(self.Article.id)],
)
.select_from(
self.Article.__table__
)
).label('number_of_articles')
query = self.session.query(self.Article, number_of_articles)
assert list(get_query_entities(query)) == [self.Article, number_of_articles]
def test_aliased_entity(self):
alias = sa.orm.aliased(self.Article)
query = self.session.query(alias)
assert list(get_query_entities(query)) == [alias]<|fim▁end|> | assert list(get_query_entities(query)) == [selectable]
|
<|file_name|>home.client.controller.test.js<|end_file_name|><|fim▁begin|>'use strict';
(function() {
describe('HomeController', function() {
//Initialize global variables
var scope,
HomeController,
myFactory;
// Load the main application module
beforeEach(module(ApplicationConfiguration.applicationModuleName));
beforeEach(inject(function($controller, $rootScope) {
scope = $rootScope.$new();
HomeController = $controller('HomeController', {
$scope: scope
});
}));
it('should expose the authentication service', function() {
expect(scope.authentication).toBeTruthy();
});
it('should see the result', function(){<|fim▁hole|> });
})();<|fim▁end|> | scope.feedSrc = 'http://rss.cnn.com/rss/cnn_topstories.rss';
}); |
<|file_name|>not-found.page.ts<|end_file_name|><|fim▁begin|>import { Component } from '@angular/core';
import { Store } from '@ngrx/store';
import { AppState } from '../store/root.reducer';
@Component({
template: `
<ais-header pageTitle="Page not found"></ais-header>
<ion-content padding class="marginTopAdjusted">
<ion-item>
Page is not found at {{(routerDetails$ | async).path}}
</ion-item>
</ion-content>
`
})
export class NotFoundPage {
routerDetails$ = this.store.select(store => store.router);
constructor(private store:Store<AppState>) {}<|fim▁hole|><|fim▁end|> | } |
<|file_name|>block-title.d.ts<|end_file_name|><|fim▁begin|>import { ComponentType } from 'react';<|fim▁hole|> interface Props {
children?: never;
clientId: string;
}
}
declare const BlockTitle: ComponentType<BlockTitle.Props>;
export default BlockTitle;<|fim▁end|> |
declare namespace BlockTitle { |
<|file_name|>webide.py<|end_file_name|><|fim▁begin|># coding: utf-8
import os
import sys
import logging
import webbrowser
import socket
import time
import json
import traceback
import cv2
import tornado.ioloop
import tornado.web
import tornado.websocket
from tornado.concurrent import run_on_executor
from concurrent.futures import ThreadPoolExecutor # `pip install futures` for python2
import atx
from atx import logutils
from atx import imutils
from atx import base
__dir__ = os.path.dirname(os.path.abspath(__file__))
log = logutils.getLogger("webide", level=logging.DEBUG)
log.setLevel(logging.DEBUG)
IMAGE_PATH = ['.', 'imgs', 'images']
workdir = '.'
device = None
atx_settings = {}
def read_file(filename, default=''):
if not os.path.isfile(filename):
return default
with open(filename, 'rb') as f:
return f.read()
def write_file(filename, content):
with open(filename, 'w') as f:
f.write(content.encode('utf-8'))
def get_valid_port():
for port in range(10010, 10100):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('127.0.0.1', port))<|fim▁hole|> return port
raise SystemError("Can not find a unused port, amazing!")
class FakeStdout(object):
def __init__(self, fn=sys.stdout.write):
self._fn = fn
def write(self, s):
self._fn(s)
def flush(self):
pass
class ImageHandler(tornado.web.RequestHandler):
def get(self):
imgs = base.list_images(path=IMAGE_PATH)
images = []
for name in imgs:
realpath = name.replace('\\', '/') # fix for windows
name = os.path.basename(name).split('@')[0]
images.append([name, realpath])
self.write({
'images': images,
'baseURL': self.request.protocol + '://' + self.request.host+'/static_imgs/'
})
class MainHandler(tornado.web.RequestHandler):
def get(self):
imgs = base.list_images(path=IMAGE_PATH)
imgs = [(os.path.basename(name), name) for name in imgs]
self.render('index.html', images=imgs)
def post(self):
print self.get_argument('xml_text')
self.write("Good")
class DebugWebSocket(tornado.websocket.WebSocketHandler):
executor = ThreadPoolExecutor(max_workers=1)
def open(self):
log.info("WebSocket connected")
self._run = False
def _highlight_block(self, id):
self.write_message({'type': 'highlight', 'id': id})
if not self._run:
raise RuntimeError("stopped")
else:
time.sleep(.1)
def write_console(self, s):
self.write_message({'type': 'console', 'output': s})
def run_blockly(self, code):
filename = '__tmp.py'
fake_sysout = FakeStdout(self.write_console)
__sysout = sys.stdout
sys.stdout = fake_sysout # TODOs
self.write_message({'type': 'console', 'output': '# '+time.strftime('%H:%M:%S') + ' start running\n'})
try:
# python code always UTF-8
code = code.encode('utf-8')
# hot patch
code = code.replace('atx.click_image', 'd.click_image')
exec code in {
'highlight_block': self._highlight_block,
'__name__': '__main__',
'__file__': filename}
except RuntimeError as e:
if str(e) != 'stopped':
raise
print 'Program stopped'
except Exception as e:
self.write_message({'type': 'traceback', 'output': traceback.format_exc()})
finally:
self._run = False
self.write_message({'type': 'run', 'status': 'ready'})
sys.stdout = __sysout
@run_on_executor
def background_task(self, code):
self.write_message({'type': 'run', 'status': 'running'})
self.run_blockly(code)
return True
@tornado.gen.coroutine
def on_message(self, message_text):
message = None
try:
message = json.loads(message_text)
except:
print 'Invalid message from browser:', message_text
return
command = message.get('command')
if command == 'refresh':
imgs = base.list_images(path=IMAGE_PATH)
imgs = [dict(
path=name.replace('\\', '/'), name=os.path.basename(name)) for name in imgs]
self.write_message({'type': 'image_list', 'data': list(imgs)})
elif command == 'stop':
self._run = False
self.write_message({'type': 'run', 'notify': '停止中'})
elif command == 'run':
if self._run:
self.write_message({'type': 'run', 'notify': '运行中'})
return
self._run = True
res = yield self.background_task(message.get('code'))
self.write_message({'type': 'run', 'status': 'ready', 'notify': '运行结束', 'result': res})
else:
self.write_message(u"You said: " + message)
def on_close(self):
log.info("WebSocket closed")
def check_origin(self, origin):
return True
class WorkspaceHandler(tornado.web.RequestHandler):
def get(self):
ret = {}
ret['xml_text'] = read_file('blockly.xml', default='<xml xmlns="http://www.w3.org/1999/xhtml"></xml>')
ret['python_text'] = read_file('blockly.py')
self.write(ret)
def post(self):
log.info("Save workspace")
xml_text = self.get_argument('xml_text')
python_text = self.get_argument('python_text')
write_file('blockly.xml', xml_text)
write_file('blockly.py', python_text)
class ScreenshotHandler(tornado.web.RequestHandler):
def get(self):
d = atx.connect(**atx_settings)
d.screenshot('_screen.png')
self.set_header('Content-Type', 'image/png')
with open('_screen.png', 'rb') as f:
while 1:
data = f.read(16000)
if not data:
break
self.write(data)
self.finish()
def post(self):
raw_image = self.get_argument('raw_image')
filename = self.get_argument('filename')
image = imutils.open(raw_image)
cv2.imwrite(filename, image)
self.write({'status': 'ok'})
class StaticFileHandler(tornado.web.StaticFileHandler):
def get(self, path=None, include_body=True):
path = path.encode(base.SYSTEM_ENCODING) # fix for windows
return super(StaticFileHandler, self).get(path, include_body)
def make_app(settings={}):
static_path = os.getcwd()
application = tornado.web.Application([
(r"/", MainHandler),
(r'/ws', DebugWebSocket), # code debug
(r"/workspace", WorkspaceHandler), # save and write workspace
(r"/images/screenshot", ScreenshotHandler),
(r'/api/images', ImageHandler),
(r'/static_imgs/(.*)', StaticFileHandler, {'path': static_path}),
], **settings)
return application
def main(web_port=None, host=None, port=None, open_browser=True, workdir='.'):
application = make_app({
'static_path': os.path.join(__dir__, 'static'),
'template_path': os.path.join(__dir__, 'static'),
'debug': True,
})
if not web_port:
web_port = get_valid_port()
global device
# global workdir
atx_settings['host'] = host
atx_settings['port'] = port
# device = atx.connect(host=kws.get('host'), port=kws.get('port'))
# TODO
# filename = 'blockly.py'
IMAGE_PATH.append('images/blockly')
if open_browser:
url = 'http://127.0.0.1:{}'.format(web_port)
webbrowser.open(url, new=2) # 2: open new tab if possible
application.listen(web_port)
log.info("Server started.")
log.info("Listening port on 127.0.0.1:{}".format(web_port))
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()<|fim▁end|> | sock.close()
if result != 0: |
<|file_name|>0004_auto__add_imagetranslationtranslation__add_unique_imagetranslationtran.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ImageTranslationTranslation'
db.create_table('cmsplugin_filer_image_translated_imagetranslation_translation', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(max_length=256, blank=True)),
('alt_text', self.gf('django.db.models.fields.CharField')(max_length=512, blank=True)),
('caption', self.gf('django.db.models.fields.CharField')(max_length=512, blank=True)),
('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('master', self.gf('django.db.models.fields.related.ForeignKey')(related_name='translations', null=True, to=orm['cmsplugin_filer_image_translated.ImageTranslation'])),
))
db.send_create_signal('cmsplugin_filer_image_translated', ['ImageTranslationTranslation'])
# Adding unique constraint on 'ImageTranslationTranslation', fields ['language_code', 'master']
db.create_unique('cmsplugin_filer_image_translated_imagetranslation_translation', ['language_code', 'master_id'])
# Adding model 'ImageTranslation'
db.create_table('cmsplugin_filer_image_translated_imagetranslation', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('image', self.gf('django.db.models.fields.related.OneToOneField')(related_name='translation', unique=True, to=orm['filer.Image'])),
))
db.send_create_signal('cmsplugin_filer_image_translated', ['ImageTranslation'])
def backwards(self, orm):
# Removing unique constraint on 'ImageTranslationTranslation', fields ['language_code', 'master']
db.delete_unique('cmsplugin_filer_image_translated_imagetranslation_translation', ['language_code', 'master_id'])
# Deleting model 'ImageTranslationTranslation'
db.delete_table('cmsplugin_filer_image_translated_imagetranslation_translation')
# Deleting model 'ImageTranslation'
db.delete_table('cmsplugin_filer_image_translated_imagetranslation')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cmsplugin_filer_image_translated.imagetranslation': {
'Meta': {'object_name': 'ImageTranslation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'translation'", 'unique': 'True', 'to': "orm['filer.Image']"})
},
'cmsplugin_filer_image_translated.imagetranslationrenamed': {<|fim▁hole|> 'trans_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'trans_caption': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'trans_description': ('django.db.models.fields.TextField', [], {'max_length': '256', 'blank': 'True'}),
'trans_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
},
'cmsplugin_filer_image_translated.imagetranslationtranslation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'ImageTranslationTranslation', 'db_table': "'cmsplugin_filer_image_translated_imagetranslation_translation'"},
'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '256', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': "orm['cmsplugin_filer_image_translated.ImageTranslation']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['cmsplugin_filer_image_translated']<|fim▁end|> | 'Meta': {'object_name': 'ImageTranslationRenamed'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']"}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '2'}), |
<|file_name|>projectlists.go<|end_file_name|><|fim▁begin|>package builder
import (
"fmt"
"github.com/bitrise-io/go-xamarin/analyzers/project"
"github.com/bitrise-io/go-xamarin/constants"
"github.com/bitrise-io/go-xamarin/utility"
)
func (builder Model) whitelistedProjects() []project.Model {
projects := []project.Model{}
for _, proj := range builder.solution.ProjectMap {
if !whitelistAllows(proj.SDK, builder.projectTypeWhitelist...) {
continue
}
if proj.SDK != constants.SDKUnknown {
projects = append(projects, proj)
}
}
return projects
}
func (builder Model) buildableProjects(configuration, platform string) ([]project.Model, []string) {
projects := []project.Model{}
warnings := []string{}
solutionConfig := utility.ToConfig(configuration, platform)
whitelistedProjects := builder.whitelistedProjects()
for _, proj := range whitelistedProjects {
//
// Solution config - project config mapping
_, ok := proj.ConfigMap[solutionConfig]
if !ok {
warnings = append(warnings, fmt.Sprintf("Project (%s) do not have config for solution config (%s), skipping...", proj.Name, solutionConfig))
continue
}
if (proj.SDK == constants.SDKIOS ||
proj.SDK == constants.SDKMacOS ||
proj.SDK == constants.SDKTvOS) &&
proj.OutputType != "exe" {
warnings = append(warnings, fmt.Sprintf("Project (%s) is not archivable based on output type (%s), skipping...", proj.Name, proj.OutputType))
continue
}
if proj.SDK == constants.SDKAndroid &&
!proj.AndroidApplication {
warnings = append(warnings, fmt.Sprintf("(%s) is not an android application project, skipping...", proj.Name))
continue
}
if proj.SDK != constants.SDKUnknown {
projects = append(projects, proj)
}
}
return projects, warnings
}
func (builder Model) buildableXamarinUITestProjectsAndReferredProjects(configuration, platform string) ([]project.Model, []project.Model, []string) {
testProjects := []project.Model{}
referredProjects := []project.Model{}
warnings := []string{}
solutionConfig := utility.ToConfig(configuration, platform)
for _, proj := range builder.solution.ProjectMap {
// Check if is XamarinUITest project
if proj.TestFramework != constants.TestFrameworkXamarinUITest {
continue
}
// Check if contains config mapping
_, ok := proj.ConfigMap[solutionConfig]
if !ok {
warnings = append(warnings, fmt.Sprintf("Project (%s) do not have config for solution config (%s), skipping...", proj.Name, solutionConfig))
continue
}
// Collect referred projects<|fim▁hole|> if len(proj.ReferredProjectIDs) == 0 {
warnings = append(warnings, fmt.Sprintf("No referred projects found for test project: %s, skipping...", proj.Name))
continue
}
for _, projectID := range proj.ReferredProjectIDs {
referredProj, ok := builder.solution.ProjectMap[projectID]
if !ok {
warnings = append(warnings, fmt.Sprintf("Project reference exist with project id: %s, but project not found in solution", projectID))
continue
}
if referredProj.SDK == constants.SDKUnknown {
warnings = append(warnings, fmt.Sprintf("Project's (%s) project type is unkown", referredProj.Name))
continue
}
if whitelistAllows(referredProj.SDK, builder.projectTypeWhitelist...) {
referredProjects = append(referredProjects, referredProj)
}
}
if len(referredProjects) == 0 {
warnings = append(warnings, fmt.Sprintf("Test project (%s) does not refers to any project, with project type whitelist (%v), skipping...", proj.Name, builder.projectTypeWhitelist))
continue
}
testProjects = append(testProjects, proj)
}
return testProjects, referredProjects, warnings
}
func (builder Model) buildableNunitTestProjects(configuration, platform string) ([]project.Model, []string) {
testProjects := []project.Model{}
warnings := []string{}
solutionConfig := utility.ToConfig(configuration, platform)
for _, proj := range builder.solution.ProjectMap {
// Check if is nunit test project
if proj.TestFramework != constants.TestFrameworkNunitTest {
continue
}
// Check if contains config mapping
_, ok := proj.ConfigMap[solutionConfig]
if !ok {
warnings = append(warnings, fmt.Sprintf("Project (%s) do not have config for solution config (%s), skipping...", proj.Name, solutionConfig))
continue
}
testProjects = append(testProjects, proj)
}
return testProjects, warnings
}<|fim▁end|> | |
<|file_name|>MainHandler.py<|end_file_name|><|fim▁begin|>import BaseHTTPServer
import thread
import urlparse
import string
from move import Move
from move import MoveInfo
from battery import BatteryStatus
move = Move()
def sendResponse(s, code, message):
print "... ", s.path
s.send_response(code)
s.send_header("Content-type", "text/html")
s.end_headers()
m = "<html><body><p>" +message +"</p></body></html>"
s.wfile.write(m)
def hello_handler():
pass
def handlerMoveBackward(move, nrSteps):
move.moveBackward(nrSteps)
def handlerMoveForward(move, nrSteps):
move.moveForward(nrSteps)
def handlerMoveRight(move, nrSteps):
move.moveRight(nrSteps)
def handlerMoveLeft(move, nrSteps):
move.moveLeft(nrSteps)
def handlerTurnLeft(move):
move.turnLeft()
def handlerTurnRight(move):
move.turnRight()
def handlerExecute(move):
move.turnRight()
def executeGenericMove(move, moveInfo):
move.executeGenericMove(moveInfo)<|fim▁hole|> move.executeGenericTurn(moveInfo)
class MainHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_HEAD(s):
s.send_response(200)
def do_GET(s):
"""Respond to a GET request."""
global move
if s.path == "/hello":
try:
thread.start_new_thread(hello_handler,())
s.send_response(200)
except:
print "Error: cannot start the thread"
url = s.path
parsed = urlparse.urlparse(url)
if string.find(s.path,"/moveBackward") != -1:
nrSteps = 0
nrSteps = int(urlparse.parse_qs(parsed.query)['nrSteps'][0])
thread.start_new_thread(handlerMoveBackward, (move, nrSteps))
sendResponse(s, 200, "handlerMoveBackward")
return
if string.find(s.path,"/moveForward") != -1:
nrSteps = 0
nrSteps = int(urlparse.parse_qs(parsed.query)['nrSteps'][0])
thread.start_new_thread(handlerMoveForward, (move, nrSteps))
sendResponse(s, 200, "")
return
if string.find(s.path,"/moveRight") != -1:
nrSteps = 0
nrSteps = int(urlparse.parse_qs(parsed.query)['nrSteps'][0])
thread.start_new_thread(handlerMoveRight, (move, nrSteps))
sendResponse(s, 200, "")
return
if string.find(s.path,"/moveLeft") != -1:
nrSteps = 0
nrSteps = int(urlparse.parse_qs(parsed.query)['nrSteps'][0])
thread.start_new_thread(handlerMoveLeft, (move, nrSteps))
sendResponse(s, 200, "")
return
if string.find(s.path,"/turnLeft") != -1:
thread.start_new_thread(handlerTurnLeft, (move))
sendResponse(s, 200, "")
return
if string.find(s.path,"/turnRight") != -1:
thread.start_new_thread(handlerTurnRight, (move))
sendResponse(s, 200, "")
return
if string.find(s.path,"/executeMove") != -1:
nrSteps = int(urlparse.parse_qs(parsed.query)['nrSteps'][0])
x = float(urlparse.parse_qs(parsed.query)['x'][0])
y = float(urlparse.parse_qs(parsed.query)['y'][0])
tetha = float(urlparse.parse_qs(parsed.query)['tetha'][0])
speed = float(urlparse.parse_qs(parsed.query)['speed'][0])
component = urlparse.parse_qs(parsed.query)['component'][0]
moveInfo = MoveInfo(component, x, y, tetha, speed, nrSteps)
thread.start_new_thread(executeGenericMove, (move, moveInfo))
sendResponse(s, 200, "")
return
if string.find(s.path,"/motorsOff") != -1:
print "motorsOff"
move.StiffnessOff()
sendResponse(s, 200, "")
return
if string.find(s.path,"/motorsOn") != -1:
print "motorsOn"
move.StiffnessOn()
sendResponse(s, 200, "")
return
if string.find(s.path,"/batteryStatus") != -1:
print "batteryStatus"
sendResponse(s, 200, "")
return
move.StiffnessOn()
if string.find(s.path,"/turn") != -1:
print "turn"
x = float(urlparse.parse_qs(parsed.query)['x'][0])
y = float(urlparse.parse_qs(parsed.query)['y'][0])
tetha = float(urlparse.parse_qs(parsed.query)['tetha'][0])
moveInfo = MoveInfo(None, x, y, tetha, None, None)
thread.start_new_thread(executeGenericTurn, (move, moveInfo))
sendResponse(s, 200, "")
return<|fim▁end|> |
def executeGenericTurn(move, moveInfo): |
<|file_name|>vue.ts<|end_file_name|><|fim▁begin|>import Vue from "vue";
import * as Vuex from "../index";
const store = new Vuex.Store({
state: {
value: 1
}
});
const vm = new Vue({<|fim▁hole|>});
vm.$store.state.value;<|fim▁end|> | store |
<|file_name|>reverse_graph.rs<|end_file_name|><|fim▁begin|>use crate::core::{
property::{AddEdge, RemoveEdge},
Directed, Ensure, Graph, GraphDerefMut, GraphMut,
};
use delegate::delegate;
use std::borrow::Borrow;
#[derive(Debug)]
pub struct ReverseGraph<C: Ensure>(C)
where
C::Graph: Graph<Directedness = Directed>;
impl<C: Ensure> ReverseGraph<C>
where
C::Graph: Graph<Directedness = Directed>,
{
/// Creates the a reversed graph from the given graph.
pub fn new(c: C) -> Self
{
Self(c)
}
}
impl<C: Ensure> Graph for ReverseGraph<C>
where
C::Graph: Graph<Directedness = Directed>,
{
type Directedness = Directed;
type EdgeWeight = <C::Graph as Graph>::EdgeWeight;
type Vertex = <C::Graph as Graph>::Vertex;
type VertexWeight = <C::Graph as Graph>::VertexWeight;
delegate! {
to self.0.graph() {
fn all_vertices_weighted<'a>(&'a self) -> Box<dyn 'a + Iterator<Item=
(Self::Vertex, &'a Self::VertexWeight)>>;
}
}
fn edges_between<'a: 'b, 'b>(
&'a self,
source: impl 'b + Borrow<Self::Vertex>,
sink: impl 'b + Borrow<Self::Vertex>,
) -> Box<dyn 'b + Iterator<Item = &'a Self::EdgeWeight>>
{
self.0.graph().edges_between(sink, source)
}
}
impl<C: Ensure + GraphDerefMut> GraphMut for ReverseGraph<C>
where
C::Graph: GraphMut<Directedness = Directed>,
{
delegate! {
to self.0.graph_mut() {
fn all_vertices_weighted_mut<'a>(&'a mut self) -> Box<dyn 'a + Iterator<Item=
(Self::Vertex, &'a mut Self::VertexWeight)>>;
}
}
fn edges_between_mut<'a: 'b, 'b>(<|fim▁hole|> source: impl 'b + Borrow<Self::Vertex>,
sink: impl 'b + Borrow<Self::Vertex>,
) -> Box<dyn 'b + Iterator<Item = &'a mut Self::EdgeWeight>>
{
Box::new(self.0.graph_mut().edges_between_mut(sink, source))
}
}
impl<C: Ensure + GraphDerefMut> AddEdge for ReverseGraph<C>
where
C::Graph: AddEdge<Directedness = Directed>,
{
fn add_edge_weighted(
&mut self,
source: impl Borrow<Self::Vertex>,
sink: impl Borrow<Self::Vertex>,
weight: Self::EdgeWeight,
) -> Result<(), ()>
{
self.0.graph_mut().add_edge_weighted(sink, source, weight)
}
}
impl<C: Ensure + GraphDerefMut> RemoveEdge for ReverseGraph<C>
where
C::Graph: RemoveEdge<Directedness = Directed>,
{
fn remove_edge_where_weight<F>(
&mut self,
source: impl Borrow<Self::Vertex>,
sink: impl Borrow<Self::Vertex>,
f: F,
) -> Result<Self::EdgeWeight, ()>
where
F: Fn(&Self::EdgeWeight) -> bool,
{
self.0.graph_mut().remove_edge_where_weight(source, sink, f)
}
}
base_graph! {
use<C> ReverseGraph<C>: NewVertex, RemoveVertex, HasVertex
as (self.0): C
where
C: Ensure,
C::Graph: Graph<Directedness = Directed>
}<|fim▁end|> | &'a mut self, |
<|file_name|>bitcoin_fa_IR.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="fa_IR" version="2.0">
<defauproodec>UTF-8</defauproodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Procoin</source>
<translation>در مورد بیتکویین</translation>
</message>
<message>
<location line="+39"/>
<source><b>Procoin</b> version</source>
<translation><b>Procoin</b> version</translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The Procoin developers</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>دفترچه آدرس</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>برای ویرایش آدرس/برچسب دوبار کلیک نمایید</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>یک آدرس جدید بسازید</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>آدرس انتخاب شده را در کلیپ بوردِ سیستم کپی کنید</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>و آدرس جدید</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your Procoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation>و کپی آدرس</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>نشان و کد QR</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Procoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation>صدور داده نوار جاری به یک فایل</translation>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified Procoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>و حذف</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your Procoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation>کپی و برچسب</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>و ویرایش</translation>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation>انتقال اطلاعات دفترچه آدرس</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>سی.اس.وی. (فایل جداگانه دستوری)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>صدور پیام خطا</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>قابل کپی در فایل نیست %1</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>برچسب</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>آدرس</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(برچسب ندارد)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>رمز/پَس فرِیز را وارد کنید</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>رمز/پَس فرِیز جدید را وارد کنید</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>رمز/پَس فرِیز را دوباره وارد کنید</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>رمز/پَس فرِیز جدید را در wallet وارد کنید. برای انتخاب رمز/پَس فرِیز از 10 کاراکتر تصادفی یا بیشتر و یا هشت کلمه یا بیشتر استفاده کنید. </translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>wallet را رمزگذاری کنید</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>برای انجام این عملکرد به رمز/پَس فرِیزِwallet نیاز است تا آن را از حالت قفل درآورد.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>باز کردن قفل wallet </translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>برای کشف رمز wallet، به رمز/پَس فرِیزِwallet نیاز است.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>کشف رمز wallet</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>تغییر رمز/پَس فرِیز</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>رمز/پَس فرِیزِ قدیم و جدید را در wallet وارد کنید</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>رمزگذاری wallet را تایید کنید</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR PROCOINS</b>!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>تایید رمزگذاری</translation>
</message>
<message>
<location line="-56"/>
<source>Procoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your procoins from being stolen by malware infecting your computer.</source>
<translation>Procoin برای اتمام فرایند رمزگذاری بسته خواهد شد. به خاطر داشته باشید که رمزگذاری WALLET شما، کامپیوتر شما را از آلودگی به بدافزارها مصون نمی دارد.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>رمزگذاری تایید نشد</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>رمزگذاری به علت خطای داخلی تایید نشد. wallet شما رمزگذاری نشد</translation>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>رمزهای/پَس فرِیزهایِ وارد شده با هم تطابق ندارند</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation>قفل wallet باز نشد</translation>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>رمزهای/پَس فرِیزهایِ وارد شده wallet برای کشف رمز اشتباه است.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>کشف رمز wallet انجام نشد</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation>امضا و پیام</translation>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation>به روز رسانی با شبکه...</translation>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation>و بازبینی</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>نمای کلی از wallet را نشان بده</translation>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation>و تراکنش</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>تاریخچه تراکنش را باز کن</translation>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation>فهرست آدرسها و برچسبهای ذخیره شده را ویرایش کن</translation>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation>فهرست آدرسها را برای دریافت وجه نشان بده</translation>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation>خروج</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>از "درخواست نامه"/ application خارج شو</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about Procoin</source>
<translation>اطلاعات در مورد Procoin را نشان بده</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>درباره و QT</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>نمایش اطلاعات درباره QT</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>و انتخابها</translation>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation>و رمزگذاری wallet</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>و گرفتن نسخه پیشتیبان از wallet</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>تغییر رمز/پَس فرِیز</translation>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-347"/>
<source>Send coins to a Procoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for Procoin</source>
<translation>اصلاح انتخابها برای پیکربندی Procoin</translation>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation>گرفتن نسخه پیشتیبان در آدرسی دیگر</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>رمز مربوط به رمزگذاریِ wallet را تغییر دهید</translation>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>Procoin</source>
<translation>procoin</translation>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation>کیف پول</translation>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>&About Procoin</source>
<translation>&در مورد بیتکویین</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&نمایش/ عدم نمایش و</translation>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your Procoin addresses to prove you own them</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified Procoin addresses</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>و فایل</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>و تنظیمات</translation>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation>و راهنما</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>نوار ابزار</translation>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+47"/>
<source>Procoin client</source>
<translation>مشتری procoin</translation>
</message>
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to Procoin network</source>
<translation><numerusform>%n ارتباط فعال به شبکه Procoin
%n ارتباط فعال به شبکه Procoin</numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning</source><|fim▁hole|> <translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation>روزآمد</translation>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation>در حال روزآمد سازی..</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation>ارسال تراکنش</translation>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation>تراکنش دریافتی</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>تاریخ: %1⏎ میزان وجه : %2⏎ نوع: %3⏎ آدرس: %4⏎
</translation>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid Procoin address or malformed URI parameters.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>wallet رمزگذاری شد و در حال حاضر از حالت قفل در آمده است</translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>wallet رمزگذاری شد و در حال حاضر قفل است</translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. Procoin can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation>هشدار شبکه</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>ویرایش آدرسها</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>و برچسب</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>برچسب مربوط به این دفترچه آدرس</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>و آدرس</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>برچسب مربوط به این دفترچه آدرس و تنها ب</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>آدرسِ دریافت کننده جدید</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>آدرس ارسال کننده جدید</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>ویرایش آدرسِ دریافت کننده</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>ویرایش آدرسِ ارسال کننده</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>آدرس وارد شده %1 قبلا به فهرست آدرسها اضافه شده بوده است.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Procoin address.</source>
<translation>آدرس وارد شده "%1" یک آدرس صحیح برای procoin نسشت</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>عدم توانیی برای قفل گشایی wallet</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>عدم توانیی در ایجاد کلید جدید</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>Procoin-Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>نسخه</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>میزان استفاده:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>انتخاب/آپشن</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Automatically start Procoin after logging in to the system.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Start Procoin on system login</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Procoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Connect to the Procoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Procoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Whether to show Procoin addresses in the transaction list or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>و نمایش آدرسها در فهرست تراکنش</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>و تایید</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>و رد</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>و به کار گرفتن</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation>پیش فرض</translation>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Procoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>فرم</translation>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Procoin network after a connection is established, but this process has not completed yet.</source>
<translation>اطلاعات نمایش داده شده ممکن است روزآمد نباشد. wallet شما به صورت خودکار بعد از برقراری اتصال با شبکه procoin به روز می شود اما این فرایند هنوز تکمیل نشده است.</translation>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
<translation>مانده حساب:</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>تایید نشده</translation>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation>کیف پول</translation>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation>تراکنشهای اخیر</translation>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation>مانده حساب جاری</translation>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>تعداد تراکنشهایی که نیاز به تایید دارند و هنوز در مانده حساب جاری شما به حساب نیامده اند</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>خارج از روزآمد سازی</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start procoin: click-to-pay handler</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>درخواست وجه</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>میزان وجه:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>برچسب:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>پیام:</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>و ذخیره با عنوانِ...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>متن وارد شده طولانی است، متنِ برچسب/پیام را کوتاه کنید</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>تصاویر با فرمت PNG
(*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Show the Procoin-Qt help message to get a list with possible Procoin command-line options.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-104"/>
<source>Procoin - Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Procoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Open the Procoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the Procoin RPC console.</source>
<translation>به کنسول آر.پی.سی. PROCOIN خوش آمدید</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>سکه های ارسالی</translation>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation>ارسال همزمان به گیرنده های متعدد</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>تمامی فیلدهای تراکنش حذف شوند</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation>مانده حساب:</translation>
</message>
<message>
<location line="+10"/>
<source>123.456 BTC</source>
<translation>123.456 BTC</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>تایید عملیات ارسال </translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>و ارسال</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source><b>%1</b> to %2 (%3)</source>
<translation>%1 به %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>تایید ارسال سکه ها</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>شما مطمئن هستید که می خواهید %1 را ارسال کنید؟</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation>و</translation>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>میزان پرداخت باید بیشتر از 0 باشد</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>خطا: تراکنش تایید نشد. این خطا ممکن است به این دلیل اتفاق بیافتد که سکه های wallet شما خرج شده باشند مثلا اگر wallet.dat را مپی کرده باشید و سکه های شما در آن کپی استفاده شده باشند اما در اینجا نمایش داده نشده اند.</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>فرم</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>و میزان وجه</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>پرداخت و به چه کسی</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>یک برچسب برای این آدرس بنویسید تا به دفترچه آدرسهای شما اضافه شود</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>و برچسب</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation>آدرس از فهرست آدرس انتخاب کنید</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt و A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>آدرس را بر کلیپ بورد کپی کنید</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt و P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>این گیرنده را حذف کن</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Procoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>یک آدرس procoin وارد کنید (مثال Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation>و امضای پیام </translation>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>یک آدرس procoin وارد کنید (مثال Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation>آدرس از فهرست آدرس انتخاب کنید</translation>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation>Alt و A</translation>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation>آدرس را بر کلیپ بورد کپی کنید</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt و P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Procoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>یک آدرس procoin وارد کنید (مثال Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Procoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Procoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>یک آدرس procoin وارد کنید (مثال Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Enter Procoin signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The Procoin developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation>باز کن تا %1</translation>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1 غیرقابل تایید</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 تاییدها</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>تاریخ</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>برچسب</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>پیام</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>میزان</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>تا به حال با موفقیت انتشار نیافته است</translation>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation>ناشناس</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>جزئیات تراکنش</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>این بخش جزئیات تراکنش را نشان می دهد</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation>تاریخ</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>نوع</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>آدرس</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>میزان وجه</translation>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation>باز کن تا %1</translation>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation>برون خطی (%1 تاییدها)</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation>تایید نشده (%1 از %2 تاییدها)</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation>تایید شده (%1 تاییدها)</translation>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>این block توسط گره های دیگری دریافت نشده است و ممکن است قبول نشود</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>تولید شده اما قبول نشده است</translation>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation>قبول با </translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>دریافت شده از</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>ارسال به</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>وجه برای شما </translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>استخراج شده</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>خالی</translation>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>وضعیت تراکنش. با اشاره به این بخش تعداد تاییدها نمایش داده می شود</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>زمان و تاریخی که تراکنش دریافت شده است</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>نوع تراکنش</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>آدرس مقصد در تراکنش</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>میزان وجه کم شده یا اضافه شده به حساب</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation>همه</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>امروز</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>این هفته</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>این ماه</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>ماه گذشته</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>این سال</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>حدود..</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>دریافت با</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>ارسال به</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>به شما</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>استخراج شده</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>دیگر</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>آدرس یا برچسب را برای جستجو وارد کنید</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>حداقل میزان وجه</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>آدرس را کپی کنید</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>برچسب را کپی کنید</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>میزان وجه کپی شود</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>برچسب را ویرایش کنید</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation>داده های تراکنش را صادر کنید</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Comma separated file (*.csv) فایل جداگانه دستوری</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>تایید شده</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>تاریخ</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>نوع</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>برچسب</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>آدرس</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>میزان</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>شناسه کاربری</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>خطا در ارسال</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>قابل کپی به فایل نیست %1.</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>دامنه:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>به</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation>سکه های ارسالی</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation>صدور داده نوار جاری به یک فایل</translation>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>Procoin version</source>
<translation>نسخه procoin</translation>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation>میزان استفاده:</translation>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or procoind</source>
<translation>ارسال دستور به سرور یا procoined</translation>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation>فهرست دستورها</translation>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation>درخواست کمک برای یک دستور</translation>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation>انتخابها:</translation>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: procoin.conf)</source>
<translation>فایل پیکربندیِ را مشخص کنید (پیش فرض: procoin.conf)</translation>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: procoind.pid)</source>
<translation>فایل pid را مشخص کنید (پیش فرض: procoind.pid)</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>دایرکتوری داده را مشخص کن</translation>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>حافظه بانک داده را به مگابایت تنظیم کنید (پیش فرض: 25)</translation>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 9333 or testnet: 19333)</source>
<translation>ارتباطات را در <PORT> بشنوید (پیش فرض: 9333 or testnet: 19333)</translation>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>نگهداری <N> ارتباطات برای قرینه سازی (پیش فرض:125)</translation>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>آستانه قطع برای قرینه سازی اشتباه (پیش فرض:100)</translation>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>تعداد ثانیه ها برای اتصال دوباره قرینه های اشتباه (پیش فرض:86400)</translation>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on <port> (default: 9332 or testnet: 19332)</source>
<translation>ارتباطاتِ JSON-RPC را در <port> گوش کنید (پیش فرض:9332)</translation>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>command line و JSON-RPC commands را قبول کنید</translation>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>به عنوان daemon بک گراند را اجرا کنید و دستورات را قبول نمایید</translation>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation>از تستِ شبکه استفاده نمایید</translation>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=procoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Procoin Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Procoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Procoin will not work properly.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation>برونداد اشکال زدایی با timestamp</translation>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the Procoin Wiki for SSL setup instructions)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>ارسال اطلاعات پیگیری/خطایابی به کنسول به جای ارسال به فایل debug.log</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>ارسال اطاعات خطایابی/پیگیری به سیستم خطایاب</translation>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>تعیین مدت زمان وقفه (time out) به هزارم ثانیه</translation>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation>شناسه کاربری برای ارتباطاتِ JSON-RPC</translation>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation>رمز برای ارتباطاتِ JSON-RPC</translation>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>ارتباطاتِ JSON-RPC را از آدرس آی.پی. مشخصی برقرار کنید.</translation>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>دستورات را به گره اجرا شده در<ip> ارسال کنید (پیش فرض:127.0.0.1)</translation>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>دستور را وقتی بهترین بلاک تغییر کرد اجرا کن (%s در دستور توسط block hash جایگزین شده است)</translation>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation>wallet را به جدیدترین نسخه روزآمد کنید</translation>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>حجم key pool را به اندازه <n> تنظیم کنید (پیش فرض:100)</translation>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>زنجیره بلاک را برای تراکنش جا افتاده در WALLET دوباره اسکن کنید</translation>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>برای ارتباطاتِ JSON-RPC از OpenSSL (https) استفاده کنید</translation>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation>فایل certificate سرور (پیش فرض server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>رمز اختصاصی سرور (پیش فرض: server.pem)</translation>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>ciphers قابل قبول (پیش فرض: default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation>این پیام راهنما</translation>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation>لود شدن آدرسها..</translation>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>خطا در هنگام لود شدن wallet.dat: Wallet corrupted</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of Procoin</source>
<translation>خطا در هنگام لود شدن wallet.dat. به نسخه جدید Bitocin برای wallet نیاز است.</translation>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart Procoin to complete</source>
<translation>wallet نیاز به بازنویسی دارد. Procoin را برای تکمیل عملیات دوباره اجرا کنید.</translation>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation>خطا در هنگام لود شدن wallet.dat</translation>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>میزان اشتباه است for -paytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation>میزان اشتباه است</translation>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation>وجوه ناکافی</translation>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation>لود شدن نمایه بلاکها..</translation>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>یک گره برای اتصال اضافه کنید و تلاش کنید تا اتصال را باز نگاه دارید</translation>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. Procoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation>هزینه بر اساس کیلو بایت برای اضافه شدن به تراکنشی که ارسال کرده اید</translation>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation>wallet در حال لود شدن است...</translation>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation>قابلیت برگشت به نسخه قبلی برای wallet امکان پذیر نیست</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation>آدرس پیش فرض قابل ذخیره نیست</translation>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation>اسکنِ دوباره...</translation>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation>اتمام لود شدن</translation>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation>برای استفاده از %s از اختیارات</translation>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation>خطا</translation>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>شما باید یک رمز rpcpassword=<password> را در فایل تنظیمات ایجاد کنید⏎ %s ⏎ اگر فایل ایجاد نشده است، آن را با یک فایل "فقط متنی" ایجاد کنید.
</translation>
</message>
</context>
</TS><|fim▁end|> | |
<|file_name|>mapStateModelValueInStrict.common.js<|end_file_name|><|fim▁begin|>/**
* vuex-mapstate-modelvalue-instrict- v0.0.4
* (c) 2017 fsy0718
* @license MIT
*/
'use strict';
var push = Array.prototype.push;
var pop = Array.prototype.pop;
var _mapStateModelValueInStrict = function (modelValue, stateName, type, opts, setWithPayload, copts) {
if ( opts === void 0 ) opts = {};
if ( copts === void 0 ) copts = {};
if (process.env.NODE_ENV === 'development' && (!modelValue || !stateName || !type)) {
throw new Error(("vuex-mapstate-modelvalue-instrict: the " + modelValue + " at least 3 parameters are required"))
}
var getFn = opts.getFn || copts.getFn;
var modulePath = opts.modulePath || copts.modulePath;
return {
get: function get () {
if (getFn) {
return getFn(this.$store.state, modelValue, stateName, modulePath)
}
if (modulePath) {
var paths = modulePath.split('/') || [];
var result;
try {
result = paths.reduce(function (r, c) {
return r[c]
}, this.$store.state);
result = result[stateName];
} catch (e) {
if (process.env.NODE_ENV === 'development') {
throw e
}
result = undefined;
}
return result
}
return this.$store.state[stateName]
},
set: function set (value) {
var mutation = setWithPayload ? ( obj = {}, obj[stateName] = value, obj ) : value;
var obj;<|fim▁hole|> this.$store.commit(_type, mutation, modulePath ? opts.param || copts.param : undefined);
}
}
};
var _mapStateModelValuesInStrict = function () {
var args = arguments;
var setWithPayload = pop.call(args);
var result = {};
if (Array.isArray(args[0])) {
var opts = args[1];
args[0].forEach(function (item) {
result[item[0]] = _mapStateModelValueInStrict(item[0], item[1], item[2], item[3], setWithPayload, opts);
});
} else {
result[args[0]] = _mapStateModelValueInStrict(args[0], args[1], args[2], args[3], setWithPayload);
}
return result
};
// mapStateModelValuesInStrict(modelValue, stateName, type, {getFn, setWithPayload, modulePath}})
// mapStateModelValuesInStrict([[modelValue, stateName, type, {getFn1}], [modelValue, stateName, type]], {getFn, setWithPayload})
var mapStateModelValuesInStrictWithPayload = function () {
var args = arguments;
push.call(arguments, true);
return _mapStateModelValuesInStrict.apply(null, args)
};
var mapStateModelValuesInStrict = function () {
var args = arguments;
push.call(arguments, false);
return _mapStateModelValuesInStrict.apply(null, args)
};
var index = {
mapStateModelValuesInStrict: mapStateModelValuesInStrict,
mapStateModelValuesInStrictWithPayload: mapStateModelValuesInStrictWithPayload,
version: '0.0.4'
};
module.exports = index;<|fim▁end|> | var _type = modulePath ? (modulePath + "/" + type) : type; |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>(function() {
'use strict';
var mergeModuleExports = require('../../utils/mergemoduleexports');
<|fim▁hole|><|fim▁end|> | mergeModuleExports(module.exports, require('./loader'));
mergeModuleExports(module.exports, require('./any'));
})(); |
<|file_name|>walletkit_server.go<|end_file_name|><|fim▁begin|>// +build walletrpc
package walletrpc
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/signrpc"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/sweep"
"golang.org/x/net/context"
"google.golang.org/grpc"
"gopkg.in/macaroon-bakery.v2/bakery"
)
const (
// subServerName is the name of the sub rpc server. We'll use this name
// to register ourselves, and we also require that the main
// SubServerConfigDispatcher instance recognize as the name of our
subServerName = "WalletKitRPC"
)
var (
// macaroonOps are the set of capabilities that our minted macaroon (if
// it doesn't already exist) will have.
macaroonOps = []bakery.Op{
{
Entity: "address",
Action: "write",
},
{
Entity: "address",
Action: "read",
},
{
Entity: "onchain",
Action: "write",
},
{
Entity: "onchain",
Action: "read",
},
}
// macPermissions maps RPC calls to the permissions they require.
macPermissions = map[string][]bakery.Op{
"/walletrpc.WalletKit/DeriveNextKey": {{
Entity: "address",
Action: "read",
}},
"/walletrpc.WalletKit/DeriveKey": {{
Entity: "address",
Action: "read",
}},
"/walletrpc.WalletKit/NextAddr": {{
Entity: "address",
Action: "read",
}},
"/walletrpc.WalletKit/PublishTransaction": {{
Entity: "onchain",
Action: "write",
}},
"/walletrpc.WalletKit/SendOutputs": {{
Entity: "onchain",
Action: "write",
}},
"/walletrpc.WalletKit/EstimateFee": {{
Entity: "onchain",
Action: "read",
}},
"/walletrpc.WalletKit/PendingSweeps": {{
Entity: "onchain",
Action: "read",
}},
"/walletrpc.WalletKit/BumpFee": {{
Entity: "onchain",
Action: "write",
}},
}
// DefaultWalletKitMacFilename is the default name of the wallet kit
// macaroon that we expect to find via a file handle within the main
// configuration file in this package.
DefaultWalletKitMacFilename = "walletkit.macaroon"
)
// WalletKit is a sub-RPC server that exposes a tool kit which allows clients
// to execute common wallet operations. This includes requesting new addresses,
// keys (for contracts!), and publishing transactions.
type WalletKit struct {
cfg *Config
}
// A compile time check to ensure that WalletKit fully implements the
// WalletKitServer gRPC service.
var _ WalletKitServer = (*WalletKit)(nil)
// New creates a new instance of the WalletKit sub-RPC server.
func New(cfg *Config) (*WalletKit, lnrpc.MacaroonPerms, error) {
// If the path of the wallet kit macaroon wasn't specified, then we'll
// assume that it's found at the default network directory.
if cfg.WalletKitMacPath == "" {
cfg.WalletKitMacPath = filepath.Join(
cfg.NetworkDir, DefaultWalletKitMacFilename,
)
}
// Now that we know the full path of the wallet kit macaroon, we can
// check to see if we need to create it or not.
macFilePath := cfg.WalletKitMacPath
if !lnrpc.FileExists(macFilePath) && cfg.MacService != nil {
log.Infof("Baking macaroons for WalletKit RPC Server at: %v",
macFilePath)
// At this point, we know that the wallet kit macaroon doesn't
// yet, exist, so we need to create it with the help of the<|fim▁hole|> walletKitMac, err := cfg.MacService.Oven.NewMacaroon(
context.Background(), bakery.LatestVersion, nil,
macaroonOps...,
)
if err != nil {
return nil, nil, err
}
walletKitMacBytes, err := walletKitMac.M().MarshalBinary()
if err != nil {
return nil, nil, err
}
err = ioutil.WriteFile(macFilePath, walletKitMacBytes, 0644)
if err != nil {
os.Remove(macFilePath)
return nil, nil, err
}
}
walletKit := &WalletKit{
cfg: cfg,
}
return walletKit, macPermissions, nil
}
// Start launches any helper goroutines required for the sub-server to function.
//
// NOTE: This is part of the lnrpc.SubServer interface.
func (w *WalletKit) Start() error {
return nil
}
// Stop signals any active goroutines for a graceful closure.
//
// NOTE: This is part of the lnrpc.SubServer interface.
func (w *WalletKit) Stop() error {
return nil
}
// Name returns a unique string representation of the sub-server. This can be
// used to identify the sub-server and also de-duplicate them.
//
// NOTE: This is part of the lnrpc.SubServer interface.
func (w *WalletKit) Name() string {
return subServerName
}
// RegisterWithRootServer will be called by the root gRPC server to direct a
// sub RPC server to register itself with the main gRPC root server. Until this
// is called, each sub-server won't be able to have requests routed towards it.
//
// NOTE: This is part of the lnrpc.SubServer interface.
func (w *WalletKit) RegisterWithRootServer(grpcServer *grpc.Server) error {
// We make sure that we register it with the main gRPC server to ensure
// all our methods are routed properly.
RegisterWalletKitServer(grpcServer, w)
log.Debugf("WalletKit RPC server successfully registered with " +
"root gRPC server")
return nil
}
// DeriveNextKey attempts to derive the *next* key within the key family
// (account in BIP43) specified. This method should return the next external
// child within this branch.
func (w *WalletKit) DeriveNextKey(ctx context.Context,
req *KeyReq) (*signrpc.KeyDescriptor, error) {
nextKeyDesc, err := w.cfg.KeyRing.DeriveNextKey(
keychain.KeyFamily(req.KeyFamily),
)
if err != nil {
return nil, err
}
return &signrpc.KeyDescriptor{
KeyLoc: &signrpc.KeyLocator{
KeyFamily: int32(nextKeyDesc.Family),
KeyIndex: int32(nextKeyDesc.Index),
},
RawKeyBytes: nextKeyDesc.PubKey.SerializeCompressed(),
}, nil
}
// DeriveKey attempts to derive an arbitrary key specified by the passed
// KeyLocator.
func (w *WalletKit) DeriveKey(ctx context.Context,
req *signrpc.KeyLocator) (*signrpc.KeyDescriptor, error) {
keyDesc, err := w.cfg.KeyRing.DeriveKey(keychain.KeyLocator{
Family: keychain.KeyFamily(req.KeyFamily),
Index: uint32(req.KeyIndex),
})
if err != nil {
return nil, err
}
return &signrpc.KeyDescriptor{
KeyLoc: &signrpc.KeyLocator{
KeyFamily: int32(keyDesc.Family),
KeyIndex: int32(keyDesc.Index),
},
RawKeyBytes: keyDesc.PubKey.SerializeCompressed(),
}, nil
}
// NextAddr returns the next unused address within the wallet.
func (w *WalletKit) NextAddr(ctx context.Context,
req *AddrRequest) (*AddrResponse, error) {
addr, err := w.cfg.Wallet.NewAddress(lnwallet.WitnessPubKey, false)
if err != nil {
return nil, err
}
return &AddrResponse{
Addr: addr.String(),
}, nil
}
// Attempts to publish the passed transaction to the network. Once this returns
// without an error, the wallet will continually attempt to re-broadcast the
// transaction on start up, until it enters the chain.
func (w *WalletKit) PublishTransaction(ctx context.Context,
req *Transaction) (*PublishResponse, error) {
switch {
// If the client doesn't specify a transaction, then there's nothing to
// publish.
case len(req.TxHex) == 0:
return nil, fmt.Errorf("must provide a transaction to " +
"publish")
}
tx := &wire.MsgTx{}
txReader := bytes.NewReader(req.TxHex)
if err := tx.Deserialize(txReader); err != nil {
return nil, err
}
err := w.cfg.Wallet.PublishTransaction(tx)
if err != nil {
return nil, err
}
return &PublishResponse{}, nil
}
// SendOutputs is similar to the existing sendmany call in Bitcoind, and allows
// the caller to create a transaction that sends to several outputs at once.
// This is ideal when wanting to batch create a set of transactions.
func (w *WalletKit) SendOutputs(ctx context.Context,
req *SendOutputsRequest) (*SendOutputsResponse, error) {
switch {
// If the client didn't specify any outputs to create, then we can't
// proceed .
case len(req.Outputs) == 0:
return nil, fmt.Errorf("must specify at least one output " +
"to create")
}
// Before we can request this transaction to be created, we'll need to
// amp the protos back into the format that the internal wallet will
// recognize.
outputsToCreate := make([]*wire.TxOut, 0, len(req.Outputs))
for _, output := range req.Outputs {
outputsToCreate = append(outputsToCreate, &wire.TxOut{
Value: output.Value,
PkScript: output.PkScript,
})
}
// Now that we have the outputs mapped, we can request that the wallet
// attempt to create this transaction.
tx, err := w.cfg.Wallet.SendOutputs(
outputsToCreate, lnwallet.SatPerKWeight(req.SatPerKw),
)
if err != nil {
return nil, err
}
var b bytes.Buffer
if err := tx.Serialize(&b); err != nil {
return nil, err
}
return &SendOutputsResponse{
RawTx: b.Bytes(),
}, nil
}
// EstimateFee attempts to query the internal fee estimator of the wallet to
// determine the fee (in sat/kw) to attach to a transaction in order to achieve
// the confirmation target.
func (w *WalletKit) EstimateFee(ctx context.Context,
req *EstimateFeeRequest) (*EstimateFeeResponse, error) {
switch {
// A confirmation target of zero doesn't make any sense. Similarly, we
// reject confirmation targets of 1 as they're unreasonable.
case req.ConfTarget == 0 || req.ConfTarget == 1:
return nil, fmt.Errorf("confirmation target must be greater " +
"than 1")
}
satPerKw, err := w.cfg.FeeEstimator.EstimateFeePerKW(
uint32(req.ConfTarget),
)
if err != nil {
return nil, err
}
return &EstimateFeeResponse{
SatPerKw: int64(satPerKw),
}, nil
}
// PendingSweeps returns lists of on-chain outputs that lnd is currently
// attempting to sweep within its central batching engine. Outputs with similar
// fee rates are batched together in order to sweep them within a single
// transaction. The fee rate of each sweeping transaction is determined by
// taking the average fee rate of all the outputs it's trying to sweep.
func (w *WalletKit) PendingSweeps(ctx context.Context,
in *PendingSweepsRequest) (*PendingSweepsResponse, error) {
// Retrieve all of the outputs the UtxoSweeper is currently trying to
// sweep.
pendingInputs, err := w.cfg.Sweeper.PendingInputs()
if err != nil {
return nil, err
}
// Convert them into their respective RPC format.
rpcPendingSweeps := make([]*PendingSweep, 0, len(pendingInputs))
for _, pendingInput := range pendingInputs {
var witnessType WitnessType
switch pendingInput.WitnessType {
case input.CommitmentTimeLock:
witnessType = WitnessType_COMMITMENT_TIME_LOCK
case input.CommitmentNoDelay:
witnessType = WitnessType_COMMITMENT_NO_DELAY
case input.CommitmentRevoke:
witnessType = WitnessType_COMMITMENT_REVOKE
case input.HtlcOfferedRevoke:
witnessType = WitnessType_HTLC_OFFERED_REVOKE
case input.HtlcAcceptedRevoke:
witnessType = WitnessType_HTLC_ACCEPTED_REVOKE
case input.HtlcOfferedTimeoutSecondLevel:
witnessType = WitnessType_HTLC_OFFERED_TIMEOUT_SECOND_LEVEL
case input.HtlcAcceptedSuccessSecondLevel:
witnessType = WitnessType_HTLC_ACCEPTED_SUCCESS_SECOND_LEVEL
case input.HtlcOfferedRemoteTimeout:
witnessType = WitnessType_HTLC_OFFERED_REMOTE_TIMEOUT
case input.HtlcAcceptedRemoteSuccess:
witnessType = WitnessType_HTLC_ACCEPTED_REMOTE_SUCCESS
case input.HtlcSecondLevelRevoke:
witnessType = WitnessType_HTLC_SECOND_LEVEL_REVOKE
case input.WitnessKeyHash:
witnessType = WitnessType_WITNESS_KEY_HASH
case input.NestedWitnessKeyHash:
witnessType = WitnessType_NESTED_WITNESS_KEY_HASH
default:
log.Warnf("Unhandled witness type %v for input %v",
pendingInput.WitnessType, pendingInput.OutPoint)
}
op := &lnrpc.OutPoint{
TxidBytes: pendingInput.OutPoint.Hash[:],
OutputIndex: pendingInput.OutPoint.Index,
}
amountSat := uint32(pendingInput.Amount)
satPerByte := uint32(pendingInput.LastFeeRate.FeePerKVByte() / 1000)
broadcastAttempts := uint32(pendingInput.BroadcastAttempts)
nextBroadcastHeight := uint32(pendingInput.NextBroadcastHeight)
rpcPendingSweeps = append(rpcPendingSweeps, &PendingSweep{
Outpoint: op,
WitnessType: witnessType,
AmountSat: amountSat,
SatPerByte: satPerByte,
BroadcastAttempts: broadcastAttempts,
NextBroadcastHeight: nextBroadcastHeight,
})
}
return &PendingSweepsResponse{
PendingSweeps: rpcPendingSweeps,
}, nil
}
// unmarshallOutPoint converts an outpoint from its lnrpc type to its canonical
// type.
func unmarshallOutPoint(op *lnrpc.OutPoint) (*wire.OutPoint, error) {
if op == nil {
return nil, fmt.Errorf("empty outpoint provided")
}
var hash chainhash.Hash
switch {
case len(op.TxidBytes) == 0 && len(op.TxidStr) == 0:
fallthrough
case len(op.TxidBytes) != 0 && len(op.TxidStr) != 0:
return nil, fmt.Errorf("either TxidBytes or TxidStr must be " +
"specified, but not both")
// The hash was provided as raw bytes.
case len(op.TxidBytes) != 0:
copy(hash[:], op.TxidBytes)
// The hash was provided as a hex-encoded string.
case len(op.TxidStr) != 0:
h, err := chainhash.NewHashFromStr(op.TxidStr)
if err != nil {
return nil, err
}
hash = *h
}
return &wire.OutPoint{
Hash: hash,
Index: op.OutputIndex,
}, nil
}
// BumpFee allows bumping the fee rate of an arbitrary input. A fee preference
// can be expressed either as a specific fee rate or a delta of blocks in which
// the output should be swept on-chain within. If a fee preference is not
// explicitly specified, then an error is returned. The status of the input
// sweep can be checked through the PendingSweeps RPC.
func (w *WalletKit) BumpFee(ctx context.Context,
in *BumpFeeRequest) (*BumpFeeResponse, error) {
// Parse the outpoint from the request.
op, err := unmarshallOutPoint(in.Outpoint)
if err != nil {
return nil, err
}
// Construct the request's fee preference.
satPerKw := lnwallet.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight()
feePreference := sweep.FeePreference{
ConfTarget: uint32(in.TargetConf),
FeeRate: satPerKw,
}
// We'll attempt to bump the fee of the input through the UtxoSweeper.
// If it is currently attempting to sweep the input, then it'll simply
// bump its fee, which will result in a replacement transaction (RBF)
// being broadcast. If it is not aware of the input however,
// lnwallet.ErrNotMine is returned.
_, err = w.cfg.Sweeper.BumpFee(*op, feePreference)
switch err {
case nil:
return &BumpFeeResponse{}, nil
case lnwallet.ErrNotMine:
break
default:
return nil, err
}
// Since we're unable to perform a bump through RBF, we'll assume the
// user is attempting to bump an unconfirmed transaction's fee rate by
// sweeping an output within it under control of the wallet with a
// higher fee rate, essentially performing a Child-Pays-For-Parent
// (CPFP).
//
// We'll gather all of the information required by the UtxoSweeper in
// order to sweep the output.
utxo, err := w.cfg.Wallet.FetchInputInfo(op)
if err != nil {
return nil, err
}
// We're only able to bump the fee of unconfirmed transactions.
if utxo.Confirmations > 0 {
return nil, errors.New("unable to bump fee of a confirmed " +
"transaction")
}
var witnessType input.WitnessType
switch utxo.AddressType {
case lnwallet.WitnessPubKey:
witnessType = input.WitnessKeyHash
case lnwallet.NestedWitnessPubKey:
witnessType = input.NestedWitnessKeyHash
default:
return nil, fmt.Errorf("unknown input witness %v", op)
}
signDesc := &input.SignDescriptor{
Output: &wire.TxOut{
PkScript: utxo.PkScript,
Value: int64(utxo.Value),
},
HashType: txscript.SigHashAll,
}
// We'll use the current height as the height hint since we're dealing
// with an unconfirmed transaction.
_, currentHeight, err := w.cfg.Chain.GetBestBlock()
if err != nil {
return nil, fmt.Errorf("unable to retrieve current height: %v",
err)
}
input := input.NewBaseInput(op, witnessType, signDesc, uint32(currentHeight))
if _, err = w.cfg.Sweeper.SweepInput(input, feePreference); err != nil {
return nil, err
}
return &BumpFeeResponse{}, nil
}<|fim▁end|> | // main macaroon service. |
<|file_name|>MyfastfileCom.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from module.common.json_layer import json_loads
from module.network.RequestFactory import getURL
from module.plugins.internal.MultiHoster import MultiHoster
class MyfastfileCom(MultiHoster):
__name__ = "MyfastfileCom"
__type__ = "hook"
__version__ = "0.02"
<|fim▁hole|> ("hosterList", "str", "Hoster list (comma separated)", ""),
("unloadFailing", "bool", "Revert to standard download if download fails", False),
("interval", "int", "Reload interval in hours (0 to disable)", 24)]
__description__ = """Myfastfile.com hook plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "[email protected]")]
def getHoster(self):
json_data = getURL('http://myfastfile.com/api.php?hosts', decode=True)
self.logDebug("JSON data", json_data)
json_data = json_loads(json_data)
return json_data['hosts']<|fim▁end|> | __config__ = [("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"), |
<|file_name|>index.ts<|end_file_name|><|fim▁begin|><|fim▁hole|>export * from './ManagedReader';
export * from './ManagedWriter';
export * from './ManagedResourceDetailsIndicator';
export * from './managedResourceDetailsIndicator.component';
export * from './ManagedResourceStatusIndicator';
export * from './managed.module';
export * from './managed.dataSource';
export * from './managedResourceDecorators';
export * from './ManagedMenuItem';
export * from './toggleResourceManagement';
export * from './resources/resourceRegistry';<|fim▁end|> | export * from './DeployingIntoManagedClusterWarning'; |
<|file_name|>symbolize.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Author: Satoru Takabayashi
// Stack-footprint reduction work done by Raksit Ashok
//
// Implementation note:
//
// We don't use heaps but only use stacks. We want to reduce the
// stack consumption so that the symbolizer can run on small stacks.
//
// Here are some numbers collected with GCC 4.1.0 on x86:
// - sizeof(Elf32_Sym) = 16
// - sizeof(Elf32_Shdr) = 40
// - sizeof(Elf64_Sym) = 24
// - sizeof(Elf64_Shdr) = 64
//
// This implementation is intended to be async-signal-safe but uses
// some functions which are not guaranteed to be so, such as memchr()
// and memmove(). We assume they are async-signal-safe.
//
// Additional header can be specified by the GLOG_BUILD_CONFIG_INCLUDE
// macro to add platform specific defines (e.g. OS_OPENBSD).
#ifdef GLOG_BUILD_CONFIG_INCLUDE
#include GLOG_BUILD_CONFIG_INCLUDE
#endif // GLOG_BUILD_CONFIG_INCLUDE
#include "build/build_config.h"
#include "utilities.h"
#if defined(HAVE_SYMBOLIZE)
#include <string.h>
#include <algorithm>
#include <limits>
#include "symbolize.h"
#include "demangle.h"
_START_GOOGLE_NAMESPACE_
// We don't use assert() since it's not guaranteed to be
// async-signal-safe. Instead we define a minimal assertion
// macro. So far, we don't need pretty printing for __FILE__, etc.
// A wrapper for abort() to make it callable in ? :.
static int AssertFail() {
abort();
return 0; // Should not reach.
}
#define SAFE_ASSERT(expr) ((expr) ? 0 : AssertFail())
static SymbolizeCallback g_symbolize_callback = NULL;
void InstallSymbolizeCallback(SymbolizeCallback callback) {
g_symbolize_callback = callback;
}
static SymbolizeOpenObjectFileCallback g_symbolize_open_object_file_callback =
NULL;
void InstallSymbolizeOpenObjectFileCallback(
SymbolizeOpenObjectFileCallback callback) {
g_symbolize_open_object_file_callback = callback;
}
// This function wraps the Demangle function to provide an interface
// where the input symbol is demangled in-place.
// To keep stack consumption low, we would like this function to not
// get inlined.
static ATTRIBUTE_NOINLINE void DemangleInplace(char *out, int out_size) {
char demangled[256]; // Big enough for sane demangled symbols.
if (Demangle(out, demangled, sizeof(demangled))) {
// Demangling succeeded. Copy to out if the space allows.
size_t len = strlen(demangled);
if (len + 1 <= (size_t)out_size) { // +1 for '\0'.
SAFE_ASSERT(len < sizeof(demangled));
memmove(out, demangled, len + 1);
}
}
}
_END_GOOGLE_NAMESPACE_
#if defined(__ELF__)
#if defined(HAVE_DLFCN_H)
#include <dlfcn.h>
#endif
#if BUILDFLAG(IS_OPENBSD)
#include <sys/exec_elf.h>
#else
#include <elf.h>
#endif
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "symbolize.h"
#include "config.h"
#include "glog/raw_logging.h"
// Re-runs fn until it doesn't cause EINTR.
#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
_START_GOOGLE_NAMESPACE_
// Read up to "count" bytes from "offset" in the file pointed by file
// descriptor "fd" into the buffer starting at "buf" while handling short reads
// and EINTR. On success, return the number of bytes read. Otherwise, return
// -1.
ssize_t ReadFromOffset(const int fd,
void* buf,
const size_t count,
const off_t offset) {
SAFE_ASSERT(fd >= 0);
SAFE_ASSERT(count <= std::numeric_limits<ssize_t>::max());
char *buf0 = reinterpret_cast<char *>(buf);
ssize_t num_bytes = 0;
while (num_bytes < count) {
ssize_t len;
NO_INTR(len = pread(fd, buf0 + num_bytes, count - num_bytes,
offset + num_bytes));
if (len < 0) { // There was an error other than EINTR.
return -1;
}
if (len == 0) { // Reached EOF.
break;
}
num_bytes += len;
}
SAFE_ASSERT(num_bytes <= count);
return num_bytes;
}
// Try reading exactly "count" bytes from "offset" bytes in a file
// pointed by "fd" into the buffer starting at "buf" while handling
// short reads and EINTR. On success, return true. Otherwise, return
// false.
static bool ReadFromOffsetExact(const int fd, void *buf,
const size_t count, const off_t offset) {
ssize_t len = ReadFromOffset(fd, buf, count, offset);
return len == count;
}
// Returns elf_header.e_type if the file pointed by fd is an ELF binary.
static int FileGetElfType(const int fd) {
ElfW(Ehdr) elf_header;
if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
return -1;
}
if (memcmp(elf_header.e_ident, ELFMAG, SELFMAG) != 0) {
return -1;
}
return elf_header.e_type;
}
// Read the section headers in the given ELF binary, and if a section
// of the specified type is found, set the output to this section header
// and return true. Otherwise, return false.
// To keep stack consumption low, we would like this function to not get
// inlined.
static ATTRIBUTE_NOINLINE bool
GetSectionHeaderByType(const int fd, ElfW(Half) sh_num, const off_t sh_offset,
ElfW(Word) type, ElfW(Shdr) *out) {
// Read at most 16 section headers at a time to save read calls.
ElfW(Shdr) buf[16];
for (int i = 0; i < sh_num;) {
const ssize_t num_bytes_left = (sh_num - i) * sizeof(buf[0]);
const ssize_t num_bytes_to_read =
(sizeof(buf) > num_bytes_left) ? num_bytes_left : sizeof(buf);
const ssize_t len = ReadFromOffset(fd, buf, num_bytes_to_read,
sh_offset + i * sizeof(buf[0]));
if (len == -1) {
return false;
}
SAFE_ASSERT(len % sizeof(buf[0]) == 0);
const ssize_t num_headers_in_buf = len / sizeof(buf[0]);
SAFE_ASSERT(num_headers_in_buf <= sizeof(buf) / sizeof(buf[0]));
for (int j = 0; j < num_headers_in_buf; ++j) {
if (buf[j].sh_type == type) {
*out = buf[j];
return true;
}
}
i += num_headers_in_buf;
}
return false;
}
// There is no particular reason to limit section name to 63 characters,
// but there has (as yet) been no need for anything longer either.
const int kMaxSectionNameLen = 64;
// name_len should include terminating '\0'.
bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
ElfW(Shdr) *out) {
ElfW(Ehdr) elf_header;
if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
return false;
}
ElfW(Shdr) shstrtab;
off_t shstrtab_offset = (elf_header.e_shoff +
elf_header.e_shentsize * elf_header.e_shstrndx);
if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
return false;
}
for (int i = 0; i < elf_header.e_shnum; ++i) {
off_t section_header_offset = (elf_header.e_shoff +
elf_header.e_shentsize * i);
if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) {
return false;
}
char header_name[kMaxSectionNameLen];
if (sizeof(header_name) < name_len) {
RAW_LOG(WARNING, "Section name '%s' is too long (%" PRIuS "); "
"section will not be found (even if present).", name, name_len);
// No point in even trying.
return false;
}
off_t name_offset = shstrtab.sh_offset + out->sh_name;
ssize_t n_read = ReadFromOffset(fd, &header_name, name_len, name_offset);
if (n_read == -1) {
return false;
} else if (n_read != name_len) {
// Short read -- name could be at end of file.
continue;
}
if (memcmp(header_name, name, name_len) == 0) {
return true;
}
}
return false;
}
// Read a symbol table and look for the symbol containing the
// pc. Iterate over symbols in a symbol table and look for the symbol
// containing "pc". On success, return true and write the symbol name
// to out. Otherwise, return false.
// To keep stack consumption low, we would like this function to not get
// inlined.
static ATTRIBUTE_NOINLINE bool
FindSymbol(uint64_t pc, const int fd, char *out, int out_size,
uint64_t symbol_offset, const ElfW(Shdr) *strtab,
const ElfW(Shdr) *symtab) {
if (symtab == NULL) {
return false;
}
const int num_symbols = symtab->sh_size / symtab->sh_entsize;
for (int i = 0; i < num_symbols;) {
off_t offset = symtab->sh_offset + i * symtab->sh_entsize;
// If we are reading Elf64_Sym's, we want to limit this array to
// 32 elements (to keep stack consumption low), otherwise we can
// have a 64 element Elf32_Sym array.
#if __WORDSIZE == 64
#define NUM_SYMBOLS 32
#else
#define NUM_SYMBOLS 64
#endif
// Read at most NUM_SYMBOLS symbols at once to save read() calls.
ElfW(Sym) buf[NUM_SYMBOLS];
int num_symbols_to_read = std::min(NUM_SYMBOLS, num_symbols - i);
const ssize_t len =
ReadFromOffset(fd, &buf, sizeof(buf[0]) * num_symbols_to_read, offset);
SAFE_ASSERT(len % sizeof(buf[0]) == 0);
const ssize_t num_symbols_in_buf = len / sizeof(buf[0]);
SAFE_ASSERT(num_symbols_in_buf <= num_symbols_to_read);
for (int j = 0; j < num_symbols_in_buf; ++j) {
const ElfW(Sym)& symbol = buf[j];
uint64_t start_address = symbol.st_value;
start_address += symbol_offset;
uint64_t end_address = start_address + symbol.st_size;
if (symbol.st_value != 0 && // Skip null value symbols.
symbol.st_shndx != 0 && // Skip undefined symbols.
start_address <= pc && pc < end_address) {
ssize_t len1 = ReadFromOffset(fd, out, out_size,
strtab->sh_offset + symbol.st_name);
if (len1 <= 0 || memchr(out, '\0', out_size) == NULL) {
memset(out, 0, out_size);
return false;
}
return true; // Obtained the symbol name.
}
}
i += num_symbols_in_buf;
}
return false;
}
// Get the symbol name of "pc" from the file pointed by "fd". Process
// both regular and dynamic symbol tables if necessary. On success,
// write the symbol name to "out" and return true. Otherwise, return
// false.
static bool GetSymbolFromObjectFile(const int fd,
uint64_t pc,
char* out,
int out_size,
uint64_t base_address) {
// Read the ELF header.
ElfW(Ehdr) elf_header;
if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
return false;
}
ElfW(Shdr) symtab, strtab;
// Consult a regular symbol table first.
if (GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
SHT_SYMTAB, &symtab)) {
if (!ReadFromOffsetExact(fd, &strtab, sizeof(strtab), elf_header.e_shoff +
symtab.sh_link * sizeof(symtab))) {
return false;
}
if (FindSymbol(pc, fd, out, out_size, base_address, &strtab, &symtab)) {
return true; // Found the symbol in a regular symbol table.
}
}
// If the symbol is not found, then consult a dynamic symbol table.
if (GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
SHT_DYNSYM, &symtab)) {
if (!ReadFromOffsetExact(fd, &strtab, sizeof(strtab), elf_header.e_shoff +
symtab.sh_link * sizeof(symtab))) {
return false;
}
if (FindSymbol(pc, fd, out, out_size, base_address, &strtab, &symtab)) {
return true; // Found the symbol in a dynamic symbol table.
}
}
return false;
}
// Thin wrapper around a file descriptor so that the file descriptor
// gets closed for sure.
FileDescriptor::FileDescriptor(int fd) : fd_(fd) {}
FileDescriptor::~FileDescriptor() {
if (fd_ >= 0) {
close(fd_);
}
}
namespace {
// Helper class for reading lines from file.
//
// Note: we don't use ProcMapsIterator since the object is big (it has
// a 5k array member) and uses async-unsafe functions such as sscanf()
// and snprintf().
class LineReader {
public:
explicit LineReader(int fd, char *buf, int buf_len, off_t offset)
: fd_(fd),
buf_(buf),
buf_len_(buf_len),
offset_(offset),
bol_(buf),
eol_(buf),
eod_(buf) {}
// Read '\n'-terminated line from file. On success, modify "bol"
// and "eol", then return true. Otherwise, return false.
//
// Note: if the last line doesn't end with '\n', the line will be
// dropped. It's an intentional behavior to make the code simple.
bool ReadLine(const char **bol, const char **eol) {
if (BufferIsEmpty()) { // First time.
const ssize_t num_bytes = ReadFromOffset(fd_, buf_, buf_len_, offset_);
if (num_bytes <= 0) { // EOF or error.
return false;
}
offset_ += num_bytes;
eod_ = buf_ + num_bytes;
bol_ = buf_;
} else {
bol_ = eol_ + 1; // Advance to the next line in the buffer.
SAFE_ASSERT(bol_ <= eod_); // "bol_" can point to "eod_".
if (!HasCompleteLine()) {
const int incomplete_line_length = eod_ - bol_;
// Move the trailing incomplete line to the beginning.
memmove(buf_, bol_, incomplete_line_length);
// Read text from file and append it.
char * const append_pos = buf_ + incomplete_line_length;
const int capacity_left = buf_len_ - incomplete_line_length;
const ssize_t num_bytes =
ReadFromOffset(fd_, append_pos, capacity_left, offset_);
if (num_bytes <= 0) { // EOF or error.
return false;
}
offset_ += num_bytes;
eod_ = append_pos + num_bytes;
bol_ = buf_;
}
}
eol_ = FindLineFeed();
if (eol_ == NULL) { // '\n' not found. Malformed line.
return false;
}
*eol_ = '\0'; // Replace '\n' with '\0'.
*bol = bol_;
*eol = eol_;
return true;
}
// Beginning of line.
const char *bol() {
return bol_;
}
// End of line.
const char *eol() {
return eol_;
}
private:
explicit LineReader(const LineReader&);
void operator=(const LineReader&);
char *FindLineFeed() {
return reinterpret_cast<char *>(memchr(bol_, '\n', eod_ - bol_));
}
bool BufferIsEmpty() {
return buf_ == eod_;
}
bool HasCompleteLine() {
return !BufferIsEmpty() && FindLineFeed() != NULL;
}
const int fd_;
char * const buf_;
const int buf_len_;
off_t offset_;
char *bol_;
char *eol_;
const char *eod_; // End of data in "buf_".
};
} // namespace
// Place the hex number read from "start" into "*hex". The pointer to
// the first non-hex character or "end" is returned.
static char *GetHex(const char *start, const char *end, uint64_t *hex) {
*hex = 0;
const char *p;
for (p = start; p < end; ++p) {
int ch = *p;
if ((ch >= '0' && ch <= '9') ||
(ch >= 'A' && ch <= 'F') || (ch >= 'a' && ch <= 'f')) {
*hex = (*hex << 4) | (ch < 'A' ? ch - '0' : (ch & 0xF) + 9);
} else { // Encountered the first non-hex character.
break;
}
}
SAFE_ASSERT(p <= end);
return const_cast<char *>(p);
}
// Searches for the object file (from /proc/self/maps) that contains
// the specified pc. If found, sets |start_address| to the start address
// of where this object file is mapped in memory, sets the module base
// address into |base_address|, copies the object file name into
// |out_file_name|, and attempts to open the object file. If the object
// file is opened successfully, returns the file descriptor. Otherwise,
// returns -1. |out_file_name_size| is the size of the file name buffer
// (including the null-terminator).
ATTRIBUTE_NOINLINE int OpenObjectFileContainingPcAndGetStartAddress(
uint64_t pc,
uint64_t& start_address,
uint64_t& end_address,
uint64_t& base_address,
char* out_file_name,
int out_file_name_size) {
int object_fd;
int maps_fd;
NO_INTR(maps_fd = open("/proc/self/maps", O_RDONLY));
FileDescriptor wrapped_maps_fd(maps_fd);
if (wrapped_maps_fd.get() < 0) {
return -1;
}
int mem_fd;
NO_INTR(mem_fd = open("/proc/self/mem", O_RDONLY));
FileDescriptor wrapped_mem_fd(mem_fd);
if (wrapped_mem_fd.get() < 0) {
return -1;
}
// Iterate over maps and look for the map containing the pc. Then
// look into the symbol tables inside.
char buf[1024]; // Big enough for line of sane /proc/self/maps
int num_maps = 0;
LineReader reader(wrapped_maps_fd.get(), buf, sizeof(buf), 0);
while (true) {
num_maps++;
const char *cursor;
const char *eol;
if (!reader.ReadLine(&cursor, &eol)) { // EOF or malformed line.
return -1;
}
// Start parsing line in /proc/self/maps. Here is an example:
//
// 08048000-0804c000 r-xp 00000000 08:01 2142121 /bin/cat
//
// We want start address (08048000), end address (0804c000), flags
// (r-xp) and file name (/bin/cat).
// Read start address.
cursor = GetHex(cursor, eol, &start_address);
if (cursor == eol || *cursor != '-') {
return -1; // Malformed line.
}
++cursor; // Skip '-'.
// Read end address.
cursor = GetHex(cursor, eol, &end_address);
if (cursor == eol || *cursor != ' ') {
return -1; // Malformed line.
}
++cursor; // Skip ' '.
// Read flags. Skip flags until we encounter a space or eol.
const char * const flags_start = cursor;
while (cursor < eol && *cursor != ' ') {
++cursor;
}
// We expect at least four letters for flags (ex. "r-xp").
if (cursor == eol || cursor < flags_start + 4) {
return -1; // Malformed line.
}
// Determine the base address by reading ELF headers in process memory.
ElfW(Ehdr) ehdr;
// Skip non-readable maps.
if (flags_start[0] == 'r' &&
ReadFromOffsetExact(mem_fd, &ehdr, sizeof(ElfW(Ehdr)), start_address) &&
memcmp(ehdr.e_ident, ELFMAG, SELFMAG) == 0) {
switch (ehdr.e_type) {
case ET_EXEC:
base_address = 0;
break;
case ET_DYN:
// Find the segment containing file offset 0. This will correspond
// to the ELF header that we just read. Normally this will have
// virtual address 0, but this is not guaranteed. We must subtract
// the virtual address from the address where the ELF header was
// mapped to get the base address.
//
// If we fail to find a segment for file offset 0, use the address
// of the ELF header as the base address.
base_address = start_address;
for (unsigned i = 0; i != ehdr.e_phnum; ++i) {
ElfW(Phdr) phdr;
if (ReadFromOffsetExact(
mem_fd, &phdr, sizeof(phdr),
start_address + ehdr.e_phoff + i * sizeof(phdr)) &&
phdr.p_type == PT_LOAD && phdr.p_offset == 0) {
base_address = start_address - phdr.p_vaddr;
break;
}
}
break;
default:
// ET_REL or ET_CORE. These aren't directly executable, so they don't
// affect the base address.
break;
}
}<|fim▁hole|> }
// Check flags. We are only interested in "r*x" maps.
if (flags_start[0] != 'r' || flags_start[2] != 'x') {
continue; // We skip this map.
}
++cursor; // Skip ' '.
// Read file offset.
uint64_t file_offset;
cursor = GetHex(cursor, eol, &file_offset);
if (cursor == eol || *cursor != ' ') {
return -1; // Malformed line.
}
++cursor; // Skip ' '.
// Skip to file name. "cursor" now points to dev. We need to
// skip at least two spaces for dev and inode.
int num_spaces = 0;
while (cursor < eol) {
if (*cursor == ' ') {
++num_spaces;
} else if (num_spaces >= 2) {
// The first non-space character after skipping two spaces
// is the beginning of the file name.
break;
}
++cursor;
}
if (cursor == eol) {
return -1; // Malformed line.
}
// Finally, "cursor" now points to file name of our interest.
NO_INTR(object_fd = open(cursor, O_RDONLY));
if (object_fd < 0) {
// Failed to open object file. Copy the object file name to
// |out_file_name|.
strncpy(out_file_name, cursor, out_file_name_size);
// Making sure |out_file_name| is always null-terminated.
out_file_name[out_file_name_size - 1] = '\0';
return -1;
}
return object_fd;
}
}
// POSIX doesn't define any async-signal safe function for converting
// an integer to ASCII. We'll have to define our own version.
// itoa_r() converts a (signed) integer to ASCII. It returns "buf", if the
// conversion was successful or NULL otherwise. It never writes more than "sz"
// bytes. Output will be truncated as needed, and a NUL character is always
// appended.
// NOTE: code from sandbox/linux/seccomp-bpf/demo.cc.
char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
// Make sure we can write at least one NUL byte.
size_t n = 1;
if (n > sz)
return NULL;
if (base < 2 || base > 16) {
buf[0] = '\000';
return NULL;
}
char *start = buf;
uintptr_t j = i;
// Handle negative numbers (only for base 10).
if (i < 0 && base == 10) {
// This does "j = -i" while avoiding integer overflow.
j = static_cast<uintptr_t>(-(i + 1)) + 1;
// Make sure we can write the '-' character.
if (++n > sz) {
buf[0] = '\000';
return NULL;
}
*start++ = '-';
}
// Loop until we have converted the entire number. Output at least one
// character (i.e. '0').
char *ptr = start;
do {
// Make sure there is still enough space left in our output buffer.
if (++n > sz) {
buf[0] = '\000';
return NULL;
}
// Output the next digit.
*ptr++ = "0123456789abcdef"[j % base];
j /= base;
if (padding > 0)
padding--;
} while (j > 0 || padding > 0);
// Terminate the output with a NUL character.
*ptr = '\000';
// Conversion to ASCII actually resulted in the digits being in reverse
// order. We can't easily generate them in forward order, as we can't tell
// the number of characters needed until we are done converting.
// So, now, we reverse the string (except for the possible "-" sign).
while (--ptr > start) {
char ch = *ptr;
*ptr = *start;
*start++ = ch;
}
return buf;
}
// Safely appends string |source| to string |dest|. Never writes past the
// buffer size |dest_size| and guarantees that |dest| is null-terminated.
static void SafeAppendString(const char* source, char* dest, int dest_size) {
int dest_string_length = strlen(dest);
SAFE_ASSERT(dest_string_length < dest_size);
dest += dest_string_length;
dest_size -= dest_string_length;
strncpy(dest, source, dest_size);
// Making sure |dest| is always null-terminated.
dest[dest_size - 1] = '\0';
}
// Converts a 64-bit value into a hex string, and safely appends it to |dest|.
// Never writes past the buffer size |dest_size| and guarantees that |dest| is
// null-terminated.
static void SafeAppendHexNumber(uint64_t value, char* dest, int dest_size) {
// 64-bit numbers in hex can have up to 16 digits.
char buf[17] = {'\0'};
SafeAppendString(itoa_r(value, buf, sizeof(buf), 16, 0), dest, dest_size);
}
// The implementation of our symbolization routine. If it
// successfully finds the symbol containing "pc" and obtains the
// symbol name, returns true and write the symbol name to "out".
// Otherwise, returns false. If Callback function is installed via
// InstallSymbolizeCallback(), the function is also called in this function,
// and "out" is used as its output.
// To keep stack consumption low, we would like this function to not
// get inlined.
static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
int out_size) {
uint64_t pc0 = reinterpret_cast<uintptr_t>(pc);
uint64_t start_address = 0;
uint64_t end_address = 0;
uint64_t base_address = 0;
int object_fd = -1;
if (out_size < 1) {
return false;
}
out[0] = '\0';
SafeAppendString("(", out, out_size);
if (g_symbolize_open_object_file_callback) {
object_fd = g_symbolize_open_object_file_callback(pc0, start_address,
base_address, out + 1,
out_size - 1);
} else {
object_fd = OpenObjectFileContainingPcAndGetStartAddress(
pc0, start_address, base_address, end_address, out + 1, out_size - 1);
}
FileDescriptor wrapped_object_fd(object_fd);
#if defined(PRINT_UNSYMBOLIZED_STACK_TRACES)
{
#else
// Check whether a file name was returned.
if (object_fd < 0) {
#endif
if (out[1]) {
// The object file containing PC was determined successfully however the
// object file was not opened successfully. This is still considered
// success because the object file name and offset are known and tools
// like asan_symbolize.py can be used for the symbolization.
out[out_size - 1] = '\0'; // Making sure |out| is always null-terminated.
SafeAppendString("+0x", out, out_size);
SafeAppendHexNumber(pc0 - base_address, out, out_size);
SafeAppendString(")", out, out_size);
return true;
}
// Failed to determine the object file containing PC. Bail out.
return false;
}
int elf_type = FileGetElfType(wrapped_object_fd.get());
if (elf_type == -1) {
return false;
}
if (g_symbolize_callback) {
// Run the call back if it's installed.
// Note: relocation (and much of the rest of this code) will be
// wrong for prelinked shared libraries and PIE executables.
uint64_t relocation = (elf_type == ET_DYN) ? start_address : 0;
int num_bytes_written = g_symbolize_callback(wrapped_object_fd.get(),
pc, out, out_size,
relocation);
if (num_bytes_written > 0) {
out += num_bytes_written;
out_size -= num_bytes_written;
}
}
if (!GetSymbolFromObjectFile(wrapped_object_fd.get(), pc0,
out, out_size, base_address)) {
if (out[1] && !g_symbolize_callback) {
// The object file containing PC was opened successfully however the
// symbol was not found. The object may have been stripped. This is still
// considered success because the object file name and offset are known
// and tools like asan_symbolize.py can be used for the symbolization.
out[out_size - 1] = '\0'; // Making sure |out| is always null-terminated.
SafeAppendString("+0x", out, out_size);
SafeAppendHexNumber(pc0 - base_address, out, out_size);
SafeAppendString(")", out, out_size);
return true;
}
return false;
}
// Symbolization succeeded. Now we try to demangle the symbol.
DemangleInplace(out, out_size);
return true;
}
_END_GOOGLE_NAMESPACE_
#elif BUILDFLAG(IS_APPLE) && defined(HAVE_DLADDR)
#include <dlfcn.h>
#include <string.h>
_START_GOOGLE_NAMESPACE_
static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
int out_size) {
Dl_info info;
if (dladdr(pc, &info)) {
if ((int)strlen(info.dli_sname) < out_size) {
strcpy(out, info.dli_sname);
// Symbolization succeeded. Now we try to demangle the symbol.
DemangleInplace(out, out_size);
return true;
}
}
return false;
}
_END_GOOGLE_NAMESPACE_
#elif defined(OS_WINDOWS) || defined(OS_CYGWIN)
#include <windows.h>
#include <dbghelp.h>
#ifdef _MSC_VER
#pragma comment(lib, "dbghelp")
#endif
_START_GOOGLE_NAMESPACE_
class SymInitializer {
public:
HANDLE process;
bool ready;
SymInitializer() : process(NULL), ready(false) {
// Initialize the symbol handler.
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms680344(v=vs.85).aspx
process = GetCurrentProcess();
// Defer symbol loading.
// We do not request undecorated symbols with SYMOPT_UNDNAME
// because the mangling library calls UnDecorateSymbolName.
SymSetOptions(SYMOPT_DEFERRED_LOADS);
if (SymInitialize(process, NULL, true)) {
ready = true;
}
}
~SymInitializer() {
SymCleanup(process);
// We do not need to close `HANDLE process` because it's a "pseudo handle."
}
private:
SymInitializer(const SymInitializer&);
SymInitializer& operator=(const SymInitializer&);
};
static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
int out_size) {
const static SymInitializer symInitializer;
if (!symInitializer.ready) {
return false;
}
// Resolve symbol information from address.
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms680578(v=vs.85).aspx
char buf[sizeof(SYMBOL_INFO) + MAX_SYM_NAME];
SYMBOL_INFO *symbol = reinterpret_cast<SYMBOL_INFO *>(buf);
symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
symbol->MaxNameLen = MAX_SYM_NAME;
// We use the ANSI version to ensure the string type is always `char *`.
// This could break if a symbol has Unicode in it.
BOOL ret = SymFromAddr(symInitializer.process,
reinterpret_cast<DWORD64>(pc), 0, symbol);
if (ret == 1 && static_cast<int>(symbol->NameLen) < out_size) {
// `NameLen` does not include the null terminating character.
strncpy(out, symbol->Name, static_cast<size_t>(symbol->NameLen) + 1);
out[static_cast<size_t>(symbol->NameLen)] = '\0';
// Symbolization succeeded. Now we try to demangle the symbol.
DemangleInplace(out, out_size);
return true;
}
return false;
}
_END_GOOGLE_NAMESPACE_
#else
# error BUG: HAVE_SYMBOLIZE was wrongly set
#endif
_START_GOOGLE_NAMESPACE_
bool Symbolize(void *pc, char *out, int out_size) {
SAFE_ASSERT(out_size >= 0);
return SymbolizeAndDemangle(pc, out, out_size);
}
_END_GOOGLE_NAMESPACE_
#else /* HAVE_SYMBOLIZE */
#include <assert.h>
#include "config.h"
_START_GOOGLE_NAMESPACE_
// TODO: Support other environments.
bool Symbolize(void *pc, char *out, int out_size) {
assert(0);
return false;
}
_END_GOOGLE_NAMESPACE_
#endif<|fim▁end|> |
// Check start and end addresses.
if (!(start_address <= pc && pc < end_address)) {
continue; // We skip this map. PC isn't in this map. |
<|file_name|>ReadSavedData.java<|end_file_name|><|fim▁begin|>import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
public class ReadSavedData
{
public static void StartRead() throws IOException
{
FileReader file = new FileReader("C:/Users/Public/Documents/SavedData.txt");
BufferedReader reader = new BufferedReader(file);
String text = "";
String line = reader.readLine();
while (line != null)
{
text += line;
line = reader.readLine();
}
reader.close();
System.out.println(text);
<|fim▁hole|>
}
}<|fim▁end|> | |
<|file_name|>validation.py<|end_file_name|><|fim▁begin|>from voluptuous import Invalid
import re
def Url(msg=None):
def f(v):
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...<|fim▁hole|>
if re.match(regex, str(v)):
return str(v)
else:
raise Invalid(msg or "value is not correct Uniform Resource Locator")
return f
def TrackableCid(msg=None):
def f(v):
regex = re.compile(r'^[A-Z]{2}-[A-Z]{2}-[1-9]{1}[0-9]*$', re.IGNORECASE)
if re.match(regex, str(v)):
return str(v)
else:
raise Invalid(msg or "value is not correct trackable CID")
return f<|fim▁end|> | r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE) |
<|file_name|>issue-7660.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Regresion test for issue 7660
// rvalue lifetime too short when equivalent `match` works
extern crate collections;
use collections::HashMap;
struct A(int, int);
pub fn main() {
let mut m: HashMap<int, A> = HashMap::new();
m.insert(1, A(0, 0));
<|fim▁hole|><|fim▁end|> | let A(ref _a, ref _b) = *m.get(&1);
let (a, b) = match *m.get(&1) { A(ref _a, ref _b) => (_a, _b) };
} |
<|file_name|>cloudns.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'ClouDNSDNSDriver'
]
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.types import ZoneAlreadyExistsError
from libcloud.dns.base import DNSDriver, Zone, Record
VALID_RECORD_EXTRA_PARAMS = ['priority', 'ttl']
class ClouDNSDNSResponse(JsonResponse):
def success(self):
if not super(ClouDNSDNSResponse, self).success():
return False
body = self.parse_body()
if type(body) is dict and body.get('status') == 'Failed':
return False
return True
<|fim▁hole|> status_description = self.parse_body()['statusDescription']
if status_description == u'{} has been already added.'.format(
context['id']):
if context['resource'] == 'zone':
raise ZoneAlreadyExistsError(value='', driver=self,
zone_id=context['id'])
super(ClouDNSDNSResponse, self).parse_error()
return self.body
class ClouDNSDNSConnection(ConnectionUserAndKey):
host = 'api.cloudns.net'
secure = True
responseCls = ClouDNSDNSResponse
def add_default_params(self, params):
params['auth-id'] = self.user_id
params['auth-password'] = self.key
return params
def request(self, action, params=None, data='', headers=None,
method='POST'):
return super(ClouDNSDNSConnection, self).request(action=action,
params=params,
data=data,
method=method,
headers=headers)
class ClouDNSDNSDriver(DNSDriver):
type = Provider.CLOUDNS
name = 'ClouDNS DNS'
website = 'https://www.cloudns.net'
connectionCls = ClouDNSDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
RecordType.NS: 'SPF',
RecordType.SRV: 'SRV',
RecordType.TXT: 'TXT',
}
def _to_zone(self, item):
ttl = item.get('ttl', 3600)
zone = Zone(id=item['name'], domain=item['name'],
type=item['type'], ttl=ttl, driver=self)
return zone
def _to_record(self, item, zone=None):
extra = {'ttl': item['ttl']}
record = Record(id=item['id'], name=item['host'],
type=item['type'], data=item['record'],
zone=zone, driver=self, extra=extra)
return record
def get_zone(self, zone_id):
self.connection.set_context({'resource': 'zone', 'id': zone_id})
params = {'page': 1, 'rows-per-page': 10, 'search': zone_id}
zone_result = self.connection.request(
'/dns/list-zones.json', params=params).object
if not zone_result:
raise ZoneDoesNotExistError(value='', driver=self,
zone_id=zone_id)
return self._to_zone(zone_result[0])
def iterate_zones(self):
page = 1
rows_per_page = 100
params = {'page': page, 'rows-per-page': rows_per_page}
zones_list = []
while True:
page_result = self.connection.request(
'/dns/list-zones.json', params=params).object
if not page_result:
break
zones_list.extend(page_result)
params['page'] += 1
for item in zones_list:
yield self._to_zone(item)
def create_zone(self, domain, ttl=None, extra=None):
self.connection.set_context({'resource': 'zone', 'id': domain})
params = {'domain-name': domain, 'zone-type': 'master'}
self.connection.request(
'/dns/register.json', params=params).object
zone = Zone(id=domain, domain=domain,
type='master', ttl=3600, driver=self)
return zone
def delete_zone(self, zone):
self.connection.set_context({'resource': 'zone', 'id': zone.id})
params = {'domain-name': zone.id}
self.connection.request(
'/dns/delete.json', params=params).object
return True
def iterate_records(self, zone):
self.connection.set_context({'resource': 'zone', 'id': zone.id})
params = {'domain-name': zone.id}
records_list = self.connection.request(
'/dns/records.json', params=params).object
if not len(records_list):
return
for item in records_list.values():
yield self._to_record(item, zone=zone)
def get_record(self, zone_id, record_id):
zone = self.get_zone(zone_id=zone_id)
for record in self.iterate_records(zone):
if record.id == record_id:
return record
raise RecordDoesNotExistError(value='', driver=self,
record_id=record_id)
def delete_record(self, record):
self.connection.set_context({'resource': 'record', 'id': record.id})
params = {'domain-name': record.zone.id, 'record-id': record.id}
self.connection.request(
action='/dns/delete-record.json', params=params)
return True
def create_record(self, name, zone, type, data, extra=None):
params = {
'domain-name': zone.id,
'host': name,
'record-type': type,
'record': data,
'ttl': 3600
}
if extra:
if extra.get('ttl'):
params['ttl'] = extra['ttl']
if extra.get('priority'):
params['priority'] = extra['priority']
record_result = self.connection.request(
action='/dns/add-record.json', params=params).object
return Record(id=record_result['data']['id'], name=name,
type=type, data=data,
zone=zone, driver=self, extra=extra)<|fim▁end|> | def parse_error(self):
context = self.connection.context |
<|file_name|>mime_classifier.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::borrow::ToOwned;
pub struct MIMEClassifier {
image_classifier: GroupedClassifier,
audio_video_classifier: GroupedClassifier,
scriptable_classifier: GroupedClassifier,
plaintext_classifier: GroupedClassifier,
archive_classifier: GroupedClassifier,
binary_or_plaintext: BinaryOrPlaintextClassifier,
feeds_classifier: FeedsClassifier
}
pub enum MediaType {
Xml,
Html,
AudioVideo,
Image,
}
pub enum ApacheBugFlag {
ON,
OFF
}
#[derive(PartialEq)]
pub enum NoSniffFlag {
ON,
OFF
}
impl MIMEClassifier {
//Performs MIME Type Sniffing Algorithm (section 7)
pub fn classify(&self,
no_sniff_flag: NoSniffFlag,
apache_bug_flag: ApacheBugFlag,
supplied_type: &Option<(String, String)>,
data: &[u8]) -> (String, String) {
match *supplied_type {
None => self.sniff_unknown_type(no_sniff_flag, data),
Some(ref supplied_type) => {
let &(ref media_type, ref media_subtype) = supplied_type;
if MIMEClassifier::is_explicit_unknown(media_type, media_subtype) {
self.sniff_unknown_type(no_sniff_flag, data)
} else {
match no_sniff_flag {
NoSniffFlag::ON => supplied_type.clone(),
NoSniffFlag::OFF => match apache_bug_flag {
ApacheBugFlag::ON => self.sniff_text_or_data(data),
ApacheBugFlag::OFF => match MIMEClassifier::get_media_type(media_type,
media_subtype) {
Some(MediaType::Xml) => None,
Some(MediaType::Html) => self.feeds_classifier.classify(data),
Some(MediaType::Image) => self.image_classifier.classify(data),
Some(MediaType::AudioVideo) => self.audio_video_classifier.classify(data),
None => None
}.unwrap_or(supplied_type.clone())
}
}
}
}
}
}
pub fn new() -> MIMEClassifier {
MIMEClassifier {
image_classifier: GroupedClassifier::image_classifer(),
audio_video_classifier: GroupedClassifier::audio_video_classifier(),
scriptable_classifier: GroupedClassifier::scriptable_classifier(),
plaintext_classifier: GroupedClassifier::plaintext_classifier(),
archive_classifier: GroupedClassifier::archive_classifier(),
binary_or_plaintext: BinaryOrPlaintextClassifier,
feeds_classifier: FeedsClassifier
}
}
//some sort of iterator over the classifiers might be better?
fn sniff_unknown_type(&self, no_sniff_flag: NoSniffFlag, data: &[u8]) -> (String, String) {
let should_sniff_scriptable = no_sniff_flag == NoSniffFlag::OFF;
let sniffed = if should_sniff_scriptable {
self.scriptable_classifier.classify(data)
} else {
None
};
sniffed.or_else(|| self.plaintext_classifier.classify(data))
.or_else(|| self.image_classifier.classify(data))
.or_else(|| self.audio_video_classifier.classify(data))
.or_else(|| self.archive_classifier.classify(data))
.or_else(|| self.binary_or_plaintext.classify(data))
.expect("BinaryOrPlaintextClassifier always succeeds")
}
fn sniff_text_or_data(&self, data: &[u8]) -> (String, String) {
self.binary_or_plaintext.classify(data).expect("BinaryOrPlaintextClassifier always succeeds")
}
fn is_xml(tp: &str, sub_tp: &str) -> bool {
sub_tp.ends_with("+xml") ||
match (tp, sub_tp) {
("application", "xml") | ("text", "xml") => true,
_ => false
}
}
fn is_html(tp: &str, sub_tp: &str) -> bool {
tp == "text" && sub_tp == "html"
}
fn is_image(tp: &str) -> bool {
tp == "image"
}
fn is_audio_video(tp: &str, sub_tp: &str) -> bool {
tp == "audio" ||
tp == "video" ||
(tp == "application" && sub_tp == "ogg")
}
fn is_explicit_unknown(tp: &str, sub_tp: &str) -> bool {
match(tp, sub_tp) {
("unknown", "unknown") |
("application", "unknown") |
("*", "*") => true,
_ => false
}
}
fn get_media_type(media_type: &str,
media_subtype: &str) -> Option<MediaType> {
if MIMEClassifier::is_xml(media_type, media_subtype) {
Some(MediaType::Xml)
} else if MIMEClassifier::is_html(media_type, media_subtype) {
Some(MediaType::Html)
} else if MIMEClassifier::is_image(media_type) {
Some(MediaType::Image)
} else if MIMEClassifier::is_audio_video(media_type, media_subtype) {
Some(MediaType::AudioVideo)
} else {
None
}
}
}
pub fn as_string_option(tup: Option<(&'static str, &'static str)>) -> Option<(String, String)> {
tup.map(|(a, b)| (a.to_owned(), b.to_owned()))
}
//Interface used for composite types
trait MIMEChecker {
fn classify(&self, data: &[u8]) -> Option<(String, String)>;
}
trait Matches {
fn matches(&mut self, matches: &[u8]) -> bool;
}
impl <'a, T: Iterator<Item=&'a u8> + Clone> Matches for T {
// Matching function that works on an iterator.
// see if the next matches.len() bytes in data_iterator equal matches
// move iterator and return true or just return false
//
// Params
// self: an iterator
// matches: a vector of bytes to match
//
// Return
// true if the next n elements of self match n elements of matches
// false otherwise
//
// Side effects
// moves the iterator when match is found
fn matches(&mut self, matches: &[u8]) -> bool {
if self.clone().nth(matches.len()).is_none() {
// there are less than matches.len() elements in self
return false
}
let result = self.clone().zip(matches).all(|(s, m)| *s == *m);
if result {
self.nth(matches.len());
}
result
}
}
struct ByteMatcher {
pattern: &'static [u8],
mask: &'static [u8],
leading_ignore: &'static [u8],
content_type: (&'static str, &'static str)
}
impl ByteMatcher {
fn matches(&self, data: &[u8]) -> Option<usize> {
if data.len() < self.pattern.len() {
None
} else if data == self.pattern {
Some(self.pattern.len())
} else {
data[..data.len() - self.pattern.len()].iter()
.position(|x| !self.leading_ignore.contains(x))
.and_then(|start|
if data[start..].iter()
.zip(self.pattern.iter()).zip(self.mask.iter())
.all(|((&data, &pattern), &mask)| (data & mask) == (pattern & mask)) {
Some(start + self.pattern.len())
} else {
None
})
}
}
}
impl MIMEChecker for ByteMatcher {
fn classify(&self, data: &[u8]) -> Option<(String, String)> {
self.matches(data).map(|_| {
(self.content_type.0.to_owned(), self.content_type.1.to_owned())
})
}
}
struct TagTerminatedByteMatcher {
matcher: ByteMatcher
}
impl MIMEChecker for TagTerminatedByteMatcher {
fn classify(&self, data: &[u8]) -> Option<(String, String)> {
self.matcher.matches(data).and_then(|j|
if j < data.len() && (data[j] == b' ' || data[j] == b'>') {
Some((self.matcher.content_type.0.to_owned(),
self.matcher.content_type.1.to_owned()))
} else {
None
})
}
}
pub struct Mp4Matcher;
impl Mp4Matcher {
pub fn matches(&self, data: &[u8]) -> bool {
if data.len() < 12 {
return false;
}
let box_size = ((data[0] as u32) << 3 | (data[1] as u32) << 2 |
(data[2] as u32) << 1 | (data[3] as u32)) as usize;
if (data.len() < box_size) || (box_size % 4 != 0) {
return false;
}
let ftyp = [0x66, 0x74, 0x79, 0x70];
if !data[4..].starts_with(&ftyp) {
return false;
}
let mp4 = [0x6D, 0x70, 0x34];
data[8..].starts_with(&mp4) ||
data[16..box_size].chunks(4).any(|chunk| chunk.starts_with(&mp4))
}
}
impl MIMEChecker for Mp4Matcher {
fn classify(&self, data: &[u8]) -> Option<(String, String)> {
if self.matches(data) {
Some(("video".to_owned(), "mp4".to_owned()))
} else {
None
}
}
}
struct BinaryOrPlaintextClassifier;
impl BinaryOrPlaintextClassifier {
fn classify_impl(&self, data: &[u8]) -> (&'static str, &'static str) {
if data.starts_with(&[0xFFu8, 0xFEu8]) ||
data.starts_with(&[0xFEu8, 0xFFu8]) ||
data.starts_with(&[0xEFu8, 0xBBu8, 0xBFu8])
{
("text", "plain")
} else if data.iter().any(|&x| x <= 0x08u8 ||
x == 0x0Bu8 ||
(x >= 0x0Eu8 && x <= 0x1Au8) ||
(x >= 0x1Cu8 && x <= 0x1Fu8)) {
("application", "octet-stream")
} else {
("text", "plain")
}
}
}
impl MIMEChecker for BinaryOrPlaintextClassifier {
fn classify(&self, data: &[u8]) -> Option<(String, String)> {
as_string_option(Some(self.classify_impl(data)))
}
}
struct GroupedClassifier {
byte_matchers: Vec<Box<MIMEChecker + Send + Sync>>,
}
impl GroupedClassifier {
fn image_classifer() -> GroupedClassifier {
GroupedClassifier {
byte_matchers: vec![
box ByteMatcher::image_x_icon(),
box ByteMatcher::image_x_icon_cursor(),
box ByteMatcher::image_bmp(),
box ByteMatcher::image_gif89a(),
box ByteMatcher::image_gif87a(),
box ByteMatcher::image_webp(),
box ByteMatcher::image_png(),
box ByteMatcher::image_jpeg(),
]
}
}
fn audio_video_classifier() -> GroupedClassifier {
GroupedClassifier {
byte_matchers: vec![
box ByteMatcher::video_webm(),
box ByteMatcher::audio_basic(),
box ByteMatcher::audio_aiff(),
box ByteMatcher::audio_mpeg(),
box ByteMatcher::application_ogg(),
box ByteMatcher::audio_midi(),
box ByteMatcher::video_avi(),
box ByteMatcher::audio_wave(),
box Mp4Matcher
]
}
}
fn scriptable_classifier() -> GroupedClassifier {
GroupedClassifier {
byte_matchers: vec![
box ByteMatcher::text_html_doctype(),
box ByteMatcher::text_html_page(),
box ByteMatcher::text_html_head(),
box ByteMatcher::text_html_script(),
box ByteMatcher::text_html_iframe(),
box ByteMatcher::text_html_h1(),
box ByteMatcher::text_html_div(),
box ByteMatcher::text_html_font(),
box ByteMatcher::text_html_table(),
box ByteMatcher::text_html_a(),
box ByteMatcher::text_html_style(),
box ByteMatcher::text_html_title(),
box ByteMatcher::text_html_b(),
box ByteMatcher::text_html_body(),
box ByteMatcher::text_html_br(),
box ByteMatcher::text_html_p(),
box ByteMatcher::text_html_comment(),
box ByteMatcher::text_xml(),
box ByteMatcher::application_pdf()
]
}
}
fn plaintext_classifier() -> GroupedClassifier {
GroupedClassifier {
byte_matchers: vec![
box ByteMatcher::text_plain_utf_8_bom(),
box ByteMatcher::text_plain_utf_16le_bom(),
box ByteMatcher::text_plain_utf_16be_bom(),
box ByteMatcher::application_postscript()
]
}
}
fn archive_classifier() -> GroupedClassifier {
GroupedClassifier {
byte_matchers: vec![
box ByteMatcher::application_x_gzip(),
box ByteMatcher::application_zip(),
box ByteMatcher::application_x_rar_compressed()
]
}
}
// TODO: Use this in font context classifier
#[allow(dead_code)]
fn font_classifier() -> GroupedClassifier {
GroupedClassifier {
byte_matchers: vec![
box ByteMatcher::application_font_woff(),
box ByteMatcher::true_type_collection(),
box ByteMatcher::open_type(),
box ByteMatcher::true_type(),
box ByteMatcher::application_vnd_ms_font_object(),
]
}
}
}
impl MIMEChecker for GroupedClassifier {
fn classify(&self, data: &[u8]) -> Option<(String, String)> {
self.byte_matchers
.iter()
.filter_map(|matcher| matcher.classify(data))
.next()
}
}
enum Match {
Start,
DidNotMatch,
StartAndEnd
}
impl Match {
fn chain<F: FnOnce() -> Match>(self, f: F) -> Match {
if let Match::DidNotMatch = self {
return f();
}
self
}
}
fn eats_until<'a, T>(matcher: &mut T, start: &[u8], end: &[u8]) -> Match
where T: Iterator<Item=&'a u8> + Clone {
if !matcher.matches(start) {
Match::DidNotMatch
} else if end.len() == 1 {
if matcher.any(|&x| x == end[0]) {
Match::StartAndEnd
} else {
Match::Start
}
} else {
while !matcher.matches(end) {
if matcher.next().is_none() {
return Match::Start;
}
}
Match::StartAndEnd
}
}
struct FeedsClassifier;
impl FeedsClassifier {
// Implements sniffing for mislabeled feeds (https://mimesniff.spec.whatwg.org/#sniffing-a-mislabeled-feed)
fn classify_impl(&self, data: &[u8]) -> Option<(&'static str, &'static str)> {
// Step 4: can not be feed unless length is > 3
if data.len() < 3 {
return None;
}
let mut matcher = data.iter();
// eat the first three acceptable byte sequences if they are equal to UTF-8 BOM
let utf8_bom = &[0xEFu8, 0xBBu8, 0xBFu8];
matcher.matches(utf8_bom);
// continuously search for next "<" until end of matcher
// TODO: need max_bytes to prevent inadvertently examining html document
// eg. an html page with a feed example
loop {
if matcher.find(|&x| *x == b'<').is_none() {
return None;
}
// Steps 5.2.1 to 5.2.4
match eats_until(&mut matcher, b"?", b"?>")
.chain(|| eats_until(&mut matcher, b"!--", b"-->"))
.chain(|| eats_until(&mut matcher, b"!", b">")) {
Match::StartAndEnd => continue,
Match::DidNotMatch => {},
Match::Start => return None
}
// Step 5.2.5
if matcher.matches(b"rss") {
return Some(("application", "rss+xml"));
}
// Step 5.2.6
if matcher.matches(b"feed") {
return Some(("application", "atom+xml"));
}
// Step 5.2.7
if matcher.matches(b"rdf:RDF") {
while matcher.next().is_some() {
match eats_until(&mut matcher,
b"http://purl.org/rss/1.0/",
b"http://www.w3.org/1999/02/22-rdf-syntax-ns#")
.chain(|| eats_until(&mut matcher,
b"http://www.w3.org/1999/02/22-rdf-syntax-ns#",
b"http://purl.org/rss/1.0/")) {
Match::StartAndEnd => return Some(("application", "rss+xml")),
Match::DidNotMatch => {},
Match::Start => return None
}
}
return None;
}
}
}
}
impl MIMEChecker for FeedsClassifier {
fn classify(&self, data: &[u8]) -> Option<(String, String)> {
as_string_option(self.classify_impl(data))
}
}
//Contains hard coded byte matchers
//TODO: These should be configured and not hard coded
impl ByteMatcher {
//A Windows Icon signature
fn image_x_icon() -> ByteMatcher {
ByteMatcher {
pattern: b"\x00\x00\x01\x00",
mask: b"\xFF\xFF\xFF\xFF",
content_type: ("image", "x-icon"),
leading_ignore: &[]
}
}
//A Windows Cursor signature.
fn image_x_icon_cursor() -> ByteMatcher {
ByteMatcher {
pattern: b"\x00\x00\x02\x00",
mask: b"\xFF\xFF\xFF\xFF",
content_type: ("image", "x-icon"),
leading_ignore: &[]
}
}
//The string "BM", a BMP signature.
fn image_bmp() -> ByteMatcher {
ByteMatcher {
pattern: b"BM",
mask: b"\xFF\xFF",
content_type: ("image", "bmp"),
leading_ignore: &[]
}
}
//The string "GIF89a", a GIF signature.
fn image_gif89a() -> ByteMatcher {
ByteMatcher {
pattern: b"GIF89a",
mask: b"\xFF\xFF\xFF\xFF\xFF\xFF",
content_type: ("image", "gif"),
leading_ignore: &[]
}
}
//The string "GIF87a", a GIF signature.
fn image_gif87a() -> ByteMatcher {
ByteMatcher {
pattern: b"GIF87a",
mask: b"\xFF\xFF\xFF\xFF\xFF\xFF",
content_type: ("image", "gif"),
leading_ignore: &[]
}
}
//The string "RIFF" followed by four bytes followed by the string "WEBPVP".
fn image_webp() -> ByteMatcher {
ByteMatcher {
pattern: b"RIFF\x00\x00\x00\x00WEBPVP",
mask: b"\xFF\xFF\xFF\xFF\x00\x00\x00\x00,\xFF\xFF\xFF\xFF\xFF\xFF",
content_type: ("image", "webp"),
leading_ignore: &[]
}
}
//An error-checking byte followed by the string "PNG" followed by CR LF SUB LF, the PNG
//signature.
fn image_png() -> ByteMatcher {
ByteMatcher {
pattern: b"\x89PNG\r\n\x1A\n",
mask: b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF",
content_type: ("image", "png"),
leading_ignore: &[]
}
}
// The JPEG Start of Image marker followed by the indicator byte of another marker.
fn image_jpeg() -> ByteMatcher {
ByteMatcher {
pattern: b"\xFF\xD8\xFF",
mask: b"\xFF\xFF\xFF",
content_type: ("image", "jpeg"),
leading_ignore: &[]
}
}
//The WebM signature. [TODO: Use more bytes?]
fn video_webm() -> ByteMatcher {
ByteMatcher {
pattern: b"\x1A\x45\xDF\xA3",<|fim▁hole|> }
//The string ".snd", the basic audio signature.
fn audio_basic() -> ByteMatcher {
ByteMatcher {
pattern: b".snd",
mask: b"\xFF\xFF\xFF\xFF",
content_type: ("audio", "basic"),
leading_ignore: &[]
}
}
//The string "FORM" followed by four bytes followed by the string "AIFF", the AIFF signature.
fn audio_aiff() -> ByteMatcher {
ByteMatcher {
pattern: b"FORM\x00\x00\x00\x00AIFF",
mask: b"\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF",
content_type: ("audio", "aiff"),
leading_ignore: &[]
}
}
//The string "ID3", the ID3v2-tagged MP3 signature.
fn audio_mpeg() -> ByteMatcher {
ByteMatcher {
pattern: b"ID3",
mask: b"\xFF\xFF\xFF",
content_type: ("audio", "mpeg"),
leading_ignore: &[]
}
}
//The string "OggS" followed by NUL, the Ogg container signature.
fn application_ogg() -> ByteMatcher {
ByteMatcher {
pattern: b"OggS",
mask: b"\xFF\xFF\xFF\xFF\xFF",
content_type: ("application", "ogg"),
leading_ignore: &[]
}
}
//The string "MThd" followed by four bytes representing the number 6 in 32 bits (big-endian),
//the MIDI signature.
fn audio_midi() -> ByteMatcher {
ByteMatcher {
pattern: b"MThd\x00\x00\x00\x06",
mask: b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF",
content_type: ("audio", "midi"),
leading_ignore: &[]
}
}
//The string "RIFF" followed by four bytes followed by the string "AVI ", the AVI signature.
fn video_avi() -> ByteMatcher {
ByteMatcher {
pattern: b"RIFF\x00\x00\x00\x00AVI ",
mask: b"\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF",
content_type: ("video", "avi"),
leading_ignore: &[]
}
}
// The string "RIFF" followed by four bytes followed by the string "WAVE", the WAVE signature.
fn audio_wave() -> ByteMatcher {
ByteMatcher {
pattern: b"RIFF\x00\x00\x00\x00WAVE",
mask: b"\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF",
content_type: ("audio", "wave"),
leading_ignore: &[]
}
}
// doctype terminated with Tag terminating (TT) Byte
fn text_html_doctype() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<!DOCTYPE HTML",
mask: b"\xFF\xFF\xDF\xDF\xDF\xDF\xDF\xDF\xDF\xFF\xDF\xDF\xDF\xDF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// HTML terminated with Tag terminating (TT) Byte: 0x20 (SP)
fn text_html_page() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<HTML",
mask: b"\xFF\xDF\xDF\xDF\xDF\xFF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// head terminated with Tag Terminating (TT) Byte
fn text_html_head() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<HEAD",
mask: b"\xFF\xDF\xDF\xDF\xDF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// script terminated with Tag Terminating (TT) Byte
fn text_html_script() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<SCRIPT",
mask: b"\xFF\xDF\xDF\xDF\xDF\xDF\xDF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// iframe terminated with Tag Terminating (TT) Byte
fn text_html_iframe() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<IFRAME",
mask: b"\xFF\xDF\xDF\xDF\xDF\xDF\xDF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// h1 terminated with Tag Terminating (TT) Byte
fn text_html_h1() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<H1",
mask: b"\xFF\xDF\xFF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// div terminated with Tag Terminating (TT) Byte
fn text_html_div() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<DIV",
mask: b"\xFF\xDF\xDF\xDF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// font terminated with Tag Terminating (TT) Byte
fn text_html_font() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<FONT",
mask: b"\xFF\xDF\xDF\xDF\xDF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// table terminated with Tag Terminating (TT) Byte
fn text_html_table() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<TABLE",
mask: b"\xFF\xDF\xDF\xDF\xDF\xDF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// a terminated with Tag Terminating (TT) Byte
fn text_html_a() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<A",
mask: b"\xFF\xDF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// style terminated with Tag Terminating (TT) Byte
fn text_html_style() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<STYLE",
mask: b"\xFF\xDF\xDF\xDF\xDF\xDF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// title terminated with Tag Terminating (TT) Byte
fn text_html_title() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<TITLE",
mask: b"\xFF\xDF\xDF\xDF\xDF\xDF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// b terminated with Tag Terminating (TT) Byte
fn text_html_b() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<B",
mask: b"\xFF\xDF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// body terminated with Tag Terminating (TT) Byte
fn text_html_body() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<BODY",
mask: b"\xFF\xDF\xDF\xDF\xDF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// br terminated with Tag Terminating (TT) Byte
fn text_html_br() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<BR",
mask: b"\xFF\xDF\xDF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// p terminated with Tag Terminating (TT) Byte
fn text_html_p() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<P",
mask: b"\xFF\xDF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
// comment terminated with Tag Terminating (TT) Byte
fn text_html_comment() -> TagTerminatedByteMatcher {
TagTerminatedByteMatcher {
matcher: ByteMatcher {
pattern: b"<!--",
mask: b"\xFF\xFF\xFF\xFF",
content_type: ("text", "html"),
leading_ignore: b"\t\n\x0C\r "
}
}
}
//The string "<?xml".
fn text_xml() -> ByteMatcher {
ByteMatcher {
pattern: b"<?xml",
mask: b"\xFF\xFF\xFF\xFF\xFF",
content_type: ("text", "xml"),
leading_ignore: b"\t\n\x0C\r "
}
}
//The string "%PDF-", the PDF signature.
fn application_pdf() -> ByteMatcher {
ByteMatcher {
pattern: b"%PDF",
mask: b"\xFF\xFF\xFF\xFF\xFF",
content_type: ("application", "pdf"),
leading_ignore: &[]
}
}
//34 bytes followed by the string "LP", the Embedded OpenType signature.
// TODO: Use this in font context classifier
#[allow(dead_code)]
fn application_vnd_ms_font_object() -> ByteMatcher {
ByteMatcher {
pattern: b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00LP",
mask: b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\xFF\xFF",
content_type: ("application", "vnd.ms-fontobject"),
leading_ignore: &[]
}
}
//4 bytes representing the version number 1.0, a TrueType signature.
// TODO: Use this in font context classifier
#[allow(dead_code)]
fn true_type() -> ByteMatcher {
ByteMatcher {
pattern: b"\x00\x01\x00\x00",
mask: b"\xFF\xFF\xFF\xFF",
content_type: ("(TrueType)", ""),
leading_ignore: &[]
}
}
//The string "OTTO", the OpenType signature.
// TODO: Use this in font context classifier
#[allow(dead_code)]
fn open_type() -> ByteMatcher {
ByteMatcher {
pattern: b"OTTO",
mask: b"\xFF\xFF\xFF\xFF",
content_type: ("(OpenType)", ""),
leading_ignore: &[]
}
}
// The string "ttcf", the TrueType Collection signature.
// TODO: Use this in font context classifier
#[allow(dead_code)]
fn true_type_collection() -> ByteMatcher {
ByteMatcher {
pattern: b"ttcf",
mask: b"\xFF\xFF\xFF\xFF",
content_type: ("(TrueType Collection)", ""),
leading_ignore: &[]
}
}
// The string "wOFF", the Web Open Font Format signature.
// TODO: Use this in font context classifier
#[allow(dead_code)]
fn application_font_woff() -> ByteMatcher {
ByteMatcher {
pattern: b"wOFF",
mask: b"\xFF\xFF\xFF\xFF",
content_type: ("application", "font-woff"),
leading_ignore: &[]
}
}
//The GZIP archive signature.
fn application_x_gzip() -> ByteMatcher {
ByteMatcher {
pattern: b"\x1F\x8B\x08",
mask: b"\xFF\xFF\xFF",
content_type: ("application", "x-gzip"),
leading_ignore: &[]
}
}
//The string "PK" followed by ETX EOT, the ZIP archive signature.
fn application_zip() -> ByteMatcher {
ByteMatcher {
pattern: b"PK\x03\x04",
mask: b"\xFF\xFF\xFF\xFF",
content_type: ("application", "zip"),
leading_ignore: &[]
}
}
//The string "Rar " followed by SUB BEL NUL, the RAR archive signature.
fn application_x_rar_compressed() -> ByteMatcher {
ByteMatcher {
pattern: b"Rar \x1A\x07\x00",
mask: b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF",
content_type: ("application", "x-rar-compressed"),
leading_ignore: &[]
}
}
// The string "%!PS-Adobe-", the PostScript signature.
fn application_postscript() -> ByteMatcher {
ByteMatcher {
pattern: b"%!PS-Adobe-",
mask: b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF",
content_type: ("application", "postscript"),
leading_ignore: &[]
}
}
// UTF-16BE BOM
fn text_plain_utf_16be_bom() -> ByteMatcher {
ByteMatcher {
pattern: b"\xFE\xFF\x00\x00",
mask: b"\xFF\xFF\x00\x00",
content_type: ("text", "plain"),
leading_ignore: &[]
}
}
//UTF-16LE BOM
fn text_plain_utf_16le_bom() -> ByteMatcher {
ByteMatcher {
pattern: b"\xFF\xFE\x00\x00",
mask: b"\xFF\xFF\x00\x00",
content_type: ("text", "plain"),
leading_ignore: &[]
}
}
//UTF-8 BOM
fn text_plain_utf_8_bom() -> ByteMatcher {
ByteMatcher {
pattern: b"\xEF\xBB\xBF\x00",
mask: b"\xFF\xFF\xFF\x00",
content_type: ("text", "plain"),
leading_ignore: &[]
}
}
}<|fim▁end|> | mask: b"\xFF\xFF\xFF\xFF",
content_type: ("video", "webm"),
leading_ignore: &[]
} |
<|file_name|>problem2.py<|end_file_name|><|fim▁begin|>import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
import math
import scipy.special as sps
mean = 0
variance = 1
sigma = math.sqrt(variance)
def drawSampleNormal(sampleSize):
samples = np.random.normal(mean, sigma, sampleSize)
count, bins, ignored = plt.hist(samples, 80, normed=True)
plt.plot(bins,mlab.normpdf(bins,mean,sigma))
plt.show()
plt.savefig("normal_" + str(sampleSize) + "_samples.png")
plt.clf()
<|fim▁hole|>
alpha = 7.5
beta = 10
def drawSampleGamma(sampleSize):
samples = np.random.gamma(alpha, beta, sampleSize)
count, bins, ignored = plt.hist(samples, 80, normed=True)
pdf = bins**(alpha-1)*(np.exp(-bins/beta) / (sps.gamma(alpha)*beta**alpha))
plt.plot(bins, pdf, linewidth=2, color='r')
plt.show()
plt.savefig("gamma_" + str(sampleSize) + "_samples.png")
plt.clf()
drawSampleGamma(20)
drawSampleGamma(50)
drawSampleGamma(100)
drawSampleGamma(500)<|fim▁end|> | drawSampleNormal(20)
drawSampleNormal(50)
drawSampleNormal(100)
drawSampleNormal(500) |
<|file_name|>UsbDeviceSpinnerAdapter.java<|end_file_name|><|fim▁begin|>package com.autowp.canreader;
import android.content.Context;
import android.hardware.usb.UsbDevice;
import android.os.Build;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ArrayAdapter;
import android.widget.TextView;
import java.util.List;
/**
* Created by autow on 13.02.2016.
*/
public class UsbDeviceSpinnerAdapter extends ArrayAdapter<UsbDevice> {
public UsbDeviceSpinnerAdapter(Context context, int resource, List<UsbDevice> objects) {
super(context, resource, objects);
}
@Override
public View getDropDownView(int position, View convertView,
ViewGroup parent) {
return getCustomView(position, convertView, parent);
}
@Override
public View getView(int position, View convertView, ViewGroup parent) {
return getCustomView(position, convertView, parent);
}
public View getCustomView(int position, View convertView, ViewGroup parent) {
View v = convertView;<|fim▁hole|> vi = LayoutInflater.from(getContext());
v = vi.inflate(R.layout.usbdevice_spinner_item, null);
}
UsbDevice device = getItem(position);
if (device != null) {
TextView tvProductName = (TextView)v.findViewById(R.id.textViewProductName);
TextView tvDeviceInto = (TextView)v.findViewById(R.id.textViewDeviceInfo);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
tvProductName.setText(device.getProductName());
String deviceInfo = String.format(
"%s %04X/%04X, %s",
device.getManufacturerName(),
device.getVendorId(),
device.getProductId(),
device.getDeviceName()
);
tvDeviceInto.setText(deviceInfo);
} else {
tvProductName.setText(device.getDeviceName());
String deviceInfo = String.format(
"%04X/%04X",
device.getVendorId(),
device.getProductId()
);
tvDeviceInto.setText(deviceInfo);
}
}
return v;
}
}<|fim▁end|> |
if (v == null) {
LayoutInflater vi; |
<|file_name|>apiwrappers.py<|end_file_name|><|fim▁begin|>import urllib.parse
import urllib.request
import json
import logging
import requests
log = logging.getLogger('tyggbot')
class APIBase:
@staticmethod
def _get(url, headers={}):
try:
req = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(req)
except Exception as e:
return None
try:
return response.read().decode('utf-8')
except Exception as e:
log.error(e)
return None
return None
@staticmethod
def _get_json(url, headers={}):
try:
data = APIBase._get(url, headers)
if data:
return json.loads(data)
else:
return data
except Exception:
log.exception('Caught exception while trying to parse json data.')
return None
return None
def get_url(self, endpoints=[], parameters={}):
return self.base_url + '/'.join(endpoints) + ('' if len(parameters) == 0 else '?' + urllib.parse.urlencode(parameters))
def getraw(self, endpoints=[], parameters={}):
return APIBase._get(self.get_url(endpoints, parameters), self.headers)
def get(self, endpoints, parameters={}):
try:
data = self.getraw(endpoints, parameters)
if data:
return json.loads(data)
else:
return data
except Exception as e:
log.error(e)
return None
return None
def post(self, endpoints=[], parameters={}, data={}):
try:
req = urllib.request.Request(self.get_url(endpoints, parameters), urllib.parse.urlencode(data).encode('utf-8'), self.headers)
response = urllib.request.urlopen(req)
except Exception as e:
log.error(e)
return None
try:
return response.read().decode('utf-8')
except Exception as e:
log.error(e)
return None
return None
class ChatDepotAPI(APIBase):
def __init__(self):
APIBase.__init__(self)
self.base_url = 'http://chatdepot.twitch.tv/'
self.headers = {
'Accept': 'application/vnd.twitchtv.v3+json'
}
<|fim▁hole|>
self.base_url = 'https://imraising.tv/api/v1/'
self.headers = {
'Authorization': 'APIKey apikey="{0}"'.format(apikey),
'Content-Type': 'application/json',
}
class StreamtipAPI(APIBase):
def __init__(self, client_id, access_token):
APIBase.__init__(self)
self.base_url = 'https://streamtip.com/api/'
self.headers = {
'Authorization': client_id + ' ' + access_token,
}
class TwitchAPI(APIBase):
def __init__(self, client_id=None, oauth=None, type='kraken'):
APIBase.__init__(self)
self.base_url = 'https://api.twitch.tv/{0}/'.format(type)
self.headers = {
'Accept': 'application/vnd.twitchtv.v3+json',
}
if client_id:
self.headers['Client-ID'] = client_id
if oauth:
self.headers['Authorization'] = 'OAuth ' + oauth
class SafeBrowsingAPI:
def __init__(self, apikey, appname, appvers):
self.apikey = apikey
self.appname = appname
self.appvers = appvers
return
def check_url(self, url):
base_url = 'https://sb-ssl.google.com/safebrowsing/api/lookup?client=' + self.appname + '&key=' + self.apikey + '&appver=' + self.appvers + '&pver=3.1&url='
url2 = base_url + urllib.parse.quote(url, '')
r = requests.get(url2)
if r.status_code == 200:
return True # malware or phishing
return False # some handling of error codes should be added, they're just ignored for now<|fim▁end|> | class ImraisingAPI(APIBase):
def __init__(self, apikey):
APIBase.__init__(self) |
<|file_name|>configuration_server.py<|end_file_name|><|fim▁begin|><|fim▁hole|>import zmq
import sys
import ConfigParser
import os.path
import proto_objs.venue_configuration_pb2
import daemon
import signal
import lockfile
from optparse import OptionParser
import datetime
full_config = proto_objs.venue_configuration_pb2.configuration()
bind_addr="tcp://127.0.0.1:11111"
def parse(filename):
config = ConfigParser.ConfigParser()
config.read(filename)
sections = config.sections()
full_config.Clear()
i = 0
for s in sections:
if s == 'global':
full_config.trade_serialization_addr = config.get(s, 'trade_serialization_addr')
full_config.recovery_listener_addr = config.get(s, 'recovery_listener_addr')
full_config.aggregated_bbo_book_addr = config.get(s, 'aggregated_bbo_book_addr')
full_config.aggregated_bbo_book_id = config.getint(s, 'aggregated_bbo_book_id')
else:
i+=1
print ("Adding venue: %d " % i)
single_venue_config = full_config.configs.add()
make_protobuf(s, config, single_venue_config)
print full_config.__str__()
return True
def make_protobuf(section, config, single_venue_config):
single_venue_config.venue_id = config.getint(section, 'venue_id')
single_venue_config.mic_name = config.get(section, 'mic_name')
single_venue_config.order_interface_addr = config.get(section, 'order_interface_addr')
single_venue_config.order_ping_addr = config.get(section, 'order_ping_addr')
single_venue_config.market_data_broadcast_addr = config.get(section, 'market_data_broadcast_addr')
if config.has_option(section, 'use_synthetic_cancel_replace'):
single_venue_config.use_synthetic_cancel_replace = config.getboolean(section, 'use_synthetic_cancel_replace')
def run(config_filename):
# Create context and connect
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.setsockopt(zmq.LINGER, 0)
print "Binding to: ", bind_addr
socket.bind(bind_addr)
while True:
contents = socket.recv()
print datetime.datetime.now(), "Received msg:<", contents, ">"
if contents == 'R':
print "Refresh request"
refresh_ret = parse(config_filename)
if (refresh_ret == True):
refresh_status = "OK"
else:
refresh_status = "ERROR"
socket.send_multipart(["REFRESH", refresh_status])
elif contents == 'C':
print "Config request"
socket.send_multipart(["CONFIG", full_config.SerializeToString()])
else:
print "Unknown request - ERROR"
socket.send_multipart(["ERROR", "unknown message"])
def terminate():
print "Terminate"
socket.close()
context.close()
def main():
parser = OptionParser(usage="usage: %prog [options] <config_filename>")
parser.add_option("-D", "--daemon",
dest="runAsDaemon",
help="Run configuration server as daemon",
action="store_true",
default=False)
(options, args) = parser.parse_args();
if len(args) < 1:
parser.error("Missing arguments")
config_filename = args[0]
log_filename = "configuration_server.log"
log = open(log_filename, 'w+')
print "Using config file: ", config_filename
if os.path.exists(config_filename) == False:
print "Config file: ", config_filename, " does not exist"
raise Exception("Config file: ", config_filename, " does not exist")
if options.runAsDaemon == True:
context = daemon.DaemonContext(
working_directory='.',
umask=0o002,
#pidfile=lockfile.FileLock('./configuration_server.pid'),
stdout=log,
stderr=log)
#context.signal_map = {
#signal.SIGTERM: 'terminate',
#signal.SIGHUP: 'terminate',
#signal.SIGUSR1: 'terminate',
#}
#with daemon.DaemonContext():
with context:
parse(config_filename)
run(config_filename)
else:
parse(config_filename)
run(config_filename)
if __name__ == "__main__":
main()<|fim▁end|> | |
<|file_name|>test_submit_group.py<|end_file_name|><|fim▁begin|># This file is part of the Simulation Manager project for VecNet.
# For copyright and licensing information about this project, see the
# NOTICE.txt and LICENSE.md files in its top-level directory; they are
# available at https://github.com/vecnet/simulation-manager
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Tests for the submit_group.py script.
"""
import random
import sys
from crc_nd.utils.test_io import WritesOutputFiles
from django.test import LiveServerTestCase
from mock import patch
from path import path
from vecnet.simulation import ExecutionRequest, sim_model, Simulation, SimulationGroup as SimGroup, submission_status
from .constants import TEST_OUTPUT_ROOT
from .mixins import UsesDatabaseApi
from sim_manager import scripts, working_dirs
from sim_manager.models import SimulationGroup
from sim_manager.scripts import api_urls, batch, input_files, submit_group
from sim_manager.scripts.batch import test_utils
from sim_manager.scripts.constants import SIMULATION_DEFINITION_FILENAME, SIMULATION_SCRIPT
class MainTests(LiveServerTestCase, UsesDatabaseApi, WritesOutputFiles):
"""
Tests for the script's main function.<|fim▁hole|> super(MainTests, cls).setUpClass()
cls.setup_database_api_user()
cls.set_output_root(TEST_OUTPUT_ROOT)
working_dirs.TestingApi.use_testing_root()
# Add the scripts package's directory to the module search path so the loading of the batch system in the
# submit_group.py script works. When the script is executed at the command line, the package directory will
# automatically be added to the search path. But here in the test suite, the package is imported, so it's
# directory is not added automatically. Therefore, we explicitly add it.
scripts_dir = path(scripts.__file__).dirname()
sys.path.append(scripts_dir)
cls.simulation_script = scripts_dir / SIMULATION_SCRIPT
@classmethod
def tearDownClass(cls):
cls.remove_database_api_user()
working_dirs.TestingApi.reset_root_to_default()
sys.path.pop()
@patch('sim_manager.scripts.submit_group.BATCH_SYSTEM', batch.MOCK)
def test_run_script(self):
group = SimulationGroup.objects.create(submitter=self.test_user)
self.group_id = group.id
self.assertEqual(group.script_status, submission_status.READY_TO_RUN)
self.sim_group = SimGroup()
simulation_1 = Simulation(model=sim_model.OPEN_MALARIA, model_version='32', id_on_client='349',
output_url='http://ingestor.example.com/output-files/')
simulation_1.input_files['scenario.xml'] = 'http://www.example.com/data/scenarios/1234/scenario.xml'
simulation_2 = Simulation(model=sim_model.EMOD, model_version='1.6', cmd_line_args=['--foo', 'bar'],
id_on_client='350', output_url=simulation_1.output_url)
simulation_2.input_files['config.json'] = 'https://files.vecnet.org/4710584372'
simulation_2.input_files['campaign.json'] = 'https://files.vecnet.org/678109'
self.sim_group.simulations = [simulation_1, simulation_2]
self.execution_request = ExecutionRequest(simulation_group=self.sim_group)
group.setup_working_dir(self.execution_request)
group_url = self.live_server_url + ('/api/v1/sim-groups/%s/' % group.id)
simulations_url = self.live_server_url + '/api/v1/simulations/'
api_urls.write_for_group(group.working_dir, group_url, simulations_url)
self.check_expected_state = self.expect_script_started
group.working_dir.chdir()
self.initialize_output_dir()
stdout = self.get_output_dir() / 'stdout.txt'
with stdout.open('w') as f:
exit_status = submit_group.main('foo', 'bar', stdout=f, test_callback=self.callback)
self.assertEqual(exit_status, 0)
group = SimulationGroup.objects.get(id=group.id)
self.assertEqual(group.script_status, submission_status.SCRIPT_DONE)
def callback(self):
if self.check_expected_state:
self.check_expected_state()
else:
self.fail('callback unexpectedly called')
def expect_script_started(self):
"""
Confirm that the submission script was started.
"""
self.assertGroupScriptStatus(submission_status.STARTED_SCRIPT)
self.check_expected_state = self.expect_cached_files
def expect_cached_files(self):
"""
Confirm that the submission script cached input files.
"""
self.assertGroupScriptStatus(submission_status.CACHING_FILES)
self.assertTrue(input_files.TestingApi.add_to_cache_mock.called)
args, kwargs = input_files.TestingApi.add_to_cache_mock.call_args
self.assertEqual((self.execution_request.input_files,), args)
self.check_expected_state = self.expect_simulation_created
self.simulations_created = 0
test_utils.Mocks.submit_job.reset_mock()
test_utils.Mocks.submit_job.return_value = generate_job_id()
def expect_simulation_created(self):
"""
Confirm that the submission script has created a new simulation in the database.
"""
self.assertGroupScriptStatus(submission_status.SUBMITTING_JOBS)
group = SimulationGroup.objects.get(id=self.group_id)
self.assertEqual(group.simulation_set.count(), self.simulations_created + 1)
self.simulations_created += 1
# Check that the working directory is set up properly for the simulation that was just created
simulation = group.simulation_set.order_by('created_when').last()
self.assertTrue(simulation.working_dir.isdir())
sim_definition_path = simulation.working_dir / SIMULATION_DEFINITION_FILENAME
self.assertTrue(sim_definition_path.isfile())
sim_definition = Simulation.read_json_file(sim_definition_path)
expected_sim_definition = self.sim_group.simulations[self.simulations_created - 1]
self.assertEqual(sim_definition.model, expected_sim_definition.model)
self.assertEqual(sim_definition.model_version, expected_sim_definition.model_version)
self.assertEqual(sim_definition.input_files, expected_sim_definition.input_files)
self.assertEqual(sim_definition.cmd_line_args, expected_sim_definition.cmd_line_args)
self.assertEqual(sim_definition.id_on_client, expected_sim_definition.id_on_client)
self.assertEqual(sim_definition.output_url, expected_sim_definition.output_url)
# Check that the simulation was submitted to the batch system.
self.assertTrue(test_utils.Mocks.submit_job.called)
args, kwargs = test_utils.Mocks.submit_job.call_args
executable, working_dir, cmd_args = args[0], args[1], args[2:]
self.assertEqual(executable, sys.executable)
self.assertEqual(working_dir, simulation.working_dir)
self.assertEqual(list(cmd_args), [self.simulation_script])
self.assertEqual(simulation.batch_job_id, test_utils.Mocks.submit_job.return_value)
test_utils.Mocks.submit_job.reset_mock()
if self.simulations_created < len(self.sim_group.simulations):
test_utils.Mocks.submit_job.return_value = generate_job_id()
else:
self.check_expected_state = None
def assertGroupScriptStatus(self, expected_status):
group = SimulationGroup.objects.get(id=self.group_id)
self.assertEqual(group.script_status, expected_status)
def generate_job_id():
return str(random.randint(1, 100000))<|fim▁end|> | """
@classmethod
def setUpClass(cls): |
<|file_name|>AppFemGui.cpp<|end_file_name|><|fim▁begin|>/***************************************************************************
* Copyright (c) 2008 Jürgen Riegel ([email protected]) *
* *
* This file is part of the FreeCAD CAx development system. *
* *
* This library is free software; you can redistribute it and/or *
* modify it under the terms of the GNU Library General Public *
* License as published by the Free Software Foundation; either *
* version 2 of the License, or (at your option) any later version. *
* *
* This library is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU Library General Public License for more details. *
* *
* You should have received a copy of the GNU Library General Public *
* License along with this library; see the file COPYING.LIB. If not, *
* write to the Free Software Foundation, Inc., 59 Temple Place, *
* Suite 330, Boston, MA 02111-1307, USA *
* *
***************************************************************************/
#include "PreCompiled.h"
#ifndef _PreComp_
# include <Python.h>
# include <Standard_math.hxx>
#endif
#include <Base/Console.h>
#include <Base/Interpreter.h>
#include <Gui/Application.h>
#include <Gui/WidgetFactory.h>
#include <Gui/Language/Translator.h>
#include "DlgSettingsFemImp.h"
#include "ViewProviderFemMesh.h"
#include "ViewProviderFemMeshShape.h"
#include "ViewProviderFemMeshShapeNetgen.h"
#include "ViewProviderAnalysis.h"
#include "ViewProviderSetNodes.h"
#include "ViewProviderSetElements.h"
#include "ViewProviderSetFaces.h"
#include "ViewProviderSetGeometry.h"
#include "ViewProviderFemConstraint.h"
#include "ViewProviderFemConstraintBearing.h"
#include "ViewProviderFemConstraintFixed.h"
#include "ViewProviderFemConstraintForce.h"
#include "ViewProviderFemConstraintPressure.h"
#include "ViewProviderFemConstraintGear.h"
#include "ViewProviderFemConstraintPulley.h"
#include "ViewProviderResult.h"
#include "Workbench.h"
//#include "resources/qrc_Fem.cpp"
// use a different name to CreateCommand()
void CreateFemCommands(void);
<|fim▁hole|> Q_INIT_RESOURCE(Fem);
Gui::Translator::instance()->refresh();
}
/* registration table */
extern struct PyMethodDef FemGui_Import_methods[];
/* Python entry */
extern "C" {
void FemGuiExport initFemGui()
{
if (!Gui::Application::Instance) {
PyErr_SetString(PyExc_ImportError, "Cannot load Gui module in console application.");
return;
}
(void) Py_InitModule("FemGui", FemGui_Import_methods); /* mod name, table ptr */
Base::Console().Log("Loading GUI of Fem module... done\n");
// instantiating the commands
CreateFemCommands();
// addition objects
FemGui::Workbench ::init();
FemGui::ViewProviderFemAnalysis ::init();
FemGui::ViewProviderFemAnalysisPython ::init();
FemGui::ViewProviderFemMesh ::init();
FemGui::ViewProviderFemMeshShape ::init();
FemGui::ViewProviderFemMeshShapeNetgen ::init();
FemGui::ViewProviderSetNodes ::init();
FemGui::ViewProviderSetElements ::init();
FemGui::ViewProviderSetFaces ::init();
FemGui::ViewProviderSetGeometry ::init();
FemGui::ViewProviderFemConstraint ::init();
FemGui::ViewProviderFemConstraintBearing ::init();
FemGui::ViewProviderFemConstraintFixed ::init();
FemGui::ViewProviderFemConstraintForce ::init();
FemGui::ViewProviderFemConstraintPressure ::init();
FemGui::ViewProviderFemConstraintGear ::init();
FemGui::ViewProviderFemConstraintPulley ::init();
FemGui::ViewProviderResult ::init();
FemGui::ViewProviderResultPython ::init();
// register preferences pages
new Gui::PrefPageProducer<FemGui::DlgSettingsFemImp> ("FEM");
// add resources and reloads the translators
loadFemResource();
}
} // extern "C" {<|fim▁end|> | void loadFemResource()
{
// add resources and reloads the translators
|
<|file_name|>TeeOutputStream.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.io.output;
<|fim▁hole|> * Classic splitter of OutputStream. Named after the unix 'tee'
* command. It allows a stream to be branched off so there
* are now two streams.
*
*/
public class TeeOutputStream extends ProxyOutputStream {
/** the second OutputStream to write to */
protected OutputStream branch; //TODO consider making this private
/**
* Constructs a TeeOutputStream.
* @param out the main OutputStream
* @param branch the second OutputStream
*/
public TeeOutputStream(final OutputStream out, final OutputStream branch) {
super(out);
this.branch = branch;
}
/**
* Write the bytes to both streams.
* @param b the bytes to write
* @throws IOException if an I/O error occurs
*/
@Override
public synchronized void write(final byte[] b) throws IOException {
super.write(b);
this.branch.write(b);
}
/**
* Write the specified bytes to both streams.
* @param b the bytes to write
* @param off The start offset
* @param len The number of bytes to write
* @throws IOException if an I/O error occurs
*/
@Override
public synchronized void write(final byte[] b, final int off, final int len) throws IOException {
super.write(b, off, len);
this.branch.write(b, off, len);
}
/**
* Write a byte to both streams.
* @param b the byte to write
* @throws IOException if an I/O error occurs
*/
@Override
public synchronized void write(final int b) throws IOException {
super.write(b);
this.branch.write(b);
}
/**
* Flushes both streams.
* @throws IOException if an I/O error occurs
*/
@Override
public void flush() throws IOException {
super.flush();
this.branch.flush();
}
/**
* Closes both output streams.
*
* If closing the main output stream throws an exception, attempt to close the branch output stream.
*
* If closing the main and branch output streams both throw exceptions, which exceptions is thrown by this method is
* currently unspecified and subject to change.
*
* @throws IOException
* if an I/O error occurs
*/
@Override
public void close() throws IOException {
try {
super.close();
} finally {
this.branch.close();
}
}
}<|fim▁end|> | import java.io.IOException;
import java.io.OutputStream;
/** |
<|file_name|>environment_tools_test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the<|fim▁hole|># contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for environment_tools. These are SMALL and MEDIUM tests."""
import os
import unittest
import TestFramework
class EnvToolsTests(unittest.TestCase):
"""Tests for environment_tools module."""
def setUp(self):
"""Per-test setup."""
self.env = self.root_env.Clone()
def testFilterOut(self):
"""Test FilterOut()."""
env = self.env
env.Replace(
TEST1=['ant', 'bear', 'cat'],
TEST2=[1, 2, 3, 4],
)
# Simple filter
env.FilterOut(TEST1=['bear'])
self.assertEqual(env['TEST1'], ['ant', 'cat'])
# Filter multiple
env.FilterOut(TEST1=['ant'], TEST2=[1, 3])
self.assertEqual(env['TEST1'], ['cat'])
self.assertEqual(env['TEST2'], [2, 4])
# Filter doesn't care if the variable or value doesn't exist
env.FilterOut(TEST1=['dog'], TEST3=[2])
self.assertEqual(env['TEST1'], ['cat'])
self.assertEqual(env['TEST2'], [2, 4])
def testFilterOutRepeated(self):
"""Test FilterOut() filters all matches."""
env = self.env
env['TEST3'] = ['A', 'B', 'B', 'C']
env.FilterOut(TEST3=['B'])
self.assertEqual(env['TEST3'], ['A', 'C'])
def testFilterOutNested(self):
"""Test FilterOut on nested lists."""
env = self.env
# FilterOut does not currently flatten lists, nor remove values from
# sub-lists. This is related to not evaluating environment variables (see
# below).
env['TEST4'] = ['A', ['B', 'C'], 'D']
env.FilterOut(TEST4=['B'])
self.assertEqual(env['TEST4'], ['A', ['B', 'C'], 'D'])
# If you specify the entire sub-list, it will be filtered
env.FilterOut(TEST4=[['B', 'C']])
self.assertEqual(env['TEST4'], ['A', 'D'])
def testFilterOutNoEval(self):
"""Test FilterOut does not evaluate variables in the list."""
env = self.env
# FilterOut does not evaluate variables in the list. (Doing so would
# defeat much of the purpose of variables.) Note that this means it does
# not filter variables which evaluate partially or wholly to the filtered
# string. On the plus side, this means you CAN filter out variables.
env.Replace(
TEST5=['$V1', '$V2', '$V3', '$V4'],
V1='A',
# (V2 intentionally undefined at this point)
V3=['A', 'B'],
V4='C',
)
env.FilterOut(TEST5=['A', '$V4'])
self.assertEqual(env['TEST5'], ['$V1', '$V2', '$V3'])
def testOverlap(self):
"""Test Overlap()."""
env = self.env
env.Replace(
OLVAR='baz',
OLLIST=['2', '3', '4'],
)
# Simple string compares
self.assertEqual(env.Overlap('foo', 'foo'), ['foo'])
self.assertEqual(env.Overlap('foo', 'food'), [])
# String compare with variable substitution
self.assertEqual(env.Overlap('foobaz', 'foo$OLVAR'), ['foobaz'])
# Simple list overlap
# Need to use set() for comparison, since the order of entries in the
# output list is indeterminate
self.assertEqual(set(env.Overlap(['1', '2', '3'], ['2', '3', '4'])),
set(['2', '3']))
# Overlap removes duplicates
self.assertEqual(env.Overlap(['1', '2', '2'], ['2', '3', '2']), ['2'])
# List and string
self.assertEqual(env.Overlap('3', ['1', '2', '3']), ['3'])
self.assertEqual(env.Overlap('4', ['1', '2', '3']), [])
self.assertEqual(env.Overlap(['1', '$OLVAR', '3'], '$OLVAR'), ['baz'])
# Variable substitition will replace and flatten lists
self.assertEqual(set(env.Overlap(['1', '2', '3'], '$OLLIST')),
set(['2', '3']))
# Substitution flattens lists
self.assertEqual(set(env.Overlap([['1', '2'], '3'], ['2', ['3', '4']])),
set(['2', '3']))
def testSubstList2(self):
"""Test SubstList2()."""
env = self.env
# Empty args should return empty list
self.assertEqual(env.SubstList2(), [])
# Undefined variable also returns empty list
self.assertEqual(env.SubstList2('$NO_SUCH_VAR'), [])
# Simple substitution (recursively evaluates variables)
env['STR1'] = 'FOO$STR2'
env['STR2'] = 'BAR'
self.assertEqual(env.SubstList2('$STR1'), ['FOOBAR'])
# Simple list substitution
env['LIST1'] = ['A', 'B']
self.assertEqual(env.SubstList2('$LIST1'), ['A', 'B'])
# Nested lists
env['LIST2'] = ['C', '$LIST1']
self.assertEqual(env.SubstList2('$LIST2'), ['C', 'A', 'B'])
# Multiple variables in a single entry stay a single entry
self.assertEqual(env.SubstList2('$STR1 $STR2'), ['FOOBAR BAR'])
# Multiple args to command
self.assertEqual(env.SubstList2('$LIST2', '$STR2'), ['C', 'A', 'B', 'BAR'])
# Items in list are actually strings, not some subclass
self.assert_(type(env.SubstList2('$STR1')[0]) is str)
def testRelativePath(self):
"""Test RelativePath()."""
env = self.env
# Trivial cases - directory or file relative to itself
self.assertEqual(env.RelativePath('a', 'a'), '.')
self.assertEqual(env.RelativePath('a/b/c', 'a/b/c'), '.')
self.assertEqual(env.RelativePath('a', 'a', source_is_file=True), 'a')
self.assertEqual(env.RelativePath('a/b/c', 'a/b/c', source_is_file=True),
'c')
# Can pass in directory or file nodes
self.assertEqual(env.RelativePath(env.Dir('a'), env.File('b/c'), sep='/'),
'../b/c')
# Separator argument is respected
self.assertEqual(env.RelativePath('.', 'a/b/c', sep='BOOGA'),
'aBOOGAbBOOGAc')
# Default separator is os.sep
self.assertEqual(env.RelativePath('.', 'a/b'),
'a' + os.sep + 'b')
# No common dirs
self.assertEqual(env.RelativePath('a/b/c', 'd/e/f', sep='/'),
'../../../d/e/f')
self.assertEqual(
env.RelativePath('a/b/c', 'd/e/f', sep='/', source_is_file=True),
'../../d/e/f')
# Common dirs
self.assertEqual(env.RelativePath('a/b/c/d', 'a/b/e/f', sep='/'),
'../../e/f')
# Source or destination path is different length
self.assertEqual(env.RelativePath('a/b/c/d', 'a/b', sep='/'), '../..')
self.assertEqual(env.RelativePath('a/b', 'a/b/c/d', sep='/'), 'c/d')
# Current directory on either side
self.assertEqual(env.RelativePath('a/b/c', '.', sep='/'), '../../..')
self.assertEqual(env.RelativePath('.', 'a/b/c', sep='/'), 'a/b/c')
# Variables are evaluated
env.Replace(
DIR1='foo',
DIR2='bar',
)
self.assertEqual(env.RelativePath('foo/$DIR2/a', '$DIR1/bar/b', sep='/'),
'../b')
def testApplyBuildSConscript(self):
"""Test ApplySConscript() and BuildSConscript() (MEDIUM test)."""
env = self.env
env['SUB1'] = 'nougat'
# ApplySConscript() affects the calling environment
env.ApplySConscript('SConscript1')
self.assertEqual(env.get('SUB2'), 'orange')
# BuildSConscript() does not affect the calling environment
env.BuildSConscript('SConscript2')
self.assertEqual(env.get('SUB2'), 'orange')
# BuildSConscript finds build.scons in preference to SConscript
env.BuildSConscript('abs1')
# But does look for SConscript if there isn't build.scons
env.BuildSConscript('abs2')
def TestSConstruct(scons_globals):
"""Test SConstruct file.
Args:
scons_globals: Global variables dict from the SConscript file.
"""
# Get globals from SCons
Environment = scons_globals['Environment']
env = Environment(tools=['environment_tools'])
# Run unit tests
TestFramework.RunUnitTests(EnvToolsTests, root_env=env)
sconscript1_contents = """
Import('env')
if env.get('SUB1') != 'nougat':
raise ValueError('ApplySConscript() failure in sconscript1')
env['SUB2'] = 'orange'
"""
sconscript2_contents = """
Import('env')
if env.get('SUB1') != 'nougat':
raise ValueError('BuildSConscript() failure in sconscript2')
env['SUB2'] = 'pizza'
"""
sconscript3_contents = """
Import('env')
filename = '%s'
env.Execute(Touch(filename))
"""
def main():
test = TestFramework.TestFramework()
test.subdir('environment_tools')
base = 'environment_tools/'
test.WriteSConscript(base + 'SConstruct', TestSConstruct)
test.write(base + 'SConscript1', sconscript1_contents)
test.write(base + 'SConscript2', sconscript2_contents)
test.subdir(base + 'abs1')
test.write(base + 'abs1/build.scons', sconscript3_contents % 'yes1')
test.write(base + 'abs1/SConscript', sconscript3_contents % 'no')
test.subdir(base + 'abs2')
test.write(base + 'abs2/SConscript', sconscript3_contents % 'yes2')
# Ignore stderr since unittest prints its output there
test.run(chdir=base, stderr=None)
test.must_exist(base + 'abs1/yes1')
test.must_not_exist(base + 'abs1/no')
test.must_exist(base + 'abs2/yes2')
test.pass_test()
if __name__ == '__main__':
main()<|fim▁end|> | # distribution.
# * Neither the name of Google Inc. nor the names of its |
<|file_name|>msgfmt.py<|end_file_name|><|fim▁begin|>""" msgfmt tool """
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/msgfmt.py 2014/03/02 14:18:15 garyo"
from SCons.Builder import BuilderBase
#############################################################################
class _MOFileBuilder(BuilderBase):
""" The builder class for `MO` files.
The reason for this builder to exists and its purpose is quite simillar
as for `_POFileBuilder`. This time, we extend list of sources, not targets,
and call `BuilderBase._execute()` only once (as we assume single-target
here).
"""
def _execute(self, env, target, source, *args, **kw):<|fim▁hole|> # in this case, as it is called too late (after multiple sources
# are handled single_source builder.
import SCons.Util
from SCons.Tool.GettextCommon import _read_linguas_from_files
linguas_files = None
if env.has_key('LINGUAS_FILE') and env['LINGUAS_FILE'] is not None:
linguas_files = env['LINGUAS_FILE']
# This should prevent from endless recursion.
env['LINGUAS_FILE'] = None
# We read only languages. Suffixes shall be added automatically.
linguas = _read_linguas_from_files(env, linguas_files)
if SCons.Util.is_List(source):
source.extend(linguas)
elif source is not None:
source = [source] + linguas
else:
source = linguas
result = BuilderBase._execute(self,env,target,source,*args, **kw)
if linguas_files is not None:
env['LINGUAS_FILE'] = linguas_files
return result
#############################################################################
#############################################################################
def _create_mo_file_builder(env, **kw):
""" Create builder object for `MOFiles` builder """
import SCons.Action
# FIXME: What factory use for source? Ours or their?
kw['action'] = SCons.Action.Action('$MSGFMTCOM','$MSGFMTCOMSTR')
kw['suffix'] = '$MOSUFFIX'
kw['src_suffix'] = '$POSUFFIX'
kw['src_builder'] = '_POUpdateBuilder'
kw['single_source'] = True
return _MOFileBuilder(**kw)
#############################################################################
#############################################################################
def generate(env,**kw):
""" Generate `msgfmt` tool """
import SCons.Util
from SCons.Tool.GettextCommon import _detect_msgfmt
try:
env['MSGFMT'] = _detect_msgfmt(env)
except:
env['MSGFMT'] = 'msgfmt'
env.SetDefault(
MSGFMTFLAGS = [ SCons.Util.CLVar('-c') ],
MSGFMTCOM = '$MSGFMT $MSGFMTFLAGS -o $TARGET $SOURCE',
MSGFMTCOMSTR = '',
MOSUFFIX = ['.mo'],
POSUFFIX = ['.po']
)
env.Append( BUILDERS = { 'MOFiles' : _create_mo_file_builder(env) } )
#############################################################################
#############################################################################
def exists(env):
""" Check if the tool exists """
from SCons.Tool.GettextCommon import _msgfmt_exists
try:
return _msgfmt_exists(env)
except:
return False
#############################################################################
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:<|fim▁end|> | # Here we add support for 'LINGUAS_FILE' keyword. Emitter is not suitable |
<|file_name|>lookup.rs<|end_file_name|><|fim▁begin|>// CITA
// Copyright 2016-2017 Cryptape Technologies LLC.
// This program is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public
// License as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any
// later version.
// This program is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use super::{AVLError, Query};
use super::node::*;
use H256;
use hashdb::HashDB;
use rlp::*;
/// AVL lookup helper object.
pub struct Lookup<'a, Q: Query> {
/// database to query from.
pub db: &'a HashDB,
/// Query object to record nodes and transform data.
pub query: Q,
/// Hash to start at
pub hash: H256,
}
impl<'a, Q: Query> Lookup<'a, Q> {
/// Look up the given key. If the value is found, it will be passed to the given
/// function to decode or copy.
pub fn look_up(mut self, key: NodeKey) -> super::Result<Option<Q::Item>> {
let mut hash = self.hash;
// this loop iterates through non-inline nodes.
for depth in 0.. {
let node_data = match self.db.get(&hash) {
Some(value) => value,
None => {
return Err(Box::new(match depth {
0 => AVLError::InvalidStateRoot(hash),<|fim▁hole|> _ => AVLError::IncompleteDatabase(hash),
}));
}
};
self.query.record(&hash, &node_data, depth);
// this loop iterates through all inline children (usually max 1)
// without incrementing the depth.
let mut node_data = &node_data[..];
loop {
match Node::decoded(node_data) {
Node::Leaf(k, value) => {
return Ok(match k == key {
true => Some(self.query.decode(value)),
false => None,
});
}
Node::Branch(_, k, children) => {
let idx = if key < k { 0 } else { 1 };
node_data = children[idx as usize];
}
_ => return Ok(None),
}
// check if new node data is inline or hash.
let r = Rlp::new(node_data);
if r.is_data() && r.size() == 32 {
hash = r.as_val();
break;
}
}
}
Ok(None)
}
}<|fim▁end|> | |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>'use strict'
module.exports = {
info: {
key: 'javascript',
title: 'JavaScript',
extname: '.js',
default: 'xhr'<|fim▁hole|> jquery: require('./jquery'),
fetch: require('./fetch'),
xhr: require('./xhr'),
axios: require('./axios')
}<|fim▁end|> | },
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# __init__.py
# Copyright (C) 2015 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A U1DB backend for encrypting data before sending to server and decrypting
after receiving.
"""
import os
from twisted.web.client import Agent
from twisted.internet import reactor
from leap.common.http import getPolicyForHTTPS
from leap.soledad.common.log import getLogger
from leap.soledad.client.http_target.send import HTTPDocSender
from leap.soledad.client.http_target.api import SyncTargetAPI
from leap.soledad.client.http_target.fetch import HTTPDocFetcher
from leap.soledad.client import crypto as old_crypto
<|fim▁hole|>
logger = getLogger(__name__)
# we may want to collect statistics from the sync process
DO_STATS = False
if os.environ.get('SOLEDAD_STATS'):
DO_STATS = True
class SoledadHTTPSyncTarget(SyncTargetAPI, HTTPDocSender, HTTPDocFetcher):
"""
A SyncTarget that encrypts data before sending and decrypts data after
receiving.
Normally encryption will have been written to the sync database upon
document modification. The sync database is also used to write temporarily
the parsed documents that the remote send us, before being decrypted and
written to the main database.
"""
def __init__(self, url, source_replica_uid, creds, crypto, cert_file):
"""
Initialize the sync target.
:param url: The server sync url.
:type url: str
:param source_replica_uid: The source replica uid which we use when
deferring decryption.
:type source_replica_uid: str
:param creds: A dictionary containing the uuid and token.
:type creds: creds
:param crypto: An instance of SoledadCrypto so we can encrypt/decrypt
document contents when syncing.
:type crypto: soledad._crypto.SoledadCrypto
:param cert_file: Path to the certificate of the ca used to validate
the SSL certificate used by the remote soledad
server.
:type cert_file: str
"""
if url.endswith("/"):
url = url[:-1]
self._url = str(url) + "/sync-from/" + str(source_replica_uid)
self.source_replica_uid = source_replica_uid
self._auth_header = None
self._uuid = None
self.set_creds(creds)
self._crypto = crypto
# TODO: DEPRECATED CRYPTO
self._deprecated_crypto = old_crypto.SoledadCrypto(crypto.secret)
self._insert_doc_cb = None
# Twisted default Agent with our own ssl context factory
factory = getPolicyForHTTPS(cert_file)
self._http = Agent(reactor, factory)
if DO_STATS:
self.sync_exchange_phase = [0]<|fim▁end|> | |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>from django.test import TestCase, Client
from jpspapp.models import Club, Activity,UserProfile
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
import datetime
# Create your tests here.
class ClubTestCase(TestCase):
def setUp(self):
User.objects.create_user(username="clubtest", email='[email protected]', password='jp123456')
Club.objects.create(ClubObject=User.objects.get(username='clubtest'), ClubName="测试社团", ClubId=601, Type='1',
ShezhangName="社长", ShezhangQq="12345678", ShezhangGrade='1', ShezhangClass='1',
IfRecruit=True, EnrollGroupQq='12345678')
def test_club_update(self):
club = Club.objects.get(ClubName="测试社团")
club.ShezhangName = "社长姓名"
club.save()
self.assertEqual(club.ShezhangName, "社长姓名")
def test_club_del(selfs):
club = Club.objects.get(ClubName="测试社团")
club.delete()
user = User.objects.get(username="clubtest")
user.delete()
class ActivityModelTest(TestCase):
def setUp(self):
User.objects.create_user(username="clubtest", email='[email protected]', password='jp123456')
Club.objects.create(ClubObject=User.objects.get(username='clubtest'), ClubName="测试社团", ClubId=601, Type='1',
ShezhangName="社长", ShezhangQq="12345678", ShezhangGrade='1', ShezhangClass='1',
IfRecruit=True, EnrollGroupQq='12345678')
Activity.objects.create(Name="活动名称", Region="活动地点", ClubObject=Club.objects.get(ClubName="测试社团"),
Content="活动内容", Date1=datetime.datetime.now(),
Date2=datetime.datetime.now() + datetime.timedelta(days=1), State='0', Type='普通')
def test_update(self):
activity = Activity.objects.get(Name="活动名称")
activity.Content = "活动内容测试"
activity.save()
self.assertEqual(activity.Content, '活动内容测试')
def test_delete(self):
Activity.objects.get(Region="活动地点").delete()
Club.objects.get(ShezhangName='社长').delete()
User.objects.get(username='clubtest').delete()
class UserProfileModelTest(TestCase):
def setUp(self):
User.objects.create(username='userprofiletest',email='[email protected]',password='jp123456')
UserProfile.objects.create(UserObject=User.objects.get(username='userprofiletest'),UserName='测试用户',Class=1,Grade=1,AttendYear='2017',QQ='12345678',Phone='12345678901',Email='[email protected]')
def test_update(self):
user = UserProfile.objects.get(UserName='测试用户')
user.Class= 2
user.save()
self.assertEqual(user.Class,2)
def test_delete(self):
user = UserProfile.objects.get(UserName='测试用户')
user.delete()
class UserModelTest(TestCase):
def create(self):
pass
def update(selfs):
pass
def delete(self):
pass
class PostModelTest(TestCase):
def test(self):
pass
def update(selfs):
pass<|fim▁hole|><|fim▁end|> |
def delete(self):
pass |
<|file_name|>gsrt_config.py<|end_file_name|><|fim▁begin|>import os
# maintain python 2.7 compatibility for time being. GSRTConfig is a dependency on everything else,
# including ow-swagger-library, which supports both python2 and python3. I think we should
# stop supporting 2.x if needed, but since many people outside GSRT are still just beginning
# to transition to python3 yet use our client library, we should maintain support here. We want
# to continue to support other groups using and relying on OW and our client is the fastest way to that
# without an immediate hard blocker if they have a long python3 road ahead of them (aka, WF team)
# Also, it's an incredibly basic module so writing this blurb was more effort than adding compatibility.
try:
import ConfigParser as configparser
py2 = True
except ImportError:
import configparser
py2 = False
# Need to pull data from env variables > kube secret > config file
class GSRTConfig(object):
"""
Quick module to handle a normalized config pull. Will search environment sections. To use environment varibles
for invidual settings, the ENV should be named with {SECTION}_{KEY}. So for overwatch apikey, you should set a
variable OVERWATCH_APIKEY={some apikey}
"""
def __init__(self, config_section, defaults=None, config_path=None, secrets_dir=None,
throw_exception=False, allow_no_value=True):
"""
Config files will be checked in /etc/panw be default. If a PANW_CONFIG env exists, it will pull the path from
there When setting variable values, make sure that you can have an ALL_CAPS setting that will work without
colliding in an environment variable Settings and sections should be lower_case_underscore
"""
if defaults is None:
defaults = {}
if not secrets_dir and 'SECRETS_DIR' in os.environ:
secrets_dir = os.environ.get("SECRETS_DIR")
self.secrets_dir = secrets_dir
# GSRTTECH-5222
self.parser = configparser.ConfigParser(defaults, allow_no_value=allow_no_value)
if not config_path and 'PANW_CONFIG' in os.environ:
config_path = os.environ.get('PANW_CONFIG')
if not config_path:
for known_path in [os.path.expanduser("~") + "/.config/panw", "/opt/.config/panw", "/etc/panw/config"]:
if os.path.isfile(known_path):
config_path = known_path
break
self.config_path = config_path
# Only read the file if the config_path is a true value
if config_path:
if os.path.isfile(config_path):
self.parser.read(os.path.expanduser(config_path))
else:
raise Exception("PANW_CONFIG=%s is not a valid file" % config_path)
# We'll stub out a blank section in case it doesn't exist, this prevents exceptions from being thrown
if not self.parser.has_section(config_section):
self.parser.add_section(config_section)
self.config_section = config_section
self.throw_exception = throw_exception
def get(self, *args, **kwargs):
""" Returns raw value
Returns:
str: raw value from env variable or config file
"""
return self.get_setting(*args, **kwargs)
def get_int(self, *args, **kwargs):
""" Cast raw value to int
Returns:
int: value cast to int
"""
# TODO: Make this mimic the config parser behavior. Does it throw exceptions?
return int(self.get_setting(*args, **kwargs))
def getint(self, *args, **kwargs):
""" backwards compatibility with configparser """
return self.get_int(*args, **kwargs)
def get_boolean(self, *args, **kwargs):
""" Returns boolean for parsed value. Parsed value must be one of
["1", "yes", "on", "true", "0", "no", "off", "false"]
Returns:
bool: boolean value representation of provided value
Raises:
ValueError: value provided was not a known boolean string value.
"""
value = self.get_setting(*args, **kwargs)
value = str(value).lower()
if value in ["1", "yes", "on", "true"]:
return True
elif value in ["0", "no", "off", "false"]:
return False
else:
raise ValueError("unexpected value '%s' provided" % value)
def getboolean(self, *args, **kwargs):
""" backwards compatibility with configparser """
return self.get_boolean(*args, **kwargs)
def get_setting(self, name, section=None, throw_exception=None):
"""
Setting names should always be lower_case_underscore
Well check the config and environment variables for the name. Environment variables will be made all caps
when checked
Args:
name (str): attribute name to get
section (Optional[str]): section name to retrieve attribute from, will default to self.config_section
throw_exception (Optional[bool]): throw exceptions or not if invalid, default to self.throw_exception
"""
if not section:
section = self.config_section
env_key = section.upper() + "_" + name.upper()
if env_key in os.environ:
return os.environ.get(env_key)
if self.secrets_dir:<|fim▁hole|> if os.path.isfile(secrets_file):
with open(secrets_file, "r") as fh:
return fh.read().rstrip()
if throw_exception is None:
throw_exception = self.throw_exception
# ensure section exists - needed here in addition to init for cases where user specifies section in
# `get_setting()`
if not self.parser.has_section(section):
self.parser.add_section(section)
if throw_exception:
return self.parser.get(section, name)
if py2:
try:
return self.parser.get(section, name)
except configparser.NoOptionError:
return None
else:
return self.parser.get(section, name, fallback=None)
# Automatically updated on Tue Jul 21 16:05:28 UTC 2020<|fim▁end|> | secrets_file = os.path.join(self.secrets_dir,
"{}_{}".format(section, name)) |
<|file_name|>sample_stats_test.py<|end_file_name|><|fim▁begin|># Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Sample Stats Ops."""<|fim▁hole|>from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import sample_stats
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class PercentileTestWithLowerInterpolation(test.TestCase):
_interpolation = "lower"
def test_one_dim_odd_input(self):
x = [1., 5., 3., 2., 4.]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.test_session():
pct = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_one_dim_even_input(self):
x = [1., 5., 3., 2., 4., 5.]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.test_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_two_dim_odd_input_axis_0(self):
x = np.array([[-1., 50., -3.5, 2., -1], [0., 0., 3., 2., 4.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.test_session():
# Get dim 1 with negative and positive indices.
pct_neg_index = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
pct_pos_index = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((2,), pct_neg_index.get_shape())
self.assertAllEqual((2,), pct_pos_index.get_shape())
self.assertAllClose(expected_percentile, pct_neg_index.eval())
self.assertAllClose(expected_percentile, pct_pos_index.eval())
def test_two_dim_even_axis_0(self):
x = np.array([[1., 2., 4., 50.], [1., 2., -4., 5.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.test_session():
pct = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((2,), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_two_dim_even_input_and_keep_dims_true(self):
x = np.array([[1., 2., 4., 50.], [1., 2., -4., 5.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, keepdims=True, axis=0)
with self.test_session():
pct = sample_stats.percentile(
x,
q=q,
interpolation=self._interpolation,
keep_dims=True,
axis=[0])
self.assertAllEqual((1, 2), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input(self):
x = rng.rand(2, 3, 4, 5)
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x, q=0.77, interpolation=self._interpolation, axis=axis)
with self.test_session():
pct = sample_stats.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis)
self.assertAllEqual(expected_percentile.shape, pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input_and_keepdims(self):
x = rng.rand(2, 3, 4, 5)
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keepdims=True)
with self.test_session():
pct = sample_stats.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keep_dims=True)
self.assertAllEqual(expected_percentile.shape, pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input_x_static_ndims_but_dynamic_sizes(self):
x = rng.rand(2, 3, 4, 5)
x_ph = array_ops.placeholder(dtypes.float64, shape=[None, None, None, None])
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x, q=0.77, interpolation=self._interpolation, axis=axis)
with self.test_session():
pct = sample_stats.percentile(
x_ph,
q=0.77,
interpolation=self._interpolation,
axis=axis)
self.assertAllClose(expected_percentile, pct.eval(feed_dict={x_ph: x}))
def test_four_dimensional_input_and_keepdims_x_static_ndims_dynamic_sz(self):
x = rng.rand(2, 3, 4, 5)
x_ph = array_ops.placeholder(dtypes.float64, shape=[None, None, None, None])
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keepdims=True)
with self.test_session():
pct = sample_stats.percentile(
x_ph,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keep_dims=True)
self.assertAllClose(expected_percentile, pct.eval(feed_dict={x_ph: x}))
def test_with_integer_dtype(self):
x = [1, 5, 3, 2, 4]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.test_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertEqual(dtypes.int32, pct.dtype)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
class PercentileTestWithHigherInterpolation(
PercentileTestWithLowerInterpolation):
_interpolation = "higher"
class PercentileTestWithNearestInterpolation(test.TestCase):
"""Test separately because np.round and tf.round make different choices."""
_interpolation = "nearest"
def test_one_dim_odd_input(self):
x = [1., 5., 3., 2., 4.]
for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.test_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_one_dim_even_input(self):
x = [1., 5., 3., 2., 4., 5.]
for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.test_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_invalid_interpolation_raises(self):
x = [1., 5., 3., 2., 4.]
with self.assertRaisesRegexp(ValueError, "interpolation"):
sample_stats.percentile(x, q=0.5, interpolation="bad")
def test_vector_q_raises_static(self):
x = [1., 5., 3., 2., 4.]
with self.assertRaisesRegexp(ValueError, "Expected.*ndims"):
sample_stats.percentile(x, q=[0.5])
def test_vector_q_raises_dynamic(self):
x = [1., 5., 3., 2., 4.]
q_ph = array_ops.placeholder(dtypes.float32)
pct = sample_stats.percentile(x, q=q_ph, validate_args=True)
with self.test_session():
with self.assertRaisesOpError("rank"):
pct.eval(feed_dict={q_ph: [0.5]})
if __name__ == "__main__":
test.main()<|fim▁end|> |
from __future__ import absolute_import |
<|file_name|>test_match_without_extension.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# encoding: utf-8
from nose import with_setup
from tests.utils import *
@with_setup(usual_setup_func, usual_teardown_func)
def test_negative():
create_file('xxx', 'b.png')
create_file('xxx', 'a.png')
create_file('xxx', 'a')
head, *data, footer = run_rmlint('-i')
assert footer['total_files'] == 3
assert footer['total_lint_size'] == 0
assert footer['duplicates'] == 0
@with_setup(usual_setup_func, usual_teardown_func)<|fim▁hole|> create_file('xxx', 'a.jpg')
head, *data, footer = run_rmlint('-i')
assert footer['total_files'] == 2
assert footer['total_lint_size'] == 3
assert footer['duplicates'] == 1<|fim▁end|> | def test_positive():
create_file('xxx', 'a.png') |
<|file_name|>shapes.ts<|end_file_name|><|fim▁begin|>import { Actor } from './game';
import { Point, Size } from './foundation';
export class Rectangle implements Actor {
public isAlive: boolean;
protected rectCenter: Point;
protected rectSize: Size;
protected horizontalDirection: number;
protected verticalDirection: number;
constructor(center: Point, size: Size) {
this.isAlive = true;
this.rectCenter = center;
this.rectSize = size;
this.horizontalDirection = 0;
this.verticalDirection = 0;
}
get center(): Point {
return this.rectCenter;
}
get size(): Size {
return this.rectSize;
}
public update(stageBounds: Size) {
if (this.isMovingToLeft()) {
if (!this.reachLeftLimit(stageBounds)) {
this.rectCenter.x -= 5;
} else {
this.horizontalDirection = 1;
}
} else {
if (!this.reachRightLimit(stageBounds)) {
this.rectCenter.x += 5;
} else {
this.horizontalDirection = 0;
}
}
if (this.isMovingToTop()) {
if (!this.reachTopLimit(stageBounds)) {
this.rectCenter.y -= 5;
} else {
this.verticalDirection = 1;
}
} else {
if (!this.reachBottomLimit(stageBounds)) {
this.rectCenter.y += 5;
} else {
this.verticalDirection = 0;
}
}
}
public paint(context: CanvasRenderingContext2D) {
context.fillStyle = 'rgb(255, 255, 255)';
context.fillRect(
this.rectCenter.x - this.rectSize.width / 2,
this.rectCenter.y - this.rectSize.height / 2,
this.rectSize.width,
this.rectSize.height);
}
protected isMovingToLeft(): boolean {
return this.horizontalDirection === 0;
}
// protected isMovingToRight(): boolean {
// return this.horizontalDirection === 1;
// }
protected isMovingToTop(): boolean {
return this.verticalDirection === 0;
}
<|fim▁hole|> // protected isMovingToBottom(): boolean {
// return this.verticalDirection === 1;
// }
protected reachLeftLimit(bounds: Size): boolean {
return this.rectCenter.x - this.rectSize.width / 2 <= 0;
}
protected reachRightLimit(bounds: Size): boolean {
return this.rectCenter.x + this.rectSize.width / 2 >= bounds.width;
}
protected reachTopLimit(bounds: Size): boolean {
return this.rectCenter.y - this.rectSize.height / 2 <= 0;
}
protected reachBottomLimit(bounds: Size): boolean {
return this.rectCenter.y + this.rectSize.height / 2 >= bounds.height;
}
}<|fim▁end|> | |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>//! AWS Support
//!
//! If you're using the service, you're probably looking for [AWSSupportClient](struct.AWSSupportClient.html) and [AWSSupport](trait.AWSSupport.html).
extern crate hyper;
extern crate rusoto_core;
extern crate serde;
#[macro_use]<|fim▁hole|>extern crate serde_derive;
extern crate serde_json;
mod generated;
mod custom;
pub use generated::*;
pub use custom::*;<|fim▁end|> | |
<|file_name|>UPnP.SOAPRequest.js<|end_file_name|><|fim▁begin|>class upnp_soaprequest {
constructor() {
}<|fim▁hole|> // System.Runtime.Remoting.ObjRef CreateObjRef(type requestedType)
CreateObjRef() {
}
// bool Equals(System.Object obj)
Equals() {
}
// int GetHashCode()
GetHashCode() {
}
// System.Object GetLifetimeService()
GetLifetimeService() {
}
// type GetType()
GetType() {
}
// System.Object InitializeLifetimeService()
InitializeLifetimeService() {
}
// string ToString()
ToString() {
}
}
module.exports = upnp_soaprequest;<|fim▁end|> | |
<|file_name|>sanitizer.pipe.ts<|end_file_name|><|fim▁begin|>import { Pipe, PipeTransform } from '@angular/core';
import { DomSanitizer, SafeHtml } from '@angular/platform-browser';
<|fim▁hole|>})
export class SanitizerPipe implements PipeTransform {
constructor(private _sanitizer: DomSanitizer) {
}
transform(v: string): SafeHtml {
const html = this._sanitizer.bypassSecurityTrustHtml(v);
if (html.hasOwnProperty('changingThisBreaksApplicationSecurity') &&
/^<p>\d+\./.test(html['changingThisBreaksApplicationSecurity'])) {
html['changingThisBreaksApplicationSecurity'] =
'<p>' + html['changingThisBreaksApplicationSecurity']
.substr(html['changingThisBreaksApplicationSecurity'].indexOf('.') + 1);
}
return html;
}
}<|fim▁end|> | @Pipe({
name: 'sanitizeHtml',
pure: false |
<|file_name|>webserver.js<|end_file_name|><|fim▁begin|>webserver.prototype.__proto__ = require('events').EventEmitter.prototype;
webserver.prototype.server = null;
webserver.prototype.config = null;
webserver.prototype.endpoints = [];
const bodyParser = require('body-parser');<|fim▁hole|>const formData = multer({ storage: multer.memoryStorage() });
const https = require('https');
const http = require('http');
const fs = require('fs');
/**
* Initalize a webserver with options such as SSL
* @param port
* @param options
*/
function webserver(logger, port, options) {
var self = this;
self.logger = logger;
self.port = port;
self.options = options;
self.app = express();
self.app.use(bodyParser.urlencoded({extended: true}));
self.app.set('json spaces', 2);
}
webserver.prototype.start = function (callback) {
var self = this;
if (self.HTTPserver != null && self.HTTPSserver != null)
return callback({Error: "Server is already active... Try restarting instead."});
if (self.HTTPSserver == null) {
try {
self.ssl = {};
if (self.options && self.options.hasOwnProperty("ssl")) {
// Create a service (the app object is just a callback).
// This line is from the Node.js HTTPS documentation.
if (self.options.ssl.hasOwnProperty("key") && self.options.ssl.hasOwnProperty("cert")) {
var options = {
key: self.options.ssl.key,
cert: self.options.ssl.cert
};
self.HTTPSserver = https.createServer(options, self.app).listen(443);// Create an HTTPS service identical to the HTTP service.
self.logger.log("debug", "Started SSL server on port 443.");
}
}
if (self.HTTPserver == null) {
try {
self.HTTPserver = http.createServer(self.app).listen(self.port);// Create an HTTP service.
self.logger.log("debug", "Started HTTP server on port " + self.port);
if (callback)
return callback();
} catch (e){
console.log(e);
if (callback)
return callback({Error: "Failed to start HTTP server due to " + e});
}
}
} catch (e){
console.log(e);
if (callback)
return callback({Error: "Failed to start HTTP server due to " + e});
}
} else if (self.HTTPserver == null) {
try {
self.ssl = {};
if (self.HTTPserver == null) {
try {
self.HTTPserver = http.createServer(self.app).listen(self.port);// Create an HTTP service.
self.logger.log("debug", "Started HTTP server on port " + self.port);
if (self.options && self.options.hasOwnProperty("ssl")) {
if (self.options.ssl.hasOwnProperty("key") && self.options.ssl.hasOwnProperty("cert")) {
var options = {
key: self.options.ssl.key,
cert: self.options.ssl.cert
};
self.HTTPSserver = https.createServer(options, self.app).listen(443);// Create an HTTPS service identical to the HTTP service.
self.logger.log("debug", "Started SSL server on port 443.");
}
}
} catch (e){
console.log(e);
if (callback)
return callback({Error: "Failed to start HTTP server due to " + e});
}
}
} catch (e){
console.log(e);
if (callback)
return callback({Error: "Failed to start HTTP server due to " + e});
}
}
};
webserver.prototype.addEndpoint = function (method, url, callback) {
var self = this;
self.endpoints.push({method: method, url: url, callback: callback});
switch (method.toLowerCase()) {
case 'post':
// console.log("Reg post...");
//
// var func = new Function(
// "return function " + method + "_" + url + "(req, res, next){ " +
// "console.log('Test' + self.app._router.stack);}"
// )();
self.app.post(url, formData.array(), callback);
break;
case 'get':
self.app.get(url, callback);
break;
default:
self.app.post(url, formData.array(), callback);
break;
}
};
webserver.prototype.removeEndpoint = function (method, url) {
var self = this;
console.log("Added route " + self.app._router.stack);
var routes = self.app._router.stack;
routes.forEach(removeMiddlewares);
function removeMiddlewares(route, i, routes) {
switch (route.handle.name) {
case method + "_" + url:
routes.splice(i, 1);
}
if (route.route)
route.route.stack.forEach(removeMiddlewares);
}
};
webserver.prototype.restart = function () {
var self = this;
if (self.HTTPSserver != null && self.HTTPserver != null) {
self.HTTPSserver.close();
self.HTTPserver.close();
for (var endpoint in self.endpoints)
if (self.endpoints.hasOwnProperty(endpoint))
self.addEndpoint(self.endpoints[endpoint].method, self.endpoints[endpoint].url, self.endpoints[endpoint].callback);
self.start();
}
};
module.exports = webserver;<|fim▁end|> | const express = require('express');
const multer = require('multer'); |
<|file_name|>default.py<|end_file_name|><|fim▁begin|>import sys, xbmcplugin, xbmcgui,xbmc
_id = "plugin.video.italian-news"
_resdir = "special://home/addons/" + _id + "/resources"
_thisPlugin = int(sys.argv[1])
_icons = _resdir + "/icons/"
sys.path.append( xbmc.translatePath(_resdir + "/lib/"))
import rai
_tg1Icon=xbmc.translatePath(_icons +"Tg1_logo.png")
_tg2Icon=xbmc.translatePath(_icons +"Tg2_logo.png")
_tg3Icon=xbmc.translatePath(_icons +"Tg3_logo.png")
def _addItem(label,uri,icon,isFolder=False):
item = xbmcgui.ListItem(label, iconImage=icon)
xbmcplugin.addDirectoryItem(_thisPlugin,uri,item,isFolder)
def _get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
param = _get_params()
plugins = {
'1':(rai.RaiUno, 'Guarda il TG1',_tg1Icon),
'2':(rai.RaiDue, 'Guarda il TG2',_tg2Icon),
'3':(rai.RaiTre, 'Guarda il TG3',_tg3Icon)
}
if 'plugin' in param:
(engine, title, eicon)=plugins[param['plugin']]
for (name,url,icon) in engine().get():
if icon == '':
icon = eicon
_addItem(name,url,icon)
xbmcplugin.endOfDirectory(_thisPlugin)
else:
for n in sorted(plugins.iterkeys()):
(engine, title, icon)=plugins[n]
print title
_addItem(title,sys.argv[0]+'?plugin='+n,icon,isFolder=True)<|fim▁hole|>#for (name,url,icon) in tg1:
# _addItem(name,url,icon)
#xbmcplugin.endOfDirectory(_thisPlugin)<|fim▁end|> | xbmcplugin.endOfDirectory(_thisPlugin)
|
<|file_name|>preferencesRenderers.ts<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { TPromise } from 'vs/base/common/winjs.base';
import * as nls from 'vs/nls';
import { Delayer } from 'vs/base/common/async';
import { Disposable, IDisposable, dispose } from 'vs/base/common/lifecycle';
import { IAction } from 'vs/base/common/actions';
import { IJSONSchema } from 'vs/base/common/jsonSchema';
import Event, { Emitter } from 'vs/base/common/event';
import { Registry } from 'vs/platform/registry/common/platform';
import * as editorCommon from 'vs/editor/common/editorCommon';
import { Range, IRange } from 'vs/editor/common/core/range';
import { IConfigurationRegistry, Extensions as ConfigurationExtensions, ConfigurationScope, IConfigurationPropertySchema } from 'vs/platform/configuration/common/configurationRegistry';
import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation';
import { IPreferencesService, ISettingsGroup, ISetting, IPreferencesEditorModel, IFilterResult, ISettingsEditorModel } from 'vs/workbench/parts/preferences/common/preferences';
import { SettingsEditorModel, DefaultSettingsEditorModel } from 'vs/workbench/parts/preferences/common/preferencesModels';
import { ICodeEditor, IEditorMouseEvent, MouseTargetType } from 'vs/editor/browser/editorBrowser';
import { IContextMenuService, ContextSubMenu } from 'vs/platform/contextview/browser/contextView';
import { SettingsGroupTitleWidget, EditPreferenceWidget, SettingsHeaderWidget } from 'vs/workbench/parts/preferences/browser/preferencesWidgets';
import { ITelemetryService } from 'vs/platform/telemetry/common/telemetry';
import { RangeHighlightDecorations } from 'vs/workbench/common/editor/rangeDecorations';
import { IConfigurationEditingService, ConfigurationEditingError, ConfigurationEditingErrorCode, ConfigurationTarget } from 'vs/workbench/services/configuration/common/configurationEditing';
import { ITextFileService } from 'vs/workbench/services/textfile/common/textfiles';
import { overrideIdentifierFromKey } from 'vs/platform/configuration/common/model';
import { IMarkerService, IMarkerData } from 'vs/platform/markers/common/markers';
import { IWorkspaceConfigurationService } from 'vs/workbench/services/configuration/common/configuration';
import { IMessageService, Severity } from 'vs/platform/message/common/message';
import { IWorkbenchEditorService } from 'vs/workbench/services/editor/common/editorService';
import { ICursorPositionChangedEvent } from 'vs/editor/common/controller/cursorEvents';
import { ModelDecorationOptions } from 'vs/editor/common/model/textModelWithDecorations';
import { IWorkspaceContextService, WorkbenchState } from 'vs/platform/workspace/common/workspace';
import { MarkdownString } from 'vs/base/common/htmlContent';
export interface IPreferencesRenderer<T> extends IDisposable {
preferencesModel: IPreferencesEditorModel<T>;
associatedPreferencesModel: IPreferencesEditorModel<T>;
onFocusPreference: Event<T>;
onClearFocusPreference: Event<T>;
onUpdatePreference: Event<{ key: string, value: any, source: T }>;
render(): void;
updatePreference(key: string, value: any, source: T): void;
filterPreferences(filterResult: IFilterResult): void;
focusPreference(setting: T): void;
clearFocus(setting: T): void;
}
export class UserSettingsRenderer extends Disposable implements IPreferencesRenderer<ISetting> {
private settingHighlighter: SettingHighlighter;
private editSettingActionRenderer: EditSettingRenderer;
private highlightMatchesRenderer: HighlightMatchesRenderer;
private modelChangeDelayer: Delayer<void> = new Delayer<void>(200);
private _onFocusPreference: Emitter<ISetting> = new Emitter<ISetting>();
public readonly onFocusPreference: Event<ISetting> = this._onFocusPreference.event;
private _onUpdatePreference: Emitter<{ key: string, value: any, source: ISetting }> = new Emitter<{ key: string, value: any, source: ISetting }>();
public readonly onUpdatePreference: Event<{ key: string, value: any, source: ISetting }> = this._onUpdatePreference.event;
private _onClearFocusPreference: Emitter<ISetting> = new Emitter<ISetting>();
public readonly onClearFocusPreference: Event<ISetting> = this._onClearFocusPreference.event;
private filterResult: IFilterResult;
constructor(protected editor: ICodeEditor, public readonly preferencesModel: SettingsEditorModel, private _associatedPreferencesModel: IPreferencesEditorModel<ISetting>,
@IPreferencesService protected preferencesService: IPreferencesService,
@ITelemetryService private telemetryService: ITelemetryService,
@ITextFileService private textFileService: ITextFileService,
@IConfigurationEditingService private configurationEditingService: IConfigurationEditingService,
@IMessageService private messageService: IMessageService,
@IInstantiationService protected instantiationService: IInstantiationService
) {
super();
this._register(preferencesModel);
this.settingHighlighter = this._register(instantiationService.createInstance(SettingHighlighter, editor, this._onFocusPreference, this._onClearFocusPreference));
this.highlightMatchesRenderer = this._register(instantiationService.createInstance(HighlightMatchesRenderer, editor));
this.editSettingActionRenderer = this._register(this.instantiationService.createInstance(EditSettingRenderer, this.editor, this.preferencesModel, this.settingHighlighter));
this._register(this.editSettingActionRenderer.onUpdateSetting(({ key, value, source }) => this.updatePreference(key, value, source)));
this._register(this.editor.getModel().onDidChangeContent(() => this.modelChangeDelayer.trigger(() => this.onModelChanged())));
this.createHeader();
}
public get associatedPreferencesModel(): IPreferencesEditorModel<ISetting> {
return this._associatedPreferencesModel;
}
public set associatedPreferencesModel(associatedPreferencesModel: IPreferencesEditorModel<ISetting>) {
this._associatedPreferencesModel = associatedPreferencesModel;
this.editSettingActionRenderer.associatedPreferencesModel = associatedPreferencesModel;
}
protected createHeader(): void {
this._register(new SettingsHeaderWidget(this.editor, '')).setMessage(nls.localize('emptyUserSettingsHeader', "Place your settings here to overwrite the Default Settings."));
}
public render(): void {
this.editSettingActionRenderer.render(this.preferencesModel.settingsGroups, this.associatedPreferencesModel);
if (this.filterResult) {
this.filterPreferences(this.filterResult);
}
}
public updatePreference(key: string, value: any, source: ISetting): void {
this.telemetryService.publicLog('defaultSettingsActions.copySetting', { userConfigurationKeys: [key] });
const overrideIdentifier = source.overrideOf ? overrideIdentifierFromKey(source.overrideOf.key) : null;
const resource = this.preferencesModel.uri;
this.configurationEditingService.writeConfiguration(this.preferencesModel.configurationTarget, { key, value }, { donotSave: this.textFileService.isDirty(resource), donotNotifyError: true, scopes: { overrideIdentifier, resource } })
.then(() => this.onSettingUpdated(source), error => {
this.messageService.show(Severity.Error, this.toErrorMessage(error, this.preferencesModel.configurationTarget));
});
}
private toErrorMessage(error: ConfigurationEditingError, target: ConfigurationTarget): string {
switch (error.code) {
case ConfigurationEditingErrorCode.ERROR_INVALID_CONFIGURATION: {
return nls.localize('errorInvalidConfiguration', "Unable to write into settings. Correct errors/warnings in the file and try again.");
};
}
return error.message;
}
private onModelChanged(): void {
if (!this.editor.getModel()) {
// model could have been disposed during the delay
return;
}
this.render();
}
private onSettingUpdated(setting: ISetting) {
this.editor.focus();
setting = this.getSetting(setting);
if (setting) {
// TODO:@sandy Selection range should be template range
this.editor.setSelection(setting.valueRange);
this.settingHighlighter.highlight(setting, true);
}
}
private getSetting(setting: ISetting): ISetting {
const { key, overrideOf } = setting;
if (overrideOf) {
const setting = this.getSetting(overrideOf);
for (const override of setting.overrides) {
if (override.key === key) {
return override;
}
}
return null;
}
return this.preferencesModel.getPreference(key);
}
public filterPreferences(filterResult: IFilterResult): void {
this.filterResult = filterResult;
this.settingHighlighter.clear(true);
this.highlightMatchesRenderer.render(filterResult ? filterResult.matches : []);
}
public focusPreference(setting: ISetting): void {
const s = this.getSetting(setting);
if (s) {
this.settingHighlighter.highlight(s, true);
} else {
this.settingHighlighter.clear(true);
}
}
public clearFocus(setting: ISetting): void {
this.settingHighlighter.clear(true);
}
}
export class WorkspaceSettingsRenderer extends UserSettingsRenderer implements IPreferencesRenderer<ISetting> {
private untrustedSettingRenderer: UnsupportedWorkspaceSettingsRenderer;
private workspaceConfigurationRenderer: WorkspaceConfigurationRenderer;
constructor(editor: ICodeEditor, preferencesModel: SettingsEditorModel, associatedPreferencesModel: IPreferencesEditorModel<ISetting>,
@IPreferencesService preferencesService: IPreferencesService,
@ITelemetryService telemetryService: ITelemetryService,
@ITextFileService textFileService: ITextFileService,
@IConfigurationEditingService configurationEditingService: IConfigurationEditingService,
@IMessageService messageService: IMessageService,
@IInstantiationService instantiationService: IInstantiationService
) {
super(editor, preferencesModel, associatedPreferencesModel, preferencesService, telemetryService, textFileService, configurationEditingService, messageService, instantiationService);
this.untrustedSettingRenderer = this._register(instantiationService.createInstance(UnsupportedWorkspaceSettingsRenderer, editor, preferencesModel));
this.workspaceConfigurationRenderer = this._register(instantiationService.createInstance(WorkspaceConfigurationRenderer, editor, preferencesModel));
}
protected createHeader(): void {
this._register(new SettingsHeaderWidget(this.editor, '')).setMessage(nls.localize('emptyWorkspaceSettingsHeader', "Place your settings here to overwrite the User Settings."));
}
public render(): void {
super.render();
this.untrustedSettingRenderer.render();
this.workspaceConfigurationRenderer.render();
}
}
export class FolderSettingsRenderer extends UserSettingsRenderer implements IPreferencesRenderer<ISetting> {
private unsupportedWorkbenchSettingsRenderer: UnsupportedWorkbenchSettingsRenderer;
constructor(editor: ICodeEditor, preferencesModel: SettingsEditorModel, associatedPreferencesModel: IPreferencesEditorModel<ISetting>,
@IPreferencesService preferencesService: IPreferencesService,
@ITelemetryService telemetryService: ITelemetryService,
@ITextFileService textFileService: ITextFileService,
@IConfigurationEditingService configurationEditingService: IConfigurationEditingService,
@IMessageService messageService: IMessageService,
@IInstantiationService instantiationService: IInstantiationService
) {
super(editor, preferencesModel, associatedPreferencesModel, preferencesService, telemetryService, textFileService, configurationEditingService, messageService, instantiationService);
this.unsupportedWorkbenchSettingsRenderer = this._register(instantiationService.createInstance(UnsupportedWorkbenchSettingsRenderer, editor, preferencesModel));
}
protected createHeader(): void {
this._register(new SettingsHeaderWidget(this.editor, '')).setMessage(nls.localize('emptyFolderSettingsHeader', "Place your folder settings here to overwrite those from the Workspace Settings."));
}
public render(): void {
super.render();
this.unsupportedWorkbenchSettingsRenderer.render();
}
}
export class DefaultSettingsRenderer extends Disposable implements IPreferencesRenderer<ISetting> {
private _associatedPreferencesModel: IPreferencesEditorModel<ISetting>;
private settingHighlighter: SettingHighlighter;
private settingsHeaderRenderer: DefaultSettingsHeaderRenderer;
private settingsGroupTitleRenderer: SettingsGroupTitleRenderer;
private filteredMatchesRenderer: FilteredMatchesRenderer;
private hiddenAreasRenderer: HiddenAreasRenderer;
private editSettingActionRenderer: EditSettingRenderer;
private _onUpdatePreference: Emitter<{ key: string, value: any, source: ISetting }> = new Emitter<{ key: string, value: any, source: ISetting }>();
public readonly onUpdatePreference: Event<{ key: string, value: any, source: ISetting }> = this._onUpdatePreference.event;
private _onFocusPreference: Emitter<ISetting> = new Emitter<ISetting>();
public readonly onFocusPreference: Event<ISetting> = this._onFocusPreference.event;
private _onClearFocusPreference: Emitter<ISetting> = new Emitter<ISetting>();
public readonly onClearFocusPreference: Event<ISetting> = this._onClearFocusPreference.event;
private filterResult: IFilterResult;
constructor(protected editor: ICodeEditor, public readonly preferencesModel: DefaultSettingsEditorModel,
@IPreferencesService protected preferencesService: IPreferencesService,
@IWorkbenchEditorService private editorService: IWorkbenchEditorService,
@IInstantiationService protected instantiationService: IInstantiationService
) {
super();
this.settingHighlighter = this._register(instantiationService.createInstance(SettingHighlighter, editor, this._onFocusPreference, this._onClearFocusPreference));
this.settingsHeaderRenderer = this._register(instantiationService.createInstance(DefaultSettingsHeaderRenderer, editor, preferencesModel.configurationScope));
this.settingsGroupTitleRenderer = this._register(instantiationService.createInstance(SettingsGroupTitleRenderer, editor));
this.filteredMatchesRenderer = this._register(instantiationService.createInstance(FilteredMatchesRenderer, editor));
this.editSettingActionRenderer = this._register(instantiationService.createInstance(EditSettingRenderer, editor, preferencesModel, this.settingHighlighter));
this._register(this.editSettingActionRenderer.onUpdateSetting(e => this._onUpdatePreference.fire(e)));
const paranthesisHidingRenderer = this._register(instantiationService.createInstance(StaticContentHidingRenderer, editor, preferencesModel.settingsGroups));
this.hiddenAreasRenderer = this._register(instantiationService.createInstance(HiddenAreasRenderer, editor, [this.settingsGroupTitleRenderer, this.filteredMatchesRenderer, paranthesisHidingRenderer]));
this._register(this.settingsGroupTitleRenderer.onHiddenAreasChanged(() => this.hiddenAreasRenderer.render()));
}
public get associatedPreferencesModel(): IPreferencesEditorModel<ISetting> {
return this._associatedPreferencesModel;
}
public set associatedPreferencesModel(associatedPreferencesModel: IPreferencesEditorModel<ISetting>) {
this._associatedPreferencesModel = associatedPreferencesModel;
this.editSettingActionRenderer.associatedPreferencesModel = associatedPreferencesModel;
}
public render() {
this.settingsGroupTitleRenderer.render(this.preferencesModel.settingsGroups);
this.editSettingActionRenderer.render(this.preferencesModel.settingsGroups, this._associatedPreferencesModel);
this.hiddenAreasRenderer.render();
this.settingHighlighter.clear(true);
this.settingsGroupTitleRenderer.showGroup(1);
this.hiddenAreasRenderer.render();
}
public filterPreferences(filterResult: IFilterResult): void {
this.filterResult = filterResult;
if (!filterResult) {
this.settingHighlighter.clear(true);
this.filteredMatchesRenderer.render(null);
this.settingsHeaderRenderer.render(this.preferencesModel.settingsGroups);
this.settingsGroupTitleRenderer.render(this.preferencesModel.settingsGroups);
this.settingsGroupTitleRenderer.showGroup(1);
this.editSettingActionRenderer.render(this.preferencesModel.settingsGroups, this._associatedPreferencesModel);
} else {
this.filteredMatchesRenderer.render(filterResult);
this.settingsHeaderRenderer.render(filterResult.filteredGroups);
this.settingsGroupTitleRenderer.render(filterResult.filteredGroups);
this.settingHighlighter.clear(true);
this.editSettingActionRenderer.render(filterResult.filteredGroups, this._associatedPreferencesModel);
}
this.hiddenAreasRenderer.render();
}
public focusPreference(s: ISetting): void {
const setting = this.getSetting(s);
if (setting) {
this.settingsGroupTitleRenderer.showSetting(setting);
this.settingHighlighter.highlight(setting, true);
} else {
this.settingHighlighter.clear(true);
}
}
private getSetting(setting: ISetting): ISetting {
const { key, overrideOf } = setting;
if (overrideOf) {
const setting = this.getSetting(overrideOf);
for (const override of setting.overrides) {
if (override.key === key) {
return override;
}
}
return null;
}
const settingsGroups = this.filterResult ? this.filterResult.filteredGroups : this.preferencesModel.settingsGroups;
return this.getPreference(key, settingsGroups);
}
private getPreference(key: string, settingsGroups: ISettingsGroup[]): ISetting {
for (const group of settingsGroups) {
for (const section of group.sections) {
for (const setting of section.settings) {
if (setting.key === key) {
return setting;
}
}
}
}
return null;
}
public clearFocus(setting: ISetting): void {
this.settingHighlighter.clear(true);
}
public collapseAll() {
this.settingsGroupTitleRenderer.collapseAll();
}
public updatePreference(key: string, value: any, source: ISetting): void {
}
}
export interface HiddenAreasProvider {
hiddenAreas: IRange[];
}
export class StaticContentHidingRenderer extends Disposable implements HiddenAreasProvider {
constructor(private editor: ICodeEditor, private settingsGroups: ISettingsGroup[]
) {
super();
}
get hiddenAreas(): IRange[] {
const model = this.editor.getModel();
return [
{
startLineNumber: 1,
startColumn: model.getLineMinColumn(1),
endLineNumber: 2,
endColumn: model.getLineMaxColumn(2)
},
{
startLineNumber: this.settingsGroups[0].range.endLineNumber + 1,
startColumn: model.getLineMinColumn(this.settingsGroups[0].range.endLineNumber + 1),
endLineNumber: this.settingsGroups[0].range.endLineNumber + 4,
endColumn: model.getLineMaxColumn(this.settingsGroups[0].range.endLineNumber + 4)
},
{
startLineNumber: model.getLineCount() - 1,
startColumn: model.getLineMinColumn(model.getLineCount() - 1),
endLineNumber: model.getLineCount(),
endColumn: model.getLineMaxColumn(model.getLineCount())
}
];
}
}
class DefaultSettingsHeaderRenderer extends Disposable {
private settingsHeaderWidget: SettingsHeaderWidget;
constructor(private editor: ICodeEditor, scope: ConfigurationScope) {
super();
const title = scope === ConfigurationScope.RESOURCE ? nls.localize('defaultFolderSettingsTitle', "Default Folder Settings") : nls.localize('defaultSettingsTitle', "Default Settings");
this.settingsHeaderWidget = this._register(new SettingsHeaderWidget(editor, title));
}
public render(settingsGroups: ISettingsGroup[]) {
if (settingsGroups.length) {
this.settingsHeaderWidget.setMessage('');
} else {
this.settingsHeaderWidget.setMessage(nls.localize('noSettingsFound', "No Settings Found."));
}
}
}
export class SettingsGroupTitleRenderer extends Disposable implements HiddenAreasProvider {
private _onHiddenAreasChanged: Emitter<void> = new Emitter<void>();
get onHiddenAreasChanged(): Event<void> { return this._onHiddenAreasChanged.event; };
private settingsGroups: ISettingsGroup[];
private hiddenGroups: ISettingsGroup[] = [];
private settingsGroupTitleWidgets: SettingsGroupTitleWidget[];
private disposables: IDisposable[] = [];
constructor(private editor: ICodeEditor,
@IInstantiationService private instantiationService: IInstantiationService
) {
super();
}
public get hiddenAreas(): IRange[] {
const hiddenAreas: IRange[] = [];
for (const group of this.hiddenGroups) {
hiddenAreas.push(group.range);
}
return hiddenAreas;
}
public render(settingsGroups: ISettingsGroup[]) {
this.disposeWidgets();
this.settingsGroups = settingsGroups.slice();
this.settingsGroupTitleWidgets = [];
for (const group of this.settingsGroups.slice().reverse()) {
const settingsGroupTitleWidget = this.instantiationService.createInstance(SettingsGroupTitleWidget, this.editor, group);
settingsGroupTitleWidget.render();
this.settingsGroupTitleWidgets.push(settingsGroupTitleWidget);
this.disposables.push(settingsGroupTitleWidget);
this.disposables.push(settingsGroupTitleWidget.onToggled(collapsed => this.onToggled(collapsed, settingsGroupTitleWidget.settingsGroup)));
}
this.settingsGroupTitleWidgets.reverse();
}
public showGroup(group: number) {
this.hiddenGroups = this.settingsGroups.filter((g, i) => i !== group - 1);
for (const groupTitleWidget of this.settingsGroupTitleWidgets.filter((g, i) => i !== group - 1)) {
groupTitleWidget.toggleCollapse(true);
}
this._onHiddenAreasChanged.fire();
}
public showSetting(setting: ISetting): void {
const settingsGroupTitleWidget = this.settingsGroupTitleWidgets.filter(widget => Range.containsRange(widget.settingsGroup.range, setting.range))[0];
if (settingsGroupTitleWidget && settingsGroupTitleWidget.isCollapsed()) {
settingsGroupTitleWidget.toggleCollapse(false);
this.hiddenGroups.splice(this.hiddenGroups.indexOf(settingsGroupTitleWidget.settingsGroup), 1);
this._onHiddenAreasChanged.fire();
}
}
public collapseAll() {
this.editor.setPosition({ lineNumber: 1, column: 1 });
this.hiddenGroups = this.settingsGroups.slice();
for (const groupTitleWidget of this.settingsGroupTitleWidgets) {
groupTitleWidget.toggleCollapse(true);
}
this._onHiddenAreasChanged.fire();
}
private onToggled(collapsed: boolean, group: ISettingsGroup) {
const index = this.hiddenGroups.indexOf(group);
if (collapsed) {
const currentPosition = this.editor.getPosition();
if (group.range.startLineNumber <= currentPosition.lineNumber && group.range.endLineNumber >= currentPosition.lineNumber) {
this.editor.setPosition({ lineNumber: group.range.startLineNumber - 1, column: 1 });
}
this.hiddenGroups.push(group);
} else {
this.hiddenGroups.splice(index, 1);
}
this._onHiddenAreasChanged.fire();
}
private disposeWidgets() {
this.hiddenGroups = [];
this.disposables = dispose(this.disposables);
}
public dispose() {
this.disposeWidgets();
super.dispose();
}
}
export class HiddenAreasRenderer extends Disposable {
constructor(private editor: ICodeEditor, private hiddenAreasProviders: HiddenAreasProvider[],
@IInstantiationService private instantiationService: IInstantiationService
) {
super();
}
public render() {
const ranges: IRange[] = [];
for (const hiddenAreaProvider of this.hiddenAreasProviders) {
ranges.push(...hiddenAreaProvider.hiddenAreas);
}
this.editor.setHiddenAreas(ranges);
}
public dispose() {
this.editor.setHiddenAreas([]);
super.dispose();
}
}
export class FilteredMatchesRenderer extends Disposable implements HiddenAreasProvider {
private decorationIds: string[] = [];
public hiddenAreas: IRange[] = [];
constructor(private editor: ICodeEditor,
@IInstantiationService private instantiationService: IInstantiationService
) {
super();
}
public render(result: IFilterResult): void {
const model = this.editor.getModel();
this.hiddenAreas = [];
this.editor.changeDecorations(changeAccessor => {
this.decorationIds = changeAccessor.deltaDecorations(this.decorationIds, []);
});
if (result) {
this.hiddenAreas = this.computeHiddenRanges(result.filteredGroups, result.allGroups, model);
this.editor.changeDecorations(changeAccessor => {
this.decorationIds = changeAccessor.deltaDecorations(this.decorationIds, result.matches.map(match => this.createDecoration(match, model)));
});
}
}
private createDecoration(range: IRange, model: editorCommon.IModel): editorCommon.IModelDeltaDecoration {
return {
range,
options: {
stickiness: editorCommon.TrackedRangeStickiness.NeverGrowsWhenTypingAtEdges,
className: 'findMatch'
}
};
}
private computeHiddenRanges(filteredGroups: ISettingsGroup[], allSettingsGroups: ISettingsGroup[], model: editorCommon.IModel): IRange[] {
const notMatchesRanges: IRange[] = [];
for (const group of allSettingsGroups) {
const filteredGroup = filteredGroups.filter(g => g.title === group.title)[0];
if (!filteredGroup) {
notMatchesRanges.push({
startLineNumber: group.range.startLineNumber - 1,
startColumn: model.getLineMinColumn(group.range.startLineNumber - 1),
endLineNumber: group.range.endLineNumber,
endColumn: model.getLineMaxColumn(group.range.endLineNumber),
});
} else {
for (const section of group.sections) {
if (section.titleRange) {
if (!this.containsLine(section.titleRange.startLineNumber, filteredGroup)) {
notMatchesRanges.push(this.createCompleteRange(section.titleRange, model));
}
}
for (const setting of section.settings) {
if (!this.containsLine(setting.range.startLineNumber, filteredGroup)) {
notMatchesRanges.push(this.createCompleteRange(setting.range, model));
}
}
}
}
}
return notMatchesRanges;
}
private containsLine(lineNumber: number, settingsGroup: ISettingsGroup): boolean {
if (settingsGroup.titleRange && lineNumber >= settingsGroup.titleRange.startLineNumber && lineNumber <= settingsGroup.titleRange.endLineNumber) {
return true;
}
for (const section of settingsGroup.sections) {
if (section.titleRange && lineNumber >= section.titleRange.startLineNumber && lineNumber <= section.titleRange.endLineNumber) {
return true;
}
for (const setting of section.settings) {
if (lineNumber >= setting.range.startLineNumber && lineNumber <= setting.range.endLineNumber) {
return true;
}
}
}
return false;
}
private createCompleteRange(range: IRange, model: editorCommon.IModel): IRange {
return {
startLineNumber: range.startLineNumber,
startColumn: model.getLineMinColumn(range.startLineNumber),
endLineNumber: range.endLineNumber,
endColumn: model.getLineMaxColumn(range.endLineNumber)
};
}
public dispose() {
if (this.decorationIds) {
this.decorationIds = this.editor.changeDecorations(changeAccessor => {
return changeAccessor.deltaDecorations(this.decorationIds, []);
});
}
super.dispose();
}
}
export class HighlightMatchesRenderer extends Disposable {
private decorationIds: string[] = [];
constructor(private editor: ICodeEditor,
@IInstantiationService private instantiationService: IInstantiationService
) {
super();
}
public render(matches: IRange[]): void {
const model = this.editor.getModel();
this.editor.changeDecorations(changeAccessor => {
this.decorationIds = changeAccessor.deltaDecorations(this.decorationIds, []);
});
if (matches.length) {
this.editor.changeDecorations(changeAccessor => {
this.decorationIds = changeAccessor.deltaDecorations(this.decorationIds, matches.map(match => this.createDecoration(match, model)));
});
}
}
private static _FIND_MATCH = ModelDecorationOptions.register({
stickiness: editorCommon.TrackedRangeStickiness.NeverGrowsWhenTypingAtEdges,
className: 'findMatch'
});
private createDecoration(range: IRange, model: editorCommon.IModel): editorCommon.IModelDeltaDecoration {
return {
range,
options: HighlightMatchesRenderer._FIND_MATCH
};
}
public dispose() {
if (this.decorationIds) {
this.decorationIds = this.editor.changeDecorations(changeAccessor => {
return changeAccessor.deltaDecorations(this.decorationIds, []);
});
}
super.dispose();
}
}
class EditSettingRenderer extends Disposable {
private editPreferenceWidgetForCusorPosition: EditPreferenceWidget<ISetting>;
private editPreferenceWidgetForMouseMove: EditPreferenceWidget<ISetting>;
private settingsGroups: ISettingsGroup[];
public associatedPreferencesModel: IPreferencesEditorModel<ISetting>;
private toggleEditPreferencesForMouseMoveDelayer: Delayer<void>;
private _onUpdateSetting: Emitter<{ key: string, value: any, source: ISetting }> = new Emitter<{ key: string, value: any, source: ISetting }>();
public readonly onUpdateSetting: Event<{ key: string, value: any, source: ISetting }> = this._onUpdateSetting.event;
constructor(private editor: ICodeEditor, private masterSettingsModel: ISettingsEditorModel,
private settingHighlighter: SettingHighlighter,
@IPreferencesService private preferencesService: IPreferencesService,
@IInstantiationService private instantiationService: IInstantiationService,
@IContextMenuService private contextMenuService: IContextMenuService
) {
super();
this.editPreferenceWidgetForCusorPosition = this._register(this.instantiationService.createInstance(EditPreferenceWidget, editor));
this.editPreferenceWidgetForMouseMove = this._register(this.instantiationService.createInstance(EditPreferenceWidget, editor));
this.toggleEditPreferencesForMouseMoveDelayer = new Delayer<void>(75);
this._register(this.editPreferenceWidgetForCusorPosition.onClick(e => this.onEditSettingClicked(this.editPreferenceWidgetForCusorPosition, e)));
this._register(this.editPreferenceWidgetForMouseMove.onClick(e => this.onEditSettingClicked(this.editPreferenceWidgetForMouseMove, e)));
this._register(this.editor.onDidChangeCursorPosition(positionChangeEvent => this.onPositionChanged(positionChangeEvent)));
this._register(this.editor.onMouseMove(mouseMoveEvent => this.onMouseMoved(mouseMoveEvent)));
this._register(this.editor.onDidChangeConfiguration(() => this.onConfigurationChanged()));
}
public render(settingsGroups: ISettingsGroup[], associatedPreferencesModel: IPreferencesEditorModel<ISetting>): void {
this.editPreferenceWidgetForCusorPosition.hide();
this.editPreferenceWidgetForMouseMove.hide();
this.settingsGroups = settingsGroups;
this.associatedPreferencesModel = associatedPreferencesModel;
const settings = this.getSettings(this.editor.getPosition().lineNumber);
if (settings.length) {
this.showEditPreferencesWidget(this.editPreferenceWidgetForCusorPosition, settings);
}
}
private isDefaultSettings(): boolean {
return this.masterSettingsModel instanceof DefaultSettingsEditorModel;
}
private onConfigurationChanged(): void {
if (!this.editor.getConfiguration().viewInfo.glyphMargin) {
this.editPreferenceWidgetForCusorPosition.hide();
this.editPreferenceWidgetForMouseMove.hide();
}
}
private onPositionChanged(positionChangeEvent: ICursorPositionChangedEvent) {
this.editPreferenceWidgetForMouseMove.hide();
const settings = this.getSettings(positionChangeEvent.position.lineNumber);
if (settings.length) {
this.showEditPreferencesWidget(this.editPreferenceWidgetForCusorPosition, settings);
} else {
this.editPreferenceWidgetForCusorPosition.hide();
}
}
private onMouseMoved(mouseMoveEvent: IEditorMouseEvent): void {
const editPreferenceWidget = this.getEditPreferenceWidgetUnderMouse(mouseMoveEvent);
if (editPreferenceWidget) {
this.onMouseOver(editPreferenceWidget);
return;
}
this.settingHighlighter.clear();
this.toggleEditPreferencesForMouseMoveDelayer.trigger(() => this.toggleEidtPreferenceWidgetForMouseMove(mouseMoveEvent));
}
private getEditPreferenceWidgetUnderMouse(mouseMoveEvent: IEditorMouseEvent): EditPreferenceWidget<ISetting> {
if (mouseMoveEvent.target.type === MouseTargetType.GUTTER_GLYPH_MARGIN) {
const line = mouseMoveEvent.target.position.lineNumber;
if (this.editPreferenceWidgetForMouseMove.getLine() === line && this.editPreferenceWidgetForMouseMove.isVisible()) {
return this.editPreferenceWidgetForMouseMove;
}
if (this.editPreferenceWidgetForCusorPosition.getLine() === line && this.editPreferenceWidgetForCusorPosition.isVisible()) {
return this.editPreferenceWidgetForCusorPosition;
}
}
return null;
}
private toggleEidtPreferenceWidgetForMouseMove(mouseMoveEvent: IEditorMouseEvent): void {
const settings = mouseMoveEvent.target.position ? this.getSettings(mouseMoveEvent.target.position.lineNumber) : null;
if (settings && settings.length) {
this.showEditPreferencesWidget(this.editPreferenceWidgetForMouseMove, settings);
} else {
this.editPreferenceWidgetForMouseMove.hide();
}
}
private showEditPreferencesWidget(editPreferencesWidget: EditPreferenceWidget<ISetting>, settings: ISetting[]) {
const line = settings[0].valueRange.startLineNumber;
if (this.editor.getConfiguration().viewInfo.glyphMargin && this.marginFreeFromOtherDecorations(line)) {
editPreferencesWidget.show(line, nls.localize('editTtile', "Edit"), settings);
const editPreferenceWidgetToHide = editPreferencesWidget === this.editPreferenceWidgetForCusorPosition ? this.editPreferenceWidgetForMouseMove : this.editPreferenceWidgetForCusorPosition;
editPreferenceWidgetToHide.hide();
}
}
private marginFreeFromOtherDecorations(line: number): boolean {
const decorations = this.editor.getLineDecorations(line);
if (decorations) {
for (const { options } of decorations) {
if (options.glyphMarginClassName && options.glyphMarginClassName.indexOf(EditPreferenceWidget.GLYPH_MARGIN_CLASS_NAME) === -1) {
return false;
}
}
}
return true;
}
private getSettings(lineNumber: number): ISetting[] {
const configurationMap = this.getConfigurationsMap();
return this.getSettingsAtLineNumber(lineNumber).filter(setting => {
let configurationNode = configurationMap[setting.key];
if (configurationNode) {
if (this.isDefaultSettings()) {
return true;
}
if (configurationNode.type === 'boolean' || configurationNode.enum) {
if ((<SettingsEditorModel>this.masterSettingsModel).configurationTarget !== ConfigurationTarget.FOLDER) {
return true;
}
if (configurationNode.scope === ConfigurationScope.RESOURCE) {
return true;
}
}
}
return false;
});
}
private getSettingsAtLineNumber(lineNumber: number): ISetting[] {
const settings = [];
for (const group of this.settingsGroups) {
if (group.range.startLineNumber > lineNumber) {
break;
}
if (lineNumber >= group.range.startLineNumber && lineNumber <= group.range.endLineNumber) {
for (const section of group.sections) {
for (const setting of section.settings) {
if (setting.range.startLineNumber > lineNumber) {
break;
}
if (lineNumber >= setting.range.startLineNumber && lineNumber <= setting.range.endLineNumber) {
if (!this.isDefaultSettings() && setting.overrides.length) {
// Only one level because override settings cannot have override settings
for (const overrideSetting of setting.overrides) {
if (lineNumber >= overrideSetting.range.startLineNumber && lineNumber <= overrideSetting.range.endLineNumber) {
settings.push(overrideSetting);
}
}
} else {
settings.push(setting);
}
}
}
}
}
}
return settings;
}
private onMouseOver(editPreferenceWidget: EditPreferenceWidget<ISetting>): void {
this.settingHighlighter.highlight(editPreferenceWidget.preferences[0]);
}
private onEditSettingClicked(editPreferenceWidget: EditPreferenceWidget<ISetting>, e: IEditorMouseEvent): void {
const anchor = { x: e.event.posx, y: e.event.posy + 10 };
const actions = this.getSettings(editPreferenceWidget.getLine()).length === 1 ? this.getActions(editPreferenceWidget.preferences[0], this.getConfigurationsMap()[editPreferenceWidget.preferences[0].key])
: editPreferenceWidget.preferences.map(setting => new ContextSubMenu(setting.key, this.getActions(setting, this.getConfigurationsMap()[setting.key])));
this.contextMenuService.showContextMenu({
getAnchor: () => anchor,
getActions: () => TPromise.wrap(actions)
});
}
private getConfigurationsMap(): { [qualifiedKey: string]: IConfigurationPropertySchema } {
return Registry.as<IConfigurationRegistry>(ConfigurationExtensions.Configuration).getConfigurationProperties();
}
private getActions(setting: ISetting, jsonSchema: IJSONSchema): IAction[] {
if (jsonSchema.type === 'boolean') {
return [<IAction>{
id: 'truthyValue',
label: 'true',
enabled: true,
run: () => this.updateSetting(setting.key, true, setting)
}, <IAction>{
id: 'falsyValue',
label: 'false',
enabled: true,
run: () => this.updateSetting(setting.key, false, setting)
}];
}
if (jsonSchema.enum) {
return jsonSchema.enum.map(value => {
return <IAction>{
id: value,
label: JSON.stringify(value),
enabled: true,
run: () => this.updateSetting(setting.key, value, setting)
};
});
}
return this.getDefaultActions(setting);
}
private getDefaultActions(setting: ISetting): IAction[] {
const settingInOtherModel = this.associatedPreferencesModel.getPreference(setting.key);
if (this.isDefaultSettings()) {
return [<IAction>{
id: 'setDefaultValue',
label: settingInOtherModel ? nls.localize('replaceDefaultValue', "Replace in Settings") : nls.localize('copyDefaultValue', "Copy to Settings"),
enabled: true,
run: () => this.updateSetting(setting.key, setting.value, setting)
}];
}
return [];
}
private updateSetting(key: string, value: any, source: ISetting): void {
this._onUpdateSetting.fire({ key, value, source });
}
}
class SettingHighlighter extends Disposable {
private fixedHighlighter: RangeHighlightDecorations;
private volatileHighlighter: RangeHighlightDecorations;
private highlightedSetting: ISetting;
constructor(private editor: editorCommon.ICommonCodeEditor, private focusEventEmitter: Emitter<ISetting>, private clearFocusEventEmitter: Emitter<ISetting>,
@IInstantiationService instantiationService: IInstantiationService
) {
super();
this.fixedHighlighter = this._register(instantiationService.createInstance(RangeHighlightDecorations));
this.volatileHighlighter = this._register(instantiationService.createInstance(RangeHighlightDecorations));
this.fixedHighlighter.onHighlghtRemoved(() => this.clearFocusEventEmitter.fire(this.highlightedSetting));
this.volatileHighlighter.onHighlghtRemoved(() => this.clearFocusEventEmitter.fire(this.highlightedSetting));
}
highlight(setting: ISetting, fix: boolean = false) {
this.highlightedSetting = setting;
this.volatileHighlighter.removeHighlightRange();
this.fixedHighlighter.removeHighlightRange();
const highlighter = fix ? this.fixedHighlighter : this.volatileHighlighter;
highlighter.highlightRange({
range: setting.valueRange,
resource: this.editor.getModel().uri
}, this.editor);
this.editor.revealLineInCenterIfOutsideViewport(setting.valueRange.startLineNumber, editorCommon.ScrollType.Smooth);
this.focusEventEmitter.fire(setting);
}
clear(fix: boolean = false): void {
this.volatileHighlighter.removeHighlightRange();
if (fix) {
this.fixedHighlighter.removeHighlightRange();
}
this.clearFocusEventEmitter.fire(this.highlightedSetting);
}
}
class UnsupportedWorkspaceSettingsRenderer extends Disposable {
constructor(private editor: editorCommon.ICommonCodeEditor, private workspaceSettingsEditorModel: SettingsEditorModel,
@IWorkspaceConfigurationService private configurationService: IWorkspaceConfigurationService,
@IMarkerService private markerService: IMarkerService
) {
super();
this._register(this.configurationService.onDidUpdateConfiguration(() => this.render()));
}
private getMarkerMessage(settingKey: string): string {
switch (settingKey) {
case 'php.validate.executablePath':
return nls.localize('unsupportedPHPExecutablePathSetting', "This setting must be a User Setting. To configure PHP for the workspace, open a PHP file and click on 'PHP Path' in the status bar.");
default:
return nls.localize('unsupportedWorkspaceSetting', "This setting must be a User Setting.");
}
}
public render(): void {
const unsupportedWorkspaceKeys = this.configurationService.getUnsupportedWorkspaceKeys();
if (unsupportedWorkspaceKeys.length) {
const markerData: IMarkerData[] = [];
for (const unsupportedKey of unsupportedWorkspaceKeys) {<|fim▁hole|> if (setting) {
markerData.push({
severity: Severity.Warning,
startLineNumber: setting.keyRange.startLineNumber,
startColumn: setting.keyRange.startColumn,
endLineNumber: setting.keyRange.endLineNumber,
endColumn: setting.keyRange.endColumn,
message: this.getMarkerMessage(unsupportedKey)
});
}
}
if (markerData.length) {
this.markerService.changeOne('preferencesEditor', this.workspaceSettingsEditorModel.uri, markerData);
} else {
this.markerService.remove('preferencesEditor', [this.workspaceSettingsEditorModel.uri]);
}
}
}
public dispose(): void {
this.markerService.remove('preferencesEditor', [this.workspaceSettingsEditorModel.uri]);
super.dispose();
}
}
class UnsupportedWorkbenchSettingsRenderer extends Disposable {
private decorationIds: string[] = [];
private renderingDelayer: Delayer<void> = new Delayer<void>(200);
constructor(private editor: editorCommon.ICommonCodeEditor, private workspaceSettingsEditorModel: SettingsEditorModel,
@IWorkspaceConfigurationService private configurationService: IWorkspaceConfigurationService,
) {
super();
this._register(this.editor.getModel().onDidChangeContent(() => this.renderingDelayer.trigger(() => this.render())));
}
public render(): void {
const ranges: IRange[] = [];
const configurationRegistry = Registry.as<IConfigurationRegistry>(ConfigurationExtensions.Configuration).getConfigurationProperties();
for (const settingsGroup of this.workspaceSettingsEditorModel.settingsGroups) {
for (const section of settingsGroup.sections) {
for (const setting of section.settings) {
if (configurationRegistry[setting.key] && configurationRegistry[setting.key].scope === ConfigurationScope.WINDOW) {
ranges.push({
startLineNumber: setting.keyRange.startLineNumber,
startColumn: setting.keyRange.startColumn - 1,
endLineNumber: setting.valueRange.endLineNumber,
endColumn: setting.valueRange.endColumn
});
}
}
}
}
this.editor.changeDecorations(changeAccessor => this.decorationIds = changeAccessor.deltaDecorations(this.decorationIds, ranges.map(range => this.createDecoration(range, this.editor.getModel()))));
}
private static _DIM_CONFIGUARATION_ = ModelDecorationOptions.register({
stickiness: editorCommon.TrackedRangeStickiness.NeverGrowsWhenTypingAtEdges,
inlineClassName: 'dim-configuration',
beforeContentClassName: 'unsupportedWorkbenhSettingInfo',
hoverMessage: new MarkdownString().appendText(nls.localize('unsupportedWorkbenchSetting', "This setting cannot be applied now. It will be applied when you open this folder directly."))
});
private createDecoration(range: IRange, model: editorCommon.IModel): editorCommon.IModelDeltaDecoration {
return {
range,
options: UnsupportedWorkbenchSettingsRenderer._DIM_CONFIGUARATION_
};
}
public dispose(): void {
if (this.decorationIds) {
this.decorationIds = this.editor.changeDecorations(changeAccessor => {
return changeAccessor.deltaDecorations(this.decorationIds, []);
});
}
super.dispose();
}
}
class WorkspaceConfigurationRenderer extends Disposable {
private decorationIds: string[] = [];
private renderingDelayer: Delayer<void> = new Delayer<void>(200);
constructor(private editor: editorCommon.ICommonCodeEditor, private workspaceSettingsEditorModel: SettingsEditorModel,
@IWorkspaceContextService private workspaceContextService: IWorkspaceContextService
) {
super();
this._register(this.editor.getModel().onDidChangeContent(() => this.renderingDelayer.trigger(() => this.render())));
}
public render(): void {
if (this.workspaceContextService.getWorkbenchState() === WorkbenchState.WORKSPACE) {
this.editor.changeDecorations(changeAccessor => this.decorationIds = changeAccessor.deltaDecorations(this.decorationIds, []));
const ranges: IRange[] = [];
for (const settingsGroup of this.workspaceSettingsEditorModel.settingsGroups) {
for (const section of settingsGroup.sections) {
for (const setting of section.settings) {
if (setting.key !== 'settings') {
ranges.push({
startLineNumber: setting.keyRange.startLineNumber,
startColumn: setting.keyRange.startColumn - 1,
endLineNumber: setting.valueRange.endLineNumber,
endColumn: setting.valueRange.endColumn
});
}
}
}
}
this.editor.changeDecorations(changeAccessor => this.decorationIds = changeAccessor.deltaDecorations(this.decorationIds, ranges.map(range => this.createDecoration(range, this.editor.getModel()))));
}
}
private static _DIM_CONFIGURATION_ = ModelDecorationOptions.register({
stickiness: editorCommon.TrackedRangeStickiness.NeverGrowsWhenTypingAtEdges,
inlineClassName: 'dim-configuration'
});
private createDecoration(range: IRange, model: editorCommon.IModel): editorCommon.IModelDeltaDecoration {
return {
range,
options: WorkspaceConfigurationRenderer._DIM_CONFIGURATION_
};
}
public dispose(): void {
if (this.decorationIds) {
this.decorationIds = this.editor.changeDecorations(changeAccessor => {
return changeAccessor.deltaDecorations(this.decorationIds, []);
});
}
super.dispose();
}
}<|fim▁end|> | const setting = this.workspaceSettingsEditorModel.getPreference(unsupportedKey); |
<|file_name|>SyncSet.py<|end_file_name|><|fim▁begin|>"""
Represents a group of conduits
Copyright: John Stowers, 2007
License: GPLv2
"""
import traceback
import os
import xml.dom.minidom
import gobject
import logging
log = logging.getLogger("SyncSet")
import conduit
import conduit.Conduit as Conduit
import conduit.Settings as Settings
import conduit.XMLSerialization as XMLSerialization
SETTINGS_VERSION = XMLSerialization.Settings.XML_VERSION
class SyncSet(gobject.GObject):
"""
Represents a group of conduits
"""
__gsignals__ = {
#Fired when a new instantiatable DP becomes available. It is described via
#a wrapper because we do not actually instantiate it till later - to save memory
"conduit-added" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [
gobject.TYPE_PYOBJECT]), # The ConduitModel that was added
"conduit-removed" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [
gobject.TYPE_PYOBJECT]), # The ConduitModel that was removed
}
def __init__(self, moduleManager, syncManager, xmlSettingFilePath="settings.xml"):
gobject.GObject.__init__(self)
self.moduleManager = moduleManager
self.syncManager = syncManager
self.xmlSettingFilePath = xmlSettingFilePath
self.conduits = []
self.moduleManager.connect("dataprovider-available", self.on_dataprovider_available_unavailable)
self.moduleManager.connect("dataprovider-unavailable", self.on_dataprovider_available_unavailable)
# FIXME: temporary hack - need to let factories know about this factory :-\!
self.moduleManager.emit("syncset-added", self)
def _restore_dataprovider(self, cond, wrapperKey, dpName="", dpxml="", trySourceFirst=True):
"""
Adds the dataprovider back onto the canvas at the specifed
location and configures it with the given settings
"""
log.debug("Restoring %s to (source=%s)" % (wrapperKey,trySourceFirst))
wrapper = self.moduleManager.get_module_wrapper_with_instance(wrapperKey)
if dpName:
wrapper.set_name(dpName)
if wrapper is not None:
if dpxml:
for i in dpxml.childNodes:
if i.nodeType == i.ELEMENT_NODE and i.localName == "configuration":
wrapper.set_configuration_xml(xmltext=i.toxml())
cond.add_dataprovider(wrapper, trySourceFirst)
def on_dataprovider_available_unavailable(self, loader, dpw):
"""
Removes all PendingWrappers corresponding to dpw and replaces with new dpw instances
"""
key = dpw.get_key()
for c in self.get_all_conduits():
for dp in c.get_dataproviders_by_key(key):
new = self.moduleManager.get_module_wrapper_with_instance(key)
#retain configuration information
new.set_configuration_xml(dp.get_configuration_xml())
new.set_name(dp.get_name())
c.change_dataprovider(
oldDpw=dp,
newDpw=new
)
def emit(self, *args):
"""
Override the gobject signal emission so that all signals are emitted
from the main loop on an idle handler
"""
gobject.idle_add(gobject.GObject.emit,self,*args)
def create_preconfigured_conduit(self, sourceKey, sinkKey, twoway):
cond = Conduit.Conduit(self.syncManager)
self.add_conduit(cond)
if twoway == True:
cond.enable_two_way_sync()
self._restore_dataprovider(cond, sourceKey, trySourceFirst=True)
self._restore_dataprovider(cond, sinkKey, trySourceFirst=False)
def add_conduit(self, cond):
self.conduits.append(cond)
self.emit("conduit-added", cond)
def remove_conduit(self, cond):
self.emit("conduit-removed", cond)
cond.quit()
self.conduits.remove(cond)
def get_all_conduits(self):
return self.conduits
def get_conduit(self, index):
return self.conduits[index]
def index (self, conduit):
return self.conduits.index(conduit)
def num_conduits(self):
return len(self.conduits)
def clear(self):
for c in self.conduits[:]:
self.remove_conduit(c)
def save_to_xml(self, xmlSettingFilePath=None):
"""
Saves the synchronisation settings (icluding all dataproviders and how
they are connected) to an xml file so that the 'sync set' can
be restored later
"""
if xmlSettingFilePath == None:
xmlSettingFilePath = self.xmlSettingFilePath
log.info("Saving Sync Set to %s" % self.xmlSettingFilePath)
#Build the application settings xml document
doc = xml.dom.minidom.Document()
rootxml = doc.createElement("conduit-application")
rootxml.setAttribute("application-version", conduit.VERSION)
rootxml.setAttribute("settings-version", SETTINGS_VERSION)
doc.appendChild(rootxml)
#Store the conduits
for cond in self.conduits:
conduitxml = doc.createElement("conduit")
conduitxml.setAttribute("uid",cond.uid)
conduitxml.setAttribute("twoway",str(cond.is_two_way()))
conduitxml.setAttribute("autosync",str(cond.do_auto_sync()))<|fim▁hole|> cond.get_policy(policyName)
)
rootxml.appendChild(conduitxml)
#Store the source
source = cond.datasource
if source is not None:
sourcexml = doc.createElement("datasource")
sourcexml.setAttribute("key", source.get_key())
sourcexml.setAttribute("name", source.get_name())
conduitxml.appendChild(sourcexml)
#Store source settings
configxml = xml.dom.minidom.parseString(source.get_configuration_xml())
sourcexml.appendChild(configxml.documentElement)
#Store all sinks
sinksxml = doc.createElement("datasinks")
for sink in cond.datasinks:
sinkxml = doc.createElement("datasink")
sinkxml.setAttribute("key", sink.get_key())
sinkxml.setAttribute("name", sink.get_name())
sinksxml.appendChild(sinkxml)
#Store sink settings
configxml = xml.dom.minidom.parseString(sink.get_configuration_xml())
sinkxml.appendChild(configxml.documentElement)
conduitxml.appendChild(sinksxml)
#Save to disk
try:
file_object = open(xmlSettingFilePath, "w")
file_object.write(doc.toxml())
#file_object.write(doc.toprettyxml())
file_object.close()
except IOError, err:
log.warn("Could not save settings to %s (Error: %s)" % (xmlSettingFilePath, err.strerror))
def restore_from_xml(self, xmlSettingFilePath=None):
"""
Restores sync settings from the xml file
"""
if xmlSettingFilePath == None:
xmlSettingFilePath = self.xmlSettingFilePath
log.info("Restoring Sync Set from %s" % xmlSettingFilePath)
#Check the file exists
if not os.path.isfile(xmlSettingFilePath):
log.info("%s not present" % xmlSettingFilePath)
return
try:
#Open
doc = xml.dom.minidom.parse(xmlSettingFilePath)
#check the xml file is in a version we can read.
if doc.documentElement.hasAttribute("settings-version"):
xml_version = doc.documentElement.getAttribute("settings-version")
try:
xml_version = int(xml_version)
except ValueError, TypeError:
log.error("%s xml file version is not valid" % xmlSettingFilePath)
os.remove(xmlSettingFilePath)
return
if int(SETTINGS_VERSION) < xml_version:
log.warning("%s xml file is incorrect version" % xmlSettingFilePath)
os.remove(xmlSettingFilePath)
return
else:
log.info("%s xml file version not found, assuming too old, removing" % xmlSettingFilePath)
os.remove(xmlSettingFilePath)
return
#Parse...
for conds in doc.getElementsByTagName("conduit"):
#create a new conduit
cond = Conduit.Conduit(self.syncManager, conds.getAttribute("uid"))
self.add_conduit(cond)
#restore conduit specific settings
twoway = Settings.string_to_bool(conds.getAttribute("twoway"))
if twoway == True:
cond.enable_two_way_sync()
auto = Settings.string_to_bool(conds.getAttribute("autosync"))
if auto == True:
cond.enable_auto_sync()
for policyName in Conduit.CONFLICT_POLICY_NAMES:
cond.set_policy(
policyName,
conds.getAttribute("%s_policy" % policyName)
)
#each dataprovider
for i in conds.childNodes:
#keep a ref to the dataproider was added to so that we
#can apply settings to it at the end
#one datasource
if i.nodeType == i.ELEMENT_NODE and i.localName == "datasource":
key = i.getAttribute("key")
name = i.getAttribute("name")
#add to canvas
if len(key) > 0:
self._restore_dataprovider(cond, key, name, i, True)
#many datasinks
elif i.nodeType == i.ELEMENT_NODE and i.localName == "datasinks":
#each datasink
for sink in i.childNodes:
if sink.nodeType == sink.ELEMENT_NODE and sink.localName == "datasink":
key = sink.getAttribute("key")
name = sink.getAttribute("name")
#add to canvas
if len(key) > 0:
self._restore_dataprovider(cond, key, name, sink, False)
except:
log.warn("Error parsing %s. Exception:\n%s" % (xmlSettingFilePath, traceback.format_exc()))
os.remove(xmlSettingFilePath)
def quit(self):
"""
Calls unitialize on all dataproviders
"""
for c in self.conduits:
c.quit()<|fim▁end|> | for policyName in Conduit.CONFLICT_POLICY_NAMES:
conduitxml.setAttribute(
"%s_policy" % policyName, |
<|file_name|>api.py<|end_file_name|><|fim▁begin|>import itertools
import json
import re
import flask
from flask import request
from web.cache import cache
import rethinkdb as r
import web.api.api_util as api_util
import db<|fim▁hole|>
api = flask.Blueprint("api", __name__, url_prefix="/api")
r_conn = db.util.r_conn
def _should_skip_get_plugins_cache():
"""Whether the current request to /api/plugins should not be cached."""
page = int(request.args.get('page', 1))
search = request.args.get('query', '')
# Only cache empty searches for now.
# TODO(david): Also cache simple category and tag searches. May also want
# to actually use a proper cache backend like Redis so we can
# arbitrarily cache (right now we use an in-memory cache).
should_cache = search == '' and (1 <= page <= 10)
return not should_cache
def _make_get_plugins_cache_key():
"""Get a cache key for the /api/plugins route.
By default this is just request.path which ignores query params.
"""
page = int(request.args.get('page', 1))
search = request.args.get('query', '')
return '%s_%s_%s' % (request.path, page, search)
# TODO(david): Consider saving categories just as special tags. Would make
# search implementation simpler but determining which category a plugin
# belongs to harder. See discussion on
# http://phabricator.benalpert.com/D171
def _apply_category_filters(results, tokens):
"""Consumes and applies category filters (e.g. "cat:other") to results.
Arguments:
results: List of search result plugins.
tokens: Remaining search text tokens that have not been consumed.
Returns:
(results, tokens): Results that match the given category, and tokens
that have not been consumed.
"""
category_filter = lambda t: t.startswith('cat:')
category_tokens = filter(category_filter, tokens)
tokens = list(itertools.ifilterfalse(category_filter, tokens))
if category_tokens:
category_ids = set(t[len('cat:'):] for t in category_tokens)
results = filter(lambda plugin:
plugin['category'] in category_ids, results)
return results, tokens
def _apply_tag_filters(results, tokens):
"""Consumes and applies tag filters (e.g. "tag:python") to search results.
Arguments:
results: List of search result plugins.
tokens: Remaining search text tokens that have not been consumed.
Returns:
(results, tokens): Results that match the given tag, and tokens
that have not been consumed.
"""
tag_filter = lambda t: t.startswith('tag:')
tag_tokens = filter(tag_filter, tokens)
tokens = list(itertools.ifilterfalse(tag_filter, tokens))
if tag_tokens:
required_tags = set(t[len('tag:'):] for t in tag_tokens)
results = filter(lambda plugin:
required_tags <= set(plugin['tags']), results)
return results, tokens
def _apply_keyword_filters(results, tokens):
"""Filters results that match the given keywords (tokens).
Arguments:
results: List of search result plugins.
tokens: Keywords to filter results on.
Returns:
List of plugins that match the given keywords.
"""
if tokens:
# Create a regex that matches a string S iff for each keyword K in
# `search` there is a corresponding word in S that begins with K.
tokens_regex = (r'\b%s' % re.escape(t) for t in tokens)
search_regex = re.compile('.*'.join(tokens_regex))
# Surprisingly, regex matching like this is slightly faster than
# prefix-matching two sorted lists of tokens.
results = filter(lambda plugin:
search_regex.search(plugin['keywords']), results)
return results
@api.route('/plugins', methods=['GET'])
@cache.cached(timeout=60 * 60 * 25, key_prefix=_make_get_plugins_cache_key,
unless=_should_skip_get_plugins_cache)
def get_plugins():
RESULTS_PER_PAGE = 20
page = int(request.args.get('page', 1))
search = request.args.get('query', '')
results = get_search_index_cached()
if search:
tokens = [t.lower() for t in sorted(search.split())]
results, tokens = _apply_category_filters(results, tokens)
results, tokens = _apply_tag_filters(results, tokens)
results = _apply_keyword_filters(results, tokens)
count = len(results)
total_pages = (count + RESULTS_PER_PAGE - 1) / RESULTS_PER_PAGE # ceil
results = results[((page - 1) * RESULTS_PER_PAGE):
(page * RESULTS_PER_PAGE)]
return api_util.jsonify({
'plugins': results,
'total_pages': total_pages,
'total_results': count,
'results_per_page': RESULTS_PER_PAGE,
})
@api.route('/plugins/<slug>', methods=['GET'])
def get_plugin(slug):
plugin = r.table('plugins').get(slug).run(r_conn())
if plugin:
return api_util.jsonify(db.plugins.to_json(plugin))
else:
return api_util.api_not_found('No plugin with slug %s' % slug)
# TODO(david): Make it not so easy for an attacker to completely obliterate all
# of our tags, or at least be able to recover from it.
@api.route('/plugins/<slug>/tags', methods=['POST', 'PUT'])
def update_plugin_tags(slug):
data = json.loads(flask.request.data)
plugin = r.table('plugins').get(slug).run(r_conn())
if not plugin:
return api_util.api_not_found('No plugin with slug %s' % slug)
db.plugins.update_tags(plugin, data['tags'])
r.table('plugins').update(plugin).run(r_conn())
return api_util.jsonify({
'tags': plugin['tags']
})
@api.route('/tags', methods=['GET'])
@cache.cached(timeout=60 * 60)
def get_tags():
tags = r.table('tags').filter({}).run(r_conn())
return api_util.jsonify(list(tags))
@api.route('/categories', methods=['GET'])
@cache.cached(timeout=60 * 60)
def get_categories():
return api_util.jsonify(get_all_categories_cached())
@api.route('/plugins/<slug>/category/<category>', methods=['PUT'])
def update_plugin_category(slug, category):
plugin = r.table('plugins').get(slug).run(r_conn())
if not plugin:
return api_util.api_not_found('No plugin with slug %s' % slug)
if category not in (c['id'] for c in get_all_categories_cached()):
return api_util.api_bad_request('No such category %s' % category)
# TODO(david): Also update search index (stale cache)
plugin['category'] = category
r.table('plugins').update(plugin).run(r_conn())
return api_util.jsonify({
'category': plugin['category']
})
@api.route('/submit', methods=['POST'])
def submit_plugin():
plugin_data = flask.request.form.to_dict()
plugin_data['tags'] = json.loads(plugin_data['tags'])
db.submitted_plugins.insert(plugin_data)
plugin_markdown = "```\n%s\n```" % json.dumps(plugin_data, indent=4)
util.log_to_gitter("Someone just submitted a plugin!\n%s" % plugin_markdown)
return flask.redirect('/thanks-for-submitting')
@cache.cached(timeout=60 * 60 * 26, key_prefix='search_index')
def get_search_index_cached():
return db.plugins.get_search_index()
@cache.cached(timeout=60 * 60 * 27, key_prefix='all_categories')
def get_all_categories_cached():
return db.categories.get_all()<|fim▁end|> | import util |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>struct User {<|fim▁hole|>}
fn main() {
let user1 = User {
email: "[email protected]",
username: "someusername123",
active: true,
sign_in_count: 1,
};
}<|fim▁end|> | active: bool,
username: &str,
email: &str,
sign_in_count: u64, |
<|file_name|>create.expense.component.ts<|end_file_name|><|fim▁begin|>import { Component } from '@angular/core';
import {ApiService} from "../services/api.service";
@Component({
moduleId: module.id,
selector: 'recharge',
templateUrl: 'create.expense.component.html',
styleUrls: ['./create.expense.component.less']
})
export class ExpenseComponent {<|fim▁hole|> public isSend:boolean = false;
public dataResponse:any;
constructor(private _apiService:ApiService){};
sendExpense(){
let config = {client: {phone: this.phone}, expense: {time: this.time}};
this.isSend = true;
this._apiService.setExpense(config)
.subscribe(res => {
this.isSend = false;
this.dataResponse = res.data;
})
}
}<|fim▁end|> | public phone:any;
public time:any;
|
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>#[cfg(any(feature = "build_stub_miniz", feature = "build_orig_miniz"))]
extern crate cc;
#[cfg(not(any(feature = "build_stub_miniz", feature = "build_orig_miniz")))]<|fim▁hole|>fn main() {
cc::Build::new()
.files(&[
"miniz_stub/miniz.c",
"miniz_stub/miniz_zip.c",
"miniz_stub/miniz_tinfl.c",
"miniz_stub/miniz_tdef.c",
])
.compile("miniz");
}
#[cfg(feature = "build_orig_miniz")]
fn main() {
cc::Build::new().files(&["miniz/miniz.c"]).compile("miniz");
}<|fim▁end|> | fn main() {}
#[cfg(feature = "build_stub_miniz")] |
<|file_name|>GSML1FEC.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright 2008 Free Software Foundation, Inc.
*
* This software is distributed under the terms of the GNU Public License.
* See the COPYING file in the main directory for details.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define XNDEBUG
#include "GSML1FEC.h"
#include "GSMCommon.h"
#include "RxBurst.h"
//#include "GSMSAPMux.h"
//#include "GSMConfig.h"
#include "GSMTDMA.h"
#include "GSM610Tables.h"
#include "GSM660Tables.h"
#include "GSM690Tables.h"
#include "Assert.h"
using namespace std;
using namespace GSM;
/*
Compilation flags:
NOCONTROL Compile without referencing control layer functions.
*/
/*
Notes on reading the GSM specifications.
Every FEC section in GSM 05.03 uses standard names for the bits at
different stages of the encoding/decoding process.
This is all described formally in GSM 05.03 2.2.
"d" -- data bits. The actual payloads from L2 and the vocoders.
"p" -- parity bits. These are calculated from d.
"u" -- uncoded bits. A concatenation of d, p and inner tail bits.
"c" -- coded bits. These are the convolutionally encoded from u.
"i" -- interleaved bits. These are the output of the interleaver.
"e" -- "encrypted" bits. These are the channel bits in the radio bursts.
The "e" bits are call "encrypted" even when encryption is not used.
The encoding process is:
L2 -> d -> -> calc p -> u -> c -> i -> e -> radio bursts
The decoding process is:
radio bursts -> e -> i -> c -> u -> check p -> d -> L2
Bit ordering in d is LSB-first in each octet.
Bit ordering everywhere else in the OpenBTS code is MSB-first
in every field to give contiguous fields across byte boundaries.
We use the BitVector::LSB8MSB() method to translate.
*/
TCHFACCHL1Decoder::TCHFACCHL1Decoder(const TDMAMapping& wMapping)
: mTCHU(189), mTCHD(260), mC(456),
mClass1_c(mC.head(378)), mClass1A_d(mTCHD.head(50)), mClass2_c(mC.segment(378, 78)),
mTCHParity(0x0b, 3, 50), mMapping(wMapping), mMode(MODE_SPEECH_FR)
{
for (int i = 0; i < 8; i++) {
mI[i] = SoftVector(114);
}
}
void TCHFACCHL1Decoder::writeLowSide(const RxBurst& inBurst)
{
OBJDCOUT("TCHFACCHL1Decoder::writeLowSide " << inBurst);
// If the channel is closed, ignore the burst.
// if (!active()) {
// OBJDCOUT("TCHFACCHL1Decoder::writeLowSide not active, ignoring input");
// return;
// }
processBurst(inBurst);
}
bool TCHFACCHL1Decoder::processBurst( const RxBurst& inBurst)
{
// Accept the burst into the deinterleaving buffer.
// Return true if we are ready to interleave.
// TODO -- One quick test of burst validity is to look at the tail bits.
// We could do that as a double-check against putting garbage into
// the interleaver or accepting bad parameters.
// Get the physical parameters of the burst.
// RSSI is dB wrt full scale.
// mRSSI = inBurst.RSSI();
// Timing error is a float in symbol intervals.
// mTimingError = inBurst.timingError();
// This flag is used as a half-ass semaphore.
// It is cleared when the new value is read.
// mPhyNew = true;
// The reverse index runs 0..3 as the bursts arrive.
// It is the "B" index of GSM 05.03 3.1.3 and 3.1.4.
int B = mMapping.reverseMapping(inBurst.time().FN()) % 8;
// A negative value means that the demux is misconfigured.
assert(B >= 0);
OBJDCOUT("TCHFACCHL1Decoder::processBurst B=" << B << " " << inBurst);
OBJDCOUT("time=" << inBurst.time().FN() << " ts=" << inBurst.time().TN() << "\n");
// Pull the data fields (e-bits) out of the burst and put them into i[B][].
// GSM 05.03 3.1.4
inBurst.data1().copyToSegment(mI[B], 0);
inBurst.data2().copyToSegment(mI[B], 57);
// Every 4th frame is the start of a new block.
// So if this isn't a "4th" frame, return now.
if (B % 4 != 3) return false;
// Deinterleave according to the diagonal "phase" of B.
// See GSM 05.03 3.1.3.
// Deinterleaves i[] to c[]
if (B == 3) deinterleave(4);
else deinterleave(0);
// See if this was the end of a stolen frame, GSM 05.03 4.2.5.
bool stolen = inBurst.Hl();
OBJDCOUT("TCHFACCHL!Decoder::processBurst Hl=" << inBurst.Hl() << " Hu=" << inBurst.Hu());
/* if (stolen) {
if (decode()) {
OBJDCOUT("TCHFACCHL1Decoder::processBurst good FACCH frame");
countGoodFrame();
handleGoodFrame();
} else {
OBJDCOUT("TCHFACCHL1Decoder::processBurst bad FACCH frame");
countBadFrame();
}
}*/
// Always feed the traffic channel, even on a stolen frame.
// decodeTCH will handle the GSM 06.11 bad frmae processing.
bool traffic = decodeTCH(stolen);
// if (traffic) {
OBJDCOUT("TCHFACCHL1Decoder::processBurst good TCH frame");
// countGoodFrame();
// Don't let the channel timeout.
// mLock.lock();
// mT3109.set();
// mLock.unlock();
// }
// else countBadFrame();
return traffic;
}
void TCHFACCHL1Decoder::deinterleave(int blockOffset )
{
OBJDCOUT("TCHFACCHL1Decoder::deinterleave blockOffset=" << blockOffset);
for (int k = 0; k < 456; k++) {
int B = ( k + blockOffset ) % 8;
int j = 2 * ((49 * k) % 57) + ((k % 8) / 4);
mC[k] = mI[B][j];
mI[B][j] = 0.5F;
//OBJDCOUT("deinterleave k="<<k<<" B="<<B<<" j="<<j);
}
}
bool TCHFACCHL1Decoder::decodeTCH(bool stolen)
{
// GSM 05.02 3.1.2, but backwards
// If the frame wasn't stolen, we'll update this with parity later.
bool good = !stolen;
if (!stolen) {
// 3.1.2.2<|fim▁hole|> // 3.1.2.2
// copy class 2 bits c[] to d[]
mClass2_c.sliced().copyToSegment(mTCHD, 182);
//mC.segment(378,78).sliced().copyToSegment(mTCHD,182);
// 3.1.2.1
// copy class 1 bits u[] to d[]
for (unsigned k = 0; k <= 90; k++) {
mTCHD[2*k] = mTCHU[k];
mTCHD[2*k+1] = mTCHU[184-k];
}
// 3.1.2.1
// check parity of class 1A
unsigned sentParity = (~mTCHU.peekField(91, 3)) & 0x07;
//unsigned calcParity = mTCHD.head(50).parity(mTCHParity) & 0x07;
unsigned calcParity = mClass1A_d.parity(mTCHParity) & 0x07;
// 3.1.2.2
// Check the tail bits, too.
unsigned tail = mTCHU.peekField(185, 4);
OBJDCOUT("TCHFACCHL1Decoder::decodeTCH c[]=" << mC);
//OBJDCOUT("TCHFACCHL1Decoder::decodeTCH u[]=" << mTCHU);
OBJDCOUT("TCHFACCHL1Decoder::decodeTCH d[]=" << mTCHD);
OBJDCOUT("TCHFACCHL1Decoder::decodeTCH sentParity=" << sentParity
<< " calcParity=" << calcParity << " tail=" << tail);
good = (sentParity == calcParity) && (tail == 0);
if (good) {
if (mMode == MODE_SPEECH_FR) {
// Undo Um's importance-sorted bit ordering.
// See GSM 05.03 3.1 and Tablee 2.
BitVector payload = mVFrame.payload();
mTCHD.unmap(g610BitOrder, 260, payload);
mVFrame.pack(mPrevGoodFrame);
mPrevGoodFrameLength = 33;
} else if (mMode == MODE_SPEECH_EFR) {
BitVector payload = mVFrameAMR.payload();
BitVector TCHW(260), EFRBits(244);
// Undo Um's EFR bit ordering.
mTCHD.unmap(g660BitOrder, 260, TCHW);
// Remove repeating bits and CRC to get raw EFR frame (244 bits)
for (unsigned k=0; k<71; k++)
EFRBits[k] = TCHW[k] & 1;
for (unsigned k=73; k<123; k++)
EFRBits[k-2] = TCHW[k] & 1;
for (unsigned k=125; k<178; k++)
EFRBits[k-4] = TCHW[k] & 1;
for (unsigned k=180; k<230; k++)
EFRBits[k-6] = TCHW[k] & 1;
for (unsigned k=232; k<252; k++)
EFRBits[k-8] = TCHW[k] & 1;
// Map bits as AMR 12.2k
EFRBits.map(g690_12_2_BitOrder, 244, payload);
// Put the whole frame (hdr + payload)
mVFrameAMR.pack(mPrevGoodFrame);
mPrevGoodFrameLength = 32;
}
return true;
}
}
return false;
}
// vim: ts=4 sw=4<|fim▁end|> | // decode from c[] to u[]
mClass1_c.decode(mVCoder, mTCHU);
//mC.head(378).decode(mVCoder,mTCHU);
|
<|file_name|>render.py<|end_file_name|><|fim▁begin|>import os
import sys
import json
from optional_django import staticfiles
from optional_django.serializers import JSONEncoder
from optional_django.safestring import mark_safe
from optional_django import six
from js_host.function import Function
from js_host.exceptions import FunctionError
from react.render import RenderedComponent
from react.exceptions import ComponentSourceFileNotFound
from react.exceptions import ReactRenderingError
from react_router.conf import settings
from react_router.templates import MOUNT_JS
from react_router.bundle import bundle_component
from webpack.compiler import WebpackBundle
class RouteRenderedComponent(RenderedComponent):
def get_client_asset(self):
client_asset = None
bundled_component = self.get_bundle()
assets = bundled_component.get_assets()
for asset in assets:
if asset['path'] == self.path_to_source:
client_asset = asset
break
return client_asset
def get_var(self):
client_asset = self.get_client_asset()
if client_asset:
return 'client'
raise Exception("Client asset not found.")
def render_js(self):
client_asset = self.get_client_asset()
if client_asset:
client_bundle = mark_safe(WebpackBundle.render_tag(client_asset['url']))
return mark_safe(
'\n{bundle}\n<script>\n{mount_js}\n</script>\n'.format(
bundle=client_bundle,
mount_js=self.render_mount_js(),
)
)
def render_mount_js(self):
return mark_safe(
MOUNT_JS.format(
var=self.get_var(),
props=self.serialized_props or 'null',
container_id=self.get_container_id()
)
)
class RouteRedirect(object):
def __init__(self, pathname, query = None, state = None, *args, **kwargs):
self.path = pathname
self.query = query
if state and 'nextPathname' in state:
self.nextPath = state['nextPathname']
else:
self.nextPath = None
if self.path is None:
raise ReactRenderingError("No path returned for redirection.")
super(RouteRedirect, self).__init__(*args, **kwargs)
@property
def url(self):
if self.query:
return "%s?next=%s&%s" % (self.path, self.nextPath, self.query)
else:
return "%s?next=%s" % (self.path, self.nextPath)
class RouteNotFound(object):
def __init__(self, *args, **kwargs):
super(RouteNotFound, self).__init__(*args, **kwargs)
js_host_function = Function(settings.JS_HOST_FUNCTION)
def render_route(
# Rendering options
path, # path to routes file
client_path, # path to client routes file
request, # pass in request object
props=None,
to_static_markup=None,
# Bundling options
bundle=None,
translate=None,
# Prop handling
json_encoder=None
):
if not os.path.isabs(path):
abs_path = staticfiles.find(path)
if not abs_path:
raise ComponentSourceFileNotFound(path)
path = abs_path
if not os.path.exists(path):
raise ComponentSourceFileNotFound(path)
if not os.path.isabs(client_path):
abs_client_path = staticfiles.find(client_path)
if not abs_client_path:
raise ComponentSourceFileNotFound(client_path)
client_path = abs_client_path
if not os.path.exists(client_path):
raise ComponentSourceFileNotFound(client_path)
bundled_component = None
import re
client_re = re.compile(r"client-(?:\w*\d*).js",re.IGNORECASE)
server_re = re.compile(r"server-(?:\w*\d*).js",re.IGNORECASE)
if bundle or translate:
bundled_component = bundle_component(path, client_path, translate=translate)
assets = bundled_component.get_assets()
for asset in assets:
m = client_re.search(asset['name'])
if m:
client_path = asset['path']
m = server_re.search(asset['name'])
if m:
path = asset['path']
if json_encoder is None:
json_encoder = JSONEncoder
if props is not None:
serialized_props = json.dumps(props, cls=json_encoder)
else:
serialized_props = None
try:
location = {
'pathname': request.path,
'query': request.GET.dict()
}
cbData = json.loads(js_host_function.call(
path=path,
location=location,
serializedProps=serialized_props,
toStaticMarkup=to_static_markup
))
except FunctionError as e:<|fim▁hole|> raise six.reraise(ReactRenderingError, ReactRenderingError(*e.args), sys.exc_info()[2])
if cbData['match']:
return RouteRenderedComponent(cbData['markup'], client_path, props, serialized_props, bundled_component, to_static_markup)
else:
if cbData['redirectInfo']:
return RouteRedirect(**cbData['redirectInfo'])
else:
return RouteNotFound()<|fim▁end|> | |
<|file_name|>boot.rs<|end_file_name|><|fim▁begin|>use arch::device_tree;
use arch::memory;
use klog;
const SERIAL_PORT_ADDRESS: usize = 0x0900_0000;
const DEVICE_TREE_ADDRESS: usize = 0x4000_0000;
// TODO: replace with a proper serial port handling code
fn write(s: &str) {
let ptr = SERIAL_PORT_ADDRESS as *mut u8;
for c in s.chars() {
unsafe {
*ptr = c as u8;
}
}
}
#[no_mangle]
pub unsafe extern fn __boot() {
klog::init(write, klog::Level::Debug);
device_tree::init(DEVICE_TREE_ADDRESS);<|fim▁hole|> memory::init();
}<|fim▁end|> | |
<|file_name|>hrtb-parse.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we can parse all the various places that a `for` keyword
// can appear representing universal quantification.
#![feature(unboxed_closures)]<|fim▁hole|>trait Get<A,R> {
fn get(&self, arg: A) -> R;
}
// Parse HRTB with explicit `for` in a where-clause:
fn foo00<T>(t: T)
where T : for<'a> Get<&'a int, &'a int>
{
}
fn foo01<T: for<'a> Get<&'a int, &'a int>>(t: T)
{
}
// Parse HRTB with explicit `for` in various sorts of types:
fn foo10(t: Box<for<'a> Get<int, int>>) { }
fn foo11(t: Box<for<'a> Get(int) -> int>) { }
fn foo20(t: for<'a> fn(int) -> int) { }
fn foo21(t: for<'a> unsafe fn(int) -> int) { }
fn foo22(t: for<'a> extern "C" fn(int) -> int) { }
fn foo23(t: for<'a> unsafe extern "C" fn(int) -> int) { }
fn foo30(t: for<'a> |int| -> int) { }
fn foo31(t: for<'a> unsafe |int| -> int) { }
//fn foo40(t: for<'a> proc(int) -> int) { }
fn main() {
}<|fim▁end|> | #![allow(unused_variables)]
#![allow(dead_code)]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.