<|file_name|>tests.py<|end_file_name|>
# -*- coding: utf-8 -*-
import unittest
from hanspell import spell_checker
from hanspell.constants import CheckResult
from textwrap import dedent as trim


class SpellCheckerTests(unittest.TestCase):
    def setUp(self):
        pass

    def test_basic_check(self):
        result = spell_checker.check(u'안녕 하세요. 저는 한국인 입니다. 이문장은 한글로 작성됬습니다.')

        assert result.errors == 4
        assert result.checked == u'안녕하세요. 저는 한국인입니다. 이 문장은 한글로 작성됐습니다.'

    def test_words(self):
        result = spell_checker.check(u'한아이가 장난깜을 갖고놀고있다. 그만하게 할가?')
        assert result.errors == 4

        items = result.words
        assert items[u'한'] == CheckResult.WRONG_SPACING
        assert items[u'아이가'] == CheckResult.WRONG_SPACING
        assert items[u'장난감을'] == CheckResult.STATISTICAL_CORRECTION
        assert items[u'갖고'] == CheckResult.WRONG_SPACING
        assert items[u'놀고'] == CheckResult.WRONG_SPACING
        assert items[u'있다.'] == CheckResult.WRONG_SPACING
        assert items[u'그만하게'] == CheckResult.PASSED
        assert items[u'할까?'] == CheckResult.WRONG_SPELLING

    def test_list(self):
        results = spell_checker.check([u'안녕 하세요.', u'저는 한국인 입니다.'])

        assert results[0].checked == u'안녕하세요.'
        assert results[1].checked == u'저는 한국인입니다.'

    def test_long_paragraph(self):
        paragraph = trim("""
            ubit.info(유빗인포)는 코나미 리듬게임, 유비트의 플레이 데이터 관리 및 열람 서비스입니다. 등록 후에 자신과 친구의 기록을 p.eagate.573.jp에 접속할 필요 없이 본 웹 사이트에서 바로 확인할 수 있습니다.
            등록 후에는 "https://ubit.info/별칭"으로 자신의 개인 페이지가 생성되며 이 주소(별칭)를 아는 사람만 접속할 수 있습니다. 다른 친구에게 기록을 보여주고 싶다면 본인의 인포 주소를 알려주면 됩니다.
            이 사이트는 최신 브라우저 환경만을 제대로 지원합니다. 만약 크롬, 파이어폭스 등의 최신 브라우저 안정버전(stable)을 사용하고 있는데도 페이지 레이아웃이 깨지는 경우 사이트 관리자에게 문의해주세요.
            등록 과정은 간단합니다. 상단 메뉴에서 등록을 클릭한 후 양식에 맞게 입력하시면 자동으로 공개설정이 완료됨과 동시에 유빗인포 계정이 생성됩니다.
        """)
        result = spell_checker.check(paragraph)


if __name__ == '__main__':
    unittest.main()
<|file_name|>urls.py<|end_file_name|>
"""dryorm URL Configuration

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  url(r'^$', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  url(r'^$', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.conf.urls import url, include
    2. Add a URL to urlpatterns:  url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.views.generic import TemplateView

from core import views

urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='index.html')),
    url(r'^save', views.save, name='save'),
    url(r'^(?P<pk>[0-9a-zA-Z\-]+)', views.detail, name='detail'),
]
<|file_name|>handler_functions_test.go<|end_file_name|>
package aws

import (
	"net/http"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/trayio/bunny/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/trayio/bunny/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials"
)

func TestValidateEndpointHandler(t *testing.T) {
	os.Clearenv()

	svc := NewService(&Config{Region: "us-west-2"})
	svc.Handlers.Clear()
	svc.Handlers.Validate.PushBack(ValidateEndpointHandler)

	req := NewRequest(svc, &Operation{Name: "Operation"}, nil, nil)
	err := req.Build()

	assert.NoError(t, err)
}

func TestValidateEndpointHandlerErrorRegion(t *testing.T) {
	os.Clearenv()

	svc := NewService(nil)
	svc.Handlers.Clear()
	svc.Handlers.Validate.PushBack(ValidateEndpointHandler)

	req := NewRequest(svc, &Operation{Name: "Operation"}, nil, nil)
	err := req.Build()

	assert.Error(t, err)
	assert.Equal(t, ErrMissingRegion, err)
}

type mockCredsProvider struct {
	expired        bool
	retreiveCalled bool
}

func (m *mockCredsProvider) Retrieve() (credentials.Value, error) {
	m.retreiveCalled = true
	return credentials.Value{}, nil
}

func (m *mockCredsProvider) IsExpired() bool {
	return m.expired
}

func TestAfterRetryRefreshCreds(t *testing.T) {
	os.Clearenv()
	credProvider := &mockCredsProvider{}
	svc := NewService(&Config{Credentials: credentials.NewCredentials(credProvider), MaxRetries: 1})

	svc.Handlers.Clear()
	svc.Handlers.ValidateResponse.PushBack(func(r *Request) {
		r.Error = awserr.New("UnknownError", "", nil)
		r.HTTPResponse = &http.Response{StatusCode: 400}
	})
	svc.Handlers.UnmarshalError.PushBack(func(r *Request) {
		r.Error = awserr.New("ExpiredTokenException", "", nil)
	})
	svc.Handlers.AfterRetry.PushBack(func(r *Request) {
		AfterRetryHandler(r)
	})

	assert.True(t, svc.Config.Credentials.IsExpired(), "Expect to start out expired")
	assert.False(t, credProvider.retreiveCalled)

	req := NewRequest(svc, &Operation{Name: "Operation"}, nil, nil)
	req.Send()

	assert.True(t, svc.Config.Credentials.IsExpired())
	assert.False(t, credProvider.retreiveCalled)

	_, err := svc.Config.Credentials.Get()
	assert.NoError(t, err)
	assert.True(t, credProvider.retreiveCalled)
}
<|file_name|>rangelist.go<|end_file_name|>
package user

import (
	"strconv"
	"strings"
)

// RangeList is a list of user ranges
type RangeList []*Range

// ParseRangeList parses a string that contains a comma-separated list of ranges
func ParseRangeList(str string) (*RangeList, error) {
	rl := RangeList{}
	if len(str) == 0 {
		return &rl, nil
	}
	parts := strings.Split(str, ",")
	for _, p := range parts {
		r, err := ParseRange(p)
		if err != nil {
			return nil, err
		}
		rl = append(rl, r)
	}
	return &rl, nil
}

// Empty returns true if the RangeList is empty
func (l *RangeList) Empty() bool {
	if len(*l) == 0 {
		return true
	}
	for _, r := range *l {
		if !r.Empty() {
			return false
		}
	}
	return true
}

// Contains returns true if the uid is contained by any range in the RangeList
func (l *RangeList) Contains(uid int) bool {
	for _, r := range *l {
		if r.Contains(uid) {
			return true
		}
	}
	return false
}

// Type returns the type of a RangeList object
func (l *RangeList) Type() string {
	return "user.RangeList"
}

// Set sets the value of a RangeList object
func (l *RangeList) Set(value string) error {
	newRangeList, err := ParseRangeList(value)
	if err != nil {
		return err
	}
	*l = *newRangeList
	return nil
}

// String returns a parseable string representation of a RangeList
func (l *RangeList) String() string {
	rangeStrings := []string{}
	for _, r := range *l {
		rangeStrings = append(rangeStrings, r.String())
	}
	return strings.Join(rangeStrings, ",")
}

// IsUserAllowed checks that the given user is numeric and is
// contained by the given RangeList
func IsUserAllowed(user string, allowed *RangeList) bool {
	uid, err := strconv.Atoi(user)
	if err != nil {
		return false
	}
	return allowed.Contains(uid)
}
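// Usage sketch (illustrative, not part of the original file; it assumes
// ParseRange and the Range type, defined elsewhere in this package, accept
// dash-separated forms like "1000-2000"):
//
//	rl, err := ParseRangeList("1-10,1000-2000")
//	if err == nil && IsUserAllowed("1001", rl) {
//		// uid 1001 falls inside the 1000-2000 range
//	}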
<|file_name|>__init__.py<|end_file_name|>
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <[email protected]>

Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''

from attachment import *
from attachmentowner import AttachmentOwner
from attachmentlist import AttachmentList
from sorter import AttachmentSorter
<|file_name|>input.trigger.js<|end_file_name|>
/*
 * The trigger API
 *
 * - Documentation: ../docs/input-trigger.md
 */
+function ($) { "use strict";

    var TriggerOn = function (element, options) {

        var $el = this.$el = $(element);

        this.options = options || {};

        if (this.options.triggerCondition === false)
            throw new Error('Trigger condition is not specified.')

        if (this.options.trigger === false)
            throw new Error('Trigger selector is not specified.')

        if (this.options.triggerAction === false)
            throw new Error('Trigger action is not specified.')

        this.triggerCondition = this.options.triggerCondition

        if (this.options.triggerCondition.indexOf('value') == 0) {
            var match = this.options.triggerCondition.match(/[^[\]]+(?=])/g)
            this.triggerCondition = 'value'
            this.triggerConditionValue = (match) ? match : [""]
        }

        this.triggerParent = this.options.triggerClosestParent !== undefined
            ? $el.closest(this.options.triggerClosestParent)
            : undefined

        if (
            this.triggerCondition == 'checked' ||
            this.triggerCondition == 'unchecked' ||
            this.triggerCondition == 'value'
        ) {
            $(document).on('change', this.options.trigger, $.proxy(this.onConditionChanged, this))
        }

        var self = this
        $el.on('oc.triggerOn.update', function(e){
            e.stopPropagation()
            self.onConditionChanged()
        })

        self.onConditionChanged()
    }

    TriggerOn.prototype.onConditionChanged = function() {
        if (this.triggerCondition == 'checked') {
            this.updateTarget(!!$(this.options.trigger + ':checked', this.triggerParent).length)
        }
        else if (this.triggerCondition == 'unchecked') {
            this.updateTarget(!$(this.options.trigger + ':checked', this.triggerParent).length)
        }
        else if (this.triggerCondition == 'value') {
            var trigger, triggerValue = ''

            trigger = $(this.options.trigger, this.triggerParent)
                .not('input[type=checkbox], input[type=radio], input[type=button], input[type=submit]')

            if (!trigger.length) {
                trigger = $(this.options.trigger, this.triggerParent)
                    .not(':not(input[type=checkbox]:checked, input[type=radio]:checked)')
            }

            if (!!trigger.length) {
                triggerValue = trigger.val()
            }

            this.updateTarget($.inArray(triggerValue, this.triggerConditionValue) != -1)
        }
    }

    TriggerOn.prototype.updateTarget = function(status) {
        var self = this,
            actions = this.options.triggerAction.split('|')

        $.each(actions, function(index, action) {
            self.updateTargetAction(action, status)
        })

        $(window).trigger('resize')

        this.$el.trigger('oc.triggerOn.afterUpdate', status)
    }

    TriggerOn.prototype.updateTargetAction = function(action, status) {
        if (action == 'show') {
            this.$el
                .toggleClass('hide', !status)
                .trigger('hide.oc.triggerapi', [!status])
        }
        else if (action == 'hide') {
            this.$el
                .toggleClass('hide', status)
                .trigger('hide.oc.triggerapi', [status])
        }
        else if (action == 'enable') {
            this.$el
                .prop('disabled', !status)
                .toggleClass('control-disabled', !status)
                .trigger('disable.oc.triggerapi', [!status])
        }
        else if (action == 'disable') {
            this.$el
                .prop('disabled', status)
                .toggleClass('control-disabled', status)
                .trigger('disable.oc.triggerapi', [status])
        }
        else if (action == 'empty' && status) {
            this.$el
                .not('input[type=checkbox], input[type=radio], input[type=button], input[type=submit]')
                .val('')

            this.$el
                .not(':not(input[type=checkbox], input[type=radio])')
                .prop('checked', false)

            this.$el
                .trigger('empty.oc.triggerapi')
                .trigger('change')
        }

        if (action == 'show' || action == 'hide') {
            this.fixButtonClasses()
        }
    }

    TriggerOn.prototype.fixButtonClasses = function() {
        var group = this.$el.closest('.btn-group')

        if (group.length > 0 && this.$el.is(':last-child'))
            this.$el.prev().toggleClass('last', this.$el.hasClass('hide'))
    }

    TriggerOn.DEFAULTS = {
        triggerAction: false,
        triggerCondition: false,
        triggerClosestParent: undefined,
        trigger: false
    }

    // TRIGGERON PLUGIN DEFINITION
    // ============================

    var old = $.fn.triggerOn

    $.fn.triggerOn = function (option) {
        return this.each(function () {
            var $this = $(this)
            var data = $this.data('oc.triggerOn')
            var options = $.extend({}, TriggerOn.DEFAULTS, $this.data(), typeof option == 'object' && option)
            if (!data) $this.data('oc.triggerOn', (data = new TriggerOn(this, options)))
        })
    }

    $.fn.triggerOn.Constructor = TriggerOn

    // TRIGGERON NO CONFLICT
    // =================

    $.fn.triggerOn.noConflict = function () {
        $.fn.triggerOn = old
        return this
    }

    // TRIGGERON DATA-API
    // ===============

    $(document).render(function(){
        $('[data-trigger]').triggerOn()
    })

}(window.jQuery);
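// Usage sketch (illustrative, not from the original file). The data-api above
// binds every element carrying a `data-trigger` attribute; jQuery's .data()
// maps the hyphenated attributes onto the TriggerOn.DEFAULTS option names.
// Hypothetical markup:
//
//   <input type="checkbox" id="is-urgent" />
//   <button
//       data-trigger="#is-urgent"
//       data-trigger-condition="checked"
//       data-trigger-action="show">
//       Escalate
//   </button>
//
// Here the button is hidden until the #is-urgent checkbox is checked.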
<|file_name|>deepcopy.go<|end_file_name|>
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"fmt"
"io"
"path/filepath"
"strings"
"k8s.io/kubernetes/cmd/libs/go2idl/args"
"k8s.io/kubernetes/cmd/libs/go2idl/generator"
"k8s.io/kubernetes/cmd/libs/go2idl/namer"
"k8s.io/kubernetes/cmd/libs/go2idl/types"
"k8s.io/kubernetes/pkg/util/sets"
"github.com/golang/glog"
)
// TODO: This is created only to reduce number of changes in a single PR.
// Remove it and use PublicNamer instead.
func deepCopyNamer() *namer.NameStrategy {
return &namer.NameStrategy{
Join: func(pre string, in []string, post string) string {
return strings.Join(in, "_")
},
PrependPackageNames: 1,
}
}
// NameSystems returns the name system used by the generators in this package.
func NameSystems() namer.NameSystems {
return namer.NameSystems{
"public": deepCopyNamer(),
"raw": namer.NewRawNamer("", nil),
}
}
// DefaultNameSystem returns the default name system for ordering the types to be
// processed by the generators in this package.
func DefaultNameSystem() string {
return "public"
}
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
boilerplate, err := arguments.LoadGoBoilerplate()
if err != nil {
glog.Fatalf("Failed loading boilerplate: %v", err)
}
inputs := sets.NewString(arguments.InputDirs...)
packages := generator.Packages{}
header := append([]byte(
`// +build !ignore_autogenerated
`), boilerplate...)
header = append(header, []byte(
`
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
`)...)
for _, p := range context.Universe {
copyableType := false
for _, t := range p.Types {
if copyableWithinPackage(t) {
copyableType = true
}
}
if copyableType {
path := p.Path
packages = append(packages,
&generator.DefaultPackage{
PackageName: filepath.Base(path),
PackagePath: path,
HeaderText: header,
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
generators = []generator.Generator{}
generators = append(
generators, NewGenDeepCopy("deep_copy_generated", path, inputs.Has(path)))
return generators
},
FilterFunc: func(c *generator.Context, t *types.Type) bool {
return t.Name.Package == path
},
})
}
}
return packages
}
const (
apiPackagePath = "k8s.io/kubernetes/pkg/api"
conversionPackagePath = "k8s.io/kubernetes/pkg/conversion"
)
// genDeepCopy produces a file with autogenerated deep-copy functions.
type genDeepCopy struct {
generator.DefaultGen
targetPackage string
imports namer.ImportTracker
typesForInit []*types.Type
generateInitFunc bool
}
func NewGenDeepCopy(sanitizedName, targetPackage string, generateInitFunc bool) generator.Generator {
return &genDeepCopy{
DefaultGen: generator.DefaultGen{
OptionalName: sanitizedName,
},
targetPackage: targetPackage,
imports: generator.NewImportTracker(),
typesForInit: make([]*types.Type, 0),
generateInitFunc: generateInitFunc,
}
}
func (g *genDeepCopy) Namers(c *generator.Context) namer.NameSystems {
// Have the raw namer for this file track what it imports.
return namer.NameSystems{"raw": namer.NewRawNamer(g.targetPackage, g.imports)}
}
func (g *genDeepCopy) Filter(c *generator.Context, t *types.Type) bool {
// Filter out all types not copyable within the package.
copyable := copyableWithinPackage(t)
if copyable {
g.typesForInit = append(g.typesForInit, t)
}
return copyable
}
func copyableWithinPackage(t *types.Type) bool {
if !strings.HasPrefix(t.Name.Package, "k8s.io/kubernetes/") {
return false
}
if types.ExtractCommentTags("+", t.CommentLines)["gencopy"] == "false" {
return false
}
// TODO: Consider generating functions for other kinds too.
if t.Kind != types.Struct {
return false
}
// Also, filter out private types.
if namer.IsPrivateGoName(t.Name.Name) {
return false
}
return true
}
func (g *genDeepCopy) isOtherPackage(pkg string) bool {
if pkg == g.targetPackage {
return false
}
if strings.HasSuffix(pkg, "\""+g.targetPackage+"\"") {
return false
}
return true
}
func (g *genDeepCopy) Imports(c *generator.Context) (imports []string) {
importLines := []string{}
if g.isOtherPackage(apiPackagePath) && g.generateInitFunc {
importLines = append(importLines, "api \""+apiPackagePath+"\"")
}
if g.isOtherPackage(conversionPackagePath) {
importLines = append(importLines, "conversion \""+conversionPackagePath+"\"")
}
for _, singleImport := range g.imports.ImportLines() {
if g.isOtherPackage(singleImport) {
importLines = append(importLines, singleImport)
}
}
return importLines
}
func argsFromType(t *types.Type) interface{} {
return map[string]interface{}{
"type": t,
}
}
func (g *genDeepCopy) funcNameTmpl(t *types.Type) string {
tmpl := "DeepCopy_$.type|public$"
g.imports.AddType(t)
if t.Name.Package != g.targetPackage {
tmpl = g.imports.LocalNameOf(t.Name.Package) + "." + tmpl
}
return tmpl
}
func (g *genDeepCopy) Init(c *generator.Context, w io.Writer) error {
if !g.generateInitFunc {
// TODO: We should come up with a solution to register all generated
// deep-copy functions. However, for now, to avoid import cycles
// we register only those explicitly requested.
return nil
}
sw := generator.NewSnippetWriter(w, c, "$", "$")
sw.Do("func init() {\n", nil)
if g.targetPackage == apiPackagePath {
sw.Do("if err := Scheme.AddGeneratedDeepCopyFuncs(\n", nil)
} else {
sw.Do("if err := api.Scheme.AddGeneratedDeepCopyFuncs(\n", nil)
}
for _, t := range g.typesForInit {
sw.Do(fmt.Sprintf("%s,\n", g.funcNameTmpl(t)), argsFromType(t))
}
sw.Do("); err != nil {\n", nil)
sw.Do("// if one of the deep copy functions is malformed, detect it immediately.\n", nil)
sw.Do("panic(err)\n", nil)
sw.Do("}\n", nil)
sw.Do("}\n\n", nil)
return sw.Error()
}
func (g *genDeepCopy) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "$", "$")
funcName := g.funcNameTmpl(t)
if g.targetPackage == conversionPackagePath {
sw.Do(fmt.Sprintf("func %s(in $.type|raw$, out *$.type|raw$, c *Cloner) error {\n", funcName), argsFromType(t))
} else {
sw.Do(fmt.Sprintf("func %s(in $.type|raw$, out *$.type|raw$, c *conversion.Cloner) error {\n", funcName), argsFromType(t))
}
g.generateFor(t, sw)
sw.Do("return nil\n", nil)
sw.Do("}\n\n", nil)
return sw.Error()
}
// we use the system of shadowing 'in' and 'out' so that the same code is valid
// at any nesting level. This makes the autogenerator easy to understand, and
// the compiler shouldn't care.
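//
// Illustrative sketch (not actual generator output from this repository): for
// a struct field `Args []string`, doStruct and doSlice below would emit code
// shaped like
//
//	if in.Args != nil {
//		in, out := in.Args, &out.Args
//		*out = make([]string, len(in))
//		copy(*out, in)
//	} else {
//		out.Args = nil
//	}
//
// where the shadowed `in`/`out` keep the nested copy logic uniform.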
func (g *genDeepCopy) generateFor(t *types.Type, sw *generator.SnippetWriter) {
var f func(*types.Type, *generator.SnippetWriter)
switch t.Kind {
case types.Builtin:
f = g.doBuiltin
case types.Map:
f = g.doMap
case types.Slice:
f = g.doSlice
case types.Struct:
f = g.doStruct
case types.Interface:
f = g.doInterface
case types.Pointer:
f = g.doPointer
case types.Alias:
f = g.doAlias
default:
f = g.doUnknown
}
f(t, sw)
}
func (g *genDeepCopy) doBuiltin(t *types.Type, sw *generator.SnippetWriter) {
sw.Do("*out = in\n", nil)
}
func (g *genDeepCopy) doMap(t *types.Type, sw *generator.SnippetWriter) {
sw.Do("*out = make($.|raw$)\n", t)
if t.Key.IsAssignable() {
sw.Do("for key, val := range in {\n", nil)
if t.Elem.IsAssignable() {
sw.Do("(*out)[key] = val\n", nil)
} else {
if copyableWithinPackage(t.Elem) {
sw.Do("newVal := new($.|raw$)\n", t.Elem)
funcName := g.funcNameTmpl(t.Elem)
sw.Do(fmt.Sprintf("if err := %s(val, newVal, c); err != nil {\n", funcName), argsFromType(t.Elem))
sw.Do("return err\n", nil)
sw.Do("}\n", nil)
sw.Do("(*out)[key] = *newVal\n", nil)
} else {
sw.Do("if newVal, err := c.DeepCopy(val); err != nil {\n", nil)
sw.Do("return err\n", nil)
sw.Do("} else {\n", nil)
sw.Do("(*out)[key] = newVal.($.|raw$)\n", t.Elem)
sw.Do("}\n", nil)
}
}
} else {
// TODO: Implement it when necessary.
sw.Do("for range in {\n", nil)
sw.Do("// FIXME: Copying unassignable keys unsupported $.|raw$\n", t.Key)
}
sw.Do("}\n", nil)
}
func (g *genDeepCopy) doSlice(t *types.Type, sw *generator.SnippetWriter) {
sw.Do("*out = make($.|raw$, len(in))\n", t)
if t.Elem.Kind == types.Builtin {
sw.Do("copy(*out, in)\n", nil)
} else {
sw.Do("for i := range in {\n", nil)
if t.Elem.IsAssignable() {
sw.Do("(*out)[i] = in[i]\n", nil)
} else if copyableWithinPackage(t.Elem) {
funcName := g.funcNameTmpl(t.Elem)
sw.Do(fmt.Sprintf("if err := %s(in[i], &(*out)[i], c); err != nil {\n", funcName), argsFromType(t.Elem))
sw.Do("return err\n", nil)
sw.Do("}\n", nil)
} else {
sw.Do("if newVal, err := c.DeepCopy(in[i]); err != nil {\n", nil)<|fim▁hole|> sw.Do("}\n", nil)
}
sw.Do("}\n", nil)
}
}
func (g *genDeepCopy) doStruct(t *types.Type, sw *generator.SnippetWriter) {
for _, m := range t.Members {
args := map[string]interface{}{
"type": m.Type,
"name": m.Name,
}
switch m.Type.Kind {
case types.Builtin:
sw.Do("out.$.name$ = in.$.name$\n", args)
case types.Map, types.Slice, types.Pointer:
sw.Do("if in.$.name$ != nil {\n", args)
sw.Do("in, out := in.$.name$, &out.$.name$\n", args)
g.generateFor(m.Type, sw)
sw.Do("} else {\n", nil)
sw.Do("out.$.name$ = nil\n", args)
sw.Do("}\n", nil)
case types.Struct:
if copyableWithinPackage(m.Type) {
funcName := g.funcNameTmpl(m.Type)
sw.Do(fmt.Sprintf("if err := %s(in.$.name$, &out.$.name$, c); err != nil {\n", funcName), args)
sw.Do("return err\n", nil)
sw.Do("}\n", nil)
} else {
sw.Do("if newVal, err := c.DeepCopy(in.$.name$); err != nil {\n", args)
sw.Do("return err\n", nil)
sw.Do("} else {\n", nil)
sw.Do("out.$.name$ = newVal.($.type|raw$)\n", args)
sw.Do("}\n", nil)
}
default:
if m.Type.Kind == types.Alias && m.Type.Underlying.Kind == types.Builtin {
sw.Do("out.$.name$ = in.$.name$\n", args)
} else {
sw.Do("if in.$.name$ == nil {\n", args)
sw.Do("out.$.name$ = nil\n", args)
sw.Do("} else if newVal, err := c.DeepCopy(in.$.name$); err != nil {\n", args)
sw.Do("return err\n", nil)
sw.Do("} else {\n", nil)
sw.Do("out.$.name$ = newVal.($.type|raw$)\n", args)
sw.Do("}\n", nil)
}
}
}
}
func (g *genDeepCopy) doInterface(t *types.Type, sw *generator.SnippetWriter) {
// TODO: Add support for interfaces.
g.doUnknown(t, sw)
}
func (g *genDeepCopy) doPointer(t *types.Type, sw *generator.SnippetWriter) {
sw.Do("*out = new($.Elem|raw$)\n", t)
if t.Elem.Kind == types.Builtin {
sw.Do("**out = *in", nil)
} else if copyableWithinPackage(t.Elem) {
funcName := g.funcNameTmpl(t.Elem)
sw.Do(fmt.Sprintf("if err := %s(*in, *out, c); err != nil {\n", funcName), argsFromType(t.Elem))
sw.Do("return err\n", nil)
sw.Do("}\n", nil)
} else {
sw.Do("if newVal, err := c.DeepCopy(*in); err != nil {\n", nil)
sw.Do("return err\n", nil)
sw.Do("} else {\n", nil)
sw.Do("**out = newVal.($.|raw$)\n", t.Elem)
sw.Do("}\n", nil)
}
}
func (g *genDeepCopy) doAlias(t *types.Type, sw *generator.SnippetWriter) {
// TODO: Add support for aliases.
g.doUnknown(t, sw)
}
func (g *genDeepCopy) doUnknown(t *types.Type, sw *generator.SnippetWriter) {
sw.Do("// FIXME: Type $.|raw$ is unsupported.\n", t)
}
<|file_name|>tensor.py<|end_file_name|>
"""
This module defines tensors with abstract index notation.
The abstract index notation was first formalized by Penrose.
Tensor indices are formal objects, with a tensor type; there is no
notion of index range, it is only possible to assign the dimension,
used to trace the Kronecker delta; the dimension can be a Symbol.
The Einstein summation convention is used.
The covariant indices are indicated with a minus sign in front of the index.
For instance the tensor ``t = p(a)*A(b,c)*q(-c)`` has the index ``c``
contracted.
A tensor expression ``t`` can be called; called with its
indices in sorted order it is equal to itself:
in the above example ``t(a, b) == t``;
one can call ``t`` with different indices; ``t(c, d) == p(c)*A(d,a)*q(-a)``.
The contracted indices are dummy indices, internally they have no name,
the indices being represented by a graph-like structure.
Tensors are put in canonical form using ``canon_bp``, which uses
the Butler-Portugal algorithm for canonicalization using the monoterm
symmetries of the tensors.
If there is a (anti)symmetric metric, the indices can be raised and
lowered when the tensor is put in canonical form.
"""
from __future__ import print_function, division
from collections import defaultdict
from sympy import Matrix, Rational
from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, \
bsgs_direct_product, canonicalize, riemann_bsgs
from sympy.core import Basic, sympify, Add, S
from sympy.core.compatibility import string_types, reduce, range
from sympy.core.containers import Tuple
from sympy.core.decorators import deprecated
from sympy.core.symbol import Symbol, symbols
from sympy.core.sympify import CantSympify
from sympy.external import import_module
from sympy.utilities.decorator import doctest_depends_on
from sympy.matrices import eye
class TIDS(CantSympify):
"""
Tensor-index data structure. This contains internal data structures about
components of a tensor expression, its free and dummy indices.
To create a ``TIDS`` object via the standard constructor, the required
arguments are
WARNING: this class is meant as an internal representation of tensor data
structures and should not be directly accessed by end users.
Parameters
==========
components : ``TensorHead`` objects representing the components of the tensor expression.
free : Free indices in their internal representation.
dum : Dummy indices in their internal representation.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TIDS, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> TIDS([T], [(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz)], [(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
Notes
=====
In short, this has created the components, free and dummy indices for
the internal representation of a tensor T(m0, m1, -m1, m3).
Free indices are represented as a list of triplets. The elements of
each triplet identify a single free index and are
1. TensorIndex object
2. position inside the component
3. component number
    Dummy indices are represented as a list of 4-plets. Each 4-plet stands
    for a couple of contracted indices; their original TensorIndex is not
    stored, as it is no longer required. The four elements of the 4-plet
    are
1. position inside the component of the first index.
2. position inside the component of the second index.
3. component number of the first index.
4. component number of the second index.
"""
def __init__(self, components, free, dum):
self.components = components
self.free = free
self.dum = dum
self._ext_rank = len(self.free) + 2*len(self.dum)
self.dum.sort(key=lambda x: (x[2], x[0]))
def get_tensors(self):
"""
Get a list of ``Tensor`` objects having the same ``TIDS`` if multiplied
by one another.
"""
indices = self.get_indices()
components = self.components
tensors = [None for i in components] # pre-allocate list
ind_pos = 0
for i, component in enumerate(components):
prev_pos = ind_pos
ind_pos += component.rank
tensors[i] = Tensor(component, indices[prev_pos:ind_pos])
return tensors
def get_components_with_free_indices(self):
"""
Get a list of components with their associated indices.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TIDS, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> t = TIDS.from_components_and_indices([T], [m0, m1, -m1, m3])
>>> t.get_components_with_free_indices()
[(T(Lorentz,Lorentz,Lorentz,Lorentz), [(m0, 0, 0), (m3, 3, 0)])]
>>> t2 = (A(m0)*A(-m0))._tids
>>> t2.get_components_with_free_indices()
[(A(Lorentz), []), (A(Lorentz), [])]
>>> t3 = (A(m0)*A(-m1)*A(-m0)*A(m1))._tids
>>> t3.get_components_with_free_indices()
[(A(Lorentz), []), (A(Lorentz), []), (A(Lorentz), []), (A(Lorentz), [])]
>>> t4 = (A(m0)*A(m1)*A(-m0))._tids
>>> t4.get_components_with_free_indices()
[(A(Lorentz), []), (A(Lorentz), [(m1, 0, 1)]), (A(Lorentz), [])]
>>> t5 = (A(m0)*A(m1)*A(m2))._tids
>>> t5.get_components_with_free_indices()
[(A(Lorentz), [(m0, 0, 0)]), (A(Lorentz), [(m1, 0, 1)]), (A(Lorentz), [(m2, 0, 2)])]
"""
components = self.components
ret_comp = []
free_counter = 0
if len(self.free) == 0:
return [(comp, []) for comp in components]
for i, comp in enumerate(components):
c_free = []
while free_counter < len(self.free):
if not self.free[free_counter][2] == i:
break
c_free.append(self.free[free_counter])
free_counter += 1
if free_counter >= len(self.free):
break
ret_comp.append((comp, c_free))
return ret_comp
@staticmethod
def from_components_and_indices(components, indices):
"""
Create a new ``TIDS`` object from ``components`` and ``indices``
``components`` ``TensorHead`` objects representing the components
of the tensor expression.
``indices`` ``TensorIndex`` objects, the indices. Contractions are
detected upon construction.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TIDS, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> TIDS.from_components_and_indices([T], [m0, m1, -m1, m3])
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz)], [(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
        In case of many components, the same indices are stored with slightly
        different internal positions:
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> TIDS.from_components_and_indices([A]*4, [m0, m1, -m1, m3])
TIDS([A(Lorentz), A(Lorentz), A(Lorentz), A(Lorentz)], [(m0, 0, 0), (m3, 0, 3)], [(0, 0, 1, 2)])
"""
tids = None
cur_pos = 0
for i in components:
tids_sing = TIDS([i], *TIDS.free_dum_from_indices(*indices[cur_pos:cur_pos+i.rank]))
if tids is None:
tids = tids_sing
else:
tids *= tids_sing
cur_pos += i.rank
if tids is None:
tids = TIDS([], [], [])
tids.free.sort(key=lambda x: x[0].name)
tids.dum.sort()
return tids
@deprecated(useinstead="get_indices")
def to_indices(self):
return self.get_indices()
@staticmethod
def free_dum_from_indices(*indices):
"""
Convert ``indices`` into ``free``, ``dum`` for single component tensor
``free`` list of tuples ``(index, pos, 0)``,
where ``pos`` is the position of index in
the list of indices formed by the component tensors
``dum`` list of tuples ``(pos_contr, pos_cov, 0, 0)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TIDS
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> TIDS.free_dum_from_indices(m0, m1, -m1, m3)
([(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
"""
n = len(indices)
if n == 1:
return [(indices[0], 0, 0)], []
# find the positions of the free indices and of the dummy indices
free = [True]*len(indices)
index_dict = {}
dum = []
for i, index in enumerate(indices):
name = index._name
typ = index._tensortype
contr = index._is_up
if (name, typ) in index_dict:
# found a pair of dummy indices
is_contr, pos = index_dict[(name, typ)]
# check consistency and update free
if is_contr:
if contr:
raise ValueError('two equal contravariant indices in slots %d and %d' %(pos, i))
else:
free[pos] = False
free[i] = False
else:
if contr:
free[pos] = False
free[i] = False
else:
raise ValueError('two equal covariant indices in slots %d and %d' %(pos, i))
if contr:
dum.append((i, pos, 0, 0))
else:
dum.append((pos, i, 0, 0))
else:
index_dict[(name, typ)] = index._is_up, i
free = [(index, i, 0) for i, index in enumerate(indices) if free[i]]
free.sort()
return free, dum
@staticmethod
def _check_matrix_indices(f_free, g_free, nc1):
# This "private" method checks matrix indices.
# Matrix indices are special as there are only two, and observe
# anomalous substitution rules to determine contractions.
dum = []
# make sure that free indices appear in the same order as in their component:
f_free.sort(key=lambda x: (x[2], x[1]))
g_free.sort(key=lambda x: (x[2], x[1]))
matrix_indices_storage = {}
transform_right_to_left = {}
f_pop_pos = []
g_pop_pos = []
for free_pos, (ind, i, c) in enumerate(f_free):
index_type = ind._tensortype
if ind not in (index_type.auto_left, -index_type.auto_right):
continue
matrix_indices_storage[ind] = (free_pos, i, c)
for free_pos, (ind, i, c) in enumerate(g_free):
index_type = ind._tensortype
if ind not in (index_type.auto_left, -index_type.auto_right):
continue
if ind == index_type.auto_left:
if -index_type.auto_right in matrix_indices_storage:
other_pos, other_i, other_c = matrix_indices_storage.pop(-index_type.auto_right)
dum.append((other_i, i, other_c, c + nc1))
# mark to remove other_pos and free_pos from free:
g_pop_pos.append(free_pos)
f_pop_pos.append(other_pos)
continue
if ind in matrix_indices_storage:
other_pos, other_i, other_c = matrix_indices_storage.pop(ind)
dum.append((other_i, i, other_c, c + nc1))
# mark to remove other_pos and free_pos from free:
g_pop_pos.append(free_pos)
f_pop_pos.append(other_pos)
transform_right_to_left[-index_type.auto_right] = c
continue
if ind in transform_right_to_left:
other_c = transform_right_to_left.pop(ind)
if c == other_c:
g_free[free_pos] = (index_type.auto_left, i, c)
for i in reversed(sorted(f_pop_pos)):
f_free.pop(i)
for i in reversed(sorted(g_pop_pos)):
g_free.pop(i)
return dum
@staticmethod
def mul(f, g):
"""
The algorithms performing the multiplication of two ``TIDS`` instances.
In short, it forms a new ``TIDS`` object, joining components and indices,
checking that abstract indices are compatible, and possibly contracting
them.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TIDS, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> tids_1 = TIDS.from_components_and_indices([T], [m0, m1, -m1, m3])
>>> tids_2 = TIDS.from_components_and_indices([A], [m2])
>>> tids_1 * tids_2
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz), A(Lorentz)],\
[(m0, 0, 0), (m3, 3, 0), (m2, 0, 1)], [(1, 2, 0, 0)])
In this case no contraction has been performed.
>>> tids_3 = TIDS.from_components_and_indices([A], [-m3])
>>> tids_1 * tids_3
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz), A(Lorentz)],\
[(m0, 0, 0)], [(1, 2, 0, 0), (3, 0, 0, 1)])
Free indices ``m3`` and ``-m3`` are identified as a contracted couple, and are
therefore transformed into dummy indices.
A wrong index construction (for example, trying to contract two
contravariant indices or using indices multiple times) would result in
an exception:
>>> tids_4 = TIDS.from_components_and_indices([A], [m3])
>>> # This raises an exception:
>>> # tids_1 * tids_4
"""
index_up = lambda u: u if u.is_up else -u
        # lambda returns True if index is not a matrix index:
notmat = lambda i: i not in (i._tensortype.auto_left, -i._tensortype.auto_right)
f_free = f.free[:]
g_free = g.free[:]
nc1 = len(f.components)
dum = TIDS._check_matrix_indices(f_free, g_free, nc1)
# find out which free indices of f and g are contracted
free_dict1 = dict([(i if i.is_up else -i, (pos, cpos, i)) for i, pos, cpos in f_free])
free_dict2 = dict([(i if i.is_up else -i, (pos, cpos, i)) for i, pos, cpos in g_free])
free_names = set(free_dict1.keys()) & set(free_dict2.keys())
# find the new `free` and `dum`
dum2 = [(i1, i2, c1 + nc1, c2 + nc1) for i1, i2, c1, c2 in g.dum]
free1 = [(ind, i, c) for ind, i, c in f_free if index_up(ind) not in free_names]
free2 = [(ind, i, c + nc1) for ind, i, c in g_free if index_up(ind) not in free_names]
free = free1 + free2
dum.extend(f.dum + dum2)
for name in free_names:
ipos1, cpos1, ind1 = free_dict1[name]
ipos2, cpos2, ind2 = free_dict2[name]
cpos2 += nc1
if ind1._is_up == ind2._is_up:
raise ValueError('wrong index construction {0}'.format(ind1))
if ind1._is_up:
new_dummy = (ipos1, ipos2, cpos1, cpos2)
else:
new_dummy = (ipos2, ipos1, cpos2, cpos1)
dum.append(new_dummy)
return (f.components + g.components, free, dum)
def __mul__(self, other):
return TIDS(*self.mul(self, other))
def __str__(self):
return "TIDS({0}, {1}, {2})".format(self.components, self.free, self.dum)
def __repr__(self):
return self.__str__()
def sorted_components(self):
"""
Returns a ``TIDS`` with sorted components
The sorting is done taking into account the commutation group
of the component tensors.
"""
from sympy.combinatorics.permutations import _af_invert
cv = list(zip(self.components, range(len(self.components))))
sign = 1
n = len(cv) - 1
for i in range(n):
for j in range(n, i, -1):
c = cv[j-1][0].commutes_with(cv[j][0])
if c not in [0, 1]:
continue
if (cv[j-1][0]._types, cv[j-1][0]._name) > \
(cv[j][0]._types, cv[j][0]._name):
cv[j-1], cv[j] = cv[j], cv[j-1]
if c:
sign = -sign
# perm_inv[new_pos] = old_pos
components = [x[0] for x in cv]
perm_inv = [x[1] for x in cv]
perm = _af_invert(perm_inv)
free = [(ind, i, perm[c]) for ind, i, c in self.free]
free.sort()
dum = [(i1, i2, perm[c1], perm[c2]) for i1, i2, c1, c2 in self.dum]
dum.sort(key=lambda x: components[x[2]].index_types[x[0]])
return TIDS(components, free, dum), sign
def _get_sorted_free_indices_for_canon(self):
sorted_free = self.free[:]
sorted_free.sort(key=lambda x: x[0])
return sorted_free
def _get_sorted_dum_indices_for_canon(self):
return sorted(self.dum, key=lambda x: (x[2], x[0]))
def canon_args(self):
"""
Returns ``(g, dummies, msym, v)``, the entries of ``canonicalize``
see ``canonicalize`` in ``tensor_can.py``
"""
# to be called after sorted_components
from sympy.combinatorics.permutations import _af_new
# types = list(set(self._types))
# types.sort(key = lambda x: x._name)
n = self._ext_rank
g = [None]*n + [n, n+1]
pos = 0
vpos = []
components = self.components
for t in components:
vpos.append(pos)
pos += t._rank
# ordered indices: first the free indices, ordered by types
# then the dummy indices, ordered by types and contravariant before
# covariant
# g[position in tensor] = position in ordered indices
for i, (indx, ipos, cpos) in enumerate(self._get_sorted_free_indices_for_canon()):
pos = vpos[cpos] + ipos
g[pos] = i
pos = len(self.free)
j = len(self.free)
dummies = []
prev = None
a = []
msym = []
for ipos1, ipos2, cpos1, cpos2 in self._get_sorted_dum_indices_for_canon():
pos1 = vpos[cpos1] + ipos1
pos2 = vpos[cpos2] + ipos2
g[pos1] = j
g[pos2] = j + 1
j += 2
typ = components[cpos1].index_types[ipos1]
if typ != prev:
if a:
dummies.append(a)
a = [pos, pos + 1]
prev = typ
msym.append(typ.metric_antisym)
else:
a.extend([pos, pos + 1])
pos += 2
if a:
dummies.append(a)
numtyp = []
prev = None
for t in components:
if t == prev:
numtyp[-1][1] += 1
else:
prev = t
numtyp.append([prev, 1])
v = []
for h, n in numtyp:
if h._comm == 0 or h._comm == 1:
comm = h._comm
else:
comm = TensorManager.get_comm(h._comm, h._comm)
v.append((h._symmetry.base, h._symmetry.generators, n, comm))
return _af_new(g), dummies, msym, v
def perm2tensor(self, g, canon_bp=False):
"""
Returns a ``TIDS`` instance corresponding to the permutation ``g``
``g`` permutation corresponding to the tensor in the representation
used in canonicalization
``canon_bp`` if True, then ``g`` is the permutation
corresponding to the canonical form of the tensor
"""
vpos = []
components = self.components
pos = 0
for t in components:
vpos.append(pos)
pos += t._rank
sorted_free = [i[0] for i in self._get_sorted_free_indices_for_canon()]
nfree = len(sorted_free)
rank = self._ext_rank
dum = [[None]*4 for i in range((rank - nfree)//2)]
free = []
icomp = -1
for i in range(rank):
if i in vpos:
icomp += vpos.count(i)
pos0 = i
ipos = i - pos0
gi = g[i]
if gi < nfree:
ind = sorted_free[gi]
free.append((ind, ipos, icomp))
else:
j = gi - nfree
idum, cov = divmod(j, 2)
if cov:
dum[idum][1] = ipos
dum[idum][3] = icomp
else:
dum[idum][0] = ipos
dum[idum][2] = icomp
dum = [tuple(x) for x in dum]
return TIDS(components, free, dum)
def get_indices(self):
"""
Get a list of indices, creating new tensor indices to complete dummy indices.
"""
components = self.components
free = self.free
dum = self.dum
indices = [None]*self._ext_rank
start = 0
pos = 0
vpos = []
for t in components:
vpos.append(pos)
pos += t.rank
cdt = defaultdict(int)
# if the free indices have names with dummy_fmt, start with an
# index higher than those for the dummy indices
# to avoid name collisions
for indx, ipos, cpos in free:
if indx._name.split('_')[0] == indx._tensortype._dummy_fmt[:-3]:
cdt[indx._tensortype] = max(cdt[indx._tensortype], int(indx._name.split('_')[1]) + 1)
start = vpos[cpos]
indices[start + ipos] = indx
for ipos1, ipos2, cpos1, cpos2 in dum:
start1 = vpos[cpos1]
start2 = vpos[cpos2]
typ1 = components[cpos1].index_types[ipos1]
assert typ1 == components[cpos2].index_types[ipos2]
fmt = typ1._dummy_fmt
nd = cdt[typ1]
indices[start1 + ipos1] = TensorIndex(fmt % nd, typ1)
indices[start2 + ipos2] = TensorIndex(fmt % nd, typ1, False)
cdt[typ1] += 1
return indices
def contract_metric(self, g):
"""
Returns new TIDS and sign.
Sign is either 1 or -1, to correct the sign after metric contraction
(for spinor indices).
"""
components = self.components
antisym = g.index_types[0].metric_antisym
#if not any(x == g for x in components):
# return self
# list of positions of the metric ``g``
gpos = [i for i, x in enumerate(components) if x == g]
if not gpos:
return self, 1
sign = 1
dum = self.dum[:]
free = self.free[:]
elim = set()
for gposx in gpos:
if gposx in elim:
continue
free1 = [x for x in free if x[-1] == gposx]
dum1 = [x for x in dum if x[-2] == gposx or x[-1] == gposx]
if not dum1:
continue
elim.add(gposx)
if len(dum1) == 2:
if not antisym:
dum10, dum11 = dum1
if dum10[3] == gposx:
# the index with pos p0 and component c0 is contravariant
c0 = dum10[2]
p0 = dum10[0]
else:
# the index with pos p0 and component c0 is covariant
c0 = dum10[3]
p0 = dum10[1]
if dum11[3] == gposx:
# the index with pos p1 and component c1 is contravariant
c1 = dum11[2]
p1 = dum11[0]
else:
# the index with pos p1 and component c1 is covariant
c1 = dum11[3]
p1 = dum11[1]
dum.append((p0, p1, c0, c1))
else:
dum10, dum11 = dum1
# change the sign to bring the indices of the metric to contravariant
# form; change the sign if dum10 has the metric index in position 0
if dum10[3] == gposx:
# the index with pos p0 and component c0 is contravariant
c0 = dum10[2]
p0 = dum10[0]
if dum10[1] == 1:
sign = -sign
else:
# the index with pos p0 and component c0 is covariant
c0 = dum10[3]
p0 = dum10[1]
if dum10[0] == 0:
sign = -sign
if dum11[3] == gposx:
# the index with pos p1 and component c1 is contravariant
c1 = dum11[2]
p1 = dum11[0]
sign = -sign
else:
# the index with pos p1 and component c1 is covariant
c1 = dum11[3]
p1 = dum11[1]
dum.append((p0, p1, c0, c1))
elif len(dum1) == 1:
if not antisym:
dp0, dp1, dc0, dc1 = dum1[0]
if dc0 == dc1:
# g(i, -i)
typ = g.index_types[0]
if typ._dim is None:
raise ValueError('dimension not assigned')
sign = sign*typ._dim
else:
# g(i0, i1)*p(-i1)
if dc0 == gposx:
p1 = dp1
c1 = dc1
else:
p1 = dp0
c1 = dc0
ind, p, c = free1[0]
free.append((ind, p1, c1))
else:
dp0, dp1, dc0, dc1 = dum1[0]
if dc0 == dc1:
# g(i, -i)
typ = g.index_types[0]
if typ._dim is None:
raise ValueError('dimension not assigned')
sign = sign*typ._dim
if dp0 < dp1:
# g(i, -i) = -D with antisymmetric metric
sign = -sign
else:
# g(i0, i1)*p(-i1)
if dc0 == gposx:
p1 = dp1
c1 = dc1
if dp0 == 0:
sign = -sign
else:
p1 = dp0
c1 = dc0
ind, p, c = free1[0]
free.append((ind, p1, c1))
dum = [x for x in dum if x not in dum1]
free = [x for x in free if x not in free1]
shift = 0
shifts = [0]*len(components)
for i in range(len(components)):
if i in elim:
shift += 1
continue
shifts[i] = shift
free = [(ind, p, c - shifts[c]) for (ind, p, c) in free if c not in elim]
dum = [(p0, p1, c0 - shifts[c0], c1 - shifts[c1]) for i, (p0, p1, c0, c1) in enumerate(dum) if c0 not in elim and c1 not in elim]
components = [c for i, c in enumerate(components) if i not in elim]
tids = TIDS(components, free, dum)
return tids, sign
class VTIDS(TIDS):
"""
DEPRECATED: DO NOT USE.
"""
@deprecated(useinstead="TIDS")
def __init__(self, components, free, dum, data):
super(VTIDS, self).__init__(components, free, dum)
self.data = data
@staticmethod
@deprecated(useinstead="TIDS")
def parse_data(data):
"""
DEPRECATED: DO NOT USE.
"""
return _TensorDataLazyEvaluator.parse_data(data)
@deprecated(useinstead="TIDS")
def correct_signature_from_indices(self, data, indices, free, dum):
"""
DEPRECATED: DO NOT USE.
"""
return _TensorDataLazyEvaluator._correct_signature_from_indices(data, indices, free, dum)
@staticmethod
@deprecated(useinstead="TIDS")
def flip_index_by_metric(data, metric, pos):
"""
DEPRECATED: DO NOT USE.
"""
return _TensorDataLazyEvaluator._flip_index_by_metric(data, metric, pos)
class _TensorDataLazyEvaluator(CantSympify):
"""
EXPERIMENTAL: do not rely on this class, it may change without deprecation
warnings in future versions of SymPy.
This object contains the logic to associate components data to a tensor
expression. Components data are set via the ``.data`` property of tensor
expressions, is stored inside this class as a mapping between the tensor
expression and the ``ndarray``.
Computations are executed lazily: whereas the tensor expressions can have
contractions, tensor products, and additions, components data are not
computed until they are accessed by reading the ``.data`` property
associated to the tensor expression.
"""
_substitutions_dict = dict()
_substitutions_dict_tensmul = dict()
def __getitem__(self, key):
dat = self._get(key)
if dat is None:
return None
numpy = import_module("numpy")
if not isinstance(dat, numpy.ndarray):
return dat
if dat.ndim == 0:
return dat[()]
elif dat.ndim == 1 and dat.size == 1:
return dat[0]
return dat
def _get(self, key):
"""
Retrieve ``data`` associated with ``key``.
This algorithm looks into ``self._substitutions_dict`` for all
``TensorHead`` in the ``TensExpr`` (or just ``TensorHead`` if key is a
TensorHead instance). It reconstructs the components data that the
tensor expression should have by performing on components data the
operations that correspond to the abstract tensor operations applied.
Metric tensor is handled in a different manner: it is pre-computed in
``self._substitutions_dict_tensmul``.
"""
if key in self._substitutions_dict:
return self._substitutions_dict[key]
if isinstance(key, TensorHead):
return None
if isinstance(key, Tensor):
# special case to handle metrics. Metric tensors cannot be
# constructed through contraction by the metric, their
# components show if they are a matrix or its inverse.
signature = tuple([i.is_up for i in key.get_indices()])
srch = (key.component,) + signature
if srch in self._substitutions_dict_tensmul:
return self._substitutions_dict_tensmul[srch]
return self.data_tensmul_from_tensorhead(key, key.component)
if isinstance(key, TensMul):
tensmul_list = key.split()
if len(tensmul_list) == 1 and len(tensmul_list[0].components) == 1:
# special case to handle metrics. Metric tensors cannot be
# constructed through contraction by the metric, their
# components show if they are a matrix or its inverse.
signature = tuple([i.is_up for i in tensmul_list[0].get_indices()])
srch = (tensmul_list[0].components[0],) + signature
if srch in self._substitutions_dict_tensmul:
return self._substitutions_dict_tensmul[srch]
data_list = [self.data_tensmul_from_tensorhead(i, i.components[0]) for i in tensmul_list]
if all([i is None for i in data_list]):
return None
if any([i is None for i in data_list]):
raise ValueError("Mixing tensors with associated components "\
"data with tensors without components data")
data_result, tensmul_result = self.data_product_tensors(data_list, tensmul_list)
return data_result
if isinstance(key, TensAdd):
sumvar = S.Zero
data_list = [i.data for i in key.args]
if all([i is None for i in data_list]):
return None
if any([i is None for i in data_list]):
raise ValueError("Mixing tensors with associated components "\
"data with tensors without components data")
for i in data_list:
sumvar += i
return sumvar
return None
def data_tensorhead_from_tensmul(self, data, tensmul, tensorhead):
"""
This method is used when assigning components data to a ``TensMul``
object, it converts components data to a fully contravariant ndarray,
which is then stored according to the ``TensorHead`` key.
"""
if data is None:
return None
return self._correct_signature_from_indices(
data,
tensmul.get_indices(),
tensmul.free,
tensmul.dum,
True)
def data_tensmul_from_tensorhead(self, tensmul, tensorhead):
"""
This method corrects the components data to the right signature
(covariant/contravariant) using the metric associated with each
``TensorIndexType``.
"""
if tensorhead.data is None:
return None
return self._correct_signature_from_indices(
tensorhead.data,
tensmul.get_indices(),
tensmul.free,
tensmul.dum)
def data_product_tensors(self, data_list, tensmul_list):
"""
Given a ``data_list``, list of ``ndarray``'s and a ``tensmul_list``,
list of ``TensMul`` instances, compute the resulting ``ndarray``,
after tensor products and contractions.
"""
def data_mul(f, g):
"""
Multiplies two ``ndarray`` objects, it first calls ``TIDS.mul``,
then checks which indices have been contracted, and finally
contraction operation on data, according to the contracted indices.
"""
data1, tensmul1 = f
data2, tensmul2 = g
components, free, dum = TIDS.mul(tensmul1, tensmul2)
data = _TensorDataLazyEvaluator._contract_ndarray(tensmul1.free, tensmul2.free, data1, data2)
# TODO: do this more efficiently... maybe by just passing an index list
# to .data_product_tensor(...)
return data, TensMul.from_TIDS(S.One, TIDS(components, free, dum))
return reduce(data_mul, zip(data_list, tensmul_list))
def _assign_data_to_tensor_expr(self, key, data):
if isinstance(key, TensAdd):
raise ValueError('cannot assign data to TensAdd')
# here it is assumed that `key` is a `TensMul` instance.
if len(key.components) != 1:
raise ValueError('cannot assign data to TensMul with multiple components')
tensorhead = key.components[0]
newdata = self.data_tensorhead_from_tensmul(data, key, tensorhead)
return tensorhead, newdata
def _check_permutations_on_data(self, tens, data):
import numpy
if isinstance(tens, TensorHead):
rank = tens.rank
generators = tens.symmetry.generators
elif isinstance(tens, Tensor):
rank = tens.rank
generators = tens.components[0].symmetry.generators
elif isinstance(tens, TensorIndexType):
rank = tens.metric.rank
generators = tens.metric.symmetry.generators
# Every generator is a permutation, check that by permuting the array
# by that permutation, the array will be the same, except for a
# possible sign change if the permutation admits it.
for gener in generators:
sign_change = +1 if (gener(rank) == rank) else -1
data_swapped = data
last_data = data
permute_axes = list(map(gener, list(range(rank))))
# the order of a permutation is the number of times to get the
# identity by applying that permutation.
for i in range(gener.order()-1):
data_swapped = numpy.transpose(data_swapped, permute_axes)
# if any value in the difference array is non-zero, raise an error:
if (last_data - sign_change*data_swapped).any():
raise ValueError("Component data symmetry structure error")
last_data = data_swapped
def __setitem__(self, key, value):
"""
Set the components data of a tensor object/expression.
Components data are transformed to the all-contravariant form and stored
with the corresponding ``TensorHead`` object. If a ``TensorHead`` object
cannot be uniquely identified, it will raise an error.
"""
data = _TensorDataLazyEvaluator.parse_data(value)
self._check_permutations_on_data(key, data)
# TensorHead and TensorIndexType can be assigned data directly, while
# TensMul must first convert data to a fully contravariant form, and
# assign it to its corresponding TensorHead single component.
if not isinstance(key, (TensorHead, TensorIndexType)):
key, data = self._assign_data_to_tensor_expr(key, data)
if isinstance(key, TensorHead):
for dim, indextype in zip(data.shape, key.index_types):
if indextype.data is None:
raise ValueError("index type {} has no components data"\
" associated (needed to raise/lower index)".format(indextype))
if indextype.dim is None:
continue
if dim != indextype.dim:
raise ValueError("wrong dimension of ndarray")
self._substitutions_dict[key] = data
def __delitem__(self, key):
del self._substitutions_dict[key]
def __contains__(self, key):
return key in self._substitutions_dict
@staticmethod
def _contract_ndarray(free1, free2, ndarray1, ndarray2):
numpy = import_module('numpy')
def ikey(x):
return x[1:]
free1 = free1[:]
free2 = free2[:]
free1.sort(key=ikey)
free2.sort(key=ikey)
self_free = [_[0] for _ in free1]
axes1 = []
axes2 = []
for jpos, jindex in enumerate(free2):
if -jindex[0] in self_free:
nidx = self_free.index(-jindex[0])
else:
continue
axes1.append(nidx)
axes2.append(jpos)
contracted_ndarray = numpy.tensordot(
ndarray1,
ndarray2,
(axes1, axes2)
)
return contracted_ndarray
@staticmethod
def add_tensor_mul(prod, f, g):
def mul_function():
return _TensorDataLazyEvaluator._contract_ndarray(f.free, g.free, f.data, g.data)
_TensorDataLazyEvaluator._substitutions_dict[prod] = mul_function()
@staticmethod
def add_tensor_add(addition, f, g):
def add_function():
return f.data + g.data
_TensorDataLazyEvaluator._substitutions_dict[addition] = add_function()
def add_metric_data(self, metric, data):
"""
Assign data to the ``metric`` tensor. The metric tensor behaves in an
anomalous way when raising and lowering indices.
A fully covariant metric is the inverse transpose of the fully
contravariant metric (it is meant matrix inverse). If the metric is
symmetric, the transpose is not necessary and mixed
covariant/contravariant metrics are Kronecker deltas.
"""
# hard assignment, data should not be added to `TensorHead` for metric:
# the problem with `TensorHead` is that the metric is anomalous, i.e.
# raising and lowering the index means considering the metric or its
# inverse, this is not the case for other tensors.
self._substitutions_dict_tensmul[metric, True, True] = data
inverse_transpose = self.inverse_transpose_matrix(data)
        # in symmetric spaces, the transpose is the same as the original matrix,
# the full covariant metric tensor is the inverse transpose, so this
# code will be able to handle non-symmetric metrics.
self._substitutions_dict_tensmul[metric, False, False] = inverse_transpose
# now mixed cases, these are identical to the unit matrix if the metric
# is symmetric.
m = Matrix(data)
invt = Matrix(inverse_transpose)
self._substitutions_dict_tensmul[metric, True, False] = m * invt
self._substitutions_dict_tensmul[metric, False, True] = invt * m
@staticmethod
def _flip_index_by_metric(data, metric, pos):
numpy = import_module('numpy')
data = numpy.tensordot(
metric,
data,
(1, pos))
return numpy.rollaxis(data, 0, pos+1)
@staticmethod
def inverse_matrix(ndarray):
m = Matrix(ndarray).inv()
return _TensorDataLazyEvaluator.parse_data(m)
@staticmethod
def inverse_transpose_matrix(ndarray):
m = Matrix(ndarray).inv().T
return _TensorDataLazyEvaluator.parse_data(m)
@staticmethod
def _correct_signature_from_indices(data, indices, free, dum, inverse=False):
"""
Utility function to correct the values inside the components data
ndarray according to whether indices are covariant or contravariant.
It uses the metric matrix to lower values of covariant indices.
"""
numpy = import_module('numpy')
        # change the ndarray values according to the covariance/contravariance of the indices
# use the metric
for i, indx in enumerate(indices):
if not indx.is_up and not inverse:
data = _TensorDataLazyEvaluator._flip_index_by_metric(data, indx._tensortype.data, i)
elif not indx.is_up and inverse:
data = _TensorDataLazyEvaluator._flip_index_by_metric(
data,
_TensorDataLazyEvaluator.inverse_matrix(indx._tensortype.data),
i
)
if len(dum) > 0:
### perform contractions ###
axes1 = []
axes2 = []
for i, indx1 in enumerate(indices):
try:
nd = indices[:i].index(-indx1)
except ValueError:
continue
axes1.append(nd)
axes2.append(i)
for ax1, ax2 in zip(axes1, axes2):
data = numpy.trace(data, axis1=ax1, axis2=ax2)
return data
@staticmethod
def _sort_data_axes(old, new):
numpy = import_module('numpy')
new_data = old.data.copy()
old_free = [i[0] for i in old.free]
new_free = [i[0] for i in new.free]
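# selection-sort ``old_free`` into the order of ``new_free``, mirroring
# every index swap with a corresponding swap of the ndarray axes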
for i in range(len(new_free)):
for j in range(i, len(old_free)):
if old_free[j] == new_free[i]:
old_free[i], old_free[j] = old_free[j], old_free[i]
new_data = numpy.swapaxes(new_data, i, j)
break
return new_data
@staticmethod
def add_rearrange_tensmul_parts(new_tensmul, old_tensmul):
def sorted_compo():
return _TensorDataLazyEvaluator._sort_data_axes(old_tensmul, new_tensmul)
_TensorDataLazyEvaluator._substitutions_dict[new_tensmul] = sorted_compo()
@staticmethod
@doctest_depends_on(modules=('numpy',))
def parse_data(data):
"""
Transform ``data`` to a numpy ndarray. The parameter ``data`` may
contain data in various formats, e.g. nested lists, sympy ``Matrix``,
and so on.
Examples
========
>>> from sympy.tensor.tensor import _TensorDataLazyEvaluator
>>> _TensorDataLazyEvaluator.parse_data([1, 3, -6, 12])
[1 3 -6 12]
>>> _TensorDataLazyEvaluator.parse_data([[1, 2], [4, 7]])
[[1 2]
[4 7]]
"""
numpy = import_module('numpy')
if (numpy is not None) and (not isinstance(data, numpy.ndarray)):
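# a pair ``(callable, shape)`` is forwarded to ``numpy.fromfunction``,
# sympifying every generated entry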
if len(data) == 2 and hasattr(data[0], '__call__'):
def fromfunction_sympify(*x):
return sympify(data[0](*x))
data = numpy.fromfunction(fromfunction_sympify, data[1])
else:
vsympify = numpy.vectorize(sympify)
data = vsympify(numpy.array(data))
return data
_tensor_data_substitution_dict = _TensorDataLazyEvaluator()
class _TensorManager(object):
"""
Class to manage tensor properties.
Notes
=====
Tensors belong to tensor commutation groups; each group has a label
``comm``; there are predefined labels:
``0`` tensors commuting with any other tensor
``1`` tensors anticommuting among themselves
``2`` tensors not commuting, apart from those with ``comm=0``
Other groups can be defined using ``set_comm``; tensors in those
groups commute with those with ``comm=0``; by default they
do not commute with any other group.
"""
def __init__(self):
self._comm_init()
def _comm_init(self):
self._comm = [{} for i in range(3)]
for i in range(3):
self._comm[0][i] = 0
self._comm[i][0] = 0
self._comm[1][1] = 1
self._comm[2][1] = None
self._comm[1][2] = None
self._comm_symbols2i = {0:0, 1:1, 2:2}
self._comm_i2symbol = {0:0, 1:1, 2:2}
@property
def comm(self):
return self._comm
def comm_symbols2i(self, i):
"""
get the commutation group number corresponding to ``i``
``i`` can be a symbol or a number or a string
If ``i`` is not already defined its commutation group number
is set.
"""
if i not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[n][0] = 0
self._comm[0][n] = 0
self._comm_symbols2i[i] = n
self._comm_i2symbol[n] = i
return n
return self._comm_symbols2i[i]
def comm_i2symbol(self, i):
"""
Returns the symbol corresponding to the commutation group number.
"""
return self._comm_i2symbol[i]
def set_comm(self, i, j, c):
"""
set the commutation parameter ``c`` for commutation groups ``i, j``
Parameters
==========
i, j : symbols representing commutation groups
c : group commutation number
Notes
=====
``i, j`` can be symbols, strings or numbers,
apart from ``0, 1`` and ``2`` which are reserved respectively
for commuting, anticommuting tensors and tensors not commuting
with any other group apart from the commuting tensors.
For the remaining cases, use this method to set the commutation rules;
by default ``c=None``.
The group commutation number ``c`` is assigned in correspondence
to the group commutation symbols; it can be
0 commuting
1 anticommuting
None no commutation property
Examples
========
``G`` and ``GH`` do not commute with themselves and commute with
each other; A is commuting.
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead, TensorManager
>>> Lorentz = TensorIndexType('Lorentz')
>>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz)
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> G = tensorhead('G', [Lorentz], [[1]], 'Gcomm')
>>> GH = tensorhead('GH', [Lorentz], [[1]], 'GHcomm')
>>> TensorManager.set_comm('Gcomm', 'GHcomm', 0)
>>> (GH(i1)*G(i0)).canon_bp()
G(i0)*GH(i1)
>>> (G(i1)*G(i0)).canon_bp()
G(i1)*G(i0)
>>> (G(i1)*A(i0)).canon_bp()
A(i0)*G(i1)
"""
if c not in (0, 1, None):
raise ValueError('`c` can assume only the values 0, 1 or None')
if i not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[n][0] = 0
self._comm[0][n] = 0
self._comm_symbols2i[i] = n
self._comm_i2symbol[n] = i
if j not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[0][n] = 0
self._comm[n][0] = 0
self._comm_symbols2i[j] = n
self._comm_i2symbol[n] = j
ni = self._comm_symbols2i[i]
nj = self._comm_symbols2i[j]
self._comm[ni][nj] = c
self._comm[nj][ni] = c
def set_comms(self, *args):
"""
set the commutation group numbers ``c`` for symbols ``i, j``
Parameters
==========
args : sequence of ``(i, j, c)``
"""
for i, j, c in args:
self.set_comm(i, j, c)
def get_comm(self, i, j):
"""
Return the commutation parameter for commutation group numbers ``i, j``
see ``_TensorManager.set_comm``
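Examples
========
A minimal check using only the predefined groups (``0`` commuting,
``1`` anticommuting):
>>> from sympy.tensor.tensor import TensorManager
>>> TensorManager.get_comm(0, 1)
0
>>> TensorManager.get_comm(1, 1)
1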
"""
return self._comm[i].get(j, 0 if i == 0 or j == 0 else None)
def clear(self):
"""
Clear the TensorManager.
"""
self._comm_init()
TensorManager = _TensorManager()
@doctest_depends_on(modules=('numpy',))
class TensorIndexType(Basic):
"""
A TensorIndexType is characterized by its name and its metric.
Parameters
==========
name : name of the tensor type
metric : metric symmetry or metric object or ``None``
dim : dimension, it can be a symbol or an integer or ``None``
eps_dim : dimension of the epsilon tensor
dummy_fmt : name of the head of dummy indices
Attributes
==========
``name``
``metric_name`` : it is 'metric' or metric.name
``metric_antisym``
``metric`` : the metric tensor
``delta`` : ``Kronecker delta``
``epsilon`` : the ``Levi-Civita epsilon`` tensor
``dim``
``eps_dim``
``dummy_fmt``
``data`` : a property to add ``ndarray`` values, to work in a specified basis.
Notes
=====
The ``metric`` parameter can be:
``metric = False`` symmetric metric (in Riemannian geometry)
``metric = True`` antisymmetric metric (for spinor calculus)
``metric = None`` there is no metric
``metric`` can be an object having ``name`` and ``antisym`` attributes.
If there is a metric the metric is used to raise and lower indices.
In the case of antisymmetric metric, the following raising and
lowering conventions will be adopted:
``psi(a) = g(a, b)*psi(-b); chi(-a) = chi(b)*g(-b, -a)``
``g(-a, b) = delta(-a, b); g(b, -a) = -delta(a, -b)``
where ``delta(-a, b) = delta(b, -a)`` is the ``Kronecker delta``
(see ``TensorIndex`` for the conventions on indices).
If there is no metric it is not possible to raise or lower indices;
e.g. the index of the defining representation of ``SU(N)``
is 'covariant' and the conjugate representation is
'contravariant'; for ``N > 2`` they are linearly independent.
``eps_dim`` is by default equal to ``dim``, if the latter is an integer;
else it can be assigned (for use in naive dimensional regularization);
if ``eps_dim`` is not an integer ``epsilon`` is ``None``.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> Lorentz.metric
metric(Lorentz,Lorentz)
Examples with metric components data added, this means it is working on a
fixed basis:
>>> Lorentz.data = [1, -1, -1, -1]
>>> Lorentz
TensorIndexType(Lorentz, 0)
>>> Lorentz.data
[[1 0 0 0]
[0 -1 0 0]
[0 0 -1 0]
[0 0 0 -1]]
"""
def __new__(cls, name, metric=False, dim=None, eps_dim=None,
dummy_fmt=None):
if isinstance(name, string_types):
name = Symbol(name)
obj = Basic.__new__(cls, name, S.One if metric else S.Zero)
obj._name = str(name)
if not dummy_fmt:
obj._dummy_fmt = '%s_%%d' % obj.name
else:
obj._dummy_fmt = '%s_%%d' % dummy_fmt
if metric is None:
obj.metric_antisym = None
obj.metric = None
else:
if metric in (True, False, 0, 1):
metric_name = 'metric'
obj.metric_antisym = metric
else:
metric_name = metric.name
obj.metric_antisym = metric.antisym
sym2 = TensorSymmetry(get_symmetric_group_sgs(2, obj.metric_antisym))
S2 = TensorType([obj]*2, sym2)
obj.metric = S2(metric_name)
obj.metric._matrix_behavior = True
obj._dim = dim
obj._delta = obj.get_kronecker_delta()
obj._eps_dim = eps_dim if eps_dim else dim
obj._epsilon = obj.get_epsilon()
obj._autogenerated = []
return obj
@property
def auto_right(self):
if not hasattr(self, '_auto_right'):
self._auto_right = TensorIndex("auto_right", self)
return self._auto_right
@property
def auto_left(self):
if not hasattr(self, '_auto_left'):
self._auto_left = TensorIndex("auto_left", self)
return self._auto_left
@property
def auto_index(self):
if not hasattr(self, '_auto_index'):
self._auto_index = TensorIndex("auto_index", self)
return self._auto_index
@property
def data(self):
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
# This assignment is a bit controversial, should metric components be assigned
# to the metric only or also to the TensorIndexType object? The advantage here
# is the ability to assign a 1D array and transform it to a 2D diagonal array.
numpy = import_module('numpy')
data = _TensorDataLazyEvaluator.parse_data(data)
if data.ndim > 2:
raise ValueError("data have to be of rank 1 (diagonal metric) or 2.")
if data.ndim == 1:
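# a rank-1 array is interpreted as the diagonal of the metric and is
# promoted to the corresponding square matrix below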
if self.dim is not None:
nda_dim = data.shape[0]
if nda_dim != self.dim:
raise ValueError("Dimension mismatch")
dim = data.shape[0]
newndarray = numpy.zeros((dim, dim), dtype=object)
for i, val in enumerate(data):
newndarray[i, i] = val
data = newndarray
dim1, dim2 = data.shape
if dim1 != dim2:
raise ValueError("Non-square matrix tensor.")
if self.dim is not None:
if self.dim != dim1:
raise ValueError("Dimension mismatch")
_tensor_data_substitution_dict[self] = data
_tensor_data_substitution_dict.add_metric_data(self.metric, data)
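# the Kronecker delta with one contravariant and one covariant index is
# the identity matrix on this basis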
delta = self.get_kronecker_delta()
i1 = TensorIndex('i1', self)
i2 = TensorIndex('i2', self)
delta(i1, -i2).data = _TensorDataLazyEvaluator.parse_data(eye(dim1))
@data.deleter
def data(self):
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
if self.metric in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self.metric]
@property
def name(self):
return self._name
@property
def dim(self):
return self._dim
@property
def delta(self):
return self._delta
@property
def eps_dim(self):
return self._eps_dim
@property
def epsilon(self):
return self._epsilon
@property
def dummy_fmt(self):
return self._dummy_fmt
def get_kronecker_delta(self):
sym2 = TensorSymmetry(get_symmetric_group_sgs(2))
S2 = TensorType([self]*2, sym2)
delta = S2('KD')
delta._matrix_behavior = True
return delta
def get_epsilon(self):
if not isinstance(self._eps_dim, int):
return None
sym = TensorSymmetry(get_symmetric_group_sgs(self._eps_dim, 1))
Sdim = TensorType([self]*self._eps_dim, sym)
epsilon = Sdim('Eps')
return epsilon
def __lt__(self, other):
return self.name < other.name
def __str__(self):
return self.name
__repr__ = __str__
def _components_data_full_destroy(self):
"""
EXPERIMENTAL: do not rely on this API method.
This destroys components data associated to the ``TensorIndexType``, if
any, specifically:
* metric tensor data
* Kronecker tensor data
"""
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def delete_tensmul_data(key):
if key in _tensor_data_substitution_dict._substitutions_dict_tensmul:
del _tensor_data_substitution_dict._substitutions_dict_tensmul[key]
# delete metric data:
delete_tensmul_data((self.metric, True, True))
delete_tensmul_data((self.metric, True, False))
delete_tensmul_data((self.metric, False, True))
delete_tensmul_data((self.metric, False, False))
# delete delta tensor data:
delta = self.get_kronecker_delta()
if delta in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[delta]
@doctest_depends_on(modules=('numpy',))
class TensorIndex(Basic):
"""
Represents an abstract tensor index.
Parameters
==========
name : name of the index, or ``True`` if you want it to be automatically assigned
tensortype : ``TensorIndexType`` of the index
is_up : flag for contravariant index
Attributes
==========
``name``
``tensortype``
``is_up``
Notes
=====
Tensor indices are contracted with the Einstein summation convention.
An index can be in contravariant or in covariant form; in the latter
case it is represented by prepending a ``-`` to the index name.
Dummy indices have a name with head given by ``tensortype._dummy_fmt``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, TensorIndex, TensorSymmetry, TensorType, get_symmetric_group_sgs
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i = TensorIndex('i', Lorentz); i
i
>>> sym1 = TensorSymmetry(*get_symmetric_group_sgs(1))
>>> S1 = TensorType([Lorentz], sym1)
>>> A, B = S1('A,B')
>>> A(i)*B(-i)
A(L_0)*B(-L_0)
If you want the index name to be automatically assigned, just put ``True``
in the ``name`` field, it will be generated using the reserved character
``_`` in front of its name, in order to avoid conflicts with possible
existing indices:
>>> i0 = TensorIndex(True, Lorentz)
>>> i0
_i0
>>> i1 = TensorIndex(True, Lorentz)
>>> i1
_i1
>>> A(i0)*B(-i1)
A(_i0)*B(-_i1)
>>> A(i0)*B(-i0)
A(L_0)*B(-L_0)
"""
def __new__(cls, name, tensortype, is_up=True):
if isinstance(name, string_types):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
elif name is True:
name = "_i{0}".format(len(tensortype._autogenerated))
name_symbol = Symbol(name)
tensortype._autogenerated.append(name_symbol)
else:
raise ValueError("invalid name")
obj = Basic.__new__(cls, name_symbol, tensortype, S.One if is_up else S.Zero)
obj._name = str(name)
obj._tensortype = tensortype
obj._is_up = is_up
return obj
@property
def name(self):
return self._name
@property
def tensortype(self):
return self._tensortype
@property
def is_up(self):
return self._is_up
def _print(self):
s = self._name
if not self._is_up:
s = '-%s' % s
return s
def __lt__(self, other):
return (self._tensortype, self._name) < (other._tensortype, other._name)
def __neg__(self):
t1 = TensorIndex(self._name, self._tensortype,
(not self._is_up))
return t1
def tensor_indices(s, typ):
"""
Returns list of tensor indices given their names and their types
Parameters
==========
s : string of comma separated names of indices
typ : list of ``TensorIndexType`` of the indices
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
"""
if isinstance(s, str):
a = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
tilist = [TensorIndex(i, typ) for i in a]
if len(tilist) == 1:
return tilist[0]
return tilist
@doctest_depends_on(modules=('numpy',))
class TensorSymmetry(Basic):
"""
Monoterm symmetry of a tensor
Parameters
==========
bsgs : tuple ``(base, sgs)`` BSGS of the symmetry of the tensor
Attributes
==========
``base`` : base of the BSGS
``generators`` : generators of the BSGS
``rank`` : rank of the tensor
Notes
=====
A tensor can have an arbitrary monoterm symmetry provided by its BSGS.
Multiterm symmetries, like the cyclic symmetry of the Riemann tensor,
are not covered.
See Also
========
sympy.combinatorics.tensor_can.get_symmetric_group_sgs
Examples
========
Define a symmetric tensor
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, TensorType, get_symmetric_group_sgs
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = TensorSymmetry(get_symmetric_group_sgs(2))
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
"""
def __new__(cls, *args, **kw_args):
if len(args) == 1:
base, generators = args[0]
elif len(args) == 2:
base, generators = args
else:
raise TypeError("bsgs required, either two separate parameters or one tuple")
if not isinstance(base, Tuple):
base = Tuple(*base)
if not isinstance(generators, Tuple):
generators = Tuple(*generators)
obj = Basic.__new__(cls, base, generators, **kw_args)
return obj
@property
def base(self):
return self.args[0]
@property
def generators(self):
return self.args[1]
@property
def rank(self):
return self.args[1][0].size - 2
def tensorsymmetry(*args):
"""
Return a ``TensorSymmetry`` object.
One can represent a tensor with any monoterm slot symmetry group
using a BSGS.
``args`` can be a BSGS
``args[0]`` base
``args[1]`` sgs
Usually tensors are in (direct products of) representations
of the symmetric group;
``args`` can be a list of lists representing the shapes of Young tableaux
Notes
=====
For instance:
``[[1]]`` vector
``[[1]*n]`` symmetric tensor of rank ``n``
``[[n]]`` antisymmetric tensor of rank ``n``
``[[2, 2]]`` monoterm slot symmetry of the Riemann tensor
``[[1],[1]]`` vector*vector
``[[2],[1],[1]]`` (antisymmetric tensor)*vector*vector
Notice that with the shape ``[2, 2]`` we associate only the monoterm
symmetries of the Riemann tensor; this is an abuse of notation,
since the shape ``[2, 2]`` corresponds usually to the irreducible
representation characterized by the monoterm symmetries and by the
cyclic symmetry.
Examples
========
Symmetric tensor using a Young tableau
>>> from sympy.tensor.tensor import TensorIndexType, TensorType, tensorsymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = tensorsymmetry([1, 1])
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
Symmetric tensor using a ``BSGS`` (base, strong generator set)
>>> from sympy.tensor.tensor import TensorSymmetry, get_symmetric_group_sgs
>>> sym2 = tensorsymmetry(*get_symmetric_group_sgs(2))
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
"""
from sympy.combinatorics import Permutation
def tableau2bsgs(a):
if len(a) == 1:
# antisymmetric vector
n = a[0]
bsgs = get_symmetric_group_sgs(n, 1)
else:
if all(x == 1 for x in a):
# symmetric vector
n = len(a)
bsgs = get_symmetric_group_sgs(n)
elif a == [2, 2]:
bsgs = riemann_bsgs
else:
raise NotImplementedError
return bsgs
if not args:
return TensorSymmetry(Tuple(), Tuple(Permutation(1)))
if len(args) == 2 and isinstance(args[1][0], Permutation):
return TensorSymmetry(args)
base, sgs = tableau2bsgs(args[0])
for a in args[1:]:
basex, sgsx = tableau2bsgs(a)
base, sgs = bsgs_direct_product(base, sgs, basex, sgsx)
return TensorSymmetry(Tuple(base, sgs))
@doctest_depends_on(modules=('numpy',))
class TensorType(Basic):
"""
Class of tensor types.
Parameters
==========
index_types : list of ``TensorIndexType`` of the tensor indices
symmetry : ``TensorSymmetry`` of the tensor
Attributes
==========
``index_types``
``symmetry``
``types`` : list of ``TensorIndexType`` without repetitions
Examples
========
Define a symmetric tensor
>>> from sympy.tensor.tensor import TensorIndexType, tensorsymmetry, TensorType
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = tensorsymmetry([1, 1])
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
"""
is_commutative = False
def __new__(cls, index_types, symmetry, **kw_args):
assert symmetry.rank == len(index_types)
obj = Basic.__new__(cls, Tuple(*index_types), symmetry, **kw_args)
return obj
@property
def index_types(self):
return self.args[0]
@property
def symmetry(self):
return self.args[1]
@property
def types(self):
return sorted(set(self.index_types), key=lambda x: x.name)
def __str__(self):
return 'TensorType(%s)' % ([str(x) for x in self.index_types])
def __call__(self, s, comm=0, matrix_behavior=0):
"""
Return a TensorHead object or a list of TensorHead objects.
``s`` name or string of names
``comm``: commutation group number
see ``_TensorManager.set_comm``
Examples
========
Define symmetric tensors ``V``, ``W`` and ``G``, respectively
commuting, anticommuting and with no commutation symmetry
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorsymmetry, TensorType, canon_bp
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> sym2 = tensorsymmetry([1]*2)
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
>>> W = S2('W', 1)
>>> G = S2('G', 2)
>>> canon_bp(V(a, b)*V(-b, -a))
V(L_0, L_1)*V(-L_0, -L_1)
>>> canon_bp(W(a, b)*W(-b, -a))
0
"""
if isinstance(s, str):
names = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
if len(names) == 1:
return TensorHead(names[0], self, comm, matrix_behavior=matrix_behavior)
else:
return [TensorHead(name, self, comm, matrix_behavior=matrix_behavior) for name in names]
def tensorhead(name, typ, sym, comm=0, matrix_behavior=0):
"""
Function generating tensorhead(s).
Parameters
==========
name : name or sequence of names (as in ``symbol``)
typ : index types
sym : same as ``*args`` in ``tensorsymmetry``
comm : commutation group number
see ``_TensorManager.set_comm``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> A = tensorhead('A', [Lorentz]*2, [[1]*2])
>>> A(a, -b)
A(a, -b)
"""
sym = tensorsymmetry(*sym)
S = TensorType(typ, sym)
th = S(name, comm, matrix_behavior=matrix_behavior)
return th
@doctest_depends_on(modules=('numpy',))
class TensorHead(Basic):
r"""
Tensor head of the tensor
Parameters
==========
name : name of the tensor
typ : list of TensorIndexType
comm : commutation group number
Attributes
==========
``name``
``index_types``
``rank``
``types`` : equal to ``typ.types``
``symmetry`` : equal to ``typ.symmetry``
``comm`` : commutation group
Notes
=====
A ``TensorHead`` belongs to a commutation group, defined by a
symbol on number ``comm`` (see ``_TensorManager.set_comm``);
tensors in a commutation group have the same commutation properties;
by default ``comm`` is ``0``, the group of the commuting tensors.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensorhead, TensorType
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> A = tensorhead('A', [Lorentz, Lorentz], [[1],[1]])
Examples with ndarray values; the components data assigned to the
``TensorHead`` object are assumed to be in a fully-contravariant
representation. In case it is necessary to assign components data which
represents the values of a non-fully covariant tensor, see the other
examples.
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> Lorentz.data = [1, -1, -1, -1]
>>> i0, i1 = tensor_indices('i0:2', Lorentz)
>>> A.data = [[j+2*i for j in range(4)] for i in range(4)]
In order to retrieve data, it is necessary to specify abstract indices
enclosed in round brackets, then numerical indices inside square brackets.
>>> A(i0, i1)[0, 0]
0
>>> A(i0, i1)[2, 3] == 3+2*2
True
Notice that square brackets create a valued tensor expression instance:
>>> A(i0, i1)
A(i0, i1)
To view the data, just type:
>>> A.data
[[0 1 2 3]
[2 3 4 5]
[4 5 6 7]
[6 7 8 9]]
Turning to a tensor expression, covariant indices get the corresponding
components data corrected by the metric:
>>> A(i0, -i1).data
[[0 -1 -2 -3]
[2 -3 -4 -5]
[4 -5 -6 -7]
[6 -7 -8 -9]]
>>> A(-i0, -i1).data
[[0 -1 -2 -3]
[-2 3 4 5]
[-4 5 6 7]
[-6 7 8 9]]
while if all indices are contravariant, the ``ndarray`` remains the same
>>> A(i0, i1).data
[[0 1 2 3]
[2 3 4 5]
[4 5 6 7]
[6 7 8 9]]
When all indices are contracted and components data are added to the tensor,
accessing the data will return a scalar, not a numpy object. In fact, numpy
ndarrays are dropped to scalars if they contain only one element.
>>> A(i0, -i0)
A(L_0, -L_0)
>>> A(i0, -i0).data
-18
It is also possible to assign components data to an indexed tensor, i.e. a
tensor with specified covariant and contravariant components. In this
example, the covariant components data of the Electromagnetic tensor are
injected into `A`:
>>> from sympy import symbols
>>> Ex, Ey, Ez, Bx, By, Bz = symbols('E_x E_y E_z B_x B_y B_z')
>>> c = symbols('c', positive=True)
Let's define `F`, an antisymmetric tensor; we have to assign an
antisymmetric matrix to it, because `[[2]]` stands for the Young tableau
representation of an antisymmetric set of two elements:
>>> F = tensorhead('F', [Lorentz, Lorentz], [[2]])
>>> F(-i0, -i1).data = [
... [0, Ex/c, Ey/c, Ez/c],
... [-Ex/c, 0, -Bz, By],
... [-Ey/c, Bz, 0, -Bx],
... [-Ez/c, -By, Bx, 0]]
Now it is possible to retrieve the contravariant form of the Electromagnetic
tensor:
>>> F(i0, i1).data
[[0 -E_x/c -E_y/c -E_z/c]
[E_x/c 0 -B_z B_y]
[E_y/c B_z 0 -B_x]
[E_z/c -B_y B_x 0]]
and the mixed contravariant-covariant form:
>>> F(i0, -i1).data
[[0 E_x/c E_y/c E_z/c]
[E_x/c 0 B_z -B_y]
[E_y/c -B_z 0 B_x]
[E_z/c B_y -B_x 0]]
To convert numpy's ndarray to a sympy matrix, just cast:
>>> from sympy import Matrix
>>> Matrix(F.data)
Matrix([
[ 0, -E_x/c, -E_y/c, -E_z/c],
[E_x/c, 0, -B_z, B_y],
[E_y/c, B_z, 0, -B_x],
[E_z/c, -B_y, B_x, 0]])
Notice also, in this last example, that accessing components data from a
tensor without specifying the indices is equivalent to assuming that all
indices are contravariant.
It is also possible to store symbolic components data inside a tensor, for
example, define a four-momentum-like tensor:
>>> from sympy import symbols
>>> P = tensorhead('P', [Lorentz], [[1]])
>>> E, px, py, pz = symbols('E p_x p_y p_z', positive=True)
>>> P.data = [E, px, py, pz]
The contravariant and covariant components are, respectively:
>>> P(i0).data
[E p_x p_y p_z]
>>> P(-i0).data
[E -p_x -p_y -p_z]
The contraction of a 1-index tensor with itself is usually indicated by a
power of two:
>>> P(i0)**2
E**2 - p_x**2 - p_y**2 - p_z**2
As the power of two is clearly identical to `P_\mu P^\mu`, it is possible to
simply contract the ``TensorHead`` object, without specifying the indices
>>> P**2
E**2 - p_x**2 - p_y**2 - p_z**2
"""
is_commutative = False
def __new__(cls, name, typ, comm=0, matrix_behavior=0, **kw_args):
if isinstance(name, string_types):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
else:
raise ValueError("invalid name")
comm2i = TensorManager.comm_symbols2i(comm)
obj = Basic.__new__(cls, name_symbol, typ, **kw_args)
obj._matrix_behavior = matrix_behavior
obj._name = obj.args[0].name
obj._rank = len(obj.index_types)
obj._types = typ.types
obj._symmetry = typ.symmetry
obj._comm = comm2i
return obj
@property
def name(self):
return self._name
@property
def rank(self):
return self._rank
@property
def types(self):
return self._types[:]
@property
def symmetry(self):
return self._symmetry
@property
def typ(self):
return self.args[1]
@property
def comm(self):
return self._comm
@property
def index_types(self):
return self.args[1].index_types[:]
def __lt__(self, other):
return (self.name, self.index_types) < (other.name, other.index_types)
def commutes_with(self, other):
"""
Returns ``0`` if ``self`` and ``other`` commute, ``1`` if they anticommute.
Returns ``None`` if ``self`` and ``other`` neither commute nor anticommute.
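Examples
========
A minimal sketch: ``A`` is in the default commuting group ``0``, which
commutes with everything, while ``B`` is in the anticommuting group ``1``:
>>> from sympy.tensor.tensor import TensorIndexType, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> B = tensorhead('B', [Lorentz], [[1]], comm=1)
>>> A.commutes_with(B)
0
>>> B.commutes_with(B)
1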
"""
r = TensorManager.get_comm(self._comm, other._comm)
return r
def _print(self):
return '%s(%s)' %(self.name, ','.join([str(x) for x in self.index_types]))
def _check_auto_matrix_indices_in_call(self, *indices):
matrix_behavior_kinds = dict()
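# strategy: every missing trailing slot (with matrix behavior) or slot
# explicitly given as ``True`` is filled with the ``auto_left`` /
# ``auto_right`` index of the corresponding ``TensorIndexType``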
if len(indices) != len(self.index_types):
if not self._matrix_behavior:
raise ValueError('wrong number of indices')
# Take the last one or two missing
# indices as auto-matrix indices:
ldiff = len(self.index_types) - len(indices)
if ldiff > 2:
raise ValueError('wrong number of indices')
if ldiff == 2:
mat_ind = [len(indices), len(indices) + 1]
elif ldiff == 1:
mat_ind = [len(indices)]
not_equal = True
else:
not_equal = False
mat_ind = [i for i, e in enumerate(indices) if e is True]
if mat_ind:
not_equal = True
indices = tuple([_ for _ in indices if _ is not True])
for i, el in enumerate(indices):
if not isinstance(el, TensorIndex):
not_equal = True
break
if el._tensortype != self.index_types[i]:
not_equal = True
break
if not_equal:
for el in mat_ind:
eltyp = self.index_types[el]
if eltyp in matrix_behavior_kinds:
elind = -self.index_types[el].auto_right
matrix_behavior_kinds[eltyp].append(elind)
else:
elind = self.index_types[el].auto_left
matrix_behavior_kinds[eltyp] = [elind]
indices = indices[:el] + (elind,) + indices[el:]
return indices, matrix_behavior_kinds
def __call__(self, *indices, **kw_args):
"""
Returns a tensor with indices.
There is a special behavior in case of indices denoted by ``True``:
they are considered auto-matrix indices, their slots are automatically
filled, and they confer on the tensor the behavior of a matrix or vector
upon multiplication with another tensor containing auto-matrix indices
of the same ``TensorIndexType``. This means indices get summed over the
same way as in matrix multiplication. For matrix behavior, define two
auto-matrix indices, for vector behavior define just one.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> A = tensorhead('A', [Lorentz]*2, [[1]*2])
>>> t = A(a, -b)
>>> t
A(a, -b)
To use the auto-matrix index behavior, just put a ``True`` on the
desired index position.
>>> r = A(True, True)
>>> r
A(auto_left, -auto_right)
Here ``auto_left`` and ``auto_right`` are automatically generated
tensor indices, they are only two for every ``TensorIndexType`` and
can be assigned to just one or two indices of a given type.
Auto-matrix indices can be assigned many times in a tensor, if indices
are of different ``TensorIndexType``
>>> Spinor = TensorIndexType('Spinor', dummy_fmt='S')
>>> B = tensorhead('B', [Lorentz, Lorentz, Spinor, Spinor], [[1]*4])
>>> s = B(True, True, True, True)
>>> s
B(auto_left, -auto_right, auto_left, -auto_right)
Here, ``auto_left`` and ``auto_right`` are repeated twice, but they are
not the same indices, as they refer to different ``TensorIndexType``s.
Auto-matrix indices are automatically contracted upon multiplication,
>>> r*s
A(auto_left, L_0)*B(-L_0, -auto_right, auto_left, -auto_right)
The multiplication algorithm has found an ``auto_right`` index in ``A``
and an ``auto_left`` index in ``B`` referring to the same
``TensorIndexType`` (``Lorentz``), so they have been contracted.
Auto-matrix indices can be accessed from the ``TensorIndexType``:
>>> Lorentz.auto_right
auto_right
>>> Lorentz.auto_left
auto_left
There is a special case, in which the ``True`` parameter is not needed
to declare an auto-matrix index, i.e. when the matrix behavior has been
declared upon ``TensorHead`` construction, in that case the last one or
two tensor indices may be omitted, so that they automatically become
auto-matrix indices:
>>> C = tensorhead('C', [Lorentz, Lorentz], [[1]*2], matrix_behavior=True)
>>> C()
C(auto_left, -auto_right)
"""
indices, matrix_behavior_kinds = self._check_auto_matrix_indices_in_call(*indices)
tensor = Tensor._new_with_dummy_replacement(self, indices, **kw_args)
return tensor
def __pow__(self, other):
if self.data is None:
raise ValueError("No power on abstract tensors.")
numpy = import_module('numpy')
metrics = [_.data for _ in self.args[1].args[0]]
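# contract every slot of the data with its metric; after the loop
# ``marray`` is a 0-dimensional array holding the scalar invariant
# T^{a...}*T_{a...}, of which the requested power of the square root is taken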
marray = self.data
for metric in metrics:
marray = numpy.tensordot(marray, numpy.tensordot(metric, marray, (1, 0)), (0, 0))
pow2 = marray[()]
return pow2 ** (Rational(1, 2) * other)
@property
def data(self):
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def __iter__(self):
return self.data.flatten().__iter__()
def _components_data_full_destroy(self):
"""
EXPERIMENTAL: do not rely on this API method.
Destroy components data associated to the ``TensorHead`` object, this
checks for attached components data, and destroys components data too.
"""
# do not garbage collect Kronecker tensor (it should be done by
# ``TensorIndexType`` garbage collection)
if self.name == "KD":
return
# the data attached to a tensor must be deleted only by the TensorHead
# destructor. If the TensorHead is deleted, it means that there are no
# more instances of that tensor anywhere.
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
@doctest_depends_on(modules=('numpy',))
class TensExpr(Basic):
"""
Abstract base class for tensor expressions
Notes
=====
A tensor expression is an expression formed by tensors;
currently the sums of tensors are distributed.
A ``TensExpr`` can be a ``TensAdd`` or a ``TensMul``.
``TensAdd`` objects are put in canonical form using the Butler-Portugal
algorithm for canonicalization under monoterm symmetries.
``TensMul`` objects are formed by products of component tensors,
and include a coefficient, which is a SymPy expression.
In the internal representation contracted indices are represented
by ``(ipos1, ipos2, icomp1, icomp2)``, where ``icomp1`` is the position
of the component tensor with contravariant index, ``ipos1`` is the
slot which the index occupies in that component tensor.
Contracted indices are therefore nameless in the internal representation.
"""
_op_priority = 11.0
is_commutative = False
def __neg__(self):
return self*S.NegativeOne
def __abs__(self):
raise NotImplementedError
def __add__(self, other):
raise NotImplementedError
def __radd__(self, other):
raise NotImplementedError
def __sub__(self, other):
raise NotImplementedError
def __rsub__(self, other):
raise NotImplementedError
def __mul__(self, other):
raise NotImplementedError
def __rmul__(self, other):
raise NotImplementedError
def __pow__(self, other):
if self.data is None:
raise ValueError("No power without ndarray data.")
numpy = import_module('numpy')
free = self.free
marray = self.data
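# same idea as ``TensorHead.__pow__``: contract each free slot with the
# metric of its index type, leaving a 0-dimensional invariant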
for metric in free:
marray = numpy.tensordot(
marray,
numpy.tensordot(
metric[0]._tensortype.data,
marray,
(1, 0)
),
(0, 0)
)
pow2 = marray[()]
return pow2 ** (Rational(1, 2) * other)
def __rpow__(self, other):
raise NotImplementedError
def __div__(self, other):
raise NotImplementedError
def __rdiv__(self, other):
raise NotImplementedError()
__truediv__ = __div__
__rtruediv__ = __rdiv__
@doctest_depends_on(modules=('numpy',))
def get_matrix(self):
"""
Returns ndarray components data as a matrix, if components data are
available and ndarray dimension does not exceed 2.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensorsymmetry, TensorType
>>> from sympy import ones
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = tensorsymmetry([1]*2)
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> A = S2('A')
The tensor ``A`` is symmetric in its indices, as can be deduced by the
``[1, 1]`` Young tableau when constructing `sym2`. One has to be
careful to assign symmetric component data to ``A``, as the symmetry
properties of data are currently not checked to be compatible with the
defined tensor symmetry.
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> Lorentz.data = [1, -1, -1, -1]
>>> i0, i1 = tensor_indices('i0:2', Lorentz)
>>> A.data = [[j+i for j in range(4)] for i in range(4)]
>>> A(i0, i1).get_matrix()
Matrix([
[0, 1, 2, 3],
[1, 2, 3, 4],
[2, 3, 4, 5],
[3, 4, 5, 6]])
It is possible to perform usual operation on matrices, such as the
matrix multiplication:
>>> A(i0, i1).get_matrix()*ones(4, 1)
Matrix([
[ 6],
[10],
[14],
[18]])
"""
if 0 < self.rank <= 2:
rows = self.data.shape[0]
columns = self.data.shape[1] if self.rank == 2 else 1
if self.rank == 2:
mat_list = []
for i in range(rows):
mat_list.append([])
for j in range(columns):
mat_list[i].append(self[i, j])
else:
mat_list = [None] * rows
for i in range(rows):
mat_list[i] = self[i]
return Matrix(mat_list)
else:
raise NotImplementedError(
"missing multidimensional reduction to matrix.")
def _eval_simplify(self, ratio, measure):
# this is a way to simplify a tensor expression.
# This part walks over all `TensorHead`s appearing in the tensor expr
# and looks for `simplify_this_type`, to specifically act on a subexpr
# containing one type of `TensorHead` instance only:
expr = self
for i in list(set(self.components)):
if hasattr(i, 'simplify_this_type'):
expr = i.simplify_this_type(expr)
# TODO: missing feature, perform metric contraction.
return expr
@doctest_depends_on(modules=('numpy',))
class TensAdd(TensExpr):
"""
Sum of tensors
Parameters
==========
free_args : list of the free indices
Attributes
==========
``args`` : tuple of addends
``rank`` : rank of the tensor
``free_args`` : list of the free indices in sorted order
Notes
=====
Sums of more than one tensor are automatically put in canonical form.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensorhead, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t = p(a) + q(a); t
p(a) + q(a)
>>> t(b)
p(b) + q(b)
Examples with components data added to the tensor expression:
>>> from sympy import eye
>>> Lorentz.data = [1, -1, -1, -1]
>>> a, b = tensor_indices('a, b', Lorentz)
>>> p.data = [2, 3, -2, 7]
>>> q.data = [2, 3, -2, 7]
>>> t = p(a) + q(a); t
p(a) + q(a)
>>> t(b)
p(b) + q(b)
The following both evaluate to 2**2 - 3**2 - (-2)**2 - 7**2 ==> -58
>>> (p(a)*p(-a)).data
-58
>>> p(a)**2
-58
"""
def __new__(cls, *args, **kw_args):
args = [sympify(x) for x in args if x]
args = TensAdd._tensAdd_flatten(args)
if not args:
return S.Zero
if len(args) == 1 and not isinstance(args[0], TensExpr):
return args[0]
# replace auto-matrix indices so that they are the same in all addends
args = TensAdd._tensAdd_check_automatrix(args)
# now check that all addends have the same indices:
TensAdd._tensAdd_check(args)
# if TensAdd has only 1 TensMul element in its `args`:
if len(args) == 1 and isinstance(args[0], TensMul):
obj = Basic.__new__(cls, *args, **kw_args)
return obj
# TODO: should canonicalization be performed by default or not?
# Technically, one may wish to have additions of non-canonicalized
# tensors. This feature should be removed in the future.
# Unfortunately this would require rewriting a lot of tests.
# canonicalize all TensMul
args = [canon_bp(x) for x in args if x]
args = [x for x in args if x]
# if there are no more args (i.e. have cancelled out),
# just return zero:
if not args:
return S.Zero
if len(args) == 1:
return args[0]
# collect canonicalized terms
def sort_key(t):
x = get_tids(t)
return (x.components, x.free, x.dum)
args.sort(key=sort_key)
args = TensAdd._tensAdd_collect_terms(args)
if not args:
return S.Zero
# if there is only one component tensor, return it
if len(args) == 1:
return args[0]
obj = Basic.__new__(cls, *args, **kw_args)
return obj
@staticmethod
def _tensAdd_flatten(args):
# flatten TensAdd, coerce terms which are not tensors to tensors
if not all(isinstance(x, TensExpr) for x in args):
args1 = []
for x in args:
if isinstance(x, TensExpr):
if isinstance(x, TensAdd):
args1.extend(list(x.args))
else:
args1.append(x)
args1 = [x for x in args1 if isinstance(x, TensExpr) and x.coeff]
args2 = [x for x in args if not isinstance(x, TensExpr)]
t1 = TensMul.from_data(Add(*args2), [], [], [])
args = [t1] + args1
a = []
for x in args:
if isinstance(x, TensAdd):
a.extend(list(x.args))
else:
a.append(x)
args = [x for x in a if x.coeff]
return args
@staticmethod
def _tensAdd_check_automatrix(args):
# check that all automatrix indices are the same.
# if there are no addends, just return.
if not args:
return args
# @type auto_left_types: set
auto_left_types = set([])
auto_right_types = set([])
args_auto_left_types = []
args_auto_right_types = []
for i, arg in enumerate(args):
arg_auto_left_types = set([])
arg_auto_right_types = set([])
for index in get_indices(arg):
# @type index: TensorIndex
if index in (index._tensortype.auto_left, -index._tensortype.auto_left):
auto_left_types.add(index._tensortype)
arg_auto_left_types.add(index._tensortype)
if index in (index._tensortype.auto_right, -index._tensortype.auto_right):
auto_right_types.add(index._tensortype)
arg_auto_right_types.add(index._tensortype)
args_auto_left_types.append(arg_auto_left_types)
args_auto_right_types.append(arg_auto_right_types)
# enumerate so that ``args[i]`` below refers to the current addend
for i, (arg, aas_left, aas_right) in enumerate(zip(args, args_auto_left_types, args_auto_right_types)):
missing_left = auto_left_types - aas_left
missing_right = auto_right_types - aas_right
missing_intersection = missing_left & missing_right
for j in missing_intersection:
args[i] *= j.delta(j.auto_left, -j.auto_right)
if missing_left != missing_right:
raise ValueError("cannot determine how to add auto-matrix indices on some args")
return args
@staticmethod
def _tensAdd_check(args):
# check that all addends have the same free indices
indices0 = set([x[0] for x in get_tids(args[0]).free])
list_indices = [set([y[0] for y in get_tids(x).free]) for x in args[1:]]
if not all(x == indices0 for x in list_indices):
raise ValueError('all tensors must have the same indices')
@staticmethod
def _tensAdd_collect_terms(args):
# collect TensMul terms differing at most by their coefficient
a = []
prev = args[0]
prev_coeff = get_coeff(prev)
changed = False
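# ``op`` tracks the last action: 0 means ``x`` was merged into
# ``prev_coeff``; 1 means ``x`` opened a new group and became ``prev``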
for x in args[1:]:
# if x and prev have the same tensor, update the coeff of prev
x_tids = get_tids(x)
prev_tids = get_tids(prev)
if x_tids.components == prev_tids.components \
and x_tids.free == prev_tids.free and x_tids.dum == prev_tids.dum:
prev_coeff = prev_coeff + get_coeff(x)
changed = True
op = 0
else:
# x and prev are different; if not changed, prev has not
# been updated; store it
if not changed:
a.append(prev)
else:
# get a tensor from prev with coeff=prev_coeff and store it
if prev_coeff:
t = TensMul.from_data(prev_coeff, prev_tids.components,
prev_tids.free, prev_tids.dum)
a.append(t)
# move x to prev
op = 1
pprev, prev = prev, x
pprev_coeff, prev_coeff = prev_coeff, get_coeff(x)
changed = False
# in the case op=0 prev was not stored; store it now
# in the case op=1 x was not stored; store it now (as prev)
if op == 0 and prev_coeff:
prev = TensMul.from_data(prev_coeff, prev_tids.components, prev_tids.free, prev_tids.dum)
a.append(prev)
elif op == 1:
a.append(prev)
return a
@property
def rank(self):
return self.args[0].rank
@property
def free_args(self):
return self.args[0].free_args
def __call__(self, *indices):
"""Returns tensor with ordered free indices replaced by ``indices``
Parameters
==========
indices
Examples
========
>>> from sympy import Symbol
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> D = Symbol('D')
>>> Lorentz = TensorIndexType('Lorentz', dim=D, dummy_fmt='L')
>>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz)
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> g = Lorentz.metric
>>> t = p(i0)*p(i1) + g(i0,i1)*q(i2)*q(-i2)
>>> t(i0,i2)
metric(i0, i2)*q(L_0)*q(-L_0) + p(i0)*p(i2)
>>> t(i0,i1) - t(i1,i0)
0
"""
free_args = self.free_args
indices = list(indices)
if [x._tensortype for x in indices] != [x._tensortype for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
index_tuples = list(zip(free_args, indices))
a = [x.func(*x.fun_eval(*index_tuples).args) for x in self.args]
res = TensAdd(*a)
return res
def canon_bp(self):
"""
canonicalize using the Butler-Portugal algorithm for canonicalization
under monoterm symmetries.
"""
args = [x.canon_bp() for x in self.args]
res = TensAdd(*args)
return res
def equals(self, other):
other = sympify(other)
if isinstance(other, TensMul) and other._coeff == 0:
return all(x._coeff == 0 for x in self.args)
if isinstance(other, TensExpr):
if self.rank != other.rank:
return False
if isinstance(other, TensAdd):
if set(self.args) != set(other.args):
return False
else:
return True
t = self - other
if not isinstance(t, TensExpr):
return t == 0
else:
if isinstance(t, TensMul):
return t._coeff == 0
else:
return all(x._coeff == 0 for x in t.args)
def __add__(self, other):
return TensAdd(self, other)
def __radd__(self, other):
return TensAdd(other, self)
def __sub__(self, other):
return TensAdd(self, -other)
def __rsub__(self, other):
return TensAdd(other, -self)
def __mul__(self, other):
return TensAdd(*(x*other for x in self.args))
def __rmul__(self, other):
return self*other
def __div__(self, other):
other = sympify(other)
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
return TensAdd(*(x/other for x in self.args))
def __rdiv__(self, other):
raise ValueError('cannot divide by a tensor')
def __getitem__(self, item):
return self.data[item]
__truediv__ = __div__
__rtruediv__ = __rdiv__
def contract_delta(self, delta):
args = [x.contract_delta(delta) for x in self.args]
t = TensAdd(*args)
return canon_bp(t)
def contract_metric(self, g):
"""
Raise or lower indices with the metric ``g``
Parameters
==========
g : metric
Notes
=====
see the ``TensorIndexType`` docstring for the contraction conventions
"""
args = [contract_metric(x, g) for x in self.args]
t = TensAdd(*args)
return canon_bp(t)
def fun_eval(self, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
Parameters
==========
index_tuples : list of tuples ``(old_index, new_index)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j) + A(i, -j)
>>> t.fun_eval((i, k),(-j, l))
A(k, L_0)*B(l, -L_0) + A(k, l)
"""
args = self.args
args1 = []
for x in args:
y = x.fun_eval(*index_tuples)
args1.append(y)
return TensAdd(*args1)
def substitute_indices(self, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
Parameters
==========
index_tuples : list of tuples ``(old_index, new_index)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j); t
A(i, L_0)*B(-L_0, -j)
>>> t.substitute_indices((i,j), (j, k))
A(j, L_0)*B(-L_0, -k)
"""
args = self.args
args1 = []
for x in args:
y = x.substitute_indices(*index_tuples)
args1.append(y)
return TensAdd(*args1)
def _print(self):
a = []
args = self.args
for x in args:
a.append(str(x))
a.sort()
s = ' + '.join(a)
s = s.replace('+ -', '- ')
return s
@staticmethod
def from_TIDS_list(coeff, tids_list):
"""
Given a list of coefficients and a list of ``TIDS`` objects, construct
a ``TensAdd`` instance, equivalent to the one that would result from
creating single instances of ``TensMul`` and then adding them.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead, TensAdd
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j = tensor_indices('i,j', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> eA = 3*A(i, j)
>>> eB = 2*B(j, i)
>>> t1 = eA._tids
>>> t2 = eB._tids
>>> c1 = eA.coeff
>>> c2 = eB.coeff
>>> TensAdd.from_TIDS_list([c1, c2], [t1, t2])
2*B(i, j) + 3*A(i, j)
If the coefficient parameter is a scalar, then it will be applied
as a coefficient on all ``TIDS`` objects.
>>> TensAdd.from_TIDS_list(4, [t1, t2])
4*A(i, j) + 4*B(i, j)
"""
if not isinstance(coeff, (list, tuple, Tuple)):
coeff = [coeff] * len(tids_list)
tensmul_list = [TensMul.from_TIDS(c, t) for c, t in zip(coeff, tids_list)]
return TensAdd(*tensmul_list)
@property
def data(self):
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
# TODO: check data compatibility with properties of tensor.
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def __iter__(self):
if not self.data:
raise ValueError("No iteration on abstract tensors")
return self.data.flatten().__iter__()
@doctest_depends_on(modules=('numpy',))
class Tensor(TensExpr):
"""
Base tensor class, i.e. this represents a tensor, the single unit to be
put into an expression.
This object is usually created from a ``TensorHead``, by attaching indices
to it. Indices preceded by a minus sign are considered contravariant,
otherwise covariant.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType("Lorentz", dummy_fmt="L")
>>> mu, nu = tensor_indices('mu nu', Lorentz)
>>> A = tensorhead("A", [Lorentz, Lorentz], [[1], [1]])
>>> A(mu, -nu)
A(mu, -nu)
>>> A(mu, -mu)
A(L_0, -L_0)
"""
is_commutative = False
def __new__(cls, tensor_head, indices, **kw_args):
tids = TIDS.from_components_and_indices((tensor_head,), indices)
obj = Basic.__new__(cls, tensor_head, Tuple(*indices), **kw_args)
obj._tids = tids
obj._indices = indices
obj._is_canon_bp = kw_args.get('is_canon_bp', False)
return obj
@staticmethod
def _new_with_dummy_replacement(tensor_head, indices, **kw_args):
tids = TIDS.from_components_and_indices((tensor_head,), indices)
indices = tids.get_indices()
return Tensor(tensor_head, indices, **kw_args)
@property
def is_canon_bp(self):
return self._is_canon_bp
@property
def indices(self):
return self._indices
@property
def free(self):
return self._tids.free
@property
def dum(self):
return self._tids.dum
@property
def rank(self):
return len(self.free)
@property
def free_args(self):
return sorted([x[0] for x in self.free])
def perm2tensor(self, g, canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
return perm2tensor(self, g, canon_bp)
def canon_bp(self):
if self._is_canon_bp:
return self
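# Butler-Portugal canonicalization: encode the tensor as a permutation,
# canonicalize it, then map the canonical permutation back to a tensor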
g, dummies, msym, v = self._tids.canon_args()
can = canonicalize(g, dummies, msym, *v)
if can == 0:
return S.Zero
tensor = self.perm2tensor(can, True)
return tensor
@property
def types(self):
return get_tids(self).components[0]._types
@property
def coeff(self):
return S.One
@property
def component(self):
return self.args[0]
@property
def components(self):
return [self.args[0]]
def split(self):
return [self]
def expand(self):
return self
def sorted_components(self):
return self
def get_indices(self):
"""
Get a list of indices, corresponding to those of the tensor.
"""
return self._tids.get_indices()
def as_base_exp(self):
return self, S.One
def substitute_indices(self, *index_tuples):
return substitute_indices(self, *index_tuples)
def __call__(self, *indices):
"""Returns tensor with ordered free indices replaced by ``indices``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz)
>>> A = tensorhead('A', [Lorentz]*5, [[1]*5])
>>> t = A(i2, i1, -i2, -i3, i4)
>>> t
A(L_0, i1, -L_0, -i3, i4)
>>> t(i1, i2, i3)
A(L_0, i1, -L_0, i2, i3)
"""
free_args = self.free_args
indices = list(indices)
if [x._tensortype for x in indices] != [x._tensortype for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
t = self.fun_eval(*list(zip(free_args, indices)))
# object is rebuilt in order to make sure that all contracted indices
# get recognized as dummies, but only if there are contracted indices.
if len(set(i if i.is_up else -i for i in indices)) != len(indices):
return t.func(*t.args)
return t
def fun_eval(self, *index_tuples):
free = self.free
free1 = []
for j, ipos, cpos in free:
# search j in index_tuples
for i, v in index_tuples:
if i == j:
free1.append((v, ipos, cpos))
break
else:
free1.append((j, ipos, cpos))
return TensMul.from_data(self.coeff, self.components, free1, self.dum)
# TODO: put this into TensExpr?
def __iter__(self):
return self.data.flatten().__iter__()
# TODO: put this into TensExpr?
def __getitem__(self, item):
return self.data[item]
@property
def data(self):
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
# TODO: check data compatibility with properties of tensor.
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
# ``Tensor`` has no ``metric`` attribute of its own; guard the lookup to
# avoid an ``AttributeError`` (this mirrors the ``TensorIndexType`` deleter)
metric = getattr(self, 'metric', None)
if metric is not None and metric in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[metric]
def __mul__(self, other):
if isinstance(other, TensAdd):
return TensAdd(*[self*arg for arg in other.args])
tmul = TensMul(self, other)
return tmul
def __rmul__(self, other):
return TensMul(other, self)
def __div__(self, other):
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
return TensMul(self, S.One/other, is_canon_bp=self.is_canon_bp)
def __rdiv__(self, other):
raise ValueError('cannot divide by a tensor')
def __add__(self, other):
return TensAdd(self, other)
def __radd__(self, other):
return TensAdd(other, self)
def __sub__(self, other):
return TensAdd(self, -other)
def __rsub__(self, other):
return TensAdd(other, -self)
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __neg__(self):
return TensMul(S.NegativeOne, self)
def _print(self):
indices = [str(ind) for ind in self.indices]
component = self.component
if component.rank > 0:
return ('%s(%s)' % (component.name, ', '.join(indices)))
else:
return ('%s' % component.name)
def equals(self, other):
if other == 0:
return self.coeff == 0
other = sympify(other)
if not isinstance(other, TensExpr):
assert not self.components
return S.One == other
def _get_compar_comp(self):
t = self.canon_bp()
r = (t.coeff, tuple(t.components), \
tuple(sorted(t.free)), tuple(sorted(t.dum)))
return r
return _get_compar_comp(self) == _get_compar_comp(other)
def contract_metric(self, metric):
tids, sign = get_tids(self).contract_metric(metric)
return TensMul.from_TIDS(sign, tids)
def contract_delta(self, metric):
return self.contract_metric(metric)
@doctest_depends_on(modules=('numpy',))
class TensMul(TensExpr):
"""
Product of tensors
Parameters
==========
coeff : SymPy coefficient of the tensor
args
Attributes
==========
``components`` : list of ``TensorHead`` of the component tensors
``types`` : list of nonrepeated ``TensorIndexType``
``free`` : list of ``(ind, ipos, icomp)``, see Notes
``dum`` : list of ``(ipos1, ipos2, icomp1, icomp2)``, see Notes
``ext_rank`` : rank of the tensor counting the dummy indices
``rank`` : rank of the tensor
``coeff`` : SymPy coefficient of the tensor
``free_args`` : list of the free indices in sorted order
``is_canon_bp`` : ``True`` if the tensor is in canonical form
Notes
=====
``args[0]`` list of ``TensorHead`` of the component tensors.
``args[1]`` list of ``(ind, ipos, icomp)``
where ``ind`` is a free index, ``ipos`` is the slot position
of ``ind`` in the ``icomp``-th component tensor.
``args[2]`` list of tuples representing dummy indices.
``(ipos1, ipos2, icomp1, icomp2)`` indicates that the contravariant
dummy index is the ``ipos1``-th slot position in the ``icomp1``-th
component tensor; the corresponding covariant index is
in the ``ipos2`` slot position in the ``icomp2``-th component tensor.
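Examples
========
A minimal illustration (contracted indices are renamed to dummies
automatically, as in ``__mul__``):
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1 = tensor_indices('m0,m1', Lorentz)
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t = 2*p(m0)*q(-m0)
>>> t
2*p(L_0)*q(-L_0)
>>> t.coeff
2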
"""
def __new__(cls, *args, **kw_args):
# make sure everything is sympified:
args = [sympify(arg) for arg in args]
# flatten:
args = TensMul._flatten(args)
is_canon_bp = kw_args.get('is_canon_bp', False)
if not any([isinstance(arg, TensExpr) for arg in args]):
tids = TIDS([], [], [])
else:
tids_list = [arg._tids for arg in args if isinstance(arg, (Tensor, TensMul))]
if len(tids_list) == 1:
for arg in args:
if not isinstance(arg, Tensor):
continue
is_canon_bp = kw_args.get('is_canon_bp', arg._is_canon_bp)
tids = reduce(lambda a, b: a*b, tids_list)
if any([isinstance(arg, TensAdd) for arg in args]):
add_args = TensAdd._tensAdd_flatten(args)
return TensAdd(*add_args)
coeff = reduce(lambda a, b: a*b, [S.One] + [arg for arg in args if not isinstance(arg, TensExpr)])
args = tids.get_tensors()
if coeff != 1:
args = [coeff] + args
if len(args) == 1:
return args[0]
obj = Basic.__new__(cls, *args)
obj._types = []
for t in tids.components:
obj._types.extend(t._types)
obj._tids = tids
obj._ext_rank = len(obj._tids.free) + 2*len(obj._tids.dum)
obj._coeff = coeff
obj._is_canon_bp = is_canon_bp
return obj
@staticmethod
def _flatten(args):
a = []
for arg in args:
if isinstance(arg, TensMul):
a.extend(arg.args)
else:
a.append(arg)
return a
@staticmethod
def from_data(coeff, components, free, dum, **kw_args):
tids = TIDS(components, free, dum)
return TensMul.from_TIDS(coeff, tids, **kw_args)
@staticmethod
def from_TIDS(coeff, tids, **kw_args):
return TensMul(coeff, *tids.get_tensors(), **kw_args)
@property
def free_args(self):
return sorted([x[0] for x in self.free])
@property
def components(self):
return self._tids.components[:]
@property
def free(self):
return self._tids.free[:]
@property
def coeff(self):
return self._coeff
@property
def dum(self):
return self._tids.dum[:]
@property
def rank(self):
return len(self.free)
@property
def types(self):
return self._types[:]
def equals(self, other):
if other == 0:
return self.coeff == 0
other = sympify(other)
if not isinstance(other, TensExpr):
assert not self.components
return self._coeff == other
def _get_compar_comp(self):
t = self.canon_bp()
r = (get_coeff(t), tuple(t.components), \
tuple(sorted(t.free)), tuple(sorted(t.dum)))
return r
return _get_compar_comp(self) == _get_compar_comp(other)
def get_indices(self):
"""
Returns the list of indices of the tensor
The indices are listed in the order in which they appear in the
component tensors.
The dummy indices are given a name which does not collide with
the names of the free indices.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t = p(m1)*g(m0,m2)
>>> t.get_indices()
[m1, m0, m2]
"""
return self._tids.get_indices()
def split(self):
"""
Returns a list of tensors whose product is ``self``
Dummy indices contracted among different tensor components
become free indices with the same name as the one used to
represent the dummy indices.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> t = A(a,b)*B(-b,c)
>>> t
A(a, L_0)*B(-L_0, c)
>>> t.split()
[A(a, L_0), B(-L_0, c)]
"""
if self.args == ():
return [self]
splitp = []
res = 1
for arg in self.args:
if isinstance(arg, Tensor):
splitp.append(res*arg)
res = 1
else:
res *= arg
return splitp
def __add__(self, other):
return TensAdd(self, other)
def __radd__(self, other):
return TensAdd(other, self)
def __sub__(self, other):
return TensAdd(self, -other)
def __rsub__(self, other):
return TensAdd(other, -self)
def __mul__(self, other):
"""
Multiply two tensors using Einstein summation convention.
If the two tensors have an index in common, one contravariant
and the other covariant, in their product the indices are summed
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t1 = p(m0)
>>> t2 = q(-m0)
>>> t1*t2
p(L_0)*q(-L_0)
"""
other = sympify(other)
if not isinstance(other, TensExpr):
coeff = self.coeff*other
tmul = TensMul.from_TIDS(coeff, self._tids, is_canon_bp=self._is_canon_bp)
return tmul
if isinstance(other, TensAdd):
return TensAdd(*[self*x for x in other.args])
new_tids = self._tids*other._tids
coeff = self.coeff*other.coeff
tmul = TensMul.from_TIDS(coeff, new_tids)
return tmul
def __rmul__(self, other):
other = sympify(other)
coeff = other*self._coeff
tmul = TensMul.from_TIDS(coeff, self._tids)
return tmul
def __div__(self, other):
other = sympify(other)
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
coeff = self._coeff/other
tmul = TensMul.from_TIDS(coeff, self._tids, is_canon_bp=self._is_canon_bp)
return tmul
def __rdiv__(self, other):
raise ValueError('cannot divide by a tensor')
def __getitem__(self, item):
return self.data[item]
__truediv__ = __div__
__truerdiv__ = __rdiv__
def sorted_components(self):
"""
Returns a tensor with sorted components
calling the corresponding method in a ``TIDS`` object.
"""
new_tids, sign = self._tids.sorted_components()
coeff = -self.coeff if sign == -1 else self.coeff
t = TensMul.from_TIDS(coeff, new_tids)
return t
def perm2tensor(self, g, canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
return perm2tensor(self, g, canon_bp)
def canon_bp(self):
"""
Canonicalize using the Butler-Portugal algorithm for canonicalization
under monoterm symmetries.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> A = tensorhead('A', [Lorentz]*2, [[2]])
>>> t = A(m0,-m1)*A(m1,-m0)
>>> t.canon_bp()
-A(L_0, L_1)*A(-L_0, -L_1)
>>> t = A(m0,-m1)*A(m1,-m2)*A(m2,-m0)
>>> t.canon_bp()
0
"""
if self._is_canon_bp:
return self
if not self.components:
return self
t = self.sorted_components()
g, dummies, msym, v = t._tids.canon_args()
can = canonicalize(g, dummies, msym, *v)
if can == 0:
return S.Zero
tmul = t.perm2tensor(can, True)
return tmul
def contract_delta(self, delta):
t = self.contract_metric(delta)
return t
def contract_metric(self, g):
"""
Raise or lower indices with the metric ``g``
Parameters
==========
g : metric
Notes
=====
see the ``TensorIndexType`` docstring for the contraction conventions
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t = p(m0)*q(m1)*g(-m0, -m1)
>>> t.canon_bp()
metric(L_0, L_1)*p(-L_0)*q(-L_1)
>>> t.contract_metric(g).canon_bp()
p(L_0)*q(-L_0)
"""
tids, sign = get_tids(self).contract_metric(g)
res = TensMul.from_TIDS(sign*self.coeff, tids)
return res
def substitute_indices(self, *index_tuples):
return substitute_indices(self, *index_tuples)
def fun_eval(self, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
``index_tuples`` list of tuples ``(old_index, new_index)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j); t
A(i, L_0)*B(-L_0, -j)
>>> t.fun_eval((i, k),(-j, l))
A(k, L_0)*B(-L_0, l)
"""
free = self.free
free1 = []
for j, ipos, cpos in free:
# search j in index_tuples
for i, v in index_tuples:
if i == j:
free1.append((v, ipos, cpos))
break
else:
free1.append((j, ipos, cpos))
return TensMul.from_data(self.coeff, self.components, free1, self.dum)
def __call__(self, *indices):
"""Returns tensor product with ordered free indices replaced by ``indices``
Examples
========
>>> from sympy import Symbol
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> D = Symbol('D')
>>> Lorentz = TensorIndexType('Lorentz', dim=D, dummy_fmt='L')
>>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t = p(i0)*q(i1)*q(-i1)
>>> t(i1)
p(i1)*q(L_0)*q(-L_0)
"""
free_args = self.free_args
indices = list(indices)
if [x._tensortype for x in indices] != [x._tensortype for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
t = self.fun_eval(*list(zip(free_args, indices)))
# object is rebuilt in order to make sure that all contracted indices
# get recognized as dummies, but only if there are contracted indices.
if len(set(i if i.is_up else -i for i in indices)) != len(indices):
return t.func(*t.args)
return t
def _print(self):
args = self.args
get_str = lambda arg: str(arg) if arg.is_Atom or isinstance(arg, TensExpr) else ("(%s)" % str(arg))
if not args:
# no arguments is equivalent to "1", i.e. TensMul().
# If tensors are constructed correctly, this should never occur.
return "1"
if self.coeff == S.NegativeOne:
# expressions like "-A(a)"
return "-"+"*".join([get_str(arg) for arg in args[1:]])
# prints expressions like "A(a)", "3*A(a)", "(1+x)*A(a)"
return "*".join([get_str(arg) for arg in self.args])
@property
def data(self):
dat = _tensor_data_substitution_dict[self]
if dat is None:
return None
return self.coeff * dat
@data.setter
def data(self, data):
raise ValueError("Not possible to set component data to a tensor expression")
@data.deleter
def data(self):
raise ValueError("Not possible to delete component data to a tensor expression")
def __iter__(self):
if self.data is None:
raise ValueError("No iteration on abstract tensors")
return (self.data.flatten()).__iter__()
def canon_bp(p):
"""
Butler-Portugal canonicalization
"""
if isinstance(p, TensExpr):
return p.canon_bp()
return p
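# Minimal usage sketch (names assumed, mirroring the TensMul.canon_bp doctest
# above): canon_bp(A(m0, -m1)*A(m1, -m0)) gives -A(L_0, L_1)*A(-L_0, -L_1),
# while a non-tensor argument such as canon_bp(7) is returned unchanged.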
def tensor_mul(*a):
"""
product of tensors
"""
if not a:
return TensMul.from_data(S.One, [], [], [])
t = a[0]
for tx in a[1:]:
t = t*tx
return t
def riemann_cyclic_replace(t_r):
"""
replace Riemann tensor with an equivalent expression
``R(m,n,p,q) -> 2/3*R(m,n,p,q) - 1/3*R(m,q,n,p) + 1/3*R(m,p,n,q)``
"""
free = sorted(t_r.free, key=lambda x: x[1])
m, n, p, q = [x[0] for x in free]
t0 = S(2)/3*t_r
t1 = - S(1)/3*t_r.substitute_indices((m,m),(n,q),(p,n),(q,p))
t2 = S(1)/3*t_r.substitute_indices((m,m),(n,p),(p,n),(q,q))
t3 = t0 + t1 + t2
return t3
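# Hypothetical usage (indices assumed): with R = tensorhead('R', [Lorentz]*4,
# [[2, 2]]), riemann_cyclic_replace(R(i, j, k, l)) expands to
# 2/3*R(i, j, k, l) - 1/3*R(i, l, j, k) + 1/3*R(i, k, j, l), matching the
# substitution rule quoted in the docstring.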
def riemann_cyclic(t2):
"""
replace each Riemann tensor with an equivalent expression
satisfying the cyclic identity.
This trick is discussed in the reference guide to Cadabra.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead, riemann_cyclic
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)<|fim▁hole|> 0
"""
if isinstance(t2, (TensMul, Tensor)):
args = [t2]
else:
args = t2.args
a1 = [x.split() for x in args]
a2 = [[riemann_cyclic_replace(tx) for tx in y] for y in a1]
a3 = [tensor_mul(*v) for v in a2]
t3 = TensAdd(*a3)
if not t3:
return t3
else:
return canon_bp(t3)
def get_lines(ex, index_type):
"""
returns ``(lines, traces, rest)`` for an index type,
where ``lines`` is the list of lists of positions forming a matrix line,
``traces`` is the list of lists of traced matrix lines,
``rest`` is the rest of the elements of the tensor.
"""
def _join_lines(a):
i = 0
while i < len(a):
x = a[i]
xend = x[-1]
xstart = x[0]
hit = True
while hit:
hit = False
for j in range(i + 1, len(a)):
if j >= len(a):
break
if a[j][0] == xend:
hit = True
x.extend(a[j][1:])
xend = x[-1]
a.pop(j)
continue
if a[j][0] == xstart:
hit = True
a[i] = list(reversed(a[j][1:])) + x
x = a[i]
xstart = a[i][0]
a.pop(j)
continue
if a[j][-1] == xend:
hit = True
x.extend(reversed(a[j][:-1]))
xend = x[-1]
a.pop(j)
continue
if a[j][-1] == xstart:
hit = True
a[i] = a[j][:-1] + x
x = a[i]
xstart = x[0]
a.pop(j)
continue
i += 1
return a
tids = ex._tids
components = tids.components
dt = {}
for c in components:
if c in dt:
continue
index_types = c.index_types
a = []
for i in range(len(index_types)):
if index_types[i] is index_type:
a.append(i)
if len(a) > 2:
raise ValueError('at most two indices of type %s allowed' % index_type)
if len(a) == 2:
dt[c] = a
dum = tids.dum
lines = []
traces = []
traces1 = []
for p0, p1, c0, c1 in dum:
if components[c0] not in dt:
continue
if c0 == c1:
traces.append([c0])
continue
ta0 = dt[components[c0]]
ta1 = dt[components[c1]]
if p0 not in ta0:
continue
if ta0.index(p0) == ta1.index(p1):
# case gamma(i,s0,-s1) in c0, gamma(j,-s0,s2) in c1;
# to deal with this case one could add to the position
# a flag for transposition;
# one could write [(c0, False), (c1, True)]
raise NotImplementedError
# if p0 == ta0[1] then G in pos c0 is mult on the right by G in c1
# if p0 == ta0[0] then G in pos c1 is mult on the right by G in c0
ta0 = dt[components[c0]]
b0, b1 = (c0, c1) if p0 == ta0[1] else (c1, c0)
lines1 = lines[:]
for line in lines:
if line[-1] == b0:
if line[0] == b1:
n = line.index(min(line))
traces1.append(line)
traces.append(line[n:] + line[:n])
else:
line.append(b1)
break
elif line[0] == b1:
line.insert(0, b0)
break
else:
lines1.append([b0, b1])
lines = [x for x in lines1 if x not in traces1]
lines = _join_lines(lines)
rest = []
for line in lines:
for y in line:
rest.append(y)
for line in traces:
for y in line:
rest.append(y)
rest = [x for x in range(len(components)) if x not in rest]
return lines, traces, rest
def get_indices(t):
if not isinstance(t, TensExpr):
return ()
return t.get_indices()
def get_tids(t):
if isinstance(t, TensExpr):
return t._tids
return TIDS([], [], [])
def get_coeff(t):
if isinstance(t, Tensor):
return S.One
if isinstance(t, TensMul):
return t.coeff
if isinstance(t, TensExpr):
raise ValueError("no coefficient associated to this tensor expression")
return t
def contract_metric(t, g):
if isinstance(t, TensExpr):
return t.contract_metric(g)
return t
def perm2tensor(t, g, canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
if not isinstance(t, TensExpr):
return t
new_tids = get_tids(t).perm2tensor(g, canon_bp)
coeff = get_coeff(t)
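# the last slot of g carries the sign of the permutation: if it was moved
# away from its identity position, the overall coefficient flips sign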
if g[-1] != len(g) - 1:
coeff = -coeff
res = TensMul.from_TIDS(coeff, new_tids, is_canon_bp=canon_bp)
return res
def substitute_indices(t, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
``index_tuples`` list of tuples ``(old_index, new_index)``
Note: this method will neither raise nor lower the indices, it will just replace their symbols.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j); t
A(i, L_0)*B(-L_0, -j)
>>> t.substitute_indices((i,j), (j, k))
A(j, L_0)*B(-L_0, -k)
"""
if not isinstance(t, TensExpr):
return t
free = t.free
free1 = []
for j, ipos, cpos in free:
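# match on name and index type so that i and -i both hit; the sign of the
# replacement then follows the variance of the matched index (the for/else
# keeps j unchanged when nothing matches)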
for i, v in index_tuples:
if i._name == j._name and i._tensortype == j._tensortype:
if i._is_up == j._is_up:
free1.append((v, ipos, cpos))
else:
free1.append((-v, ipos, cpos))
break
else:
free1.append((j, ipos, cpos))
t = TensMul.from_data(t.coeff, t.components, free1, t.dum)
return t<|fim▁end|> | >>> R = tensorhead('R', [Lorentz]*4, [[2, 2]])
>>> t = R(i,j,k,l)*(R(-i,-j,-k,-l) - 2*R(-i,-k,-j,-l))
>>> riemann_cyclic(t) |
<|file_name|>test_scoping.py<|end_file_name|><|fim▁begin|>from unittest.mock import Mock
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import query
from sqlalchemy.orm import relationship
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assert_warns_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class ScopedSessionTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"table1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
Table(
"table2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("someid", None, ForeignKey("table1.id")),
)
def test_basic(self):
table2, table1 = self.tables.table2, self.tables.table1
Session = scoped_session(sa.orm.sessionmaker(testing.db))
class CustomQuery(query.Query):
pass
class SomeObject(fixtures.ComparableEntity):
query = Session.query_property()
class SomeOtherObject(fixtures.ComparableEntity):
query = Session.query_property()
custom_query = Session.query_property(query_cls=CustomQuery)
self.mapper_registry.map_imperatively(
SomeObject,
table1,
properties={"options": relationship(SomeOtherObject)},
)
self.mapper_registry.map_imperatively(SomeOtherObject, table2)
s = SomeObject(id=1, data="hello")
sso = SomeOtherObject()
s.options.append(sso)
Session.add(s)
Session.commit()
Session.refresh(sso)
Session.remove()
eq_(
SomeObject(
id=1, data="hello", options=[SomeOtherObject(someid=1)]
),
Session.query(SomeObject).one(),
)
eq_(
SomeObject(
id=1, data="hello", options=[SomeOtherObject(someid=1)]
),
SomeObject.query.one(),
)
eq_(
SomeOtherObject(someid=1),
SomeOtherObject.query.filter(
SomeOtherObject.someid == sso.someid
).one(),
)
assert isinstance(SomeOtherObject.query, query.Query)
assert not isinstance(SomeOtherObject.query, CustomQuery)
assert isinstance(SomeOtherObject.custom_query, query.Query)
def test_config_errors(self):
Session = scoped_session(sa.orm.sessionmaker())
s = Session() # noqa
assert_raises_message(
sa.exc.InvalidRequestError,
"Scoped session is already present",
Session,
bind=testing.db,
)
assert_warns_message(
sa.exc.SAWarning,
"At least one scoped session is already present. ",
Session.configure,
bind=testing.db,
)
def test_call_with_kwargs(self):
mock_scope_func = Mock()
SessionMaker = sa.orm.sessionmaker()
Session = scoped_session(sa.orm.sessionmaker(), mock_scope_func)
s0 = SessionMaker()
assert s0.autoflush == True
mock_scope_func.return_value = 0
s1 = Session()
assert s1.autoflush == True
assert_raises_message(
sa.exc.InvalidRequestError,
"Scoped session is already present",
Session,
autoflush=False,
)
mock_scope_func.return_value = 1
s2 = Session(autoflush=False)
assert s2.autoflush == False
def test_methods_etc(self):
mock_session = Mock()
mock_session.bind = "the bind"
sess = scoped_session(lambda: mock_session)
sess.add("add")
sess.delete("delete")
sess.get("Cls", 5)
eq_(sess.bind, "the bind")
eq_(
mock_session.mock_calls,
[
mock.call.add("add", _warn=True),
mock.call.delete("delete"),
mock.call.get(
"Cls",
5,
options=None,
populate_existing=False,
with_for_update=None,
identity_token=None,
execution_options=None,
),
],
)
with mock.patch(
"sqlalchemy.orm.session.object_session"
) as mock_object_session:
sess.object_session("foo")
eq_(mock_object_session.mock_calls, [mock.call("foo")])
@testing.combinations(
"style1",
"style2",
"style3",
"style4",
)
def test_get_bind_custom_session_subclass(self, style):
"""test #6285"""
class MySession(Session):
if style == "style1":
def get_bind(self, mapper=None, **kwargs):
return super().get_bind(mapper=mapper, **kwargs)
elif style == "style2":
# this was the workaround for #6285, ensure it continues
# working as well
def get_bind(self, mapper=None, *args, **kwargs):
return super().get_bind(mapper, *args, **kwargs)
elif style == "style3":
# py2k style
def get_bind(self, mapper=None, *args, **kwargs):
return super(MySession, self).get_bind(
mapper, *args, **kwargs
)
elif style == "style4":
# py2k style
def get_bind(self, mapper=None, **kwargs):
return super(MySession, self).get_bind(
mapper=mapper, **kwargs
)
s1 = MySession(testing.db)
is_(s1.get_bind(), testing.db)
ss = scoped_session(sessionmaker(testing.db, class_=MySession))
is_(ss.get_bind(), testing.db)
def test_attributes(self):
expected = [
name
for cls in Session.mro()
for name in vars(cls)<|fim▁hole|> ignore_list = {
"connection_callable",
"transaction",
"in_transaction",
"in_nested_transaction",
"get_transaction",
"get_nested_transaction",
"prepare",
"invalidate",
"bind_mapper",
"bind_table",
"enable_relationship_loading",
"dispatch",
}
SM = scoped_session(sa.orm.sessionmaker(testing.db))
missing = [
name
for name in expected
if not hasattr(SM, name) and name not in ignore_list
]
eq_(missing, [])<|fim▁end|> | if not name.startswith("_")
]
|
<|file_name|>deriving-span-Zero-struct.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This file was auto-generated using 'src/etc/generate-deriving-span-tests.py'
#![feature(struct_variant)]
extern crate rand;<|fim▁hole|>#[deriving(Zero)] //~ ERROR failed to find an implementation
struct Struct {
x: Error //~ ERROR failed to find an implementation
//~^ ERROR failed to find an implementation
//~^^ ERROR type `Error` does not implement any method in scope
}
fn main() {}<|fim▁end|> |
struct Error;
|
<|file_name|>PcepControllerAdapter.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2014 Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.provider.pcep.tunnel.impl;
import org.onosproject.net.DeviceId;
import org.onosproject.pcep.api.PcepController;
import org.onosproject.pcep.api.PcepDpid;
import org.onosproject.pcep.api.PcepLinkListener;
import org.onosproject.pcep.api.PcepSwitch;
import org.onosproject.pcep.api.PcepSwitchListener;
import org.onosproject.pcep.api.PcepTunnel;
import org.onosproject.pcep.api.PcepTunnelListener;
public class PcepControllerAdapter implements PcepController {
@Override
public Iterable<PcepSwitch> getSwitches() {
return null;
}
@Override
public PcepSwitch getSwitch(PcepDpid did) {
return null;
}
@Override
public void addListener(PcepSwitchListener listener) {
}
@Override
public void removeListener(PcepSwitchListener listener) {
}
@Override
public void addLinkListener(PcepLinkListener listener) {
}
@Override
public void removeLinkListener(PcepLinkListener listener) {
}
@Override
public void addTunnelListener(PcepTunnelListener listener) {
}
@Override
public void removeTunnelListener(PcepTunnelListener listener) {
}
@Override
public PcepTunnel applyTunnel(DeviceId srcDid, DeviceId dstDid, long srcPort, long dstPort, long bandwidth,
String name) {
return null;
}
@Override
public Boolean deleteTunnel(String id) {<|fim▁hole|>
@Override
public Boolean updateTunnelBandwidth(String id, long bandwidth) {
return null;
}
@Override
public void getTunnelStatistics(String pcepTunnelId) {
}
}<|fim▁end|> | return null;
} |
<|file_name|>test.py<|end_file_name|><|fim▁begin|>import unittest
import json
import logintc
class TestLoginTCClient(unittest.TestCase):
def set_response(self, method, url, headers, body):
full_url = ''.join(['https://cloud.logintc.com/api', url])
self.responses[(method, full_url)] = (headers, body)
def verify_request(self, method, url, body=None):
full_url = ''.join(['https://cloud.logintc.com/api', url])
if body is not None:
return self.requests[(method, full_url)] == body
else:
return (method, full_url) in self.requests
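# Typical flow in the tests below (descriptive note): prime the fake HTTP
# layer with set_response(...), call the client under test, then assert the
# outgoing request with verify_request(...).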
def setUp(self):
def _mock_request(url, method, headers, body=None):
if body is not None and body != '':
self.requests[(method, url)] = json.loads(body)
else:
self.requests[(method, url)] = None
return self.responses[(method, url)]
self.api_key = 'tZwXzwvdvwFp9oNvRK3ilAs5WZXEwkZ6X0IyexpqjtsDb7POd9x' \
'JNw5JaqJsRJRM'
self.domain_id = 'fa3df768810f0bcb2bfbf0413bfe072e720deb2e'
self.session_id = '45244fcfe80fbbb0c40f3325487c23053591f575'
self.user_id = '649fde0d701f636d90ed979bf032b557e48a87cc'
self.user_username = 'jdoe'
self.user_email = '[email protected]'
self.user_name = 'John Doe'
self.domain_name = 'Cisco ASA'
self.domain_type = 'RADIUS'
self.domain_key_type = 'PIN'
self.organization_name = 'Chrome Stage'
self.token_code = '89hto1p45'
self.client = logintc.LoginTC(self.api_key)
self.client.http.request = _mock_request
self.responses = {}
self.requests = {}
def tearDown(self):
self.responses = {}
self.requests = {}
def test_get_session_500_status_raises_exception(self):
self.set_response('GET',
'/domains/%s/sessions/%s' %
(self.domain_id, self.session_id),
{'status': '500'}, '')
self.assertRaises(logintc.InternalAPIException,
self.client.get_session, self.domain_id,
self.session_id)
def test_get_session(self):
self.set_response('GET',
'/domains/%s/sessions/%s' %
(self.domain_id, self.session_id),
{'status': '200'},
json.dumps({'state': 'pending'}))
res = self.client.get_session(self.domain_id, self.session_id)
self.assertEqual({'state': 'pending'}, res)
def test_create_session_raises_exception(self):
self.set_response('POST',
'/domains/%s/sessions' % self.domain_id,
{'status': '404'},
json.dumps({'errors': [
{'code': 'api.error.notfound.token',
'message': 'No token loaded for user.'}]}))
self.assertRaises(logintc.NoTokenException,
self.client.create_session,
self.domain_id, 'username')
def test_create_session(self):
self.set_response('POST',
'/domains/%s/sessions' % self.domain_id,
{'status': '200'},
json.dumps({'id': self.session_id,
'state': 'pending'}))
res = self.client.create_session(self.domain_id, username='test')
self.assertEqual({'id': self.session_id, 'state': 'pending'}, res)
def test_delete_session(self):
path = '/domains/%s/sessions/%s' % (self.domain_id, self.session_id)
self.set_response('DELETE',
path,
{'status': '200'},
'')
self.client.delete_session(self.domain_id, self.session_id)
self.assertTrue(self.verify_request('DELETE', path))
def test_create_user(self):
self.set_response('POST',
'/users',
{'status': '200'},
json.dumps({'id': self.user_id,
'username': self.user_username,
'email': self.user_email,
'name': self.user_name,
'domains': []
}))
res = self.client.create_user(self.user_username, self.user_email,
self.user_name,)
self.assertEqual({'id': self.user_id,
'username': self.user_username,
'email': self.user_email,
'name': self.user_name,
'domains': []}, res)
def test_get_user(self):
self.set_response('GET',
'/users/%s' % self.user_id,
{'status': '200'},
json.dumps({'id': self.user_id,
'username': self.user_username,
'email': self.user_email,
'name': self.user_name,
'domains': []
}))
res = self.client.get_user(self.user_id)
self.assertEqual({'id': self.user_id,
'username': self.user_username,
'email': self.user_email,
'name': self.user_name,
'domains': []}, res)
def test_update_user(self):
path = '/users/%s' % self.user_id
self.set_response('PUT',
path,
{'status': '200'},
json.dumps({'id': self.user_id,
'username': self.user_username,
'email': '[email protected]',
'name': 'New Name',
'domains': []
}))
res = self.client.update_user(self.user_id, name='New Name',
email='[email protected]')
self.assertEqual({'id': self.user_id,
'username': self.user_username,
'email': '[email protected]',
'name': 'New Name',
'domains': []}, res)
self.assertTrue(self.verify_request('PUT', path, {'name': 'New Name',
'email': '[email protected]'}))
def test_delete_user(self):
path = '/users/%s' % self.user_id
self.set_response('DELETE',
path,
{'status': '200'},
'')
self.client.delete_user(self.user_id)
self.assertTrue(self.verify_request('DELETE', path))
def test_add_domain_user(self):
path = '/domains/%s/users/%s' % (self.domain_id, self.user_id)
self.set_response('PUT',
path,
{'status': '200'},
'')
self.client.add_domain_user(self.domain_id, self.user_id)
self.assertTrue(self.verify_request('PUT', path))
def test_set_domain_users(self):
users = [{'username': "user1",
'email': "[email protected]",
'name': "user one"},
{'username': "user2",
'email': "[email protected]",<|fim▁hole|> self.set_response('PUT',
path,
{'status': '200'},
'')
self.client.set_domain_users(self.domain_id, users)
self.assertTrue(self.verify_request('PUT', path, users))
def test_remove_domain_user(self):
path = '/domains/%s/users/%s' % (self.domain_id, self.user_id)
self.set_response('DELETE',
path,
{'status': '200'},
'')
self.client.remove_domain_user(self.domain_id, self.user_id)
self.assertTrue(self.verify_request('DELETE', path))
def test_create_user_token(self):
self.set_response('PUT',
'/domains/%s/users/%s/token' %
(self.domain_id, self.user_id),
{'status': '200'},
json.dumps({'state': 'pending',
'code': self.token_code}))
res = self.client.create_user_token(self.domain_id, self.user_id)
self.assertEqual({'state': 'pending', 'code': self.token_code}, res)
def test_get_user_token(self):
self.set_response('GET',
'/domains/%s/users/%s/token' %
(self.domain_id, self.user_id),
{'status': '200'},
json.dumps({'state': 'active'}))
res = self.client.get_user_token(self.domain_id, self.user_id)
self.assertEqual({'state': 'active'}, res)
def test_delete_user_token(self):
path = '/domains/%s/users/%s/token' % (self.domain_id, self.user_id)
self.set_response('DELETE',
path,
{'status': '200'},
'')
self.client.delete_user_token(self.domain_id, self.user_id)
self.assertTrue(self.verify_request('DELETE', path))
def test_get_ping(self):
self.set_response('GET',
'/ping',
{'status': '200'},
json.dumps({'status': 'OK'}))
res = self.client.get_ping()
self.assertEqual({'status': 'OK'}, res)
def test_get_organization(self):
self.set_response('GET',
'/organization',
{'status': '200'},
json.dumps({'name': self.organization_name}))
res = self.client.get_organization()
self.assertEqual({'name': self.organization_name}, res)
def test_get_domain(self):
self.set_response('GET',
'/domains/%s' % self.domain_id,
{'status': '200'},
json.dumps({'id': self.domain_id,
'name': self.domain_name,
'type': self.domain_type,
'keyType': self.domain_key_type
}))
res = self.client.get_domain(self.domain_id)
self.assertEqual({'id': self.domain_id,
'name': self.domain_name,
'type': self.domain_type,
'keyType': self.domain_key_type}, res)
def test_get_domain_user(self):
self.set_response('GET',
'/domains/%s/users/%s' % (self.domain_id, self.user_id),
{'status': '200'},
json.dumps({'id': self.user_id,
'username': self.user_username,
'email': self.user_email,
'name': self.user_name,
'domains': ['%s' % self.domain_id]
}))
res = self.client.get_domain_user(self.domain_id, self.user_id)
self.assertEqual({'id': self.user_id,
'username': self.user_username,
'email': self.user_email,
'name': self.user_name,
'domains': ['%s' % self.domain_id]
}, res)
def test_get_domain_users(self):
self.set_response('GET',
'/domains/%s/users?page=1' % self.domain_id,
{'status': '200'},
json.dumps([{'id': self.user_id,
'username': self.user_username,
'email': self.user_email,
'name': self.user_name,
'domains': ['%s' % self.domain_id]
}, {'id': self.user_id,
'username': self.user_username,
'email': self.user_email,
'name': self.user_name,
'domains': ['%s' % self.domain_id]
}]))
res = self.client.get_domain_users(self.domain_id)
self.assertEqual([{'id': self.user_id,
'username': self.user_username,
'email': self.user_email,
'name': self.user_name,
'domains': ['%s' % self.domain_id]
}, {'id': self.user_id,
'username': self.user_username,
'email': self.user_email,
'name': self.user_name,
'domains': ['%s' % self.domain_id]
}], res)
def test_get_domain_image(self):
self.set_response('GET',
'/domains/%s/image' % self.domain_id,
{'status': '200'}, 'Hello World!')
res = self.client.get_domain_image(self.domain_id)
self.assertEqual('Hello World!', res)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | 'name': "user two"}]
path = '/domains/%s/users' % self.domain_id
|
<|file_name|>BaseTreeBusRules.java<|end_file_name|><|fim▁begin|>/* Copyright (C) 2022, Specify Collections Consortium
*
* Specify Collections Consortium, Biodiversity Institute, University of Kansas,
* 1345 Jayhawk Boulevard, Lawrence, Kansas, 66045, USA, [email protected]
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package edu.ku.brc.specify.datamodel.busrules;
import java.awt.Component;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.ItemEvent;
import java.awt.event.ItemListener;
import java.text.DateFormat;
import java.util.List;
import java.util.Set;
import java.util.Vector;
import javax.swing.DefaultComboBoxModel;
import javax.swing.JCheckBox;
import javax.swing.JLabel;
import javax.swing.JOptionPane;
import javax.swing.JTextField;
import javax.swing.SwingUtilities;
import javax.swing.event.ListSelectionEvent;
import javax.swing.event.ListSelectionListener;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import edu.ku.brc.af.core.AppContextMgr;
import edu.ku.brc.af.core.db.DBFieldInfo;
import edu.ku.brc.af.core.db.DBTableIdMgr;
import edu.ku.brc.af.core.db.DBTableInfo;
import edu.ku.brc.af.core.expresssearch.QueryAdjusterForDomain;
import edu.ku.brc.af.ui.forms.FormViewObj;
import edu.ku.brc.af.ui.forms.Viewable;
import edu.ku.brc.af.ui.forms.persist.AltViewIFace.CreationMode;
import edu.ku.brc.af.ui.forms.validation.UIValidator;
import edu.ku.brc.af.ui.forms.validation.ValComboBox;
import edu.ku.brc.af.ui.forms.validation.ValComboBoxFromQuery;
import edu.ku.brc.af.ui.forms.validation.ValTextField;
import edu.ku.brc.dbsupport.DataProviderFactory;
import edu.ku.brc.dbsupport.DataProviderSessionIFace;
import edu.ku.brc.dbsupport.DataProviderSessionIFace.QueryIFace;
import edu.ku.brc.specify.config.SpecifyAppContextMgr;
import edu.ku.brc.specify.conversion.BasicSQLUtils;
import edu.ku.brc.specify.datamodel.CollectionMember;
import edu.ku.brc.specify.datamodel.Discipline;
import edu.ku.brc.specify.datamodel.SpTaskSemaphore;
import edu.ku.brc.specify.datamodel.TreeDefIface;
import edu.ku.brc.specify.datamodel.TreeDefItemIface;
import edu.ku.brc.specify.datamodel.TreeDefItemStandardEntry;
import edu.ku.brc.specify.datamodel.Treeable;
import edu.ku.brc.specify.dbsupport.TaskSemaphoreMgr;
import edu.ku.brc.specify.dbsupport.TaskSemaphoreMgr.USER_ACTION;
import edu.ku.brc.specify.dbsupport.TaskSemaphoreMgrCallerIFace;
import edu.ku.brc.specify.dbsupport.TreeDefStatusMgr;
import edu.ku.brc.specify.treeutils.TreeDataService;
import edu.ku.brc.specify.treeutils.TreeDataServiceFactory;
import edu.ku.brc.specify.treeutils.TreeHelper;
import edu.ku.brc.ui.GetSetValueIFace;
import edu.ku.brc.ui.UIRegistry;
/**
* @author rod
*
* (original author was JDS)
*
* @code_status Alpha
*
* Jan 10, 2008
*
* @param <T>
* @param <D>
* @param <I>
*/
public abstract class BaseTreeBusRules<T extends Treeable<T,D,I>,
D extends TreeDefIface<T,D,I>,
I extends TreeDefItemIface<T,D,I>>
extends AttachmentOwnerBaseBusRules
{
public static final boolean ALLOW_CONCURRENT_FORM_ACCESS = true;
public static final long FORM_SAVE_LOCK_MAX_DURATION_IN_MILLIS = 60000;
private static final Logger log = Logger.getLogger(BaseTreeBusRules.class);
private boolean processedRules = false;
/**
* Constructor.
*
* @param dataClasses a var args list of classes that this business rules implementation handles
*/
public BaseTreeBusRules(Class<?>... dataClasses)
{
super(dataClasses);
}
/* (non-Javadoc)
* @see edu.ku.brc.ui.forms.BaseBusRules#initialize(edu.ku.brc.ui.forms.Viewable)
*/
@Override
public void initialize(Viewable viewableArg)
{
super.initialize(viewableArg);
GetSetValueIFace parentField = (GetSetValueIFace)formViewObj.getControlByName("parent");
Component comp = formViewObj.getControlByName("definitionItem");
if (comp instanceof ValComboBox)
{
final ValComboBox rankComboBox = (ValComboBox)comp;
final JCheckBox acceptedCheckBox = (JCheckBox)formViewObj.getControlByName("isAccepted");
Component apComp = formViewObj.getControlByName("acceptedParent");
final ValComboBoxFromQuery acceptedParentWidget = apComp instanceof ValComboBoxFromQuery ?
(ValComboBoxFromQuery )apComp : null;
if (parentField instanceof ValComboBoxFromQuery)
{
final ValComboBoxFromQuery parentCBX = (ValComboBoxFromQuery)parentField;
if (parentCBX != null && rankComboBox != null)
{
parentCBX.addListSelectionListener(new ListSelectionListener() {
public void valueChanged(ListSelectionEvent e)
{
if (e == null || !e.getValueIsAdjusting())
{
parentChanged(formViewObj, parentCBX, rankComboBox, acceptedCheckBox, acceptedParentWidget);
}
}
});
rankComboBox.getComboBox().addActionListener(new ActionListener(){
@Override
public void actionPerformed(ActionEvent e)
{
rankChanged(formViewObj, parentCBX, rankComboBox, acceptedCheckBox, acceptedParentWidget);
}
});
}
}
if (acceptedCheckBox != null && acceptedParentWidget != null)
{
acceptedCheckBox.addItemListener(new ItemListener()
{
public void itemStateChanged(ItemEvent e)
{
if (acceptedCheckBox.isSelected())
{
acceptedParentWidget.setValue(null, null);
acceptedParentWidget.setChanged(true); // This should be done automatically
acceptedParentWidget.setEnabled(false);
}
else
{
acceptedParentWidget.setEnabled(true);
}
}
});
}
}
}
/**
* @return list of foreign key relationships for purposes of checking
* if a record can be deleted.
* The list contains two entries for each relationship. The first entry
* is the related table name. The second is the name of the foreign key field in the related table.
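* For example, a Taxon subclass might return entries such as
* {"determination", "TaxonID"} (hypothetical names; the pairs are flattened
* into a single array).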
*/
public abstract String[] getRelatedTableAndColumnNames();
/**
* @return list of all foreign key relationships.
* The list contains two entries for each relationship. The first entry
* is the related table name. The second is the name of the foreign key field in the related table.
*/
public String[] getAllRelatedTableAndColumnNames()
{
return getRelatedTableAndColumnNames();
}
/* (non-Javadoc)
* @see edu.ku.brc.af.ui.forms.BaseBusRules#okToEnableDelete(java.lang.Object)
*/
@SuppressWarnings("unchecked")
@Override
public boolean okToEnableDelete(Object dataObj)
{
// This is a little weak and cheesy, but it gets the job done.
// Because both the Tree and Definition want/need to share Business Rules.
String viewName = formViewObj.getView().getName();
if (StringUtils.contains(viewName, "TreeDef"))
{
final I treeDefItem = (I)dataObj;
if (treeDefItem != null && treeDefItem.getTreeDef() != null)
{
return treeDefItem.getTreeDef().isRequiredLevel(treeDefItem.getRankId());
}
}
return super.okToEnableDelete(dataObj);
}
/**
* @param node
* @return
*/
@SuppressWarnings("unchecked")
public boolean okToDeleteNode(T node)
{
if (node.getDefinition() != null && !node.getDefinition().getNodeNumbersAreUpToDate() && !node.getDefinition().isUploadInProgress())
{
//Scary. If nodes are not up to date, tree rules may not work.
//The application should prevent edits to items/trees whose tree numbers are not up to date except while uploading
//workbenches.
throw new RuntimeException(node.getDefinition().getName() + " has out of date node numbers.");
}
if (node.getDefinition() != null && node.getDefinition().isUploadInProgress())
{
//don't think this will ever get called during an upload/upload-undo, but just in case.
return true;
}
Integer id = node.getTreeId();
if (id == null)
{
return true;
}
String[] relationships = getRelatedTableAndColumnNames();
// if the given node can't be deleted, return false
if (!super.okToDelete(relationships, node.getTreeId()))
{
return false;
}
// now check the children
// get a list of all descendent IDs
DataProviderSessionIFace session = null;
List<Integer> childIDs = null;
try
{
session = DataProviderFactory.getInstance().createSession();
String queryStr = "SELECT n.id FROM " + node.getClass().getName() + " n WHERE n.nodeNumber <= :highChild AND n.nodeNumber > :nodeNum ORDER BY n.rankId DESC";
QueryIFace query = session.createQuery(queryStr, false);
query.setParameter("highChild", node.getHighestChildNodeNumber());
query.setParameter("nodeNum", node.getNodeNumber());
childIDs = (List<Integer>)query.list();
} catch (Exception ex)
{
edu.ku.brc.exceptions.ExceptionTracker.getInstance().capture(BaseTreeBusRules.class, ex);
// Error Dialog
ex.printStackTrace();
} finally
{
if (session != null)
{
session.close();
}
}
// if there are no descendent nodes, return true
if (childIDs != null && childIDs.size() == 0)
{
return true;
}
// break the descendant checks up into chunked queries
// This is an arbitrary number. Trial and error will determine a good value. This determines
// the number of IDs that wind up in the "IN" clause of the query run inside okToDelete().
int chunkSize = 250;
int lastRecordChecked = -1;
boolean childrenDeletable = true;
while (lastRecordChecked + 1 < childIDs.size() && childrenDeletable)
{
int startOfChunk = lastRecordChecked + 1;
int endOfChunk = Math.min(lastRecordChecked+1+chunkSize, childIDs.size());
// grabs selected subset, exclusive of the last index
List<Integer> chunk = childIDs.subList(startOfChunk, endOfChunk);
Integer[] idChunk = chunk.toArray(new Integer[1]);
childrenDeletable = super.okToDelete(relationships, idChunk);
lastRecordChecked = endOfChunk - 1;
}
return childrenDeletable;
}
@Override
protected String getExtraWhereColumns(DBTableInfo tableInfo) {
String result = super.getExtraWhereColumns(tableInfo);
if (CollectionMember.class.isAssignableFrom(tableInfo.getClassObj()))
{
Vector<Object> cols = BasicSQLUtils.querySingleCol("select distinct CollectionID from collection "
+ "where DisciplineID = " + AppContextMgr.getInstance().getClassObject(Discipline.class).getId());
if (cols != null)
{
String colList = "";
for (Object col : cols)
{
if (!"".equals(colList))
{
colList += ",";
}
colList += col;
}
if (!"".equals(colList))
{
result = "((" + result + ") or " + tableInfo.getAbbrev() + ".CollectionMemberID in(" + colList + "))";
}
}
}
return result;
}
@SuppressWarnings("unchecked")
protected void rankChanged(final FormViewObj form,
final ValComboBoxFromQuery parentComboBox,
final ValComboBox rankComboBox,
final JCheckBox acceptedCheckBox,
final ValComboBoxFromQuery acceptedParentWidget)
{
if (form.getAltView().getMode() != CreationMode.EDIT)
{
return;
}
//log.debug("form was validated: calling adjustRankComboBoxModel()");
Object objInForm = form.getDataObj();
//log.debug("form data object = " + objInForm);
if (objInForm == null)
{
return;
}
final T formNode = (T)objInForm;
T parent = null;
if (parentComboBox.getValue() instanceof String)
{
// the data is still in the VIEW mode for some reason
log.debug("Form is in mode (" + form.getAltView().getMode() + ") but the parent data is a String");
parentComboBox.getValue();
parent = formNode.getParent();
}
else
{
parent = (T)parentComboBox.getValue();
}
final T theParent = parent;
I rankObj = (I )rankComboBox.getValue();
final int rank = rankObj == null ? -2 : rankObj.getRankId();
SwingUtilities.invokeLater(new Runnable() {
public void run()
{
boolean canSynonymize = false;
if (canAccessSynonymy(formNode, rank))
{
canSynonymize = formNode.getDefinition() != null && formNode.getDefinition()
.getSynonymizedLevel() <= rank
&& formNode.getDescendantCount() == 0;
}
if (acceptedCheckBox != null && acceptedParentWidget != null)
{
acceptedCheckBox.setEnabled(canSynonymize && theParent != null);
if (acceptedCheckBox.isSelected() && acceptedCheckBox.isEnabled())
{
acceptedParentWidget.setValue(null, null);
acceptedParentWidget.setChanged(true); // This should be done automatically
acceptedParentWidget.setEnabled(false);
}
}
form.getValidator().validateForm();
}
});
}
@SuppressWarnings("unchecked")
protected void parentChanged(final FormViewObj form,
final ValComboBoxFromQuery parentComboBox,
final ValComboBox rankComboBox,
final JCheckBox acceptedCheckBox,
final ValComboBoxFromQuery acceptedParentWidget)
{
if (form.getAltView().getMode() != CreationMode.EDIT)
{
return;
}
//log.debug("form was validated: calling adjustRankComboBoxModel()");
Object objInForm = form.getDataObj();
//log.debug("form data object = " + objInForm);
if (objInForm == null)
{
return;
}
final T formNode = (T)objInForm;
// set the contents of this combobox based on the value chosen as the parent
adjustRankComboBoxModel(parentComboBox, rankComboBox, formNode);
T parent = null;
if (parentComboBox.getValue() instanceof String)
{
// the data is still in the VIEW mode for some reason
log.debug("Form is in mode (" + form.getAltView().getMode() + ") but the parent data is a String");
parentComboBox.getValue();
parent = formNode.getParent();
}
else
{
parent = (T)parentComboBox.getValue();
}
// set the tree def for the object being edited by using the parent node's tree def
// set the parent too??? (lookups for the AcceptedParent QueryComboBox need this)
if (parent != null)
{
formNode.setDefinition(parent.getDefinition());
formNode.setParent(parent);
}
SwingUtilities.invokeLater(new Runnable() {
public void run()
{
boolean rnkEnabled = rankComboBox.getComboBox().getModel().getSize() > 0;
rankComboBox.setEnabled(rnkEnabled);
JLabel label = form.getLabelFor(rankComboBox);
if (label != null)
{
label.setEnabled(rnkEnabled);
}
if (rankComboBox.hasFocus() && !rnkEnabled)
{
parentComboBox.requestFocus();
}
rankChanged(formViewObj, parentComboBox, rankComboBox, acceptedCheckBox, acceptedParentWidget);
form.getValidator().validateForm();
}
});
}
/**
* @param parentField
* @param rankComboBox
* @param nodeInForm
*/
@SuppressWarnings("unchecked")
protected void adjustRankComboBoxModel(final GetSetValueIFace parentField,
final ValComboBox rankComboBox,
final T nodeInForm)
{
log.debug("Adjusting the model for the 'rank' combo box in a tree node form");
if (nodeInForm == null)
{
return;
}
log.debug("nodeInForm = " + nodeInForm.getName());
DefaultComboBoxModel<I> model = (DefaultComboBoxModel<I>)rankComboBox.getModel();
model.removeAllElements();
// this is the highest rank the edited item can possibly be
I topItem = null;
// this is the lowest rank the edited item can possibly be
I bottomItem = null;
Object value = parentField.getValue();
T parent = null;
if (value instanceof String)
{
// this happens when the combobox is in view mode, which means it's really a textfield
// in that case, the parent of the node in the form will do, since the user can't change the parents
parent = nodeInForm.getParent();
}
else
{
parent = (T)parentField.getValue();
}
if (parent == null)
{
return;
}
// grab all the def items from just below the parent's item all the way to the next enforced level
// or to the level of the highest ranked child
topItem = parent.getDefinitionItem().getChild();
log.debug("highest valid tree level: " + topItem);
if (topItem == null)
{
// this only happens if a parent was chosen that cannot have children b/c it is at the
// lowest defined level in the tree
log.warn("Chosen node cannot be a parent node. It is at the lowest defined level of the tree.");
return;
}
// find the child with the highest rank and set that child's def item as the bottom of the range
if (!nodeInForm.getChildren().isEmpty())
{
for (T child: nodeInForm.getChildren())
{
if (bottomItem==null || child.getRankId()>bottomItem.getRankId())
{
bottomItem = child.getDefinitionItem().getParent();
}
}
}
log.debug("lowest valid tree level: " + bottomItem);
I item = topItem;
boolean done = false;
while (!done)
{
model.addElement(item);
if (item.getChild()==null || item.getIsEnforced()==Boolean.TRUE || (bottomItem != null && item.getRankId().intValue()==bottomItem.getRankId().intValue()) )
{
done = true;
}
item = item.getChild();
}
if (nodeInForm.getDefinitionItem() != null)
{
I defItem = nodeInForm.getDefinitionItem();
for (int i = 0; i < model.getSize(); ++i)
{
I modelItem = (I)model.getElementAt(i);
if (modelItem.getRankId().equals(defItem.getRankId()))
{
log.debug("setting rank selected value to " + modelItem);
model.setSelectedItem(modelItem);
}
}
// if (model.getIndexOf(defItem) != -1)
// {
// model.setSelectedItem(defItem);
// }
}
else if (model.getSize() == 1)
{
Object defItem = model.getElementAt(0);
log.debug("setting rank selected value to the only available option: " + defItem);
model.setSelectedItem(defItem);
}
}
<|fim▁hole|>
/* (non-Javadoc)
* @see edu.ku.brc.ui.forms.BaseBusRules#afterFillForm(java.lang.Object)
*/
@SuppressWarnings("unchecked")
@Override
public void afterFillForm(final Object dataObj)
{
// This is a little weak and cheesy, but it gets the job done.
// Because both the Tree and Definition want/need to share Business Rules.
String viewName = formViewObj.getView().getName();
if (StringUtils.contains(viewName, "TreeDef"))
{
if (formViewObj.getAltView().getMode() != CreationMode.EDIT)
{
// when we're not in edit mode, we don't need to setup any listeners since the user can't change anything
//log.debug("form is not in edit mode: no special listeners will be attached");
return;
}
if (!StringUtils.contains(viewName, "TreeDefItem"))
{
return;
}
final I nodeInForm = (I)formViewObj.getDataObj();
//disable FullName -related fields if TreeDefItem is used by nodes in the tree
//NOTE: Can remove the edit restriction. Tree rebuilds now update fullname fields. Need to add tree rebuild after fullname def edits.
if (nodeInForm != null && nodeInForm.getTreeDef() != null)
{
// boolean canNOTEditFullNameFlds = nodeInForm.hasTreeEntries();
// if (canNOTEditFullNameFlds)
// {
// ValTextField ftCtrl = (ValTextField )formViewObj.getControlByName("textAfter");
// if (ftCtrl != null)
// {
// ftCtrl.setEnabled(false);
// }
// ftCtrl = (ValTextField )formViewObj.getControlByName("textBefore");
// if (ftCtrl != null)
// {
// ftCtrl.setEnabled(false);
// }
// ftCtrl = (ValTextField )formViewObj.getControlByName("fullNameSeparator");
// if (ftCtrl != null)
// {
// ftCtrl.setEnabled(false);
// }
// ValCheckBox ftBox = (ValCheckBox )formViewObj.getControlByName("isInFullName");
// if (ftBox != null)
// {
// ftBox.setEnabled(false);
// }
// }
if (!viewName.endsWith("TreeDefItem"))
{
return;
}
//disabling editing of name and rank for standard levels.
List<TreeDefItemStandardEntry> stds = nodeInForm.getTreeDef().getStandardLevels();
TreeDefItemStandardEntry stdLevel = null;
for (TreeDefItemStandardEntry std : stds)
{
//if (std.getTitle().equals(nodeInForm.getName()) && std.getRank() == nodeInForm.getRankId())
if (std.getRank() == nodeInForm.getRankId())
{
stdLevel = std;
break;
}
}
if (stdLevel != null)
{
ValTextField nameCtrl = (ValTextField )formViewObj.getControlByName("name");
Component rankCtrl = formViewObj.getControlByName("rankId");
if (nameCtrl != null)
{
nameCtrl.setEnabled(false);
}
if (rankCtrl != null)
{
rankCtrl.setEnabled(false);
}
if (nodeInForm.getTreeDef().isRequiredLevel(stdLevel.getRank()))
{
Component enforcedCtrl = formViewObj.getControlByName("isEnforced");
if (enforcedCtrl != null)
{
enforcedCtrl.setEnabled(false);
}
}
}
}
return;
}
final T nodeInForm = (T) formViewObj.getDataObj();
if (formViewObj.getAltView().getMode() != CreationMode.EDIT)
{
if (nodeInForm != null)
{
//XXX this MAY be necessary due to a bug with TextFieldFromPickListTable??
// TextFieldFromPickListTable.setValue() does nothing because of a null adapter member.
Component comp = formViewObj.getControlByName("definitionItem");
if (comp instanceof JTextField)
{
((JTextField )comp).setText(nodeInForm.getDefinitionItem().getName());
}
}
}
else
{
processedRules = false;
GetSetValueIFace parentField = (GetSetValueIFace) formViewObj
.getControlByName("parent");
Component comp = formViewObj.getControlByName("definitionItem");
if (comp instanceof ValComboBox)
{
final ValComboBox rankComboBox = (ValComboBox) comp;
if (parentField instanceof ValComboBoxFromQuery)
{
final ValComboBoxFromQuery parentCBX = (ValComboBoxFromQuery) parentField;
if (parentCBX != null && rankComboBox != null && nodeInForm != null)
{
parentCBX.registerQueryBuilder(new TreeableSearchQueryBuilder(nodeInForm,
rankComboBox, TreeableSearchQueryBuilder.PARENT));
}
}
if (nodeInForm != null && nodeInForm.getDefinitionItem() != null)
{
// log.debug("node in form already has a set rank: forcing a call to
// adjustRankComboBoxModel()");
UIValidator.setIgnoreAllValidation(this, true);
adjustRankComboBoxModel(parentField, rankComboBox, nodeInForm);
UIValidator.setIgnoreAllValidation(this, false);
}
// TODO: the form system MUST require the accepted parent widget to be
// present if the isAccepted checkbox is present
final JCheckBox acceptedCheckBox = (JCheckBox) formViewObj
.getControlByName("isAccepted");
final ValComboBoxFromQuery acceptedParentWidget = (ValComboBoxFromQuery) formViewObj
.getControlByName("acceptedParent");
if (canAccessSynonymy(nodeInForm))
{
if (acceptedCheckBox != null
&& acceptedParentWidget != null)
{
if (acceptedCheckBox.isSelected() && nodeInForm != null
&& nodeInForm.getDefinition() != null)
{
// disable if necessary
boolean canSynonymize = nodeInForm.getDefinition()
.getSynonymizedLevel() <= nodeInForm
.getRankId()
&& nodeInForm.getDescendantCount() == 0;
acceptedCheckBox.setEnabled(canSynonymize);
}
acceptedParentWidget.setEnabled(!acceptedCheckBox
.isSelected()
&& acceptedCheckBox.isEnabled());
if (acceptedCheckBox.isSelected())
{
acceptedParentWidget.setValue(null, null);
}
if (nodeInForm != null && acceptedParentWidget != null
&& rankComboBox != null)
{
acceptedParentWidget
.registerQueryBuilder(new TreeableSearchQueryBuilder(
nodeInForm,
rankComboBox,
TreeableSearchQueryBuilder.ACCEPTED_PARENT));
}
}
}
else
{
if (acceptedCheckBox != null)
{
acceptedCheckBox.setEnabled(false);
}
if (acceptedParentWidget != null)
{
acceptedParentWidget.setEnabled(false);
}
}
if (parentField instanceof ValComboBoxFromQuery)
{
parentChanged(formViewObj, (ValComboBoxFromQuery) parentField, rankComboBox,
acceptedCheckBox, acceptedParentWidget);
}
}
}
}
/**
* @param tableInfo
*
* @return Select (i.e. everything before where clause) of sqlTemplate
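* For example (illustrative only), for a hypothetical Taxon table this
* produces roughly "select %s1 FROM edu.ku.brc.specify.datamodel.Taxon as tx "
* plus any scoping join snippet appended by QueryAdjusterForDomain.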
*/
protected String getSqlSelectTemplate(final DBTableInfo tableInfo)
{
StringBuilder sb = new StringBuilder();
sb.append("select %s1 FROM "); //$NON-NLS-1$
sb.append(tableInfo.getClassName());
sb.append(" as "); //$NON-NLS-1$
sb.append(tableInfo.getAbbrev());
String joinSnipet = QueryAdjusterForDomain.getInstance().getJoinClause(tableInfo, true, null, false); //arg 2: false means SQL
if (joinSnipet != null)
{
sb.append(' ');
sb.append(joinSnipet);
}
sb.append(' ');
return sb.toString();
}
/**
* @param dataObj
*
* return true if acceptedParent and accepted fields should be enabled on data forms.
*/
@SuppressWarnings("unchecked")
protected boolean canAccessSynonymy(final T dataObj)
{
if (dataObj == null)
{
return false; //??
}
if (dataObj.getChildren().size() > 0)
{
return false;
}
TreeDefItemIface<?,?,?> defItem = dataObj.getDefinitionItem();
if (defItem == null)
{
return false; //???
}
TreeDefIface<?,?,?> def = dataObj.getDefinition();
if (def == null)
{
def = ((SpecifyAppContextMgr )AppContextMgr.getInstance()).getTreeDefForClass((Class<? extends Treeable<?,?,?>> )dataObj.getClass());
}
if (!def.isSynonymySupported())
{
return false;
}
return defItem.getRankId() >= def.getSynonymizedLevel();
}
/**
* @param dataObj
* @param rank
* @return true if the rank is synonymizable according to the relevant TreeDefinition
*
* For use when dataObj's rank has not yet been assigned or updated.
*/
@SuppressWarnings("unchecked")
protected boolean canAccessSynonymy(final T dataObj, final int rank)
{
if (dataObj == null)
{
return false; //??
}
if (dataObj.getChildren().size() > 0)
{
return false;
}
TreeDefIface<?,?,?> def = ((SpecifyAppContextMgr )AppContextMgr.getInstance()).getTreeDefForClass((Class<? extends Treeable<?,?,?>> )dataObj.getClass());
if (!def.isSynonymySupported())
{
return false;
}
return rank >= def.getSynonymizedLevel();
}
/**
* Updates the fullname field of any nodes affected by changes to <code>node</code> that are about
* to be saved to the DB.
*
* @param node the node about to be saved
* @param session the current data provider session
*/
@SuppressWarnings("unchecked")
protected void updateFullNamesIfNecessary(T node, DataProviderSessionIFace session)
{
if (!(node.getDefinition().getDoNodeNumberUpdates() && node.getDefinition().getNodeNumbersAreUpToDate())) {
return;
}
if (node.getTreeId() == null)
{
// this is a new node
// it shouldn't need updating since we set the fullname at creation time
return;
}
boolean updateNodeFullName = false;
boolean updateDescFullNames = false;
// we need a way to determine if the name changed
// load a fresh copy from the DB and get the values needed for comparison
DataProviderSessionIFace tmpSession = DataProviderFactory.getInstance().createSession();
T fromDB = (T)tmpSession.get(node.getClass(), node.getTreeId());
tmpSession.close();
if (fromDB == null)
{
// this node is new and hasn't yet been flushed to the DB, so we don't need to worry about updating fullnames
//return;
fromDB = node;
}
T origParent = fromDB.getParent();
boolean parentChanged = false;
T currentParent = node.getParent();
if ((currentParent == null && origParent != null) || (currentParent != null && origParent == null))
{
// I can't imagine how this would ever happen, but just in case
parentChanged = true;
}
if (currentParent != null && origParent != null && !currentParent.getTreeId().equals(origParent.getTreeId()))
{
// the parent ID changed
parentChanged = true;
}
boolean higherLevelsIncluded = false;
if (parentChanged)
{
higherLevelsIncluded = higherLevelsIncludedInFullname(node);
higherLevelsIncluded |= higherLevelsIncludedInFullname(fromDB);
}
if (parentChanged && higherLevelsIncluded)
{
updateNodeFullName = true;
updateDescFullNames = true;
}
boolean nameChanged = !(fromDB.getName().equals(node.getName()));
boolean rankChanged = !(fromDB.getRankId().equals(node.getRankId()));
if (rankChanged || nameChanged)
{
updateNodeFullName = true;
if (booleanValue(fromDB.getDefinitionItem().getIsInFullName(), false) == true)
{
updateDescFullNames = true;
}
if (booleanValue(node.getDefinitionItem().getIsInFullName(), false) == true)
{
updateDescFullNames = true;
}
} else if (fromDB == node)
{
updateNodeFullName = true;
}
if (updateNodeFullName)
{
if (updateDescFullNames)
{
// this could take a long time
TreeHelper.fixFullnameForNodeAndDescendants(node);
}
else
{
// this should be really fast
String fullname = TreeHelper.generateFullname(node);
node.setFullName(fullname);
}
}
}
protected boolean higherLevelsIncludedInFullname(T node)
{
boolean higherLevelsIncluded = false;
// this doesn't necessarily mean the fullname has to be changed
// if no higher levels are included in the fullname, then nothing needs updating
// so, let's see if higher levels factor into the fullname
T l = node.getParent();
while (l != null)
{
if ((l.getDefinitionItem().getIsInFullName() != null) &&
(l.getDefinitionItem().getIsInFullName().booleanValue() == true))
{
higherLevelsIncluded = true;
break;
}
l = l.getParent();
}
return higherLevelsIncluded;
}
/* (non-Javadoc)
* @see edu.ku.brc.specify.datamodel.busrules.BaseBusRules#beforeSave(java.lang.Object, edu.ku.brc.dbsupport.DataProviderSessionIFace)
*/
@SuppressWarnings("unchecked")
@Override
public void beforeSave(Object dataObj, DataProviderSessionIFace session)
{
super.beforeSave(dataObj, session);
if (dataObj instanceof Treeable)
{
// NOTE: the instanceof check can't check against 'T' since T isn't a class
// this has a SMALL amount of risk to it
T node = (T)dataObj;
if (!node.getDefinition().getNodeNumbersAreUpToDate() && !node.getDefinition().isUploadInProgress())
{
//Scary. If nodes are not up to date, tree rules may not work (actually this one is OK. (for now)).
//The application should prevent edits to items/trees whose tree numbers are not up to date except while uploading
//workbenches.
throw new RuntimeException(node.getDefinition().getName() + " has out of date node numbers.");
}
// set it's fullname
String fullname = TreeHelper.generateFullname(node);
node.setFullName(fullname);
}
}
/* (non-Javadoc)
* @see edu.ku.brc.specify.datamodel.busrules.BaseBusRules#afterSaveCommit(java.lang.Object)
*/
@SuppressWarnings("unchecked")
@Override
public boolean beforeSaveCommit(final Object dataObj, final DataProviderSessionIFace session) throws Exception
{
// PLEASE NOTE!
// If any changes are made to this check to make sure no one (Like GeologicTimePeriod) is overriding this method
// and make the appropriate changes there also.
if (!super.beforeSaveCommit(dataObj, session))
{
return false;
}
boolean success = true;
// compare the dataObj values to the nodeBeforeSave values to determine if a node was moved or added
if (dataObj instanceof Treeable)
{
// NOTE: the instanceof check can't check against 'T' since T isn't a class
// this has a SMALL amount of risk to it
T node = (T)dataObj;
if (!node.getDefinition().getNodeNumbersAreUpToDate() && !node.getDefinition().isUploadInProgress())
{
//Scary. If nodes are not up to date, tree rules may not work.
//The application should prevent edits to items/trees whose tree numbers are not up to date except while uploading
//workbenches.
throw new RuntimeException(node.getDefinition().getName() + " has out of date node numbers.");
}
// if the node doesn't have any assigned node number, it must be new
boolean added = (node.getNodeNumber() == null);
if (node.getDefinition().getDoNodeNumberUpdates() && node.getDefinition().getNodeNumbersAreUpToDate())
{
log.info("Saved tree node was added. Updating node numbers appropriately.");
TreeDataService<T,D,I> dataServ = TreeDataServiceFactory.createService();
if (added)
{
success = dataServ.updateNodeNumbersAfterNodeAddition(node, session);
}
else
{
success = dataServ.updateNodeNumbersAfterNodeEdit(node, session);
}
}
else
{
node.getDefinition().setNodeNumbersAreUpToDate(false);
}
}
return success;
}
/* (non-Javadoc)
* @see edu.ku.brc.af.ui.forms.BaseBusRules#beforeDeleteCommit(java.lang.Object, edu.ku.brc.dbsupport.DataProviderSessionIFace)
*/
/*
* NOTE: If this method is overridden, freeLocks() MUST be called when result is false
* !!
*
*/
@Override
public boolean beforeDeleteCommit(Object dataObj,
DataProviderSessionIFace session) throws Exception
{
if (!super.beforeDeleteCommit(dataObj, session))
{
return false;
}
if (dataObj != null && (formViewObj == null || !StringUtils.contains(formViewObj.getView().getName(), "TreeDef")) &&
BaseTreeBusRules.ALLOW_CONCURRENT_FORM_ACCESS && viewable != null)
{
return getRequiredLocks(dataObj);
}
else
{
return true;
}
}
/* (non-Javadoc)
* @see edu.ku.brc.ui.forms.BaseBusRules#afterDeleteCommit(java.lang.Object)
*/
@SuppressWarnings("unchecked")
@Override
public void afterDeleteCommit(Object dataObj)
{
try
{
if (dataObj instanceof Treeable)
{
// NOTE: the instanceof check can't check against 'T' since T
// isn't a class
// this has a SMALL amount of risk to it
T node = (T) dataObj;
if (!node.getDefinition().getNodeNumbersAreUpToDate()
&& !node.getDefinition().isUploadInProgress())
{
// Scary. If nodes are not up to date, tree rules may not
// work.
// The application should prevent edits to items/trees whose
// tree numbers are not up to date except while uploading
// workbenches.
throw new RuntimeException(node.getDefinition().getName()
+ " has out of date node numbers.");
}
if (node.getDefinition().getDoNodeNumberUpdates()
&& node.getDefinition().getNodeNumbersAreUpToDate())
{
                log.info("A tree node was deleted. Updating node numbers appropriately.");
TreeDataService<T, D, I> dataServ = TreeDataServiceFactory
.createService();
// apparently a refresh() is necessary. node can hold
// obsolete values otherwise.
// Possibly needs to be done for all business rules??
DataProviderSessionIFace session = null;
try
{
session = DataProviderFactory.getInstance()
.createSession();
// rods - 07/28/08 commented out because the node is
// already deleted
// session.refresh(node);
dataServ.updateNodeNumbersAfterNodeDeletion(node,
session);
} catch (Exception ex)
{
edu.ku.brc.exceptions.ExceptionTracker.getInstance()
.capture(BaseTreeBusRules.class, ex);
ex.printStackTrace();
} finally
{
if (session != null)
{
session.close();
}
}
} else
{
node.getDefinition().setNodeNumbersAreUpToDate(false);
}
}
} finally
{
if (BaseTreeBusRules.ALLOW_CONCURRENT_FORM_ACCESS && viewable != null)
{
this.freeLocks();
}
}
}
/**
* Handles the {@link #beforeSave(Object)} method if the passed in {@link Object}
* is an instance of {@link TreeDefItemIface}. The real work of this method is to
     * update the 'fullname' field of all {@link Treeable} objects affected by the changes
* to the passed in {@link TreeDefItemIface}.
*
* @param defItem the {@link TreeDefItemIface} being saved
*/
@SuppressWarnings("unchecked")
protected void beforeSaveTreeDefItem(I defItem)
{
// we need a way to determine if the 'isInFullname' value changed
// load a fresh copy from the DB and get the values needed for comparison
DataProviderSessionIFace tmpSession = DataProviderFactory.getInstance().createSession();
I fromDB = (I)tmpSession.load(defItem.getClass(), defItem.getTreeDefItemId());
tmpSession.close();
DataProviderSessionIFace session = DataProviderFactory.getInstance().createSession();
session.attach(defItem);
boolean changeThisLevel = false;
boolean changeAllDescendants = false;
boolean fromDBIsInFullname = makeNotNull(fromDB.getIsInFullName());
boolean currentIsInFullname = makeNotNull(defItem.getIsInFullName());
if (fromDBIsInFullname != currentIsInFullname)
{
changeAllDescendants = true;
}
// look for changes in the 'textBefore', 'textAfter' or 'fullNameSeparator' fields
String fromDbBeforeText = makeNotNull(fromDB.getTextBefore());
String fromDbAfterText = makeNotNull(fromDB.getTextAfter());
String fromDbSeparator = makeNotNull(fromDB.getFullNameSeparator());
String before = makeNotNull(defItem.getTextBefore());
String after = makeNotNull(defItem.getTextAfter());
String separator = makeNotNull(defItem.getFullNameSeparator());
boolean textFieldChanged = false;
boolean beforeChanged = !before.equals(fromDbBeforeText);
boolean afterChanged = !after.equals(fromDbAfterText);
boolean sepChanged = !separator.equals(fromDbSeparator);
if (beforeChanged || afterChanged || sepChanged)
{
textFieldChanged = true;
}
if (textFieldChanged)
{
if (currentIsInFullname)
{
changeAllDescendants = true;
}
changeThisLevel = true;
}
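        // Three update scenarios follow: this level only, this level plus all descendants, or descendants only.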
if (changeThisLevel && !changeAllDescendants)
{
Set<T> levelNodes = defItem.getTreeEntries();
for (T node: levelNodes)
{
String generated = TreeHelper.generateFullname(node);
node.setFullName(generated);
}
}
else if (changeThisLevel && changeAllDescendants)
{
Set<T> levelNodes = defItem.getTreeEntries();
for (T node: levelNodes)
{
TreeHelper.fixFullnameForNodeAndDescendants(node);
}
}
else if (!changeThisLevel && changeAllDescendants)
{
Set<T> levelNodes = defItem.getTreeEntries();
for (T node: levelNodes)
{
// grab all child nodes and go from there
for (T child: node.getChildren())
{
TreeHelper.fixFullnameForNodeAndDescendants(child);
}
}
}
// else don't change anything
session.close();
}
protected boolean booleanValue(Boolean bool, boolean defaultIfNull)
{
if (bool != null)
{
return bool.booleanValue();
}
return defaultIfNull;
}
/**
* Converts a null string into an empty string. If the provided String is not
* null, it is returned unchanged.
*
* @param s a string
     * @return the string, or "" if null
*/
private String makeNotNull(String s)
{
return (s == null) ? "" : s;
}
/**
* Returns the provided {@link Boolean}, or <code>false</code> if null
*
* @param b the {@link Boolean} to convert to non-null
* @return the provided {@link Boolean}, or <code>false</code> if null
*/
private boolean makeNotNull(Boolean b)
{
return (b == null) ? false : b.booleanValue();
}
/* (non-Javadoc)
* @see edu.ku.brc.ui.forms.BaseBusRules#beforeDelete(java.lang.Object, edu.ku.brc.dbsupport.DataProviderSessionIFace)
*/
@Override
public Object beforeDelete(Object dataObj, DataProviderSessionIFace session)
{
super.beforeDelete(dataObj, session);
if (dataObj instanceof Treeable<?,?,?>)
{
Treeable<?, ?, ?> node = (Treeable<?,?,?> )dataObj;
if (node.getAcceptedParent() != null)
{
node.getAcceptedParent().getAcceptedChildren().remove(node);
node.setAcceptedParent(null);
}
}
return dataObj;
}
/**
* @param parentDataObj
* @param dataObj
* @return
*/
@SuppressWarnings("unchecked")
protected boolean parentHasChildWithSameName(final Object parentDataObj, final Object dataObj)
{
if (dataObj instanceof Treeable<?,?,?>)
{
Treeable<T, D, I> node = (Treeable<T,D,I> )dataObj;
Treeable<T, D, I> parent = parentDataObj == null ? node.getParent() : (Treeable<T, D, I> )parentDataObj;
if (parent != null)
{
//XXX the sql below will only work if all Treeable tables use fields named 'isAccepted' and 'name' to store
//the name and isAccepted properties.
String tblName = DBTableIdMgr.getInstance().getInfoById(node.getTableId()).getName();
String sql = "SELECT count(*) FROM " + tblName + " where isAccepted "
+ "and name = " + BasicSQLUtils.getEscapedSQLStrExpr(node.getName());
if (parent.getTreeId() != null)
{
sql += " and parentid = " + parent.getTreeId();
}
if (node.getTreeId() != null)
{
sql += " and " + tblName + "id != " + node.getTreeId();
}
return BasicSQLUtils.getNumRecords(sql) > 0;
}
}
return false;
}
/**
* @param parentDataObj
* @param dataObj
* @param isExistingObject
* @return
*/
@SuppressWarnings("unchecked")
public STATUS checkForSiblingWithSameName(final Object parentDataObj, final Object dataObj,
final boolean isExistingObject)
{
STATUS result = STATUS.OK;
if (parentHasChildWithSameName(parentDataObj, dataObj))
{
String parentName;
if (parentDataObj == null)
{
parentName = ((Treeable<T,D,I> )dataObj).getParent().getFullName();
}
else
{
parentName = ((Treeable<T,D,I> )parentDataObj).getFullName();
}
boolean saveIt = UIRegistry.displayConfirm(
UIRegistry.getResourceString("BaseTreeBusRules.IDENTICALLY_NAMED_SIBLING_TITLE"),
String.format(UIRegistry.getResourceString("BaseTreeBusRules.IDENTICALLY_NAMED_SIBLING_MSG"),
parentName, ((Treeable<T,D,I> )dataObj).getName()),
UIRegistry.getResourceString("SAVE"),
UIRegistry.getResourceString("CANCEL"),
JOptionPane.QUESTION_MESSAGE);
if (!saveIt)
{
//Adding to reasonList prevents blank "Issue of Concern" popup -
//but causes annoying second "duplicate child" nag.
                reasonList.add(UIRegistry.getResourceString(
                        "BaseTreeBusRules.IDENTICALLY_NAMED_SIBLING")); // XXX i18n
result = STATUS.Error;
}
}
return result;
}
/**
* @param dataObj
* @return OK if required data is present.
*
* Checks for requirements that can't be defined in the database schema.
*/
protected STATUS checkForRequiredFields(Object dataObj)
{
if (dataObj instanceof Treeable<?,?,?>)
{
STATUS result = STATUS.OK;
Treeable<?,?,?> obj = (Treeable<?,?,?> )dataObj;
if (obj.getParent() == null )
{
if (obj.getDefinitionItem() != null && obj.getDefinitionItem().getParent() == null)
{
//it's the root, null parent is OK.
return result;
}
result = STATUS.Error;
DBTableInfo info = DBTableIdMgr.getInstance().getInfoById(obj.getTableId());
DBFieldInfo fld = info.getFieldByColumnName("Parent");
String fldTitle = fld != null ? fld.getTitle() : UIRegistry.getResourceString("PARENT");
reasonList.add(String.format(UIRegistry.getResourceString("GENERIC_FIELD_MISSING"), fldTitle));
}
//check that non-accepted node has an 'AcceptedParent'
            if ((obj.getIsAccepted() == null || !obj.getIsAccepted()) && obj.getAcceptedParent() == null)
{
result = STATUS.Error;
DBTableInfo info = DBTableIdMgr.getInstance().getInfoById(obj.getTableId());
DBFieldInfo fld = info.getFieldByColumnName("AcceptedParent");
String fldTitle = fld != null ? fld.getTitle() : UIRegistry.getResourceString("ACCEPTED");
reasonList.add(String.format(UIRegistry.getResourceString("GENERIC_FIELD_MISSING"), fldTitle));
}
return result;
}
return STATUS.None; //???
}
/* (non-Javadoc)
* @see edu.ku.brc.af.ui.forms.BaseBusRules#processBusinessRules(java.lang.Object, java.lang.Object, boolean)
*/
@Override
public STATUS processBusinessRules(Object parentDataObj, Object dataObj,
boolean isExistingObject)
{
reasonList.clear();
STATUS result = STATUS.OK;
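        // processedRules guards against running these checks twice for the same save.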
if (!processedRules && dataObj instanceof Treeable<?, ?, ?>)
{
result = checkForSiblingWithSameName(parentDataObj, dataObj, isExistingObject);
if (result == STATUS.OK)
{
result = checkForRequiredFields(dataObj);
}
if (result == STATUS.OK)
{
processedRules = true;
}
}
return result;
}
/* (non-Javadoc)
* @see edu.ku.brc.af.ui.forms.BaseBusRules#isOkToSave(java.lang.Object, edu.ku.brc.dbsupport.DataProviderSessionIFace)
*/
/*
* NOTE: If this method is overridden, freeLocks() MUST be called when result is false
* !!
*
*/
@Override
public boolean isOkToSave(Object dataObj, DataProviderSessionIFace session)
{
boolean result = super.isOkToSave(dataObj, session);
if (result && dataObj != null && !StringUtils.contains(formViewObj.getView().getName(), "TreeDef")
&& BaseTreeBusRules.ALLOW_CONCURRENT_FORM_ACCESS)
{
if (!getRequiredLocks(dataObj))
{
result = false;
reasonList.add(getUnableToLockMsg());
}
}
return result;
}
/**
     * @return true if locks were acquired.
*
* Locks necessary tables prior to a save.
* Only used when ALLOW_CONCURRENT_FORM_ACCESS is true.
*/
protected boolean getRequiredLocks(Object dataObj)
{
TreeDefIface<?,?,?> treeDef = ((Treeable<?,?,?>)dataObj).getDefinition();
boolean result = !TreeDefStatusMgr.isRenumberingNodes(treeDef) && TreeDefStatusMgr.isNodeNumbersAreUpToDate(treeDef);
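        // If a background renumbering looks to be in progress, wait briefly and re-check once before giving up.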
if (!result) {
try {
Thread.sleep(1500);
result = !TreeDefStatusMgr.isRenumberingNodes(treeDef) && TreeDefStatusMgr.isNodeNumbersAreUpToDate(treeDef);
} catch (Exception e) {
result = false;
}
}
if (result) {
TaskSemaphoreMgr.USER_ACTION r = TaskSemaphoreMgr.lock(getFormSaveLockTitle(), getFormSaveLockName(), "save",
TaskSemaphoreMgr.SCOPE.Discipline, false, new TaskSemaphoreMgrCallerIFace(){
/* (non-Javadoc)
* @see edu.ku.brc.specify.dbsupport.TaskSemaphoreMgrCallerIFace#resolveConflict(edu.ku.brc.specify.datamodel.SpTaskSemaphore, boolean, java.lang.String)
*/
@Override
public USER_ACTION resolveConflict(
SpTaskSemaphore semaphore,
boolean previouslyLocked, String prevLockBy)
{
if (System.currentTimeMillis() - semaphore.getLockedTime().getTime() > FORM_SAVE_LOCK_MAX_DURATION_IN_MILLIS) {
//something is clearly wrong with the lock. Ignore it and re-use it. It will be cleared when save succeeds.
log.warn("automatically overriding expired " + getFormSaveLockTitle() + " lock set by " +
prevLockBy + " at " + DateFormat.getDateTimeInstance().format(semaphore.getLockedTime()));
return USER_ACTION.OK;
} else {
return USER_ACTION.Error;
}
}
}, false);
result = r == TaskSemaphoreMgr.USER_ACTION.OK;
}
return result;
}
/**
* @return the class for the generic parameter <T>
*/
protected abstract Class<?> getNodeClass();
/**
* @return the title for the form save lock.
*/
protected String getFormSaveLockTitle()
{
return String.format(UIRegistry.getResourceString("BaseTreeBusRules.SaveLockTitle"), getNodeClass().getSimpleName());
}
/**
* @return the name for the form save lock.
*/
protected String getFormSaveLockName()
{
return getNodeClass().getSimpleName() + "Save";
}
/**
* @return localized message to display in case of failure to lock for saving.
*/
protected String getUnableToLockMsg()
{
return UIRegistry.getResourceString("BaseTreeBusRules.UnableToLockForSave");
}
/**
* Free locks acquired for saving.
*/
protected void freeLocks()
{
TaskSemaphoreMgr.unlock(getFormSaveLockTitle(), getFormSaveLockName(), TaskSemaphoreMgr.SCOPE.Discipline);
}
/* (non-Javadoc)
* @see edu.ku.brc.af.ui.forms.BaseBusRules#afterSaveCommit(java.lang.Object, edu.ku.brc.dbsupport.DataProviderSessionIFace)
*/
@Override
public boolean afterSaveCommit(Object dataObj,
DataProviderSessionIFace session)
{
        boolean result = true;
        if (!super.afterSaveCommit(dataObj, session))
        {
            result = false;
        }
if (BaseTreeBusRules.ALLOW_CONCURRENT_FORM_ACCESS && viewable != null)
{
freeLocks();
}
return result;
}
/* (non-Javadoc)
* @see edu.ku.brc.af.ui.forms.BaseBusRules#afterSaveFailure(java.lang.Object, edu.ku.brc.dbsupport.DataProviderSessionIFace)
*/
@Override
public void afterSaveFailure(Object dataObj,
DataProviderSessionIFace session)
{
super.afterSaveFailure(dataObj, session);
if (BaseTreeBusRules.ALLOW_CONCURRENT_FORM_ACCESS && viewable != null)
{
freeLocks();
}
}
/* (non-Javadoc)
* @see edu.ku.brc.af.ui.forms.BaseBusRules#processBusinessRules(java.lang.Object)
*/
@Override
public STATUS processBusinessRules(Object dataObj) {
STATUS result = STATUS.OK;
if (!processedRules)
{
result = super.processBusinessRules(dataObj);
if (result == STATUS.OK)
{
result = checkForSiblingWithSameName(null, dataObj, false);
}
if (result == STATUS.OK)
{
result = checkForRequiredFields(dataObj);
}
}
else
{
processedRules = false;
}
return result;
}
}<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from __future__ import print_function<|fim▁hole|>from .patchpipette import PatchPipette<|fim▁end|> | |
<|file_name|>cs_CZ.js<|end_file_name|><|fim▁begin|>{
"": {
"domain": "ckan",
"lang": "cs_CZ",
"plural-forms": "nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"
},
"An Error Occurred": [
null,
"Nastala chyba"
],
"Are you sure you want to perform this action?": [
null,
"Jste si jistí, že chcete provést tuto akci?"
],
"Cancel": [
null,
"Zrušit"
],
"Confirm": [
null,
"Potvrdit"
],
"Edit": [
null,
"Upravit"
],
"Failed to load data API information": [
null,
"Pokus o získání informací pomocí API selhal"
],
"Follow": [
null,
"Sledovat"
],
"Hide": [
null,
"Skrýt"
],
"Image": [
null,
"Obrázek"
],
"Input is too short, must be at least one character": [
null,
"Zadaný vstup je příliš krátký, musíte zadat alespoň jeden znak"
],
"Link": [
null,
"Odkaz"
],
"Link to a URL on the internet (you can also link to an API)": [
null,
"Odkaz na internetovou URL adresu (můžete také zadat odkaz na API)"
],
"Loading...": [
null,
"Nahrávám ..."
],
"No matches found": [
null,
"Nenalezena žádná shoda"
],
"Please Confirm Action": [
null,
"Prosím potvrďte akci"
],
"Remove": [
null,
"Odstranit"
],
"Reorder resources": [
null,
"Změnit pořadí zdrojů"
],
"Reset this": [
null,
"Resetovat"
],
"Resource uploaded": [
null,
"Zdroj nahrán"
],
"Save order": [
null,
"Uložit pořadí"
],
"Saving...": [
null,
"Ukládám..."
],
"Show more": [
null,
"Ukázat více"
],
"Start typing…": [
null,
"Začněte psát..."
],
"There are unsaved modifications to this form": [
null,
"Tento formulář obsahuje neuložené změny"
],
"There is no API data to load for this resource": [
null,
"Tento zdroj neobsahuje žádná data, která lze poskytnou přes API"
],
"URL": [
null,
"URL"
],
"Unable to authenticate upload": [
null,
"Nastala chyba autentizace při nahrávání dat"
],
"Unable to get data for uploaded file": [
null,
"Nelze získat data z nahraného souboru"<|fim▁hole|> "Nelze nahrát soubor"
],
"Unfollow": [
null,
"Přestat sledovat"
],
"Upload": [
null,
"Nahrát"
],
"Upload a file": [
null,
"Nahrát soubor"
],
"Upload a file on your computer": [
null,
"Nahrát soubor na Váš počítač"
],
"You are uploading a file. Are you sure you want to navigate away and stop this upload?": [
null,
"Právě nahráváte soubor. Jste si opravdu jistí, že chcete tuto stránku opustit a ukončit tak nahrávání?"
],
"show less": [
null,
"ukázat méně"
],
"show more": [
null,
"ukázat více"
]
}<|fim▁end|> | ],
"Unable to upload file": [
null, |
<|file_name|>multicol.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! CSS Multi-column layout http://dev.w3.org/csswg/css-multicol/
#![deny(unsafe_code)]
use ServoArc;
use app_units::Au;
use block::BlockFlow;
use context::LayoutContext;
use display_list_builder::{DisplayListBuildState, StackingContextCollectionState};
use euclid::{Point2D, Vector2D};
use floats::FloatKind;
use flow::{Flow, FlowClass, OpaqueFlow, FragmentationContext, GetBaseFlow};
use fragment::{Fragment, FragmentBorderBoxIterator, Overflow};
use gfx_traits::print_tree::PrintTree;
use std::cmp::{min, max};
use std::fmt;
use std::sync::Arc;
use style::logical_geometry::LogicalSize;
use style::properties::ComputedValues;
use style::values::Either;
use style::values::computed::{LengthOrPercentageOrAuto, LengthOrPercentageOrNone};
#[allow(unsafe_code)]
unsafe impl ::flow::HasBaseFlow for MulticolFlow {}
#[repr(C)]
pub struct MulticolFlow {
pub block_flow: BlockFlow,
/// Length between the inline-start edge of a column and that of the next.
/// That is, the used column-width + used column-gap.
pub column_pitch: Au,
}
#[allow(unsafe_code)]
unsafe impl ::flow::HasBaseFlow for MulticolColumnFlow {}
#[repr(C)]
pub struct MulticolColumnFlow {
pub block_flow: BlockFlow,
}
impl MulticolFlow {
pub fn from_fragment(fragment: Fragment, float_kind: Option<FloatKind>) -> MulticolFlow {
MulticolFlow {
block_flow: BlockFlow::from_fragment_and_float_kind(fragment, float_kind),
column_pitch: Au(0),
}
}
}
impl MulticolColumnFlow {
pub fn from_fragment(fragment: Fragment) -> MulticolColumnFlow {
MulticolColumnFlow {
block_flow: BlockFlow::from_fragment(fragment),
}
}
}
impl Flow for MulticolFlow {
fn class(&self) -> FlowClass {
FlowClass::Multicol
}
fn as_mut_block(&mut self) -> &mut BlockFlow {
&mut self.block_flow
}
fn as_block(&self) -> &BlockFlow {
&self.block_flow
}
fn as_mut_multicol(&mut self) -> &mut MulticolFlow {
self
}
fn bubble_inline_sizes(&mut self) {
// FIXME(SimonSapin) http://dev.w3.org/csswg/css-sizing/#multicol-intrinsic
self.block_flow.bubble_inline_sizes();
}
fn assign_inline_sizes(&mut self, layout_context: &LayoutContext) {
debug!("assign_inline_sizes({}): assigning inline_size for flow", "multicol");
let shared_context = layout_context.shared_context();
self.block_flow.compute_inline_sizes(shared_context);
// Move in from the inline-start border edge.
let inline_start_content_edge = self.block_flow.fragment.border_box.start.i +
self.block_flow.fragment.border_padding.inline_start;
// Distance from the inline-end margin edge to the inline-end content edge.
let inline_end_content_edge =
self.block_flow.fragment.margin.inline_end +
self.block_flow.fragment.border_padding.inline_end;
self.block_flow.assign_inline_sizes(layout_context);
let padding_and_borders = self.block_flow.fragment.border_padding.inline_start_end();
let content_inline_size =
self.block_flow.fragment.border_box.size.inline - padding_and_borders;
let column_width;
{
let column_style = self.block_flow.fragment.style.get_column();
let column_gap = match column_style.column_gap {
Either::First(len) => len.into(),
Either::Second(_normal) => self.block_flow.fragment.style.get_font().font_size.size(),
};
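            // Used column count: fit as many (column-width + gap) columns as the
            // content inline-size allows (at least one), capped by any specified count.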
let mut column_count;
if let Either::First(column_width) = column_style.column_width {
let column_width = Au::from(column_width);
column_count =
max(1, (content_inline_size + column_gap).0 / (column_width + column_gap).0);
if let Either::First(specified_column_count) = column_style.column_count {
column_count = min(column_count, specified_column_count.0 as i32);
}
} else {
column_count = match column_style.column_count {
Either::First(n) => n.0,
_ => unreachable!(),
}
}
column_width =
max(Au(0), (content_inline_size + column_gap) / column_count - column_gap);
self.column_pitch = column_width + column_gap;
}
self.block_flow.fragment.border_box.size.inline = content_inline_size + padding_and_borders;
self.block_flow.propagate_assigned_inline_size_to_children(
shared_context, inline_start_content_edge, inline_end_content_edge, column_width,
|_, _, _, _, _, _| {});
}
fn assign_block_size(&mut self, ctx: &LayoutContext) {
debug!("assign_block_size: assigning block_size for multicol");
let fragmentation_context = Some(FragmentationContext {
this_fragment_is_empty: true,
available_block_size: {
let style = &self.block_flow.fragment.style;
if let LengthOrPercentageOrAuto::Length(length) = style.content_block_size() {
Au::from(length)
} else if let LengthOrPercentageOrNone::Length(length) = style.max_block_size() {
Au::from(length)
} else {
// FIXME: do column balancing instead
                    // FIXME: (until column balancing) subtract margins/borders/padding
LogicalSize::from_physical(
self.block_flow.base.writing_mode,
ctx.shared_context().viewport_size(),
).block
}<|fim▁hole|>
// Before layout, everything is in a single "column"
assert!(self.block_flow.base.children.len() == 1);
let mut column = self.block_flow.base.children.pop_front_arc().unwrap();
// Pretend there is no children for this:
self.block_flow.assign_block_size(ctx);
loop {
let remaining = Arc::get_mut(&mut column).unwrap().fragment(ctx, fragmentation_context);
self.block_flow.base.children.push_back_arc(column);
column = match remaining {
Some(remaining) => remaining,
None => break
};
}
}
fn compute_stacking_relative_position(&mut self, layout_context: &LayoutContext) {
self.block_flow.compute_stacking_relative_position(layout_context);
let pitch = LogicalSize::new(self.block_flow.base.writing_mode, self.column_pitch, Au(0));
let pitch = pitch.to_physical(self.block_flow.base.writing_mode);
for (i, child) in self.block_flow.base.children.iter_mut().enumerate() {
let point = &mut child.mut_base().stacking_relative_position;
*point = *point + Vector2D::new(pitch.width * i as i32, pitch.height * i as i32);
}
}
fn update_late_computed_inline_position_if_necessary(&mut self, inline_position: Au) {
self.block_flow.update_late_computed_inline_position_if_necessary(inline_position)
}
fn update_late_computed_block_position_if_necessary(&mut self, block_position: Au) {
self.block_flow.update_late_computed_block_position_if_necessary(block_position)
}
fn build_display_list(&mut self, state: &mut DisplayListBuildState) {
debug!("build_display_list_multicol");
self.block_flow.build_display_list(state);
}
fn collect_stacking_contexts(&mut self, state: &mut StackingContextCollectionState) {
self.block_flow.collect_stacking_contexts(state);
}
fn repair_style(&mut self, new_style: &ServoArc<ComputedValues>) {
self.block_flow.repair_style(new_style)
}
fn compute_overflow(&self) -> Overflow {
self.block_flow.compute_overflow()
}
fn contains_roots_of_absolute_flow_tree(&self) -> bool {
self.block_flow.contains_roots_of_absolute_flow_tree()
}
fn is_absolute_containing_block(&self) -> bool {
self.block_flow.is_absolute_containing_block()
}
fn generated_containing_block_size(&self, flow: OpaqueFlow) -> LogicalSize<Au> {
self.block_flow.generated_containing_block_size(flow)
}
fn iterate_through_fragment_border_boxes(&self,
iterator: &mut FragmentBorderBoxIterator,
level: i32,
stacking_context_position: &Point2D<Au>) {
self.block_flow.iterate_through_fragment_border_boxes(iterator, level, stacking_context_position);
}
fn mutate_fragments(&mut self, mutator: &mut FnMut(&mut Fragment)) {
self.block_flow.mutate_fragments(mutator);
}
fn print_extra_flow_children(&self, print_tree: &mut PrintTree) {
self.block_flow.print_extra_flow_children(print_tree);
}
}
impl Flow for MulticolColumnFlow {
fn class(&self) -> FlowClass {
FlowClass::MulticolColumn
}
fn as_mut_block(&mut self) -> &mut BlockFlow {
&mut self.block_flow
}
fn as_block(&self) -> &BlockFlow {
&self.block_flow
}
fn bubble_inline_sizes(&mut self) {
self.block_flow.bubble_inline_sizes();
}
fn assign_inline_sizes(&mut self, layout_context: &LayoutContext) {
debug!("assign_inline_sizes({}): assigning inline_size for flow", "multicol column");
self.block_flow.assign_inline_sizes(layout_context);
}
fn assign_block_size(&mut self, ctx: &LayoutContext) {
debug!("assign_block_size: assigning block_size for multicol column");
self.block_flow.assign_block_size(ctx);
}
fn fragment(&mut self, layout_context: &LayoutContext,
fragmentation_context: Option<FragmentationContext>)
-> Option<Arc<Flow>> {
Flow::fragment(&mut self.block_flow, layout_context, fragmentation_context)
}
fn compute_stacking_relative_position(&mut self, layout_context: &LayoutContext) {
self.block_flow.compute_stacking_relative_position(layout_context)
}
fn update_late_computed_inline_position_if_necessary(&mut self, inline_position: Au) {
self.block_flow.update_late_computed_inline_position_if_necessary(inline_position)
}
fn update_late_computed_block_position_if_necessary(&mut self, block_position: Au) {
self.block_flow.update_late_computed_block_position_if_necessary(block_position)
}
fn build_display_list(&mut self, state: &mut DisplayListBuildState) {
debug!("build_display_list_multicol column");
self.block_flow.build_display_list(state);
}
fn collect_stacking_contexts(&mut self, state: &mut StackingContextCollectionState) {
self.block_flow.collect_stacking_contexts(state);
}
fn repair_style(&mut self, new_style: &ServoArc<ComputedValues>) {
self.block_flow.repair_style(new_style)
}
fn compute_overflow(&self) -> Overflow {
self.block_flow.compute_overflow()
}
fn contains_roots_of_absolute_flow_tree(&self) -> bool {
self.block_flow.contains_roots_of_absolute_flow_tree()
}
fn is_absolute_containing_block(&self) -> bool {
self.block_flow.is_absolute_containing_block()
}
fn generated_containing_block_size(&self, flow: OpaqueFlow) -> LogicalSize<Au> {
self.block_flow.generated_containing_block_size(flow)
}
fn iterate_through_fragment_border_boxes(&self,
iterator: &mut FragmentBorderBoxIterator,
level: i32,
stacking_context_position: &Point2D<Au>) {
self.block_flow.iterate_through_fragment_border_boxes(iterator, level, stacking_context_position);
}
fn mutate_fragments(&mut self, mutator: &mut FnMut(&mut Fragment)) {
self.block_flow.mutate_fragments(mutator);
}
fn print_extra_flow_children(&self, print_tree: &mut PrintTree) {
self.block_flow.print_extra_flow_children(print_tree);
}
}
impl fmt::Debug for MulticolFlow {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "MulticolFlow: {:?}", self.block_flow)
}
}
impl fmt::Debug for MulticolColumnFlow {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "MulticolColumnFlow: {:?}", self.block_flow)
}
}<|fim▁end|> | }
}); |
<|file_name|>Seat.js<|end_file_name|><|fim▁begin|>import {Entity} from 'aframe-react';
import React from 'react';
export default class Seat extends React.Component {
constructor(props) {
super(props);
this.state = {
opacity: 1,
};
}
render() {
return (
<Entity position={this.props.seatPos} >
<Entity visible={ true }
material={{ color: this.props.buttonColor, transparent: true, shader: 'flat', opacity: this.props.Opacity }}
geometry={{ primitive: "plane", width: 0.2, height: 0.1 }}
id={this.props.componentId}
opacity={ this.props.Opacity }
onClick={ this.props.seatAnimation }
/>
</Entity><|fim▁hole|><|fim▁end|> | );
}} |
<|file_name|>task-comm-16.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused_mut)]
#![allow(unused_parens)]
#![allow(non_camel_case_types)]
use std::sync::mpsc::channel;
use std::cmp;
// Tests of ports and channels on various types
fn test_rec() {
struct R {val0: isize, val1: u8, val2: char}
let (tx, rx) = channel();
let r0: R = R {val0: 0, val1: 1, val2: '2'};
tx.send(r0).unwrap();
let mut r1: R;
r1 = rx.recv().unwrap();
assert_eq!(r1.val0, 0);
assert_eq!(r1.val1, 1);
assert_eq!(r1.val2, '2');
}
fn test_vec() {
let (tx, rx) = channel();
let v0: Vec<isize> = vec![0, 1, 2];
tx.send(v0).unwrap();
let v1 = rx.recv().unwrap();
assert_eq!(v1[0], 0);
assert_eq!(v1[1], 1);
assert_eq!(v1[2], 2);
}
fn test_str() {
let (tx, rx) = channel();
let s0 = "test".to_string();
tx.send(s0).unwrap();
let s1 = rx.recv().unwrap();
assert_eq!(s1.as_bytes()[0], 't' as u8);
assert_eq!(s1.as_bytes()[1], 'e' as u8);
assert_eq!(s1.as_bytes()[2], 's' as u8);
assert_eq!(s1.as_bytes()[3], 't' as u8);
}
#[derive(Debug)]
enum t {
tag1,
tag2(isize),
tag3(isize, u8, char)
}
impl cmp::PartialEq for t {
fn eq(&self, other: &t) -> bool {
match *self {
t::tag1 => {
match (*other) {
t::tag1 => true,
_ => false
}
}
t::tag2(e0a) => {
match (*other) {
t::tag2(e0b) => e0a == e0b,
_ => false
}
}
t::tag3(e0a, e1a, e2a) => {
match (*other) {
t::tag3(e0b, e1b, e2b) =>
e0a == e0b && e1a == e1b && e2a == e2b,
_ => false
}
}
}
}
fn ne(&self, other: &t) -> bool { !(*self).eq(other) }
}
fn test_tag() {
let (tx, rx) = channel();
tx.send(t::tag1).unwrap();
tx.send(t::tag2(10)).unwrap();
tx.send(t::tag3(10, 11, 'A')).unwrap();
let mut t1: t;
t1 = rx.recv().unwrap();
assert_eq!(t1, t::tag1);
t1 = rx.recv().unwrap();
assert_eq!(t1, t::tag2(10));
t1 = rx.recv().unwrap();
assert_eq!(t1, t::tag3(10, 11, 'A'));
}
fn test_chan() {
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
tx1.send(tx2).unwrap();
let tx2 = rx1.recv().unwrap();
// Does the transmitted channel still work?
<|fim▁hole|>}
pub fn main() {
test_rec();
test_vec();
test_str();
test_tag();
test_chan();
}<|fim▁end|> | tx2.send(10).unwrap();
let mut i: isize;
i = rx2.recv().unwrap();
assert_eq!(i, 10); |
<|file_name|>iter-map-fn-return.js<|end_file_name|><|fim▁begin|>// Copyright (C) 2015 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
esid: sec-array.from
es6id: 22.1.2.1
description: Value returned by mapping function (traversed via iterator)
info: |
[...]
2. If mapfn is undefined, let mapping be false.
3. else
a. If IsCallable(mapfn) is false, throw a TypeError exception.
b. If thisArg was supplied, let T be thisArg; else let T be undefined.
c. Let mapping be true
[...]
6. If usingIterator is not undefined, then
[...]
g. Repeat
[...]
vii. If mapping is true, then
1. Let mappedValue be Call(mapfn, T, «nextValue, k»).
2. If mappedValue is an abrupt completion, return
IteratorClose(iterator, mappedValue).
3. Let mappedValue be mappedValue.[[value]].
features: [Symbol.iterator]
---*/
var thisVals = [];
var nextResult = {
done: false,
value: {}
};
var nextNextResult = {
done: false,
value: {}
};
var firstReturnVal = {};
var secondReturnVal = {};
var mapFn = function(value, idx) {
var returnVal = nextReturnVal;
nextReturnVal = nextNextReturnVal;
nextNextReturnVal = null;
return returnVal;
};
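// mapFn hands back the pre-arranged values in order, so the test can check that
// Array.from stores exactly what the mapping function returned.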
var nextReturnVal = firstReturnVal;
var nextNextReturnVal = secondReturnVal;
var items = {};
var result;
items[Symbol.iterator] = function() {
return {
next: function() {
var result = nextResult;
nextResult = nextNextResult;
nextNextResult = {
done: true
};
return result;
}
};
};
<|fim▁hole|>assert.sameValue(result[0], firstReturnVal);
assert.sameValue(result[1], secondReturnVal);<|fim▁end|> | result = Array.from(items, mapFn);
assert.sameValue(result.length, 2); |
<|file_name|>dropbox.py<|end_file_name|><|fim▁begin|>###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.<|fim▁hole|># This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import erppeek
import shutil
import parameters # Micronaet: configuration file
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
# -----------------------------------------------------------------------------
# Parameters:
# -----------------------------------------------------------------------------
# ODOO connection:
odoo_server = parameters.odoo_server
odoo_port = parameters.odoo_port
odoo_user = parameters.odoo_user
odoo_password = parameters.odoo_password
odoo_database = parameters.odoo_database
# Dropbox:
demo = parameters.demo
samba_path = parameters.samba_path
dropbox_path = parameters.dropbox_path
print '''
Setup parameters:
    ODOO: Connection: %s:%s DB %s user: %s
Demo: %s
Samba folders: %s
Dropbox path: %s
''' % (
odoo_server,
odoo_port,
odoo_database,
odoo_user,
demo,
samba_path,
dropbox_path,
)
# -----------------------------------------------------------------------------
# UTILITY:
# -----------------------------------------------------------------------------
def get_modify_date(fullname):
''' Return modify date for file
'''
modify_date = datetime.fromtimestamp(
os.stat(fullname).st_mtime).strftime('%Y-%m-%d')
return modify_date
# -----------------------------------------------------------------------------
# ODOO operation:
# -----------------------------------------------------------------------------
odoo = erppeek.Client(
'http://%s:%s' % (
odoo_server, odoo_port),
db=odoo_database,
user=odoo_user,
password=odoo_password,
)
# Pool used:
product_pool = odoo.model('product.product.web.server')
product_ids = product_pool.search([
('connector_id.wordpress', '=', True),
])
# Check elements:
#error = [] # Error database
#warning = [] # Warning database
#info = [] # Info database
#log = [] # Log database
log_sym = [] # Log database for symlinks (used below)
#product_odoo = {}
# Only if new file (check how):
dropbox_root_path = os.path.expanduser(dropbox_path)
samba_root_path = os.path.expanduser(samba_path)
# -----------------------------------------------------------------------------
# Save current files (Dropbox folder):
# -----------------------------------------------------------------------------
current_files = []
for root, folders, files in os.walk(dropbox_root_path):
for f in files:
        current_files.append(
            os.path.join(root, f))
    break  # only first folder!
# -----------------------------------------------------------------------------
# Loop over all selected product images and link them into Dropbox:
# -----------------------------------------------------------------------------
for product in product_pool.browse(product_ids):
for image in product.image_ids:
image_id = image.id
code = image.album_id.code
samba_relative_path = image.album_id.path # TODO dropbox_path
filename = product.filename
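        # Dropbox entry is named '<album code>.<original filename>' so files from different albums don't collide: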
        origin = os.path.join(samba_relative_path, filename)
        destination = os.path.join(dropbox_root_path, '%s.%s' % (code, filename))
if destination in current_files:
current_files.remove(destination)
# Create symlink:
try:
os.symlink(origin, destination)
            log_sym.append('CREATED: origin: %s destination: %s' % (
                origin, destination))
        except OSError:
            log_sym.append('ERROR: origin: %s destination: %s' % (
                origin, destination))
# Find dropbox link:
# Save dropbox link:
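# Open up permissions on the Dropbox tree, then drop entries no product references any more: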
os.system('chmod 777 "%s" -R' % dropbox_path)
for filename in current_files:
    os.remove(filename)
# file_modify = get_modify_date(fullname)
# os.system('mkdir -p "%s"' % product_folder)
print 'End operation'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:<|fim▁end|> | # |
<|file_name|>nestingLimit.cpp<|end_file_name|><|fim▁begin|>// ArduinoJson - https://arduinojson.org
// Copyright © 2014-2022, Benoit BLANCHON
// MIT License
#include <ArduinoJson.h>
#include <catch.hpp>
#define SHOULD_WORK(expression) REQUIRE(DeserializationError::Ok == expression);
#define SHOULD_FAIL(expression) \
REQUIRE(DeserializationError::TooDeep == expression);
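// Each input type is exercised both at a nesting limit that just fits and one level too deep.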
TEST_CASE("JsonDeserializer nesting") {
DynamicJsonDocument doc(4096);
SECTION("Input = const char*") {
SECTION("limit = 0") {
DeserializationOption::NestingLimit nesting(0);
SHOULD_WORK(deserializeJson(doc, "\"toto\"", nesting));
SHOULD_WORK(deserializeJson(doc, "123", nesting));
SHOULD_WORK(deserializeJson(doc, "true", nesting));
SHOULD_FAIL(deserializeJson(doc, "[]", nesting));
SHOULD_FAIL(deserializeJson(doc, "{}", nesting));
SHOULD_FAIL(deserializeJson(doc, "[\"toto\"]", nesting));
SHOULD_FAIL(deserializeJson(doc, "{\"toto\":1}", nesting));
}
SECTION("limit = 1") {
DeserializationOption::NestingLimit nesting(1);
SHOULD_WORK(deserializeJson(doc, "[\"toto\"]", nesting));
SHOULD_WORK(deserializeJson(doc, "{\"toto\":1}", nesting));
SHOULD_FAIL(deserializeJson(doc, "{\"toto\":{}}", nesting));
SHOULD_FAIL(deserializeJson(doc, "{\"toto\":[]}", nesting));
SHOULD_FAIL(deserializeJson(doc, "[[\"toto\"]]", nesting));
SHOULD_FAIL(deserializeJson(doc, "[{\"toto\":1}]", nesting));
}
}
SECTION("char* and size_t") {<|fim▁hole|> SECTION("limit = 0") {
DeserializationOption::NestingLimit nesting(0);
SHOULD_WORK(deserializeJson(doc, "\"toto\"", 6, nesting));
SHOULD_WORK(deserializeJson(doc, "123", 3, nesting));
SHOULD_WORK(deserializeJson(doc, "true", 4, nesting));
SHOULD_FAIL(deserializeJson(doc, "[]", 2, nesting));
SHOULD_FAIL(deserializeJson(doc, "{}", 2, nesting));
SHOULD_FAIL(deserializeJson(doc, "[\"toto\"]", 8, nesting));
SHOULD_FAIL(deserializeJson(doc, "{\"toto\":1}", 10, nesting));
}
SECTION("limit = 1") {
DeserializationOption::NestingLimit nesting(1);
SHOULD_WORK(deserializeJson(doc, "[\"toto\"]", 8, nesting));
SHOULD_WORK(deserializeJson(doc, "{\"toto\":1}", 10, nesting));
SHOULD_FAIL(deserializeJson(doc, "{\"toto\":{}}", 11, nesting));
SHOULD_FAIL(deserializeJson(doc, "{\"toto\":[]}", 11, nesting));
SHOULD_FAIL(deserializeJson(doc, "[[\"toto\"]]", 10, nesting));
SHOULD_FAIL(deserializeJson(doc, "[{\"toto\":1}]", 12, nesting));
}
}
SECTION("Input = std::string") {
SECTION("limit = 0") {
DeserializationOption::NestingLimit nesting(0);
SHOULD_WORK(deserializeJson(doc, std::string("\"toto\""), nesting));
SHOULD_WORK(deserializeJson(doc, std::string("123"), nesting));
SHOULD_WORK(deserializeJson(doc, std::string("true"), nesting));
SHOULD_FAIL(deserializeJson(doc, std::string("[]"), nesting));
SHOULD_FAIL(deserializeJson(doc, std::string("{}"), nesting));
SHOULD_FAIL(deserializeJson(doc, std::string("[\"toto\"]"), nesting));
SHOULD_FAIL(deserializeJson(doc, std::string("{\"toto\":1}"), nesting));
}
SECTION("limit = 1") {
DeserializationOption::NestingLimit nesting(1);
SHOULD_WORK(deserializeJson(doc, std::string("[\"toto\"]"), nesting));
SHOULD_WORK(deserializeJson(doc, std::string("{\"toto\":1}"), nesting));
SHOULD_FAIL(deserializeJson(doc, std::string("{\"toto\":{}}"), nesting));
SHOULD_FAIL(deserializeJson(doc, std::string("{\"toto\":[]}"), nesting));
SHOULD_FAIL(deserializeJson(doc, std::string("[[\"toto\"]]"), nesting));
SHOULD_FAIL(deserializeJson(doc, std::string("[{\"toto\":1}]"), nesting));
}
}
SECTION("Input = std::istream") {
SECTION("limit = 0") {
DeserializationOption::NestingLimit nesting(0);
std::istringstream good("true");
std::istringstream bad("[]");
SHOULD_WORK(deserializeJson(doc, good, nesting));
SHOULD_FAIL(deserializeJson(doc, bad, nesting));
}
SECTION("limit = 1") {
DeserializationOption::NestingLimit nesting(1);
std::istringstream good("[\"toto\"]");
std::istringstream bad("{\"toto\":{}}");
SHOULD_WORK(deserializeJson(doc, good, nesting));
SHOULD_FAIL(deserializeJson(doc, bad, nesting));
}
}
}<|fim▁end|> | |
<|file_name|>chevron-right.js<|end_file_name|><|fim▁begin|>'use strict';
var React = require('react');<|fim▁hole|>
var NavigationChevronRight = React.createClass({
displayName: 'NavigationChevronRight',
render: function render() {
return React.createElement(
SvgIcon,
this.props,
React.createElement('path', { d: 'M10 6L8.59 7.41 13.17 12l-4.58 4.59L10 18l6-6z' })
);
}
});
module.exports = NavigationChevronRight;<|fim▁end|> | var SvgIcon = require('../../svg-icon'); |
<|file_name|>config.py<|end_file_name|><|fim▁begin|># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.<|fim▁hole|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from quantum.openstack.common import cfg
meta_plugin_opts = [
cfg.StrOpt('plugin_list', default='',
help=_("List of plugins to load")),
cfg.StrOpt('l3_plugin_list', default='',
help=_("List of L3 plugins to load")),
cfg.StrOpt('default_flavor', default='',
help=_("Default flavor to use")),
cfg.StrOpt('default_l3_flavor', default='',
help=_("Default L3 flavor to use")),
cfg.StrOpt('supported_extension_aliases', default='',
help=_("Supported extension aliases")),
cfg.StrOpt('extension_map', default='',
help=_("A list of extensions, per plugin, to load.")),
]
proxy_plugin_opts = [
cfg.StrOpt('admin_user',
help=_("Admin user")),
cfg.StrOpt('admin_password',
help=_("Admin password")),
cfg.StrOpt('admin_tenant_name',
help=_("Admin tenant name")),
cfg.StrOpt('auth_url',
help=_("Authentication URL")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('auth_region',
help=_("Authentication region")),
]
cfg.CONF.register_opts(meta_plugin_opts, "META")
cfg.CONF.register_opts(proxy_plugin_opts, "PROXY")<|fim▁end|> | # All Rights Reserved.
# |
<|file_name|>0004_auto__add_articlecomment.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db<|fim▁hole|>
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ArticleComment'
db.create_table('cms_articlecomment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Article'])),
('created_at', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
('author', self.gf('django.db.models.fields.CharField')(max_length=60)),
('comment', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('cms', ['ArticleComment'])
def backwards(self, orm):
# Deleting model 'ArticleComment'
db.delete_table('cms_articlecomment')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.article': {
'Meta': {'ordering': "['title']", 'object_name': 'Article'},
'allow_comments': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'conversions': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'}),
'header': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'sections': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['cms.Section']", 'null': 'True', 'through': "orm['cms.SectionItem']", 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '250', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.articlearchive': {
'Meta': {'ordering': "('updated_at',)", 'object_name': 'ArticleArchive'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Article']"}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'header': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'cms.articlecomment': {
'Meta': {'ordering': "('created_at',)", 'object_name': 'ArticleComment'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Article']"}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'comment': ('django.db.models.fields.TextField', [], {}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'cms.filedownload': {
'Meta': {'object_name': 'FileDownload'},
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uuid': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'cms.menu': {
'Meta': {'object_name': 'Menu'},
'article': ('smart_selects.db_fields.ChainedForeignKey', [], {'default': 'None', 'to': "orm['cms.Article']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Menu']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Section']", 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.section': {
'Meta': {'ordering': "['title']", 'object_name': 'Section'},
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['cms.Article']", 'null': 'True', 'through': "orm['cms.SectionItem']", 'blank': 'True'}),
'conversions': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'header': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '250', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.sectionitem': {
'Meta': {'ordering': "['order']", 'object_name': 'SectionItem'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Article']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Section']"})
},
'cms.urlmigrate': {
'Meta': {'object_name': 'URLMigrate'},
'dtupdate': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_url': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'obs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'old_url': ('django.db.models.fields.CharField', [], {'max_length': '250', 'db_index': 'True'}),
'redirect_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
}
}
complete_apps = ['cms']<|fim▁end|> | from south.v2 import SchemaMigration
from django.db import models |
<|file_name|>actor.rs<|end_file_name|><|fim▁begin|>use crate::{
actionable_events::ActionDispatcher,
actions::ActionOutput,
entity::{EntityRef, Realm},
};
pub trait Actor: std::fmt::Debug {
/// Returns whether the actor's `on_active()` callback should be invoked when the actor becomes
/// idle after performing an action.
fn activate_on_idle(&self) -> bool {
false
}
/// Activates an actor.
///
/// Actors are responsible for scheduling their own activation based on any of the other
/// triggers in this trait. The only time `on_active()` is called automatically is when
/// `activate_on_idle()` returns `true` and the actor becomes idle again after performing
/// some other action.
fn on_active(&self, _realm: &mut Realm, _dispatcher: &ActionDispatcher) -> ActionOutput {
Ok(Vec::new())
}
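    // Illustrative sketch (an assumption, not part of the original trait): a
    // minimal actor that asks to be re-activated whenever it becomes idle.
    // `IdleWanderer` and its behavior are hypothetical, shown only to clarify
    // how `activate_on_idle()` and `on_active()` cooperate.
    //
    //     #[derive(Debug)]
    //     struct IdleWanderer;
    //
    //     impl Actor for IdleWanderer {
    //         fn activate_on_idle(&self) -> bool {
    //             // Request `on_active()` again after every completed action.
    //             true
    //         }
    //
    //         fn on_active(&self, _realm: &mut Realm, _dispatcher: &ActionDispatcher) -> ActionOutput {
    //             // Schedule or perform the next action here; returning no
    //             // actions simply leaves the actor idle.
    //             Ok(Vec::new())
    //         }
    //     }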
/// Invoked when the actor itself is being attacked.
fn on_attack(
&self,
_realm: &mut Realm,
_dispatcher: &ActionDispatcher,
_attacker: EntityRef,
) -> ActionOutput {
Ok(Vec::new())
}
/// Invoked when a character in the same room is being attacked.
fn on_character_attacked(
&self,
_realm: &mut Realm,
_dispatcher: &ActionDispatcher,
_attacker: EntityRef,
_defendent: EntityRef,
) -> ActionOutput {
Ok(Vec::new())
}
/// Invoked when a character in the same room dies.
///
/// If the character dies as a direct result of another character's attack, the attacker
/// is given as well.
fn on_character_died(
&self,
_realm: &mut Realm,
_dispatcher: &ActionDispatcher,
_casualty: EntityRef,
_attacker: Option<EntityRef>,
) -> ActionOutput {
Ok(Vec::new())
}
/// Invoked when a character entered the same room.
fn on_character_entered(
&mut self,
_realm: &mut Realm,
_dispatcher: &ActionDispatcher,
_character: EntityRef,
) -> ActionOutput {
Ok(Vec::new())
}<|fim▁hole|>
/// Invoked when the character itself dies.
///
/// If the character dies as a direct result of another character's attack, the attacker
/// is given as well.
fn on_die(
&self,
_realm: &mut Realm,
_dispatcher: &ActionDispatcher,
_attacker: Option<EntityRef>,
) -> ActionOutput {
Ok(Vec::new())
}
/// Invoked when the actor is spawned.
fn on_spawn(&self, _realm: &mut Realm, _dispatcher: &ActionDispatcher) -> ActionOutput {
Ok(Vec::new())
}
/// Invoked when a character talks directly to the actor.
fn on_talk(
&self,
_realm: &mut Realm,
_dispatcher: &ActionDispatcher,
_speaker: EntityRef,
_message: &str,
) -> ActionOutput {
Ok(Vec::new())
}
}<|fim▁end|> | |
<|file_name|>webdriver.cpp<|end_file_name|><|fim▁begin|>/*
Copyright 2007-2009 WebDriver committers
Copyright 2007-2009 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "stdafx.h"
#include "webdriver.h"
#include "finder.h"
#include "interactions.h"
#include "InternetExplorerDriver.h"
#include "logging.h"
#include "jsxpath.h"
#include "cookies.h"
#include "sizzle.h"
#include "utils.h"
#include "atoms.h"
#include "IEReturnTypes.h"
#include "windowHandling.h"
#include <stdio.h>
#include <iostream>
#include <string>
#include <vector>
#define END_TRY catch(std::wstring& m) \
{ \
if (m.find(L"TIME OUT") != std::wstring::npos) { return ETIMEOUT; } \
wcerr << m.c_str() << endl; \
LOG(WARN) << "Last error: " << GetLastError(); \
return EEXPECTEDERROR; \
} \
catch (...) \
{ \
safeIO::CoutA("CException caught in dll", true); \
return EUNHANDLEDERROR; }
struct WebDriver {
InternetExplorerDriver *ie;
long implicitWaitTimeout;
};
struct WebElement {
ElementWrapper *element;
};
struct ScriptArgs {
LONG currentIndex;
int maxLength;
SAFEARRAY* args;
};
struct ScriptResult {
CComVariant result;
};
struct StringWrapper {
wchar_t *text;
};
struct ElementCollection {
std::vector<ElementWrapper*>* elements;
};
struct StringCollection {
std::vector<std::wstring>* strings;
};
InternetExplorerDriver* openIeInstance = NULL;
clock_t endAt(WebDriver* driver) {
clock_t end = clock() + (driver->implicitWaitTimeout / 1000 * CLOCKS_PER_SEC);
if (driver->implicitWaitTimeout > 0 && driver->implicitWaitTimeout < 1000)
{
end += 1 * CLOCKS_PER_SEC;
}
return end;
}
int terminateIe()
{
std::vector<HWND> allWindows;
getTopLevelWindows(&allWindows);
// Wait until all open windows are gone. Common case, no worries
while (allWindows.size() > 0) {
allWindows.clear();
getTopLevelWindows(&allWindows);
for (vector<HWND>::iterator curr = allWindows.begin();
curr != allWindows.end();
curr++) {
SendMessage(*curr, WM_CLOSE, NULL, NULL);
}
// Pause to allow IE to process the message. If we don't do this and
// we're using IE 8, and "Restore previous session" is enabled (an
// increasingly common state) then a modal system dialog will be
// displayed to the user. Not what we want.
wait(500);
}
// If it's longer than this, we're on a very strange system
wchar_t taskkillPath[256];
if (!ExpandEnvironmentStrings(L"%SystemRoot%\\system32\\taskkill.exe", taskkillPath, 256))
{
cerr << "Unable to find taskkill application" << endl;
return EUNHANDLEDERROR;
}
std::wstring args = L" /f /im iexplore.exe";
STARTUPINFO startup_info;
memset(&startup_info, 0, sizeof(startup_info));
startup_info.cb = sizeof(startup_info);
PROCESS_INFORMATION process_info;
if (!CreateProcessW(taskkillPath, &args[0], NULL, NULL, false, DETACHED_PROCESS, NULL, NULL, &startup_info, &process_info))
{
cerr << "Could not execute taskkill. Bailing: " << GetLastError() << endl;
return EUNHANDLEDERROR;
}
WaitForSingleObject(process_info.hProcess, INFINITE);
CloseHandle(process_info.hThread);
CloseHandle(process_info.hProcess);
return SUCCESS;
}
extern "C"
{
// String manipulation functions
int wdStringLength(StringWrapper* string, int* length)
{
if (!string) {
cerr << "No string to get length of" << endl;
*length = -1;
return -1;
}
if (!string->text) {
cerr << "No underlying string to get length of" << endl;
*length = -1;
return -2;
}
size_t len = wcslen(string->text);
*length = (int) len + 1;
return SUCCESS;
}
int wdFreeString(StringWrapper* string)
{
if (!string) {
return ENOSTRING;
}
if (string->text) delete[] string->text;
delete string;
return SUCCESS;
}
int wdCopyString(StringWrapper* source, int size, wchar_t* dest)
{
if (!source) {
cerr << "No source wrapper" << endl;
return ENOSTRING;
}
if (!source->text) {
cerr << "No source text" << endl;
return ENOSTRING;
}
wcscpy_s(dest, size, source->text);
return SUCCESS;
}
// Collection manipulation functions
int wdcGetElementCollectionLength(ElementCollection* collection, int* length)
{
if (!collection || !collection->elements) return ENOCOLLECTION;
*length = (int) collection->elements->size();
return SUCCESS;
}
int wdcGetElementAtIndex(ElementCollection* collection, int index, WebElement** result)
{
*result = NULL;
if (!collection || !collection->elements) return ENOCOLLECTION;
std::vector<ElementWrapper*>::const_iterator cur = collection->elements->begin();
cur += index;
WebElement* element = new WebElement();
element->element = *cur;
*result = element;
return SUCCESS;
}
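	// Illustrative usage sketch (assumed client code, not part of this DLL):
	// walking a collection returned by one of the wdFindElements* functions.
	// Each wdcGetElementAtIndex call wraps the underlying element in a fresh
	// WebElement, so the elements are released individually and the collection
	// shell is freed with alsoFreeElements = 0 to avoid a double delete.
	//
	//   int length = 0;
	//   wdcGetElementCollectionLength(collection, &length);
	//   for (int i = 0; i < length; i++) {
	//       WebElement* element = NULL;
	//       wdcGetElementAtIndex(collection, i, &element);
	//       // ... use element, then release it ...
	//       wdeFreeElement(element);
	//   }
	//   wdFreeElementCollection(collection, 0);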
int wdcGetStringCollectionLength(StringCollection* collection, int* length)
{
if (!collection) return ENOCOLLECTION;
*length = (int) collection->strings->size();
return SUCCESS;
}
int wdcGetStringAtIndex(StringCollection* collection, int index, StringWrapper** result)
{
*result = NULL;
if (!collection) return ENOCOLLECTION;
std::vector<std::wstring>::const_iterator cur = collection->strings->begin();
cur += index;
StringWrapper* wrapper = new StringWrapper();
size_t size = (*cur).length() + 1;
wrapper->text = new wchar_t[size];
wcscpy_s(wrapper->text, size, (*cur).c_str());
*result = wrapper;
return SUCCESS;
}
// Element manipulation functions
int wdeFreeElement(WebElement* element)
{
if (!element)
return ENOSUCHDRIVER;
if (element->element) delete element->element;
delete element;
return SUCCESS;
}
int wdFreeElementCollection(ElementCollection* collection, int alsoFreeElements)
{
if (!collection || !collection->elements)
return ENOSUCHCOLLECTION;
if (alsoFreeElements) {
std::vector<ElementWrapper*>::const_iterator cur = collection->elements->begin();
std::vector<ElementWrapper*>::const_iterator end = collection->elements->end();
while (cur != end) {
delete *cur;
cur++;
}
}
delete collection->elements;
delete collection;
return SUCCESS;
}
int wdFreeStringCollection(StringCollection* collection)
{
if (!collection || !collection->strings)
return ENOSUCHCOLLECTION;
delete collection->strings;
delete collection;
return SUCCESS;
}
int wdFreeScriptArgs(ScriptArgs* scriptArgs)
{
if (!scriptArgs || !scriptArgs->args)
return ENOSUCHCOLLECTION;
SafeArrayDestroy(scriptArgs->args);
delete scriptArgs;
return SUCCESS;
}
int wdFreeScriptResult(ScriptResult* scriptResult)
{
if (!scriptResult)
return ENOCOLLECTION;
VariantClear(&scriptResult->result);
delete scriptResult;
return SUCCESS;
}
// Driver manipulation functions
int wdFreeDriver(WebDriver* driver)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
terminateIe();
} catch (...) {
// Fine. We're quitting anyway.
}
delete driver->ie;
delete driver;
driver = NULL;
// Let the IE COM instance fade away
wait(4000);
return SUCCESS;
}
int wdNewDriverInstance(WebDriver** result)
{
*result = NULL;
TRY
{
terminateIe();
/*
wchar_t iePath[256];
if (!ExpandEnvironmentStrings(L"%ProgramFiles%\\Internet Explorer\\iexplore.exe", iePath, 256))
{
cerr << "Unable to find IE" << endl;
return EUNHANDLEDERROR;
}
memset(&startup_info, 0, sizeof(startup_info));
startup_info.cb = sizeof(startup_info);
args = L"about:blank";
if (!CreateProcessW(iePath, &args[0], NULL, NULL, false, 0, NULL, NULL, &startup_info, &process_info))
{
cerr << "Could not execute IE. Bailing: " << GetLastError() << endl;
return EUNHANDLEDERROR;
}
*/
WebDriver *driver = new WebDriver();
driver->ie = new InternetExplorerDriver();
driver->ie->setVisible(true);
driver->implicitWaitTimeout = 0;
openIeInstance = driver->ie;
*result = driver;
return SUCCESS;
}
END_TRY
return ENOSUCHDRIVER;
}
int wdGet(WebDriver* driver, const wchar_t* url)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
driver->ie->get(url);
driver->ie->waitForNavigateToFinish();
return SUCCESS;
} END_TRY;
}
int wdGoBack(WebDriver* driver)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
driver->ie->goBack();
return SUCCESS;
} END_TRY;
}
int wdGoForward(WebDriver* driver)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
driver->ie->goForward();
return SUCCESS;
} END_TRY;
}
int wdRefresh(WebDriver* driver)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
StringWrapper* wrapper;
int result = wdGetCurrentUrl(driver, &wrapper);
if (result != SUCCESS) {
return result;
}
result = wdGet(driver, wrapper->text);
wdFreeString(wrapper);
return result;
}
int wdClose(WebDriver* driver)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
driver->ie->close();
return SUCCESS;
	} END_TRY;
}
int wdGetVisible(WebDriver* driver, int* value)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
*value = driver->ie->getVisible() ? 1 : 0;
return SUCCESS;
} END_TRY;
}
int wdSetVisible(WebDriver* driver, int value)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
driver->ie->setVisible(value != 0);
} END_TRY;
return SUCCESS;
}
int wdGetCurrentUrl(WebDriver* driver, StringWrapper** result)
{
*result = NULL;
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
const std::wstring originalString(driver->ie->getCurrentUrl());
size_t length = originalString.length() + 1;
wchar_t* toReturn = new wchar_t[length];
wcscpy_s(toReturn, length, originalString.c_str());
StringWrapper* res = new StringWrapper();
res->text = toReturn;
*result = res;
return SUCCESS;
} END_TRY;
}
int wdGetTitle(WebDriver* driver, StringWrapper** result)
{
*result = NULL;
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
const std::wstring originalString(driver->ie->getTitle());
size_t length = originalString.length() + 1;
wchar_t* toReturn = new wchar_t[length];
wcscpy_s(toReturn, length, originalString.c_str());
StringWrapper* res = new StringWrapper();
res->text = toReturn;
*result = res;
return SUCCESS;
} END_TRY;
}
int wdGetPageSource(WebDriver* driver, StringWrapper** result)
{
*result = NULL;
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
const std::wstring originalString(driver->ie->getPageSource());
size_t length = originalString.length() + 1;
wchar_t* toReturn = new wchar_t[length];
wcscpy_s(toReturn, length, originalString.c_str());
StringWrapper* res = new StringWrapper();
res->text = toReturn;
*result = res;
return SUCCESS;
} END_TRY;
}
int wdGetCookies(WebDriver* driver, StringWrapper** result)
{
*result = NULL;
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
const std::wstring originalString(driver->ie->getCookies());
size_t length = originalString.length() + 1;
wchar_t* toReturn = new wchar_t[length];
wcscpy_s(toReturn, length, originalString.c_str());
StringWrapper* res = new StringWrapper();
res->text = toReturn;
*result = res;
return SUCCESS;
} END_TRY;
}
int wdAddCookie(WebDriver* driver, const wchar_t* cookie)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
return driver->ie->addCookie(cookie);
} END_TRY;
}
int wdDeleteCookie(WebDriver* driver, const wchar_t* cookieName)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
// Inject the XPath engine
std::wstring script;
for (int i = 0; DELETECOOKIES[i]; i++) {
script += DELETECOOKIES[i];
}
ScriptArgs* args;
int result = wdNewScriptArgs(&args, 1);
if (result != SUCCESS) {
return result;
}
wdAddStringScriptArg(args, cookieName);
ScriptResult* scriptResult = NULL;
result = wdExecuteScript(driver, script.c_str(), args, &scriptResult);
wdFreeScriptArgs(args);
if (scriptResult) delete scriptResult;
return result;
}
int wdSwitchToActiveElement(WebDriver* driver, WebElement** result)
{
*result = NULL;
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
ElementWrapper* element = driver->ie->getActiveElement();
if (!element)
return ENOSUCHELEMENT;
WebElement* toReturn = new WebElement();
toReturn->element = element;
*result = toReturn;
return SUCCESS;
} END_TRY;
}
int wdSwitchToWindow(WebDriver* driver, const wchar_t* name)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
int result;
// It's entirely possible the window to switch to isn't here yet.
// TODO(simon): Make this configurable
for (int i = 0; i < 8; i++) {
result = driver->ie->switchToWindow(name);
if (result == SUCCESS) { break; }
wait(500);
}
return result;
} END_TRY;
}
int wdSwitchToFrame(WebDriver* driver, const wchar_t* path)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
// TODO(simon): Make this configurable
for (int i = 0; i < 8; i++) {
bool result = driver->ie->switchToFrame(path);
if (result) { return SUCCESS; }
wait(500);
}
return ENOSUCHFRAME;
} END_TRY;
}
int wdWaitForLoadToComplete(WebDriver* driver)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
driver->ie->waitForNavigateToFinish();
return SUCCESS;
} END_TRY;
}
int wdGetCurrentWindowHandle(WebDriver* driver, StringWrapper** handle)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
const std::wstring originalString(driver->ie->getHandle());
// TODO(simon): Check that the handle is in the map of known driver instances
size_t length = originalString.length() + 1;
wchar_t* toReturn = new wchar_t[length];
wcscpy_s(toReturn, length, originalString.c_str());
StringWrapper* res = new StringWrapper();
res->text = toReturn;
*handle = res;
return SUCCESS;
} END_TRY;
}
int wdGetAllWindowHandles(WebDriver* driver, StringCollection** handles)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
*handles = NULL;
try {
std::vector<std::wstring> rawHandles = driver->ie->getAllHandles();
StringCollection* collection = new StringCollection();
collection->strings = new std::vector<std::wstring>();
for (std::vector<std::wstring>::iterator curr = rawHandles.begin();
curr != rawHandles.end();
curr++) {
collection->strings->push_back(std::wstring(*curr));
}
*handles = collection;
return SUCCESS;
} END_TRY;
}
int verifyFresh(WebElement* element)
{
if (!element || !element->element) { return ENOSUCHELEMENT; }
try {
if (!element->element->isFresh())
{
return EOBSOLETEELEMENT;
}
} END_TRY;
return SUCCESS;
}
int wdeClick(WebElement* element)
{
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
res = element->element->click();
return res;
} END_TRY;
}
int wdeGetAttribute(WebDriver* driver, WebElement* element, const wchar_t* name, StringWrapper** result)
{
*result = NULL;
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
std::wstring script(L"(function() { return function(){ ");
// Read in all the scripts
for (int j = 0; GET_ATTRIBUTE[j]; j++) {
script += GET_ATTRIBUTE[j];
script += L"\n";
}
// Now for the magic
script += L"var element = arguments[0];\n";
script += L"var attributeName = arguments[1];\n";
script += L"return getAttribute(element, attributeName);\n";
// Close things
script += L"};})();";
ScriptArgs* args;
res = wdNewScriptArgs(&args, 2);
if (res != SUCCESS) {
return res;
}
wdAddElementScriptArg(args, element);
wdAddStringScriptArg(args, name);
		ScriptResult* scriptResult = NULL;
		res = wdExecuteScript(driver, script.c_str(), args, &scriptResult);
		wdFreeScriptArgs(args);
if (res != SUCCESS)
{
wdFreeScriptResult(scriptResult);
return res;
}
int type;
wdGetScriptResultType(driver, scriptResult, &type);
if (type != TYPE_EMPTY && scriptResult->result.vt != VT_NULL) {
const std::wstring originalString(comvariant2cw(scriptResult->result));
size_t length = originalString.length() + 1;
wchar_t* toReturn = new wchar_t[length];
wcscpy_s(toReturn, length, originalString.c_str());
*result = new StringWrapper();
(*result)->text = toReturn;
}
wdFreeScriptResult(scriptResult);
return SUCCESS;
} END_TRY;
}
int wdeGetValueOfCssProperty(WebElement* element, const wchar_t* name, StringWrapper** result)
{
*result = NULL;
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
const std::wstring originalString(element->element->getValueOfCssProperty(name));
size_t length = originalString.length() + 1;
wchar_t* toReturn = new wchar_t[length];
wcscpy_s(toReturn, length, originalString.c_str());
		StringWrapper* wrapper = new StringWrapper();
		wrapper->text = toReturn;
		*result = wrapper;
return SUCCESS;
} END_TRY;
}
int wdeGetText(WebElement* element, StringWrapper** result)
{
*result = NULL;
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
const std::wstring originalString(element->element->getText());
size_t length = originalString.length() + 1;
wchar_t* toReturn = new wchar_t[length];
wcscpy_s(toReturn, length, originalString.c_str());
		StringWrapper* wrapper = new StringWrapper();
		wrapper->text = toReturn;
		*result = wrapper;
return SUCCESS;
} END_TRY;
}
int wdeGetTagName(WebElement* element, StringWrapper** result)
{
*result = NULL;
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
const std::wstring originalString(element->element->getTagName());
size_t length = originalString.length() + 1;
wchar_t* toReturn = new wchar_t[length];
wcscpy_s(toReturn, length, originalString.c_str());
		StringWrapper* wrapper = new StringWrapper();
		wrapper->text = toReturn;
		*result = wrapper;
return SUCCESS;
} END_TRY;
}
int wdeIsSelected(WebElement* element, int* result)
{
*result = 0;
try {
StringWrapper* wrapper;
WebDriver* driver = new WebDriver();
driver->ie = element->element->getParent();
int res = wdeGetAttribute(driver, element, L"selected", &wrapper);
driver->ie = NULL;
delete driver;
if (res != SUCCESS)
{
return res;
}
*result = wrapper && wrapper->text && wcscmp(L"true", wrapper->text) == 0 ? 1 : 0;
wdFreeString(wrapper);
return SUCCESS;
} END_TRY;
}
int wdeSetSelected(WebElement* element)
{
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
return element->element->setSelected();
} END_TRY;
}
int wdeToggle(WebElement* element, int* result)
{
*result = 0;
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
		res = element->element->toggle();
if (res == SUCCESS) {
return wdeIsSelected(element, result);
}
return res;
} END_TRY;
}
int wdeIsEnabled(WebElement* element, int* result)
{
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
*result = element->element->isEnabled() ? 1 : 0;
return SUCCESS;
} END_TRY;
}
int wdeIsDisplayed(WebElement* element, int* result)
{
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
*result = element->element->isDisplayed() ? 1 : 0;
return SUCCESS;
} END_TRY;
}
int wdeSendKeys(WebElement* element, const wchar_t* text)
{
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
return element->element->sendKeys(text);
} END_TRY;
}
int wdeSendKeyPress(WebElement* element, const wchar_t* text)
{
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
return element->element->sendKeyPress(text);
} END_TRY;
}
int wdeSendKeyRelease(WebElement* element, const wchar_t* text)
{
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
return element->element->sendKeyRelease(text);
} END_TRY;
}
int wdeClear(WebElement* element)
{
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
element->element->clear();
return SUCCESS;
} END_TRY;
}
int wdeSubmit(WebElement* element)
{
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
element->element->submit();
return SUCCESS;
} END_TRY;
}
int wdeGetDetailsOnceScrolledOnToScreen(WebElement* element, HWND* hwnd, long* x, long* y, long* width, long* height)
{
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
element->element->getLocationWhenScrolledIntoView(hwnd, x, y, width, height);
return SUCCESS;
} END_TRY;
}
int wdeGetLocation(WebElement* element, long* x, long* y)
{
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
element->element->getLocation(x, y);
return SUCCESS;
} END_TRY;
}
int wdeGetSize(WebElement* element, long* width, long* height)
{
int res = verifyFresh(element); if (res != SUCCESS) { return res; }
try {
int result = element->element->getWidth(width);
if (result != SUCCESS) {
return result;
}
result = element->element->getHeight(height);
return result;
} END_TRY;
}
int wdFindElementById(WebDriver* driver, WebElement* element, const wchar_t* id, WebElement** result)
{
*result = NULL;
if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
InternetExplorerDriver* ie = driver->ie;
CComPtr<IHTMLElement> elem;
if (element && element->element) {
elem = element->element->getWrappedElement();
}
try {
clock_t end = endAt(driver);
int res = ENOSUCHELEMENT;
do {
ElementWrapper* wrapper;
res = ie->selectElementById(elem, id, &wrapper);
if (res != SUCCESS) {
continue;
}
WebElement* toReturn = new WebElement();
toReturn->element = wrapper;
*result = toReturn;
return SUCCESS;
} while (clock() < end);
return res;
} END_TRY;
}
int wdFindElementsById(WebDriver* driver, WebElement* element, const wchar_t* id, ElementCollection** result)
{
*result = NULL;
if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
try {
InternetExplorerDriver* ie = driver->ie;
CComPtr<IHTMLElement> elem;
if (element && element->element) {
elem = element->element->getWrappedElement();
}
clock_t end = endAt(driver);
ElementCollection* collection = new ElementCollection();
*result = collection;
do {
collection->elements = driver->ie->selectElementsById(elem, id);
if (collection->elements->size() > 0) {
return SUCCESS;
}
} while (clock() < end);
return SUCCESS;
} END_TRY;
}
int wdFindElementByName(WebDriver* driver, WebElement* element, const wchar_t* name, WebElement** result)
{
*result = NULL;
if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
InternetExplorerDriver* ie = driver->ie;
CComPtr<IHTMLElement> elem;
if (element && element->element) {
elem = element->element->getWrappedElement();
}
try {
clock_t end = endAt(driver);
int res = ENOSUCHELEMENT;
do {
ElementWrapper* wrapper;
			res = ie->selectElementByName(elem, name, &wrapper);
if (res != SUCCESS) {
continue;
}
WebElement* toReturn = new WebElement();
toReturn->element = wrapper;
*result = toReturn;
return SUCCESS;
} while (clock() < end);
return res;
} END_TRY;
}
int wdFindElementsByName(WebDriver* driver, WebElement* element, const wchar_t* name, ElementCollection** result)
{
	*result = NULL;
	if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
	try {
InternetExplorerDriver* ie = driver->ie;
CComPtr<IHTMLElement> elem;
if (element && element->element) {
elem = element->element->getWrappedElement();
}
ElementCollection* collection = new ElementCollection();
*result = collection;
clock_t end = endAt(driver);
do {
collection->elements = driver->ie->selectElementsByName(elem, name);
if (collection->elements->size() > 0) {
return SUCCESS;
}
} while (clock() < end);
return SUCCESS;
} END_TRY;
}
int wdFindElementByClassName(WebDriver* driver, WebElement* element, const wchar_t* className, WebElement** result)
{
*result = NULL;
if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
InternetExplorerDriver* ie = driver->ie;
CComPtr<IHTMLElement> elem;
if (element && element->element) {
elem = element->element->getWrappedElement();
}
try {
clock_t end = endAt(driver);
int res = ENOSUCHELEMENT;
do {
ElementWrapper* wrapper;
			res = ie->selectElementByClassName(elem, className, &wrapper);
if (res != SUCCESS) {
continue;
}
WebElement* toReturn = new WebElement();
toReturn->element = wrapper;
*result = toReturn;
return SUCCESS;
} while (clock() < end);
return res;
} END_TRY;
}
int wdFindElementsByClassName(WebDriver* driver, WebElement* element, const wchar_t* className, ElementCollection** result)
{
*result = NULL;
if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
try {
InternetExplorerDriver* ie = driver->ie;
CComPtr<IHTMLElement> elem;
if (element && element->element) {
elem = element->element->getWrappedElement();
}
clock_t end = endAt(driver);
ElementCollection* collection = new ElementCollection();
*result = collection;
do {
collection->elements = driver->ie->selectElementsByClassName(elem, className);
if (collection->elements->size() > 0) {
return SUCCESS;
}
} while (clock() < end);
return SUCCESS;
} END_TRY;
}
int wdFindElementByCss(WebDriver* driver, WebElement* element, const wchar_t* selector, WebElement** out)
{
*out = NULL;
if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
try {
clock_t end = endAt(driver);
int result = ENOSUCHELEMENT;
do {
std::wstring script(L"(function() { return function(){");
for (int i = 0; SIZZLE[i]; i++) {
script += SIZZLE[i];
script += L"\n";
}
script += L"var root = arguments[1] ? arguments[1] : document.documentElement;";
script += L"if (root['querySelector']) { return root.querySelector(arguments[0]); } ";
script += L"var results = []; Sizzle(arguments[0], root, results);";
script += L"return results.length > 0 ? results[0] : null;";
script += L"};})();";
// Call it
ScriptArgs* args;
result = wdNewScriptArgs(&args, 2);
if (result != SUCCESS) {
wdFreeScriptArgs(args);
continue;
}
result = wdAddStringScriptArg(args, selector);
if (result != SUCCESS) {
wdFreeScriptArgs(args);
continue;
}
if (element) {
result = wdAddElementScriptArg(args, element);
}
if (result != SUCCESS) {
wdFreeScriptArgs(args);
continue;
}
ScriptResult* queryResult;
result = wdExecuteScript(driver, script.c_str(), args, &queryResult);
wdFreeScriptArgs(args);
// And be done
if (result == SUCCESS) {
int type = 0;
result = wdGetScriptResultType(driver, queryResult, &type);
if (type != TYPE_EMPTY) {
result = wdGetElementScriptResult(queryResult, driver, out);
} else {
result = ENOSUCHELEMENT;
wdFreeScriptResult(queryResult);
continue;
}
}
wdFreeScriptResult(queryResult);
return result;
} while (clock() < end);
return result;
} END_TRY;
}
int wdFindElementsByCss(WebDriver* driver, WebElement* element, const wchar_t* selector, ElementCollection** out)
{
*out = NULL;
if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
try {
clock_t end = endAt(driver);
int result = EUNHANDLEDERROR;
do {
// Call it
std::wstring script(L"(function() { return function(){");
for (int i = 0; SIZZLE[i]; i++) {
script += SIZZLE[i];
script += L"\n";
}
script += L"var root = arguments[1] ? arguments[1] : document.documentElement;";
script += L"if (root['querySelectorAll']) { return root.querySelectorAll(arguments[0]); } ";
script += L"var results = []; Sizzle(arguments[0], root, results);";
script += L"return results;";
script += L"};})();";
// Call it
ScriptArgs* args;
result = wdNewScriptArgs(&args, 2);
if (result != SUCCESS) {
wdFreeScriptArgs(args);
continue;
}
result = wdAddStringScriptArg(args, selector);
if (result != SUCCESS) {
wdFreeScriptArgs(args);
continue;
}
result = wdAddElementScriptArg(args, element);
if (result != SUCCESS) {
wdFreeScriptArgs(args);
continue;
}
ScriptResult* queryResult;
result = wdExecuteScript(driver, script.c_str(), args, &queryResult);
wdFreeScriptArgs(args);
// And be done
if (result != SUCCESS) {
wdFreeScriptResult(queryResult);
return result;
}
ElementCollection* elements = new ElementCollection();
elements->elements = new std::vector<ElementWrapper*>();
int length;
result = wdGetArrayLengthScriptResult(driver, queryResult, &length);
if (result != SUCCESS) {
wdFreeScriptResult(queryResult);
return result;
}
for (long i = 0; i < length; i++) {
ScriptResult* getElemRes;
wdGetArrayItemFromScriptResult(driver, queryResult, i, &getElemRes);
WebElement* e;
wdGetElementScriptResult(getElemRes, driver, &e);
elements->elements->push_back(e->element);
e->element = NULL;
delete e;
}
wdFreeScriptResult(queryResult);
*out = elements;
return SUCCESS;
} while (clock() < end);
return result;
} END_TRY;
}
int wdFindElementByLinkText(WebDriver* driver, WebElement* element, const wchar_t* linkText, WebElement** result)
{
*result = NULL;
if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
InternetExplorerDriver* ie = driver->ie;
CComPtr<IHTMLElement> elem;
if (element && element->element) {
elem = element->element->getWrappedElement();
}
try {
clock_t end = endAt(driver);
int res = ENOSUCHELEMENT;
do {
ElementWrapper* wrapper;
			res = ie->selectElementByLink(elem, linkText, &wrapper);
if (res != SUCCESS) {
continue;
}
WebElement* toReturn = new WebElement();
toReturn->element = wrapper;
*result = toReturn;
return SUCCESS;
} while (clock() < end);
return res;
} END_TRY;
}
int wdFindElementsByLinkText(WebDriver* driver, WebElement* element, const wchar_t* linkText, ElementCollection** result)
{
*result = NULL;
if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
try {
InternetExplorerDriver* ie = driver->ie;
CComPtr<IHTMLElement> elem;
if (element && element->element) {
elem = element->element->getWrappedElement();
}
ElementCollection* collection = new ElementCollection();
*result = collection;
clock_t end = endAt(driver);
do {
collection->elements = driver->ie->selectElementsByLink(elem, linkText);
if (collection->elements->size() > 0) {
return SUCCESS;
}
} while (clock() < end);
return SUCCESS;
} END_TRY;
}
int wdFindElementByPartialLinkText(WebDriver* driver, WebElement* element, const wchar_t* linkText, WebElement** result)
{
*result = NULL;
if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
InternetExplorerDriver* ie = driver->ie;
CComPtr<IHTMLElement> elem;
if (element && element->element) {
elem = element->element->getWrappedElement();
}
try {
clock_t end = endAt(driver);
int res = ENOSUCHELEMENT;
do {
ElementWrapper* wrapper;
			res = ie->selectElementByPartialLink(elem, linkText, &wrapper);
if (res != SUCCESS) {
continue;
}
WebElement* toReturn = new WebElement();
toReturn->element = wrapper;
*result = toReturn;
return SUCCESS;
} while (clock() < end);
return res;
} END_TRY;
}
int wdFindElementsByPartialLinkText(WebDriver* driver, WebElement* element, const wchar_t* linkText, ElementCollection** result)
{
*result = NULL;
if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
try {
InternetExplorerDriver* ie = driver->ie;
CComPtr<IHTMLElement> elem;
if (element && element->element) {
elem = element->element->getWrappedElement();
}
ElementCollection* collection = new ElementCollection();
*result = collection;
clock_t end = endAt(driver);
do {
collection->elements = driver->ie->selectElementsByPartialLink(elem, linkText);
if (collection->elements->size() > 0) {
return SUCCESS;
}
} while (clock() < end);
return SUCCESS;
} END_TRY;
}
int wdFindElementByTagName(WebDriver* driver, WebElement* element, const wchar_t* name, WebElement** result)
{
*result = NULL;
if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
InternetExplorerDriver* ie = driver->ie;
CComPtr<IHTMLElement> elem;
if (element && element->element) {
elem = element->element->getWrappedElement();
}
try {
clock_t end = endAt(driver);
int res = ENOSUCHELEMENT;
do {
ElementWrapper* wrapper;
			res = ie->selectElementByTagName(elem, name, &wrapper);
if (res != SUCCESS) {
continue;
}
WebElement* toReturn = new WebElement();
toReturn->element = wrapper;
*result = toReturn;
return SUCCESS;
} while (clock() < end);
return res;
} END_TRY;
}
int wdFindElementsByTagName(WebDriver* driver, WebElement* element, const wchar_t* name, ElementCollection** result)
{
*result = NULL;
if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
try {
InternetExplorerDriver* ie = driver->ie;
CComPtr<IHTMLElement> elem;
if (element && element->element) {
elem = element->element->getWrappedElement();
}
ElementCollection* collection = new ElementCollection();
*result = collection;
clock_t end = endAt(driver);
do {
collection->elements = driver->ie->selectElementsByTagName(elem, name);
if (collection->elements->size() > 0) {
return SUCCESS;
}
} while (clock() < end);
return SUCCESS;
} END_TRY;
}
int injectXPathEngine(WebDriver* driver)
{
// Inject the XPath engine
std::wstring script;
for (int i = 0; XPATHJS[i]; i++) {
script += XPATHJS[i];
}
ScriptArgs* args;
int result = wdNewScriptArgs(&args, 0);
if (result != SUCCESS) {
return result;
}
ScriptResult* scriptResult = NULL;
result = wdExecuteScript(driver, script.c_str(), args, &scriptResult);
wdFreeScriptArgs(args);
if (scriptResult) delete scriptResult;
return result;
}
int wdFindElementByXPath(WebDriver* driver, WebElement* element, const wchar_t* xpath, WebElement** out)
{
*out = NULL;
if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
try {
clock_t end = endAt(driver);
int result = ENOSUCHELEMENT;
do {
result = injectXPathEngine(driver);
// TODO(simon): Why does the injecting sometimes fail?
/*
if (result != SUCCESS) {
return result;
}
*/
// Call it
std::wstring query;
if (element) {
query += L"(function() { return function(){var res = document.__webdriver_evaluate(arguments[0], arguments[1], null, 7, null); return res.snapshotItem(0) ;};})();";
} else {
query += L"(function() { return function(){var res = document.__webdriver_evaluate(arguments[0], document, null, 7, null); return res.snapshotLength != 0 ? res.snapshotItem(0) : undefined ;};})();";
}
ScriptArgs* queryArgs;
result = wdNewScriptArgs(&queryArgs, 2);
if (result != SUCCESS) {
wdFreeScriptArgs(queryArgs);
continue;
}
result = wdAddStringScriptArg(queryArgs, xpath);
if (result != SUCCESS) {
wdFreeScriptArgs(queryArgs);
continue;
}
if (element) {
result = wdAddElementScriptArg(queryArgs, element);
}
if (result != SUCCESS) {
wdFreeScriptArgs(queryArgs);
continue;
}
ScriptResult* queryResult;
result = wdExecuteScript(driver, query.c_str(), queryArgs, &queryResult);
wdFreeScriptArgs(queryArgs);
// And be done
if (result == SUCCESS) {
int type = 0;
result = wdGetScriptResultType(driver, queryResult, &type);
if (type != TYPE_EMPTY) {
result = wdGetElementScriptResult(queryResult, driver, out);
} else {
result = ENOSUCHELEMENT;
wdFreeScriptResult(queryResult);
continue;
}
}
wdFreeScriptResult(queryResult);
return result;
} while (clock() < end);
return result;
} END_TRY;
}
int wdFindElementsByXPath(WebDriver* driver, WebElement* element, const wchar_t* xpath, ElementCollection** out)
{
*out = NULL;
if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
try {
clock_t end = endAt(driver);
int result = EUNHANDLEDERROR;
do {
result = injectXPathEngine(driver);
if (result != SUCCESS) {
continue;
}
// Call it
std::wstring query;
if (element)
query += L"(function() { return function() {var res = document.__webdriver_evaluate(arguments[0], arguments[1], null, 7, null); return res;};})();";
else
query += L"(function() { return function() {var res = document.__webdriver_evaluate(arguments[0], document, null, 7, null); return res;};})();";
// We need to use the raw functions because we don't allow random objects
// to be returned from the executeScript method normally
SAFEARRAYBOUND bounds;
bounds.cElements = 2;
bounds.lLbound = 0;
SAFEARRAY* queryArgs = SafeArrayCreate(VT_VARIANT, 1, &bounds);
CComVariant queryArg(xpath);
LONG index = 0;
SafeArrayPutElement(queryArgs, &index, &queryArg);
if (element) {
CComVariant elementArg(element->element->getWrappedElement());
LONG index = 1;
SafeArrayPutElement(queryArgs, &index, &elementArg);
}
CComVariant snapshot;
result = driver->ie->executeScript(query.c_str(), queryArgs, &snapshot);
SafeArrayDestroy(queryArgs);
if (result != SUCCESS) {
continue;
}
bounds.cElements = 1;
SAFEARRAY* lengthArgs = SafeArrayCreate(VT_VARIANT, 1, &bounds);
index = 0;
SafeArrayPutElement(lengthArgs, &index, &snapshot);
CComVariant lengthVar;
result = driver->ie->executeScript(L"(function(){return function() {return arguments[0].snapshotLength;}})();", lengthArgs, &lengthVar);
SafeArrayDestroy(lengthArgs);
if (result != SUCCESS) {
continue;
}
if (lengthVar.vt != VT_I4) {
result = EUNEXPECTEDJSERROR;
continue;
}
			long length = lengthVar.lVal;
			ElementCollection* elements = new ElementCollection();
			elements->elements = new std::vector<ElementWrapper*>();
for (long i = 0; i < length; i++) {
ScriptArgs* getElemArgs;
wdNewScriptArgs(&getElemArgs, 2);
// Cheat
index = 0;
SafeArrayPutElement(getElemArgs->args, &index, &snapshot);
getElemArgs->currentIndex++;
wdAddNumberScriptArg(getElemArgs, i);
ScriptResult* getElemRes;
wdExecuteScript(driver, L"(function(){return function() {return arguments[0].iterateNext();}})();", getElemArgs, &getElemRes);
WebElement* e;
wdGetElementScriptResult(getElemRes, driver, &e);
				elements->elements->push_back(e->element);
				e->element = NULL;
				delete e;
				wdFreeScriptArgs(getElemArgs);
			}
*out = elements;
return SUCCESS;
} while (clock() < end);
return result;
} END_TRY;
}
int wdNewScriptArgs(ScriptArgs** scriptArgs, int maxLength)
{
*scriptArgs = NULL;
ScriptArgs* args = new ScriptArgs();
args->currentIndex = 0;
args->maxLength = maxLength;
SAFEARRAYBOUND bounds;
bounds.cElements = maxLength;
bounds.lLbound = 0;
args->args = SafeArrayCreate(VT_VARIANT, 1, &bounds);
*scriptArgs = args;
return SUCCESS;
}
int wdAddStringScriptArg(ScriptArgs* scriptArgs, const wchar_t* arg)
{
CComVariant dest(arg);
LONG index = scriptArgs->currentIndex;
SafeArrayPutElement(scriptArgs->args, &index, &dest);
scriptArgs->currentIndex++;
return SUCCESS;
}
int wdAddBooleanScriptArg(ScriptArgs* scriptArgs, int trueOrFalse)
{
VARIANT dest;
dest.vt = VT_BOOL;
	dest.boolVal = (trueOrFalse == 1) ? VARIANT_TRUE : VARIANT_FALSE;
LONG index = scriptArgs->currentIndex;
SafeArrayPutElement(scriptArgs->args, &index, &dest);
scriptArgs->currentIndex++;
return SUCCESS;
}
int wdAddNumberScriptArg(ScriptArgs* scriptArgs, long number)
{
VARIANT dest;
dest.vt = VT_I4;
dest.lVal = (LONG) number;
LONG index = scriptArgs->currentIndex;
SafeArrayPutElement(scriptArgs->args, &index, &dest);
scriptArgs->currentIndex++;
return SUCCESS;
}
int wdAddDoubleScriptArg(ScriptArgs* scriptArgs, double number)
{
VARIANT dest;
dest.vt = VT_R8;
dest.dblVal = (DOUBLE) number;
LONG index = scriptArgs->currentIndex;
SafeArrayPutElement(scriptArgs->args, &index, &dest);
scriptArgs->currentIndex++;
return SUCCESS;
}
int wdAddElementScriptArg(ScriptArgs* scriptArgs, WebElement* element)
{
	VARIANT dest;
	VariantInit(&dest);
if (!element || !element->element) {
dest.vt = VT_EMPTY;
} else {
dest.vt = VT_DISPATCH;
dest.pdispVal = element->element->getWrappedElement();
}
LONG index = scriptArgs->currentIndex;
SafeArrayPutElement(scriptArgs->args, &index, &dest);
scriptArgs->currentIndex++;
return SUCCESS;
}
int wdExecuteScript(WebDriver* driver, const wchar_t* script, ScriptArgs* scriptArgs, ScriptResult** scriptResultRef)
{
try {
*scriptResultRef = NULL;
CComVariant result;
int res = driver->ie->executeScript(script, scriptArgs->args, &result);
if (res != SUCCESS) {
return res;
}
ScriptResult* toReturn = new ScriptResult();
HRESULT hr = VariantCopy(&(toReturn->result), &result);
if (!SUCCEEDED(hr) && result.vt == VT_USERDEFINED) {
// Special handling of the user defined path *sigh*
toReturn->result.vt = VT_USERDEFINED;
toReturn->result.bstrVal = CComBSTR(result.bstrVal);
}
*scriptResultRef = toReturn;
return SUCCESS;
} END_TRY;
}
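	// Illustrative usage sketch (assumed client code, not part of this DLL):
	// the expected lifecycle for running script through this API. Scripts are
	// wrapped as "(function() { return function(){...};})();" so the driver
	// can apply the supplied arguments to the inner function.
	//
	//   ScriptArgs* args = NULL;
	//   ScriptResult* scriptResult = NULL;
	//   wdNewScriptArgs(&args, 1);
	//   wdAddStringScriptArg(args, L"hello");
	//   int result = wdExecuteScript(driver,
	//       L"(function() { return function(){ return arguments[0]; };})();",
	//       args, &scriptResult);
	//   wdFreeScriptArgs(args);
	//   if (result == SUCCESS) {
	//       StringWrapper* value = NULL;
	//       wdGetStringScriptResult(scriptResult, &value);
	//       // ... use value->text ...
	//       wdFreeString(value);
	//   }
	//   wdFreeScriptResult(scriptResult);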
int wdGetScriptResultType(WebDriver* driver, ScriptResult* result, int* type)
{
if (!result) { return ENOSCRIPTRESULT; }
switch (result->result.vt) {
case VT_BSTR:
*type = TYPE_STRING;
break;
case VT_I4:
case VT_I8:
*type = TYPE_LONG;
break;
case VT_BOOL:
*type = TYPE_BOOLEAN;
break;
case VT_DISPATCH:
{
LPCWSTR itemType = driver->ie->getScriptResultType(&(result->result));
std::string itemTypeStr;
cw2string(itemType, itemTypeStr);
LOG(DEBUG) << "Got type: " << itemTypeStr;
			// If it's a Javascript array or an HTML collection, TYPE_ARRAY
			// will indicate to the driver that this is ultimately an array.
if ((itemTypeStr == "JavascriptArray") ||
(itemTypeStr == "HtmlCollection")) {
*type = TYPE_ARRAY;
} else {
*type = TYPE_ELEMENT;
}
}
break;
case VT_EMPTY:
*type = TYPE_EMPTY;
break;
case VT_USERDEFINED:
*type = TYPE_EXCEPTION;
break;
case VT_R4:
case VT_R8:
*type = TYPE_DOUBLE;
break;
default:
return EUNKNOWNSCRIPTRESULT;
}
return SUCCESS;
}
int wdGetStringScriptResult(ScriptResult* result, StringWrapper** wrapper)
{
*wrapper = NULL;
if (!result) { return ENOSCRIPTRESULT; }
StringWrapper* toReturn = new StringWrapper();
BSTR val = result->result.bstrVal;
if (!val) {
toReturn->text = new wchar_t[1];
wcscpy_s(toReturn->text, 1, L"");
} else {
UINT length = SysStringLen(val);
toReturn->text = new wchar_t[length + 1];
wcscpy_s(toReturn->text, length + 1, val);
}
*wrapper = toReturn;
return SUCCESS;
}
int wdGetNumberScriptResult(ScriptResult* result, long* value)
{
if (!result) { return ENOSCRIPTRESULT; }
*value = result->result.lVal;
return SUCCESS;
}
int wdGetDoubleScriptResult(ScriptResult* result, double* value)
{
if (!result) { return ENOSCRIPTRESULT; }
*value = result->result.dblVal;
return SUCCESS;
}
int wdGetBooleanScriptResult(ScriptResult* result, int* value)
{
if (!result) { return ENOSCRIPTRESULT; }
*value = result->result.boolVal == VARIANT_TRUE ? 1 : 0;
return SUCCESS;
}
int wdGetElementScriptResult(ScriptResult* result, WebDriver* driver, WebElement** element)
{
*element = NULL;
if (!result) { return ENOSCRIPTRESULT; }
IHTMLElement *node = (IHTMLElement*) result->result.pdispVal;
WebElement* toReturn = new WebElement();
toReturn->element = new ElementWrapper(driver->ie, node);
*element = toReturn;
return SUCCESS;
}
int wdGetArrayLengthScriptResult(WebDriver* driver, ScriptResult* result,
int* length)
{
// Prepare an array for the Javascript execution, containing only one
// element - the original returned array from a JS execution.
SAFEARRAYBOUND lengthQuery;
lengthQuery.cElements = 1;
lengthQuery.lLbound = 0;
SAFEARRAY* lengthArgs = SafeArrayCreate(VT_VARIANT, 1, &lengthQuery);
LONG index = 0;
SafeArrayPutElement(lengthArgs, &index, &(result->result));
CComVariant lengthVar;
int lengthResult = driver->ie->executeScript(
L"(function(){return function() {return arguments[0].length;}})();",
lengthArgs, &lengthVar);
SafeArrayDestroy(lengthArgs);
if (lengthResult != SUCCESS) {
return lengthResult;<|fim▁hole|> // Expect the return type to be an integer. A non-integer means this was
// not an array after all.
if (lengthVar.vt != VT_I4) {
return EUNEXPECTEDJSERROR;
}
*length = lengthVar.lVal;
return SUCCESS;
}
int wdGetArrayItemFromScriptResult(WebDriver* driver, ScriptResult* result,
int index, ScriptResult** arrayItem)
{
// Prepare an array for Javascript execution. The array contains the original
// array returned from a previous execution and the index of the item required
// from that array.
ScriptArgs* getItemArgs;
wdNewScriptArgs(&getItemArgs, 2);
LONG argIndex = 0;
// Original array.
SafeArrayPutElement(getItemArgs->args, &argIndex, &(result->result));
getItemArgs->currentIndex++;
// Item index
wdAddNumberScriptArg(getItemArgs, index);
int execRes = wdExecuteScript(
driver,
L"(function(){return function() {return arguments[0][arguments[1]];}})();",
getItemArgs, arrayItem);
wdFreeScriptArgs(getItemArgs);
getItemArgs = NULL;
return execRes;
}
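	// Illustrative sketch (assumed client code): iterating an array-typed
	// script result with the two helpers above.
	//
	//   int length = 0;
	//   wdGetArrayLengthScriptResult(driver, scriptResult, &length);
	//   for (int i = 0; i < length; i++) {
	//       ScriptResult* item = NULL;
	//       wdGetArrayItemFromScriptResult(driver, scriptResult, i, &item);
	//       // ... inspect item, e.g. with wdGetScriptResultType ...
	//       wdFreeScriptResult(item);
	//   }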
int wdeMouseDownAt(HWND hwnd, long windowX, long windowY)
{
mouseDownAt(hwnd, windowX, windowY, MOUSEBUTTON_LFET);
return SUCCESS;
}
int wdeMouseUpAt(HWND hwnd, long windowX, long windowY)
{
mouseUpAt(hwnd, windowX, windowY, MOUSEBUTTON_LFET);
return SUCCESS;
}
int wdeMouseMoveTo(HWND hwnd, long duration, long fromX, long fromY, long toX, long toY)
{
mouseMoveTo(hwnd, duration, fromX, fromY, toX, toY);
return SUCCESS;
}
int wdCaptureScreenshotAsBase64(WebDriver* driver, StringWrapper** result) {
*result = NULL;
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
const std::wstring originalString(driver->ie->captureScreenshotAsBase64());
size_t length = originalString.length() + 1;
wchar_t* toReturn = new wchar_t[length];
wcscpy_s(toReturn, length, originalString.c_str());
StringWrapper* res = new StringWrapper();
res->text = toReturn;
*result = res;
return SUCCESS;
} END_TRY;
}
int wdSetImplicitWaitTimeout(WebDriver* driver, long timeoutInMillis)
{
if (!driver || !driver->ie) return ENOSUCHDRIVER;
driver->implicitWaitTimeout = timeoutInMillis;
return SUCCESS;
}
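	// Illustrative sketch (assumed client code): once an implicit wait is set,
	// the wdFindElement* functions above retry until the deadline computed by
	// endAt() expires.
	//
	//   wdSetImplicitWaitTimeout(driver, 5000);
	//   WebElement* element = NULL;
	//   int result = wdFindElementById(driver, NULL, L"username", &element);
	//   // result becomes ENOSUCHELEMENT only after ~5 seconds of retries.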
}<|fim▁end|> | }
|
<|file_name|>test_driver.py<|end_file_name|><|fim▁begin|>"""
@package mi.instrument.seabird.sbe16plus_v2.test.test_driver
@file mi/instrument/seabird/sbe16plus_v2/test/test_driver.py
@author David Everett
@brief Test cases for InstrumentDriver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u
$ bin/test_driver -i
$ bin/test_driver -q
* From pyon
$ bin/nosetests -s -v .../mi/instrument/seabird/sbe16plus_v2/ooicore
$ bin/nosetests -s -v .../mi/instrument/seabird/sbe16plus_v2/ooicore -a UNIT
$ bin/nosetests -s -v .../mi/instrument/seabird/sbe16plus_v2/ooicore -a INT
$ bin/nosetests -s -v .../mi/instrument/seabird/sbe16plus_v2/ooicore -a QUAL
"""
__author__ = 'David Everett'
__license__ = 'Apache 2.0'
# Standard lib imports
import time
import unittest
# 3rd party imports
from nose.plugins.attrib import attr
from mock import Mock
# MI logger
from mi.core.log import get_logger ; log = get_logger()
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.chunker import StringChunker
# from interface.objects import AgentCommand
from mi.idk.unit_test import DriverTestMixin
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import AgentCapabilityType
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import InstrumentProtocolException
from mi.core.exceptions import InstrumentCommandException
from mi.core.exceptions import InstrumentTimeoutException
from mi.instrument.seabird.sbe16plus_v2.driver import SBE16Protocol
from mi.instrument.seabird.sbe16plus_v2.driver import SBE16InstrumentDriver
from mi.instrument.seabird.sbe16plus_v2.driver import DataParticleType
from mi.instrument.seabird.sbe16plus_v2.driver import ConfirmedParameter
from mi.instrument.seabird.sbe16plus_v2.driver import NEWLINE
from mi.instrument.seabird.sbe16plus_v2.driver import SBE16DataParticleKey
from mi.instrument.seabird.sbe16plus_v2.driver import SBE16StatusParticleKey
from mi.instrument.seabird.sbe16plus_v2.driver import SBE16CalibrationParticleKey
from mi.instrument.seabird.sbe16plus_v2.driver import ProtocolState
from mi.instrument.seabird.sbe16plus_v2.driver import ProtocolEvent
from mi.instrument.seabird.sbe16plus_v2.driver import ScheduledJob
from mi.instrument.seabird.sbe16plus_v2.driver import Capability
from mi.instrument.seabird.sbe16plus_v2.driver import Parameter
from mi.instrument.seabird.sbe16plus_v2.driver import Command
from mi.instrument.seabird.sbe16plus_v2.driver import Prompt
from mi.instrument.seabird.driver import SBE_EPOCH
from mi.instrument.seabird.test.test_driver import SeaBirdUnitTest
from mi.instrument.seabird.test.test_driver import SeaBirdIntegrationTest
from mi.instrument.seabird.test.test_driver import SeaBirdQualificationTest
from mi.instrument.seabird.test.test_driver import SeaBirdPublicationTest
from mi.core.instrument.instrument_driver import DriverConnectionState
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import ResourceAgentState
class SeaBird16plusMixin(DriverTestMixin):
    '''
    Mixin class used for storing data particle constants and common data assertion methods.
    '''
    InstrumentDriver = SBE16InstrumentDriver
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
###
# Instrument output (driver input) Definitions
###
VALID_SAMPLE = "#0409DB0A738C81747A84AC0006000A2E541E18BE6ED9" + NEWLINE
VALID_SAMPLE2 = "0409DB0A738C81747A84AC0006000A2E541E18BE6ED9" + NEWLINE
VALID_DS_RESPONSE = 'SBE 16plus V 2.5 SERIAL NO. 6841 28 Feb 2013 16:39:31' + NEWLINE + \
'vbatt = 23.4, vlith = 8.0, ioper = 61.4 ma, ipump = 0.3 ma,' + NEWLINE + \
'status = not logging' + NEWLINE + \
'samples = 0, free = 4386542' + NEWLINE + \
'sample interval = 10 seconds, number of measurements per sample = 4' + NEWLINE + \
'pump = run pump during sample, delay before sampling = 0.0 seconds, delay after sampling = 0.0 seconds' + NEWLINE + \
'transmit real-time = yes' + NEWLINE + \
'battery cutoff = 7.5 volts' + NEWLINE + \<|fim▁hole|> 'pressure sensor = strain gauge, range = 160.0' + NEWLINE + \
'SBE 38 = no, SBE 50 = no, WETLABS = no, OPTODE = no, SBE63 = no, Gas Tension Device = no' + NEWLINE + \
'Ext Volt 0 = yes, Ext Volt 1 = yes' + NEWLINE + \
'Ext Volt 2 = yes, Ext Volt 3 = yes' + NEWLINE + \
'Ext Volt 4 = yes, Ext Volt 5 = yes' + NEWLINE + \
'echo characters = yes' + NEWLINE + \
'output format = raw HEX' + NEWLINE + \
'serial sync mode disabled' + NEWLINE
VALID_DCAL_QUARTZ = 'SBE 16plus V 2.5 SERIAL NO. 6841 28 Feb 2013 18:37:40' + NEWLINE + \
'temperature: 18-May-12' + NEWLINE + \
' TA0 = 1.561342e-03' + NEWLINE + \
' TA1 = 2.561486e-04' + NEWLINE + \
' TA2 = 1.896537e-07' + NEWLINE + \
' TA3 = 1.301189e-07' + NEWLINE + \
' TOFFSET = 0.000000e+00' + NEWLINE + \
'conductivity: 18-May-11' + NEWLINE + \
' G = -9.896568e-01' + NEWLINE + \
' H = 1.316599e-01' + NEWLINE + \
' I = -2.213854e-04' + NEWLINE + \
' J = 3.292199e-05' + NEWLINE + \
' CPCOR = -9.570000e-08' + NEWLINE + \
' CTCOR = 3.250000e-06' + NEWLINE + \
' CSLOPE = 1.000000e+00' + NEWLINE + \
'pressure S/N = 125270, range = 1000 psia: 02-nov-12' + NEWLINE + \
' PC1 = -4.642673e+03' + NEWLINE + \
' PC2 = -4.611640e-03' + NEWLINE + \
' PC3 = 8.921190e-04' + NEWLINE + \
' PD1 = 7.024800e-02' + NEWLINE + \
' PD2 = 0.000000e+00' + NEWLINE + \
' PT1 = 3.022595e+01' + NEWLINE + \
' PT2 = -1.549720e-04' + NEWLINE + \
' PT3 = 2.677750e-06' + NEWLINE + \
' PT4 = 1.705490e-09' + NEWLINE + \
' PSLOPE = 1.000000e+00' + NEWLINE + \
' POFFSET = 0.000000e+00' + NEWLINE + \
'volt 0: offset = -4.650526e-02, slope = 1.246381e+00' + NEWLINE + \
'volt 1: offset = -4.618105e-02, slope = 1.247197e+00' + NEWLINE + \
'volt 2: offset = -4.659790e-02, slope = 1.247601e+00' + NEWLINE + \
'volt 3: offset = -4.502421e-02, slope = 1.246911e+00' + NEWLINE + \
'volt 4: offset = -4.589158e-02, slope = 1.246346e+00' + NEWLINE + \
'volt 5: offset = -4.609895e-02, slope = 1.247868e+00' + NEWLINE + \
' EXTFREQSF = 9.999949e-01' + NEWLINE
    VALID_DCAL_STRAIN = 'SBE 16plus V 2.5  SERIAL NO. 6841    28 Feb 2013 18:37:40' + NEWLINE + \
'temperature: 18-May-12' + NEWLINE + \
' TA0 = 1.561342e-03' + NEWLINE + \
' TA1 = 2.561486e-04' + NEWLINE + \
' TA2 = 1.896537e-07' + NEWLINE + \
' TA3 = 1.301189e-07' + NEWLINE + \
' TOFFSET = 0.000000e+00' + NEWLINE + \
'conductivity: 18-May-11' + NEWLINE + \
' G = -9.896568e-01' + NEWLINE + \
' H = 1.316599e-01' + NEWLINE + \
' I = -2.213854e-04' + NEWLINE + \
' J = 3.292199e-05' + NEWLINE + \
' CPCOR = -9.570000e-08' + NEWLINE + \
' CTCOR = 3.250000e-06' + NEWLINE + \
' CSLOPE = 1.000000e+00' + NEWLINE + \
'pressure S/N = 3230195, range = 160 psia: 11-May-11' + NEWLINE + \
' PA0 = 4.960417e-02' + NEWLINE + \
' PA1 = 4.883682e-04' + NEWLINE + \
' PA2 = -5.687309e-12' + NEWLINE + \
' PTCA0 = 5.249802e+05' + NEWLINE + \
' PTCA1 = 7.595719e+00' + NEWLINE + \
' PTCA2 = -1.322776e-01' + NEWLINE + \
' PTCB0 = 2.503125e+01' + NEWLINE + \
' PTCB1 = 5.000000e-05' + NEWLINE + \
' PTCB2 = 0.000000e+00' + NEWLINE + \
' PTEMPA0 = -6.431504e+01' + NEWLINE + \
' PTEMPA1 = 5.168177e+01' + NEWLINE + \
' PTEMPA2 = -2.847757e-01' + NEWLINE + \
' POFFSET = 0.000000e+00' + NEWLINE + \
'volt 0: offset = -4.650526e-02, slope = 1.246381e+00' + NEWLINE + \
'volt 1: offset = -4.618105e-02, slope = 1.247197e+00' + NEWLINE + \
'volt 2: offset = -4.659790e-02, slope = 1.247601e+00' + NEWLINE + \
'volt 3: offset = -4.502421e-02, slope = 1.246911e+00' + NEWLINE + \
'volt 4: offset = -4.589158e-02, slope = 1.246346e+00' + NEWLINE + \
'volt 5: offset = -4.609895e-02, slope = 1.247868e+00' + NEWLINE + \
' EXTFREQSF = 9.999949e-01' + NEWLINE
###
# Parameter and Type Definitions
###
_driver_parameters = {
# Parameters defined in the IOS
Parameter.DATE_TIME : {TYPE: str, READONLY: True, DA: False, STARTUP: False},
Parameter.ECHO : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True},
Parameter.OUTPUT_EXEC_TAG : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True},
Parameter.TXREALTIME : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True},
Parameter.PUMP_MODE : {TYPE: int, READONLY: False, DA: True, STARTUP: True, DEFAULT: 2, VALUE: 2},
Parameter.NCYCLES : {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 4, VALUE: 4},
Parameter.INTERVAL : {TYPE: int, READONLY: False, DA: False, STARTUP: True, VALUE: 10},
Parameter.BIOWIPER : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: False},
Parameter.PTYPE : {TYPE: int, READONLY: True, DA: True, STARTUP: True, DEFAULT: 1, VALUE: 1},
Parameter.VOLT0 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True},
Parameter.VOLT1 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True},
Parameter.VOLT2 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True},
Parameter.VOLT3 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True},
Parameter.VOLT4 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True},
Parameter.VOLT5 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True},
Parameter.DELAY_BEFORE_SAMPLE : {TYPE: float, READONLY: True, DA: True, STARTUP: True, DEFAULT: 0.0, VALUE: 0.0},
Parameter.DELAY_AFTER_SAMPLE : {TYPE: float, READONLY: True, DA: True, STARTUP: True, DEFAULT: 0.0, VALUE: 0.0},
Parameter.SBE63 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: False},
Parameter.SBE38 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: False},
Parameter.SBE50 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: False},
Parameter.WETLABS : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: False},
Parameter.GTD : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: False},
Parameter.OPTODE : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: False},
Parameter.SYNCMODE : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: False},
        Parameter.SYNCWAIT : {TYPE: int, READONLY: True, DA: False, STARTUP: False, DEFAULT: 0, VALUE: 0, REQUIRED: False},
Parameter.OUTPUT_FORMAT : {TYPE: int, READONLY: True, DA: True, STARTUP: True, DEFAULT: 0, VALUE: 0},
Parameter.LOGGING : {TYPE: bool, READONLY: True, DA: False, STARTUP: False},
}
_driver_capabilities = {
# capabilities defined in the IOS
Capability.QUIT_SESSION : {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
Capability.START_AUTOSAMPLE : {STATES: [ProtocolState.COMMAND]},
Capability.STOP_AUTOSAMPLE : {STATES: [ProtocolState.AUTOSAMPLE]},
Capability.CLOCK_SYNC : {STATES: [ProtocolState.COMMAND]},
Capability.ACQUIRE_STATUS : {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
Capability.GET_CONFIGURATION : {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
Capability.TEST : {STATES: [ProtocolState.COMMAND]},
Capability.RESET_EC : {STATES: [ProtocolState.COMMAND]},
}
_sample_parameters = {
SBE16DataParticleKey.TEMP: {TYPE: int, VALUE: 264667, REQUIRED: True },
SBE16DataParticleKey.CONDUCTIVITY: {TYPE: int, VALUE: 684940, REQUIRED: True },
SBE16DataParticleKey.PRESSURE: {TYPE: int, VALUE: 8483962, REQUIRED: True },
SBE16DataParticleKey.PRESSURE_TEMP: {TYPE: int, VALUE: 33964, REQUIRED: True },
SBE16DataParticleKey.TIME: {TYPE: int, VALUE: 415133401, REQUIRED: True },
}
_status_parameters = {
SBE16StatusParticleKey.FIRMWARE_VERSION: {TYPE: unicode, VALUE: '2.5', REQUIRED: True },
SBE16StatusParticleKey.SERIAL_NUMBER: {TYPE: int, VALUE: 6841, REQUIRED: True },
SBE16StatusParticleKey.DATE_TIME: {TYPE: unicode, VALUE: '28 Feb 2013 16:39:31', REQUIRED: True },
SBE16StatusParticleKey.VBATT: {TYPE: float, VALUE: 23.4, REQUIRED: True },
SBE16StatusParticleKey.VLITH: {TYPE: float, VALUE: 8.0, REQUIRED: True },
SBE16StatusParticleKey.IOPER: {TYPE: float, VALUE: 61.4, REQUIRED: True },
SBE16StatusParticleKey.IPUMP: {TYPE: float, VALUE: 0.3, REQUIRED: True },
SBE16StatusParticleKey.STATUS: {TYPE: unicode, VALUE: 'not logging', REQUIRED: True },
SBE16StatusParticleKey.SAMPLES: {TYPE: int, VALUE: 0, REQUIRED: True },
SBE16StatusParticleKey.FREE: {TYPE: int, VALUE: 4386542, REQUIRED: True },
SBE16StatusParticleKey.SAMPLE_INTERVAL: {TYPE: int, VALUE: 10, REQUIRED: True },
SBE16StatusParticleKey.MEASUREMENTS_PER_SAMPLE: {TYPE: int, VALUE: 4, REQUIRED: True },
SBE16StatusParticleKey.PUMP_MODE: {TYPE: unicode, VALUE: 'run pump during sample', REQUIRED: True },
SBE16StatusParticleKey.DELAY_BEFORE_SAMPLING: {TYPE: float, VALUE: 0.0, REQUIRED: True },
SBE16StatusParticleKey.DELAY_AFTER_SAMPLING: {TYPE: float, VALUE: 0.0, REQUIRED: True },
SBE16StatusParticleKey.TX_REAL_TIME: {TYPE: int, VALUE: 1, REQUIRED: True },
SBE16StatusParticleKey.BATTERY_CUTOFF: {TYPE: float, VALUE: 7.5, REQUIRED: True },
SBE16StatusParticleKey.PRESSURE_SENSOR: {TYPE: unicode, VALUE: 'strain gauge', REQUIRED: True },
SBE16StatusParticleKey.RANGE: {TYPE: float, VALUE: 160, REQUIRED: True },
SBE16StatusParticleKey.SBE38: {TYPE: int, VALUE: 0, REQUIRED: True },
SBE16StatusParticleKey.SBE50: {TYPE: int, VALUE: 0, REQUIRED: True },
SBE16StatusParticleKey.WETLABS: {TYPE: int, VALUE: 0, REQUIRED: True },
SBE16StatusParticleKey.OPTODE: {TYPE: int, VALUE: 0, REQUIRED: True },
SBE16StatusParticleKey.GAS_TENSION_DEVICE: {TYPE: int, VALUE: 0, REQUIRED: True },
SBE16StatusParticleKey.EXT_VOLT_0: {TYPE: int, VALUE: 1, REQUIRED: True },
SBE16StatusParticleKey.EXT_VOLT_1: {TYPE: int, VALUE: 1, REQUIRED: True },
SBE16StatusParticleKey.EXT_VOLT_2: {TYPE: int, VALUE: 1, REQUIRED: True },
SBE16StatusParticleKey.EXT_VOLT_3: {TYPE: int, VALUE: 1, REQUIRED: True },
SBE16StatusParticleKey.EXT_VOLT_4: {TYPE: int, VALUE: 1, REQUIRED: True },
SBE16StatusParticleKey.EXT_VOLT_5: {TYPE: int, VALUE: 1, REQUIRED: True },
SBE16StatusParticleKey.ECHO_CHARACTERS: {TYPE: int, VALUE: 1, REQUIRED: True },
SBE16StatusParticleKey.OUTPUT_FORMAT: {TYPE: int, VALUE: 0, REQUIRED: True },
SBE16StatusParticleKey.OUTPUT_SALINITY: {TYPE: int, VALUE: 0, REQUIRED: False },
SBE16StatusParticleKey.OUTPUT_SOUND_VELOCITY: {TYPE: int, VALUE: 0, REQUIRED: False },
SBE16StatusParticleKey.SERIAL_SYNC_MODE: {TYPE: int, VALUE: 0, REQUIRED: True },
}
    # Base calibration structure, which excludes the pressure sensor parameters. Those
    # parameters depend on ptype.
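    # The ptype-specific particle definitions below are built by merging the sensor-specific
    # keys with this shared base, e.g. dict({<quartz keys>}, **_calibration_parameters_base).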
_calibration_parameters_base = {
SBE16CalibrationParticleKey.FIRMWARE_VERSION: {TYPE: unicode, VALUE: "2.5", REQUIRED: True },
SBE16CalibrationParticleKey.SERIAL_NUMBER: {TYPE: int, VALUE: 6841, REQUIRED: True },
SBE16CalibrationParticleKey.DATE_TIME: {TYPE: unicode, VALUE: "28 Feb 2013 18:37:40", REQUIRED: True },
SBE16CalibrationParticleKey.TEMP_CAL_DATE: {TYPE: unicode, VALUE: "18-May-12", REQUIRED: True },
SBE16CalibrationParticleKey.TA0: {TYPE: float, VALUE: 1.561342e-03, REQUIRED: True },
SBE16CalibrationParticleKey.TA1: {TYPE: float, VALUE: 2.561486e-04, REQUIRED: True },
SBE16CalibrationParticleKey.TA2: {TYPE: float, VALUE: 1.896537e-07, REQUIRED: True },
SBE16CalibrationParticleKey.TA3: {TYPE: float, VALUE: 1.301189e-07, REQUIRED: True },
SBE16CalibrationParticleKey.TOFFSET: {TYPE: float, VALUE: 0.0, REQUIRED: True },
SBE16CalibrationParticleKey.COND_CAL_DATE: {TYPE: unicode, VALUE: '18-May-11', REQUIRED: True },
SBE16CalibrationParticleKey.CONDG: {TYPE: float, VALUE: -9.896568e-01, REQUIRED: True },
SBE16CalibrationParticleKey.CONDH: {TYPE: float, VALUE: 1.316599e-01, REQUIRED: True },
SBE16CalibrationParticleKey.CONDI: {TYPE: float, VALUE: -2.213854e-04, REQUIRED: True },
SBE16CalibrationParticleKey.CONDJ: {TYPE: float, VALUE: 3.292199e-05, REQUIRED: True },
SBE16CalibrationParticleKey.CPCOR: {TYPE: float, VALUE: -9.570000e-08, REQUIRED: True },
SBE16CalibrationParticleKey.CTCOR: {TYPE: float, VALUE: 3.250000e-06, REQUIRED: True },
SBE16CalibrationParticleKey.CSLOPE: {TYPE: float, VALUE: 1.0, REQUIRED: True },
SBE16CalibrationParticleKey.EXT_VOLT0_OFFSET: {TYPE: float, VALUE: -4.650526e-02, REQUIRED: True },
SBE16CalibrationParticleKey.EXT_VOLT0_SLOPE: {TYPE: float, VALUE: 1.246381e+00, REQUIRED: True },
SBE16CalibrationParticleKey.EXT_VOLT1_OFFSET: {TYPE: float, VALUE: -4.618105e-02, REQUIRED: True },
SBE16CalibrationParticleKey.EXT_VOLT1_SLOPE: {TYPE: float, VALUE: 1.247197e+00, REQUIRED: True },
SBE16CalibrationParticleKey.EXT_VOLT2_OFFSET: {TYPE: float, VALUE: -4.659790e-02, REQUIRED: True },
SBE16CalibrationParticleKey.EXT_VOLT2_SLOPE: {TYPE: float, VALUE: 1.247601e+00, REQUIRED: True },
SBE16CalibrationParticleKey.EXT_VOLT3_OFFSET: {TYPE: float, VALUE: -4.502421e-02, REQUIRED: True },
SBE16CalibrationParticleKey.EXT_VOLT3_SLOPE: {TYPE: float, VALUE: 1.246911e+00, REQUIRED: True },
SBE16CalibrationParticleKey.EXT_VOLT4_OFFSET: {TYPE: float, VALUE: -4.589158e-02, REQUIRED: True },
SBE16CalibrationParticleKey.EXT_VOLT4_SLOPE: {TYPE: float, VALUE: 1.246346e+00, REQUIRED: True },
SBE16CalibrationParticleKey.EXT_VOLT5_OFFSET: {TYPE: float, VALUE: -4.609895e-02, REQUIRED: True },
SBE16CalibrationParticleKey.EXT_VOLT5_SLOPE: {TYPE: float, VALUE: 1.247868e+00, REQUIRED: True },
SBE16CalibrationParticleKey.EXT_FREQ: {TYPE: float, VALUE: 9.999949e-01, REQUIRED: True },
}
# Calibration particle definition for a 16 with a quartz pressure sensor
_calibration_parameters_quartz = dict(
{
SBE16CalibrationParticleKey.PRES_SERIAL_NUMBER: {TYPE: int, VALUE: 125270, REQUIRED: True },
SBE16CalibrationParticleKey.PRES_RANGE: {TYPE: int, VALUE: 1000, REQUIRED: True },
SBE16CalibrationParticleKey.PRES_CAL_DATE: {TYPE: unicode, VALUE: '02-nov-12', REQUIRED: True },
SBE16CalibrationParticleKey.PC1: {TYPE: float, VALUE: -4.642673e+03, REQUIRED: True },
SBE16CalibrationParticleKey.PC2: {TYPE: float, VALUE: -4.611640e-03, REQUIRED: True },
SBE16CalibrationParticleKey.PC3: {TYPE: float, VALUE: 8.921190e-04, REQUIRED: True },
SBE16CalibrationParticleKey.PD1: {TYPE: float, VALUE: 7.024800e-02, REQUIRED: True },
SBE16CalibrationParticleKey.PD2: {TYPE: float, VALUE: 0.000000e+00, REQUIRED: True },
SBE16CalibrationParticleKey.PT1: {TYPE: float, VALUE: 3.022595e+01, REQUIRED: True },
SBE16CalibrationParticleKey.PT2: {TYPE: float, VALUE: -1.549720e-04, REQUIRED: True },
SBE16CalibrationParticleKey.PT3: {TYPE: float, VALUE: 2.677750e-06, REQUIRED: True },
SBE16CalibrationParticleKey.PT4: {TYPE: float, VALUE: 1.705490e-09, REQUIRED: True },
SBE16CalibrationParticleKey.PSLOPE: {TYPE: float, VALUE: 1.000000e+00, REQUIRED: True },
SBE16CalibrationParticleKey.POFFSET: {TYPE: float, VALUE: 0.000000e+00, REQUIRED: True },
},
**_calibration_parameters_base
)
    # Calibration particle definition for a 16 with a strain gauge pressure sensor
_calibration_parameters_strain = dict(
{
SBE16CalibrationParticleKey.PRES_SERIAL_NUMBER: {TYPE: int, VALUE: 3230195, REQUIRED: True },
SBE16CalibrationParticleKey.PRES_RANGE: {TYPE: int, VALUE: 160, REQUIRED: True },
SBE16CalibrationParticleKey.PRES_CAL_DATE: {TYPE: unicode, VALUE: '11-May-11', REQUIRED: True },
SBE16CalibrationParticleKey.PA0: {TYPE: float, VALUE: 4.960417e-02, REQUIRED: True },
SBE16CalibrationParticleKey.PA1: {TYPE: float, VALUE: 4.883682e-04, REQUIRED: True },
SBE16CalibrationParticleKey.PA2: {TYPE: float, VALUE: -5.687309e-12, REQUIRED: True },
SBE16CalibrationParticleKey.PTCA0: {TYPE: float, VALUE: 5.249802e+05, REQUIRED: True },
SBE16CalibrationParticleKey.PTCA1: {TYPE: float, VALUE: 7.595719e+00, REQUIRED: True },
SBE16CalibrationParticleKey.PTCA2: {TYPE: float, VALUE: -1.322776e-01, REQUIRED: True },
SBE16CalibrationParticleKey.PTCB0: {TYPE: float, VALUE: 2.503125e+01, REQUIRED: True },
SBE16CalibrationParticleKey.PTCB1: {TYPE: float, VALUE: 5.000000e-05, REQUIRED: True },
            SBE16CalibrationParticleKey.PTCB2: {TYPE: float, VALUE: 0.000000e+00, REQUIRED: True },
SBE16CalibrationParticleKey.PTEMPA0: {TYPE: float, VALUE: -6.431504e+01, REQUIRED: True },
SBE16CalibrationParticleKey.PTEMPA1: {TYPE: float, VALUE: 5.168177e+01, REQUIRED: True },
SBE16CalibrationParticleKey.PTEMPA2: {TYPE: float, VALUE: -2.847757e-01, REQUIRED: True },
SBE16CalibrationParticleKey.POFFSET: {TYPE: float, VALUE: 0.000000e+00, REQUIRED: True },
},
**_calibration_parameters_base
)
###
# Driver Parameter Methods
###
def assert_driver_parameters(self, current_parameters, verify_values = False):
"""
Verify that all driver parameters are correct and potentially verify values.
@param current_parameters: driver parameters read from the driver instance
@param verify_values: should we verify values against definition?
"""
self.assert_parameters(current_parameters, self._driver_parameters, verify_values)
def assert_particle_sample(self, data_particle, verify_values = False):
'''
Verify sample particle
@param data_particle: SBE16DataParticle data particle
@param verify_values: bool, should we verify parameter values
'''
self.assert_data_particle_keys(SBE16DataParticleKey, self._sample_parameters)
self.assert_data_particle_header(data_particle, DataParticleType.CTD_PARSED, require_instrument_timestamp=True)
self.assert_data_particle_parameters(data_particle, self._sample_parameters, verify_values)
def assert_particle_status(self, data_particle, verify_values = False):
'''
Verify status particle
@param data_particle: SBE16StatusParticle data particle
@param verify_values: bool, should we verify parameter values
'''
self.assert_data_particle_keys(SBE16StatusParticleKey, self._status_parameters)
self.assert_data_particle_header(data_particle, DataParticleType.DEVICE_STATUS)
self.assert_data_particle_parameters(data_particle, self._status_parameters, verify_values)
def assert_particle_calibration_quartz(self, data_particle, verify_values = False):
'''
Verify calibration particle
@param data_particle: SBE16CalibrationParticle data particle
@param verify_values: bool, should we verify parameter values
'''
# Have to skip this test because the parameter set is dynamic
#self.assert_data_particle_keys(SBE16CalibrationParticleKey, self._calibration_parameters_quartz)
self.assert_data_particle_header(data_particle, DataParticleType.DEVICE_CALIBRATION)
self.assert_data_particle_parameters(data_particle, self._calibration_parameters_quartz, verify_values)
def assert_particle_calibration_strain(self, data_particle, verify_values = False):
'''
Verify calibration particle
@param data_particle: SBE16CalibrationParticle data particle
@param verify_values: bool, should we verify parameter values
'''
# Have to skip this test because the parameter set is dynamic
#self.assert_data_particle_keys(SBE16CalibrationParticleKey, self._calibration_parameters_strain)
self.assert_data_particle_header(data_particle, DataParticleType.DEVICE_CALIBRATION)
self.assert_data_particle_parameters(data_particle, self._calibration_parameters_strain, verify_values)
def assert_granule_calibration_strain(self, granule, verify_values = False):
'''
Verify calibration granule
@param data_particle: SBE16CalibrationParticle data granule
@param verify_values: bool, should we verify parameter values
'''
# Have to skip this test because the parameter set is dynamic
#self.assert_data_particle_keys(SBE16CalibrationParticleKey, self._calibration_parameters_strain)
self.assert_data_particle_header(granule, DataParticleType.DEVICE_CALIBRATION)
self.assert_data_particle_parameters(granule, self._calibration_parameters_strain, verify_values)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
###############################################################################
@attr('UNIT', group='mi')
class SBEUnitTestCase(SeaBirdUnitTest, SeaBird16plusMixin):
"""Unit Test Driver"""
def test_driver_enums(self):
"""
        Verify that all driver enumerations have no duplicate values that might cause confusion. Also
        do a little extra validation for the Capabilities.
"""
self.assert_enum_has_no_duplicates(Command())
self.assert_enum_has_no_duplicates(ScheduledJob())
self.assert_enum_has_no_duplicates(DataParticleType())
self.assert_enum_has_no_duplicates(ProtocolState())
self.assert_enum_has_no_duplicates(ProtocolEvent())
self.assert_enum_has_no_duplicates(Parameter())
self.assert_enum_complete(ConfirmedParameter(), Parameter())
        # Test capabilities for duplicates, then verify that capabilities are a subset of protocol events
self.assert_enum_has_no_duplicates(Capability())
self.assert_enum_complete(Capability(), ProtocolEvent())
def test_driver_schema(self):
"""
get the driver schema and verify it is configured properly
"""
driver = self.InstrumentDriver(self._got_data_event_callback)
self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
def test_chunker(self):
"""
Test the chunker and verify the particles created.
"""
chunker = StringChunker(SBE16Protocol.sieve_function)
self.assert_chunker_sample(chunker, self.VALID_SAMPLE)
self.assert_chunker_sample_with_noise(chunker, self.VALID_SAMPLE)
self.assert_chunker_fragmented_sample(chunker, self.VALID_SAMPLE)
self.assert_chunker_combined_sample(chunker, self.VALID_SAMPLE)
self.assert_chunker_sample(chunker, self.VALID_SAMPLE2)
self.assert_chunker_sample_with_noise(chunker, self.VALID_SAMPLE2)
self.assert_chunker_fragmented_sample(chunker, self.VALID_SAMPLE2)
self.assert_chunker_combined_sample(chunker, self.VALID_SAMPLE2)
self.assert_chunker_sample(chunker, self.VALID_DS_RESPONSE)
self.assert_chunker_sample_with_noise(chunker, self.VALID_DS_RESPONSE)
self.assert_chunker_fragmented_sample(chunker, self.VALID_DS_RESPONSE, 64)
self.assert_chunker_combined_sample(chunker, self.VALID_DS_RESPONSE)
self.assert_chunker_sample(chunker, self.VALID_DCAL_QUARTZ)
self.assert_chunker_sample_with_noise(chunker, self.VALID_DCAL_QUARTZ)
self.assert_chunker_fragmented_sample(chunker, self.VALID_DCAL_QUARTZ, 64)
self.assert_chunker_combined_sample(chunker, self.VALID_DCAL_QUARTZ)
self.assert_chunker_sample(chunker, self.VALID_DCAL_STRAIN)
self.assert_chunker_sample_with_noise(chunker, self.VALID_DCAL_STRAIN)
self.assert_chunker_fragmented_sample(chunker, self.VALID_DCAL_STRAIN, 64)
self.assert_chunker_combined_sample(chunker, self.VALID_DCAL_STRAIN)
@unittest.skip("passes with test_driver, fails with nosetest")
def test_got_data(self):
"""
Verify sample data passed through the got data method produces the correct data particles
"""
# Create and initialize the instrument driver with a mock port agent
driver = self.InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver)
self.assert_raw_particle_published(driver, True)
# Start validating data particles
self.assert_particle_published(driver, self.VALID_SAMPLE, self.assert_particle_sample, True)
self.assert_particle_published(driver, self.VALID_SAMPLE2, self.assert_particle_sample, True)
self.assert_particle_published(driver, self.VALID_DS_RESPONSE, self.assert_particle_status, True)
self.assert_particle_published(driver, self.VALID_DCAL_QUARTZ, self.assert_particle_calibration_quartz, True)
self.assert_particle_published(driver, self.VALID_DCAL_STRAIN, self.assert_particle_calibration_strain, True)
def test_capabilities(self):
"""
Verify the FSM reports capabilities as expected. All states defined in this dict must
also be defined in the protocol FSM.
"""
capabilities = {
ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
ProtocolState.TEST: ['DRIVER_EVENT_GET',
'DRIVER_EVENT_RUN_TEST'],
ProtocolState.COMMAND: ['DRIVER_EVENT_ACQUIRE_SAMPLE',
'DRIVER_EVENT_ACQUIRE_STATUS',
'DRIVER_EVENT_CLOCK_SYNC',
'DRIVER_EVENT_GET',
'DRIVER_EVENT_SET',
'DRIVER_EVENT_TEST',
'DRIVER_EVENT_START_AUTOSAMPLE',
'DRIVER_EVENT_START_DIRECT',
'PROTOCOL_EVENT_GET_CONFIGURATION',
'PROTOCOL_EVENT_RESET_EC',
'PROTOCOL_EVENT_QUIT_SESSION',
'DRIVER_EVENT_SCHEDULED_CLOCK_SYNC'],
ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_GET',
'PROTOCOL_EVENT_QUIT_SESSION',
'DRIVER_EVENT_STOP_AUTOSAMPLE',
'PROTOCOL_EVENT_GET_CONFIGURATION',
'DRIVER_EVENT_SCHEDULED_CLOCK_SYNC',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.DIRECT_ACCESS: ['DRIVER_EVENT_STOP_DIRECT', 'EXECUTE_DIRECT']
}
driver = self.InstrumentDriver(self._got_data_event_callback)
self.assert_capabilities(driver, capabilities)
@unittest.skip("passes with test_driver, fails with nosetest")
def test_parse_ds(self):
"""
Create a mock port agent
"""
driver = self.InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver, ProtocolState.COMMAND)
source = self.VALID_DS_RESPONSE
baseline = driver._protocol._param_dict.get_current_timestamp()
        # First verify that parse ds sets all known parameters.
driver._protocol._parse_dsdc_response(source, '<Executed/>')
pd = driver._protocol._param_dict.get_all(baseline)
log.debug("Param Dict Values: %s" % pd)
log.debug("Param Sample: %s" % source)
self.assert_driver_parameters(pd, True)
# Now change some things and make sure they are parsed properly
# Note: Only checking parameters that can change.
# Logging
source = source.replace("= not logging", "= logging")
log.debug("Param Sample: %s" % source)
driver._protocol._parse_dsdc_response(source, '<Executed/>')
pd = driver._protocol._param_dict.get_all(baseline)
self.assertTrue(pd.get(Parameter.LOGGING))
# Sync Mode
source = source.replace("serial sync mode disabled", "serial sync mode enabled")
log.debug("Param Sample: %s" % source)
driver._protocol._parse_dsdc_response(source, '<Executed/>')
pd = driver._protocol._param_dict.get_all(baseline)
self.assertTrue(pd.get(Parameter.SYNCMODE))
# Pump Mode 0
source = source.replace("run pump during sample", "no pump")
log.debug("Param Sample: %s" % source)
driver._protocol._parse_dsdc_response(source, '<Executed/>')
pd = driver._protocol._param_dict.get_all(baseline)
self.assertEqual(pd.get(Parameter.PUMP_MODE), 0)
# Pump Mode 1
source = source.replace("no pump", "run pump for 0.5 sec")
log.debug("Param Sample: %s" % source)
driver._protocol._parse_dsdc_response(source, '<Executed/>')
pd = driver._protocol._param_dict.get_all(baseline)
self.assertEqual(pd.get(Parameter.PUMP_MODE), 1)
# Pressure Sensor type 2
source = source.replace("strain gauge", "quartz without temp comp")
log.debug("Param Sample: %s" % source)
driver._protocol._parse_dsdc_response(source, '<Executed/>')
pd = driver._protocol._param_dict.get_all(baseline)
self.assertEqual(pd.get(Parameter.PTYPE), 2)
# Pressure Sensor type 3
source = source.replace("quartz without temp comp", "quartz with temp comp")
log.debug("Param Sample: %s" % source)
driver._protocol._parse_dsdc_response(source, '<Executed/>')
pd = driver._protocol._param_dict.get_all(baseline)
self.assertEqual(pd.get(Parameter.PTYPE), 3)
def test_parse_set_response(self):
"""
Test response from set commands.
"""
driver = self.InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver, ProtocolState.COMMAND)
response = "Not an error"
driver._protocol._parse_set_response(response, Prompt.EXECUTED)
driver._protocol._parse_set_response(response, Prompt.COMMAND)
with self.assertRaises(InstrumentProtocolException):
driver._protocol._parse_set_response(response, Prompt.BAD_COMMAND)
response = "<ERROR type='INVALID ARGUMENT' msg='out of range'/>"
with self.assertRaises(InstrumentParameterException):
driver._protocol._parse_set_response(response, Prompt.EXECUTED)
###############################################################################
# INTEGRATION TESTS #
# Integration tests test the direct driver / instrument interaction          #
# by making direct calls via zeromq.                                         #
#     - Common integration tests test the driver through the instrument      #
#       agent and are common to all drivers (minimum requirement for ION     #
#       ingestion)                                                           #
###############################################################################
@attr('INT', group='mi')
class SBEIntTestCase(SeaBirdIntegrationTest, SeaBird16plusMixin):
"""
Integration tests for the sbe16 driver. This class tests and shows
use patterns for the sbe16 driver as a zmq driver process.
"""
def test_test(self):
"""
Test the hardware testing mode.
"""
self.assert_initialize_driver()
start_time = time.time()
timeout = time.time() + 300
reply = self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.TEST)
self.assert_current_state(ProtocolState.TEST)
# Test the driver is in test state.
state = self.driver_client.cmd_dvr('get_resource_state')
        while state != ProtocolState.COMMAND:
            # Fail inside the loop so a stuck instrument cannot hang the test forever
            self.assertLess(time.time(), timeout, msg="Timeout waiting for instrument to come out of test")
            time.sleep(5)
            elapsed = time.time() - start_time
            log.info('Device testing %f seconds elapsed.' % elapsed)
            state = self.driver_client.cmd_dvr('get_resource_state')
# Verify we received the test result and it passed.
        test_results = [evt for evt in self.events if evt['type'] == DriverAsyncEvent.RESULT]
        self.assertEqual(len(test_results), 1)
self.assertEqual(test_results[0]['value']['success'], 'Passed')
def test_parameters(self):
"""
        Test driver parameters and verify their type. For startup parameters, also verify the
        parameter value. This test confirms that parameters are being read/converted properly
        and that the startup configuration has been applied.
"""
self.assert_initialize_driver()
reply = self.driver_client.cmd_dvr('get_resource', Parameter.ALL)
self.assert_driver_parameters(reply, True)
def test_set(self):
"""
Test all set commands. Verify all exception cases.
"""
self.assert_initialize_driver()
# Verify we can set all parameters in bulk
new_values = {
Parameter.INTERVAL: 20,
Parameter.PUMP_MODE: 0,
Parameter.NCYCLES: 6
}
self.assert_set_bulk(new_values)
# Pump Mode
# x=0: No pump.
# x=1: Run pump for 0.5 sec before each sample.
# x=2: Run pump during each sample.
self.assert_set(Parameter.PUMP_MODE, 0)
self.assert_set(Parameter.PUMP_MODE, 1)
self.assert_set(Parameter.PUMP_MODE, 2)
self.assert_set_exception(Parameter.PUMP_MODE, -1)
self.assert_set_exception(Parameter.PUMP_MODE, 3)
self.assert_set_exception(Parameter.PUMP_MODE, 'bad')
# NCYCLE Range 1 - 100
self.assert_set(Parameter.NCYCLES, 1)
self.assert_set(Parameter.NCYCLES, 100)
self.assert_set_exception(Parameter.NCYCLES, 0)
self.assert_set_exception(Parameter.NCYCLES, 101)
self.assert_set_exception(Parameter.NCYCLES, -1)
self.assert_set_exception(Parameter.NCYCLES, 0.1)
self.assert_set_exception(Parameter.NCYCLES, 'bad')
# SampleInterval Range 10 - 14,400
self.assert_set(Parameter.INTERVAL, 10)
self.assert_set(Parameter.INTERVAL, 14400)
self.assert_set_exception(Parameter.INTERVAL, 9)
self.assert_set_exception(Parameter.INTERVAL, 14401)
self.assert_set_exception(Parameter.INTERVAL, -1)
self.assert_set_exception(Parameter.INTERVAL, 0.1)
self.assert_set_exception(Parameter.INTERVAL, 'bad')
# Read only parameters
self.assert_set_readonly(Parameter.ECHO, False)
self.assert_set_readonly(Parameter.OUTPUT_EXEC_TAG, False)
self.assert_set_readonly(Parameter.TXREALTIME, False)
self.assert_set_readonly(Parameter.BIOWIPER, False)
self.assert_set_readonly(Parameter.PTYPE, 1)
self.assert_set_readonly(Parameter.VOLT0, False)
self.assert_set_readonly(Parameter.VOLT1, False)
self.assert_set_readonly(Parameter.VOLT2, False)
self.assert_set_readonly(Parameter.VOLT3, False)
self.assert_set_readonly(Parameter.VOLT4, False)
self.assert_set_readonly(Parameter.VOLT5, False)
self.assert_set_readonly(Parameter.DELAY_BEFORE_SAMPLE, 1)
self.assert_set_readonly(Parameter.DELAY_AFTER_SAMPLE, 1)
self.assert_set_readonly(Parameter.SBE63, False)
self.assert_set_readonly(Parameter.SBE38, False)
self.assert_set_readonly(Parameter.SBE50, False)
self.assert_set_readonly(Parameter.WETLABS, False)
self.assert_set_readonly(Parameter.GTD, False)
self.assert_set_readonly(Parameter.OPTODE, False)
self.assert_set_readonly(Parameter.SYNCMODE, False)
self.assert_set_readonly(Parameter.SYNCWAIT, 1)
self.assert_set_readonly(Parameter.OUTPUT_FORMAT, 1)
self.assert_set_readonly(Parameter.LOGGING, False)
def test_startup_params(self):
"""
Verify that startup parameters are applied correctly. Generally this
happens in the driver discovery method.
"""
# Explicitly verify these values after discover. They should match
# what the startup values should be
get_values = {
Parameter.INTERVAL: 10,
Parameter.PUMP_MODE: 2,
Parameter.NCYCLES: 4
}
        # Change the values of these parameters to something before the
        # driver is reinitialized. They should be blown away on reinit.
new_values = {
Parameter.INTERVAL: 20,
Parameter.PUMP_MODE: 0,
Parameter.NCYCLES: 6
}
self.assert_initialize_driver()
self.assert_startup_parameters(self.assert_driver_parameters, new_values, get_values)
# Start autosample and try again
self.assert_set_bulk(new_values)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
self.assert_startup_parameters(self.assert_driver_parameters)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
def test_commands(self):
"""
Run instrument commands from both command and streaming mode.
"""
self.assert_initialize_driver()
####
# First test in command mode
####
self.assert_driver_command(ProtocolEvent.CLOCK_SYNC)
self.assert_driver_command(ProtocolEvent.SCHEDULED_CLOCK_SYNC)
self.assert_driver_command(ProtocolEvent.QUIT_SESSION)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=r'serial sync mode')
self.assert_driver_command(ProtocolEvent.RESET_EC)
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=r'serial sync mode')
self.assert_driver_command(ProtocolEvent.GET_CONFIGURATION, regex=r'EXTFREQSF =')
####
# Test in streaming mode
####
# Put us in streaming
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
self.assert_driver_command(ProtocolEvent.SCHEDULED_CLOCK_SYNC)
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=r'serial sync mode')
self.assert_driver_command(ProtocolEvent.GET_CONFIGURATION, regex=r'EXTFREQSF =')
self.assert_driver_command(ProtocolEvent.QUIT_SESSION)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)
####
# Test a bad command
####
self.assert_driver_command_exception('ima_bad_command', exception_class=InstrumentCommandException)
def test_autosample(self):
"""
Verify that we can enter streaming and that all particles are produced
properly.
Because we have to test for three different data particles we can't use
the common assert_sample_autosample method
"""
self.assert_initialize_driver()
self.assert_set(Parameter.INTERVAL, 10)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
self.assert_async_particle_generation(DataParticleType.CTD_PARSED, self.assert_particle_sample, timeout=60)
self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.DEVICE_STATUS, self.assert_particle_status)
self.assert_particle_generation(ProtocolEvent.GET_CONFIGURATION, DataParticleType.DEVICE_CALIBRATION, self.assert_particle_calibration_strain)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)
def test_polled(self):
"""
Test that we can generate particles with commands
"""
self.assert_initialize_driver()
self.assert_particle_generation(ProtocolEvent.GET_CONFIGURATION, DataParticleType.DEVICE_CALIBRATION, self.assert_particle_calibration_strain)
self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.DEVICE_STATUS, self.assert_particle_status)
self.assert_particle_generation(ProtocolEvent.ACQUIRE_SAMPLE, DataParticleType.CTD_PARSED, self.assert_particle_sample)
###
# Test scheduled events
###
def assert_calibration_coefficients(self):
"""
Verify a calibration particle was generated
"""
self.clear_events()
self.assert_async_particle_generation(DataParticleType.DEVICE_CALIBRATION, self.assert_particle_calibration_strain, timeout=120)
def test_scheduled_device_configuration_command(self):
"""
Verify the device configuration command can be triggered and run in command
"""
self.assert_scheduled_event(ScheduledJob.CONFIGURATION_DATA, self.assert_calibration_coefficients, delay=120)
self.assert_current_state(ProtocolState.COMMAND)
def test_scheduled_device_configuration_autosample(self):
"""
Verify the device configuration command can be triggered and run in autosample
"""
self.assert_scheduled_event(ScheduledJob.CONFIGURATION_DATA, self.assert_calibration_coefficients,
autosample_command=ProtocolEvent.START_AUTOSAMPLE, delay=180)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE)
def assert_acquire_status(self):
"""
Verify a status particle was generated
"""
self.clear_events()
self.assert_async_particle_generation(DataParticleType.DEVICE_STATUS, self.assert_particle_status, timeout=120)
def test_scheduled_device_status_command(self):
"""
Verify the device status command can be triggered and run in command
"""
self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, self.assert_acquire_status, delay=120)
self.assert_current_state(ProtocolState.COMMAND)
def test_scheduled_device_status_autosample(self):
"""
Verify the device status command can be triggered and run in autosample
"""
self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, self.assert_acquire_status,
autosample_command=ProtocolEvent.START_AUTOSAMPLE, delay=180)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE)
def test_scheduled_clock_sync_command(self):
"""
Verify the scheduled clock sync is triggered and functions as expected
"""
timeout = 120
self.assert_scheduled_event(ScheduledJob.CLOCK_SYNC, delay=timeout)
self.assert_current_state(ProtocolState.COMMAND)
# Set the clock to some time in the past
# Need an easy way to do this now that DATE_TIME is read only
#self.assert_set_clock(Parameter.DATE_TIME, time_override=SBE_EPOCH)
        # Check the clock until it is set correctly (by a scheduled event)
#self.assert_clock_set(Parameter.DATE_TIME, sync_clock_cmd=ProtocolEvent.GET_CONFIGURATION, timeout=timeout)
def test_scheduled_clock_sync_autosample(self):
"""
Verify the scheduled clock sync is triggered and functions as expected
"""
timeout = 240
self.assert_scheduled_event(ScheduledJob.CLOCK_SYNC, delay=timeout)
self.assert_current_state(ProtocolState.COMMAND)
# Set the clock to some time in the past
# Need an easy way to do this now that DATE_TIME is read only
#self.assert_set_clock(Parameter.DATE_TIME, time_override=SBE_EPOCH)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE)
# Check the clock until it is set correctly (by a scheduled event)
#self.assert_clock_set(Parameter.DATE_TIME, sync_clock_cmd=ProtocolEvent.GET_CONFIGURATION, timeout=timeout, tolerance=10)
def assert_cycle(self):
self.assert_current_state(ProtocolState.COMMAND)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
self.assert_async_particle_generation(DataParticleType.CTD_PARSED, self.assert_particle_sample, particle_count = 6, timeout=60)
self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.DEVICE_STATUS, self.assert_particle_status)
self.assert_particle_generation(ProtocolEvent.GET_CONFIGURATION, DataParticleType.DEVICE_CALIBRATION, self.assert_particle_calibration_strain)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE)
self.assert_current_state(ProtocolState.COMMAND)
def test_discover(self):
"""
        Verify we can cycle between command and autosample modes repeatedly
"""
self.assert_initialize_driver()
self.assert_cycle()
self.assert_cycle()
def test_metadata(self):
metadata = self.driver_client.cmd_dvr('get_config_metadata')
self.assertEqual(metadata, None) # must be connected
self.assert_initialize_driver()
metadata = self.driver_client.cmd_dvr('get_config_metadata')
log.debug("Metadata: %s", metadata)
self.assertTrue(isinstance(metadata, str))
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@attr('QUAL', group='mi')
class SBEQualTestCase(SeaBirdQualificationTest, SeaBird16plusMixin):
"""Qualification Test Container"""
def test_autosample(self):
"""
Verify autosample works and data particles are created
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.INTERVAL, 10)
self.assert_start_autosample()
self.assert_particle_async(DataParticleType.CTD_PARSED, self.assert_particle_sample)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_status, DataParticleType.DEVICE_STATUS, sample_count=1, timeout=20)
self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_particle_calibration_strain, DataParticleType.DEVICE_CALIBRATION, sample_count=1, timeout=20)
        # Stop autosample and run a couple of commands.
self.assert_stop_autosample()
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_status, DataParticleType.DEVICE_STATUS, sample_count=1)
self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_particle_calibration_strain, DataParticleType.DEVICE_CALIBRATION, sample_count=1)
# Restart autosample and gather a couple samples
self.assert_sample_autosample(self.assert_particle_sample, DataParticleType.CTD_PARSED)
def assert_cycle(self):
self.assert_start_autosample()
self.assert_particle_async(DataParticleType.CTD_PARSED, self.assert_particle_sample)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_status, DataParticleType.DEVICE_STATUS, sample_count=1, timeout=20)
self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_particle_calibration_strain, DataParticleType.DEVICE_CALIBRATION, sample_count=1, timeout=20)
self.assert_stop_autosample()
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_status, DataParticleType.DEVICE_STATUS, sample_count=1)
self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_particle_calibration_strain, DataParticleType.DEVICE_CALIBRATION, sample_count=1)
def test_cycle(self):
"""
Verify we can bounce between command and streaming. We try it a few times to see if we can find a timeout.
"""
self.assert_enter_command_mode()
self.assert_cycle()
self.assert_cycle()
self.assert_cycle()
self.assert_cycle()
def test_poll(self):
'''
Verify that we can poll for a sample. Take sample for this instrument
Also poll for other engineering data streams.
'''
self.assert_enter_command_mode()
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sample, DataParticleType.CTD_PARSED, sample_count=1)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_status, DataParticleType.DEVICE_STATUS, sample_count=1)
self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_particle_calibration_strain, DataParticleType.DEVICE_CALIBRATION, sample_count=1)
def test_direct_access_telnet_mode(self):
"""
@brief This test manually tests that the Instrument Driver properly supports direct access to the physical instrument. (telnet mode)
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.INTERVAL, 10)
# go into direct access, and muck up a setting.
self.assert_direct_access_start_telnet(timeout=600)
self.tcp_client.send_data("%sampleinterval=97%s" % (NEWLINE, NEWLINE))
self.tcp_client.expect(Prompt.EXECUTED)
self.assert_direct_access_stop_telnet()
# verify the setting got restored.
self.assert_enter_command_mode()
self.assert_get_parameter(Parameter.INTERVAL, 10)
def test_execute_clock_sync(self):
"""
        Verify we can synchronize the instrument's internal clock
"""
self.assert_enter_command_mode()
self.assert_execute_resource(ProtocolEvent.CLOCK_SYNC)
# get the time from the driver
check_new_params = self.instrument_agent_client.get_resource([Parameter.DATE_TIME])
# convert driver's time from formatted date/time string to seconds integer
instrument_time = time.mktime(time.strptime(check_new_params.get(Parameter.DATE_TIME).lower(), "%d %b %Y %H:%M:%S"))
# need to convert local machine's time to date/time string and back to seconds to 'drop' the DST attribute so test passes
# get time from local machine
lt = time.strftime("%d %b %Y %H:%M:%S", time.gmtime(time.mktime(time.localtime())))
# convert local time from formatted date/time string to seconds integer to drop DST
local_time = time.mktime(time.strptime(lt, "%d %b %Y %H:%M:%S"))
# Now verify that the time matches to within 15 seconds
self.assertLessEqual(abs(instrument_time - local_time), 15)
def test_get_capabilities(self):
"""
@brief Verify that the correct capabilities are returned from get_capabilities
at various driver/agent states.
"""
self.assert_enter_command_mode()
##################
# Command Mode
##################
capabilities = {
AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
AgentCapabilityType.RESOURCE_COMMAND: [
ProtocolEvent.TEST,
ProtocolEvent.GET,
ProtocolEvent.SET,
ProtocolEvent.RESET_EC,
ProtocolEvent.CLOCK_SYNC,
ProtocolEvent.QUIT_SESSION,
ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.START_AUTOSAMPLE,
ProtocolEvent.GET_CONFIGURATION,
],
AgentCapabilityType.RESOURCE_INTERFACE: None,
AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
}
self.assert_capabilities(capabilities)
##################
# Streaming Mode
##################
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [
ProtocolEvent.GET,
ProtocolEvent.STOP_AUTOSAMPLE,
ProtocolEvent.QUIT_SESSION,
ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.GET_CONFIGURATION,
]
self.assert_start_autosample()
self.assert_capabilities(capabilities)
self.assert_stop_autosample()
##################
# DA Mode
##################
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.DIRECT_ACCESS)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = self._common_da_resource_commands()
self.assert_direct_access_start_telnet()
self.assert_capabilities(capabilities)
self.assert_direct_access_stop_telnet()
#######################
# Uninitialized Mode
#######################
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
self.assert_reset()
self.assert_capabilities(capabilities)
###############################################################################
# PUBLICATION TESTS #
# Device specific publication tests are for                                  #
# testing device specific capabilities #
###############################################################################
@attr('PUB', group='mi')
class SBEPubTestCase(SeaBirdPublicationTest):
def test_granule_generation(self):
self.assert_initialize_driver()
# Currently these tests only verify that the data granule is generated, but the values
# are not tested. We will eventually need to replace log.debug with a better callback
# function that actually tests the granule.
self.assert_sample_async("raw data", log.debug, DataParticleType.RAW, timeout=10)
self.assert_sample_async(self.VALID_SAMPLE, log.debug, DataParticleType.CTD_PARSED, timeout=10)
self.assert_sample_async(self.VALID_DS_RESPONSE, log.debug, DataParticleType.DEVICE_STATUS, timeout=10)
self.assert_sample_async(self.VALID_DCAL_STRAIN, log.debug, DataParticleType.DEVICE_CALIBRATION, timeout=10)
self.assert_sample_async(self.VALID_DCAL_QUARTZ, log.debug, DataParticleType.DEVICE_CALIBRATION, timeout=10)<|fim▁end|> | |
<|file_name|>PropertySupport.java<|end_file_name|><|fim▁begin|>/**
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* Copyright 2012-2017 the original author or authors.
*/
package org.assertj.core.util.introspection;
import static java.lang.String.format;
import static java.util.Collections.emptyList;
import static java.util.Collections.unmodifiableList;
import static org.assertj.core.util.IterableUtil.isNullOrEmpty;
import static org.assertj.core.util.Preconditions.checkArgument;
import static org.assertj.core.util.introspection.Introspection.getPropertyGetter;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import org.assertj.core.util.VisibleForTesting;
/**
* Utility methods for properties access.
*
* @author Joel Costigliola
* @author Alex Ruiz
* @author Nicolas François
* @author Florent Biville
*/
public class PropertySupport {
private static final String SEPARATOR = ".";
private static final PropertySupport INSTANCE = new PropertySupport();
/**
* Returns the singleton instance of this class.
*
* @return the singleton instance of this class.
*/
public static PropertySupport instance() {
return INSTANCE;
}
@VisibleForTesting
PropertySupport() {
}
/**
* Returns a <code>{@link List}</code> containing the values of the given property name, from the elements of the
* given <code>{@link Iterable}</code>. If the given {@code Iterable} is empty or {@code null}, this method will
* return an empty {@code List}. This method supports nested properties (e.g. "address.street.number").
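   * <p>
   * A hypothetical example (the {@code employees} collection and its {@code name} property are assumed
   * purely for illustration):
   * <pre><code class='java'> List&lt;String&gt; names = PropertySupport.instance().propertyValues("name", String.class, employees);</code></pre>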
*
* @param propertyName the name of the property. It may be a nested property. It is left to the clients to validate
* for {@code null} or empty.
* @param target the given {@code Iterable}.
   * @return a {@code List} containing the values of the given property name, from the elements of the given
   *         {@code Iterable}.
* @throws IntrospectionError if an element in the given {@code Iterable} does not have a property with a matching
* name.
*/
public <T> List<T> propertyValues(String propertyName, Class<T> clazz, Iterable<?> target) {
if (isNullOrEmpty(target)) {
return emptyList();
}
if (isNestedProperty(propertyName)) {
String firstPropertyName = popPropertyNameFrom(propertyName);
Iterable<Object> propertyValues = propertyValues(firstPropertyName, Object.class, target);
// extract next sub-property values until reaching the last sub-property
return propertyValues(nextPropertyNameFrom(propertyName), clazz, propertyValues);
}
return simplePropertyValues(propertyName, clazz, target);
}
/**
* Static variant of {@link #propertyValueOf(String, Class, Object)} for syntactic sugar.
* <p>
*
* @param propertyName the name of the property. It may be a nested property. It is left to the clients to validate
* for {@code null} or empty.
* @param target the given object
* @param clazz type of property
   * @return the value of the given property name
* @throws IntrospectionError if the given target does not have a property with a matching name.
*/
public static <T> T propertyValueOf(String propertyName, Object target, Class<T> clazz) {
return instance().propertyValueOf(propertyName, clazz, target);
}
private <T> List<T> simplePropertyValues(String propertyName, Class<T> clazz, Iterable<?> target) {
List<T> propertyValues = new ArrayList<>();
for (Object e : target) {
propertyValues.add(e == null ? null : propertyValue(propertyName, clazz, e));
}
return unmodifiableList(propertyValues);
}
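  // e.g. popPropertyNameFrom("address.street.number") returns "address"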
private String popPropertyNameFrom(String propertyNameChain) {
if (!isNestedProperty(propertyNameChain)) {
return propertyNameChain;
}
return propertyNameChain.substring(0, propertyNameChain.indexOf(SEPARATOR));
}
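  // e.g. nextPropertyNameFrom("address.street.number") returns "street.number"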
private String nextPropertyNameFrom(String propertyNameChain) {
if (!isNestedProperty(propertyNameChain)) {
return "";
}
return propertyNameChain.substring(propertyNameChain.indexOf(SEPARATOR) + 1);
}
/**
* <pre><code class='java'> isNestedProperty("address.street"); // true
* isNestedProperty("address.street.name"); // true
* isNestedProperty("person"); // false
* isNestedProperty(".name"); // false
* isNestedProperty("person."); // false
* isNestedProperty("person.name."); // false
* isNestedProperty(".person.name"); // false
* isNestedProperty("."); // false
* isNestedProperty(""); // false</code></pre>
*/
private boolean isNestedProperty(String propertyName) {
return propertyName.contains(SEPARATOR) && !propertyName.startsWith(SEPARATOR) && !propertyName.endsWith(SEPARATOR);
}
/**
* Return the value of a simple property from a target object.
* <p>
   * This only works for simple properties; nested properties are not supported! Use
   * {@link #propertyValueOf(String, Class, Object)} instead.
*
* @param propertyName the name of the property. It may be a nested property. It is left to the clients to validate
* for {@code null} or empty.
* @param target the given object
* @param clazz type of property
   * @return the value of the given property name
* @throws IntrospectionError if the given target does not have a property with a matching name.
*/
@SuppressWarnings("unchecked")
public <T> T propertyValue(String propertyName, Class<T> clazz, Object target) {
Method getter = getPropertyGetter(propertyName, target);
try {
return (T) getter.invoke(target);
} catch (ClassCastException e) {
String msg = format("Unable to obtain the value of the property <'%s'> from <%s> - wrong property type specified <%s>",
propertyName, target, clazz);
throw new IntrospectionError(msg, e);
} catch (Exception unexpected) {
String msg = format("Unable to obtain the value of the property <'%s'> from <%s>", propertyName, target);
throw new IntrospectionError(msg, unexpected);
}
}
/**
   * Returns the value of the given property name from the given target. If the given object is {@code null}, this method will
* return null.<br>
* This method supports nested properties (e.g. "address.street.number").
*
* @param propertyName the name of the property. It may be a nested property. It is left to the clients to validate
* for {@code null} or empty.
* @param clazz the class of property.<|fim▁hole|> * @return the value of the given property name given target.
* @throws IntrospectionError if target object does not have a property with a matching name.
* @throws IllegalArgumentException if propertyName is null.
*/
public <T> T propertyValueOf(String propertyName, Class<T> clazz, Object target) {
checkArgument(propertyName != null, "the property name should not be null.");
// returns null if target is null as we can't extract a property from a null object
// but don't want to raise an exception if we were looking at a nested property
if (target == null) return null;
if (isNestedProperty(propertyName)) {
String firstPropertyName = popPropertyNameFrom(propertyName);
Object propertyValue = propertyValue(firstPropertyName, Object.class, target);
// extract next sub-property values until reaching the last sub-property
return propertyValueOf(nextPropertyNameFrom(propertyName), clazz, propertyValue);
}
return propertyValue(propertyName, clazz, target);
}
/**
* just delegates to {@link #propertyValues(String, Class, Iterable)} with Class being Object.class
*/
public List<Object> propertyValues(String fieldOrPropertyName, Iterable<?> objects) {
return propertyValues(fieldOrPropertyName, Object.class, objects);
}
public boolean publicGetterExistsFor(String fieldName, Object actual) {
try {
getPropertyGetter(fieldName, actual);
} catch (IntrospectionError e) {
return false;
}
return true;
}
}<|fim▁end|> | * @param target the given Object to extract property from. |
<|file_name|>interval_base_map.hpp<|end_file_name|><|fim▁begin|>/*-----------------------------------------------------------------------------+
Copyright (c) 2007-2012: Joachim Faulhaber
Copyright (c) 1999-2006: Cortex Software GmbH, Kantstrasse 57, Berlin
+------------------------------------------------------------------------------+
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENCE.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
+-----------------------------------------------------------------------------*/
#ifndef BOOST_ICL_INTERVAL_BASE_MAP_HPP_JOFA_990223
#define BOOST_ICL_INTERVAL_BASE_MAP_HPP_JOFA_990223
#include <limits>
#include <boost/type_traits/ice.hpp>
#include <boost/mpl/and.hpp>
#include <boost/mpl/or.hpp>
#include <boost/mpl/not.hpp>
#include <boost/icl/detail/notate.hpp>
#include <boost/icl/detail/design_config.hpp>
#include <boost/icl/detail/on_absorbtion.hpp>
#include <boost/icl/detail/interval_map_algo.hpp>
#include <boost/icl/associative_interval_container.hpp>
#include <boost/icl/type_traits/is_interval_splitter.hpp>
#include <boost/icl/map.hpp>
namespace boost{namespace icl
{
template<class DomainT, class CodomainT>
struct mapping_pair
{
DomainT key;
CodomainT data;
mapping_pair():key(), data(){}
mapping_pair(const DomainT& key_value, const CodomainT& data_value)
:key(key_value), data(data_value){}
mapping_pair(const std::pair<DomainT,CodomainT>& std_pair)
:key(std_pair.first), data(std_pair.second){}
};
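
/* Illustrative usage sketch (non-normative; it exercises the derived class
   icl::interval_map rather than this base class directly):

     boost::icl::interval_map<int, int> m;
     m += std::make_pair(boost::icl::interval<int>::right_open(0, 10), 1);
     m += std::make_pair(boost::icl::interval<int>::right_open(5, 15), 1);
     // the overlapping region [5,10) now maps to 2, aggregated by the Combine
     // functor (icl::inplace_plus by default)
*/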
/** \brief Implements a map as a map of intervals (base class) */
template
<
class SubType,
typename DomainT,
typename CodomainT,
class Traits = icl::partial_absorber,
ICL_COMPARE Compare = ICL_COMPARE_INSTANCE(ICL_COMPARE_DEFAULT, DomainT),
ICL_COMBINE Combine = ICL_COMBINE_INSTANCE(icl::inplace_plus, CodomainT),
ICL_SECTION Section = ICL_SECTION_INSTANCE(icl::inter_section, CodomainT),
ICL_INTERVAL(ICL_COMPARE) Interval = ICL_INTERVAL_INSTANCE(ICL_INTERVAL_DEFAULT, DomainT, Compare),
ICL_ALLOC Alloc = std::allocator
>
class interval_base_map
{
public:
//==========================================================================
//= Associated types
//==========================================================================
typedef interval_base_map<SubType,DomainT,CodomainT,
Traits,Compare,Combine,Section,Interval,Alloc>
type;
/// The designated \e derived or \e sub_type of this base class
typedef SubType sub_type;
    /// Auxiliary type for overload resolution
typedef type overloadable_type;
    /// Traits of an icl map
typedef Traits traits;
//--------------------------------------------------------------------------
//- Associated types: Related types
//--------------------------------------------------------------------------
/// The atomized type representing the corresponding container of elements
typedef typename icl::map<DomainT,CodomainT,
Traits,Compare,Combine,Section,Alloc> atomized_type;
//--------------------------------------------------------------------------
//- Associated types: Data
//--------------------------------------------------------------------------
/// Domain type (type of the keys) of the map
typedef DomainT domain_type;
typedef typename boost::call_traits<DomainT>::param_type domain_param;
    /// Codomain type (type of the values) of the map
typedef CodomainT codomain_type;
/// Auxiliary type to help the compiler resolve ambiguities when using std::make_pair
typedef mapping_pair<domain_type,codomain_type> domain_mapping_type;
    /// Conceptually, a map is a set of elements of type \c element_type
typedef domain_mapping_type element_type;
/// The interval type of the map
typedef ICL_INTERVAL_TYPE(Interval,DomainT,Compare) interval_type;
/// Auxiliary type for overload resolution
typedef std::pair<interval_type,CodomainT> interval_mapping_type;
    /// Type of an interval container's segment, which is spanned by an interval
typedef std::pair<interval_type,CodomainT> segment_type;
//--------------------------------------------------------------------------
//- Associated types: Size
//--------------------------------------------------------------------------
    /// The difference type of an interval which is sometimes different from the domain_type
typedef typename difference_type_of<domain_type>::type difference_type;
    /// The size type of an interval, which is usually std::size_t
typedef typename size_type_of<domain_type>::type size_type;
//--------------------------------------------------------------------------
//- Associated types: Functors
//--------------------------------------------------------------------------
/// Comparison functor for domain values
typedef ICL_COMPARE_DOMAIN(Compare,DomainT) domain_compare;
typedef ICL_COMPARE_DOMAIN(Compare,segment_type) segment_compare;
/// Combine functor for codomain value aggregation
typedef ICL_COMBINE_CODOMAIN(Combine,CodomainT) codomain_combine;
/// Inverse Combine functor for codomain value aggregation
typedef typename inverse<codomain_combine>::type inverse_codomain_combine;
/// Intersection functor for codomain values
typedef typename mpl::if_
<has_set_semantics<codomain_type>
, ICL_SECTION_CODOMAIN(Section,CodomainT)
, codomain_combine
>::type codomain_intersect;
/// Inverse Combine functor for codomain value intersection
typedef typename inverse<codomain_intersect>::type inverse_codomain_intersect;
/// Comparison functor for intervals which are keys as well
typedef exclusive_less_than<interval_type> interval_compare;
/// Comparison functor for keys
typedef exclusive_less_than<interval_type> key_compare;
//--------------------------------------------------------------------------
//- Associated types: Implementation and stl related
//--------------------------------------------------------------------------
/// The allocator type of the set
typedef Alloc<std::pair<const interval_type, codomain_type> >
allocator_type;
/// Container type for the implementation
typedef ICL_IMPL_SPACE::map<interval_type,codomain_type,
key_compare,allocator_type> ImplMapT;
/// key type of the implementing container
typedef typename ImplMapT::key_type key_type;
/// value type of the implementing container
typedef typename ImplMapT::value_type value_type;
/// data type of the implementing container
typedef typename ImplMapT::value_type::second_type data_type;
/// pointer type
typedef typename ImplMapT::pointer pointer;
/// const pointer type
typedef typename ImplMapT::const_pointer const_pointer;
/// reference type
typedef typename ImplMapT::reference reference;
/// const reference type
typedef typename ImplMapT::const_reference const_reference;
/// iterator for iteration over intervals
typedef typename ImplMapT::iterator iterator;
/// const_iterator for iteration over intervals
typedef typename ImplMapT::const_iterator const_iterator;
/// iterator for reverse iteration over intervals
typedef typename ImplMapT::reverse_iterator reverse_iterator;
/// const_iterator for iteration over intervals
typedef typename ImplMapT::const_reverse_iterator const_reverse_iterator;
    /// element iterator: Deprecated, see documentation.
    typedef boost::icl::element_iterator<iterator> element_iterator;
    /// const element iterator: Deprecated, see documentation.
    typedef boost::icl::element_iterator<const_iterator> element_const_iterator;
    /// element reverse iterator: Deprecated, see documentation.
    typedef boost::icl::element_iterator<reverse_iterator> element_reverse_iterator;
    /// element const reverse iterator: Deprecated, see documentation.
    typedef boost::icl::element_iterator<const_reverse_iterator> element_const_reverse_iterator;
typedef typename on_absorbtion<type, codomain_combine,
Traits::absorbs_identities>::type on_codomain_absorbtion;
public:
BOOST_STATIC_CONSTANT(bool,
is_total_invertible = ( Traits::is_total
&& has_inverse<codomain_type>::value));
BOOST_STATIC_CONSTANT(int, fineness = 0);
public:
//==========================================================================
//= Construct, copy, destruct
//==========================================================================
/** Default constructor for the empty object */
interval_base_map()
{
BOOST_CONCEPT_ASSERT((DefaultConstructibleConcept<DomainT>));
BOOST_CONCEPT_ASSERT((LessThanComparableConcept<DomainT>));
BOOST_CONCEPT_ASSERT((DefaultConstructibleConcept<CodomainT>));
BOOST_CONCEPT_ASSERT((EqualComparableConcept<CodomainT>));
}
/** Copy constructor */
interval_base_map(const interval_base_map& src): _map(src._map)
{
BOOST_CONCEPT_ASSERT((DefaultConstructibleConcept<DomainT>));
BOOST_CONCEPT_ASSERT((LessThanComparableConcept<DomainT>));
BOOST_CONCEPT_ASSERT((DefaultConstructibleConcept<CodomainT>));
BOOST_CONCEPT_ASSERT((EqualComparableConcept<CodomainT>));
}
/** Copy assignment operator */
interval_base_map& operator = (const interval_base_map& src)
{
this->_map = src._map;
return *this;
}
# ifndef BOOST_NO_RVALUE_REFERENCES
//==========================================================================
//= Move semantics
//==========================================================================
/** Move constructor */
interval_base_map(interval_base_map&& src): _map(boost::move(src._map))
{
BOOST_CONCEPT_ASSERT((DefaultConstructibleConcept<DomainT>));
BOOST_CONCEPT_ASSERT((LessThanComparableConcept<DomainT>));
BOOST_CONCEPT_ASSERT((DefaultConstructibleConcept<CodomainT>));
BOOST_CONCEPT_ASSERT((EqualComparableConcept<CodomainT>));
}
/** Move assignment operator */
interval_base_map& operator = (interval_base_map&& src)
{
this->_map = boost::move(src._map);
return *this;
}
//==========================================================================
# endif // BOOST_NO_RVALUE_REFERENCES
/** swap the content of containers */
void swap(interval_base_map& object) { _map.swap(object._map); }
//==========================================================================
//= Containedness
//==========================================================================
/** clear the map */
void clear() { icl::clear(*that()); }
/** is the map empty? */
bool empty()const { return icl::is_empty(*that()); }
//==========================================================================
//= Size
//==========================================================================
    /** An interval map's size is its cardinality */
size_type size()const
{
return icl::cardinality(*that());
}
/** Size of the iteration over this container */
std::size_t iterative_size()const
{
return _map.size();
}
//==========================================================================
//= Selection
//==========================================================================
    /** Find the interval value pair that contains \c key_value */
const_iterator find(const domain_type& key_value)const
{
return icl::find(*this, key_value);
}
    /** Find the first interval value pair that collides with interval
        \c key_interval */
const_iterator find(const interval_type& key_interval)const
{
return _map.find(key_interval);
}
/** Total select function. */
codomain_type operator()(const domain_type& key_value)const
{
const_iterator it_ = icl::find(*this, key_value);
return it_==end() ? identity_element<codomain_type>::value()
: (*it_).second;
}
//==========================================================================
//= Addition
//==========================================================================
/** Addition of a key value pair to the map */
SubType& add(const element_type& key_value_pair)
{
return icl::add(*that(), key_value_pair);
}
/** Addition of an interval value pair to the map. */
SubType& add(const segment_type& interval_value_pair)
{
this->template _add<codomain_combine>(interval_value_pair);
return *that();
}
/** Addition of an interval value pair \c interval_value_pair to the map.
Iterator \c prior_ is a hint to the position \c interval_value_pair can be
inserted after. */
iterator add(iterator prior_, const segment_type& interval_value_pair)
{
return this->template _add<codomain_combine>(prior_, interval_value_pair);
}
//==========================================================================
//= Subtraction
//==========================================================================
/** Subtraction of a key value pair from the map */
SubType& subtract(const element_type& key_value_pair)
{
return icl::subtract(*that(), key_value_pair);
}
/** Subtraction of an interval value pair from the map. */
SubType& subtract(const segment_type& interval_value_pair)
{
on_invertible<type, is_total_invertible>
::subtract(*that(), interval_value_pair);
return *that();
}
//==========================================================================
//= Insertion
//==========================================================================
/** Insertion of a \c key_value_pair into the map. */
SubType& insert(const element_type& key_value_pair)
{
return icl::insert(*that(), key_value_pair);
}
/** Insertion of an \c interval_value_pair into the map. */
SubType& insert(const segment_type& interval_value_pair)
{
_insert(interval_value_pair);
return *that();
}
    /** Insertion of an \c interval_value_pair into the map. Iterator \c prior_
        serves as a hint to insert after the element \c prior_ points to. */
iterator insert(iterator prior, const segment_type& interval_value_pair)
{
return _insert(prior, interval_value_pair);
}
/** With <tt>key_value_pair = (k,v)</tt> set value \c v for key \c k */
SubType& set(const element_type& key_value_pair)
{
return icl::set_at(*that(), key_value_pair);
}
/** With <tt>interval_value_pair = (I,v)</tt> set value \c v
for all keys in interval \c I in the map. */
SubType& set(const segment_type& interval_value_pair)
{
return icl::set_at(*that(), interval_value_pair);
}
//==========================================================================
//= Erasure
//==========================================================================
/** Erase a \c key_value_pair from the map. */
SubType& erase(const element_type& key_value_pair)
{
icl::erase(*that(), key_value_pair);
return *that();
}
/** Erase an \c interval_value_pair from the map. */
SubType& erase(const segment_type& interval_value_pair);
/** Erase a key value pair for \c key. */
SubType& erase(const domain_type& key)
{
return icl::erase(*that(), key);
}
/** Erase all value pairs within the range of the
interval <tt>inter_val</tt> from the map. */
SubType& erase(const interval_type& inter_val);
/** Erase all value pairs within the range of the interval that iterator
\c position points to. */
void erase(iterator position){ this->_map.erase(position); }
/** Erase all value pairs for a range of iterators <tt>[first,past)</tt>. */
void erase(iterator first, iterator past){ this->_map.erase(first, past); }
//==========================================================================
//= Intersection
//==========================================================================
/** The intersection of \c interval_value_pair and \c *this map is added to \c section. */
void add_intersection(SubType& section, const segment_type& interval_value_pair)const
{
on_definedness<SubType, Traits::is_total>
::add_intersection(section, *that(), interval_value_pair);
}
//==========================================================================
//= Symmetric difference
//==========================================================================
/** If \c *this map contains \c key_value_pair it is erased, otherwise it is added. */
SubType& flip(const element_type& key_value_pair)
{
return icl::flip(*that(), key_value_pair);
}
/** If \c *this map contains \c interval_value_pair it is erased, otherwise it is added. */
SubType& flip(const segment_type& interval_value_pair)
{
on_total_absorbable<SubType, Traits::is_total, Traits::absorbs_identities>
::flip(*that(), interval_value_pair);
return *that();
}
//==========================================================================
//= Iterator related
//==========================================================================
iterator lower_bound(const key_type& interval)
{ return _map.lower_bound(interval); }
iterator upper_bound(const key_type& interval)
{ return _map.upper_bound(interval); }
const_iterator lower_bound(const key_type& interval)const
{ return _map.lower_bound(interval); }
const_iterator upper_bound(const key_type& interval)const
{ return _map.upper_bound(interval); }
std::pair<iterator,iterator> equal_range(const key_type& interval)
{
return std::pair<iterator,iterator>
(lower_bound(interval), upper_bound(interval));
}
std::pair<const_iterator,const_iterator>
equal_range(const key_type& interval)const
{
return std::pair<const_iterator,const_iterator>
(lower_bound(interval), upper_bound(interval));
}
iterator begin() { return _map.begin(); }
iterator end() { return _map.end(); }
const_iterator begin()const { return _map.begin(); }
const_iterator end()const { return _map.end(); }
reverse_iterator rbegin() { return _map.rbegin(); }
reverse_iterator rend() { return _map.rend(); }
const_reverse_iterator rbegin()const { return _map.rbegin(); }
const_reverse_iterator rend()const { return _map.rend(); }
private:
template<class Combiner>
iterator _add(const segment_type& interval_value_pair);
template<class Combiner>
iterator _add(iterator prior_, const segment_type& interval_value_pair);
template<class Combiner>
void _subtract(const segment_type& interval_value_pair);
iterator _insert(const segment_type& interval_value_pair);
iterator _insert(iterator prior_, const segment_type& interval_value_pair);
private:
template<class Combiner>
void add_segment(const interval_type& inter_val, const CodomainT& co_val, iterator& it_);
template<class Combiner>
void add_main(interval_type& inter_val, const CodomainT& co_val,
iterator& it_, const iterator& last_);
template<class Combiner>
void add_rear(const interval_type& inter_val, const CodomainT& co_val, iterator& it_);
void add_front(const interval_type& inter_val, iterator& first_);
private:
void subtract_front(const interval_type& inter_val, iterator& first_);
template<class Combiner>
void subtract_main(const CodomainT& co_val, iterator& it_, const iterator& last_);
template<class Combiner>
void subtract_rear(interval_type& inter_val, const CodomainT& co_val, iterator& it_);
private:
void insert_main(const interval_type&, const CodomainT&, iterator&, const iterator&);
void erase_rest ( interval_type&, const CodomainT&, iterator&, const iterator&);
template<class FragmentT>
void total_add_intersection(SubType& section, const FragmentT& fragment)const
{
section += *that();
section.add(fragment);
}
void partial_add_intersection(SubType& section, const segment_type& operand)const
{
interval_type inter_val = operand.first;
if(icl::is_empty(inter_val))
return;
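        // equal_range yields the subsequence of segments that overlap inter_val;
        // an empty range means there is nothing to intersect.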
std::pair<const_iterator, const_iterator> exterior = equal_range(inter_val);
if(exterior.first == exterior.second)
return;
for(const_iterator it_=exterior.first; it_ != exterior.second; it_++)
{
interval_type common_interval = (*it_).first & inter_val;
if(!icl::is_empty(common_interval))
{
section.template _add<codomain_combine> (value_type(common_interval, (*it_).second) );
section.template _add<codomain_intersect>(value_type(common_interval, operand.second));
}
}
}
void partial_add_intersection(SubType& section, const element_type& operand)const
{
partial_add_intersection(section, make_segment<type>(operand));
}
protected:
template <class Combiner>
iterator gap_insert(iterator prior_, const interval_type& inter_val,
const codomain_type& co_val )
{
        // inter_val is not contained in this map. Insertion will be successful.
BOOST_ASSERT(this->_map.find(inter_val) == this->_map.end());
BOOST_ASSERT((!on_absorbtion<type,Combiner,Traits::absorbs_identities>::is_absorbable(co_val)));
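        // version<Combiner> adapts co_val to the combiner: for inverse combiners it
        // yields the inverted value, for plain combiners co_val itself (an assumption
        // based on the functor's role; see icl's version<> helper).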
return this->_map.insert(prior_, value_type(inter_val, version<Combiner>()(co_val)));
}
template <class Combiner>
std::pair<iterator, bool>
add_at(const iterator& prior_, const interval_type& inter_val,
const codomain_type& co_val )
{
// Never try to insert an identity element into an identity element absorber here:
BOOST_ASSERT((!(on_absorbtion<type,Combiner,Traits::absorbs_identities>::is_absorbable(co_val))));
iterator inserted_
= this->_map.insert(prior_, value_type(inter_val, Combiner::identity_element()));
if((*inserted_).first == inter_val && (*inserted_).second == Combiner::identity_element())
{
Combiner()((*inserted_).second, co_val);
return std::pair<iterator,bool>(inserted_, true);
}
else
return std::pair<iterator,bool>(inserted_, false);
}
std::pair<iterator, bool>
insert_at(const iterator& prior_, const interval_type& inter_val,
const codomain_type& co_val )
{
iterator inserted_
= this->_map.insert(prior_, value_type(inter_val, co_val));
if(inserted_ == prior_)
return std::pair<iterator,bool>(inserted_, false);
else if((*inserted_).first == inter_val)
return std::pair<iterator,bool>(inserted_, true);
else
return std::pair<iterator,bool>(inserted_, false);
}
protected:
sub_type* that() { return static_cast<sub_type*>(this); }
const sub_type* that()const { return static_cast<const sub_type*>(this); }
protected:
ImplMapT _map;
private:
//--------------------------------------------------------------------------
template<class Type, bool is_total_invertible>
struct on_invertible;
template<class Type>
struct on_invertible<Type, true>
{
typedef typename Type::segment_type segment_type;
typedef typename Type::inverse_codomain_combine inverse_codomain_combine;
static void subtract(Type& object, const segment_type& operand)
{ object.template _add<inverse_codomain_combine>(operand); }
};
template<class Type>
struct on_invertible<Type, false>
{
typedef typename Type::segment_type segment_type;
typedef typename Type::inverse_codomain_combine inverse_codomain_combine;
static void subtract(Type& object, const segment_type& operand)
{ object.template _subtract<inverse_codomain_combine>(operand); }
};
friend struct on_invertible<type, true>;
friend struct on_invertible<type, false>;
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
template<class Type, bool is_total>
struct on_definedness;
template<class Type>
struct on_definedness<Type, true>
{
static void add_intersection(Type& section, const Type& object,
const segment_type& operand)
{ object.total_add_intersection(section, operand); }
};
template<class Type>
struct on_definedness<Type, false>
{
static void add_intersection(Type& section, const Type& object,
const segment_type& operand)
{ object.partial_add_intersection(section, operand); }
};
friend struct on_definedness<type, true>;
friend struct on_definedness<type, false>;
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
template<class Type, bool has_set_semantics>
struct on_codomain_model;
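    // Dispatch on whether the codomain has set semantics: with set semantics the
    // value on the common interval is derived from both values via the inverse
    // intersection functor; without set semantics the common interval is simply
    // mapped to the identity element.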
template<class Type>
struct on_codomain_model<Type, true>
{
typedef typename Type::interval_type interval_type;
typedef typename Type::codomain_type codomain_type;
typedef typename Type::segment_type segment_type;
typedef typename Type::codomain_combine codomain_combine;
typedef typename Type::inverse_codomain_intersect inverse_codomain_intersect;
static void add(Type& intersection, interval_type& common_interval,
const codomain_type& flip_value, const codomain_type& co_value)
{
codomain_type common_value = flip_value;
inverse_codomain_intersect()(common_value, co_value);
intersection.template
_add<codomain_combine>(segment_type(common_interval, common_value));
}
};
template<class Type>
struct on_codomain_model<Type, false>
{
typedef typename Type::interval_type interval_type;
typedef typename Type::codomain_type codomain_type;
typedef typename Type::segment_type segment_type;
typedef typename Type::codomain_combine codomain_combine;
static void add(Type& intersection, interval_type& common_interval,
const codomain_type&, const codomain_type&)
{
intersection.template
_add<codomain_combine>(segment_type(common_interval,
identity_element<codomain_type>::value()));
}
};
friend struct on_codomain_model<type, true>;
friend struct on_codomain_model<type, false>;
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
template<class Type, bool is_total, bool absorbs_identities>
struct on_total_absorbable;
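    // Total map that absorbs identities: flipping any segment turns every value
    // into an identity element, and absorption erases them all, so the result
    // is the empty map.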
template<class Type>
struct on_total_absorbable<Type, true, true>
{
static void flip(Type& object, const typename Type::segment_type&)
{ icl::clear(object); }
};
#ifdef BOOST_MSVC
#pragma warning(push)
#pragma warning(disable:4127) // conditional expression is constant
#endif
template<class Type>
struct on_total_absorbable<Type, true, false>
{
typedef typename Type::segment_type segment_type;
typedef typename Type::codomain_type codomain_type;
static void flip(Type& object, const segment_type& operand)
{
object += operand;
ICL_FORALL(typename Type, it_, object)
(*it_).second = identity_element<codomain_type>::value();
if(mpl::not_<is_interval_splitter<Type> >::value)
icl::join(object);
}
};
#ifdef BOOST_MSVC
#pragma warning(pop)
#endif
template<class Type, bool absorbs_identities>
struct on_total_absorbable<Type, false, absorbs_identities>
{
typedef typename Type::segment_type segment_type;
typedef typename Type::codomain_type codomain_type;
typedef typename Type::interval_type interval_type;
typedef typename Type::value_type value_type;
typedef typename Type::const_iterator const_iterator;
typedef typename Type::set_type set_type;
typedef typename Type::inverse_codomain_intersect inverse_codomain_intersect;
static void flip(Type& object, const segment_type& interval_value_pair)
{
// That which is common shall be subtracted
// That which is not shall be added
// So interval_value_pair has to be 'complementary added' or flipped
interval_type span = interval_value_pair.first;
std::pair<const_iterator, const_iterator> exterior
= object.equal_range(span);
const_iterator first_ = exterior.first;
const_iterator end_ = exterior.second;
interval_type covered, left_over, common_interval;
const codomain_type& x_value = interval_value_pair.second;
const_iterator it_ = first_;
set_type eraser;
Type intersection;
while(it_ != end_ )
{
const codomain_type& co_value = (*it_).second;
covered = (*it_++).first;
//[a ... : span
// [b ... : covered
//[a b) : left_over
left_over = right_subtract(span, covered);
//That which is common ...
common_interval = span & covered;
if(!icl::is_empty(common_interval))
{
// ... shall be subtracted
icl::add(eraser, common_interval);
on_codomain_model<Type, has_set_semantics<codomain_type>::value>
::add(intersection, common_interval, x_value, co_value);
}
icl::add(object, value_type(left_over, x_value)); //That which is not shall be added
// Because this is a collision free addition I don't have to distinguish codomain_types.
//... d) : span
//... c) : covered
// [c d) : span'
span = left_subtract(span, covered);
}
//If span is not empty here, it is not in the set so it shall be added
icl::add(object, value_type(span, x_value));
//finally rewrite the common segments
icl::erase(object, eraser);
object += intersection;
}
};
//--------------------------------------------------------------------------
} ;
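
//------------------------------------------------------------------------------
// Illustrative usage sketch (an assumption for exposition, not part of this
// header): the concrete class template icl::interval_map derives from
// interval_base_map. Overlapping additions aggregate codomain values via the
// Combine functor (icl::inplace_plus by default):
//
//   boost::icl::interval_map<int,int> m;
//   m.add(std::make_pair(boost::icl::interval<int>::right_open(1,5), 1));
//   m.add(std::make_pair(boost::icl::interval<int>::right_open(3,7), 1));
//   // m now maps [1,3)->1, [3,5)->2, [5,7)->1
//------------------------------------------------------------------------------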
//==============================================================================
//= Addition detail
//==============================================================================
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::add_front(const interval_type& inter_val, iterator& first_)
{
    // If the collision sequence has a left residual 'left_resid', it is split off
    // to provide a standardized start for the algorithms:
    // The addend interval 'inter_val' covers the beginning of the collision sequence.
    // Only the first segment can have a left_resid: a part of *first_ left of inter_val.
interval_type left_resid = right_subtract((*first_).first, inter_val);
if(!icl::is_empty(left_resid))
{ // [------------ . . .
// [left_resid---first_ --- . . .
iterator prior_ = cyclic_prior(*this, first_);
const_cast<interval_type&>((*first_).first)
= left_subtract((*first_).first, left_resid);
//NOTE: Only splitting
this->_map.insert(prior_, segment_type(left_resid, (*first_).second));
}
//POST:
// [----- inter_val ---- . . .
// ...[-- first_ --...
}
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
template<class Combiner>
inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::add_segment(const interval_type& inter_val, const CodomainT& co_val, iterator& it_)
{
interval_type lead_gap = right_subtract(inter_val, (*it_).first);
if(!icl::is_empty(lead_gap))
{
// [lead_gap--- . . .
// [-- it_ ...
iterator prior_ = prior(it_);
iterator inserted_ = this->template gap_insert<Combiner>(prior_, lead_gap, co_val);
that()->handle_inserted(prior_, inserted_);
}
// . . . --------- . . . addend interval
    //  [-- it_ --)      has a common part with the first overlapping interval
Combiner()((*it_).second, co_val);
that()->template handle_left_combined<Combiner>(it_++);
}
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
template<class Combiner>
inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::add_main(interval_type& inter_val, const CodomainT& co_val,
iterator& it_, const iterator& last_)
{
interval_type cur_interval;
while(it_!=last_)
{
cur_interval = (*it_).first ;
add_segment<Combiner>(inter_val, co_val, it_);
// shrink interval
inter_val = left_subtract(inter_val, cur_interval);
}
}
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
template<class Combiner>
inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::add_rear(const interval_type& inter_val, const CodomainT& co_val, iterator& it_)
{
iterator prior_ = cyclic_prior(*that(), it_);
interval_type cur_itv = (*it_).first ;
interval_type lead_gap = right_subtract(inter_val, cur_itv);
if(!icl::is_empty(lead_gap))
{ // [lead_gap--- . . .
// [prior) [-- it_ ...
iterator inserted_ = this->template gap_insert<Combiner>(prior_, lead_gap, co_val);
that()->handle_inserted(prior_, inserted_);
}
interval_type end_gap = left_subtract(inter_val, cur_itv);
if(!icl::is_empty(end_gap))
{
// [----------------end_gap)
// . . . -- it_ --)
Combiner()((*it_).second, co_val);
that()->template gap_insert_at<Combiner>(it_, prior_, end_gap, co_val);
}
else
{
        // only for the last segment there can be a right_resid: a part of *it_ right of inter_val
interval_type right_resid = left_subtract(cur_itv, inter_val);
if(icl::is_empty(right_resid))
{
// [---------------)
// [-- it_ ---)
Combiner()((*it_).second, co_val);
that()->template handle_preceeded_combined<Combiner>(prior_, it_);
}
else
{
// [--------------)
// [-- it_ --right_resid)
const_cast<interval_type&>((*it_).first) = right_subtract((*it_).first, right_resid);
            //NOTE: This is NOT an insertion that has to take care of the correct
            //      application of the Combiner functor. It only reestablishes the
            //      state after splitting the 'it_' interval value pair. Using
            //      _map_insert<Combiner> does not work here.
iterator insertion_ = this->_map.insert(it_, value_type(right_resid, (*it_).second));
that()->handle_reinserted(insertion_);
Combiner()((*it_).second, co_val);
that()->template handle_preceeded_combined<Combiner>(insertion_, it_);
}
}
}
//==============================================================================
//= Addition
//==============================================================================
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
template<class Combiner>
inline typename interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>::iterator
interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::_add(const segment_type& addend)
{
typedef typename on_absorbtion<type,Combiner,
absorbs_identities<type>::value>::type on_absorbtion_;
const interval_type& inter_val = addend.first;
if(icl::is_empty(inter_val))
return this->_map.end();
const codomain_type& co_val = addend.second;
if(on_absorbtion_::is_absorbable(co_val))
return this->_map.end();
std::pair<iterator,bool> insertion
= this->_map.insert(value_type(inter_val, version<Combiner>()(co_val)));
if(insertion.second)
return that()->handle_inserted(insertion.first);
else
{
// Detect the first and the end iterator of the collision sequence
iterator first_ = this->_map.lower_bound(inter_val),
last_ = insertion.first;
//assert(end_ == this->_map.upper_bound(inter_val));
iterator it_ = first_;
interval_type rest_interval = inter_val;
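        // Three-phase overlap handling: split off a possible left residual
        // (add_front), combine the addend with all inner segments (add_main),
        // then treat the last segment and any trailing gap (add_rear).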
add_front (rest_interval, it_ );
add_main<Combiner>(rest_interval, co_val, it_, last_);
add_rear<Combiner>(rest_interval, co_val, it_ );
return it_;
}
}
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
template<class Combiner>
inline typename interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>::iterator
interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::_add(iterator prior_, const segment_type& addend)
{
typedef typename on_absorbtion<type,Combiner,
absorbs_identities<type>::value>::type on_absorbtion_;
const interval_type& inter_val = addend.first;
if(icl::is_empty(inter_val))
return prior_;
const codomain_type& co_val = addend.second;
if(on_absorbtion_::is_absorbable(co_val))
return prior_;
std::pair<iterator,bool> insertion
= add_at<Combiner>(prior_, inter_val, co_val);
if(insertion.second)
return that()->handle_inserted(insertion.first);
else
{
// Detect the first and the end iterator of the collision sequence
std::pair<iterator,iterator> overlap = equal_range(inter_val);
iterator it_ = overlap.first,
last_ = prior(overlap.second);
interval_type rest_interval = inter_val;
add_front (rest_interval, it_ );
add_main<Combiner>(rest_interval, co_val, it_, last_);
add_rear<Combiner>(rest_interval, co_val, it_ );
return it_;
}
}
//==============================================================================
//= Subtraction detail
//==============================================================================
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::subtract_front(const interval_type& inter_val, iterator& it_)
{
interval_type left_resid = right_subtract((*it_).first, inter_val);
if(!icl::is_empty(left_resid)) // [--- inter_val ---)
{ //[prior_) [left_resid)[--- it_ . . .
iterator prior_ = cyclic_prior(*this, it_);
const_cast<interval_type&>((*it_).first) = left_subtract((*it_).first, left_resid);
this->_map.insert(prior_, value_type(left_resid, (*it_).second));
        // The segment *it_ is split at the lower bound of inter_val, so as an invariant
        // segment *it_ is always "under" inter_val and the left_resid is empty.
}
}
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
template<class Combiner>
inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::subtract_main(const CodomainT& co_val, iterator& it_, const iterator& last_)
{
while(it_ != last_)
{
Combiner()((*it_).second, co_val);
that()->template handle_left_combined<Combiner>(it_++);
}
}
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
template<class Combiner>
inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::subtract_rear(interval_type& inter_val, const CodomainT& co_val, iterator& it_)
{
interval_type right_resid = left_subtract((*it_).first, inter_val);
if(icl::is_empty(right_resid))
{
Combiner()((*it_).second, co_val);
that()->template handle_combined<Combiner>(it_);
}
else
{
const_cast<interval_type&>((*it_).first) = right_subtract((*it_).first, right_resid);
iterator next_ = this->_map.insert(it_, value_type(right_resid, (*it_).second));
Combiner()((*it_).second, co_val);
that()->template handle_succeeded_combined<Combiner>(it_, next_);
}
}
//==============================================================================
//= Subtraction
//==============================================================================
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
template<class Combiner>
inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::_subtract(const segment_type& minuend)
{
interval_type inter_val = minuend.first;
if(icl::is_empty(inter_val))
return;
const codomain_type& co_val = minuend.second;
if(on_absorbtion<type,Combiner,Traits::absorbs_identities>::is_absorbable(co_val))
return;
std::pair<iterator, iterator> exterior = equal_range(inter_val);
if(exterior.first == exterior.second)
return;
iterator last_ = prior(exterior.second);
iterator it_ = exterior.first;
subtract_front (inter_val, it_ );
subtract_main <Combiner>( co_val, it_, last_);
subtract_rear <Combiner>(inter_val, co_val, it_ );
}
//==============================================================================
//= Insertion
//==============================================================================
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::insert_main(const interval_type& inter_val, const CodomainT& co_val,
iterator& it_, const iterator& last_)
{
iterator end_ = boost::next(last_);
iterator prior_ = it_, inserted_;
if(prior_ != this->_map.end())
--prior_;
interval_type rest_interval = inter_val, left_gap, cur_itv;
interval_type last_interval = last_ ->first;
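    // Insert semantics: walk the collision sequence and fill only the gaps
    // between existing segments with co_val; existing mappings stay untouched.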
while(it_ != end_ )
{
cur_itv = (*it_).first ;
left_gap = right_subtract(rest_interval, cur_itv);
if(!icl::is_empty(left_gap))
{
inserted_ = this->_map.insert(prior_, value_type(left_gap, co_val));
it_ = that()->handle_inserted(inserted_);
}
// shrink interval
rest_interval = left_subtract(rest_interval, cur_itv);
prior_ = it_;
++it_;
}
//insert_rear(rest_interval, co_val, last_):
interval_type end_gap = left_subtract(rest_interval, last_interval);
if(!icl::is_empty(end_gap))
{
inserted_ = this->_map.insert(prior_, value_type(end_gap, co_val));
it_ = that()->handle_inserted(inserted_);
}
else
it_ = prior_;
}
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
inline typename interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>::iterator
interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::_insert(const segment_type& addend)
{
interval_type inter_val = addend.first;
if(icl::is_empty(inter_val))
return this->_map.end();
const codomain_type& co_val = addend.second;
if(on_codomain_absorbtion::is_absorbable(co_val))
return this->_map.end();
std::pair<iterator,bool> insertion = this->_map.insert(addend);
if(insertion.second)
return that()->handle_inserted(insertion.first);
else
{
// Detect the first and the end iterator of the collision sequence
iterator first_ = this->_map.lower_bound(inter_val),
last_ = insertion.first;
//assert((++last_) == this->_map.upper_bound(inter_val));
iterator it_ = first_;
insert_main(inter_val, co_val, it_, last_);
return it_;
}
}
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
inline typename interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>::iterator<|fim▁hole|>{
interval_type inter_val = addend.first;
if(icl::is_empty(inter_val))
return prior_;
const codomain_type& co_val = addend.second;
if(on_codomain_absorbtion::is_absorbable(co_val))
return prior_;
std::pair<iterator,bool> insertion = insert_at(prior_, inter_val, co_val);
if(insertion.second)
return that()->handle_inserted(insertion.first);
{
// Detect the first and the end iterator of the collision sequence
std::pair<iterator,iterator> overlap = equal_range(inter_val);
iterator it_ = overlap.first,
last_ = prior(overlap.second);
insert_main(inter_val, co_val, it_, last_);
return it_;
}
}
//==============================================================================
//= Erasure segment_type
//==============================================================================
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::erase_rest(interval_type& inter_val, const CodomainT& co_val,
iterator& it_, const iterator& last_)
{
// For all intervals within loop: (*it_).first are contained_in inter_val
while(it_ != last_)
if((*it_).second == co_val)
this->_map.erase(it_++);
else it_++;
//erase_rear:
if((*it_).second == co_val)
{
interval_type right_resid = left_subtract((*it_).first, inter_val);
if(icl::is_empty(right_resid))
this->_map.erase(it_);
else
const_cast<interval_type&>((*it_).first) = right_resid;
}
}
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
inline SubType& interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::erase(const segment_type& minuend)
{
interval_type inter_val = minuend.first;
if(icl::is_empty(inter_val))
return *that();
const codomain_type& co_val = minuend.second;
if(on_codomain_absorbtion::is_absorbable(co_val))
return *that();
std::pair<iterator,iterator> exterior = equal_range(inter_val);
if(exterior.first == exterior.second)
return *that();
iterator first_ = exterior.first, end_ = exterior.second,
last_ = cyclic_prior(*this, end_);
iterator second_= first_; ++second_;
if(first_ == last_)
{ // [----inter_val----)
// .....first_==last_.....
// only for the last there can be a right_resid: a part of *it_ right of minuend
interval_type right_resid = left_subtract((*first_).first, inter_val);
if((*first_).second == co_val)
{
interval_type left_resid = right_subtract((*first_).first, inter_val);
if(!icl::is_empty(left_resid)) // [----inter_val----)
{ // [left_resid)..first_==last_......
const_cast<interval_type&>((*first_).first) = left_resid;
if(!icl::is_empty(right_resid))
this->_map.insert(first_, value_type(right_resid, co_val));
}
else if(!icl::is_empty(right_resid))
const_cast<interval_type&>((*first_).first) = right_resid;
else
this->_map.erase(first_);
}
}
else
{
// first AND NOT last
if((*first_).second == co_val)
{
interval_type left_resid = right_subtract((*first_).first, inter_val);
if(icl::is_empty(left_resid))
this->_map.erase(first_);
else
const_cast<interval_type&>((*first_).first) = left_resid;
}
erase_rest(inter_val, co_val, second_, last_);
}
return *that();
}
//==============================================================================
//= Erasure key_type
//==============================================================================
template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc>
inline SubType& interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::erase(const interval_type& minuend)
{
if(icl::is_empty(minuend))
return *that();
std::pair<iterator, iterator> exterior = equal_range(minuend);
if(exterior.first == exterior.second)
return *that();
iterator first_ = exterior.first,
end_ = exterior.second,
last_ = prior(end_);
interval_type left_resid = right_subtract((*first_).first, minuend);
interval_type right_resid = left_subtract(last_ ->first, minuend);
if(first_ == last_ )
if(!icl::is_empty(left_resid))
{
const_cast<interval_type&>((*first_).first) = left_resid;
if(!icl::is_empty(right_resid))
this->_map.insert(first_, value_type(right_resid, (*first_).second));
}
else if(!icl::is_empty(right_resid))
const_cast<interval_type&>((*first_).first) = left_subtract((*first_).first, minuend);
else
this->_map.erase(first_);
else
{ // [-------- minuend ---------)
// [left_resid fst) . . . . [lst right_resid)
iterator second_= first_; ++second_;
iterator start_ = icl::is_empty(left_resid)? first_: second_;
iterator stop_ = icl::is_empty(right_resid)? end_ : last_ ;
this->_map.erase(start_, stop_); //erase [start_, stop_)
if(!icl::is_empty(left_resid))
const_cast<interval_type&>((*first_).first) = left_resid;
if(!icl::is_empty(right_resid))
const_cast<interval_type&>(last_ ->first) = right_resid;
}
return *that();
}
//-----------------------------------------------------------------------------
// type traits
//-----------------------------------------------------------------------------
template
<
class SubType,
class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc
>
struct is_map<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> >
{
typedef is_map<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> > type;
BOOST_STATIC_CONSTANT(bool, value = true);
};
template
<
class SubType,
class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc
>
struct has_inverse<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> >
{
typedef has_inverse<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> > type;
BOOST_STATIC_CONSTANT(bool, value = (has_inverse<CodomainT>::value));
};
template
<
class SubType,
class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc
>
struct is_interval_container<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> >
{
typedef is_interval_container<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> > type;
BOOST_STATIC_CONSTANT(bool, value = true);
};
template
<
class SubType,
class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc
>
struct absorbs_identities<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> >
{
typedef absorbs_identities<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> > type;
BOOST_STATIC_CONSTANT(bool, value = (Traits::absorbs_identities));
};
template
<
class SubType,
class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc
>
struct is_total<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> >
{
typedef is_total<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> > type;
BOOST_STATIC_CONSTANT(bool, value = (Traits::is_total));
};
}} // namespace icl boost
#endif<|fim▁end|> | interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>
::_insert(iterator prior_, const segment_type& addend) |
<|file_name|>issue-7563.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate debug;
trait IDummy {
fn do_nothing(&self);
}
struct A { a: int }
struct B<'a> { b: int, pa: &'a A }
impl IDummy for A {
fn do_nothing(&self) {
println!("A::do_nothing() is called");
}<|fim▁hole|>
impl<'a> B<'a> {
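    // Cast the borrowed A to a trait object; the 'a lifetime ties the returned
    // reference to the A that `pa` borrows.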
fn get_pa(&self) -> &'a IDummy { self.pa as &'a IDummy }
}
pub fn main() {
let sa = A { a: 100 };
let sb = B { b: 200, pa: &sa };
println!("sa is {:?}", sa);
println!("sb is {:?}", sb);
println!("sb.pa is {:?}", sb.get_pa());
}<|fim▁end|> | } |
<|file_name|>Lock.java<|end_file_name|><|fim▁begin|>/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package blastbenchmark;
<|fim▁hole|> * @author thanos
*/
public class Lock {
private boolean isLocked = false;
public synchronized void lock()
throws InterruptedException {
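        // Loop rather than a single check: wait() may wake up spuriously, so the
        // flag must be re-tested before proceeding.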
while (isLocked) {
//System.out.println("locked.. sleeping");
wait();
}
isLocked = true;
}
public synchronized void unlock() {
isLocked = false;
notify();
}
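
    // Illustrative usage (an assumed example, not part of the benchmark):
    //   Lock lock = new Lock();
    //   lock.lock();
    //   try { /* critical section */ } finally { lock.unlock(); }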
}<|fim▁end|> |
/**
* |
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|>"""Special exceptions for the ``command_interface`` app."""
class CommandError(Exception):<|fim▁hole|><|fim▁end|> | pass |
<|file_name|>BundleGrid.js<|end_file_name|><|fim▁begin|>/*
* Copyright 2013 pingworks - Alexander Birk und Christoph Lukas
* Copyright 2014 //SEIBERT/MEDIA - Lars-Erik Kimmel
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
Ext.define("Dash.view.BundleGrid", {
extend: 'Ext.grid.Panel',
alias: 'widget.bundlegrid',
requires: ['Ext.String', 'Ext.grid.column.Action', 'Ext.window.MessageBox'],
store: 'Bundles',
width: '100%',
id: 'BundleGrid',
stageColumnOffset: 6,
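    // Number of metadata columns that precede the per-stage columns; used by
    // getStageNrFromColIndex to map a grid column onto its pipeline stage number.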
visibleColumnIndex: function (colIndex) {
// work around extjs bug: colIndex is wrong when some cols are hidden
var hiddenCols = 0;
for (var i = 0; i < colIndex; i++) {
if (!this.columns[i].isVisible())
hiddenCols++;
}
return colIndex + hiddenCols;
},
getStageNrFromColIndex: function(colIndex) {
return this.visibleColumnIndex(colIndex) - this.stageColumnOffset;
},
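    // Note: the renderer below mutates the action column's item as a side effect,
    // swapping its icon and CSS class per row to reflect the stage status.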
stageStatusIconRenderer: function(value, metadata, record, rowIndex, colIndex, store, view) {
var stageStatus = Dash.app.getController('Bundle').getStageStatus(record, this.getStageNrFromColIndex(colIndex));
var iconUrl = Ext.BLANK_IMAGE_URL;
var iconCls = '';
if (stageStatus) {
iconUrl = Ext.String.format(Dash.config.stagestatus.iconpath, stageStatus.get('icon'));
iconCls = stageStatus.get('cls');
}
this.columns[this.visibleColumnIndex(colIndex)].items[0].icon = iconUrl;
this.columns[this.visibleColumnIndex(colIndex)].items[0].iconCls = iconCls;
},
deploymentActionRenderer: function(value, metadata, record, rowIndex, colIndex, store, view) {
var ctrl = Dash.app.getController('Deployment');
this.columns[this.colIndexDeployment].items[0].iconCls =
ctrl.deploymentAllowed(record) ? '' : this.disabledCls;
},
triggerJenkinsJobActionRenderer: function(value, metadata, record, rowIndex, colIndex, store, view) {
var ctrl = Dash.app.getController('TriggerJenkinsJob');
this.columns[this.colIndexTriggerJenkinsJob].items[0].iconCls =
ctrl.triggerJenkinsJobAllowed(record) ? '' : this.disabledCls;
},
createChangeTooltip: function(target, bundle) {
return Ext.create('Dash.view.ChangeToolTip', {
id: 'TTC-' + bundle.get('id').replace(/\./g, '-'),
target: target
});
},
createJobResultTooltip: function(target, bundle, stage) {
return Ext.create('Dash.view.JobResultToolTip', {
id: 'TTJR-' + bundle.get('id').replace(/\./g, '-') + '-' + stage,
target: target
});
},
initComponent: function() {
var that = this;
this.columns = [
{
text: Dash.config.bundlegrid.label.timestamp,
dataIndex: 'timestamp',
type: 'date',
renderer: Ext.util.Format.dateRenderer(Dash.config.bundlegrid.dateformat),
width: Dash.config.bundlegrid.colwidth.timestamp,
hidden: Dash.config.bundlegrid.hidden.timestamp
},
{
text: Dash.config.bundlegrid.label.committer,
dataIndex: 'committer',
width: Dash.config.bundlegrid.colwidth.committer,
hidden: Dash.config.bundlegrid.hidden.committer
},
{
text: Dash.config.bundlegrid.label.pname,
dataIndex: 'pname',
width: Dash.config.bundlegrid.colwidth.pname,
hidden: Dash.config.bundlegrid.hidden.pname
},
{
text: Dash.config.bundlegrid.label.branch,<|fim▁hole|> dataIndex: 'branch_name',
width: Dash.config.bundlegrid.colwidth.branch,
hidden: Dash.config.bundlegrid.hidden.branch
},
{
text: Dash.config.bundlegrid.label.revision,
dataIndex: 'revision',
renderer: function(value, metadata, record, rowIndex, colIndex, store, view) {
return ( Dash.config.bundlegrid.vcslink && Dash.config.bundlegrid.vcslink != '' && record.get('revision') != 'Unavailable')
? Ext.String.format(Dash.config.bundlegrid.vcslink, record.get('repository'), record.get('revision'), record.get('branch'))
: record.get('revision');
},
width: Dash.config.bundlegrid.colwidth.revision,
hidden: Dash.config.bundlegrid.hidden.revision
},
{
text: Dash.config.bundlegrid.label.repository,
dataIndex: 'repository',
renderer: function(value, metadata, record, rowIndex, colIndex, store, view) {
return ( Dash.config.bundlegrid.vcsrepolink && Dash.config.bundlegrid.vcsrepolink != '' && record.get('repository') != 'Unavailable')
? Ext.String.format(Dash.config.bundlegrid.vcsrepolink, record.get('repository'), record.get('revision'))
: record.get('repository');
},
width: Dash.config.bundlegrid.colwidth.repository,
hidden: Dash.config.bundlegrid.hidden.repository
},
{
text: Dash.config.bundlegrid.label.bundle,
menuText: 'Bundle',
dataIndex: 'id',
renderer: function(value, metadata, record, rowIndex, colIndex, store, view) {
return ( Dash.config.bundlegrid.repolink && Dash.config.bundlegrid.repolink != '' )
? Ext.String.format(Dash.config.bundlegrid.repolink, record.get('branch'), record.get('id'))
: record.get('id');
},
width: Dash.config.bundlegrid.colwidth.bundle,
hidden: Dash.config.bundlegrid.hidden.bundle
}
];
for (var i = 1; i <= Dash.config.pipelineStages; i++) {
this.columns.push({
text: Dash.config.bundlegrid.label['stage' + i],
menuText: i,
dataIndex: 'stage' + i,
align: 'center',
xtype: 'actioncolumn',
items: [
{
handler: function(gridview, rowIndex, colIndex, item, event, record) {
var stage = this.getStageNrFromColIndex(colIndex);
that.fireEvent('hideAllTooltips');
that.fireEvent(
'loadJobResult',
record,
stage,
that.createJobResultTooltip(event.target, record, stage)
);
}
}
],
renderer: this.stageStatusIconRenderer,
scope: this,
width: Dash.config.bundlegrid.colwidth['stage' + i],
hidden: Dash.config.bundlegrid.hidden['stage' + i]
});
}
var cols = [
{
text: Dash.config.bundlegrid.label.build,
menuText: 'Build',
align: 'center',
xtype: 'actioncolumn',
width: Dash.config.bundlegrid.colwidth.build,
hidden: Dash.config.bundlegrid.hidden.build,
items: [
{
margin: 10,
tooltip: "Build neustarten",
icon: Dash.config.bundlegrid.icon.restartBuild,
isDisabled: function(gridview, rowIndex, colIndex, item, record) {
return record.isBuildRunning();
},
handler: function(gridview, rowIndex, colIndex, item, event, record) {
Ext.MessageBox.confirm(
                            'Restart build',
                            Ext.String.format("Do you really want to restart the build for revision '{0}' on branch '{1}'?\nThe build may start later and will only then appear on the dashboard.", record.get('revision'), record.get('branch')),
function(btn) {
if (btn == 'yes') {
that.fireEvent('restartBuild', record);
}
}
);
}
},
{
margin: 10,
tooltip: "Build stoppen",
icon: Dash.config.bundlegrid.icon.stopBuild,
isDisabled: function(gridview, rowIndex, colIndex, item, record) {
return !record.isBuildRunning();
},
handler: function(gridview, rowIndex, colIndex, item, event, record) {
Ext.MessageBox.confirm(
                            'Stop build',
                            Ext.String.format("Do you really want to stop the build for revision '{0}' on branch '{1}'?", record.get('revision'), record.get('branch')),
function(btn) {
if (btn == 'yes') {
that.fireEvent('stopBuild', record);
}
}
);
}
},
{
margin: 10,
tooltip: "Build anzeigen",
icon: Dash.config.bundlegrid.icon.showBuild,
isDisabled: function(gridview, rowIndex, colIndex, item, record) {
return !Ext.isDefined(record.getLatestBuildUrl());
},
handler: function(gridview, rowIndex, colIndex, item, event, record) {
that.fireEvent('showBuild', record);
}
}
],
scope: this
},
{
text: Dash.config.bundlegrid.label.changes,
                menuText: 'Changes',
align: 'center',
xtype: 'actioncolumn',
width: Dash.config.bundlegrid.colwidth.changes,
hidden: Dash.config.bundlegrid.hidden.changes,
items: [
{
icon: Dash.config.bundlegrid.icon.change,
handler: function(gridview, rowIndex, colIndex, item, event, record) {
that.fireEvent('hideAllTooltips');
that.fireEvent(
'loadChanges',
record,
that.createChangeTooltip(event.target, record)
);
}
}
]
},
{
id: 'ColumnDeployment',
text: Dash.config.bundlegrid.label.deployment,
xtype: 'actioncolumn',
width: Dash.config.bundlegrid.colwidth.deployment,
hidden: Dash.config.bundlegrid.hidden.deployment,
flex: Dash.config.bundlegrid.flex.deployment,
items: [
{
disabled: !Dash.config.bundlegrid.deployment.enabled,
icon: Dash.config.bundlegrid.icon.deploy,
handler: function(gridview, rowIndex, colIndex, item, event, record) {
that.fireEvent('hideEnvironmentsWindow');
that.fireEvent('hideTriggerJenkinsJobWindow');
that.fireEvent('showDeployWindow', record);
}
}
],
renderer: this.deploymentActionRenderer,
scope: this
},
{
id: 'ColumnTriggerJenkinsJob',
text: Dash.config.bundlegrid.label.triggerJenkinsJob,
xtype: 'actioncolumn',
width: Dash.config.bundlegrid.colwidth.triggerJenkinsJob,
hidden: Dash.config.bundlegrid.hidden.triggerJenkinsJob,
flex: Dash.config.bundlegrid.flex.triggerJenkinsJob,
items: [
{
disabled: !Dash.config.bundlegrid.triggerJenkinsJob.enabled,
icon: Dash.config.bundlegrid.icon.deploy,
handler: function(gridview, rowIndex, colIndex, item, event, record) {
that.fireEvent('hideEnvironmentsWindow');
that.fireEvent('hideDeployWindow');
that.fireEvent('showTriggerJenkinsJobWindow', record);
}
}
],
renderer: this.triggerJenkinsJobActionRenderer,
scope: this
},
{
id: 'ColumnEditComment',
text: Dash.config.bundlegrid.label.editComment,
menuText: Dash.config.bundlegrid.label.editComment,
align: 'center',
xtype: 'actioncolumn',
hidden: Dash.config.bundlegrid.hidden.editComment,
width: Dash.config.bundlegrid.colwidth.editComment,
items: [
{
icon: Dash.config.bundlegrid.icon.comment,
handler: function(gridview, rowIndex, colIndex, item, event, record) {
that.fireEvent('showCommentWindow', record);
}
}
]
},
{
id: 'ColumnComment',
text: Dash.config.bundlegrid.label.comment,
menuText: Dash.config.bundlegrid.label.comment,
dataIndex: 'comment',
hidden: Dash.config.bundlegrid.hidden.comment,
width: Dash.config.bundlegrid.colwidth.comment,
flex: Dash.config.bundlegrid.flex.comment,
renderer: function(text) {
return Ext.util.Format.htmlEncode(text);
}
}
];
Ext.Array.each(cols, function (el) {
that.columns.push(el);
});
        Ext.Array.forEach(this.columns, function(column, index) {
            if (column.id == 'ColumnDeployment')
                this.colIndexDeployment = index;
            if (column.id == 'ColumnTriggerJenkinsJob')
                this.colIndexTriggerJenkinsJob = index;
        }, this);
this.callParent(arguments);
}
});<|fim▁end|> | |
<|file_name|>tck-touchscreen-websockets.js<|end_file_name|><|fim▁begin|>/**********************************************************************************
*
* This file is part of e-venement.
*
* e-venement is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*
* e-venement is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with e-venement; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Copyright (c) 2006-2016 Baptiste SIMON <baptiste.simon AT e-glop.net>
* Copyright (c) 2006-2016 Libre Informatique [http://www.libre-informatique.fr/]
*
***********************************************************************************/
if ( LI === undefined )
var LI = {};
$(document).ready(function () {
var connector = new EveConnector('https://localhost:8164', function () {
// *T* here we are after the websocket first connection is established
getAvailableUsbDevices().then(getAvailableSerialDevices).then(function(){
if ( LI.activeEPT )
configureEPT();
});
connector.onError = function () {
$('#li_transaction_museum .print [name=direct], #li_transaction_manifestations .print [name=direct]')
.remove();
$('#li_transaction_museum .print').prop('title', null);
};
});
// AVAILABLE DEVICES ******************
function getAvailableDevices(type)
{
if (['usb', 'serial'].indexOf(type) === -1)
return Promise.reject('Wrong device type: ' + type);
// Get all the configured devices ids
connector.log('info', 'Configured ' + type + ' devices: ', LI[type]);
if (LI[type] === undefined)
return;
var devices = [];
['printers', 'displays', 'epts'].forEach(function(family){
if (LI[type][family] !== undefined)
for (var name in LI[type][family])
devices.push(LI[type][family][name][0]);
});
return connector.areDevicesAvailable({type: type, params: devices}).then(function(data){
// data contains the list of the available devices
connector.log('info', 'Available ' + type + ' devices:', data.params);
// Check if we have an available USB printer device and store it in LI.activeDirectPrinter global variable
var foundPrinter = false;
if ( type === "usb" && LI.usb.printers !== undefined ) {
foundPrinter = data.params.some(function(device){
for ( var name in LI.usb.printers )
if ( LI.usb.printers[name][0].vid === device.vid && LI.usb.printers[name][0].pid === device.pid ) {
LI.activeDirectPrinter = {type: type, params: device}; // global var
connector.log('info', 'LI.activeDirectPrinter:', LI.activeDirectPrinter);
return true;
}
return false;
});
}
foundPrinter && configureDirectPrint();
// Check if we have an available SERIAL display device and store it in LI.activeDisplay global variable
LI.activeDisplay = false; // global var
if ( type === "serial" && LI.serial.displays !== undefined ) {
data.params.some(function(device){
for ( var name in LI.serial.displays )
if ( device.pnpId.includes(LI.serial.displays[name][0].pnpId) ) {
LI.activeDisplay = {type: type, params: device};
connector.log('info', 'LI.activeDisplay:', LI.activeDisplay);
return true;
}
return false;
});
}
if (LI.activeDisplay) {
configureDisplay();
displayTotals();
}
// Check if we have an available serial EPT device and store it in LI.activeEPT global variable
LI.activeEPT = false; // global var
if ( type === "serial" && LI.serial.epts !== undefined ) {
data.params.some(function(device){
for ( var name in LI.serial.epts )
if ( device.pnpId.includes(LI.serial.epts[name][0].pnpId) ) {
LI.activeEPT = {type: type, params: device}; // global var
connector.log('info', 'LI.activeEPT:', LI.activeEPT);
return true;
}
return false;
});
}
}).catch(function(error){
connector.log('error', error);
});
}; // END getAvailableDevices()
function getAvailableUsbDevices()
{
return getAvailableDevices('usb');
};
function getAvailableSerialDevices()
{
return getAvailableDevices('serial');
};
// LCD DISPLAY ******************
var lastDisplay = {date: Date.now(), lines: []};
var displayTotalsTimeoutId;
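    // Writes to the customer display are de-duplicated and rate limited:
    // lastDisplay remembers what was shown last, and displayTotalsTimeoutId
    // defers updates that arrive less than 500ms apart.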
// configure the form for handling LCD display (if there is an available LCD display)
function configureDisplay()
{
if ( LI.activeDisplay === undefined )
return;
// Refresh Display when totals change
$('#li_transaction_field_payments_list .topay .pit').on("changeData", displayTotals);
$('#li_transaction_field_payments_list .change .pit').on("changeData", displayTotals);
// Display totals when page (or tab) is selected
document.addEventListener("visibilitychange", function(evt){
visible = !this.hidden;
if ( visible )
displayTotals(500, true);
else
displayDefaultMsg();
});
// Display default message when leaving the page (or tab closed...)
$(window).on("beforeunload", function(){
$('#li_transaction_field_payments_list .topay .pit').unbind("changeData", displayTotals);
$('#li_transaction_field_payments_list .change .pit').unbind("changeData", displayTotals);
displayDefaultMsg(true);
});
};
// outputs totals on LCD display
function displayTotals(delay, force) {
var Display = LIDisplay(LI.activeDisplay, connector);
if (!Display)
return;
var total = $('#li_transaction_field_payments_list .topay .pit').data('value');
total = LI.format_currency(total, false).replace('€', 'E');
var total_label = $('.displays .display-total').text().trim();
var total_spaces = ' '.repeat(Display.width - total.length - total_label.length);
var left = $('#li_transaction_field_payments_list .change .pit').data('value');
left = LI.format_currency(left, false).replace('€', 'E');
var left_label = $('.displays .display-left').text().trim();
var left_spaces = ' '.repeat(Display.width - left.length - left_label.length);
var lines = [
total_label + total_spaces + total,
left_label + left_spaces + left
];
clearTimeout(displayTotalsTimeoutId);
if ( !force && lines.join('||') === lastDisplay.lines.join('||') ) {
return;
}
var now = Date.now();
if ( delay === undefined )
delay = (now - lastDisplay.date < 500) ? 500 : 0;
displayTotalsTimeoutId = setTimeout(function(){
lastDisplay.date = now;
lastDisplay.lines = lines;
Display.write(lines);
}, delay);
};
// outputs default message on USB display
function displayDefaultMsg(force) {
var Display = LIDisplay(LI.activeDisplay, connector);
if (!Display)
return;
var msg = $('.displays .display-default').text();
msg = msg || "...";
var lines = [msg, ''];
clearTimeout(displayTotalsTimeoutId);
if ( lines.join('||') === lastDisplay.lines.join('||') ) {
return;
}
var now = Date.now();
var delay = (now - lastDisplay.date < 500) ? 500 : 0;
if ( force ) {
lastDisplay.date = now;
lastDisplay.lines = lines;
Display.write(lines);
}
else displayTotalsTimeoutId = setTimeout(function(){
lastDisplay.date = now;
lastDisplay.lines = lines;
Display.write(lines);
}, delay);
};
// DIRECT PRINTING ******************
// configure the form for direct printing (if there is an available direct printer)
function configureDirectPrint()
{
if ( LI.activeDirectPrinter === undefined )
return;
var usbParams = LI.activeDirectPrinter.params;
var dp_title = $('#li_transaction_field_close .print .direct-printing-info').length > 0 ?
$('#li_transaction_field_close .print .direct-printing-info').text() :
( LI.directPrintTitle ? LI.directPrintTitle : "Direct Print" );
$('#li_transaction_museum .print, #li_transaction_manifestations .print')
.each(function () {
$(this)
.append($('<input type="hidden" />').prop('name', 'direct').val(JSON.stringify(usbParams)))
.prop('title', dp_title);
})
.attr('onsubmit', null)
.unbind('submit')
.submit(function () {
// *T* here we are when the print form is submitted
connector.log('info', 'Submitting direct print form...');
LI.printTickets(this, false, directPrint);
return false;
});
// Partial print
$('form.print.partial-print')
.append($('<input type="hidden" />').prop('name', 'direct').val(JSON.stringify(usbParams)))
.prop('title', dp_title)
.unbind('submit')
.submit(directPrint);
};
function directPrint(event) {
var form = event.target;
if (LI.activeDirectPrinter === undefined) {
LI.alert('Direct printer is undefined', 'error');
return false;
}
$.ajax({
method: $(form).prop('method'),
url: $(form).prop('action'),
data: $(form).serialize(),
error: function (error) {
console.error('directPrint ajax error', error);
},
success: function (data) {
// *T* here we are when we have got the base64 data representing tickets ready to be printed
if (!data) {
connector.log('info', 'Empty data, nothing to send');
return;
}
// send data to the printer through the connector then reads the printer answer
connector.log('info', 'Sending data...');
connector.log('info', data);
var Printer = LIPrinter(LI.activeDirectPrinter, connector);
if (!Printer) {
LI.alert('Direct printer not configured', 'error');
return;
}
var logData = {
transaction_id: LI.transactionId,
duplicate: $(form).find('[name=duplicate]').prop('checked'),
printer: JSON.stringify(LI.activeDirectPrinter)
};
Printer.print(data).then(function(res){
connector.log('info', 'Print OK', res);
LI.alert('Print OK');
logData.error = false;
logData.status = res.statuses.join(' | ');
logData.raw_status = res.raw_status;
}).catch(function (err) {
connector.log('error', 'Print error:', err);
for ( var i in err.statuses ) LI.alert(err.statuses[i], 'error');
logData.error = true;
logData.status = err.statuses.join(' | ');
logData.raw_status = err.raw_status;
}).then(function(){
// log direct print result in DB
$.ajax({
type: "GET",
url: LI.directPrintLogUrl,
data: {directPrint: logData},
dataType: 'json',
success: function () {
connector.log('info', 'directPrintLog success', LI.closePrintWindow);
typeof LI.closePrintWindow === "function" && LI.closePrintWindow();<|fim▁hole|> error: function (err) {
console.error(err);
}
});
});
}
});
return false;
};
// ELECTRONIC PAYMENT TERMINAL ******************
// configure the form for EPT handling (if there is an available EPT)
function configureEPT() {
if ( LI.activeEPT === undefined )
return;
$('#li_transaction_field_payment_new button[data-ept=1]').click(startEPT);
$('.cancel-ept-transaction').click(cancelEPT);
}
// toggle between the payment form and the EPT screen
function toggleEPTtransaction()
{
$('#li_transaction_field_payment_new form').toggle();
        $('#li_transaction_field_simplified .payments button[name="simplified[payment_method_id]"], #li_transaction_field_simplified .payments input').toggle();
$('#ept-transaction').toggle();
$('#ept-transaction-simplified').toggle();
}
function getCentsAmount(value) {
value = value + '';
var amount = value.replace(",", ".").trim();
if( /^(\-|\+)?[0-9]+(\.[0-9]+)?$/.test(amount) )
return Math.round(amount * 100);
return 'Not a number';
};
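    // e.g. getCentsAmount('12,50') -> 1250, getCentsAmount('abc') -> 'Not a number'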
// Initiate a transaction with the EPT
function startEPT(event) {
var EPT = LIEPT(LI.activeEPT, connector);
if ( !EPT )
return true; // submits the payment form
var value = $('#transaction_payment_new_value').val().trim();
if ( value === '' )
value = LI.parseFloat($('#li_transaction_field_payments_list tfoot .change .sf_admin_list_td_list_value.pit').html());
var amount = getCentsAmount(value);
if ( isNaN(amount) || amount <= 0 ) {
alert('Wrong amount'); // TODO: translate this
return false;
}
var transaction_id = $("#transaction_close_id").val().trim();
if ( ! transaction_id ) {
alert('Transaction ID not found'); // TODO: translate this
return false;
}
// replace new payment form by EPT transaction message
toggleEPTtransaction();
// Find out if we need to wait for the EPT transaction end
var wait = ( LI.ept_wait_transaction_end !== undefined ) ? LI.ept_wait_transaction_end : false;
        // The failure message is looked up before the async call so that both
        // the then() and catch() callbacks below can reference it.
        var errorMessage = $('.js-i18n[data-source="ept_failure"]').data('target');
        // Send the amount to the EPT
        EPT.sendAmount(amount, {wait: wait}).then(function(res){
            connector.log('info', res);
            // TODO: check integrity (pos, amount, currency) and read priv and rep fields
            if ( res.status === 'accepted' || res.status === 'handled')
                $('#li_transaction_field_payment_new form').submit();
            else {
                alert(errorMessage);
            }
toggleEPTtransaction();
}).catch(function(err){
connector.log('error', err);
alert(errorMessage);
toggleEPTtransaction();
});
// prevent new payment form submit
return false;
}
function cancelEPT() {
// replace EPT transaction message by new payment form
toggleEPTtransaction();
}
});<|fim▁end|> | }, |
<|file_name|>util.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import
import inspect
import itertools
import random
import warnings
import numpy as np
from .gd import GradientDescent
from .bfgs import Lbfgs
from .cg import NonlinearConjugateGradient
from .rprop import Rprop
from .rmsprop import RmsProp
from .adadelta import Adadelta
from .adam import Adam
from .radagrad import Radagrad
from .adagrad import Adagrad
from .adagrad_full import AdagradFull
try:
from sklearn.grid_search import ParameterSampler
except ImportError:
pass
def is_garray(cand):
return hasattr(cand, 'as_numpy_array')
def is_array(cand):
return is_garray(cand) or isinstance(cand, np.ndarray)
def clear_info(info):
"""Clean up contents of info dictionary for better use.
Keys to be removed are ``args``, ``kwargs`` and any non-scalar numpy or
gnumpy arrays. Numpy scalars are converted to floats.
Examples
--------
>>> import numpy as np
>>> info = {'args': None, 'foo': np.zeros(3), 'bar': np.array(1),
... 'loss': 1.}
>>> cleared = clear_info(info)
>>> cleared == {'bar': 1.0, 'loss': 1.0}
True
"""
items = info.iteritems()
items = ((k, float(v.reshape((1,))[0]) if is_array(v) and v.size == 1 else v)
for k, v in items)
items = ((k, v) for k, v in items if not is_array(v))
items = ((k, v) for k, v in items if k not in ('args', 'kwargs'))
return dict(items)
def coroutine(f):
"""Turn a generator function into a coroutine by calling .next() once."""
def started(*args, **kwargs):
cr = f(*args, **kwargs)
next(cr)
return cr
return started
def aslist(item):
if not isinstance(item, (list, tuple)):
item = [item]
return item
def mini_slices(n_samples, batch_size):
"""Yield slices of size `batch_size` that work with a container of length
`n_samples`."""
n_batches, rest = divmod(n_samples, batch_size)
if rest != 0:
n_batches += 1
return [slice(i * batch_size, (i + 1) * batch_size) for i in range(n_batches)]
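# Example: mini_slices(5, 2) returns [slice(0, 2), slice(2, 4), slice(4, 6)];
# the last slice may overshoot n_samples, which plain slicing tolerates.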
def draw_mini_slices(n_samples, batch_size, with_replacement=False):
slices = mini_slices(n_samples, batch_size)
idxs = range(len(slices))
    if with_replacement:
        while True:
            yield random.choice(slices)
else:
while True:
random.shuffle(idxs)
for i in idxs:
yield slices[i]
def draw_mini_indices(n_samples, batch_size):
assert n_samples > batch_size
idxs = range(n_samples)
random.shuffle(idxs)
pos = 0
while True:
while pos + batch_size <= n_samples:
yield idxs[pos:pos + batch_size]
pos += batch_size
batch = idxs[pos:]
needed = batch_size - len(batch)
random.shuffle(idxs)
batch += idxs[0:needed]
yield batch
pos = needed
def optimizer(identifier, wrt, *args, **kwargs):
"""Return an optimizer with the desired configuration.
This is a convenience function if one wants to try out different optimizers
but wants to change as little code as possible.
Additional arguments and keyword arguments will be passed to the constructor
of the class. If the found class does not take the arguments supplied, this
will `not` throw an error, but pass silently.
    :param identifier: String identifying the optimizer to use. Can be one of
        ``gd``, ``lbfgs``, ``ncg``, ``rprop``, ``rmsprop``, ``adadelta``,
        ``adam``, ``radagrad``, ``adagrad`` or ``adagrad-full``.
:param wrt: Numpy array pointing to the data to optimize.
"""
klass_map = {
'gd': GradientDescent,
'lbfgs': Lbfgs,
'ncg': NonlinearConjugateGradient,
'rprop': Rprop,
'rmsprop': RmsProp,
'adadelta': Adadelta,
'adam': Adam,
'radagrad': Radagrad,
'adagrad-full': AdagradFull,
'adagrad': Adagrad,
}
# Find out which arguments to pass on.
klass = klass_map[identifier]
argspec = inspect.getargspec(klass.__init__)
if argspec.keywords is None:
# Issue a warning for each of the arguments that have been passed
# to this optimizer but were not used.
expected_keys = set(argspec.args)
given_keys = set(kwargs.keys())
unused_keys = given_keys - expected_keys
for i in unused_keys:
warnings.warn('Argument named %s is not expected by %s'
% (i, klass))
# We need to filter stuff out.
used_keys = expected_keys & given_keys
kwargs = dict((k, kwargs[k]) for k in used_keys)
try:
opt = klass(wrt, *args, **kwargs)
except TypeError:
raise TypeError('required arguments for %s: %s' % (klass, argspec.args))
return opt
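# Example (sketch; assumes a parameter array wrt and a gradient callable
# fprime, plus whatever hyperparameters the chosen class accepts):
#     opt = optimizer('gd', wrt, fprime, step_rate=0.1)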
def shaped_from_flat(flat, shapes):
"""Given a one dimensional array ``flat``, return a list of views of shapes
``shapes`` on that array.
Each view will point to a distinct memory region, consecutively allocated
in flat.
Parameters
----------
flat : array_like
Array of one dimension.
shapes : list of tuples of ints
Each entry of this list specifies the shape of the corresponding view
into ``flat``.
Returns
-------
views : list of arrays
Each entry has the shape given in ``shapes`` and points as a view into
``flat``.
"""
shapes = [(i,) if isinstance(i, int) else i for i in shapes]
sizes = [np.prod(i) for i in shapes]
n_used = 0
views = []
for size, shape in zip(sizes, shapes):
this = flat[n_used:n_used + size]
n_used += size
this.shape = shape
views.append(this)
return views
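# Example: carve a (2, 3) matrix and a length-3 vector out of one flat array.
#     flat = np.empty(9)
#     w, b = shaped_from_flat(flat, [(2, 3), 3])  # w.shape == (2, 3), b.shape == (3,)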
def empty_with_views(shapes, empty_func=np.empty):
"""Create an array and views shaped according to ``shapes``.
The ``shapes`` parameter is a list of tuples of ints. Each tuple
represents a desired shape for an array which will be allocated in a bigger
memory region. This memory region will be represented by an array as well.
    For example, the shape specification ``[2, (3, 2)]`` will create an array
    ``flat`` of size 8. The first view will have a size of ``(2,)`` and point
    to the first two entries, i.e. ``flat[:2]``, while the second view will
    have a shape of ``(3, 2)`` and point to the elements ``flat[2:8]``.
Parameters
----------
spec : list of tuples of ints
Specification of the desired shapes.
empty_func : callable
function that returns a memory region given an integer of the desired
size. (Examples include ``numpy.empty``, which is the default,
``gnumpy.empty`` and ``theano.tensor.empty``.
Returns
-------
flat : array_like (depending on ``empty_func``)
Memory region containing all the views.
views : list of array_like
Variable number of results. Each contains a view into the array
``flat``.
Examples
--------
>>> from climin.util import empty_with_views
>>> flat, (w, b) = empty_with_views([(3, 2), 2])
>>> w[...] = 1
>>> b[...] = 2
>>> flat
array([ 1., 1., 1., 1., 1., 1., 2., 2.])
>>> flat[0] = 3
>>> w
array([[ 3., 1.],
[ 1., 1.],
[ 1., 1.]])
"""
shapes = [(i,) if isinstance(i, int) else i for i in shapes]
sizes = [np.prod(i) for i in shapes]
n_pars = sum(sizes)
flat = empty_func(n_pars)
views = shaped_from_flat(flat, shapes)
return flat, views
def minibatches(arr, batch_size, d=0):
"""Return a list of views of the given arr.
Each view represents a mini bach of the data.
Parameters
----------
arr : array_like
Array to obtain batches from. Needs to be slicable. If ``d > 0``, needs
to have a ``.shape`` attribute from which the number of samples can
be obtained.
batch_size : int
Size of a batch. Last batch might be smaller if ``batch_size`` is not a
divisor of ``arr``.
d : int, optional, default: 0
Dimension along which the data samples are separated and thus slicing
should be done.
Returns
-------
mini_batches : list
Each item of the list is a view of ``arr``. Views are ordered.
"""
# This alternative is to make this work with lists in the case of d == 0.
if d == 0:
n_batches, rest = divmod(len(arr), batch_size)
else:
n_batches, rest = divmod(arr.shape[d], batch_size)
if rest:
n_batches += 1
slices = (slice(i * batch_size, (i + 1) * batch_size)
for i in range(n_batches))
if d == 0:
res = [arr[i] for i in slices]
elif d == 1:
res = [arr[:, i] for i in slices]
elif d == 2:
res = [arr[:, :, i] for i in slices]
return res
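# Example: minibatches(np.arange(10), 4) returns three ordered views holding
# 4, 4 and 2 samples respectively.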
def iter_minibatches(lst, batch_size, dims, n_cycles=False, random_state=None):
"""Return an iterator that successively yields tuples containing aligned
minibatches of size `batch_size` from slicable objects given in `lst`, in
random order without replacement.
Because different containers might require slicing over different
    dimensions, the dimension of each container has to be given as a list
`dims`.
Parameters
----------
lst : list of array_like
        Each item of the list will be sliced into mini batches in alignment with
the others.
batch_size : int
Size of each batch. Last batch might be smaller.
dims : list
Aligned with ``lst``, gives the dimension along which the data samples
are separated.
n_cycles : int or False, optional [default: False]
Number of cycles after which to stop the iterator. If ``False``, will
yield forever.
random_state : a numpy.random.RandomState object, optional [default : None]
Random number generator that will act as a seed for the minibatch order
Returns
-------
batches : iterator
Infinite iterator of mini batches in random order (without
replacement).
"""
batches = [minibatches(i, batch_size, d) for i, d in zip(lst, dims)]
if len(batches) > 1:
if any(len(i) != len(batches[0]) for i in batches[1:]):
raise ValueError("containers to be batched have different lengths")
counter = itertools.count()
if random_state is not None:
random.seed(random_state.normal())
while True:
indices = [i for i, _ in enumerate(batches[0])]
while True:
random.shuffle(indices)
for i in indices:
yield tuple(b[i] for b in batches)
count = next(counter)
if n_cycles and count >= n_cycles:
raise StopIteration()
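# Example (sketch): aligned batches over inputs X and targets Z, both with
# samples along axis 0.
#     batches = iter_minibatches([X, Z], 100, [0, 0])
#     x_batch, z_batch = next(batches)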
class OptimizerDistribution(object):
"""OptimizerDistribution class.
Can be used for specifying optimizers in scikit-learn's randomized parameter
search.
Attributes<|fim▁hole|> Maps an optimizer key to a grid to sample from.
"""
def __init__(self, **options):
"""Create an OptimizerDistribution object.
Parameters
----------
options : dict
Maps an optimizer key to a grid to sample from.
"""
self.options = options
def rvs(self):
opt = random.choice(list(self.options.keys()))
grid = self.options[opt]
sample = list(ParameterSampler(grid, n_iter=1))[0]
return opt, sample<|fim▁end|> | ----------
options : dict |
<|file_name|>create_script_steps.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Copyright 2015-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from .world import world, logged_wait, res_filename
from nose.tools import eq_, assert_less
from bigml.api import HTTP_CREATED
from bigml.api import HTTP_ACCEPTED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status
from bigml.util import is_url
from .read_script_steps import i_get_the_script
#@step(r'the script code is "(.*)" and the value of "(.*)" is "(.*)"')
def the_script_code_and_attributes(step, source_code, param, param_value):<|fim▁hole|> (param, param_value, param, param_value)))
#@step(r'I create a whizzml script from a excerpt of code "(.*)"$')
def i_create_a_script(step, source_code):
resource = world.api.create_script(source_code,
{"project": world.project_id})
world.status = resource['code']
eq_(world.status, HTTP_CREATED)
world.location = resource['location']
world.script = resource['object']
world.scripts.append(resource['resource'])
#@step(r'I create a whizzml script from file "(.*)"$')
def i_create_a_script_from_file_or_url(step, source_code):
if not is_url(source_code):
source_code = res_filename(source_code)
resource = world.api.create_script(source_code,
{"project": world.project_id})
world.status = resource['code']
eq_(world.status, HTTP_CREATED)
world.location = resource['location']
world.script = resource['object']
world.scripts.append(resource['resource'])
#@step(r'I update the script with "(.*)", "(.*)"$')
def i_update_a_script(step, param, param_value):
resource = world.api.update_script(world.script['resource'],
{param: param_value})
world.status = resource['code']
eq_(world.status, HTTP_ACCEPTED)
world.location = resource['location']
world.script = resource['object']
#@step(r'I wait until the script status code is either (\d) or (-\d) less than (\d+)')
def wait_until_script_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
delta = int(secs) * world.delta
script_id = world.script['resource']
i_get_the_script(step, script_id)
status = get_status(world.script)
count = 0
while (status['code'] != int(code1) and
status['code'] != int(code2)):
count += 1
logged_wait(start, delta, count, "script")
assert_less((datetime.utcnow() - start).seconds, delta)
i_get_the_script(step, script_id)
status = get_status(world.script)
eq_(status['code'], int(code1))
#@step(r'I wait until the script is ready less than (\d+)')
def the_script_is_finished(step, secs):
wait_until_script_status_code_is(step, FINISHED, FAULTY, secs)<|fim▁end|> | res_param_value = world.script[param]
eq_(res_param_value, param_value,
("The script %s is %s and the expected %s is %s" % |
<|file_name|>PropertyList.tsx<|end_file_name|><|fim▁begin|>import React from 'react';
import {Button, Empty, Tooltip} from 'antd';
import PlusSquareIcon from '@ant-design/icons/PlusSquareOutlined';
import {
attributeList,
ClientConn,
ValueSubscriber,
ValueUpdate,
blankFuncDesc,
configDescs,
configList,
FunctionDesc,
PropDesc,
PropGroupDesc,
deepEqual,
mapConfigDesc,
} from '../../../src/core/editor';
import {PropertyEditor} from './PropertyEditor';
import {GroupEditor} from './GroupEditor';
import {MultiSelectComponent, MultiSelectLoader} from './MultiSelectComponent';
import {ExpandIcon, ExpandState} from '../component/Tree';
import {AddCustomPropertyMenu} from './AddCustomProperty';
import {Popup} from '../component/ClickPopup';
import {BlockWidget} from '../block/view/BlockWidget';
import {LazyUpdateComponent, LazyUpdateSubscriber} from '../component/LazyUpdateComponent';
import {OptionalPropertyList} from './OptionalPropertyList';
import {CustomPropertyReorder} from './PropertyReorder';
function descToEditor(conn: ClientConn, paths: string[], funcDesc: FunctionDesc, propDesc: PropDesc) {
return (
<PropertyEditor
key={propDesc.name}
name={propDesc.name}
paths={paths}
conn={conn}
funcDesc={funcDesc}
propDesc={propDesc}
/>
);
}
class BlockLoader extends MultiSelectLoader<PropertyList> {
isListener = new ValueSubscriber({
onUpdate: (response: ValueUpdate) => {
this.conn.watchDesc(response.cache.value, this.onDesc);
},
});
custom: (PropDesc | PropGroupDesc)[];
customListener = new ValueSubscriber({
onUpdate: (response: ValueUpdate) => {
let value = response.cache.value;
if (!Array.isArray(value)) {
value = null;
}
if (!deepEqual(value, this.custom)) {
this.custom = value;
this.parent.forceUpdate();
}
},
});
widget: string = null;
widgetListener = new ValueSubscriber({
onUpdate: (response: ValueUpdate) => {
let value = response.cache.value;
if (typeof value !== 'string') {
value = null;
}
if (this.widget !== value) {
this.widget = value;
this.parent.forceUpdate();
}
},
});
desc: FunctionDesc;
onDesc = (desc: FunctionDesc) => {
if (desc == null) {
this.desc = blankFuncDesc;
} else {
this.desc = desc;
}
this.parent.forceUpdate();
};
init() {
this.isListener.subscribe(this.conn, `${this.path}.#is`, true);
this.customListener.subscribe(this.conn, `${this.path}.#custom`, true);
this.widgetListener.subscribe(this.conn, `${this.path}.@b-widget`, true);
}
destroy() {
this.isListener.unsubscribe();
this.customListener.unsubscribe();
this.widgetListener.unsubscribe();
this.conn.unwatchDesc(this.onDesc);
}
}
function getPropDescName(prop: PropDesc | PropGroupDesc) {
if (prop.type === 'group') {
return `${prop.name}[]`;
} else if (prop.name) {
return prop.name;
}
return '@invalid';
}
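// Deep equality on property definitions: group descs must match in name,
// child count and every child definition; plain descs in name and type.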
function comparePropDesc(a: PropDesc | PropGroupDesc, b: PropDesc | PropGroupDesc) {
if (a.type === 'group') {
if (a.name !== b.name) return false;
if (
!(a as PropGroupDesc).properties ||
!(b as PropGroupDesc).properties ||
(a as PropGroupDesc).properties.length !== (b as PropGroupDesc).properties.length
) {
return false;
}
for (let i = 0; i < (a as PropGroupDesc).properties.length; ++i) {
if (!comparePropDesc((a as PropGroupDesc).properties[i], (b as PropGroupDesc).properties[i])) {
return false;
}
}
} else {
if (a.name !== b.name) return false;
if ((a as PropDesc).type !== (b as PropDesc).type) return false;
}
return true;
}
interface Props {
conn: ClientConn;
paths: string[];
style?: React.CSSProperties;
// minimal is used when PropertyList is shown as popup, like in the ServiceEditor
mode?: 'minimal' | 'subBlock';
}
interface State {
showConfig: boolean;
showAttribute: boolean;
showCustom: boolean;
showAddCustomPopup: boolean;
}
class PropertyDefMerger {
map: Map<string, PropDesc | PropGroupDesc> = null;
isNotEmpty() {
return this.map && this.map.size > 0;
}
add(props: (PropDesc | PropGroupDesc)[]) {
if (this.map) {
// merge with existing propMap, only show properties exist in all desc
let checkedProperties: Set<string> = new Set<string>();
for (let prop of props) {
let name = getPropDescName(prop);
if (this.map.has(name) && !comparePropDesc(this.map.get(name), prop)) {
// hide property if there is a conflict
this.map.delete(name);
} else {
checkedProperties.add(name);
}
}
for (let [name, prop] of this.map) {
if (!checkedProperties.has(name)) {
this.map.delete(name);
}
}
} else {
this.map = new Map<string, PropDesc | PropGroupDesc>();
for (let prop of props) {
let name = getPropDescName(prop);
this.map.set(name, prop);
}
}
}
render(paths: string[], conn: ClientConn, funcDesc: FunctionDesc, isCustom?: boolean) {
let children: React.ReactNode[] = [];
if (this.map) {
for (let [name, prop] of this.map) {
if (prop.type === 'group') {
children.push(
<GroupEditor
key={name}
paths={paths}
conn={conn}
isCustom={isCustom}
funcDesc={funcDesc}
groupDesc={prop as PropGroupDesc}
/>
);
} else if (prop.name) {
children.push(
<PropertyEditor
key={name}
name={name}
paths={paths}
conn={conn}
isCustom={isCustom}
funcDesc={funcDesc}
propDesc={prop as PropDesc}
reorder={isCustom ? CustomPropertyReorder : null}
/>
);
}
}
}
return children;
}
remove(name: string) {
if (this.map) {
this.map.delete(name);
}
}
}
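// Usage sketch: add() the property list of every selected block; entries whose
// name or definition conflict between blocks are dropped, so render() only
// produces editors that are valid for the whole selection.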
export class PropertyList extends MultiSelectComponent<Props, State, BlockLoader> {
constructor(props: Readonly<Props>) {
super(props);
this.state = {showConfig: false, showAttribute: false, showCustom: true, showAddCustomPopup: false};
this.updateLoaders(props.paths);
}
<|fim▁hole|> return new BlockLoader(path, this);
}
onShowCustomClick = () => {
this.safeSetState({showCustom: !this.state.showCustom});
};
onShowConfigClick = () => {
this.safeSetState({showConfig: !this.state.showConfig});
};
onShowAttributeClick = () => {
this.safeSetState({showAttribute: !this.state.showAttribute});
};
onAddCustomPopup = (visible: boolean) => {
this.safeSetState({showAddCustomPopup: visible});
};
onAddCustom = (desc: PropDesc | PropGroupDesc) => {
let {conn} = this.props;
for (let [path, subscriber] of this.loaders) {
conn.addCustomProp(path, desc);
}
this.onAddCustomPopup(false);
};
renderImpl() {
let {conn, paths, style, mode} = this.props;
let {showConfig, showAttribute, showCustom, showAddCustomPopup} = this.state;
let descChecked: Set<string> = new Set<string>();
let propMerger: PropertyDefMerger = new PropertyDefMerger();
let configMerger: PropertyDefMerger = new PropertyDefMerger();
let customMerger: PropertyDefMerger = new PropertyDefMerger();
let isEmpty = true;
let optionalDescs = new Set<FunctionDesc>();
for (let [path, subscriber] of this.loaders) {
let desc = subscriber.desc;
if (desc) {
if (isEmpty) {
isEmpty = false;
}
if (optionalDescs) {
if (desc.optional) {
optionalDescs.add(desc);
} else if (desc.base && (desc = conn.watchDesc(desc.base)) /*set value and convert to bool*/) {
optionalDescs.add(desc);
} else {
          // no need for optional properties
optionalDescs = null;
}
}
} else {
isEmpty = true;
break;
}
}
if (isEmpty) {
// nothing selected
return (
<div className="ticl-property-list" style={style}>
<Empty image={Empty.PRESENTED_IMAGE_SIMPLE} />
</div>
);
}
let baseDesc = conn.getCommonBaseFunc(optionalDescs);
for (let [path, subscriber] of this.loaders) {
if (subscriber.desc) {
if (!descChecked.has(subscriber.desc.name)) {
descChecked.add(subscriber.desc.name);
propMerger.add(subscriber.desc.properties);
}
} else {
// properties not ready
propMerger.map = null;
break;
}
}
if (mode === 'subBlock') {
propMerger.remove('#output');
}
let firstLoader: BlockLoader = this.loaders.entries().next().value[1];
let funcDesc = firstLoader.desc;
if (!funcDesc) {
funcDesc = blankFuncDesc;
}
let children = propMerger.render(paths, conn, funcDesc);
if (mode !== 'minimal') {
// merge #config properties
let configChildren: React.ReactNode[];
if (showConfig) {
for (let [path, subscriber] of this.loaders) {
if (subscriber.desc) {
configMerger.add(mapConfigDesc(subscriber.desc.configs) || configList);
} else {
// properties not ready
configMerger.map = null;
break;
}
}
if (configMerger.isNotEmpty()) {
configChildren = configMerger.render(paths, conn, funcDesc, true);
}
}
// merge #custom properties
let customChildren: React.ReactNode[];
for (let [path, subscriber] of this.loaders) {
if (subscriber.custom) {
customMerger.add(subscriber.custom);
} else {
// properties not ready
customMerger.map = null;
break;
}
}
if (customMerger.isNotEmpty() && showCustom) {
customChildren = customMerger.render(paths, conn, funcDesc, true);
}
let allowAttribute = mode == null && paths.length === 1;
let customExpand: ExpandState = 'empty';
if (customMerger.isNotEmpty()) {
customExpand = showCustom ? 'opened' : 'closed';
}
return (
<div className="ticl-property-list" style={style}>
<PropertyEditor name="#is" paths={paths} conn={conn} funcDesc={funcDesc} propDesc={configDescs['#is']} />
{children.length ? (
<div className="ticl-property-divider">
<div className="ticl-h-line" />
</div>
) : null}
{children}
{baseDesc ? <OptionalPropertyList conn={conn} paths={paths} funcDesc={baseDesc} /> : null}
<div className="ticl-property-divider">
<div className="ticl-h-line" style={{maxWidth: '16px'}} />
<ExpandIcon opened={showConfig ? 'opened' : 'closed'} onClick={this.onShowConfigClick} />
<span>config</span>
<div className="ticl-h-line" />
</div>
{configChildren}
{allowAttribute ? (
<div className="ticl-property-divider">
<div className="ticl-h-line" style={{maxWidth: '16px'}} />
<ExpandIcon opened={showAttribute ? 'opened' : 'closed'} onClick={this.onShowAttributeClick} />
<span>block</span>
<div className="ticl-h-line" />
</div>
) : null}
{allowAttribute && showAttribute ? (
<PropertyAttributeList conn={conn} paths={paths} funcDesc={funcDesc} />
) : null}
<div className="ticl-property-divider">
<div className="ticl-h-line" style={{maxWidth: '16px'}} />
<ExpandIcon opened={customExpand} onClick={this.onShowCustomClick} />
<span>custom</span>
<Popup
popupVisible={showAddCustomPopup}
onPopupVisibleChange={this.onAddCustomPopup}
popup={<AddCustomPropertyMenu conn={conn} onAddProperty={this.onAddCustom} />}
>
<Button className="ticl-icon-btn" shape="circle" tabIndex={-1} icon={<PlusSquareIcon />} />
</Popup>
<div className="ticl-h-line" />
</div>
{customChildren}
</div>
);
} else {
// minimal block used by Service Editor
return (
<div className="ticl-property-list" style={style}>
<PropertyEditor
name="#is"
paths={paths}
conn={conn}
funcDesc={funcDesc}
propDesc={configDescs['#is(readonly)']}
/>
<div className="ticl-property-divider">
<div className="ticl-h-line" />
</div>
{children}
</div>
);
}
}
}
interface PropertyAttributeProps {
conn: ClientConn;
paths: string[];
funcDesc: FunctionDesc;
}
class PropertyAttributeList extends LazyUpdateComponent<PropertyAttributeProps, any> {
widgetListener = new LazyUpdateSubscriber(this);
_subscribingPath: string;
updatePaths(paths: string[]) {
if (paths[0] === this._subscribingPath) {
return;
}
this._subscribingPath = paths[0];
const {conn} = this.props;
this.widgetListener.subscribe(conn, `${this._subscribingPath}.@b-widget`);
}
constructor(props: PropertyAttributeProps) {
super(props);
this.updatePaths(props.paths);
}
renderImpl() {
let {conn, paths, funcDesc} = this.props;
this.updatePaths(paths);
let attributeChildren = [];
for (let attributeDesc of attributeList) {
attributeChildren.push(descToEditor(conn, paths, funcDesc, attributeDesc));
}
attributeChildren.push(descToEditor(conn, paths, funcDesc, BlockWidget.widgetDesc));
let widget = BlockWidget.get(this.widgetListener.value);
if (widget) {
for (let propDesc of widget.viewProperties) {
attributeChildren.push(descToEditor(conn, paths, funcDesc, propDesc as PropDesc));
}
}
return attributeChildren;
}
componentWillUnmount() {
this.widgetListener.unsubscribe();
super.componentWillUnmount();
}
}<|fim▁end|> | createLoader(path: string) { |
<|file_name|>source_transformer.py<|end_file_name|><|fim▁begin|>import command_line
import ast
import os
import traceback
from operator import attrgetter
import settings
from execution_tree_builder import build_execution_tree
# TODO move classes to separate files
class DataCollectorCall(object):
def __init__(self, var_name="", indentation=0, line_position=0, need_stacktrace=False, is_return_operator=False):
self.collected_variable = var_name
self.indentation = indentation
self.line_position = line_position
self.need_stacktrace = need_stacktrace
self.is_return_operator = is_return_operator
class SourceCodeInfo(object):
    def __init__(self, function_calls=None, function_declarations=None, statements=None, source_code_string="", return_calls=None):
        # None defaults avoid sharing one mutable list between instances.
        self.function_calls = function_calls if function_calls is not None else []
        self.function_declarations = function_declarations if function_declarations is not None else []
        self.statements = statements if statements is not None else []
        self.source_code_string = source_code_string
        self.return_calls = return_calls if return_calls is not None else []
class FunctionCall(object):
def __init__(self, func_name="", arguments=[], line_position=0, indentation=0, parent_function=""):
self.func_name = func_name
self.arguments = arguments
self.line_position = line_position
self.indentation = indentation
self.parent_function = parent_function
def __str__(self):
return self.func_name + " " + str(self.line_position)
class FunctionDeclaration(object):
def __init__(self, func_name="", arguments=[], start_position=0, end_position=0):
self.name = func_name
self.arguments = arguments
self.start_position = start_position
self.end_position = end_position
def __str__(self):
return self.name + " " + str(self.arguments) + " " + str(self.start_position) + " " + str(self.end_position)
class ReturnCall(object):
def __init__(self, line_position = 0, indentation = 0):
self.line_position = line_position
self.indentation = indentation
<|fim▁hole|> def __init__(self, destination_var_name="", subscript_key="", line_position=0, indentation=0):
self.destination_var_name = destination_var_name
self.subscript_key = subscript_key
self.line_position = line_position
self.indentation = indentation
def __str__(self):
return self.destination_var_name + " " + str(self.line_position)
class VariableAsFunction(object):
def __init__(self):
self.var_name = ""
self.type = "" # ???
self.dependencies = ()
### Sample of the modification of existing source code
### http://stackoverflow.com/questions/768634/parse-a-py-file-read-the-ast-modify-it-then-write-back-the-modified-source-c
# TODO move process functions to separate file
def process_assign_node(node):
statement = Statement()
if isinstance(node, ast.AugAssign):
statement.destination_var_name = node.target.id
statement.line_position = node.target.lineno
statement.indentation = node.target.col_offset
else:
for target in node.targets:
if isinstance(target, ast.Name):
statement.destination_var_name = target.id
statement.line_position = target.lineno
statement.indentation = target.col_offset
elif isinstance(target, ast.Subscript):
statement.destination_var_name = target.value.id
statement.subscript_key = target.slice.value.id
statement.line_position = target.lineno
statement.indentation = target.col_offset
if isinstance(node.value, ast.List):
for list_item in node.value.elts:
if is_value_type(list_item):
print "Value type"
else:
print "Other type"
elif isinstance(node.value, ast.BinOp):
print "Binary operation"
process_operand(node.value.left)
process_operand(node.value.right)
elif isinstance(node.value, ast.Subscript):
print "Subscript assign "
elif is_value_type(node.value):
print ""
else:
print "Unhandled assign type"
return statement
def process_operand(operand):
if isinstance(operand, ast.Num):
print "Operand is a number."
elif isinstance(operand, ast.Call):
print "Operand is function call."
else:
print "Unhandled operand's processing."
def is_value_type(item):
# TODO: extend with
return isinstance(item, ast.Num)
def process_return_call_node(node):
return_call = ReturnCall(line_position=node.lineno, indentation=node.col_offset)
return return_call
def process_func_call_node(node):
function_call = FunctionCall()
items = []
for arg in node.args:
# ast.Name
if isinstance(arg, ast.Name):
items.append(arg.id)
function_call.func_name = node.func.id
function_call.arguments = items
function_call.line_position = node.lineno
function_call.indentation = node.col_offset
return function_call
def process_func_declaration_node(node):
declaration = FunctionDeclaration()
function_args = []
for arg in node.args.args:
# ast.Name
function_args.append(arg.id)
declaration.name = node.name
    declaration.arguments = function_args  # keep the attribute name consistent with __init__/__str__
declaration.start_position = node.lineno
for element in node.body:
if element.lineno > declaration.end_position:
declaration.end_position = element.lineno
return declaration
def put_data_collector(variable, line_position):
print variable + " " + str(line_position)
def generate_indentation(size):
return " " * size
def build_data_collectors(source_code_info):
"""
Build structures with arguments for generating file write capturing calls
"""
file_write_calls = []
for statement in source_code_info.statements:
line_position = statement.line_position + 1
data_collector = DataCollectorCall(statement.destination_var_name, statement.indentation, line_position)
file_write_calls.append(data_collector)
for function_declaration in source_code_info.function_declarations:
        for argument in function_declaration.arguments:
line_position = function_declaration.start_position + 1
data_collector = DataCollectorCall(argument, settings.DEFAULT_INDENT_SIZE, line_position, True)
file_write_calls.append(data_collector)
for return_call in source_code_info.return_calls:
data_collector = DataCollectorCall(indentation=return_call.indentation,
line_position=return_call.line_position - 1,
is_return_operator=True,
need_stacktrace=True)
file_write_calls.append(data_collector)
file_write_calls.sort(key=attrgetter('line_position'))
return file_write_calls
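# The collectors come out sorted by line_position, which lets
# apply_data_collectors() weave them into the source in one forward pass.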
def generate_data_collector_call(data_collector_call, descriptor_name):
result_write_call = ""
indentation = generate_indentation(data_collector_call.indentation)
if data_collector_call.need_stacktrace:
stacktrace_type = settings.META_MARK_FUNC_CALL_STACKTRACE
if data_collector_call.is_return_operator == True:
stacktrace_type = settings.META_MARK_RETURN_STACKTRACE
file_write_call_string = "{}.write(\"{}\" + str(traceback.extract_stack()) + \"\\n\")\n".format(descriptor_name,
stacktrace_type)
stacktrace_snapshot_call = indentation + file_write_call_string
result_write_call += stacktrace_snapshot_call
if data_collector_call.is_return_operator == False:
var_name = data_collector_call.collected_variable
file_write_call_string = "{}.write(\"{} \" + \"{} = \" + str({}) + \"\\n\")\n".format(descriptor_name,
settings.META_MARK_VARCHANGE,
var_name, var_name)
var_change_write_call = indentation + file_write_call_string
result_write_call += var_change_write_call
return result_write_call
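# Example of one emitted collector line (the marker text depends on settings):
#     fd.write("<VARCHANGE> " + "x = " + str(x) + "\n")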
def apply_data_collectors(source_code_info):
data_collectors_info = build_data_collectors(source_code_info)
result_code = settings.FILE_DESCRIPTOR_NAME + " = open(\"" + settings.COLLECTED_DATA_FILE + "\", \"w\")\n"
line_counter = 1
code_lines = source_code_info.source_code_string.split("\n")
if len(data_collectors_info) > 0:
current_data_collector = data_collectors_info[0]
data_collectors_info.remove(current_data_collector)
for code_line in code_lines:
while current_data_collector is not None and current_data_collector.line_position == line_counter:
result_code += "\n" + generate_data_collector_call(current_data_collector, settings.FILE_DESCRIPTOR_NAME)
current_data_collector = None
if len(data_collectors_info) > 0:
current_data_collector = data_collectors_info[0]
data_collectors_info.remove(current_data_collector)
result_code = result_code + "\n" + code_line
line_counter += 1
result_code = "{}\n{}{}".format(result_code, settings.FILE_DESCRIPTOR_NAME, ".close()")
return result_code<|fim▁end|> |
class Statement(object): |
<|file_name|>translate_test.py<|end_file_name|><|fim▁begin|>"""Translate generators test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tarfile
import tensorflow as tf
from data_generators import text_problems
from data_generators import translate
class TranslateTest(tf.test.TestCase):
DATASETS = [
["data1.tgz", ("train1.en", "train1.de")],
["data2.tgz", ("train2.en", "train2.de")],
["data3.tgz", ("train3.en", "train3.de")],
]
@classmethod
def setUpClass(cls):
tmp_dir = tf.test.get_temp_dir()
compressed_dir = os.path.join(tmp_dir, "compressed")
shutil.rmtree(tmp_dir)
tf.gfile.MakeDirs(compressed_dir)
en_data = [str(i) for i in range(10, 40)]
de_data = [str(i) for i in range(100, 130)]
data = list(zip(en_data, de_data))<|fim▁hole|> os.path.join(compressed_dir, name) for name in dataset[1]
]
with tf.gfile.Open(en_file, "w") as en_f:
with tf.gfile.Open(de_file, "w") as de_f:
start = i * 10
end = start + 10
for en_line, de_line in data[start:end]:
en_f.write(en_line)
en_f.write("\n")
de_f.write(de_line)
de_f.write("\n")
with tarfile.open(os.path.join(tmp_dir, tar_file), "w:gz") as tar_f:
tar_f.add(en_file, os.path.basename(en_file))
tar_f.add(de_file, os.path.basename(de_file))
cls.tmp_dir = tmp_dir
cls.data = data
def testCompileData(self):
filename = "out"
filepath = os.path.join(self.tmp_dir, filename)
translate.compile_data(self.tmp_dir, self.DATASETS, filename)
count = 0
for i, example in enumerate(
text_problems.text2text_txt_iterator(filepath + ".lang1",
filepath + ".lang2")):
expected = self.data[i]
self.assertEqual(list(expected), [example["inputs"], example["targets"]])
count += 1
self.assertEqual(count, len(self.data))
if __name__ == "__main__":
tf.test.main()<|fim▁end|> |
for i, dataset in enumerate(cls.DATASETS):
tar_file = dataset[0]
en_file, de_file = [ |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>"""Tests for Airly."""<|fim▁end|> | |
<|file_name|>ui_pxmarkerdialog.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pxmarkerdialog.ui'
#
# Created by: PyQt5 UI code generator 5.8.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_PxMarkerDialog(object):
def setupUi(self, PxMarkerDialog):
PxMarkerDialog.setObjectName("PxMarkerDialog")
PxMarkerDialog.resize(400, 300)
self.btn_confirm_box = QtWidgets.QDialogButtonBox(PxMarkerDialog)
self.btn_confirm_box.setGeometry(QtCore.QRect(290, 20, 81, 241))
self.btn_confirm_box.setOrientation(QtCore.Qt.Vertical)
self.btn_confirm_box.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.btn_confirm_box.setObjectName("btn_confirm_box")
self.pxmarker_table_widget = QtWidgets.QTableWidget(PxMarkerDialog)
self.pxmarker_table_widget.setGeometry(QtCore.QRect(10, 20, 271, 261))<|fim▁hole|> self.pxmarker_table_widget.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.pxmarker_table_widget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.pxmarker_table_widget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.pxmarker_table_widget.setHorizontalHeaderItem(2, item)
self.pxmarker_table_widget.horizontalHeader().setDefaultSectionSize(50)
self.pxmarker_table_widget.horizontalHeader().setMinimumSectionSize(40)
self.pxmarker_table_widget.horizontalHeader().setStretchLastSection(True)
self.pxmarker_table_widget.verticalHeader().setVisible(False)
self.pxmarker_table_widget.verticalHeader().setHighlightSections(False)
self.retranslateUi(PxMarkerDialog)
self.btn_confirm_box.accepted.connect(PxMarkerDialog.accept)
self.btn_confirm_box.rejected.connect(PxMarkerDialog.reject)
QtCore.QMetaObject.connectSlotsByName(PxMarkerDialog)
def retranslateUi(self, PxMarkerDialog):
_translate = QtCore.QCoreApplication.translate
PxMarkerDialog.setWindowTitle(_translate("PxMarkerDialog", "Edit Pixel Markers"))
item = self.pxmarker_table_widget.horizontalHeaderItem(0)
item.setText(_translate("PxMarkerDialog", "Class"))
item = self.pxmarker_table_widget.horizontalHeaderItem(1)
item.setText(_translate("PxMarkerDialog", "Color"))
item = self.pxmarker_table_widget.horizontalHeaderItem(2)
item.setText(_translate("PxMarkerDialog", "Feature"))<|fim▁end|> | self.pxmarker_table_widget.setEditTriggers(QtWidgets.QAbstractItemView.DoubleClicked|QtWidgets.QAbstractItemView.EditKeyPressed)
self.pxmarker_table_widget.setObjectName("pxmarker_table_widget")
self.pxmarker_table_widget.setColumnCount(3) |
<|file_name|>SinusGenerator.cpp<|end_file_name|><|fim▁begin|>/*
* Project Maudio
* Copyright (C) 2015 Martin Schwarz
* See LICENSE.txt for the full license
*/
#include "core/audiosource/SinusGenerator.hpp"
#include "core/util/AudioException.hpp"
#include "core/util/Util.hpp"
#include <cmath>
namespace maudio{
SinusGenerator::SinusGenerator(){
mAudioInfo.mFileInfo.Title = "Sinus Test Generator from Maudio";
mFreq.reset(new KeyableFloatProperty("Frequency", 1000));
mProperties.add(mFreq);
mSamplerate.reset(new UIntProperty("Samplerate", 44100));
mProperties.add(mSamplerate);<|fim▁hole|> mAudioInfo.Channels = mChannels->get();
mAudioInfo.Offset = 0;
mAudioInfo.Samplerate = mSamplerate->get();
mAudioInfo.Samples = -1;
}
SinusGenerator::~SinusGenerator(){
}
AudioBuffer SinusGenerator::get(unsigned long pos, unsigned int length) noexcept{
mAudioInfo.Channels = mChannels->get();
mAudioInfo.Samplerate = mSamplerate->get();
AudioBuffer ret(mAudioInfo.Channels, length, pos, mAudioInfo.Samplerate);
for(unsigned int i = 0; i < length; i++){
Sample tmp(mAudioInfo.Channels);
float index = pos + i;
for(unsigned int j = 0; j < mAudioInfo.Channels; j++){
tmp.set(sin(mFreq->get(PositionToSeconds((pos + i), mAudioInfo.Samplerate))
* index * (2 * M_PI) / mAudioInfo.Samplerate), j);
}
ret.set(tmp, i);
}
return ret;
}
AudioInfo SinusGenerator::getInfo() noexcept{
mAudioInfo.Channels = mChannels->get();
mAudioInfo.Samplerate = mSamplerate->get();
return mAudioInfo;
}
bool SinusGenerator::checkIfCompatible(std::shared_ptr<Node> node, int slot){
return true;
}
void SinusGenerator::readConfig(const Config &conf){
return;
}
void SinusGenerator::setFrequency(float freq){
mFreq->setKey(freq, 0);
}
void SinusGenerator::setSamplerate(unsigned int samplerate){
mSamplerate->set(samplerate);
}
void SinusGenerator::setChannels(unsigned int channels){
mChannels->set(channels);
}
} // maudio<|fim▁end|> | mChannels.reset(new UIntProperty("Channels", 1));
mProperties.add(mChannels);
|
<|file_name|>partial_guild.rs<|end_file_name|><|fim▁begin|>use super::super::utils::{deserialize_emojis, deserialize_roles};
use ::model::*;
#[cfg(feature="model")]
use ::builder::{EditGuild, EditMember, EditRole};
/// Partial information about a [`Guild`]. This does not include information
/// like member data.
///
/// [`Guild`]: struct.Guild.html
#[derive(Clone, Debug, Deserialize)]
pub struct PartialGuild {
pub id: GuildId,
pub afk_channel_id: Option<ChannelId>,
pub afk_timeout: u64,
pub default_message_notifications: u64,
pub embed_channel_id: Option<ChannelId>,
pub embed_enabled: bool,
#[serde(deserialize_with="deserialize_emojis")]
pub emojis: HashMap<EmojiId, Emoji>,
pub features: Vec<Feature>,
pub icon: Option<String>,
pub mfa_level: u64,
pub name: String,
pub owner_id: UserId,
pub region: String,
#[serde(deserialize_with="deserialize_roles")]
pub roles: HashMap<RoleId, Role>,
pub splash: Option<String>,
pub verification_level: VerificationLevel,
}
#[cfg(feature="model")]
impl PartialGuild {
/// Ban a [`User`] from the guild. All messages by the
/// user within the last given number of days given will be deleted. This
/// may be a range between `0` and `7`.
///
/// **Note**: Requires the [Ban Members] permission.
///
/// # Examples
///
/// Ban a member and remove all messages they've sent in the last 4 days:
///
/// ```rust,ignore
/// // assumes a `user` and `guild` have already been bound
/// let _ = guild.ban(user, 4);
/// ```
///
/// # Errors
///
/// Returns a [`ModelError::DeleteMessageDaysAmount`] if the number of
/// days' worth of messages to delete is over the maximum.
///
/// [`ModelError::DeleteMessageDaysAmount`]: enum.ModelError.html#variant.DeleteMessageDaysAmount
/// [`User`]: struct.User.html
/// [Ban Members]: permissions/constant.BAN_MEMBERS.html
pub fn ban<U: Into<UserId>>(&self, user: U, delete_message_days: u8) -> Result<()> {
if delete_message_days > 7 {
return Err(Error::Model(ModelError::DeleteMessageDaysAmount(delete_message_days)));
}
self.id.ban(user, delete_message_days)
}
/// Gets a list of the guild's bans.
///
/// Requires the [Ban Members] permission.
///
/// [Ban Members]: permissions/constant.BAN_MEMBERS.html
#[inline]
pub fn bans(&self) -> Result<Vec<Ban>> {
self.id.bans()
}
/// Gets all of the guild's channels over the REST API.
///
/// [`Guild`]: struct.Guild.html
#[inline]
pub fn channels(&self) -> Result<HashMap<ChannelId, GuildChannel>> {
self.id.channels()
}
/// Creates a [`GuildChannel`] in the guild.
///
/// Refer to [`http::create_channel`] for more information.
///
/// Requires the [Manage Channels] permission.
///
/// # Examples
///
/// Create a voice channel in a guild with the name `test`:
///
/// ```rust,ignore
/// use serenity::model::ChannelType;
///
/// guild.create_channel("test", ChannelType::Voice);
/// ```
///
/// [`GuildChannel`]: struct.GuildChannel.html
/// [`http::create_channel`]: ../http/fn.create_channel.html
/// [Manage Channels]: permissions/constant.MANAGE_CHANNELS.html
#[inline]
pub fn create_channel(&self, name: &str, kind: ChannelType) -> Result<GuildChannel> {
self.id.create_channel(name, kind)
}
/// Creates an emoji in the guild with a name and base64-encoded image.
///
/// Refer to the documentation for [`Guild::create_emoji`] for more
/// information.
///
/// Requires the [Manage Emojis] permission.
///
/// # Examples
///
/// See the [`EditProfile::avatar`] example for an in-depth example as to
/// how to read an image from the filesystem and encode it as base64. Most
/// of the example can be applied similarly for this method.
///
/// [`EditProfile::avatar`]: ../builder/struct.EditProfile.html#method.avatar
/// [`Guild::create_emoji`]: struct.Guild.html#method.create_emoji
/// [`utils::read_image`]: ../utils/fn.read_image.html
/// [Manage Emojis]: permissions/constant.MANAGE_EMOJIS.html
#[inline]
pub fn create_emoji(&self, name: &str, image: &str) -> Result<Emoji> {
self.id.create_emoji(name, image)
}
/// Creates an integration for the guild.
///
/// Requires the [Manage Guild] permission.
///
/// [Manage Guild]: permissions/constant.MANAGE_GUILD.html
#[inline]
pub fn create_integration<I>(&self, integration_id: I, kind: &str) -> Result<()>
where I: Into<IntegrationId> {
self.id.create_integration(integration_id, kind)
}
/// Creates a new role in the guild with the data set, if any.
///
/// See the documentation for [`Guild::create_role`] on how to use this.
///
/// **Note**: Requires the [Manage Roles] permission.
///
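/// # Examples
///
/// A minimal sketch (the role name is a placeholder; assumes a `guild`
/// binding):
///
/// ```rust,ignore
/// // assumes a `guild` has already been bound
/// let role = guild.create_role(|r| r.hoist(true).name("role"));
/// ```
///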
/// # Errors
///
/// If the `cache` is enabled, returns a [`ModelError::InvalidPermissions`]
/// if the current user does not have permission to manage roles.
///
/// [`ModelError::InvalidPermissions`]: enum.ModelError.html#variant.InvalidPermissions
/// [`Guild::create_role`]: struct.Guild.html#method.create_role
/// [Manage Roles]: permissions/constant.MANAGE_ROLES.html
#[inline]
pub fn create_role<F: FnOnce(EditRole) -> EditRole>(&self, f: F) -> Result<Role> {
self.id.create_role(f)
}
/// Deletes the current guild if the current user is the owner of the
/// guild.
///
/// **Note**: Requires the current user to be the owner of the guild.
#[inline]
pub fn delete(&self) -> Result<PartialGuild> {
self.id.delete()
}
/// Deletes an [`Emoji`] from the guild.
///
/// Requires the [Manage Emojis] permission.
///
/// [`Emoji`]: struct.Emoji.html
/// [Manage Emojis]: permissions/constant.MANAGE_EMOJIS.html
#[inline]
pub fn delete_emoji<E: Into<EmojiId>>(&self, emoji_id: E) -> Result<()> {
self.id.delete_emoji(emoji_id)
}
/// Deletes an integration by Id from the guild.
///
/// Requires the [Manage Guild] permission.
///
/// [Manage Guild]: permissions/constant.MANAGE_GUILD.html
#[inline]
pub fn delete_integration<I: Into<IntegrationId>>(&self, integration_id: I) -> Result<()> {
self.id.delete_integration(integration_id)
}
/// Deletes a [`Role`] by Id from the guild.
///
/// Also see [`Role::delete`] if you have the `cache` and `methods` features
/// enabled.
///
/// Requires the [Manage Roles] permission.
///
/// [`Role`]: struct.Role.html
/// [`Role::delete`]: struct.Role.html#method.delete
/// [Manage Roles]: permissions/constant.MANAGE_ROLES.html
#[inline]
pub fn delete_role<R: Into<RoleId>>(&self, role_id: R) -> Result<()> {
self.id.delete_role(role_id)
}
/// Edits the current guild with new data where specified.
///
/// **Note**: Requires the current user to have the [Manage Guild]
/// permission.
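///
/// # Examples
///
/// Rename the guild (a minimal sketch; the new name is a placeholder and a
/// mutable `guild` binding is assumed):
///
/// ```rust,ignore
/// // assumes a mutable `guild` has already been bound
/// guild.edit(|g| g.name("New Guild Name"));
/// ```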
///
/// [`Context::edit_guild`]: ../client/struct.Context.html#method.edit_guild
/// [Manage Guild]: permissions/constant.MANAGE_GUILD.html
pub fn edit<F>(&mut self, f: F) -> Result<()>
where F: FnOnce(EditGuild) -> EditGuild {
match self.id.edit(f) {
Ok(guild) => {
self.afk_channel_id = guild.afk_channel_id;
self.afk_timeout = guild.afk_timeout;
self.default_message_notifications = guild.default_message_notifications;
self.emojis = guild.emojis;
self.features = guild.features;
self.icon = guild.icon;
self.mfa_level = guild.mfa_level;
self.name = guild.name;
self.owner_id = guild.owner_id;
self.region = guild.region;
self.roles = guild.roles;
self.splash = guild.splash;
self.verification_level = guild.verification_level;
Ok(())
},
Err(why) => Err(why),
}
}
/// Edits an [`Emoji`]'s name in the guild.
///
/// Also see [`Emoji::edit`] if you have the `cache` and `methods` features
/// enabled.
///
/// Requires the [Manage Emojis] permission.
///
/// [`Emoji`]: struct.Emoji.html
/// [`Emoji::edit`]: struct.Emoji.html#method.edit
/// [Manage Emojis]: permissions/constant.MANAGE_EMOJIS.html
#[inline]
pub fn edit_emoji<E: Into<EmojiId>>(&self, emoji_id: E, name: &str) -> Result<Emoji> {
self.id.edit_emoji(emoji_id, name)
}
/// Edits the properties of a member of the guild, such as muting or
/// nicknaming them.
///
/// Refer to `EditMember`'s documentation for a full list of methods and
/// permission restrictions.
///
/// # Examples
///
/// Mute a member and set their roles to just one role with a predefined Id:
///
/// ```rust,ignore
/// use serenity::model::GuildId;
///
/// GuildId(7).edit_member(user_id, |m| m.mute(true).roles(&vec![role_id]));
/// ```
#[inline]
pub fn edit_member<F, U>(&self, user_id: U, f: F) -> Result<()>
where F: FnOnce(EditMember) -> EditMember, U: Into<UserId> {
self.id.edit_member(user_id, f)
}
/// Edits the current user's nickname for the guild.
///
/// Pass `None` to reset the nickname.
///
/// **Note**: Requires the [Change Nickname] permission.
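///
/// # Examples
///
/// Set, then reset, the current user's nickname (a minimal sketch; the
/// nickname is a placeholder and a `guild` binding is assumed):
///
/// ```rust,ignore
/// // assumes a `guild` has already been bound
/// let _ = guild.edit_nickname(Some("librarian"));
/// let _ = guild.edit_nickname(None);
/// ```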
///
/// # Errors
///
/// If the `cache` is enabled, returns a [`ModelError::InvalidPermissions`]
/// if the current user does not have permission to change their own
/// nickname.
///
/// [`ModelError::InvalidPermissions`]: enum.ModelError.html#variant.InvalidPermissions
/// [Change Nickname]: permissions/constant.CHANGE_NICKNAME.html
#[inline]
pub fn edit_nickname(&self, new_nickname: Option<&str>) -> Result<()> {
self.id.edit_nickname(new_nickname)
}
/// Gets an emoji in the guild by Id.
///
/// Requires the [Manage Emojis] permission.
///
/// [Manage Emojis]: permissions/constant.MANAGE_EMOJIS.html
#[inline]
pub fn emoji<E: Into<EmojiId>>(&self, emoji_id: E) -> Result<Emoji> {
self.id.emoji(emoji_id)
}
/// Gets a list of all of the guild's emojis.
///
/// Requires the [Manage Emojis] permission.
///
/// [Manage Emojis]: permissions/constant.MANAGE_EMOJIS.html
#[inline]
pub fn emojis(&self) -> Result<Vec<Emoji>> {
self.id.emojis()
}
/// Gets a partial amount of guild data by its Id.
///
/// Requires that the current user be in the guild.
#[inline]
pub fn get<G: Into<GuildId>>(guild_id: G) -> Result<PartialGuild> {
guild_id.into().get()
}
/// Kicks a [`Member`] from the guild.
///
/// Requires the [Kick Members] permission.
///
/// [`Member`]: struct.Member.html
/// [Kick Members]: permissions/constant.KICK_MEMBERS.html
#[inline]
pub fn kick<U: Into<UserId>>(&self, user_id: U) -> Result<()> {
self.id.kick(user_id)
}
/// Returns a formatted URL of the guild's icon, if the guild has an icon.
pub fn icon_url(&self) -> Option<String> {
self.icon.as_ref().map(|icon|
format!(cdn!("/icons/{}/{}.webp"), self.id, icon))
}
/// Gets all integrations of the guild.
///
/// This performs a request over the REST API.
#[inline]
pub fn integrations(&self) -> Result<Vec<Integration>> {
self.id.integrations()
}
/// Gets all of the guild's invites.
///
/// Requires the [Manage Guild] permission.
///
/// [Manage Guild]: permissions/constant.MANAGE_GUILD.html
#[inline]
pub fn invites(&self) -> Result<Vec<RichInvite>> {
self.id.invites()
}
/// Leaves the guild.
#[inline]
pub fn leave(&self) -> Result<()> {
self.id.leave()
}
/// Gets a user's [`Member`] for the guild by Id.
///
/// [`Guild`]: struct.Guild.html
/// [`Member`]: struct.Member.html
pub fn member<U: Into<UserId>>(&self, user_id: U) -> Result<Member> {
self.id.member(user_id)
}
/// Gets a list of the guild's members.
///
/// Optionally pass in the `limit` to limit the number of results. Maximum
/// value is 1000. Optionally pass in `after` to offset the results by a
/// [`User`]'s Id.
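///
/// # Examples
///
/// Fetch up to 500 members in one page (a minimal sketch; assumes a
/// `guild` binding):
///
/// ```rust,ignore
/// // assumes a `guild` has already been bound
/// let members = guild.members(Some(500), None::<UserId>);
/// ```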
///
/// [`User`]: struct.User.html
pub fn members<U>(&self, limit: Option<u64>, after: Option<U>)
-> Result<Vec<Member>> where U: Into<UserId> {
self.id.members(limit, after)
}
/// Moves a member to a specific voice channel.
///
/// Requires the [Move Members] permission.
///
/// [Move Members]: permissions/constant.MOVE_MEMBERS.html
#[inline]
pub fn move_member<C, U>(&self, user_id: U, channel_id: C) -> Result<()>
where C: Into<ChannelId>, U: Into<UserId> {
self.id.move_member(user_id, channel_id)
}
/// Gets the number of [`Member`]s that would be pruned with the given
/// number of days.
///
/// Requires the [Kick Members] permission.
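///
/// # Examples
///
/// A minimal sketch (`30` is an arbitrary number of days; assumes a
/// `guild` binding):
///
/// ```rust,ignore
/// // assumes a `guild` has already been bound
/// let prune_info = guild.prune_count(30);
/// ```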
///
/// [`Member`]: struct.Member.html
/// [Kick Members]: permissions/constant.KICK_MEMBERS.html
#[inline]
pub fn prune_count(&self, days: u16) -> Result<GuildPrune> {
self.id.prune_count(days)
}
/// Returns the Id of the shard associated with the guild.
///
/// When the cache is enabled this will automatically retrieve the total
/// number of shards.
///
/// **Note**: When the cache is enabled, this function unlocks the cache to
/// retrieve the total number of shards in use. If you already have the
/// total, consider using [`utils::shard_id`].
///
/// [`utils::shard_id`]: ../utils/fn.shard_id.html
#[cfg(all(feature="cache", feature="utils"))]
#[inline]
pub fn shard_id(&self) -> u64 {
self.id.shard_id()
}
<|fim▁hole|> ///
/// When the cache is not enabled, the total number of shards being used
/// will need to be passed.
///
/// # Examples
///
/// Retrieve the Id of the shard for a guild with Id `81384788765712384`,
/// using 17 shards:
///
/// ```rust,ignore
/// use serenity::utils;
///
/// // assumes a `guild` has already been bound
///
/// assert_eq!(guild.shard_id(17), 7);
/// ```
#[cfg(all(feature="utils", not(feature="cache")))]
#[inline]
pub fn shard_id(&self, shard_count: u64) -> u64 {
self.id.shard_id(shard_count)
}
/// Returns the formatted URL of the guild's splash image, if one exists.
pub fn splash_url(&self) -> Option<String> {
self.splash.as_ref().map(|splash|
format!(cdn!("/splashes/{}/{}.webp"), self.id, splash))
}
/// Starts an integration sync for the given integration Id.
///
/// Requires the [Manage Guild] permission.
///
/// [Manage Guild]: permissions/constant.MANAGE_GUILD.html
#[inline]
pub fn start_integration_sync<I: Into<IntegrationId>>(&self, integration_id: I) -> Result<()> {
self.id.start_integration_sync(integration_id)
}
/// Unbans a [`User`] from the guild.
///
/// Requires the [Ban Members] permission.
///
/// [`User`]: struct.User.html
/// [Ban Members]: permissions/constant.BAN_MEMBERS.html
#[inline]
pub fn unban<U: Into<UserId>>(&self, user_id: U) -> Result<()> {
self.id.unban(user_id)
}
/// Retrieves the guild's webhooks.
///
/// **Note**: Requires the [Manage Webhooks] permission.
///
/// [Manage Webhooks]: permissions/constant.MANAGE_WEBHOOKS.html
#[inline]
pub fn webhooks(&self) -> Result<Vec<Webhook>> {
self.id.webhooks()
}
/// Alias of [`bans`].
///
/// [`bans`]: #method.bans
#[deprecated(since="0.1.5", note="Use `bans` instead.")]
#[inline]
pub fn get_bans(&self) -> Result<Vec<Ban>> {
self.bans()
}
/// Alias of [`channels`].
///
/// [`channels`]: #method.channels
#[deprecated(since="0.1.5", note="Use `channels` instead.")]
#[inline]
pub fn get_channels(&self) -> Result<HashMap<ChannelId, GuildChannel>> {
self.channels()
}
/// Alias of [`emoji`].
///
/// [`emoji`]: #method.emoji
#[deprecated(since="0.1.5", note="Use `emoji` instead.")]
#[inline]
pub fn get_emoji<E: Into<EmojiId>>(&self, emoji_id: E) -> Result<Emoji> {
self.emoji(emoji_id)
}
/// Alias of [`emojis`].
///
/// [`emojis`]: #method.emojis
#[deprecated(since="0.1.5", note="Use `emojis` instead.")]
#[inline]
pub fn get_emojis(&self) -> Result<Vec<Emoji>> {
self.emojis()
}
/// Alias of [`integrations`].
///
/// [`integrations`]: #method.integrations
#[deprecated(since="0.1.5", note="Use `integrations` instead.")]
#[inline]
pub fn get_integrations(&self) -> Result<Vec<Integration>> {
self.integrations()
}
/// Alias of [`invites`].
///
/// [`invites`]: #method.invites
#[deprecated(since="0.1.5", note="Use `invites` instead.")]
#[inline]
pub fn get_invites(&self) -> Result<Vec<RichInvite>> {
self.invites()
}
/// Alias of [`member`].
///
/// [`member`]: #method.member
#[deprecated(since="0.1.5", note="Use `member` instead.")]
#[inline]
pub fn get_member<U: Into<UserId>>(&self, user_id: U) -> Result<Member> {
self.member(user_id)
}
/// Alias of [`members`].
///
/// [`members`]: #method.members
#[deprecated(since="0.1.5", note="Use `members` instead.")]
#[inline]
pub fn get_members<U>(&self, limit: Option<u64>, after: Option<U>)
-> Result<Vec<Member>> where U: Into<UserId> {
self.members(limit, after)
}
/// Alias of [`prune_count`].
///
/// [`prune_count`]: #method.prune_count
#[deprecated(since="0.1.5", note="Use `prune_count` instead.")]
#[inline]
pub fn get_prune_count(&self, days: u16) -> Result<GuildPrune> {
self.prune_count(days)
}
/// Alias of [`webhooks`].
///
/// [`webhooks`]: #method.webhooks
#[deprecated(since="0.1.5", note="Use `webhooks` instead.")]
#[inline]
pub fn get_webhooks(&self) -> Result<Vec<Webhook>> {
self.webhooks()
}
}<|fim▁end|> | /// Returns the Id of the shard associated with the guild.
///
/// When the cache is enabled this will automatically retrieve the total
/// number of shards. |
<|file_name|>kgapp.cpp<|end_file_name|><|fim▁begin|>/*
Greeter module for xdm
Copyright (C) 1997, 1998 Steffen Hansen <[email protected]>
Copyright (C) 2000-2003 Oswald Buddenhagen <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "kdm_greet.h"
#include "kdmshutdown.h"
#include "kdmconfig.h"
#include "kgapp.h"
#include "kgreeter.h"
#ifdef XDMCP
# include "kchooser.h"
#endif
#include <kprocess.h>
#include <kcmdlineargs.h>
#include <kcrash.h>
#include <kstandarddirs.h>
#include <ksimpleconfig.h>
#include <qtimer.h>
#include <qcursor.h>
#include <qpalette.h>
#include <stdlib.h> // free(), exit()
#include <unistd.h> // alarm()
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/keysym.h>
#include <X11/cursorfont.h>
extern "C" {
static void
sigAlarm( int )
{
exit( EX_RESERVER_DPY );
}
}
GreeterApp::GreeterApp()
{
pingInterval = _isLocal ? 0 : _pingInterval;
if (pingInterval) {
struct sigaction sa;
sigemptyset( &sa.sa_mask );
sa.sa_flags = 0;
sa.sa_handler = sigAlarm;
sigaction( SIGALRM, &sa, 0 );
alarm( pingInterval * 70 ); // sic! give the "proper" pinger enough time
startTimer( pingInterval * 60000 );
}
}
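// Watchdog for remote displays: timerEvent() below pings the X server once
// per interval, and the alarm() armed above covers the case where the
// process blocks inside Xlib and the timer never fires.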
void
GreeterApp::timerEvent( QTimerEvent * )
{
alarm( 0 );
if (!PingServer( qt_xdisplay() ))
::exit( EX_RESERVER_DPY );
alarm( pingInterval * 70 ); // sic! give the "proper" pinger enough time
}
bool
GreeterApp::x11EventFilter( XEvent * ev )<|fim▁hole|> KeySym sym;
switch (ev->type) {
case FocusIn:
case FocusOut:
// Hack to tell dialogs to take focus when the keyboard is grabbed
ev->xfocus.mode = NotifyNormal;
break;
case KeyPress:
sym = XLookupKeysym( &ev->xkey, 0 );
if (sym != XK_Return && !IsModifierKey( sym ))
emit activity();
break;
case ButtonPress:
emit activity();
/* fall through */
case ButtonRelease:
// Hack to let the RMB work as LMB
if (ev->xbutton.button == 3)
ev->xbutton.button = 1;
/* fall through */
case MotionNotify:
if (ev->xbutton.state & Button3Mask)
ev->xbutton.state = (ev->xbutton.state & ~Button3Mask) | Button1Mask;
break;
}
return false;
}
extern bool kde_have_kipc;
extern "C" {
static int
xIOErr( Display * )
{
exit( EX_RESERVER_DPY );
}
void
kg_main( const char *argv0 )
{
static char *argv[] = { (char *)"kdmgreet", 0 };
KCmdLineArgs::init( 1, argv, *argv, 0, 0, 0, true );
kde_have_kipc = false;
KApplication::disableAutoDcopRegistration();
KCrash::setSafer( true );
GreeterApp app;
XSetIOErrorHandler( xIOErr );
Display *dpy = qt_xdisplay();
if (!_GUIStyle.isEmpty())
app.setStyle( _GUIStyle );
_colorScheme = locate( "data", "kdisplay/color-schemes/" + _colorScheme + ".kcsrc" );
if (!_colorScheme.isEmpty()) {
KSimpleConfig config( _colorScheme, true );
config.setGroup( "Color Scheme" );
app.setPalette( app.createApplicationPalette( &config, 7 ) );
}
app.setFont( _normalFont );
setup_modifiers( dpy, _numLockStatus );
SecureDisplay( dpy );
KProcess *proc = 0;
if (!_grabServer) {
if (_useBackground) {
proc = new KProcess;
*proc << QCString( argv0, strrchr( argv0, '/' ) - argv0 + 2 ) + "krootimage";
*proc << _backgroundCfg;
proc->start();
}
GSendInt( G_SetupDpy );
GRecvInt();
}
GSendInt( G_Ready );
setCursor( dpy, app.desktop()->winId(), XC_left_ptr );
for (;;) {
int rslt, cmd = GRecvInt();
if (cmd == G_ConfShutdown) {
int how = GRecvInt(), uid = GRecvInt();
char *os = GRecvStr();
KDMSlimShutdown::externShutdown( how, os, uid );
if (os)
free( os );
GSendInt( G_Ready );
_autoLoginDelay = 0;
continue;
}
if (cmd == G_ErrorGreet) {
if (KGVerify::handleFailVerify( qApp->desktop()->screen( _greeterScreen ) ))
break;
_autoLoginDelay = 0;
cmd = G_Greet;
}
KProcess *proc2 = 0;
app.setOverrideCursor( Qt::WaitCursor );
FDialog *dialog;
#ifdef XDMCP
if (cmd == G_Choose) {
dialog = new ChooserDlg;
GSendInt( G_Ready ); /* tell chooser to go into async mode */
GRecvInt(); /* ack */
} else
#endif
{
if ((cmd != G_GreetTimed && !_autoLoginAgain) ||
_autoLoginUser.isEmpty())
_autoLoginDelay = 0;
if (_useTheme && !_theme.isEmpty()) {
KThemedGreeter *tgrt;
dialog = tgrt = new KThemedGreeter;
if (!tgrt->isOK()) {
delete tgrt;
dialog = new KStdGreeter;
}
} else
dialog = new KStdGreeter;
if (*_preloader) {
proc2 = new KProcess;
*proc2 << _preloader;
proc2->start();
}
}
app.restoreOverrideCursor();
Debug( "entering event loop\n" );
rslt = dialog->exec();
Debug( "left event loop\n" );
delete dialog;
delete proc2;
#ifdef XDMCP
switch (rslt) {
case ex_greet:
GSendInt( G_DGreet );
continue;
case ex_choose:
GSendInt( G_DChoose );
continue;
default:
break;
}
#endif
break;
}
KGVerify::done();
delete proc;
UnsecureDisplay( dpy );
restore_modifiers();
XSetInputFocus( qt_xdisplay(), PointerRoot, PointerRoot, CurrentTime );
}
} // extern "C"
#include "kgapp.moc"<|fim▁end|> | { |
<|file_name|>test_utils_dataframe.py<|end_file_name|><|fim▁begin|>import numpy as np
import pandas as pd
import pandas.util.testing as tm
import dask.dataframe as dd
from dask.dataframe.utils import (shard_df_on_index, meta_nonempty, make_meta,
raise_on_meta_error)
import pytest
def test_shard_df_on_index():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
result = list(shard_df_on_index(df, [20, 50]))
assert list(result[0].index) == [10]
assert list(result[1].index) == [20, 30, 40]
assert list(result[2].index) == [50, 60]
def test_make_meta():
df = pd.DataFrame({'a': [1, 2, 3], 'b': list('abc'), 'c': [1., 2., 3.]},
index=[10, 20, 30])
# Pandas dataframe
meta = make_meta(df)
assert len(meta) == 0
assert (meta.dtypes == df.dtypes).all()
assert isinstance(meta.index, type(df.index))
# Pandas series
meta = make_meta(df.a)
assert len(meta) == 0
assert meta.dtype == df.a.dtype
assert isinstance(meta.index, type(df.index))
# Pandas index
meta = make_meta(df.index)<|fim▁hole|> assert len(meta) == 0
# Dask object
ddf = dd.from_pandas(df, npartitions=2)
assert make_meta(ddf) is ddf._meta
# Dict
meta = make_meta({'a': 'i8', 'b': 'O', 'c': 'f8'})
assert isinstance(meta, pd.DataFrame)
assert len(meta) == 0
assert (meta.dtypes == df.dtypes).all()
assert isinstance(meta.index, pd.RangeIndex)
# Iterable
meta = make_meta([('a', 'i8'), ('c', 'f8'), ('b', 'O')])
assert (meta.columns == ['a', 'c', 'b']).all()
assert len(meta) == 0
assert (meta.dtypes == df.dtypes[meta.dtypes.index]).all()
assert isinstance(meta.index, pd.RangeIndex)
# Tuple
meta = make_meta(('a', 'i8'))
assert isinstance(meta, pd.Series)
assert len(meta) == 0
assert meta.dtype == 'i8'
assert meta.name == 'a'
# With index
meta = make_meta({'a': 'i8', 'b': 'i4'}, pd.Int64Index([1, 2], name='foo'))
assert isinstance(meta.index, pd.Int64Index)
assert len(meta.index) == 0
meta = make_meta(('a', 'i8'), pd.Int64Index([1, 2], name='foo'))
assert isinstance(meta.index, pd.Int64Index)
assert len(meta.index) == 0
# Numpy scalar
meta = make_meta(np.float64(1.0))
assert isinstance(meta, np.float64)
# Python scalar
meta = make_meta(1.0)
assert isinstance(meta, np.float64)
# Timestamp
x = pd.Timestamp(2000, 1, 1)
meta = make_meta(x)
assert meta is x
# Dtype expressions
meta = make_meta('i8')
assert isinstance(meta, np.int64)
meta = make_meta(float)
assert isinstance(meta, np.dtype(float).type)
meta = make_meta(np.dtype('bool'))
assert isinstance(meta, np.bool_)
assert pytest.raises(TypeError, lambda: make_meta(None))
def test_meta_nonempty():
df1 = pd.DataFrame({'A': pd.Categorical(['Alice', 'Bob', 'Carol']),
'B': list('abc'),
'C': 'bar',
'D': np.float32(1),
'E': np.int32(1),
'F': pd.Timestamp('2016-01-01'),
'G': pd.date_range('2016-01-01', periods=3,
tz='America/New_York'),
'H': pd.Timedelta('1 hours', 'ms'),
'I': np.void(b' ')},
columns=list('DCBAHGFEI'))
df2 = df1.iloc[0:0]
df3 = meta_nonempty(df2)
assert (df3.dtypes == df2.dtypes).all()
assert df3['A'][0] == 'Alice'
assert df3['B'][0] == 'foo'
assert df3['C'][0] == 'foo'
assert df3['D'][0] == np.float32(1)
assert df3['D'][0].dtype == 'f4'
assert df3['E'][0] == np.int32(1)
assert df3['E'][0].dtype == 'i4'
assert df3['F'][0] == pd.Timestamp('1970-01-01 00:00:00')
assert df3['G'][0] == pd.Timestamp('1970-01-01 00:00:00',
tz='America/New_York')
assert df3['H'][0] == pd.Timedelta('1', 'ms')
assert df3['I'][0] == 'foo'
s = meta_nonempty(df2['A'])
assert s.dtype == df2['A'].dtype
assert (df3['A'] == s).all()
def test_meta_duplicated():
df = pd.DataFrame(columns=['A', 'A', 'B'])
res = meta_nonempty(df)
exp = pd.DataFrame([['foo', 'foo', 'foo'],
['foo', 'foo', 'foo']],
index=['a', 'b'],
columns=['A', 'A', 'B'])
tm.assert_frame_equal(res, exp)
def test_meta_nonempty_index():
idx = pd.RangeIndex(1, name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.RangeIndex
assert res.name == idx.name
idx = pd.Int64Index([1], name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.Int64Index
assert res.name == idx.name
idx = pd.Index(['a'], name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.Index
assert res.name == idx.name
idx = pd.DatetimeIndex(['1970-01-01'], freq='d',
tz='America/New_York', name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.DatetimeIndex
assert res.tz == idx.tz
assert res.freq == idx.freq
assert res.name == idx.name
idx = pd.PeriodIndex(['1970-01-01'], freq='d', name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.PeriodIndex
assert res.freq == idx.freq
assert res.name == idx.name
idx = pd.TimedeltaIndex([np.timedelta64(1, 'D')], freq='d', name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.TimedeltaIndex
assert res.freq == idx.freq
assert res.name == idx.name
idx = pd.CategoricalIndex(['a'], ['a', 'b'], ordered=True, name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.CategoricalIndex
assert (res.categories == idx.categories).all()
assert res.ordered == idx.ordered
assert res.name == idx.name
levels = [pd.Int64Index([1], name='a'),
pd.Float64Index([1.0], name='b')]
idx = pd.MultiIndex(levels=levels, labels=[[0], [0]], names=['a', 'b'])
res = meta_nonempty(idx)
assert type(res) is pd.MultiIndex
for idx1, idx2 in zip(idx.levels, res.levels):
assert type(idx1) is type(idx2)
assert idx1.name == idx2.name
assert res.names == idx.names
def test_meta_nonempty_scalar():
meta = meta_nonempty(np.float64(1.0))
assert isinstance(meta, np.float64)
x = pd.Timestamp(2000, 1, 1)
meta = meta_nonempty(x)
assert meta is x
def test_raise_on_meta_error():
try:
with raise_on_meta_error():
raise RuntimeError("Bad stuff")
except Exception as e:
assert e.args[0].startswith("Metadata inference failed.\n")
assert 'RuntimeError' in e.args[0]
try:
with raise_on_meta_error("myfunc"):
raise RuntimeError("Bad stuff")
except Exception as e:
assert e.args[0].startswith("Metadata inference failed in `myfunc`.\n")
assert 'RuntimeError' in e.args[0]<|fim▁end|> | assert isinstance(meta, type(df.index)) |
<|file_name|>video.service.ts<|end_file_name|><|fim▁begin|>import { Injectable } from '@angular/core';
import { Http, Response, Headers, RequestOptions } from '@angular/http';
import { Observable } from 'rxjs/Observable';
// import 'rxjs/add/operator/map';
import 'rxjs/Rx';
import { Config } from './config';
import { UserService } from './user.service';
import { HashService } from './hash.service';
declare var componentHandler: any;
export class VideoCard {
_id: string;
link: string;
title: string;
title0: string;
image: string;
creator: string;
updateat: Date;
longtime: number;
youtubeid: string;
viewcount: number;
keywords: Array<any>;
nowOnTime: string;
isSpecial: boolean;
status: number;
}
export class VideoDetails {
_id: string;
link: string;
title: string;
title0: string;
image: string;
creator: string;
updateat: Date;
longtime: number;
youtubeid: string;
viewcount: number;
keywords: Array<any>;
nowOnTime: string;
isSpecial: boolean;
status: number;
}
@Injectable()
export class VideoService {
keywords: Array<any>;
constructor(private config:Config, private http: Http, private userService: UserService){
this.http.get(this.config.HOST + '/keywords')
.map((res) => { return HashService.decrypt(res); } )
.catch(this.handleError)
.subscribe((keywords: Array<any>) => {
this.keywords = keywords;
}, (err: any) => { console.error(err); });
}
upgradeDom(){
if(componentHandler) componentHandler.upgradeDom();
}
getYoutube(id: string): Observable<VideoCard[]> {
return this.http.get(this.config.HOST + '/youtube/' + id)
.map((res:any) => { return HashService.decrypt(res); } )
.catch(this.handleError);
}
searchVideos(txtSearch: string, meta:any): Observable<VideoCard[]> {
return this.http.get(this.config.HOST + '/video/search?txtSearch=' + txtSearch + '&page=' + meta.page + "&rows=" + meta.rows)
.map((res:any) => { return this.fromNowOn(HashService.decrypt(res)); } )
.catch(this.handleError);
}
getKeywordVideos(keyword: string, meta:any): Observable<VideoCard[]> {
return this.http.get(this.config.HOST + '/video/keyword?keyword=' + keyword + '&page=' + meta.page + "&rows=" + meta.rows)
.map((res:any) => { return HashService.decrypt(res) } )
.catch(this.handleError);
}
getNewestVideos(meta:any): Observable<VideoCard[]> {
return this.http.get(this.config.HOST + '/video/newest?page=' + meta.page + "&rows=" + meta.rows)
.map((res:any) => { return this.fromNowOn(HashService.decrypt(res)); } )
.catch(this.handleError);
}
getMostVideos(meta:any): Observable<VideoCard[]> {
return this.http.get(this.config.HOST + '/video/most?page=' + meta.page + "&rows=" + meta.rows)
.map((res:any) => { return this.fromNowOn(HashService.decrypt(res)); } )
.catch(this.handleError);
}
getHotVideos(meta:any): Observable<VideoCard[]> {
return this.http.get(this.config.HOST + '/video/hot?page=' + meta.page + "&rows=" + meta.rows)
.map((res:any) => { return this.fromNowOn(HashService.decrypt(res)); } )
.catch(this.handleError);
}
getRelateVideos(id: string, keywords: Array<any>, updateat: string, meta: any): Observable<VideoCard[]> {
// Build a comma-separated list of keyword ids.
var s = keywords.map((k: any) => '' + k._id).join(',');
return this.http.get(this.config.HOST + '/video/relate?id=' + id + '&keywords=' + s + '&updateat=' + updateat + '&page=' + meta.page + "&rows=" + meta.rows)
.map((res:any) => { return this.fromNowOn(HashService.decrypt(res)); } )
.catch(this.handleError);
}
getVideo(id: string): Observable<VideoDetails>{
return this.http.get(this.config.HOST + '/video/'+id)
.map((res:any) => { return HashService.decrypt(res); } )
.catch(this.handleError);
}
addVideo(v: any): Observable<VideoCard[]> {
return this.http.post(this.config.HOST + '/video', v, new RequestOptions({ headers: new Headers({ 'Content-Type': 'application/json', 'me': this.userService.currentUser._id }) }))
.map((res:any) => { return HashService.decrypt(res); } )
.catch(this.handleError);
}
updateVideoKeyword(videoId: string, keywordId: string): Observable<Array<any>> {
return this.http.put(this.config.HOST + '/video/' + videoId, {keywordid: keywordId}, new RequestOptions({ headers: new Headers({ 'Content-Type': 'application/json', 'me': this.userService.currentUser._id }) }))
.map((res:any) => { return HashService.decrypt(res); } )
.catch(this.handleError);
}
addFavorite(v: any): Observable<VideoCard[]> {
return this.http.post(this.config.HOST + '/favorite', v, new RequestOptions({ headers: new Headers({ 'Content-Type': 'application/json', 'me': this.userService.currentUser._id }) }))
.map((res:any) => { return HashService.decrypt(res); } )
.catch(this.handleError);
}
updateSpecial(videoId:string, isSpecial:boolean): Observable<VideoCard[]> {
return this.http.put(this.config.HOST + '/video/' + videoId, {isSpecial: isSpecial}, new RequestOptions({ headers: new Headers({ 'Content-Type': 'application/json'}) }))
.map((res:any) => { return HashService.decrypt(res); } )
.catch(this.handleError);
}
updateVideoStatus(videoId:string, status:number){
return this.http.put(this.config.HOST + '/video/' + videoId, {status: status}, new RequestOptions({ headers: new Headers({ 'Content-Type': 'application/json'}) }))
.map((res:any) => { return HashService.decrypt(res); } )
.catch(this.handleError);
}
removeVideo(id: any): Observable<VideoCard[]> {
return this.http.delete(this.config.HOST + '/video/' + id, new RequestOptions({ headers: new Headers({ 'me': this.userService.currentUser._id }) }))
.map((res:any) => { return HashService.decrypt(res); } )
.catch(this.handleError);
}
removeFavorite(id: any): Observable<VideoCard[]> {
return this.http.delete(this.config.HOST + '/favorite/' + id, new RequestOptions({ headers: new Headers({ 'me': this.userService.currentUser._id }) }))
.map((res:any) => { return HashService.decrypt(res); } )
.catch(this.handleError);
}
getMyVideo(): Observable<VideoCard[]> {
return this.http.get(this.config.HOST + '/myvideo', new RequestOptions({ headers: new Headers({ 'me': this.userService.currentUser._id }) }))
.map((res:any) => { return HashService.decrypt(res); } )
.catch(this.handleError);
}
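// Annotates a video (or an array of videos) with a human-readable
// Vietnamese "time ago" string on `nowOnTime`, e.g. "3 giờ trước".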
private fromNowOn(v:any){
if(!v) return v;
var now = new Date();
let handleVideo = (v: any) => {
var t0 = (now.getTime() - new Date(v.updateat).getTime());
var str = '';
var t = Math.floor(t0/1000/60/60/24);
if(t > 0) str = t + ' ngày';
else {
t = Math.floor(t0/1000/60/60);
if(t > 0) str = t + ' giờ';
else {
t = Math.floor(t0/1000/60);<|fim▁hole|> if(t > 0) str = t + ' giây';
}
}
}
v.nowOnTime = str + ' trước';
return v;
};
if(v instanceof Array){
for(var i in v){
v[i] = handleVideo(v[i]);
}
}else{
v = handleVideo(v);
}
return v;
}
private handleError (error: any) {
// In a real world app, we might use a remote logging infrastructure
// We'd also dig deeper into the error to get a better message
let errMsg = (error.message) ? error.message :
error.status ? `${error.status} - ${error.statusText}` : 'Server error';
console.error(errMsg); // log to console instead
return Observable.throw(errMsg);
}
}<|fim▁end|> | if(t > 0) str = t + ' phút';
else {
t = Math.floor(t0/1000); |
<|file_name|>handlers.rs<|end_file_name|><|fim▁begin|>// Handle incoming requests.
extern crate iron;
extern crate redis;
extern crate rustc_serialize;
extern crate hyper;
extern crate url;
extern crate router;
use std::vec::Vec;
use rustc_serialize::json::Json;
use iron::modifiers::Redirect;
use iron::headers::{CacheControl, CacheDirective};
use iron::prelude::*;
use iron::status;
use iron::Url as iUrl;
use hyper::client::Client;
use router::Router;
use redis::{Commands, Value};
use helpers::{setup_redis, fetch, get_status_or, local_redir, set_redis_cache};
use github::schedule_update as schedule_github_update;
// The base URL for our badges. We aren't actually compiling them ourselves,
// but are reusing the great shields.io service.
static BADGE_URL_BASE: &'static str = "https://img.shields.io/badge/";
// Github Finder
// Expand a branch name into the hash, cache the redirect for 5min
// `/github/:user/:repo/badge.svg => /github/:user/:repo/:sha/badge.svg`
pub fn github_finder(req: &mut Request) -> IronResult<Response> {
// Learn the parameters given to the request
let router = req.extensions.get::<Router>().unwrap();
let redis: redis::Connection = setup_redis();
let hyper_client: Client = Client::new();
let user = router.find("user").unwrap();
let repo = router.find("repo").unwrap();
let branch = router.find("branch").unwrap_or("master");
let method = router.find("method").unwrap_or("badge.svg");
// And the cache key we use to keep the map from branch->SHA
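// (e.g. "cached-sha/github/some-user/some-repo:master"; user and repo
// here are hypothetical placeholders)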
let redis_key = format!("cached-sha/github/{0}/{1}:{2}", user, repo, branch);
// Let's see if redis has this key. If it does, redirect the request
// directly
match redis.get(redis_key.to_owned()) {
Ok(Value::Data(sha)) => {
local_redir(&format!("/github/sha/{0}/{1}/{2}/{3}",
user,
repo,
String::from_utf8(sha).unwrap(),
method),
&req.url)
}
// otherwise, we need to look up the current SHA for the branch
_ => {
let github_url = format!("https://api.github.com/repos/{0}/{1}/git/refs/heads/{2}",
user,
repo,
branch);
// Fetch the content API request for the Github URL,
// Parse its JSON and try to find the `SHA`-key.
if let Some(body) = fetch(&hyper_client, &github_url) {<|fim▁hole|> // the request to
set_redis_cache(&redis, &redis_key, &sha);
local_redir(&format!("/github/sha/{0}/{1}/{2}/{3}",
user,
repo,
sha,
method),
&req.url)
} else {
// If we couldn't find the SHA, then there is a problem
// we need to inform the user about. Usually this means
// they did a typo or the content moved – either way, we
// fire a 404 – Not Found.
warn!("{}: SHA not found in JSON: {}", &github_url, &json);
Ok(Response::with((status::NotFound,
format!("Couldn't find on Github {}", &github_url))))
}
} else {
warn!("{}: Couldn't parse Github's JSON response: {}",
&github_url,
&body);
Ok(Response::with((status::InternalServerError,
"Couldn't parse Github's JSON response")))
}
} else {
Ok(Response::with((status::NotFound,
format!("Couldn't find on Github {}", &github_url))))
}
}
}
}
// ## Github Handler
// Handle the request for a status report of a user-repo-sha combination.
// Usually the request ends up here after having been redirected via the
// `github_finder`-handler.
// This request is where the sausage actually gets made.
pub fn github_handler(req: &mut Request) -> IronResult<Response> {
// First extract all the request information
let router = req.extensions.get::<Router>().unwrap();
let redis: redis::Connection = setup_redis();
let user = router.find("user").unwrap();
let repo = router.find("repo").unwrap();
let sha = router.find("sha").unwrap();
let filename: Vec<&str> = router.find("method")
.unwrap_or("badge.svg")
.rsplitn(2, '.')
.collect();
let (method, ext) = match filename.len() {
2 => (filename[1], filename[0]),
_ => (filename[0], ""),
};
// Use `get_status_or` to look up and map the cached result
// or trigger a `schedule_github_update` if that isn't found yet
let result_key = format!("result/github/{0}/{1}:{2}", user, repo, sha);
let (text, color): (String, String) = get_status_or(
redis.get(result_key.to_owned()),
|| schedule_github_update(&user, &repo, &sha));
// Then render the response
let mut response = match method {
// If this is a simple request for status, just return the result
"status" => Response::with((status::Ok, text.to_owned())),
// for the badge, combine the text, color, base URL and query parameters
// from the incoming request into the URL we need to forward to
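// For instance (hypothetical values): text "success", color "brightgreen"
// and ext "svg" compose to
// "https://img.shields.io/badge/clippy-success-brightgreen.svg".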
"badge" => {
let target_badge = match req.url.clone().query {
Some(query) => format!("{}clippy-{}-{}.{}?{}", BADGE_URL_BASE, text, color, ext, query),
_ => format!("{}clippy-{}-{}.{}", BADGE_URL_BASE, text, color, ext),
};
// while linting, use only temporary redirects, so that the actual
// result will be asked for later
Response::with((match text.as_str() {
"linting" => status::Found,
_ => status::MovedPermanently
}, Redirect(iUrl::parse(&target_badge).unwrap())))
},
// emojibadge and fullemojibadge do the same as the request for `badge`,
// except that they replace the status with appropriate emoji
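// (e.g. a "success" status renders as 👌, so the badge text becomes
// "clippy-👌"; values here are illustrative)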
"emojibadge" => {
let emoji = match text.as_str() {
"linting" => "👷".to_string(),
"failed" => "😱".to_string(),
"success" => "👌".to_string(),
_ => text.replace("errors", "🤕").replace("warnings", "😟")
};
let target_badge = match req.url.clone().query {
Some(query) => format!("{}clippy-{}-{}.{}?{}", BADGE_URL_BASE, emoji, color, ext, query),
_ => format!("{}clippy-{}-{}.{}", BADGE_URL_BASE, emoji, color, ext),
};
Response::with((match color.as_str() {
"blue" => status::Found,
_ => status::MovedPermanently
}, Redirect(iUrl::parse(&target_badge).unwrap())))
},
"fullemojibadge" => {
let emoji = match text.as_str() {
"linting" => "👷".to_string(),
"failed" => "😱".to_string(),
"success" => "👌".to_string(),
_ => text.replace("errors", "🤕").replace("warnings", "😟")
};
let target_badge = match req.url.clone().query {
Some(query) => format!("{}📎-{}-{}.{}?{}", BADGE_URL_BASE, emoji, color, ext, query),
_ => format!("{}📎-{}-{}.{}", BADGE_URL_BASE, emoji, color, ext),
};
Response::with((match color.as_str() {
"blue" => status::Found,
_ => status::MovedPermanently
}, Redirect(iUrl::parse(&target_badge).unwrap())))
},
// If the request is asking for the logs, fetch those. This isn't particularly
// simple, as the Redis library makes the unwrapping a little bit tricky and
// hard for Rust to infer the proper types. So we have to specify the types
// and iterators rather explicitly at times.
"log" => {
let log_key = format!("log/github/{0}/{1}:{2}", user, repo, sha);
match redis.lrange(log_key.to_owned(), 0, -1) {
Ok(Some(Value::Bulk(logs))) => {
let logs: Vec<String> = logs.iter()
.map(|ref v| {
match **v {
Value::Data(ref val) => {
String::from_utf8(val.to_owned())
.unwrap()
.to_owned()
}
_ => "".to_owned(),
}
})
.collect();
Response::with((status::Ok, logs.join("\n")))
}
// if there aren't any logs found, we might just started the
// process. Let the request know.
_ => {
Response::with((status::Ok, "Started. Please refresh"))
}
}
},
// Nothing else is supported – but in Rust, we have to return all things
// of the same type. So let's return a `BadRequest` :) .
_ => Response::with((status::BadRequest, format!("{} Not Implemented.", method))),
};
response.headers.set(CacheControl(vec![CacheDirective::NoCache]));
Ok(response)
}<|fim▁end|> | if let Ok(json) = Json::from_str(&body) {
if let Some(&Json::String(ref sha)) = json.find_path(&["object", "sha"]) {
// Once found, store the SHA in the cache and redirect |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#<|fim▁hole|># to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
""" PyAbleton
A library for creating and editing Ableton Live instrument/effect presets in Python.
"""
__author__ = '[email protected]'
__version__ = '1.0'
import presets<|fim▁end|> | # Copyright (c) 2014 Hamilton Kibbe <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"), |
<|file_name|>_action_groups_operations.py<|end_file_name|><|fim▁begin|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ActionGroupsOperations:
"""ActionGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~$(python-base-namespace).v2018_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
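    # A minimal usage sketch (assumes an already-configured async
    # MonitorManagementClient bound as `client`; the resource group and
    # action group names are placeholders):
    #
    #     group = await client.action_groups.get("example-rg", "example-group")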
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def create_or_update(
self,
resource_group_name: str,
action_group_name: str,
action_group: "_models.ActionGroupResource",
**kwargs: Any
) -> "_models.ActionGroupResource":
"""Create a new action group or update an existing one.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:param action_group: The action group to create or use for the update.
:type action_group: ~$(python-base-namespace).v2018_03_01.models.ActionGroupResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ActionGroupResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2018_03_01.models.ActionGroupResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(action_group, 'ActionGroupResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ActionGroupResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ActionGroupResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'} # type: ignore
async def get(
self,
resource_group_name: str,
action_group_name: str,
**kwargs: Any
) -> "_models.ActionGroupResource":
"""Get an action group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ActionGroupResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2018_03_01.models.ActionGroupResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ActionGroupResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
action_group_name: str,
**kwargs: Any
) -> None:
"""Delete an action group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-03-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'} # type: ignore
async def update(
self,
resource_group_name: str,
action_group_name: str,
action_group_patch: "_models.ActionGroupPatchBody",
**kwargs: Any
) -> "_models.ActionGroupResource":
"""Updates an existing action group's tags. To update other fields use the CreateOrUpdate method.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:param action_group_patch: Parameters supplied to the operation.
:type action_group_patch: ~$(python-base-namespace).v2018_03_01.models.ActionGroupPatchBody
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ActionGroupResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2018_03_01.models.ActionGroupResource
:raises: ~azure.core.exceptions.HttpResponseError
"""<|fim▁hole|> }
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(action_group_patch, 'ActionGroupPatchBody')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ActionGroupResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'} # type: ignore
def list_by_subscription_id(
self,
**kwargs: Any
) -> AsyncIterable["_models.ActionGroupList"]:
"""Get a list of all action groups in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ActionGroupList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~$(python-base-namespace).v2018_03_01.models.ActionGroupList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription_id.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ActionGroupList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription_id.metadata = {'url': '/subscriptions/{subscriptionId}/providers/microsoft.insights/actionGroups'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ActionGroupList"]:
"""Get a list of all action groups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ActionGroupList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~$(python-base-namespace).v2018_03_01.models.ActionGroupList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ActionGroupList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups'} # type: ignore
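# Same three-callback paging pattern as above: prepare_request builds either
# the initial GET (formatted URL plus api-version) or a bare GET on the
# service-supplied next_link, extract_data unpacks one page, and get_next
# runs the pipeline and maps non-200 responses to HttpResponseError.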
async def enable_receiver(
self,
resource_group_name: str,
action_group_name: str,
enable_request: "_models.EnableRequest",
**kwargs: Any
) -> None:
"""Enable a receiver in an action group. This changes the receiver's status from Disabled to
Enabled. This operation is only supported for Email or SMS receivers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:param enable_request: The receiver to re-enable.
:type enable_request: ~$(python-base-namespace).v2018_03_01.models.EnableRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.enable_receiver.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(enable_request, 'EnableRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 409]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
enable_receiver.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}/subscribe'} # type: ignore<|fim▁end|> | cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>import React from 'react';
import CatagoryBox1 from '../CatagoryBox1';
import { BgBox } from './style';
function BackgroundBox (props) {
const {bcolor} = props;
const {data} = props;
const cata = data.map((d,i) => <CatagoryBox1 data={d} key={i} />);
return (<|fim▁hole|> );
};
export default BackgroundBox;<|fim▁end|> | <BgBox bcolor={bcolor}>
{cata}
</BgBox> |
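// Usage sketch (prop names taken from the component above; the shape of
// `data` is an assumption, since CatagoryBox1 decides what each item needs):
//   <BackgroundBox bcolor="#f0f0f0" data={categories} />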
<|file_name|>EtoileImageToTexturePlugin.cpp<|end_file_name|><|fim▁begin|>/**
* Copyright(C) 2009-2012
* @author Jing HUANG
<|fim▁hole|>* @date 1/2/2011
*/
#include "EtoileImageToTexturePlugin.h"
#include "util/File.h"
#include "QtTextureLoader.h"
/**
* @brief For tracking memory leaks under windows using the crtdbg
*/
#if ( defined( _DEBUG ) || defined( DEBUG ) ) && defined( _MSC_VER )
#define _CRTDBG_MAP_ALLOC
#include <crtdbg.h>
#define DEBUG_NEW new( _NORMAL_BLOCK, __FILE__, __LINE__ )
#define new DEBUG_NEW
#endif
Etoile::EPlugin* loadEtoileImageToTexturePlugin()
{
return new Etoile::EtoileImageToTexturePlugin("ImageToTexture");
}
namespace Etoile
{
ImageToTextureStringInputSocket::ImageToTextureStringInputSocket(const std::string& name) : StringInputSocket(name)
{
}
void ImageToTextureStringInputSocket::perform(std::string* signal)
{
if(signal == NULL) return;
EtoileImageToTexturePlugin* plugin = (EtoileImageToTexturePlugin*)(this->getNode());
plugin->openFile(*signal);
}
void ImageToTextureStringInputSocket::retrieve(std::string* signal)
{
EtoileImageToTexturePlugin* plugin = (EtoileImageToTexturePlugin*)(this->getNode());
plugin->openFile("");
}
ImageToTextureOutputSocket::ImageToTextureOutputSocket(const std::string& name) : TextureOutputSocket(name)
{
}
EtoileImageToTexturePlugin::EtoileImageToTexturePlugin(const std::string& name): EPlugin(), SocketNode()
{
this->getType()._description = "ImageToTexture";
this->getType()._name = name;
this->getType()._w = 80;
this->getType()._h = 60;
this->getType()._color._r = 120;
this->getType()._color._g = 240;
this->getType()._color._b = 250;
this->getType()._color._a = 240;
_pInput = new ImageToTextureStringInputSocket();
this->addInputSocket(_pInput);
_pOutput = new ImageToTextureOutputSocket();
this->addOutputSocket(_pOutput);
}
EtoileImageToTexturePlugin::~EtoileImageToTexturePlugin()
{
}
void EtoileImageToTexturePlugin::openFile(const std::string& filename)
{
if(filename.empty()) return;
std::string ext = File::getFileExtension(filename);
QtTextureLoader textureloader;
Texture* t = textureloader.loadFromFile(filename);
_pOutput->set(t);
_pOutput->signalEmit(t);
}
}<|fim▁end|> | * @file EtoileImageToTexturePlugin.cpp
* @brief
|
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import time
import uuid
import testresources
import testtools
from heatclient import client as heatclient
from keystoneclient.v2_0 import client as ksclient
from muranoclient import client as mclient
import muranoclient.common.exceptions as exceptions
import murano.tests.functional.engine.config as cfg
CONF = cfg.cfg.CONF
class MuranoBase(testtools.TestCase, testtools.testcase.WithAttributes,
testresources.ResourcedTestCase):
@classmethod
def setUpClass(cls):
super(MuranoBase, cls).setUpClass()
cfg.load_config()
keystone_client = ksclient.Client(username=CONF.murano.user,
password=CONF.murano.password,
tenant_name=CONF.murano.tenant,
auth_url=CONF.murano.auth_url)
heat_url = keystone_client.service_catalog.url_for(
service_type='orchestration', endpoint_type='publicURL')
cls.heat_client = heatclient.Client('1', endpoint=heat_url,
token=keystone_client.auth_token)
url = CONF.murano.murano_url
murano_url = url if 'v1' not in url else "/".join(
url.split('/')[:url.split('/').index('v1')])
cls.muranoclient = mclient.Client('1',
endpoint=murano_url,
token=keystone_client.auth_token)
cls.linux = CONF.murano.linux_image
cls.pkgs_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'murano-app-incubator'
))
def upload_package(package_name, body, app):
files = {'%s' % package_name: open(app, 'rb')}
return cls.muranoclient.packages.create(body, files)
upload_package(
'PostgreSQL',
{"categories": ["Databases"], "tags": ["tag"]},
os.path.join(cls.pkgs_path, 'io.murano.databases.PostgreSql.zip')
)
upload_package(
'SqlDatabase',
{"categories": ["Databases"], "tags": ["tag"]},
os.path.join(cls.pkgs_path, 'io.murano.databases.SqlDatabase.zip')
)
upload_package(
'Apache',
{"categories": ["Application Servers"], "tags": ["tag"]},
os.path.join(cls.pkgs_path,
'io.murano.apps.apache.ApacheHttpServer.zip')
)
upload_package(
'Tomcat',
{"categories": ["Application Servers"], "tags": ["tag"]},
os.path.join(cls.pkgs_path, 'io.murano.apps.apache.Tomcat.zip')
)
upload_package(
'Telnet',
{"categories": ["Web"], "tags": ["tag"]},
os.path.join(cls.pkgs_path, 'io.murano.apps.linux.Telnet.zip')
)
def setUp(self):
super(MuranoBase, self).setUp()
self.environments = []
def tearDown(self):
super(MuranoBase, self).tearDown()
for env in self.environments:
try:
self.environment_delete(env)
except Exception:
pass
def environment_delete(self, environment_id, timeout=180):
self.muranoclient.environments.delete(environment_id)
start_time = time.time()
while time.time() - start_time < timeout:
try:
self.muranoclient.environments.get(environment_id)
except exceptions.HTTPNotFound:
return
raise Exception(
'Environment {0} was not deleted in {1} seconds'.format(
environment_id, timeout))
def wait_for_environment_deploy(self, environment):
start_time = time.time()
while environment.manager.get(environment.id).status != 'ready':
if time.time() - start_time > 1200:
self.fail(
'Environment deployment is not finished in 1200 seconds')
time.sleep(5)
return environment.manager.get(environment.id)
def check_port_access(self, ip, port):
result = 1
start_time = time.time()
while time.time() - start_time < 300:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((str(ip), port))
sock.close()
if result == 0:
break
time.sleep(5)
self.assertEqual(0, result, '%s port is closed on instance' % port)
def deployment_success_check(self, environment, port):
deployment = self.muranoclient.deployments.list(environment.id)[-1]
self.assertEqual('success', deployment.state,
'Deployment status is {0}'.format(deployment.state))
ip = environment.services[-1]['instance']['floatingIpAddress']
if ip:
self.check_port_access(ip, port)
else:
self.fail('Instance does not have floating IP')
def test_deploy_telnet(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
"assignFloatingIp": True,
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"name": "testMurano"
},
"name": "teMurano",
"?": {
"type": "io.murano.apps.linux.Telnet",
"id": str(uuid.uuid4())
}
}
environment_name = 'Telnetenv' + uuid.uuid4().hex[:5]
env = self._quick_deploy(environment_name, post_body)
self.deployment_success_check(env, 23)
def test_deploy_apache(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
"assignFloatingIp": True,
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"name": "testMurano"
},
"name": "teMurano",
"?": {
"type": "io.murano.apps.apache.ApacheHttpServer",
"id": str(uuid.uuid4())
}
}
environment_name = 'Apacheenv' + uuid.uuid4().hex[:5]
env = self._quick_deploy(environment_name, post_body)
self.deployment_success_check(env, 80)
def test_deploy_postgresql(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
"assignFloatingIp": True,
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"name": "testMurano"
},
"name": "teMurano",
"database": "test_db",
"username": "test_usr",
"password": "test_pass",
"?": {
"type": "io.murano.databases.PostgreSql",
"id": str(uuid.uuid4())
}
}
environment_name = 'Postgreenv' + uuid.uuid4().hex[:5]
env = self._quick_deploy(environment_name, post_body)
self.deployment_success_check(env, 5432)
def test_deploy_tomcat(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
"assignFloatingIp": True,
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"name": "testMurano"
},
"name": "teMurano",
"?": {
"type": "io.murano.apps.apache.Tomcat",
"id": str(uuid.uuid4())
}
}
environment_name = 'Tomcatenv' + uuid.uuid4().hex[:5]
env = self._quick_deploy(environment_name, post_body)
self.deployment_success_check(env, 8080)
def _get_telnet_app(self):
return {
"instance": {
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"flavor": "m1.medium",
"image": self.linux,
"name": "instance{0}".format(uuid.uuid4().hex[:5]),
},
"name": "app{0}".format(uuid.uuid4().hex[:5]),
"?": {
"type": "io.murano.apps.linux.Telnet",
"id": str(uuid.uuid4())
}
}
def _quick_deploy(self, name, *apps):
environment = self.muranoclient.environments.create({'name': name})
self.environments.append(environment.id)
session = self.muranoclient.sessions.configure(environment.id)
for app in apps:
self.muranoclient.services.post(environment.id,
path='/',
data=app,
session_id=session.id)
self.muranoclient.sessions.deploy(environment.id, session.id)
return self.wait_for_environment_deploy(environment)
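# Murano embeds the environment id in the Heat stack description, which is
# why the helper below matches on stack.description instead of stack name.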
def _get_stack(self, environment_id):
for stack in self.heat_client.stacks.list():
if environment_id in stack.description:
return stack
def test_instance_refs_are_removed_after_application_is_removed(self):
# FIXME(sergmelikyan): Revise this as part of proper fix for #1359998
self.skipTest('Skipped until proper fix for #1359998 is proposed')
name = 'e' + uuid.uuid4().hex
# create environment with telnet application
application1 = self._get_telnet_app()
application2 = self._get_telnet_app()
application_id = application1['?']['id']
instance_name = application1['instance']['name']
apps = [application1, application2]
environment = self._quick_deploy(name, *apps)
# delete telnet application
session = self.muranoclient.sessions.configure(environment.id)
self.muranoclient.services.delete(environment.id,
'/' + application_id,
session.id)
self.muranoclient.sessions.deploy(environment.id, session.id)
self.wait_for_environment_deploy(environment)
stack_name = self._get_stack(environment.id).stack_name
template = self.heat_client.stacks.template(stack_name)
ip_addresses = '{0}-assigned-ip'.format(instance_name)
floating_ip = '{0}-FloatingIPaddress'.format(instance_name)
self.assertNotIn(ip_addresses, template['outputs'])
self.assertNotIn(floating_ip, template['outputs'])
self.assertNotIn(instance_name, template['resources'])
def test_stack_deletion_after_env_is_deleted(self):
name = 'e' + uuid.uuid4().hex
<|fim▁hole|> stack = self._get_stack(environment.id)
self.assertIsNotNone(stack)
self.muranoclient.environments.delete(environment.id)
start_time = time.time()
while stack is not None:
if time.time() - start_time > 300:
break
time.sleep(5)
stack = self._get_stack(environment.id)
self.assertIsNone(stack, 'stack is not deleted')<|fim▁end|> | application = self._get_telnet_app()
environment = self._quick_deploy(name, application)
|
<|file_name|>transmute_int_to_float.rs<|end_file_name|><|fim▁begin|>use super::TRANSMUTE_INT_TO_FLOAT;
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::sugg;
use rustc_errors::Applicability;
use rustc_hir::Expr;
use rustc_lint::LateContext;
use rustc_middle::ty::{self, Ty};
/// Checks for `transmute_int_to_float` lint.
/// Returns `true` if it's triggered, otherwise returns `false`.
pub(super) fn check<'tcx>(
cx: &LateContext<'tcx>,
e: &'tcx Expr<'_>,
from_ty: Ty<'tcx>,
to_ty: Ty<'tcx>,
args: &'tcx [Expr<'_>],
const_context: bool,
) -> bool {
match (&from_ty.kind(), &to_ty.kind()) {
(ty::Int(_) | ty::Uint(_), ty::Float(_)) if !const_context => {
span_lint_and_then(
cx,
TRANSMUTE_INT_TO_FLOAT,
e.span,
&format!("transmute from a `{}` to a `{}`", from_ty, to_ty),
|diag| {
let arg = sugg::Sugg::hir(cx, &args[0], "..");
let arg = if let ty::Int(int_ty) = from_ty.kind() {
arg.as_ty(format!(
"u{}",
int_ty.bit_width().map_or_else(|| "size".to_string(), |v| v.to_string())
))
} else {
arg
};
diag.span_suggestion(
e.span,
"consider using",
format!("{}::from_bits({})", to_ty, arg.to_string()),
Applicability::Unspecified,
);
},
);<|fim▁hole|> true
},
_ => false,
}
}<|fim▁end|> | |
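// Sketch of the rewrite this lint suggests (hypothetical values):
//     let x: u32 = 0x3f80_0000;
//     let f = unsafe { std::mem::transmute::<u32, f32>(x) }; // lint fires here
//     let f = f32::from_bits(x);                             // suggested fix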
<|file_name|>LazyGenerator.py<|end_file_name|><|fim▁begin|>#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" lazy generator of 2D pharmacophore signature data
"""
from __future__ import print_function
from rdkit.Chem.Pharm2D import SigFactory, Matcher
raise NotImplementedError('not finished yet')
class Generator(object):
"""
Important attributes:
- mol: the molecule whose signature is being worked with
- sigFactory : the SigFactory object with signature parameters
NOTE: no preprocessing is carried out for _sigFactory_.
It *must* be pre-initialized.
**Notes**
-
"""
<|fim▁hole|>
**Arguments**
- sigFactory: a signature factory, see class docs
- mol: a molecule, see class docs
- dMat: (optional) a distance matrix for the molecule. If this
is not provided, one will be calculated
- bitCache: (optional) if nonzero, a local cache of which bits
have been queried will be maintained. Otherwise things must
be recalculate each time a bit is queried.
"""
if not isinstance(sigFactory, SigFactory.SigFactory):
raise ValueError('bad factory')
self.sigFactory = sigFactory
self.mol = mol
if dMat is None:
useBO = sigFactory.includeBondOrder
dMat = Chem.GetDistanceMatrix(mol, useBO)
self.dMat = dMat
if bitCache:
self.bits = {}
else:
self.bits = None
featFamilies = [fam for fam in sigFactory.featFactory.GetFeatureFamilies()
if fam not in sigFactory.skipFeats]
nFeats = len(featFamilies)
featMatches = {}
for fam in featFamilies:
featMatches[fam] = []
feats = sigFactory.featFactory.GetFeaturesForMol(mol)
for feat in feats:
if feat.GetFamily() not in sigFactory.skipFeats:
featMatches[feat.GetFamily()].append(feat.GetAtomIds())
self.pattMatches = featMatches
def GetBit(self, idx):
""" returns a bool indicating whether or not the bit is set
"""
if idx < 0 or idx >= self.sig.GetSize():
raise IndexError('Index %d invalid' % (idx))
if self.bits is not None and idx in self.bits:
return self.bits[idx]
tmp = Matcher.GetAtomsMatchingBit(self.sig, idx, self.mol, dMat=self.dMat, justOne=1,
matchingAtoms=self.pattMatches)
if not tmp or len(tmp) == 0:
res = 0
else:
res = 1
if self.bits is not None:
self.bits[idx] = res
return res
def __len__(self):
""" allows class to support len()
"""
return self.sig.GetSize()
def __getitem__(self, itm):
""" allows class to support random access.
Calls self.GetBit()
"""
return self.GetBit(itm)
if __name__ == '__main__':
import time
from rdkit import RDConfig, Chem
from rdkit.Chem.Pharm2D import Gobbi_Pharm2D, Generate
import random
factory = Gobbi_Pharm2D.factory
nToDo = 100
inD = open(RDConfig.RDDataDir + "/NCI/first_5K.smi", 'r').readlines()[:nToDo]
mols = [None] * len(inD)
for i in range(len(inD)):
smi = inD[i].split('\t')[0]
smi.strip()
mols[i] = Chem.MolFromSmiles(smi)
sig = factory.GetSignature()
nBits = 300
random.seed(23)
bits = [random.randint(0, sig.GetSize() - 1) for x in range(nBits)]
print('Using the Lazy Generator')
t1 = time.time()
for i in range(len(mols)):
if not i % 10:
print('done mol %d of %d' % (i, len(mols)))
gen = Generator(factory, mols[i])
for bit in bits:
v = gen[bit]
t2 = time.time()
print('\tthat took %4.2f seconds' % (t2 - t1))
print('Generating and checking signatures')
t1 = time.time()
for i in range(len(mols)):
if not i % 10:
print('done mol %d of %d' % (i, len(mols)))
sig = Generate.Gen2DFingerprint(mols[i], factory)
for bit in bits:
v = sig[bit]
t2 = time.time()
print('\tthat took %4.2f seconds' % (t2 - t1))<|fim▁end|> | def __init__(self, sigFactory, mol, dMat=None, bitCache=True):
""" constructor |
<|file_name|>ConsultaCNPJBC.java<|end_file_name|><|fim▁begin|>package br.gov.serpro.infoconv.proxy.businesscontroller;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
import br.gov.frameworkdemoiselle.stereotype.BusinessController;
import br.gov.serpro.infoconv.proxy.exception.AcessoNegadoException;
import br.gov.serpro.infoconv.proxy.exception.CNPJNaoEncontradoException;
import br.gov.serpro.infoconv.proxy.exception.CpfNaoEncontradoException;
import br.gov.serpro.infoconv.proxy.exception.DadosInvalidosException;
import br.gov.serpro.infoconv.proxy.exception.InfraException;
import br.gov.serpro.infoconv.proxy.exception.PerfilInvalidoException;
import br.gov.serpro.infoconv.proxy.rest.dto.cnpj.Perfil1CNPJ;
import br.gov.serpro.infoconv.proxy.rest.dto.cnpj.Perfil2CNPJ;
import br.gov.serpro.infoconv.proxy.rest.dto.cnpj.Perfil3CNPJ;
import br.gov.serpro.infoconv.proxy.util.InfoconvConfig;
import br.gov.serpro.infoconv.ws.cnpj.ArrayOfCNPJPerfil1;
import br.gov.serpro.infoconv.ws.cnpj.ArrayOfCNPJPerfil2;
import br.gov.serpro.infoconv.ws.cnpj.ArrayOfCNPJPerfil3;
import br.gov.serpro.infoconv.ws.cnpj.CNPJPerfil1;
import br.gov.serpro.infoconv.ws.cnpj.CNPJPerfil2;<|fim▁hole|> * CNPJ queries and to convert the expected errors into exceptions.
*
*/
@BusinessController
public class ConsultaCNPJBC {
/** Classe de configuração do infoconv. */
@Inject
InfoconvConfig infoconv;
private static final String CPF_CONSULTANTE = "79506240949";
/**
* Verifica a propriedade ERRO para saber se houve algum problema na
* consulta.
*
* Como se trata de um webservice sempre retorna com codigo http 200 e
* dentro da msg de retorno o campo "erro" informa se teve algum problema.
*
* Alguns "erros" são na verdade avisos por isso não levantam exceção. segue
* os erros que levantam exceção:
*
* - AcessoNegadoException: Todos os erros que começãm com ACS - Erro. Podem
* ser por falta de permissão ou algum problema com certificado. A mensagem
* explica qual o problema.
*
* - CNPJNaoEncontradoException: Quando o cpf não está na base. Erros: CPJ
* 04
*
* - DadosInvalidosException: Qualquer problema de validação no servidor.
* Erros: CPJ 02,06 e 11
*
* - InfraException: Erros no lado do servidor (WS) Erros: CPF 00 , 01, 03,
* 08, 09
*
* Documentação dos códigos de Erros:
* https://github.com/infoconv/infoconv-ws
*
* @param response
* @throws AcessoNegadoException
* @throws DadosInvalidosException
* @throws InfraException
* @throws CNPJNaoEncontradoException
*/
private void verificarErros(final Object retorno)
throws AcessoNegadoException, DadosInvalidosException, InfraException, CNPJNaoEncontradoException {
try {
Class<?> c = retorno.getClass();
Field erroField = c.getDeclaredField("erro");
erroField.setAccessible(true);
String erroMsg = (String) erroField.get(retorno);
if (erroMsg.indexOf("ACS - Erro") > -1) {
throw new AcessoNegadoException(erroMsg);
} else if (erroMsg.indexOf("CPJ - Erro 04") > -1) {
throw new CNPJNaoEncontradoException(erroMsg);
} else if (erroMsg.indexOf("CPJ - Erro 02") > -1
|| erroMsg.indexOf("CPJ - Erro 06") > -1
|| erroMsg.indexOf("CPJ - Erro 11") > -1) {
throw new DadosInvalidosException(erroMsg);
} else if (erroMsg.indexOf("CPJ - Erro 01") > -1
|| erroMsg.indexOf("CPJ - Erro 03") > -1
|| erroMsg.indexOf("CPJ - Erro 00") > -1
|| erroMsg.indexOf("CPJ - Erro 08") > -1
|| erroMsg.indexOf("CPJ - Erro 09") > -1) {
throw new InfraException(erroMsg);
}
} catch (NoSuchFieldException e) {
e.printStackTrace();
} catch (SecurityException e) {
e.printStackTrace();
} catch (IllegalArgumentException e) {
e.printStackTrace();
} catch (IllegalAccessException e) {
e.printStackTrace();
}
}
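// Example (hypothetical message text): a response whose "erro" field reads
// "CPJ - Erro 04 - CNPJ nao encontrado" is mapped above to
// CNPJNaoEncontradoException, while any "ACS - Erro ..." text becomes
// AcessoNegadoException.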
/**
* Based on the given profile, builds a generic list with the profile's
* CNPJ type as Object.
*
* @param listaCNPJs
* @param perfil
* @return
* @throws PerfilInvalidoException
* @throws InfraException
* @throws DadosInvalidosException
* @throws AcessoNegadoException
* @throws CNPJNaoEncontradoException
*/
public List<Object> consultarListaDeCnpjPorPerfil(final String listaCNPJs, final String perfil)
throws PerfilInvalidoException, AcessoNegadoException, DadosInvalidosException,
InfraException, CNPJNaoEncontradoException {
List<Object> lista = new ArrayList<Object>();
String perfilUpper = perfil == null ? "P1" : perfil.toUpperCase();
if ("P1".equals(perfilUpper)) {
ArrayOfCNPJPerfil1 p = infoconv.consultarCNPJSoap.consultarCNPJP1(listaCNPJs, CPF_CONSULTANTE);
lista.addAll(p.getCNPJPerfil1());
} else if ("P1T".equals(perfilUpper)) {
ArrayOfCNPJPerfil1 p = infoconv.consultarCNPJSoap.consultarCNPJP1T(listaCNPJs, CPF_CONSULTANTE);
lista.addAll(p.getCNPJPerfil1());
} else if ("P2".equals(perfilUpper)) {
ArrayOfCNPJPerfil2 p = infoconv.consultarCNPJSoap.consultarCNPJP2(listaCNPJs, CPF_CONSULTANTE);
lista.addAll(p.getCNPJPerfil2());
} else if ("P2T".equals(perfilUpper)) {
ArrayOfCNPJPerfil2 p = infoconv.consultarCNPJSoap.consultarCNPJP2T(listaCNPJs, CPF_CONSULTANTE);
lista.addAll(p.getCNPJPerfil2());
} else if ("P3".equals(perfilUpper)) {
ArrayOfCNPJPerfil3 p = infoconv.consultarCNPJSoap.consultarCNPJP3(listaCNPJs, CPF_CONSULTANTE);
lista.addAll(p.getCNPJPerfil3());
} else if ("P3T".equals(perfilUpper)) {
ArrayOfCNPJPerfil3 p = infoconv.consultarCNPJSoap.consultarCNPJP3T(listaCNPJs, CPF_CONSULTANTE);
lista.addAll(p.getCNPJPerfil3());
} else {
throw new PerfilInvalidoException();
}
verificarErros(lista.get(0));
return lista;
}
/**
* Queries the infoconv web service ConsultarCNPJSoap/ConsultarCNPJP1.
*
* @param listaCNPJs
* @return
* @throws AcessoNegadoException
* @throws CNPJNaoEncontradoException
* @throws DadosInvalidosException
* @throws InfraException
*/
public List<Perfil1CNPJ> listarPerfil1(String listaCNPJs) throws AcessoNegadoException, CNPJNaoEncontradoException, DadosInvalidosException, InfraException{
ArrayOfCNPJPerfil1 result = infoconv.consultarCNPJSoap.consultarCNPJP1(listaCNPJs, CPF_CONSULTANTE);
verificarErros(result.getCNPJPerfil1().get(0));
List<Perfil1CNPJ> lista = new ArrayList<Perfil1CNPJ>();
for (CNPJPerfil1 perfil1 : result.getCNPJPerfil1()) {
lista.add(new Perfil1CNPJ(perfil1));
}
return lista;
}
/**
* Queries the infoconv web service ConsultarCNPJSoap/ConsultarCNPJP2.
*
* @param listaCNPJs
* @return
* @throws AcessoNegadoException
* @throws CNPJNaoEncontradoException
* @throws DadosInvalidosException
* @throws InfraException
*/
public List<Perfil2CNPJ> listarPerfil2(String listaCNPJs) throws AcessoNegadoException, CNPJNaoEncontradoException, DadosInvalidosException, InfraException{
ArrayOfCNPJPerfil2 result = infoconv.consultarCNPJSoap.consultarCNPJP2(listaCNPJs, CPF_CONSULTANTE);
verificarErros(result.getCNPJPerfil2().get(0));
List<Perfil2CNPJ> lista = new ArrayList<Perfil2CNPJ>();
for (CNPJPerfil2 perfil1 : result.getCNPJPerfil2()) {
lista.add(new Perfil2CNPJ(perfil1));
}
return lista;
}
/**
* Queries the infoconv web service ConsultarCNPJSoap/ConsultarCNPJP3.
*
* @param listaCNPJs
* @return
* @throws AcessoNegadoException
* @throws CNPJNaoEncontradoException
* @throws DadosInvalidosException
* @throws InfraException
*/
public List<Perfil3CNPJ> listarPerfil3(String listaCNPJs) throws AcessoNegadoException, CNPJNaoEncontradoException, DadosInvalidosException, InfraException{
ArrayOfCNPJPerfil3 result = infoconv.consultarCNPJSoap.consultarCNPJP3(listaCNPJs, CPF_CONSULTANTE);
verificarErros(result.getCNPJPerfil3().get(0));
List<Perfil3CNPJ> lista = new ArrayList<Perfil3CNPJ>();
for (CNPJPerfil3 perfil1 : result.getCNPJPerfil3()) {
lista.add(new Perfil3CNPJ(perfil1));
}
return lista;
}
}<|fim▁end|> | import br.gov.serpro.infoconv.ws.cnpj.CNPJPerfil3;
/**
* Class responsible for interacting with the infoconv-ws component to handle
<|file_name|>test.scratch.be.speech.cpp<|end_file_name|><|fim▁begin|>/* /////////////////////////////////////////////////////////////////////////
* File: test/scratch/test.scratch.be.speech/test.scratch.be.speech.cpp
*
* Purpose: C++ example program for Pantheios. Demonstrates:
*
* - use of custom severity level information for tabbing output
* - definition of a custom back-end that supports tabbed output
* - use of pantheios::logputs() in bail-out conditions
*
* Created: 31st August 2006
* Updated: 27th January 2017
*
* www: http://www.pantheios.org/
*
* License: This source code is placed into the public domain 2006
* by Synesis Software Pty Ltd. There are no restrictions
* whatsoever to your use of the software.
*
* This software is provided "as is", and any warranties,
* express or implied, of any kind and for any purpose, are
* disclaimed.
*
* ////////////////////////////////////////////////////////////////////// */
/* This inclusion required for suppressing warnings during NoX (No eXception-support) configurations. */
#include <pantheios/util/test/compiler_warnings_suppression.first_include.h><|fim▁hole|>#include <pantheios/backend.h>
#include <pantheios/implicit_link/core.h>
#include <pantheios/implicit_link/fe.simple.h>
#include <pantheios/implicit_link/be.lrsplit.h>
#include <pantheios/implicit_link/bel.WindowsConsole.h>
#include <pantheios/implicit_link/bec.speech.WithCallback.h>
#include <pantheios/backends/bec.speech.h>
/* Standard C/C++ header files */
#include <exception> // for std::exception
#include <string> // for std::string
#include <stdio.h> // for fprintf()
#include <stdlib.h> // for exit codes
#include <string.h> // for memset()
#include <pantheios/util/test/compiler_warnings_suppression.last_include.h>
/* ////////////////////////////////////////////////////////////////////// */
// Define the fe.simple process identity, so that it links when using fe.simple
PANTHEIOS_EXTERN PAN_CHAR_T const PANTHEIOS_FE_PROCESS_IDENTITY[] = PANTHEIOS_LITERAL_STRING("test.scratch.speech");
/* ////////////////////////////////////////////////////////////////////// */
//PANTHEIOS_BE_DEFINE_BE_FUNCTIONS(speech)
PANTHEIOS_BE_DEFINE_BER_FUNCTIONS(speech)
PANTHEIOS_CALL(void) pantheios_be_speech_getAppInit(int backEndId, pan_be_speech_init_t* init) /* throw() */
{
// init->flags |= PANTHEIOS_BE_SPEECH_F_SYNCHRONOUS;
// init->flags |= PANTHEIOS_BE_SPEECH_F_PURGE_BEFORE_SPEAK;
// init->flags |= PANTHEIOS_BE_SPEECH_F_SPEAK_PUNCTUATION;
// init->flags |= PANTHEIOS_BE_SPEECH_F_SYNCHRONOUS_ON_CRITICAL;
}
/* ////////////////////////////////////////////////////////////////////// */
int main()
{
DWORD shortPause = 1250;
try
{
// pantheios::log(pantheios::notice, "Hello");
// ::Sleep(shortPause);
// pantheios::log(pantheios::notice(2), "Hello");
// ::Sleep(shortPause);
// pantheios::log(pantheios::notice(2), "Hello, boys. This is your daddy, telling you to turn around and eat your dinner. Now!");
// ::Sleep(shortPause);
short s = SHRT_MIN;
unsigned short us = USHRT_MAX;
int i = INT_MIN;
unsigned int ui = UINT_MAX;
long l = LONG_MIN;
unsigned long ul = ULONG_MAX;
#if 0
// Log a short in decimal; Output: "s: [-32768]"
pantheios::log_NOTICE("s: [", pantheios::integer(s), "]");
::Sleep(shortPause);
// Log a unsigned short as hexadecimal; Output: "us: [ffff]"
pantheios::log_NOTICE("us: [", pantheios::integer(us, pantheios::fmt::hex), "]");
::Sleep(shortPause);
// Log an int, into a width of 20; Output: "i: [-2147483648 ]"
pantheios::log_NOTICE("i: [", pantheios::integer(i, -20), "]");
::Sleep(shortPause);
// Log an unsigned int as hexadecimal with 0x prefix; Output: "ui: [0xffffffff]"
pantheios::log_NOTICE("ui: [", pantheios::integer(ui, pantheios::fmt::hex | pantheios::fmt::zeroXPrefix), "]");
::Sleep(shortPause);
// Log a long; Output: "l: [ -2147483648]"
pantheios::log_NOTICE("l: [", pantheios::integer(l, 20), "]");
::Sleep(shortPause);
// Log an unsigned long; Output: "ul: [4294967295]"
pantheios::log_NOTICE("ul: [", pantheios::integer(ul), "]");
::Sleep(shortPause);
#else /* ? 0 */
pantheios::log_NOTICE("Hi!");
::Sleep(shortPause);
pantheios::log_NOTICE("This is your logger, calling.");
::Sleep(shortPause);
pantheios::log_NOTICE("Here come some diagnostic logging statements ...");
::Sleep(shortPause);
#endif /* 0 */
pantheios::log_DEBUG("just being pedantic");
::Sleep(shortPause);
pantheios::log_INFORMATIONAL("you can ignore this");
::Sleep(shortPause);
pantheios::log_NOTICE("this is noteworthy");
::Sleep(shortPause);
pantheios::log_WARNING("there may be a problem");
::Sleep(shortPause);
pantheios::log_ERROR("there is a problem");
::Sleep(shortPause);
pantheios::log_CRITICAL("there is a serious problem");
::Sleep(shortPause);
pantheios::log_ALERT("there is a very serious problem");
::Sleep(shortPause);
pantheios::log_EMERGENCY("aargh! I'm operating in contradiction to my design!");
::Sleep(90000);
return EXIT_SUCCESS;
}
catch(std::bad_alloc &)
{
pantheios::log_CRITICAL("out of memory");
}
catch(std::exception &x)
{
pantheios::log_ALERT("Exception: ", x);
}
catch(...)
{
pantheios::logputs(pantheios::emergency, "Unexpected unknown error");
}
return EXIT_FAILURE;
}
/* ////////////////////////////////////////////////////////////////////// */<|fim▁end|> |
/* Pantheios header files */
#include <pantheios/pantheios.hpp> |
<|file_name|>grep_test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python -u<|fim▁hole|># encoding: utf-8
#
# Copyright (c) 2012, Peter Hillerström <[email protected]>
# All rights reserved. This software is licensed under 3-clause BSD license.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
import pytest
from prism.grep import pattern, search
def log_lines():
return [
"[Sun Apr 08 12:51:52 2012] [notice] Digest: done",
"[Mon Jul 11 09:26:13 2011] Error: [client ::1] File does not exist: /Library/WebServer/Documents/favicon.ico",
]
def test_search():
for line in log_lines():
assert search(line), "Regexp pattern '{0}' didn't match line '{1}'".format(pattern, line)<|fim▁end|> | |
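# Quick sanity sketch (assumes `search` wraps re.search over `pattern` and
# returns a match object or None):
#     >>> bool(search("[Sun Apr 08 12:51:52 2012] [notice] Digest: done"))
#     True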
<|file_name|>SQLManagerKeyOverwiteTest.java<|end_file_name|><|fim▁begin|>package water.jdbc;
<|fim▁hole|>import water.Key;
import water.Keyed;
import water.fvec.Frame;
import water.runner.CloudSize;
import water.runner.H2ORunner;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static org.junit.Assert.*;
@RunWith(H2ORunner.class)
@CloudSize(1)
public class SQLManagerKeyOverwiteTest {
@Test public void nextKeyHasRightPrefixAndPostfix() {
final String prefix = "foo";
final String postfix = "bar";
final Key<Frame> key = SQLManager.nextTableKey(prefix, postfix);
assertTrue(key.toString().startsWith(prefix));
assertTrue(key.toString().endsWith(postfix));
}
@Test public void nextKeyHasNoWhitechars() {
final Key<Frame> key = SQLManager.nextTableKey("f o o ", "b a r");
assertFalse(key.toString().contains("\\W"));
}
@Test public void makeRandomKeyCreatesUniqueKeys() {
final int count = 1000;
final long actualCount = IntStream.range(0, count)
.boxed()
.parallel()
.map(i -> SQLManager.nextTableKey("foo", "bar"))
.map(Key::toString)
.distinct()
.count();
assertEquals(count, actualCount);
}
}<|fim▁end|> | import org.junit.Test;
import org.junit.runner.RunWith; |
<|file_name|>OgreShaderExHardwareSkinning.cpp<|end_file_name|><|fim▁begin|>/*
-----------------------------------------------------------------------------
This source file is part of OGRE
(Object-oriented Graphics Rendering Engine)
For the latest info, see http://www.ogre3d.org
Copyright (c) 2000-2014 Torus Knot Software Ltd
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-----------------------------------------------------------------------------
*/
#include "OgreShaderExHardwareSkinning.h"
#ifdef RTSHADER_SYSTEM_BUILD_EXT_SHADERS
#include "OgreShaderExDualQuaternionSkinning.h"
#include "OgreShaderExLinearSkinning.h"
#include "OgreShaderFFPRenderState.h"
#include "OgreShaderProgram.h"
#include "OgreShaderParameter.h"
#include "OgreShaderProgramSet.h"
#include "OgreEntity.h"
#include "OgreSubEntity.h"
#include "OgreMaterial.h"
#include "OgreSubMesh.h"
#include "OgreShaderGenerator.h"
#define HS_DATA_BIND_NAME "HS_SRS_DATA"
namespace Ogre {
template<> RTShader::HardwareSkinningFactory* Singleton<RTShader::HardwareSkinningFactory>::msSingleton = 0;
namespace RTShader {
HardwareSkinningFactory* HardwareSkinningFactory::getSingletonPtr(void)
{
return msSingleton;
}
HardwareSkinningFactory& HardwareSkinningFactory::getSingleton(void)
{
assert( msSingleton ); return ( *msSingleton );
}
String HardwareSkinning::Type = "SGX_HardwareSkinning";
/************************************************************************/
/* */
/************************************************************************/
HardwareSkinning::HardwareSkinning() :
mCreator(NULL),
mSkinningType(ST_LINEAR)
{
}
//-----------------------------------------------------------------------
const String& HardwareSkinning::getType() const
{
return Type;
}
//-----------------------------------------------------------------------
int HardwareSkinning::getExecutionOrder() const
{
return FFP_TRANSFORM;
}
//-----------------------------------------------------------------------
void HardwareSkinning::setHardwareSkinningParam(ushort boneCount, ushort weightCount, SkinningType skinningType, bool correctAntipodalityHandling, bool scalingShearingSupport)
{
mSkinningType = skinningType;
if(skinningType == ST_DUAL_QUATERNION)
{
if(mDualQuat.isNull())
{
mDualQuat.bind(OGRE_NEW DualQuaternionSkinning);
}
mActiveTechnique = mDualQuat;
}
else //if(skinningType == ST_LINEAR)
{
if(mLinear.isNull())
{
mLinear.bind(OGRE_NEW LinearSkinning);
}
mActiveTechnique = mLinear;
}
mActiveTechnique->setHardwareSkinningParam(boneCount, weightCount, correctAntipodalityHandling, scalingShearingSupport);
}
//-----------------------------------------------------------------------
ushort HardwareSkinning::getBoneCount()
{
assert(!mActiveTechnique.isNull());
return mActiveTechnique->getBoneCount();
}
//-----------------------------------------------------------------------
ushort HardwareSkinning::getWeightCount()
{
assert(!mActiveTechnique.isNull());
return mActiveTechnique->getWeightCount();
}
//-----------------------------------------------------------------------
SkinningType HardwareSkinning::getSkinningType()
{
assert(!mActiveTechnique.isNull());
return mSkinningType;
}
//-----------------------------------------------------------------------
bool HardwareSkinning::hasCorrectAntipodalityHandling()
{
assert(!mActiveTechnique.isNull());
return mActiveTechnique->hasCorrectAntipodalityHandling();
}
//-----------------------------------------------------------------------
bool HardwareSkinning::hasScalingShearingSupport()
{
assert(!mActiveTechnique.isNull());
return mActiveTechnique->hasScalingShearingSupport();
}
//-----------------------------------------------------------------------
void HardwareSkinning::copyFrom(const SubRenderState& rhs)
{
const HardwareSkinning& hardSkin = static_cast<const HardwareSkinning&>(rhs);
mDualQuat = hardSkin.mDualQuat;
mLinear = hardSkin.mLinear;
mActiveTechnique = hardSkin.mActiveTechnique;
mCreator = hardSkin.mCreator;
mSkinningType = hardSkin.mSkinningType;
}
//-----------------------------------------------------------------------
void operator<<(std::ostream& o, const HardwareSkinning::SkinningData& data)
{
o << data.isValid;
o << data.maxBoneCount;
o << data.maxWeightCount;
o << data.skinningType;
o << data.correctAntipodalityHandling;
o << data.scalingShearingSupport;
}
//-----------------------------------------------------------------------
bool HardwareSkinning::preAddToRenderState(const RenderState* renderState, Pass* srcPass, Pass* dstPass)
{
bool isValid = true;
Technique* pFirstTech = srcPass->getParent()->getParent()->getTechnique(0);
const Any& hsAny = pFirstTech->getUserObjectBindings().getUserAny(HS_DATA_BIND_NAME);
if (hsAny.isEmpty() == false)
{
HardwareSkinning::SkinningData pData =
(any_cast<HardwareSkinning::SkinningData>(hsAny));
isValid = pData.isValid;
//If the skinning data is being passed through the material, we need to create an instance of the appropriate
//skinning type and set its parameters here
setHardwareSkinningParam(pData.maxBoneCount, pData.maxWeightCount, pData.skinningType,
pData.correctAntipodalityHandling, pData.scalingShearingSupport);
}
//If there is no associated technique, default to linear skinning as a pass-through
if(mActiveTechnique.isNull())
{
setHardwareSkinningParam(0, 0, ST_LINEAR, false, false);
}
int boneCount = mActiveTechnique->getBoneCount();
int weightCount = mActiveTechnique->getWeightCount();
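// Bone calculations are only enabled when the skeleton fits the shader
// limits checked below: valid data, at most 256 bones, at most 4 weights
// per vertex, and no more bones than the factory can fit into constants.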
bool doBoneCalculations = isValid &&
(boneCount != 0) && (boneCount <= 256) &&
(weightCount != 0) && (weightCount <= 4) &&
((mCreator == NULL) || (boneCount <= mCreator->getMaxCalculableBoneCount()));
mActiveTechnique->setDoBoneCalculations(doBoneCalculations);
if ((doBoneCalculations) && (mCreator))
{
//update the receiver and caster materials
if (dstPass->getParent()->getShadowCasterMaterial().isNull())
{
dstPass->getParent()->setShadowCasterMaterial(
mCreator->getCustomShadowCasterMaterial(mSkinningType, weightCount - 1));
}
if (dstPass->getParent()->getShadowReceiverMaterial().isNull())
{
dstPass->getParent()->setShadowReceiverMaterial(
mCreator->getCustomShadowReceiverMaterial(mSkinningType, weightCount - 1));
}
}
return true;
}
//-----------------------------------------------------------------------
bool HardwareSkinning::resolveParameters(ProgramSet* programSet)
{
assert(!mActiveTechnique.isNull());
return mActiveTechnique->resolveParameters(programSet);
}
//-----------------------------------------------------------------------
bool HardwareSkinning::resolveDependencies(ProgramSet* programSet)
{
assert(!mActiveTechnique.isNull());
return mActiveTechnique->resolveDependencies(programSet);
}
//-----------------------------------------------------------------------
bool HardwareSkinning::addFunctionInvocations(ProgramSet* programSet)
{
assert(!mActiveTechnique.isNull());
return mActiveTechnique->addFunctionInvocations(programSet);
}
//-----------------------------------------------------------------------
HardwareSkinningFactory::HardwareSkinningFactory() :
mMaxCalculableBoneCount(70)<|fim▁hole|>//-----------------------------------------------------------------------
const String& HardwareSkinningFactory::getType() const
{
return HardwareSkinning::Type;
}
//-----------------------------------------------------------------------
SubRenderState* HardwareSkinningFactory::createInstance(ScriptCompiler* compiler, PropertyAbstractNode* prop, Pass* pass, SGScriptTranslator* translator)
{
if (prop->name == "hardware_skinning")
{
bool hasError = false;
uint32 boneCount = 0;
uint32 weightCount = 0;
String skinningType = "";
SkinningType skinType = ST_LINEAR;
bool correctAntipodalityHandling = false;
bool scalingShearingSupport = false;
if(prop->values.size() >= 2)
{
AbstractNodeList::iterator it = prop->values.begin();
if(false == SGScriptTranslator::getUInt(*it, &boneCount))
hasError = true;
++it;
if(false == SGScriptTranslator::getUInt(*it, &weightCount))
hasError = true;
if(prop->values.size() >= 5)
{
++it;
SGScriptTranslator::getString(*it, &skinningType);
++it;
SGScriptTranslator::getBoolean(*it, &correctAntipodalityHandling);
++it;
SGScriptTranslator::getBoolean(*it, &scalingShearingSupport);
}
//If the skinningType is not specified or is specified incorrectly, default to linear skinning.
if(skinningType == "dual_quaternion")
{
skinType = ST_DUAL_QUATERNION;
}
else
{
skinType = ST_LINEAR;
}
}
if (hasError == true)
{
compiler->addError(ScriptCompiler::CE_INVALIDPARAMETERS, prop->file, prop->line, "Expected the format: hardware_skinning <bone count> <weight count> [skinning type] [correct antipodality handling] [scaling/shearing support]");
return NULL;
}
else
{
//create and update the hardware skinning sub render state
SubRenderState* subRenderState = createOrRetrieveInstance(translator);
HardwareSkinning* hardSkinSrs = static_cast<HardwareSkinning*>(subRenderState);
hardSkinSrs->setHardwareSkinningParam(boneCount, weightCount, skinType, correctAntipodalityHandling, scalingShearingSupport);
return subRenderState;
}
}
return NULL;
}
//-----------------------------------------------------------------------
void HardwareSkinningFactory::writeInstance(MaterialSerializer* ser, SubRenderState* subRenderState,
Pass* srcPass, Pass* dstPass)
{
ser->writeAttribute(4, "hardware_skinning");
HardwareSkinning* hardSkinSrs = static_cast<HardwareSkinning*>(subRenderState);
ser->writeValue(StringConverter::toString(hardSkinSrs->getBoneCount()));
ser->writeValue(StringConverter::toString(hardSkinSrs->getWeightCount()));
//Correct antipodality handling and scaling and shearing support are only really valid for dual quaternion skinning
if(hardSkinSrs->getSkinningType() == ST_DUAL_QUATERNION)
{
ser->writeValue("dual_quaternion");
ser->writeValue(StringConverter::toString(hardSkinSrs->hasCorrectAntipodalityHandling()));
ser->writeValue(StringConverter::toString(hardSkinSrs->hasScalingShearingSupport()));
}
}
//-----------------------------------------------------------------------
SubRenderState* HardwareSkinningFactory::createInstanceImpl()
{
HardwareSkinning* pSkin = OGRE_NEW HardwareSkinning;
pSkin->_setCreator(this);
return pSkin;
}
//-----------------------------------------------------------------------
void HardwareSkinningFactory::setCustomShadowCasterMaterials(const SkinningType skinningType, const MaterialPtr& caster1Weight, const MaterialPtr& caster2Weight,
const MaterialPtr& caster3Weight, const MaterialPtr& caster4Weight)
{
if(skinningType == ST_DUAL_QUATERNION)
{
mCustomShadowCasterMaterialsDualQuaternion[0] = caster1Weight;
mCustomShadowCasterMaterialsDualQuaternion[1] = caster2Weight;
mCustomShadowCasterMaterialsDualQuaternion[2] = caster3Weight;
mCustomShadowCasterMaterialsDualQuaternion[3] = caster4Weight;
}
else //if(skinningType == ST_LINEAR)
{
mCustomShadowCasterMaterialsLinear[0] = caster1Weight;
mCustomShadowCasterMaterialsLinear[1] = caster2Weight;
mCustomShadowCasterMaterialsLinear[2] = caster3Weight;
mCustomShadowCasterMaterialsLinear[3] = caster4Weight;
}
}
//-----------------------------------------------------------------------
void HardwareSkinningFactory::setCustomShadowReceiverMaterials(const SkinningType skinningType, const MaterialPtr& receiver1Weight, const MaterialPtr& receiver2Weight,
const MaterialPtr& receiver3Weight, const MaterialPtr& receiver4Weight)
{
if(skinningType == ST_DUAL_QUATERNION)
{
mCustomShadowReceiverMaterialsDualQuaternion[0] = receiver1Weight;
mCustomShadowReceiverMaterialsDualQuaternion[1] = receiver2Weight;
mCustomShadowReceiverMaterialsDualQuaternion[2] = receiver3Weight;
mCustomShadowReceiverMaterialsDualQuaternion[3] = receiver4Weight;
}
else //if(skinningType == ST_LINEAR)
{
mCustomShadowReceiverMaterialsLinear[0] = receiver1Weight;
mCustomShadowReceiverMaterialsLinear[1] = receiver2Weight;
mCustomShadowReceiverMaterialsLinear[2] = receiver3Weight;
mCustomShadowReceiverMaterialsLinear[3] = receiver4Weight;
}
}
//-----------------------------------------------------------------------
const MaterialPtr& HardwareSkinningFactory::getCustomShadowCasterMaterial(const SkinningType skinningType, ushort index) const
{
assert(index < HS_MAX_WEIGHT_COUNT);
if(skinningType == ST_DUAL_QUATERNION)
{
return mCustomShadowCasterMaterialsDualQuaternion[index];
}
else //if(skinningType = ST_LINEAR)
{
return mCustomShadowCasterMaterialsLinear[index];
}
}
//-----------------------------------------------------------------------
const MaterialPtr& HardwareSkinningFactory::getCustomShadowReceiverMaterial(const SkinningType skinningType, ushort index) const
{
assert(index < HS_MAX_WEIGHT_COUNT);
if(skinningType == ST_DUAL_QUATERNION)
{
return mCustomShadowReceiverMaterialsDualQuaternion[index];
}
else //if(skinningType == ST_LINEAR)
{
return mCustomShadowReceiverMaterialsLinear[index];
}
}
//-----------------------------------------------------------------------
void HardwareSkinningFactory::prepareEntityForSkinning(const Entity* pEntity, SkinningType skinningType,
bool correctAntipodalityHandling, bool shearScale)
{
if (pEntity != NULL)
{
size_t lodLevels = pEntity->getNumManualLodLevels() + 1;
for(size_t indexLod = 0 ; indexLod < lodLevels ; ++indexLod)
{
const Entity* pCurEntity = pEntity;
if (indexLod > 0) pCurEntity = pEntity->getManualLodLevel(indexLod - 1);
unsigned int numSubEntities = pCurEntity->getNumSubEntities();
for(unsigned int indexSub = 0 ; indexSub < numSubEntities ; ++indexSub)
{
ushort boneCount = 0,weightCount = 0;
bool isValid = extractSkeletonData(pCurEntity, indexSub, boneCount, weightCount);
SubEntity* pSubEntity = pCurEntity->getSubEntity(indexSub);
const MaterialPtr& pMat = pSubEntity->getMaterial();
imprintSkeletonData(pMat, isValid, boneCount, weightCount, skinningType, correctAntipodalityHandling, shearScale);
}
}
}
}
//-----------------------------------------------------------------------
bool HardwareSkinningFactory::extractSkeletonData(const Entity* pEntity, unsigned int subEntityIndex, ushort& boneCount, ushort& weightCount)
{
bool isValidData = false;
boneCount = 0;
weightCount = 0;
//Check if we have pose animation which the HS sub render state does not
//know how to handle
bool hasVertexAnim = pEntity->getMesh()->hasVertexAnimation();
//gather data on the skeleton
if (!hasVertexAnim && pEntity->hasSkeleton())
{
//get weights count
MeshPtr pMesh = pEntity->getMesh();
RenderOperation ro;
SubMesh* pSubMesh = pMesh->getSubMesh(subEntityIndex);
pSubMesh->_getRenderOperation(ro,0);
//get the largest bone assignment
boneCount = std::max<ushort>(pMesh->sharedBlendIndexToBoneIndexMap.size(), pSubMesh->blendIndexToBoneIndexMap.size());
//go over vertex deceleration
//check that they have blend indices and blend weights
const VertexElement* pDeclWeights = ro.vertexData->vertexDeclaration->findElementBySemantic(VES_BLEND_WEIGHTS,0);
const VertexElement* pDeclIndexes = ro.vertexData->vertexDeclaration->findElementBySemantic(VES_BLEND_INDICES,0);
if ((pDeclWeights != NULL) && (pDeclIndexes != NULL))
{
isValidData = true;
switch (pDeclWeights->getType())
{
case VET_FLOAT1: weightCount = 1; break;
case VET_FLOAT2: weightCount = 2; break;
case VET_FLOAT3: weightCount = 3; break;
case VET_FLOAT4: weightCount = 4; break;
default: isValidData = false;
}
}
}
return isValidData;
}
//-----------------------------------------------------------------------
bool HardwareSkinningFactory::imprintSkeletonData(const MaterialPtr& pMaterial, bool isValid,
ushort boneCount, ushort weightCount, SkinningType skinningType, bool correctAntipodalityHandling, bool scalingShearingSupport)
{
bool isUpdated = false;
if (pMaterial->getNumTechniques() > 0)
{
HardwareSkinning::SkinningData data;
//get the previous skinning data if available
UserObjectBindings& binding = pMaterial->getTechnique(0)->getUserObjectBindings();
const Any& hsAny = binding.getUserAny(HS_DATA_BIND_NAME);
if (hsAny.isEmpty() == false)
{
data = (any_cast<HardwareSkinning::SkinningData>(hsAny));
}
//check if we need to update the data
if (((data.isValid == true) && (isValid == false)) ||
(data.maxBoneCount < boneCount) ||
(data.maxWeightCount < weightCount))
{
//update the data
isUpdated = true;
data.isValid &= isValid;
data.maxBoneCount = std::max<ushort>(data.maxBoneCount, boneCount);
data.maxWeightCount = std::max<ushort>(data.maxWeightCount, weightCount);
data.skinningType = skinningType;
data.correctAntipodalityHandling = correctAntipodalityHandling;
data.scalingShearingSupport = scalingShearingSupport;
//update the data in the material and invalidate it in the RTShader system
//do it will be regenerated
binding.setUserAny(HS_DATA_BIND_NAME, Any(data));
size_t schemeCount = ShaderGenerator::getSingleton().getRTShaderSchemeCount();
for(size_t i = 0 ; i < schemeCount ; ++i)
{
//invalidate the material so it will be recreated with the correct
//amount of bones and weights
const String& schemeName = ShaderGenerator::getSingleton().getRTShaderScheme(i);
ShaderGenerator::getSingleton().invalidateMaterial(
schemeName, pMaterial->getName(), pMaterial->getGroup());
}
}
}
return isUpdated;
}
}
}
#endif<|fim▁end|> | {
}
|
<|file_name|>how-to-snippet.component.ts<|end_file_name|><|fim▁begin|>import { Component, OnInit } from '@angular/core';
import { environment } from '../../../../environments/environment';
@Component({
selector: 'app-howto-snippets',
templateUrl: './how-to-snippet.component.html',<|fim▁hole|>export class HowToSnippetComponent implements OnInit {
environment = environment;
ngOnInit() {}
}<|fim▁end|> | styleUrls: ['./how-to-snippet.component.scss']
}) |
<|file_name|>test_properties.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
/properties/
/properties/:id/<|fim▁hole|>/properties/groups/:name/
"""
from django.test import TestCase
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from ..models import Property, PropertyGroup
from ..defaults import PROPERTY_TEXT_FIELD
class TestPropertiesCRUD(APITestCase):
fixtures = ['erp_test/tests/fixtures/properties_crud.json',]
def test_property_list(self):
url = reverse('api:property-list')
response = self.client.get(url, format='json')
data = [{'id': obj.id, 'name': obj.name, 'title': obj.title,
'required': obj.required, 'position': obj.position,
'type': obj.type, 'unit': obj.unit
} for obj in Property.objects.all()]
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, data)
def test_property_create(self):
url = reverse('api:property-list')
data = {'name': 'test', 'title': 'Test', 'required': False,
'position': 999, 'type': PROPERTY_TEXT_FIELD}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_property_update(self):
url = reverse('api:property-detail', args=[1])
data = {'name': 'new server'}
response = self.client.patch(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_property_delete(self):
url = reverse('api:property-detail', args=[1])
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class TestPropertyGroupCRUD(APITestCase):
fixtures = ['erp_test/tests/fixtures/property_groups.json',]
def test_property_group_list(self):
url = reverse('api:property-group-list')
data = [{'id': obj.id, 'name': obj.name}
for obj in PropertyGroup.objects.all()]
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, data)
def test_property_group_detail_by_pk(self):
url = reverse('api:property-group-detail', args=[2])
data = {'id': 2, 'name': 'cpu',
'properties': [
{'id': 2, 'name': 'cpu.socket',
'title': 'CPU Socket', 'required': True,
'position': 2, 'type': 3, 'unit': ''},
]}
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, data)
def test_property_group_detail_by_name(self):
url = reverse('api:property-group-detail-by_name', args=['cpu'])
data = {'id': 2, 'name': 'cpu',
'properties': [
{'id': 2, 'name': 'cpu.socket',
'title': 'CPU Socket', 'required': True,
'position': 2, 'type': 3, 'unit': ''},
]}
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, data)<|fim▁end|> | /properties/groups/
/properties/groups/:id/ |
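# The assertions above pin down a read-only API: GET succeeds while POST,
# PATCH and DELETE answer 405. A minimal DRF view satisfying them would be a
# read-only viewset (sketch; PropertySerializer is a hypothetical serializer,
# not defined in this test module):
#
#     from rest_framework import viewsets
#
#     class PropertyViewSet(viewsets.ReadOnlyModelViewSet):
#         queryset = Property.objects.all()
#         serializer_class = PropertySerializer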
<|file_name|>constants.py<|end_file_name|><|fim▁begin|>"""
Constants specific to the SQL storage portion of the ORM.
"""
from collections import namedtuple
import re
# Valid query types (a set is used for speedy lookups). These are (currently)
# considered SQL-specific; other storage systems may choose to use different
# lookup types.
QUERY_TERMS = {
'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
'month', 'day', 'week_day', 'hour', 'minute', 'second', 'isnull', 'search',
'regex', 'iregex',
}
# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100
# Namedtuples for sql.* internal use.
# Join lists (indexes into the tuples that are values in the alias_map<|fim▁hole|>JoinInfo = namedtuple('JoinInfo',
'table_name rhs_alias join_type lhs_alias '
'join_cols nullable join_field')
# Pairs of column clauses to select, and (possibly None) field for the clause.
SelectInfo = namedtuple('SelectInfo', 'col field')
# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'
CURSOR = 'cursor'
NO_RESULTS = 'no results'
ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')
ORDER_DIR = {
'ASC': ('ASC', 'DESC'),
'DESC': ('DESC', 'ASC'),
}<|fim▁end|> | # dictionary in the Query class). |
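# Illustration of what ORDER_PATTERN accepts as an order_by() term: an
# optional "-"/"+" direction prefix, dotted identifier characters, or a lone
# "?" for random ordering (examples added for clarity, not part of Django):
#
#     assert ORDER_PATTERN.match('-created.date')
#     assert ORDER_PATTERN.match('?')
#     assert not ORDER_PATTERN.match('name; DROP TABLE x')  # unsafe, rejected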
<|file_name|>always_update.py<|end_file_name|><|fim▁begin|># coding=utf-8
from __future__ import print_function
"""
Place in ~/.octoprint/plugins & restart server to test:
* python_checker and python_updater mechanism
* demotion of pip and python setup.py clean output that
gets written to stderr but isn't as severe as it would
look
Plugin will always demand to update itself, multiple
consecutive runs are not a problem.
"""
import time
NAME = "Always Update"
OLD_VERSION = "1.0.0"
NEW_VERSION = "2.0.0"
class Foo(object):
def get_latest(self, target, check, full_data=None):
information = dict(local=dict(name=OLD_VERSION, value=OLD_VERSION),<|fim▁hole|> remote=dict(name=NEW_VERSION, value=NEW_VERSION))
current = False
return information, current
def can_perform_update(self, target, check):
return True
def perform_update(self, target, check, target_version, log_cb=None):
if not callable(log_cb):
import sys
def log_cb(lines, prefix=None, stream=None, strip=True):
if stream == "stdout":
f = sys.stdout
elif stream == "stderr":
f = sys.stderr
else:
f = None
for line in lines:
print(line, file=f)
log_cb(["Updating Always Update..."])
time.sleep(1)
log_cb(["running clean",
"recursively removing *.pyc from 'src'"],
stream="stdout")
log_cb(["'build/lib' does not exist -- can't clean it",
"'build/bdist.win32' does not exist -- can't clean it",
"'build/scripts-2.7' does not exist -- can't clean it"],
stream="stderr")
log_cb(["removing 'Development\OctoPrint\OctoPrint\src\octoprint_setuptools\__init__.pyc'"],
stream="stdout")
time.sleep(1)
log_cb(["This should be red"],
stream="stderr")
log_cb(["You are using pip version 7.1.2, however version 9.0.1 is available.",
"You should consider upgrading via the 'python -m pip install --upgrade pip' command."],
stream="stderr")
time.sleep(3)
log_cb(["Done!"])
def get_update_information():
foo = Foo()
return dict(
always_update=dict(
displayName=NAME,
displayVersion=OLD_VERSION,
type="python_checker",
python_checker=foo,
python_updater=foo
)
)
__plugin_name__ = NAME
__plugin_hooks__ = {
"octoprint.plugin.softwareupdate.check_config": get_update_information,
}<|fim▁end|> | |
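# Rough lifecycle (as driven by OctoPrint's softwareupdate bundle): the hook
# above registers the check on startup, OctoPrint calls Foo.get_latest() and,
# since the reported versions never match, offers an update; confirming it
# runs Foo.perform_update(), whose log_cb lines land in the update log with
# the stderr output demoted rather than treated as a hard failure.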
<|file_name|>test_activity_log.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010-2012 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import namedtuple
import os
import shutil
import stat
import tempfile
import unittest
from aybu.manager.activity_log import ActivityLog
from aybu.manager.activity_log.fs import (mkdir,
create,
copy,
mv,
rm,
rmdir,
rmtree)
from aybu.manager.activity_log.exc import TransactionError
from aybu.manager.activity_log.template import render
class ActivityLogTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_create(self):
al = ActivityLog()
# test rollback
file_ = os.path.join(self.tempdir, 'test.txt')
al.add(create, file_)
self.assertTrue(os.path.exists(file_))
al.rollback()
self.assertFalse(os.path.exists(file_))
# test successfull create
al.add(create, file_)
al.commit()
self.assertTrue(os.path.exists(file_))
# test unsuccessfull create
with self.assertRaises(OSError):
al.add(create, file_)
self.assertTrue(os.path.exists(file_))
def test_transaction_status(self):
al = ActivityLog(autobegin=False)
with self.assertRaises(TransactionError):<|fim▁hole|>
al.begin()
al.commit()
with self.assertRaises(TransactionError):
al.commit()
def test_transaction(self):
al = ActivityLog()
dir_ = os.path.join(self.tempdir, 'test')
join = os.path.join
def dostuff():
al.add(mkdir, dir_)
al.add(create, join(dir_, 'testfile.txt'), content="Test")
al.add(copy, join(dir_, 'testfile.txt'), join(dir_, 'test2.txt'))
dostuff()
al.rollback()
self.assertFalse(os.path.exists(join(dir_, 'test2.txt')))
self.assertFalse(os.path.exists(join(dir_, 'testfile.txt')))
self.assertFalse(os.path.exists(dir_))
dostuff()
al.commit()
self.assertTrue(os.path.exists(dir_))
self.assertTrue(os.path.exists(join(dir_, 'testfile.txt')))
self.assertTrue(os.path.exists(join(dir_, 'test2.txt')))
def test_failed_rollback(self):
al = ActivityLog()
dir_ = os.path.join(self.tempdir, 'test')
inner_dir = os.path.join(dir_, 'inner')
al.add(mkdir, dir_)
al.add(mkdir, inner_dir)
os.chmod(dir_, stat.S_IRUSR|stat.S_IXUSR)
with self.assertRaises(OSError):
al.rollback()
self.assertTrue(os.path.exists(dir_))
self.assertTrue(os.path.exists(inner_dir))
os.chmod(dir_, stat.S_IRWXU | stat.S_IRWXG)
def test_error_on_exists(self):
al = ActivityLog()
dir_ = os.path.join(self.tempdir, 'test')
al.add(mkdir, dir_)
al.commit()
al.add(mkdir, dir_, error_on_exists=False)
al.rollback()
self.assertTrue(os.path.exists(dir_))
def test_render(self):
al = ActivityLog()
instance = namedtuple('Instance', ['paths', 'environment'])(
paths=namedtuple('Paths', ['pyramid_config', 'alembic_config'])(
pyramid_config='MYDUMMYCONFIG',
alembic_config='MYDUMMYCONFIG'
),
environment= namedtuple('Environment', ['settings',
'smtp_config',
'uwsgi_config',
'os_config'])(
smtp_config=None,
uwsgi_config=None,
os_config=None,
settings=None
)
)
template_name = 'main.py.mako'
target = os.path.join(self.tempdir, 'main.py')
al.add(render, template_name, target, instance=instance)
self.assertTrue(os.path.exists(target))
with open(target) as f:
self.assertIn('MYDUMMYCONFIG', f.read())
al.rollback()
self.assertFalse(os.path.exists(target))
al.add(render, template_name, target, deferred=True, instance=instance)
self.assertFalse(os.path.exists(target))
al.commit()
self.assertTrue(os.path.exists(target))
def test_delete(self):
al = ActivityLog()
testfile = os.path.join(self.tempdir, 'test.txt')
with self.assertRaises(OSError):
al.add(rm, testfile)
al.add(rm, testfile, error_on_not_exists=False)
al.commit()
with open(testfile, "w") as f:
f.write("###")
al.add(rm, testfile)
self.assertFalse(os.path.exists(testfile))
al.rollback()
self.assertTrue(os.path.exists(testfile))
al.add(rm, testfile)
self.assertFalse(os.path.exists(testfile))
al.commit()
self.assertFalse(os.path.exists(testfile))
testdir = os.path.join(self.tempdir, 'test')
al.add(mkdir, testdir)
al.commit()
# test rmdir
al.add(rmdir, testdir)
self.assertFalse(os.path.exists(testdir))
al.rollback()
self.assertTrue(os.path.exists(testdir))
al.add(rmdir, testdir)
al.commit()
self.assertFalse(os.path.exists(testdir))
# test rmtree
al.add(mkdir, testdir)
inner = os.path.join(testdir, 'inner')
al.add(mkdir, inner)
al.commit()
al.add(rmtree, testdir)
self.assertFalse(os.path.exists(testdir))
al.rollback()
self.assertTrue(os.path.exists(testdir))
al.add(rmtree, testdir)
al.commit()
self.assertFalse(os.path.exists(testdir))
def test_mv(self):
al = ActivityLog()
source = os.path.join(self.tempdir, "source")
destination = os.path.join(self.tempdir, "destination")
os.mkdir(source)
os.mkdir(destination)
with self.assertRaises(OSError):
al.add(mv, source, destination)
shutil.rmtree(destination)
al.add(mv, source, destination)
self.assertFalse(os.path.exists(source))
self.assertTrue(os.path.exists(destination))
al.rollback()
self.assertTrue(os.path.exists(source))
self.assertFalse(os.path.exists(destination))
al.add(mv, source, destination)
al.commit()
self.assertFalse(os.path.exists(source))
self.assertTrue(os.path.exists(destination))<|fim▁end|> | al.commit()
with self.assertRaises(TransactionError):
al.rollback() |
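# Usage sketch distilled from the tests above (paths are illustrative):
#
#     al = ActivityLog()
#     al.add(mkdir, '/tmp/demo')                           # applied immediately
#     al.add(create, '/tmp/demo/hello.txt', content='hi')
#     al.commit()   # keep both changes; al.rollback() would instead undo
#                   # them in reverse order (remove the file, then the dir)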
<|file_name|>createBucketWindow.js<|end_file_name|><|fim▁begin|>/*
* File: app/view/createBucketWindow.js
*
* This file was generated by Sencha Architect version 3.0.4.
* http://www.sencha.com/products/architect/
*
* This file requires use of the Ext JS 4.2.x library, under independent license.
* License of Sencha Architect does not include license for Ext JS 4.2.x. For more
* details see http://www.sencha.com/license or contact [email protected].
*
* This file will be auto-generated each and everytime you save your project.
*
* Do NOT hand edit this file.
*/
Ext.define('MyApp.view.createBucketWindow', {
extend: 'Ext.window.Window',
alias: 'widget.createBucketWindow',
requires: [
'Ext.form.Panel',
'Ext.form.FieldSet',
'Ext.form.field.Text',
'Ext.toolbar.Toolbar',
'Ext.button.Button'
],
height: 172,
width: 400,
resizable: false,
layout: 'border',
title: 'Create Bucket',
modal: true,
initComponent: function() {
var me = this;
Ext.applyIf(me, {
items: [
{
xtype: 'panel',
region: 'center',
id: 'createBucketTopPanel',
itemId: 'createBucketTopPanel',
items: [
{
xtype: 'form',
height: 156,
id: 'createBucketFormPanel',
itemId: 'createBucketFormPanel',
bodyPadding: 10,
items: [
{
xtype: 'fieldset',
padding: 10,
title: 'Bucket Data ',
items: [
{
xtype: 'textfield',
anchor: '100%',
id: 'createBucketName',
itemId: 'createBucketName',
fieldLabel: 'Name',
labelAlign: 'right',
labelWidth: 54,
name: 'bucketName',
allowBlank: false,
allowOnlyWhitespace: false,
enforceMaxLength: true,
maskRe: /[a-z0-9\-]/,<|fim▁hole|> maxLength: 32,
regex: /^[a-z0-9\-]+$/
}
]
}
]
}
],
dockedItems: [
{
xtype: 'toolbar',
dock: 'bottom',
id: 'createBucketToolbar',
itemId: 'createBucketToolbar',
layout: {
type: 'hbox',
pack: 'center'
},
items: [
{
xtype: 'button',
handler: function(button, e) {
objectConstants.me.createObjectBucket();
},
padding: '2 20 2 20',
text: 'Ok'
},
{
xtype: 'button',
handler: function(button, e) {
var myForm = Ext.getCmp("createBucketFormPanel");
myForm.getForm().findField("createBucketName").setValue('');
},
padding: '2 12 2 12',
text: 'Clear'
}
]
}
]
}
]
});
me.callParent(arguments);
}
});<|fim▁end|> | |
<|file_name|>HttpStatusCode.java<|end_file_name|><|fim▁begin|>/***************************************************************************
Copyright (c) 2016, EPAM SYSTEMS INC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
****************************************************************************/
package com.epam.dlab.automation.http;
<|fim▁hole|> public static final int UNAUTHORIZED = 401;
public static final int ACCEPTED = 202;
public static final int NOT_FOUND = 404;
private HttpStatusCode() {
}
}<|fim▁end|> | public class HttpStatusCode {
public static final int OK = 200; |
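// Usage sketch (hypothetical REST-assured style test code, not part of this
// class):
//
//     Response response = given().contentType(ContentType.JSON).get(url);
//     Assert.assertEquals(response.getStatusCode(), HttpStatusCode.OK);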
<|file_name|>commands.py<|end_file_name|><|fim▁begin|>import os
from gettext import gettext as _
from pulp.bindings.exceptions import NotFoundException
from pulp.client.arg_utils import convert_boolean_arguments
from pulp.client.extensions.decorator import priority
from pulp.client.extensions.extensions import PulpCliCommand, PulpCliOption
from pulp.client.commands.polling import PollingCommand
from pulp.client.commands.consumer.query import ConsumerListCommand
from pulp.client.commands.options import OPTION_REPO_ID, OPTION_CONSUMER_ID
from pulp.client.commands.repo.cudl import ListRepositoriesCommand
from pulp_node import constants
from pulp_node.extension import (missing_resources, node_activated, repository_enabled,
ensure_node_section)
from pulp_node.extensions.admin import sync_schedules
from pulp_node.extensions.admin.options import (NODE_ID_OPTION, MAX_BANDWIDTH_OPTION,
MAX_CONCURRENCY_OPTION)
from pulp_node.extensions.admin.rendering import ProgressTracker, UpdateRenderer
NODE = _('Node')
CONSUMER = _('Consumer')
REPOSITORY = _('Repository')
REPO_NAME = 'repo'
ACTIVATE_NAME = 'activate'
DEACTIVATE_NAME = 'deactivate'
ENABLE_NAME = 'enable'
DISABLE_NAME = 'disable'
SYNC_NAME = 'sync'
PUBLISH_NAME = 'publish'
BIND_NAME = 'bind'
UNBIND_NAME = 'unbind'
UPDATE_NAME = 'run'
SCHEDULES_NAME = 'schedules'
NODE_LIST_DESC = _('list child nodes')
REPO_LIST_DESC = _('list node enabled repositories')
ACTIVATE_DESC = _('activate a consumer as a child node')
DEACTIVATE_DESC = _('deactivate a child node')
BIND_DESC = _('bind a child node to a repository')
UNBIND_DESC = _('removes the binding between a child node and a repository')
UPDATE_DESC = _('triggers an immediate synchronization of a child node')
ENABLE_DESC = _('enables binding to a repository by a child node')
DISABLE_DESC = _('disables binding to a repository by a child node')
REPO_DESC = _('repository related commands')
AUTO_PUBLISH_DESC = _('if "true", the nodes information will be automatically published each '
'time the repository is synchronized; defaults to "true"')
SYNC_DESC = _('child node synchronization commands')
PUBLISH_DESC = _('publishing commands')
STRATEGY_DESC = _('synchronization strategy (mirror|additive) default is additive')
SCHEDULES_DESC = _('manage node sync schedules')
NODE_LIST_TITLE = _('Child Nodes')
REPO_LIST_TITLE = _('Enabled Repositories')
AUTO_PUBLISH_OPTION = PulpCliOption('--auto-publish', AUTO_PUBLISH_DESC, required=False,
default='true')
STRATEGY_OPTION = \
PulpCliOption('--strategy', STRATEGY_DESC, required=False, default=constants.ADDITIVE_STRATEGY)
# --- messages ---------------------------------------------------------------
REPO_ENABLED = _('Repository enabled.')
REPO_DISABLED = _('Repository disabled.')
PUBLISH_SUCCEEDED = _('Publish succeeded.')
PUBLISH_FAILED = _('Publish failed. See: pulp log for details.')
NODE_ACTIVATED = _('Consumer activated as child node.')
NODE_DEACTIVATED = _('Child node deactivated.')
BIND_SUCCEEDED = _('Node bind succeeded.')
UNBIND_SUCCEEDED = _('Node unbind succeeded')
ALREADY_ENABLED = _('Repository already enabled. Nothing done.')
FAILED_NOT_ENABLED = _('Repository not enabled. See: the \'node repo enable\' command.')
NOT_BOUND_NOTHING_DONE = _('Node not bound to repository. No action performed.')
NOT_ACTIVATED_ERROR = _(
'%(t)s [ %(id)s ] not activated as a node. See: the \'node activate\' command.')
NOT_ACTIVATED_NOTHING_DONE = _('%(t)s is not activated as a node. No action performed.')
NOT_ENABLED_NOTHING_DONE = _('%(t)s not enabled. No action performed.')
STRATEGY_NOT_SUPPORTED = _('Strategy [ %(n)s ] not supported. Must be one of: %(s)s')
RESOURCE_MISSING_ERROR = _('%(t)s [ %(id)s ] not found on the server.')
ALREADY_ACTIVATED_NOTHING_DONE = _('%(n)s already activated as child node. No action performed.')
BIND_WARNING = \
_('Note: Repository [ %(r)s ] will be included in node synchronization.')
UNBIND_WARNING = \
_('Warning: Repository [ %(r)s ] will NOT be included in node synchronization')
ENABLE_WARNING = \
_('Note: Repository [ %(r)s ] will not be available for node synchronization until published.'
' See: the \'node repo publish\' command.')
AUTO_PUBLISH_WARNING = \
_('Warning: enabling with auto-publish may degrade repository synchronization performance.')
# --- extension loading ------------------------------------------------------
@priority()
def initialize(context):
"""
:type context: pulp.client.extensions.core.ClientContext
"""
node_section = ensure_node_section(context.cli)
node_section.add_command(NodeListCommand(context))
node_section.add_command(NodeActivateCommand(context))
node_section.add_command(NodeDeactivateCommand(context))
node_section.add_command(NodeBindCommand(context))
node_section.add_command(NodeUnbindCommand(context))
repo_section = node_section.create_subsection(REPO_NAME, REPO_DESC)
repo_section.add_command(NodeRepoEnableCommand(context))
repo_section.add_command(NodeRepoDisableCommand(context))
repo_section.add_command(NodeListRepositoriesCommand(context))
repo_section.add_command(NodeRepoPublishCommand(context))
sync_section = node_section.create_subsection(SYNC_NAME, SYNC_DESC)
sync_section.add_command(NodeUpdateCommand(context))
schedules_section = sync_section.create_subsection(SCHEDULES_NAME, SCHEDULES_DESC)
schedules_section.add_command(sync_schedules.NodeCreateScheduleCommand(context))
schedules_section.add_command(sync_schedules.NodeDeleteScheduleCommand(context))
schedules_section.add_command(sync_schedules.NodeUpdateScheduleCommand(context))
schedules_section.add_command(sync_schedules.NodeListScheduleCommand(context))
schedules_section.add_command(sync_schedules.NodeNextRunCommand(context))
# --- listing ----------------------------------------------------------------
class NodeListCommand(ConsumerListCommand):
STRATEGY_FIELD = 'update_strategy'
_ALL_FIELDS = ConsumerListCommand._ALL_FIELDS[0:-1] \
+ [STRATEGY_FIELD] + ConsumerListCommand._ALL_FIELDS[-1:]
def __init__(self, context):
super(NodeListCommand, self).__init__(context, description=NODE_LIST_DESC)
def get_title(self):
return NODE_LIST_TITLE
def get_consumer_list(self, kwargs):
nodes = []
for consumer in super(NodeListCommand, self).get_consumer_list(kwargs):
notes = consumer['notes']
if not notes.get(constants.NODE_NOTE_KEY):
continue
consumer[self.STRATEGY_FIELD] = \
notes.get(constants.STRATEGY_NOTE_KEY, constants.DEFAULT_STRATEGY)
nodes.append(consumer)
return nodes
def format_bindings(self, consumer):
formatted = {}
key = 'bindings'
for binding in consumer.get(key, []):
repo_id = binding['repo_id']
type_id = binding['type_id']
if type_id not in constants.ALL_DISTRIBUTORS:
# nodes only
continue
strategy = binding['binding_config'].get('strategy', constants.DEFAULT_STRATEGY)
repo_ids = formatted.get(strategy)
if repo_ids is None:
repo_ids = []
formatted[strategy] = repo_ids
repo_ids.append(repo_id)
consumer[key] = formatted
class NodeListRepositoriesCommand(ListRepositoriesCommand):
def __init__(self, context):
super(NodeListRepositoriesCommand, self).__init__(
context,
description=REPO_LIST_DESC,
repos_title=REPO_LIST_TITLE)
def get_repositories(self, query_params, **kwargs):
enabled = []
_super = super(NodeListRepositoriesCommand, self)
repositories = _super.get_repositories(query_params, **kwargs)
for repository in repositories:
repo_id = repository['id']
http = self.context.server.repo_distributor.distributors(repo_id)
for dist in http.response_body:
if dist['distributor_type_id'] in constants.ALL_DISTRIBUTORS:
enabled.append(repository)
return enabled
# --- publishing -------------------------------------------------------------
class NodeRepoPublishCommand(PollingCommand):
def __init__(self, context):
super(NodeRepoPublishCommand, self).__init__(PUBLISH_NAME, PUBLISH_DESC, self.run, context)
self.add_option(OPTION_REPO_ID)
def run(self, **kwargs):
repo_id = kwargs[OPTION_REPO_ID.keyword]
if not repository_enabled(self.context, repo_id):
msg = FAILED_NOT_ENABLED
self.context.prompt.render_success_message(msg)
return
try:
http = self.context.server.repo_actions.publish(repo_id, constants.HTTP_DISTRIBUTOR, {})
task = http.response_body
self.poll([task], kwargs)
except NotFoundException, e:
for _id, _type in missing_resources(e):
if _type == 'repo_id':
msg = RESOURCE_MISSING_ERROR % dict(t=REPOSITORY, id=_id)
self.context.prompt.render_failure_message(msg)
else:
raise
return os.EX_DATAERR
def succeeded(self, task):
self.context.prompt.render_success_message(PUBLISH_SUCCEEDED)
def failed(self, task):
self.context.prompt.render_failure_message(PUBLISH_FAILED)
# --- activation -------------------------------------------------------------
class NodeActivateCommand(PulpCliCommand):
def __init__(self, context):
super(NodeActivateCommand, self).__init__(ACTIVATE_NAME, ACTIVATE_DESC, self.run)
self.add_option(OPTION_CONSUMER_ID)
self.add_option(STRATEGY_OPTION)
self.context = context
def run(self, **kwargs):
consumer_id = kwargs[OPTION_CONSUMER_ID.keyword]
strategy = kwargs[STRATEGY_OPTION.keyword]
delta = {'notes': {constants.NODE_NOTE_KEY: True, constants.STRATEGY_NOTE_KEY: strategy}}
if node_activated(self.context, consumer_id):
msg = ALREADY_ACTIVATED_NOTHING_DONE % dict(n=CONSUMER)
self.context.prompt.render_success_message(msg)
return
if strategy not in constants.STRATEGIES:
msg = STRATEGY_NOT_SUPPORTED % dict(n=strategy, s=constants.STRATEGIES)
self.context.prompt.render_failure_message(msg)
return os.EX_DATAERR
try:
self.context.server.consumer.update(consumer_id, delta)
self.context.prompt.render_success_message(NODE_ACTIVATED)
except NotFoundException, e:
for _id, _type in missing_resources(e):
if _type == 'consumer':
msg = RESOURCE_MISSING_ERROR % dict(t=CONSUMER, id=_id)
self.context.prompt.render_failure_message(msg)
else:
raise
return os.EX_DATAERR
class NodeDeactivateCommand(PulpCliCommand):
def __init__(self, context):
super(NodeDeactivateCommand, self).__init__(DEACTIVATE_NAME, DEACTIVATE_DESC, self.run)
self.add_option(NODE_ID_OPTION)
self.context = context
def run(self, **kwargs):
consumer_id = kwargs[NODE_ID_OPTION.keyword]
delta = {'notes': {constants.NODE_NOTE_KEY: None, constants.STRATEGY_NOTE_KEY: None}}
if not node_activated(self.context, consumer_id):
msg = NOT_ACTIVATED_NOTHING_DONE % dict(t=CONSUMER)
self.context.prompt.render_success_message(msg)
return
try:
self.context.server.consumer.update(consumer_id, delta)
self.context.prompt.render_success_message(NODE_DEACTIVATED)
except NotFoundException, e:
for _id, _type in missing_resources(e):
if _type == 'consumer':
msg = RESOURCE_MISSING_ERROR % dict(t=CONSUMER, id=_id)
self.context.prompt.render_failure_message(msg)
else:
raise
return os.EX_DATAERR
# --- enable -----------------------------------------------------------------
class NodeRepoEnableCommand(PulpCliCommand):
def __init__(self, context):
super(NodeRepoEnableCommand, self).__init__(ENABLE_NAME, ENABLE_DESC, self.run)
self.add_option(OPTION_REPO_ID)
self.add_option(AUTO_PUBLISH_OPTION)
self.context = context
def run(self, **kwargs):
convert_boolean_arguments([AUTO_PUBLISH_OPTION.keyword], kwargs)
repo_id = kwargs[OPTION_REPO_ID.keyword]
auto_publish = kwargs[AUTO_PUBLISH_OPTION.keyword]
binding = self.context.server.repo_distributor
if repository_enabled(self.context, repo_id):
msg = ALREADY_ENABLED
self.context.prompt.render_success_message(msg)
return
try:
binding.create(
repo_id,
constants.HTTP_DISTRIBUTOR,
{},
auto_publish,
constants.HTTP_DISTRIBUTOR)
self.context.prompt.render_success_message(REPO_ENABLED)
self.context.prompt.render_warning_message(ENABLE_WARNING % dict(r=repo_id))
if auto_publish:
self.context.prompt.render_warning_message(AUTO_PUBLISH_WARNING)
except NotFoundException, e:
for _id, _type in missing_resources(e):
if _type == 'repository':
msg = RESOURCE_MISSING_ERROR % dict(t=REPOSITORY, id=_id)
self.context.prompt.render_failure_message(msg)
else:
raise
return os.EX_DATAERR
class NodeRepoDisableCommand(PulpCliCommand):
def __init__(self, context):
super(NodeRepoDisableCommand, self).__init__(DISABLE_NAME, DISABLE_DESC, self.run)
self.add_option(OPTION_REPO_ID)
self.context = context
def run(self, **kwargs):
repo_id = kwargs[OPTION_REPO_ID.keyword]
try:
self.context.server.repo_distributor.delete(repo_id, constants.HTTP_DISTRIBUTOR)
self.context.prompt.render_success_message(REPO_DISABLED)
except NotFoundException, e:
for _id, _type in missing_resources(e):
if _type == 'repository':
msg = RESOURCE_MISSING_ERROR % dict(t=REPOSITORY, id=_id)
self.context.prompt.render_failure_message(msg)
continue
if _type == 'distributor':
msg = NOT_ENABLED_NOTHING_DONE % dict(t=REPOSITORY)
self.context.prompt.render_success_message(msg)
continue
raise
return os.EX_DATAERR
class BindingCommand(PulpCliCommand):
def missing_resources(self, prompt, exception):
unhandled = []
for _id, _type in missing_resources(exception):
if _type == 'consumer_id':
msg = RESOURCE_MISSING_ERROR % dict(t=NODE, id=_id)
prompt.render_failure_message(msg)
continue
if _type == 'repo_id':
msg = RESOURCE_MISSING_ERROR % dict(t=REPOSITORY, id=_id)
prompt.render_failure_message(msg)
continue
unhandled.append((_id, _type))
return unhandled
class NodeBindCommand(BindingCommand):
def __init__(self, context):
super(NodeBindCommand, self).__init__(BIND_NAME, BIND_DESC, self.run)
self.add_option(OPTION_REPO_ID)
self.add_option(NODE_ID_OPTION)
self.add_option(STRATEGY_OPTION)
self.context = context
def run(self, **kwargs):
repo_id = kwargs[OPTION_REPO_ID.keyword]
node_id = kwargs[NODE_ID_OPTION.keyword]
dist_id = constants.HTTP_DISTRIBUTOR
strategy = kwargs[STRATEGY_OPTION.keyword]
binding_config = {constants.STRATEGY_KEYWORD: strategy}
if not node_activated(self.context, node_id):
msg = NOT_ACTIVATED_ERROR % dict(t=CONSUMER, id=node_id)
self.context.prompt.render_failure_message(msg)
return os.EX_USAGE
if strategy not in constants.STRATEGIES:
msg = STRATEGY_NOT_SUPPORTED % dict(n=strategy, s=constants.STRATEGIES)
self.context.prompt.render_failure_message(msg)
return os.EX_DATAERR
try:
self.context.server.bind.bind(
node_id,
repo_id,
dist_id,
notify_agent=False,
binding_config=binding_config)
self.context.prompt.render_success_message(BIND_SUCCEEDED)
warning = BIND_WARNING % dict(r=repo_id)
self.context.prompt.render_warning_message(warning)
except NotFoundException, e:
unhandled = self.missing_resources(self.context.prompt, e)
for _id, _type in unhandled:
if _type == 'distributor':
msg = FAILED_NOT_ENABLED
self.context.prompt.render_failure_message(msg)
else:
raise
return os.EX_DATAERR
class NodeUnbindCommand(BindingCommand):<|fim▁hole|> super(NodeUnbindCommand, self).__init__(UNBIND_NAME, UNBIND_DESC, self.run)
self.add_option(OPTION_REPO_ID)
self.add_option(NODE_ID_OPTION)
self.context = context
def run(self, **kwargs):
repo_id = kwargs[OPTION_REPO_ID.keyword]
node_id = kwargs[NODE_ID_OPTION.keyword]
dist_id = constants.HTTP_DISTRIBUTOR
try:
self.context.server.bind.unbind(node_id, repo_id, dist_id)
self.context.prompt.render_success_message(UNBIND_SUCCEEDED)
warning = UNBIND_WARNING % dict(r=repo_id)
self.context.prompt.render_warning_message(warning)
except NotFoundException, e:
unhandled = self.missing_resources(self.context.prompt, e)
for _id, _type in unhandled:
if _type == 'bind_id':
msg = NOT_BOUND_NOTHING_DONE
self.context.prompt.render_success_message(msg)
else:
raise
return os.EX_DATAERR
class NodeUpdateCommand(PollingCommand):
def __init__(self, context):
super(NodeUpdateCommand, self).__init__(UPDATE_NAME, UPDATE_DESC, self.run, context)
self.add_option(NODE_ID_OPTION)
self.add_option(MAX_CONCURRENCY_OPTION)
self.add_option(MAX_BANDWIDTH_OPTION)
self.tracker = ProgressTracker(self.context.prompt)
def run(self, **kwargs):
node_id = kwargs[NODE_ID_OPTION.keyword]
max_bandwidth = kwargs[MAX_BANDWIDTH_OPTION.keyword]
max_concurrency = kwargs[MAX_CONCURRENCY_OPTION.keyword]
units = [dict(type_id='node', unit_key=None)]
options = {
constants.MAX_DOWNLOAD_BANDWIDTH_KEYWORD: max_bandwidth,
constants.MAX_DOWNLOAD_CONCURRENCY_KEYWORD: max_concurrency,
}
if not node_activated(self.context, node_id):
msg = NOT_ACTIVATED_ERROR % dict(t=CONSUMER, id=node_id)
self.context.prompt.render_failure_message(msg)
return os.EX_USAGE
try:
http = self.context.server.consumer_content.update(node_id, units=units,
options=options)
task = http.response_body
self.poll([task], kwargs)
except NotFoundException, e:
for _id, _type in missing_resources(e):
if _type == 'consumer':
msg = RESOURCE_MISSING_ERROR % dict(t=NODE, id=_id)
self.context.prompt.render_failure_message(msg)
else:
raise
return os.EX_DATAERR
def progress(self, task, spinner):
self.tracker.display(task.progress_report)
def succeeded(self, task):
report = task.result['details'].values()[0]
r = UpdateRenderer(self.context.prompt, report)
r.render()<|fim▁end|> |
def __init__(self, context): |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>import json
import os
from flask import request, g, render_template, make_response, jsonify, Response
from helpers.raw_endpoint import get_id, store_json_to_file
from helpers.groups import get_groups
from json_controller import JSONController
from main import app
from pymongo import MongoClient, errors
HERE = os.path.dirname(os.path.abspath(__file__))
# setup database connection
def connect_client():
"""Connects to Mongo client"""
try:
return MongoClient(app.config['DB_HOST'], int(app.config['DB_PORT']))
except errors.ConnectionFailure as e:
raise e
<|fim▁hole|> g.mongo_client = connect_client()
g.mongo_db = getattr(g.mongo_client, app.config['DB_NAME'])
g.groups_collection = g.mongo_db[os.environ.get('DB_GROUPS_COLLECTION')]
return g.mongo_db
@app.teardown_appcontext
def close_db(error):
"""Closes connection with Mongo client"""
if hasattr(g, 'mongo_client'):
g.mongo_client.close()
# Begin view routes
@app.route('/')
@app.route('/index/')
def index():
"""Landing page for SciNet"""
return render_template("index.html")
@app.route('/faq/')
def faq():
"""FAQ page for SciNet"""
return render_template("faq.html")
@app.route('/leaderboard/')
def leaderboard():
"""Leaderboard page for SciNet"""
get_db()
groups = get_groups(g.groups_collection)
return render_template("leaderboard.html", groups=groups)
@app.route('/ping', methods=['POST'])
def ping_endpoint():
"""API endpoint determines potential article hash exists in db
:return: status code 204 -- hash not present, continue submission
:return: status code 201 -- hash already exists, drop submission
"""
db = get_db()
target_hash = request.form.get('hash')
if db.raw.find({'hash': target_hash}).count():
return Response(status=201)
else:
return Response(status=204)
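# A harvester is expected to ping first and only upload the full record when
# it gets 204 back (sketch; the requests usage is illustrative):
#
#     r = requests.post(base_url + '/ping', data={'hash': article_hash})
#     if r.status_code == 204:
#         post_raw_payload()  # hypothetical follow-up, see /raw below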
@app.route('/articles')
def ArticleEndpoint():
"""Eventual landing page for searching/retrieving articles"""
if request.method == 'GET':
return render_template("articles.html")
@app.route('/raw', methods=['POST'])
def raw_endpoint():
"""API endpoint for submitting raw article data
:return: status code 405 - invalid JSON or invalid request type
:return: status code 400 - unsupported content-type or invalid publisher
:return: status code 201 - successful submission
"""
# Ensure post's content-type is supported
if request.headers['content-type'] == 'application/json':
# Ensure data is a valid JSON
try:
user_submission = json.loads(request.data)
except ValueError:
return Response(status=405)
# generate UID for new entry
uid = get_id()
# store incoming JSON in raw storage
file_path = os.path.join(
HERE,
'raw_payloads',
str(uid)
)
store_json_to_file(user_submission, file_path)
# hand submission to controller and return Response
db = get_db()
controller_response = JSONController(user_submission, db=db, _id=uid).submit()
return controller_response
# User submitted an unsupported content-type
else:
return Response(status=400)
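# Example client call against this endpoint (illustrative payload and host):
#
#     import requests
#     r = requests.post('http://localhost:5000/raw',
#                       json={'title': 'An article'},  # hypothetical fields
#                       headers={'content-type': 'application/json'})
#     # 201 on success, 405 on invalid JSON, 400 on wrong content-type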
#@TODO: Implicit or Explicit group additions? Issue #51 comments on the issues page
#@TODO: Add form validation
@app.route('/requestnewgroup/', methods=['POST'])
def request_new_group():
# Grab submission form data and prepare email message
data = request.json
msg = "Someone has request that you add {group_name} to the leaderboard \
groups. The groups website is {group_website} and the submitter can \
be reached at {submitter_email}.".format(
group_name=data['new_group_name'],
group_website=data['new_group_website'],
submitter_email=data['submitter_email'])
return Response(status=200)
'''
try:
email(
subject="SciNet: A new group has been requested",
fro="[email protected]",
to='[email protected]',
msg=msg)
return Response(status=200)
except:
return Response(status=500)
'''
# Error handlers
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify( { 'error': 'Page Not Found' } ), 404)
@app.errorhandler(405)
def method_not_allowed(error):
return make_response(jsonify( { 'error': 'Method Not Allowed' } ), 405)<|fim▁end|> | def get_db():
"""Connects to Mongo database"""
if not hasattr(g, 'mongo_client'): |
<|file_name|>test_sh_base.py<|end_file_name|><|fim▁begin|>import shesha.config as conf
simul_name = "bench_scao_sh_16x16_8pix"
# loop
p_loop = conf.Param_loop()
p_loop.set_niter(100)
p_loop.set_ittime(0.002) # =1/500
# geom
p_geom = conf.Param_geom()
p_geom.set_zenithangle(0.)
# tel
p_tel = conf.Param_tel()
p_tel.set_diam(4.0)
p_tel.set_cobs(0.12)
# atmos
p_atmos = conf.Param_atmos()
p_atmos.set_r0(0.16)
p_atmos.set_nscreens(1)
p_atmos.set_frac([1.0])
p_atmos.set_alt([0.0])
p_atmos.set_windspeed([20.0])
p_atmos.set_winddir([45.])
p_atmos.set_L0([1.e5])
# target
p_target = conf.Param_target()
p_targets = [p_target]
# p_target.set_ntargets(1)
p_target.set_xpos(0.)
p_target.set_ypos(0.)
p_target.set_Lambda(1.65)
p_target.set_mag(10.)
# wfs
p_wfs0 = conf.Param_wfs()
p_wfss = [p_wfs0]
p_wfs0.set_type("sh")
p_wfs0.set_nxsub(8)<|fim▁hole|>p_wfs0.set_pixsize(0.3)
p_wfs0.set_fracsub(0.8)
p_wfs0.set_xpos(0.)
p_wfs0.set_ypos(0.)
p_wfs0.set_Lambda(0.5)
p_wfs0.set_gsmag(8.)
p_wfs0.set_optthroughput(0.5)
p_wfs0.set_zerop(1.e11)
p_wfs0.set_noise(3.)
p_wfs0.set_atmos_seen(1)
# lgs parameters
# p_wfs0.set_gsalt(90*1.e3)
# p_wfs0.set_lltx(0)
# p_wfs0.set_llty(0)
# p_wfs0.set_laserpower(10)
# p_wfs0.set_lgsreturnperwatt(1.e3)
# p_wfs0.set_proftype("Exp")
# p_wfs0.set_beamsize(0.8)
# dm
p_dm0 = conf.Param_dm()
p_dm1 = conf.Param_dm()
p_dms = [p_dm0, p_dm1]
p_dm0.set_type("pzt")
nact = p_wfs0.nxsub + 1
p_dm0.set_nact(nact)
p_dm0.set_alt(0.)
p_dm0.set_thresh(0.3)
p_dm0.set_coupling(0.2)
p_dm0.set_unitpervolt(0.01)
p_dm0.set_push4imat(100.)
p_dm1.set_type("tt")
p_dm1.set_alt(0.)
p_dm1.set_unitpervolt(0.0005)
p_dm1.set_push4imat(10.)
# centroiders
p_centroider0 = conf.Param_centroider()
p_centroiders = [p_centroider0]
p_centroider0.set_nwfs(0)
p_centroider0.set_type("cog")
# p_centroider0.set_type("corr")
# p_centroider0.set_type_fct("model")
# controllers
p_controller0 = conf.Param_controller()
p_controllers = [p_controller0]
p_controller0.set_type("ls")
p_controller0.set_nwfs([0])
p_controller0.set_ndm([0, 1])
p_controller0.set_maxcond(1500.)
p_controller0.set_delay(1.)
p_controller0.set_gain(0.4)
p_controller0.set_modopti(0)
p_controller0.set_nrec(2048)
p_controller0.set_nmodes(216)
p_controller0.set_gmin(0.001)
p_controller0.set_gmax(0.5)
p_controller0.set_ngain(500)<|fim▁end|> | p_wfs0.set_npix(8) |
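# This parameter file is meant to be handed to a COMPASS/shesha loop script,
# e.g. (illustrative invocation):
#
#     ipython -i shesha/scripts/closed_loop.py test_sh_base.py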
<|file_name|>bitcoin_fa_IR.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="fa_IR" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Unicorncoin</source>
<translation>در مورد بیتکویین</translation>
</message>
<message>
<location line="+39"/>
<source><b>Unicorncoin</b> version</source>
<translation><b>Unicorncoin</b> version</translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The Litecoin developers</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>دفترچه آدرس</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>برای ویرایش آدرس/برچسب دوبار کلیک نمایید</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>یک آدرس جدید بسازید</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>آدرس انتخاب شده را در کلیپ بوردِ سیستم کپی کنید</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>و آدرس جدید</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your Unicorncoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation>و کپی آدرس</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>نشان و کد QR</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Unicorncoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation>صدور داده نوار جاری به یک فایل</translation>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified Unicorncoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>و حذف</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your Unicorncoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation>کپی و برچسب</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>و ویرایش</translation>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation>انتقال اطلاعات دفترچه آدرس</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>سی.اس.وی. (فایل جداگانه دستوری)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>صدور پیام خطا</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>قابل کپی در فایل نیست %1</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>برچسب</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>آدرس</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(برچسب ندارد)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>رمز/پَس فرِیز را وارد کنید</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>رمز/پَس فرِیز جدید را وارد کنید</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>رمز/پَس فرِیز را دوباره وارد کنید</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>رمز/پَس فرِیز جدید را در wallet وارد کنید. برای انتخاب رمز/پَس فرِیز از 10 کاراکتر تصادفی یا بیشتر و یا هشت کلمه یا بیشتر استفاده کنید. </translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>wallet را رمزگذاری کنید</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>برای انجام این عملکرد به رمز/پَس فرِیزِwallet نیاز است تا آن را از حالت قفل درآورد.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>باز کردن قفل wallet </translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>برای کشف رمز wallet، به رمز/پَس فرِیزِwallet نیاز است.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>کشف رمز wallet</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>تغییر رمز/پَس فرِیز</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>رمز/پَس فرِیزِ قدیم و جدید را در wallet وارد کنید</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>رمزگذاری wallet را تایید کنید</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR LITECOINS</b>!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>تایید رمزگذاری</translation>
</message>
<message>
<location line="-56"/>
<source>Unicorncoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your unicorncoins from being stolen by malware infecting your computer.</source>
<translation>Unicorncoin برای اتمام فرایند رمزگذاری بسته خواهد شد. به خاطر داشته باشید که رمزگذاری WALLET شما، کامپیوتر شما را از آلودگی به بدافزارها مصون نمی دارد.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>رمزگذاری تایید نشد</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>رمزگذاری به علت خطای داخلی تایید نشد. wallet شما رمزگذاری نشد</translation>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>رمزهای/پَس فرِیزهایِ وارد شده با هم تطابق ندارند</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation>قفل wallet باز نشد</translation>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>رمزهای/پَس فرِیزهایِ وارد شده wallet برای کشف رمز اشتباه است.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>کشف رمز wallet انجام نشد</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation>امضا و پیام</translation>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation>به روز رسانی با شبکه...</translation>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation>و بازبینی</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>نمای کلی از wallet را نشان بده</translation>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation>و تراکنش</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>تاریخچه تراکنش را باز کن</translation>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation>فهرست آدرسها و برچسبهای ذخیره شده را ویرایش کن</translation>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation>فهرست آدرسها را برای دریافت وجه نشان بده</translation>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation>خروج</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>از "درخواست نامه"/ application خارج شو</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about Unicorncoin</source>
<translation>اطلاعات در مورد Unicorncoin را نشان بده</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>درباره و QT</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>نمایش اطلاعات درباره QT</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>و انتخابها</translation>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation>و رمزگذاری wallet</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>و گرفتن نسخه پیشتیبان از wallet</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>تغییر رمز/پَس فرِیز</translation>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-347"/>
<source>Send coins to a Unicorncoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for Unicorncoin</source>
<translation>اصلاح انتخابها برای پیکربندی Unicorncoin</translation>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation>گرفتن نسخه پیشتیبان در آدرسی دیگر</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>رمز مربوط به رمزگذاریِ wallet را تغییر دهید</translation>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>Unicorncoin</source>
<translation>unicorncoin</translation>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation>کیف پول</translation>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>&About Unicorncoin</source>
<translation>&در مورد بیتکویین</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&نمایش/ عدم نمایش و</translation>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your Unicorncoin addresses to prove you own them</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified Unicorncoin addresses</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>و فایل</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>و تنظیمات</translation>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation>و راهنما</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>نوار ابزار</translation>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+47"/>
<source>Unicorncoin client</source>
<translation>مشتری unicorncoin</translation>
</message>
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to Unicorncoin network</source>
<translation><numerusform>%n ارتباط فعال به شبکه Unicorncoin
%n ارتباط فعال به شبکه Unicorncoin</numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation>روزآمد</translation>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation>در حال روزآمد سازی..</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation>ارسال تراکنش</translation>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation>تراکنش دریافتی</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>تاریخ: %1⏎ میزان وجه : %2⏎ نوع: %3⏎ آدرس: %4⏎
</translation>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid Unicorncoin address or malformed URI parameters.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>wallet رمزگذاری شد و در حال حاضر از حالت قفل در آمده است</translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>wallet رمزگذاری شد و در حال حاضر قفل است</translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. Unicorncoin can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation>هشدار شبکه</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>ویرایش آدرسها</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>و برچسب</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>برچسب مربوط به این دفترچه آدرس</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>و آدرس</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>برچسب مربوط به این دفترچه آدرس و تنها ب</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>آدرسِ دریافت کننده جدید</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>آدرس ارسال کننده جدید</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>ویرایش آدرسِ دریافت کننده</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>ویرایش آدرسِ ارسال کننده</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>آدرس وارد شده "%1" قبلا به فهرست آدرسها اضافه شده است.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Unicorncoin address.</source>
<translation>آدرس وارد شده "%1" یک آدرس صحیح برای unicorncoin نیست</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>عدم توانایی برای قفل گشایی wallet</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>عدم توانایی در ایجاد کلید جدید</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>Unicorncoin-Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>نسخه</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>نحوه استفاده:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>انتخاب/آپشن</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Automatically start Unicorncoin after logging in to the system.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Start Unicorncoin on system login</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Unicorncoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Connect to the Unicorncoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Unicorncoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Whether to show Unicorncoin addresses in the transaction list or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>&نمایش آدرسها در فهرست تراکنش</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&تایید</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&رد</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>&به کار گرفتن</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation>پیش فرض</translation>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Unicorncoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>فرم</translation>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Unicorncoin network after a connection is established, but this process has not completed yet.</source>
<translation>اطلاعات نمایش داده شده ممکن است روزآمد نباشد. wallet شما به صورت خودکار بعد از برقراری اتصال با شبکه unicorncoin به روز می شود اما این فرایند هنوز تکمیل نشده است.</translation>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
<translation>مانده حساب:</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>تایید نشده</translation>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation>کیف پول</translation>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>تراکنشهای اخیر</b></translation>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation>مانده حساب جاری</translation>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>تعداد تراکنشهایی که نیاز به تایید دارند و هنوز در مانده حساب جاری شما به حساب نیامده اند</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>خارج از روزآمد سازی</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start unicorncoin: click-to-pay handler</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>درخواست وجه</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>میزان وجه:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>برچسب:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>پیام:</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&ذخیره با عنوانِ...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>URI حاصل بسیار طولانی است، متنِ برچسب/پیام را کوتاه کنید</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>تصاویر با فرمت PNG (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Show the Unicorncoin-Qt help message to get a list with possible Unicorncoin command-line options.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-104"/>
<source>Unicorncoin - Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Unicorncoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Open the Unicorncoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the Unicorncoin RPC console.</source>
<translation>به کنسول آر.پی.سی. Unicorncoin خوش آمدید</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>سکه های ارسالی</translation>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation>ارسال همزمان به گیرنده های متعدد</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>تمامی فیلدهای تراکنش حذف شوند</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation>مانده حساب:</translation>
</message>
<message>
<location line="+10"/>
<source>123.456 BTC</source>
<translation>123.456 BTC</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>تایید عملیات ارسال </translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>&ارسال</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> به %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>تایید ارسال سکه ها</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>شما مطمئن هستید که می خواهید %1 را ارسال کنید؟</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> و </translation>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>میزان پرداخت باید بیشتر از 0 باشد</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>خطا: تراکنش رد شد. این خطا ممکن است به این دلیل اتفاق بیافتد که سکه های wallet شما خرج شده باشند، مثلا اگر wallet.dat را کپی کرده باشید و سکه های شما در آن کپی خرج شده باشند اما در اینجا به عنوان خرج شده علامت گذاری نشده باشند.</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>فرم</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>&میزان وجه:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>پرداخت &به:</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>یک برچسب برای این آدرس بنویسید تا به دفترچه آدرسهای شما اضافه شود</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&برچسب:</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation>آدرس را از فهرست آدرس انتخاب کنید</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>آدرس را از کلیپ بورد جایگذاری کنید</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>این گیرنده را حذف کنید</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Unicorncoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>یک آدرس unicorncoin وارد کنید (مثال Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation>&امضای پیام</translation>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>آدرسی که پیام با آن امضا می شود (مثال Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation>آدرس را از فهرست آدرس انتخاب کنید</translation>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation>آدرس را از کلیپ بورد جایگذاری کنید</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Unicorncoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>آدرسی که پیام با آن امضا شده است (مثال Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Unicorncoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Unicorncoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>یک آدرس unicorncoin وارد کنید (مثال Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Enter Unicorncoin signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The Litecoin developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation>باز تا %1</translation>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/تایید نشده</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 تاییدها</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>تاریخ</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>برچسب</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>پیام</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>میزان</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>، تا به حال با موفقیت انتشار نیافته است</translation>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation>ناشناس</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>جزئیات تراکنش</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>این بخش جزئیات تراکنش را نشان می دهد</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation>تاریخ</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>نوع</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>آدرس</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>میزان وجه</translation>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation>باز تا %1</translation>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation>برون خطی (%1 تاییدها)</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation>تایید نشده (%1 از %2 تاییدها)</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation>تایید شده (%1 تاییدها)</translation>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>این block توسط گره های دیگری دریافت نشده است و ممکن است قبول نشود</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>تولید شده اما قبول نشده است</translation>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation>دریافت با</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>دریافت شده از</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>ارسال به</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>پرداخت به خودتان</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>استخراج شده</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>خالی</translation>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>وضعیت تراکنش. با اشاره به این بخش تعداد تاییدها نمایش داده می شود</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>زمان و تاریخی که تراکنش دریافت شده است</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>نوع تراکنش</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>آدرس مقصد در تراکنش</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>میزان وجه کم شده یا اضافه شده به حساب</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation>همه</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>امروز</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>این هفته</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>این ماه</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>ماه گذشته</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>این سال</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>حدود...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>دریافت با</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>ارسال به</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>به خودتان</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>استخراج شده</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>دیگر</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>آدرس یا برچسب را برای جستجو وارد کنید</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>حداقل میزان وجه</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>آدرس را کپی کنید</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>برچسب را کپی کنید</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>میزان وجه کپی شود</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>برچسب را ویرایش کنید</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation>داده های تراکنش را صادر کنید</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>فایل جدا شده با کاما (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>تایید شده</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>تاریخ</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>نوع</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>برچسب</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>آدرس</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>میزان</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>شناسه</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>خطا در صدور</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>نوشتن در فایل %1 ممکن نیست.</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>دامنه:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>به</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation>سکه های ارسالی</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation>صدور داده نوار جاری به یک فایل</translation>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>Unicorncoin version</source>
<translation>نسخه unicorncoin</translation>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation>نحوه استفاده:</translation>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or unicorncoind</source>
<translation>ارسال دستور به سرور یا unicorncoind</translation>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation>فهرست دستورها</translation>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation>درخواست کمک برای یک دستور</translation>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation>انتخابها:</translation>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: unicorncoin.conf)</source>
<translation>فایل پیکربندیِ را مشخص کنید (پیش فرض: unicorncoin.conf)</translation>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: unicorncoind.pid)</source>
<translation>فایل pid را مشخص کنید (پیش فرض: unicorncoind.pid)</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>دایرکتوری داده را مشخص کنید</translation>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>حافظه بانک داده را به مگابایت تنظیم کنید (پیش فرض: 25)</translation>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 9933 or testnet: 19933)</source>
<translation>ارتباطات را در <port> بشنوید (پیش فرض: 9933 یا تست نت: 19933)</translation>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>نگهداری <n> ارتباطات برای قرینه سازی (پیش فرض: 125)</translation>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>آستانه قطع برای قرینه سازی اشتباه (پیش فرض:100)</translation>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>تعداد ثانیه ها برای اتصال دوباره قرینه های اشتباه (پیش فرض:86400)</translation>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on <port> (default: 9332 or testnet: 19332)</source>
<translation>ارتباطاتِ JSON-RPC را در <port> گوش کنید (پیش فرض: 9332 یا تست نت: 19332)</translation>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>دستورات خط فرمان و JSON-RPC را قبول کنید</translation>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>به عنوان daemon در پس زمینه اجرا شود و دستورات را قبول کند</translation>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation>از شبکه آزمایشی استفاده نمایید</translation>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=unicorncoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Unicorncoin Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Unicorncoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Unicorncoin will not work properly.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation>افزودن timestamp به ابتدای برونداد اشکال زدایی</translation>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the Unicorncoin Wiki for SSL setup instructions)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>ارسال اطلاعات پیگیری/خطایابی به کنسول به جای ارسال به فایل debug.log</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>ارسال اطلاعات خطایابی/پیگیری به سیستم خطایاب</translation>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>تعیین مدت زمان وقفه (time out) به هزارم ثانیه (پیش فرض: 5000)</translation>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation>شناسه کاربری برای ارتباطاتِ JSON-RPC</translation>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation>رمز برای ارتباطاتِ JSON-RPC</translation>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>اجازه دادن به ارتباطاتِ JSON-RPC از آدرس آی.پی. مشخص شده</translation>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>دستورات را به گره اجرا شده در<ip> ارسال کنید (پیش فرض:127.0.0.1)</translation>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>دستور را وقتی بهترین بلاک تغییر کرد اجرا کن (%s در دستور توسط block hash جایگزین شده است)</translation>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation>wallet را به جدیدترین نسخه روزآمد کنید</translation>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>حجم key pool را به اندازه <n> تنظیم کنید (پیش فرض:100)</translation>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>زنجیره بلاک را برای تراکنش جا افتاده در WALLET دوباره اسکن کنید</translation>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>برای ارتباطاتِ JSON-RPC از OpenSSL (https) استفاده کنید</translation>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation>فایل certificate سرور (پیش فرض server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>رمز اختصاصی سرور (پیش فرض: server.pem)</translation>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>ciphers قابل قبول (پیش فرض: default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation>این پیام راهنما</translation>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation>لود شدن آدرسها..</translation>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>خطا در هنگام لود شدن wallet.dat: Wallet corrupted</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of Unicorncoin</source>
<translation>خطا در هنگام لود شدن wallet.dat. به نسخه جدید Unicorncoin برای wallet نیاز است.</translation>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart Unicorncoin to complete</source>
<translation>wallet نیاز به بازنویسی دارد. Unicorncoin را برای تکمیل عملیات دوباره اجرا کنید.</translation>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation>خطا در هنگام لود شدن wallet.dat</translation>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>میزان اشتباه است for -paytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation>میزان اشتباه است</translation>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation>وجوه ناکافی</translation>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation>لود شدن نمایه بلاکها..</translation>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>یک گره برای اتصال اضافه کنید و تلاش کنید تا اتصال را باز نگاه دارید</translation>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. Unicorncoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation>هزینه بر اساس کیلو بایت برای اضافه شدن به تراکنشی که ارسال کرده اید</translation>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation>wallet در حال لود شدن است...</translation>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation>قابلیت برگشت به نسخه قبلی برای wallet امکان پذیر نیست</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation>آدرس پیش فرض قابل ذخیره نیست</translation>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation>اسکنِ دوباره...</translation>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation>اتمام لود شدن</translation>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation>برای استفاده از %s از اختیارات</translation>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation>خطا</translation>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>شما باید یک رمز rpcpassword=<password> را در فایل تنظیمات ایجاد کنید⏎ %s ⏎ اگر فایل وجود ندارد، آن را با مجوز دسترسی «فقط قابل خواندن برای مالک» ایجاد کنید.
</translation>
</message>
</context>
</TS><|fim▁end|> | <message>
<location line="+23"/> |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from django.urls import re_path
from .views import PrivateStorageView
urlpatterns = [
re_path(r'^(?P<path>.*)$', PrivateStorageView.as_view(), name='serve_private_file'),
]<|fim▁end|> | |
<|file_name|>crowdsource.config.js<|end_file_name|><|fim▁begin|>(function () {
'use strict';
angular
.module('crowdsource.config', ['angular-loading-bar'])
.config(config);
config.$inject = ['$httpProvider', '$locationProvider', '$mdThemingProvider', 'cfpLoadingBarProvider'];
/**
* @name config
* @desc Enable HTML5 routing
*/
function config($httpProvider, $locationProvider, $mdThemingProvider, cfpLoadingBarProvider) {
$httpProvider.interceptors.push('AuthHttpResponseInterceptor');
$locationProvider.html5Mode(true);
$locationProvider.hashPrefix('!');
cfpLoadingBarProvider.includeSpinner = false;
// Extend palettes
var customBlue = $mdThemingProvider.extendPalette('indigo', {
"50":"#e8e9f2","100":"#babdd8","200":"#8d91bf",
"300":"#666ca9","400":"#404893","500":"#1a237e",
"600":"#171f6e","700":"#141a5f","800":"#10164f",
"900":"#0d123f","A100":"#babdd8","A200":"#8d91bf",<|fim▁hole|> var customYellow = $mdThemingProvider.extendPalette('yellow', {
"50": "#fffef3", "100": "#fffbdb", "200": "#fff9c4", "300": "#fff6b0",
"400": "#fff49c", "500": "#fff288", "600": "#dfd477", "700": "#bfb666",
"800": "#9f9755", "900": "#807944", "A100": "#fffbdb", "A200": "#fff9c4",
"A400": "#fff49c", "A700": "#bfb666"
});
var customGrey = $mdThemingProvider.extendPalette('grey', {
"50":"#ffffff","100":"#ffffff","200":"#ffffff",
"300":"#ffffff","400":"#ffffff","500":"#ffffff",
"600":"#dfdfdf","700":"#bfbfbf","800":"#9f9f9f",
"900":"#808080","A100":"#ffffff","A200":"#ffffff",
"A400":"#ffffff","A700":"#bfbfbf"
});
		// Register the new color palette maps (customBlue, customYellow and customGrey) with the theming provider
$mdThemingProvider.definePalette('customBlue', customBlue);
$mdThemingProvider.definePalette('customYellow', customYellow);
$mdThemingProvider.definePalette('customGrey', customGrey);
$mdThemingProvider.theme('default')
.primaryPalette('customBlue')
.accentPalette('customYellow')
.warnPalette('red')
.backgroundPalette('customGrey');
$mdThemingProvider.theme('alternate')
.primaryPalette('indigo')
.accentPalette('orange')
.warnPalette('red')
.backgroundPalette('grey');
$mdThemingProvider.setDefaultTheme('default');
// $mdThemingProvider.alwaysWatchTheme(true);
//testing
/*OAuthProvider.configure({
baseUrl: 'http://localhost:8000',
clientId: 'client_id',
clientSecret: 'client_secret',
grantPath : '/api/oauth2-ng/token'
});
OAuthTokenProvider.configure({
name: 'token',
options: {
secure: false //TODO this has to be changed to True
}
});*/
}
})();<|fim▁end|> | "A400":"#404893","A700":"#141a5f"
});
|
<|file_name|>libprotoident.cc<|end_file_name|><|fim▁begin|>/*
*
* Copyright (c) 2011-2016 The University of Waikato, Hamilton, New Zealand.
* All rights reserved.
*
* This file is part of libprotoident.
*
* This code has been developed by the University of Waikato WAND
* research group. For further information please see http://www.wand.net.nz/
*
* libprotoident is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* libprotoident is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
*/
#define __STDC_FORMAT_MACROS<|fim▁hole|>#include <assert.h>
#include <libtrace.h>
#include <inttypes.h>
#include <sys/types.h>
#include <stdint.h>
#include <stdlib.h>
#include <signal.h>
#include "libprotoident.h"
#include "proto_manager.h"
bool init_called = false;
LPIModuleMap TCP_protocols;
LPIModuleMap UDP_protocols;
lpi_module_t *lpi_icmp = NULL;
lpi_module_t *lpi_unsupported = NULL;
lpi_module_t *lpi_unknown_tcp = NULL;
lpi_module_t *lpi_unknown_udp = NULL;
static LPINameMap lpi_names;
static LPIProtocolMap lpi_protocols;
static LPICategoryMap lpi_categories;
static LPICategoryProtocolMap lpi_category_protocols;
static int seq_cmp (uint32_t seq_a, uint32_t seq_b) {
if (seq_a == seq_b) return 0;
if (seq_a > seq_b)
return (int)(seq_a - seq_b);
else
/* WRAPPING */
return (int)(UINT32_MAX - ((seq_b - seq_a) - 1));
}
int lpi_init_library() {
if (init_called) {
fprintf(stderr, "WARNING: lpi_init_library has already been called\n");
return 0;
}
if (register_tcp_protocols(&TCP_protocols) == -1)
return -1;
if (register_udp_protocols(&UDP_protocols) == -1)
return -1;
init_other_protocols(&lpi_names, &lpi_protocols, &lpi_category_protocols);
register_names(&TCP_protocols, &lpi_names, &lpi_protocols, &lpi_category_protocols);
register_names(&UDP_protocols, &lpi_names, &lpi_protocols, &lpi_category_protocols);
register_category_names(&lpi_categories);
init_called = true;
if (TCP_protocols.empty() && UDP_protocols.empty()) {
fprintf(stderr, "WARNING: No protocol modules loaded\n");
return -1;
}
return 0;
}
void lpi_free_library() {
free_protocols(&TCP_protocols);
free_protocols(&UDP_protocols);
if (lpi_icmp != NULL) {
delete lpi_icmp;
lpi_icmp = NULL;
}
if (lpi_unsupported != NULL) {
delete lpi_unsupported;
lpi_unsupported = NULL;
}
if (lpi_unknown_tcp != NULL) {
delete lpi_unknown_tcp;
lpi_unknown_tcp = NULL;
}
if (lpi_unknown_udp != NULL) {
delete lpi_unknown_udp;
lpi_unknown_udp = NULL;
}
init_called = false;
}
void lpi_init_data(lpi_data_t *data) {
data->payload[0] = 0;
data->payload[1] = 0;
data->seen_syn[0] = false;
data->seen_syn[1] = false;
data->seqno[0] = 0;
data->seqno[1] = 0;
data->observed[0] = 0;
data->observed[1] = 0;
data->server_port = 0;
data->client_port = 0;
data->trans_proto = 0;
data->payload_len[0] = 0;
data->payload_len[1] = 0;
data->ips[0] = 0;
data->ips[1] = 0;
}
static int update_tcp_flow(lpi_data_t *data, libtrace_tcp_t *tcp, uint8_t dir,
uint32_t rem, uint32_t psize) {
uint32_t seq = 0;
if (rem < sizeof(libtrace_tcp_t))
return 0;
if (tcp->rst)
return 0;
if (data->server_port == 0) {
data->server_port = ntohs(tcp->dest);
data->client_port = ntohs(tcp->source);
}
seq = ntohl(tcp->seq);
if (tcp->syn && data->payload_len[dir] == 0) {
data->seqno[dir] = seq + 1;
data->seen_syn[dir] = true;
}
/* Ok, we've got some payload but we never saw the SYN for this
* direction. What do we do?
*
* Current idea: just assume this is the first payload bearing
* packet. Better than running around with an uninitialised seqno */
if (data->seen_syn[dir] == false && psize > 0) {
data->seqno[dir] = seq;
data->seen_syn[dir] = true;
}
if (seq_cmp(seq, data->seqno[dir]) != 0)
return 0;
//data->seqno[dir] = seq;
return 1;
}
static int update_udp_flow(lpi_data_t *data, libtrace_udp_t *udp,
uint32_t rem) {
if (rem < sizeof(libtrace_udp_t))
return 0;
if (data->server_port == 0) {
data->server_port = ntohs(udp->dest);
data->client_port = ntohs(udp->source);
}
return 1;
}
int lpi_update_data(libtrace_packet_t *packet, lpi_data_t *data, uint8_t dir) {
char *payload = NULL;
uint32_t psize = 0;
uint32_t rem = 0;
uint8_t proto = 0;
void *transport;
uint32_t four_bytes;
libtrace_ip_t *ip = NULL;
//tcp = trace_get_tcp(packet);
psize = trace_get_payload_length(packet);
	/* Don't bother if we've observed 32k of data - the first packet must
	 * surely have been within that. This helps us avoid issues with sequence
* number wrapping when doing the reordering check below */
if (data->observed[dir] > 32 * 1024)
return 0;
data->observed[dir] += psize;
/* If we're TCP, we have to wait to check that we haven't been
* reordered */
if (data->trans_proto != 6 && data->payload_len[dir] != 0)
return 0;
transport = trace_get_transport(packet, &proto, &rem);
if (data->trans_proto == 0)
data->trans_proto = proto;
if (transport == NULL || rem == 0)
return 0;
if (proto == 6) {
if (update_tcp_flow(data, (libtrace_tcp_t *)transport, dir, rem, psize) == 0)
return 0;
payload = (char *)trace_get_payload_from_tcp(
(libtrace_tcp_t *)transport, &rem);
}
if (proto == 17) {
if (update_udp_flow(data, (libtrace_udp_t *)transport, rem) == 0)
return 0;
payload = (char *)trace_get_payload_from_udp(
(libtrace_udp_t *)transport, &rem);
}
ip = trace_get_ip(packet);
if (payload == NULL)
return 0;
if (psize <= 0)
return 0;
four_bytes = (*(uint32_t *)payload);
if (psize < 4) {
four_bytes = (ntohl(four_bytes)) >> (8 * (4 - psize));
four_bytes = htonl(four_bytes << (8 * (4 - psize)));
}
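	/* Example of the masking above (illustrative): for psize == 2 and payload
	 * bytes 'A' 'B', the shifts zero everything past the payload, so the
	 * stored bytes are 0x41 0x42 0x00 0x00 regardless of whatever garbage
	 * followed the 2-byte payload in the capture buffer. */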
data->payload[dir] = four_bytes;
data->payload_len[dir] = psize;
if (ip != NULL && data->ips[0] == 0) {
if (dir == 0) {
data->ips[0] = ip->ip_src.s_addr;
data->ips[1] = ip->ip_dst.s_addr;
} else {
data->ips[1] = ip->ip_src.s_addr;
data->ips[0] = ip->ip_dst.s_addr;
}
}
return 1;
}
static lpi_module_t *test_protocol_list(LPIModuleList *ml, lpi_data_t *data) {
LPIModuleList::iterator l_it;
/* Turns out naively looping through the modules is quicker
* than trying to do intelligent stuff with threads. Most
* callbacks complete very quickly so threading overhead is a
* major problem */
for (l_it = ml->begin(); l_it != ml->end(); l_it ++) {
lpi_module_t *module = *l_it;
/* To save time, I'm going to break on the first successful
* match. A threaded version would wait for all the modules
* to run, storing all successful results in a list of some
* sort and selecting an appropriate result from there.
*/
if (module->lpi_callback(data, module))
return module;
}
return NULL;
}
static lpi_module_t *guess_protocol(LPIModuleMap *modmap, lpi_data_t *data) {
lpi_module_t *proto = NULL;
LPIModuleMap::iterator m_it;
/* Deal with each priority in turn - want to match higher priority
* rules first.
*/
for (m_it = modmap->begin(); m_it != modmap->end(); m_it ++) {
LPIModuleList *ml = m_it->second;
proto = test_protocol_list(ml, data);
if (proto != NULL)
break;
}
return proto;
}
lpi_module_t *lpi_guess_protocol(lpi_data_t *data) {
lpi_module_t *p = NULL;
if (!init_called) {
fprintf(stderr, "lpi_init_library was never called - cannot guess the protocol\n");
return NULL;
}
switch(data->trans_proto) {
case TRACE_IPPROTO_ICMP:
return lpi_icmp;
case TRACE_IPPROTO_TCP:
p = guess_protocol(&TCP_protocols, data);
if (p == NULL)
p = lpi_unknown_tcp;
return p;
case TRACE_IPPROTO_UDP:
p = guess_protocol(&UDP_protocols, data);
if (p == NULL)
p = lpi_unknown_udp;
return p;
default:
return lpi_unsupported;
}
return p;
}
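/* Typical calling sequence (sketch; error handling omitted):
 *
 *   lpi_init_library();
 *   lpi_data_t flow;
 *   lpi_init_data(&flow);
 *   lpi_update_data(packet, &flow, dir);    // first payload packet each way
 *   lpi_module_t *mod = lpi_guess_protocol(&flow);
 *   puts(lpi_print_category(lpi_categorise(mod)));
 *   lpi_free_library();
 */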
lpi_category_t lpi_categorise(lpi_module_t *module) {
if (module == NULL)
return LPI_CATEGORY_NO_CATEGORY;
return module->category;
}
const char *lpi_print_category(lpi_category_t category) {
switch(category) {
case LPI_CATEGORY_WEB:
return "Web";
case LPI_CATEGORY_MAIL:
return "Mail";
case LPI_CATEGORY_CHAT:
return "Chat";
case LPI_CATEGORY_P2P:
return "P2P";
case LPI_CATEGORY_P2P_STRUCTURE:
return "P2P_Structure";
case LPI_CATEGORY_KEY_EXCHANGE:
return "Key_Exchange";
case LPI_CATEGORY_ECOMMERCE:
return "ECommerce";
case LPI_CATEGORY_GAMING:
return "Gaming";
case LPI_CATEGORY_ENCRYPT:
return "Encryption";
case LPI_CATEGORY_MONITORING:
return "Measurement";
case LPI_CATEGORY_NEWS:
return "News";
case LPI_CATEGORY_MALWARE:
return "Malware";
case LPI_CATEGORY_SECURITY:
return "Security";
case LPI_CATEGORY_ANTISPAM:
return "Antispam";
case LPI_CATEGORY_VOIP:
return "VOIP";
case LPI_CATEGORY_TUNNELLING:
return "Tunnelling";
case LPI_CATEGORY_NAT:
return "NAT_Traversal";
case LPI_CATEGORY_STREAMING:
return "Streaming";
case LPI_CATEGORY_SERVICES:
return "Services";
case LPI_CATEGORY_DATABASES:
return "Databases";
case LPI_CATEGORY_FILES:
return "File_Transfer";
case LPI_CATEGORY_REMOTE:
return "Remote_Access";
case LPI_CATEGORY_TELCO:
return "Telco_Services";
case LPI_CATEGORY_P2PTV:
return "P2PTV";
case LPI_CATEGORY_RCS:
return "Revision_Control";
case LPI_CATEGORY_LOGGING:
return "Logging";
case LPI_CATEGORY_PRINTING:
return "Printing";
case LPI_CATEGORY_TRANSLATION:
return "Translation";
case LPI_CATEGORY_CDN:
return "CDN";
case LPI_CATEGORY_CLOUD:
return "Cloud";
case LPI_CATEGORY_NOTIFICATION:
return "Notification";
case LPI_CATEGORY_SERIALISATION:
return "Serialisation";
case LPI_CATEGORY_BROADCAST:
return "Broadcast";
case LPI_CATEGORY_LOCATION:
return "Location";
case LPI_CATEGORY_CACHING:
return "Caching";
case LPI_CATEGORY_ICS:
return "ICS";
case LPI_CATEGORY_MOBILE_APP:
return "Mobile App";
case LPI_CATEGORY_IPCAMERAS:
return "IP Cameras";
case LPI_CATEGORY_EDUCATIONAL:
return "Educational";
case LPI_CATEGORY_MESSAGE_QUEUE:
return "Message_Queuing";
case LPI_CATEGORY_ICMP:
return "ICMP";
case LPI_CATEGORY_MIXED:
return "Mixed";
case LPI_CATEGORY_NOPAYLOAD:
return "No_Payload";
case LPI_CATEGORY_UNKNOWN:
return "Unknown";
case LPI_CATEGORY_UNSUPPORTED:
return "Unsupported";
case LPI_CATEGORY_NO_CATEGORY:
return "Uncategorised";
case LPI_CATEGORY_LAST:
return "Invalid_Category";
}
return "Invalid_Category";
}
const char *lpi_print(lpi_protocol_t proto) {
LPINameMap::iterator it;
it = lpi_names.find(proto);
if (it == lpi_names.end()) {
return "NULL";
}
return (it->second);
}
lpi_protocol_t lpi_get_protocol_by_name(char *name) {
LPIProtocolMap::iterator it;
it = lpi_protocols.find(name);
if (it == lpi_protocols.end()) {
return LPI_PROTO_UNKNOWN;
}
return (it->second);
}
lpi_category_t lpi_get_category_by_name(char *name) {
LPICategoryMap::iterator it;
it = lpi_categories.find(name);
if (it == lpi_categories.end()) {
return LPI_CATEGORY_UNKNOWN;
}
return (it->second);
}
lpi_category_t lpi_get_category_by_protocol(lpi_protocol_t protocol) {
LPICategoryProtocolMap::iterator it;
it = lpi_category_protocols.find(protocol);
if (it == lpi_category_protocols.end()) {
return LPI_CATEGORY_UNKNOWN;
}
return (it->second);
}
bool lpi_is_protocol_inactive(lpi_protocol_t proto) {
LPINameMap::iterator it;
it = lpi_names.find(proto);
if (it == lpi_names.end()) {
return true;
}
return false;
}<|fim▁end|> | #define __STDC_LIMIT_MACROS
#include <stdio.h> |
<|file_name|>delete_data.py<|end_file_name|><|fim▁begin|># muddersOnRails()
# Sara McAllister November 17, 2017
# Last updated: 11-17-2017
# delete all data from database and remove generated graphs (this is super sketch)
import os
import dbCalls
summary_file = 'app/assets/images/summary.png'
overall_file = 'app/assets/images/overall.png'
def main():
dbCalls.remove_all()
# remove both summary and overall picture
try:
os.remove(summary_file)
os.remove(overall_file)
except OSError:<|fim▁hole|><|fim▁end|> | pass
if __name__ == "__main__":
main() |
<|file_name|>archstatus.go<|end_file_name|><|fim▁begin|>package main
import (
"bytes"
"fmt"
"os/exec"
"github.com/mgutz/ansi"
"github.com/stevedomin/termtable"
"strings"
)
func fmtString(color, str, reset string) string {
return fmt.Sprintf("%s%s%s", color, str, reset)
}
func main() {
services := []string{
"cronie.service",
"httpd.service",<|fim▁hole|> "sshd.service",
"home.mount",
"mnt-extra.mount",
"tmp.mount",
"var-lib-mysqltmp.mount",
"var.mount",
}
t := termtable.NewTable(nil, nil)
t.SetHeader([]string{"SERVICE", "STATUS"})
for _, service := range services {
// The systemctl command.
syscommand := exec.Command("systemctl", "status", service)
// The grep command.
grepcommand := exec.Command("grep", "Active:")
// Pipe the stdout of syscommand to the stdin of grepcommand.
grepcommand.Stdin, _ = syscommand.StdoutPipe()
// Create a buffer of bytes.
var b bytes.Buffer
// Assign the address of our buffer to grepcommand.Stdout.
grepcommand.Stdout = &b
// Start grepcommand.
_ = grepcommand.Start()
// Run syscommand
_ = syscommand.Run()
// Wait for grepcommand to exit.
_ = grepcommand.Wait()
s := fmt.Sprintf("%s", &b)
if strings.Contains(s, "active (running)") || strings.Contains(s, "active (mounted)") {
color := ansi.ColorCode("green+h:black")
reset := ansi.ColorCode("reset")
t.AddRow([]string{fmtString(color, service, reset), fmtString(color, s, reset)})
} else {
color := ansi.ColorCode("red+h:black")
reset := ansi.ColorCode("reset")
t.AddRow([]string{fmtString(color, service, reset), fmtString(color, s, reset)})
}
}
fmt.Println(t.Render())
}<|fim▁end|> | "mysqld.service",
"ntpd.service",
"postfix.service", |
<|file_name|>seccat.go<|end_file_name|><|fim▁begin|>// package main provides an implementation of netcat using the secio package.
// This means the channel is encrypted (and MACed).
// It is meant to exercise the spipe package.
// Usage:
// seccat [<local address>] <remote address>
// seccat -l <local address>
//
// Address format is: [host]:port
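// Example session (addresses illustrative):
//	terminal A: seccat -l :5555
//	terminal B: seccat 127.0.0.1:5555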
package main
import (
"context"
"errors"
"flag"
"fmt"
"io"
"net"
"os"
"os/signal"
"syscall"
pstore "gx/ipfs/QmPgDWmTmuzvP7QE5zwo1TmjbJme9pmZHNujB2453jkCTr/go-libp2p-peerstore"
logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log"
peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer"
ci "gx/ipfs/QmaPbCnUMBohSGo3KnxEa2bHqyJVVeEEcwtqJAYxerieBo/go-libp2p-crypto"
secio "gx/ipfs/QmbSkjJvDuxaZYtR46sF9ust7XY1hcg7DrA6Mxu4UiSWqs/go-libp2p-secio"
)
var verbose = false
// Usage prints out the usage of this module.
// Assumes flags use go stdlib flag package.
var Usage = func() {
text := `seccat - secure netcat in Go
Usage:
	dial:   %s [<local address>] <remote address>
	listen: %s -l <local address>
Address format is Go's: [host]:port
`
fmt.Fprintf(os.Stderr, text, os.Args[0], os.Args[0])
flag.PrintDefaults()
}
type args struct {
listen bool
verbose bool
debug bool
localAddr string
remoteAddr string
// keyfile string
keybits int
}
func parseArgs() args {
var a args
// setup + parse flags
flag.BoolVar(&a.listen, "listen", false, "listen for connections")
flag.BoolVar(&a.listen, "l", false, "listen for connections (short)")
flag.BoolVar(&a.verbose, "v", true, "verbose")
flag.BoolVar(&a.debug, "debug", false, "debugging")
// flag.StringVar(&a.keyfile, "key", "", "private key file")
flag.IntVar(&a.keybits, "keybits", 2048, "num bits for generating private key")
flag.Usage = Usage
flag.Parse()
osArgs := flag.Args()
if len(osArgs) < 1 {
exit("")
}
if a.verbose {
out("verbose on")
}
if a.listen {
a.localAddr = osArgs[0]
} else {
if len(osArgs) > 1 {
a.localAddr = osArgs[0]
a.remoteAddr = osArgs[1]
} else {
a.remoteAddr = osArgs[0]
}
}
return a
}
func main() {
args := parseArgs()
verbose = args.verbose
if args.debug {
logging.SetDebugLogging()
}
go func() {
// wait until we exit.
sigc := make(chan os.Signal, 1)
signal.Notify(sigc, syscall.SIGABRT)
<-sigc
panic("ABORT! ABORT! ABORT!")
}()
if err := connect(args); err != nil {
exit("%s", err)
}
}<|fim▁hole|>func setupPeer(a args) (peer.ID, pstore.Peerstore, error) {
if a.keybits < 1024 {
return "", nil, errors.New("Bitsize less than 1024 is considered unsafe.")
}
out("generating key pair...")
sk, pk, err := ci.GenerateKeyPair(ci.RSA, a.keybits)
if err != nil {
return "", nil, err
}
p, err := peer.IDFromPublicKey(pk)
if err != nil {
return "", nil, err
}
ps := pstore.NewPeerstore()
ps.AddPrivKey(p, sk)
ps.AddPubKey(p, pk)
out("local peer id: %s", p)
return p, ps, nil
}
func connect(args args) error {
p, ps, err := setupPeer(args)
if err != nil {
return err
}
var conn net.Conn
if args.listen {
conn, err = Listen(args.localAddr)
} else {
conn, err = Dial(args.localAddr, args.remoteAddr)
}
if err != nil {
return err
}
// log everything that goes through conn
rwc := &logRW{n: "conn", rw: conn}
// OK, let's setup the channel.
sk := ps.PrivKey(p)
sg := secio.SessionGenerator{LocalID: p, PrivateKey: sk}
sess, err := sg.NewSession(context.TODO(), rwc)
if err != nil {
return err
}
out("remote peer id: %s", sess.RemotePeer())
netcat(sess.ReadWriter().(io.ReadWriteCloser))
return nil
}
// Listen listens and accepts one incoming UDT connection on a given port,
// and pipes all incoming data to os.Stdout.
func Listen(localAddr string) (net.Conn, error) {
l, err := net.Listen("tcp", localAddr)
if err != nil {
return nil, err
}
out("listening at %s", l.Addr())
c, err := l.Accept()
if err != nil {
return nil, err
}
out("accepted connection from %s", c.RemoteAddr())
// done with listener
l.Close()
return c, nil
}
// Dial connects to a remote address and pipes all os.Stdin to the remote end.
// If localAddr is set, uses it to Dial from.
func Dial(localAddr, remoteAddr string) (net.Conn, error) {
var laddr net.Addr
var err error
if localAddr != "" {
laddr, err = net.ResolveTCPAddr("tcp", localAddr)
if err != nil {
return nil, fmt.Errorf("failed to resolve address %s", localAddr)
}
}
if laddr != nil {
out("dialing %s from %s", remoteAddr, laddr)
} else {
out("dialing %s", remoteAddr)
}
d := net.Dialer{LocalAddr: laddr}
c, err := d.Dial("tcp", remoteAddr)
if err != nil {
return nil, err
}
out("connected to %s", c.RemoteAddr())
return c, nil
}
func netcat(c io.ReadWriteCloser) {
out("piping stdio to connection")
done := make(chan struct{}, 2)
go func() {
n, _ := io.Copy(c, os.Stdin)
out("sent %d bytes", n)
done <- struct{}{}
}()
go func() {
n, _ := io.Copy(os.Stdout, c)
out("received %d bytes", n)
done <- struct{}{}
}()
// wait until we exit.
sigc := make(chan os.Signal, 1)
signal.Notify(sigc, syscall.SIGHUP, syscall.SIGINT,
syscall.SIGTERM, syscall.SIGQUIT)
select {
case <-done:
case <-sigc:
return
}
c.Close()
}<|fim▁end|> | |
<|file_name|>symbolic_property.hpp<|end_file_name|><|fim▁begin|>//Symbolic Property
#ifndef _PROBMODELS_BASE_SYMBOLIC_PROPERTY_HPP_
#define _PROBMODELS_BASE_SYMBOLIC_PROPERTY_HPP_
#include <array>
#include "boost/pending/property.hpp"
namespace probmodels {
namespace base {
//Property must be copyable, assignable and copy constructible
template <std::size_t Len, typename Property = boost::no_property>
class SymbolicProperty {
static const std::size_t _array_len = Len+1;
<|fim▁hole|>
inline const char* label() const { return _label; }
SymbolicProperty& operator= (const SymbolicProperty& property) {
std::copy(property._label, property._label+Len+1, _label);
}
private:
std::array<char,_array_len> _label;
Property _prop;
};
}
}
#endif<|fim▁end|> | public:
SymbolicProperty() {}
SymbolicProperty(const SymbolicProperty& prop) :
_label(prop._label), _prop(prop._prop) {} |
<|file_name|>ios-device-product-name-mapper.ts<|end_file_name|><|fim▁begin|>import { IStringDictionary } from "../../declarations";
import { injector } from "../../yok";
class IosDeviceProductNameMapper implements Mobile.IiOSDeviceProductNameMapper {
// http://support.hockeyapp.net/kb/client-integration-ios-mac-os-x/ios-device-types
private map: IStringDictionary = {
"iPhone1,1": "iPhone",
"iPhone1,2": "iPhone 3G",
"iPhone2,1": "iPhone 3GS",
"iPhone3,1": "iPhone 4 (GSM)",
"iPhone3,3": "iPhone 4 (CDMA)",
"iPhone4,1": "iPhone 4S",
"iPhone5,1": "iPhone 5 (A1428)",
"iPhone5,2": "iPhone 5 (A1429)",
"iPhone5,3": "iPhone 5c (A1456/A1532)",
"iPhone5,4": "iPhone 5c (A1507/A1516/A1529)",
"iPhone6,1": "iPhone 5s (A1433/A1453)",
"iPhone6,2": "iPhone 5s (A1457/A1518/A1530)",
"iPhone7,1": "iPhone 6 Plus",
"iPhone7,2": "iPhone 6",
"iPhone8,1": "iPhone 6s",
"iPhone8,2": "iPhone 6s Plus",
"iPad1,1": "iPad",
"iPad2,1": "iPad 2 (Wi-Fi)",
"iPad2,2": "iPad 2 (GSM)",
"iPad2,3": "iPad 2 (CDMA)",
"iPad2,4": "iPad 2 (Wi-Fi, revised)",
"iPad2,5": "iPad mini (Wi-Fi)",
"iPad2,6": "iPad mini (A1454)",
"iPad2,7": "iPad mini (A1455)",
"iPad3,1": "iPad (3rd gen, Wi-Fi)",
"iPad3,2": "iPad (3rd gen, Wi-Fi+LTE Verizon)",
"iPad3,3": "iPad (3rd gen, Wi-Fi+LTE AT&T)",
"iPad3,4": "iPad (4th gen, Wi-Fi)",
"iPad3,5": "iPad (4th gen, A1459)",
"iPad3,6": "iPad (4th gen, A1460)",
"iPad4,1": "iPad Air (Wi-Fi)",
"iPad4,2": "iPad Air (Wi-Fi+LTE)",
"iPad4,3": "iPad Air (Rev)",
"iPad4,4": "iPad mini 2 (Wi-Fi)",
"iPad4,5": "iPad mini 2 (Wi-Fi+LTE)",
"iPad4,6": "iPad mini 2 (Rev)",
"iPad4,7": "iPad mini 3 (Wi-Fi)",
"iPad4,8": "iPad mini 3 (A1600)",<|fim▁hole|> "iPad5,1": "iPad mini 4 (Wi-Fi)",
"iPad5,2": "iPad mini 4 (Wi-Fi+LTE)",
"iPad5,3": "iPad Air 2 (Wi-Fi)",
"iPad5,4": "iPad Air 2 (Wi-Fi+LTE)",
"iPad6,7": "iPad Pro (Wi-Fi)",
"iPad6,8": "iPad Pro (Wi-Fi+LTE)",
"iPod1,1": "iPod touch",
"iPod2,1": "iPod touch (2nd gen)",
"iPod3,1": "iPod touch (3rd gen)",
"iPod4,1": "iPod touch (4th gen)",
"iPod5,1": "iPod touch (5th gen)",
"iPod7,1": "iPod touch (6th gen)",
};
public resolveProductName(deviceType: string): string {
return this.map[deviceType];
}
}
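// Usage sketch (illustrative): resolve the mapper through the injector wiring
// below and translate a hardware identifier, e.g.
//   resolveProductName("iPhone8,1") -> "iPhone 6s"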
injector.register("iOSDeviceProductNameMapper", IosDeviceProductNameMapper);<|fim▁end|> | "iPad4,9": "iPad mini 3 (A1601)", |
<|file_name|>mysql_mail_log_test.go<|end_file_name|><|fim▁begin|>package dao
import (
"go-common/app/admin/ep/merlin/model"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
var (
username = "[email protected]"
)
func Test_Mail_Log(t *testing.T) {
Convey("test add mail log", t, func() {
ml := &model.MailLog{
ReceiverName: username,
MailType: 1,
SendContext: "test add mail log",
}
err := d.InsertMailLog(ml)
So(err, ShouldBeNil)
})
Convey("test find mail log", t, func() {
mailLogs, err := d.FindMailLog(username)<|fim▁hole|> })
Convey("test delete mail log", t, func() {
err := d.DelMailLog(username)
So(err, ShouldBeNil)
})
Convey("test find mail log", t, func() {
mailLogs, err := d.FindMailLog(username)
So(len(mailLogs), ShouldEqual, 0)
So(err, ShouldBeNil)
})
}<|fim▁end|> | So(len(mailLogs), ShouldBeGreaterThan, 0)
So(err, ShouldBeNil) |
<|file_name|>var-captured-in-nested-closure.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#10381)
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:rbreak zzz
// gdb-command:run
// gdb-command:finish
// gdb-command:print variable
// gdb-check:$1 = 1
// gdb-command:print constant
// gdb-check:$2 = 2
// gdb-command:print a_struct
// gdb-check:$3 = {a = -3, b = 4.5, c = 5}
// gdb-command:print *struct_ref
// gdb-check:$4 = {a = -3, b = 4.5, c = 5}
// gdb-command:print *owned
// gdb-check:$5 = 6
// gdb-command:print managed->val
// gdb-check:$6 = 7
// gdb-command:print closure_local
// gdb-check:$7 = 8
// gdb-command:continue
// gdb-command:finish
// gdb-command:print variable
// gdb-check:$8 = 1
// gdb-command:print constant
// gdb-check:$9 = 2
// gdb-command:print a_struct
// gdb-check:$10 = {a = -3, b = 4.5, c = 5}
// gdb-command:print *struct_ref
// gdb-check:$11 = {a = -3, b = 4.5, c = 5}
// gdb-command:print *owned
// gdb-check:$12 = 6
// gdb-command:print managed->val
// gdb-check:$13 = 7
// gdb-command:print closure_local
// gdb-check:$14 = 8
// gdb-command:continue
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print variable
// lldb-check:[...]$0 = 1
// lldb-command:print constant
// lldb-check:[...]$1 = 2
// lldb-command:print a_struct
// lldb-check:[...]$2 = Struct { a: -3, b: 4.5, c: 5 }
// lldb-command:print *struct_ref
// lldb-check:[...]$3 = Struct { a: -3, b: 4.5, c: 5 }
// lldb-command:print *owned
// lldb-check:[...]$4 = 6
// lldb-command:print managed->val
// lldb-check:[...]$5 = 7
// lldb-command:print closure_local
// lldb-check:[...]$6 = 8
// lldb-command:continue
// lldb-command:print variable
// lldb-check:[...]$7 = 1
// lldb-command:print constant
// lldb-check:[...]$8 = 2
// lldb-command:print a_struct
// lldb-check:[...]$9 = Struct { a: -3, b: 4.5, c: 5 }
// lldb-command:print *struct_ref
// lldb-check:[...]$10 = Struct { a: -3, b: 4.5, c: 5 }
// lldb-command:print *owned
// lldb-check:[...]$11 = 6
// lldb-command:print managed->val
// lldb-check:[...]$12 = 7
// lldb-command:print closure_local
// lldb-check:[...]$13 = 8
// lldb-command:continue
#![allow(unused_variable)]
use std::gc::GC;
struct Struct {
a: int,
b: f64,
c: uint
}
fn main() {
let mut variable = 1;
let constant = 2;
let a_struct = Struct {
a: -3,
b: 4.5,
c: 5
};
let struct_ref = &a_struct;
let owned = box 6;
let managed = box(GC) 7;
let closure = || {
let closure_local = 8;
let nested_closure = || {
zzz(); // #break
variable = constant + a_struct.a + struct_ref.a + *owned + *managed + closure_local;
};
zzz(); // #break
nested_closure();
};
closure();
}
fn zzz() {()}<|fim▁end|> | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT |
<|file_name|>0024_homepagefeaturedpage.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0028_merge'),
('home', '0023_remove_homepage_live_feed_intro'),<|fim▁hole|> ]
operations = [
migrations.CreateModel(
name='HomePageFeaturedPage',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('sort_order', models.IntegerField(blank=True, null=True, editable=False)),
('title', models.CharField(blank=True, max_length=255, help_text='Leave blank to use page title.')),
('subtitle', models.CharField(max_length=255)),
('featured_page', models.ForeignKey(verbose_name='page to feature', to='wagtailcore.Page', related_name='+')),
('home_page', modelcluster.fields.ParentalKey(to='home.HomePage', related_name='featured_pages')),
],
options={
'abstract': False,
'ordering': ['sort_order'],
},
),
]<|fim▁end|> | |
<|file_name|>convert.go<|end_file_name|><|fim▁begin|>// Copyright 2017 The go-darwin Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hdiutil
import "os/exec"
// formatFlag implements a hdiutil convert command format flag interface.
type formatFlag interface {
formatFlag() []string
}
type convertFormot int
const (
// ConvertUDRW UDIF read/write image.
ConvertUDRW convertFormot = 1 << iota
// ConvertUDRO UDIF read-only image.
ConvertUDRO
// ConvertUDCO UDIF ADC-compressed image.
ConvertUDCO
// ConvertUDZO UDIF zlib-compressed image.
ConvertUDZO
// ConvertULFO UDIF lzfse-compressed image (OS X 10.11+ only).
ConvertULFO
// ConvertUDBZ UDIF bzip2-compressed image (Mac OS X 10.4+ only).
ConvertUDBZ
// ConvertUDTO DVD/CD-R master for export.
ConvertUDTO
// ConvertUDSP SPARSE (grows with content).
ConvertUDSP
// ConvertUDSB SPARSEBUNDLE (grows with content; bundle-backed).
ConvertUDSB
// ConvertUFBI UDIF entire image with MD5 checksum.
ConvertUFBI
// ConvertUDRo UDIF read-only (obsolete format).
ConvertUDRo
// ConvertUDCo UDIF compressed (obsolete format).
ConvertUDCo<|fim▁hole|> // ConvertRdxx NDIF read-only image (Disk Copy 6.3.3 format; deprecated).
ConvertRdxx
// ConvertROCo NDIF compressed image (deprecated).
ConvertROCo
// ConvertRken NDIF compressed (obsolete format).
ConvertRken
// ConvertDC42 Disk Copy 4.2 image (obsolete format).
ConvertDC42
)
// convertFlag implements a hdiutil convert command flag interface.
type convertFlag interface {
convertFlag() []string
}
// ConvertAlign default is 4 (2K).
type ConvertAlign int
func (c ConvertAlign) convertFlag() []string { return intFlag("align", int(c)) }
type convertPmap bool
func (c convertPmap) convertFlag() []string { return boolFlag("pmap", bool(c)) }
// ConvertSegmentSize specify segmentation into size_spec-sized segments as outfile is being written.
//
// The default size_spec when ConvertSegmentSize is specified alone is 2*1024*1024 (1 GB worth of sectors) for UDTO images and 4*1024*1024 (2 GB segments) for all other image types.
//
// size_spec(string) can also be specified ??b|??k|??m|??g|??t|??p|??e like create's CreateSize flag.
type ConvertSegmentSize string
func (c ConvertSegmentSize) convertFlag() []string { return stringFlag("segmentSize", string(c)) }
// ConvertTasks when converting an image into a compressed format, specify the number of threads to use for the compression operation.
//
// The default is the number of processors active in the current system.
type ConvertTasks int
func (c ConvertTasks) convertFlag() []string { return intFlag("tasks", int(c)) }
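// Usage sketch (image paths and flag values are illustrative assumptions):
//
//	err := Convert("src.dmg", ConvertUDZO, "dst.dmg",
//		ConvertSegmentSize("1g"), ConvertTasks(4))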
const (
// ConvertPmap add partition map.
ConvertPmap convertPmap = true
)
// Convert convert image to type format and write the result to outfile.
func Convert(image string, format formatFlag, outfile string, flags ...convertFlag) error {
cmd := exec.Command(hdiutilPath, "convert", image)
cmd.Args = append(cmd.Args, format.formatFlag()...)
cmd.Args = append(cmd.Args, outfile)
if len(flags) > 0 {
for _, flag := range flags {
cmd.Args = append(cmd.Args, flag.convertFlag()...)
}
}
err := cmd.Run()
if err != nil {
return err
}
return nil
}<|fim▁end|> | // ConvertRdWr NDIF read/write image (deprecated).
ConvertRdWr |
<|file_name|>views.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
'''
Copyright 2014 FreshPlanet (http://freshplanet.com | [email protected])
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import datetime
import random
from google.appengine.ext import ndb
import webapp2
from counter.models import Counter
<|fim▁hole|> def get(self):
"""
Increments some Counters to play with the feature.
"""
# Fill datastore with data to show case in admin view
otherSliceId = (datetime.datetime.utcnow() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
for client in ['iOS', 'Android', 'Windows']:
Counter.increment('newInstalls_' + client, random.randint(1, 5))
Counter.increment('newInstalls_' + client, random.randint(1, 5), sliceId=otherSliceId)
self.response.write("""
Counters updated!
Query for counters <a href="/admin/counters/?prefix=newInstalls">here</a>.
""")<|fim▁end|> |
class SampleHandler(webapp2.RequestHandler):
@ndb.toplevel |
<|file_name|>is_stateless.hpp<|end_file_name|><|fim▁begin|>// (C) Copyright Steve Cleary, Beman Dawes, Howard Hinnant & John Maddock 2000.<|fim▁hole|>// http://www.boost.org/LICENSE_1_0.txt).
//
// See http://www.boost.org/libs/type_traits for most recent version including documentation.
#ifndef BOOST_TT_IS_STATELESS_HPP_INCLUDED
#define BOOST_TT_IS_STATELESS_HPP_INCLUDED
#include <boost/type_traits/has_trivial_constructor.hpp>
#include <boost/type_traits/has_trivial_copy.hpp>
#include <boost/type_traits/has_trivial_destructor.hpp>
#include <boost/type_traits/is_class.hpp>
#include <boost/type_traits/is_empty.hpp>
#include <boost/type_traits/detail/ice_and.hpp>
#include <boost/config.hpp>
// should be the last #include
#include <boost/type_traits/detail/bool_trait_def.hpp>
namespace pdalboost {} namespace boost = pdalboost; namespace pdalboost {
namespace detail {
template <typename T>
struct is_stateless_impl
{
BOOST_STATIC_CONSTANT(bool, value =
(::pdalboost::type_traits::ice_and<
::pdalboost::has_trivial_constructor<T>::value,
::pdalboost::has_trivial_copy<T>::value,
::pdalboost::has_trivial_destructor<T>::value,
::pdalboost::is_class<T>::value,
::pdalboost::is_empty<T>::value
>::value));
};
} // namespace detail
BOOST_TT_AUX_BOOL_TRAIT_DEF1(is_stateless,T,::pdalboost::detail::is_stateless_impl<T>::value)
} // namespace pdalboost
#include <boost/type_traits/detail/bool_trait_undef.hpp>
#endif // BOOST_TT_IS_STATELESS_HPP_INCLUDED<|fim▁end|> | // Use, modification and distribution are subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at |
<|file_name|>commandfactory.go<|end_file_name|><|fim▁begin|>package main
import "strings"
//CreateCommand is a factory method that returns the command that
//is inferred from the passed command line arguments.
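//A brief usage sketch (list names are illustrative; the command constructors
//are assumed to be defined elsewhere in this package):
//	CreateCommand(nil, translator)                             // -> *OverviewCommand
//	CreateCommand([]string{"open", "movies", "2"}, translator) // opens entry 2 of list "movies"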
func CreateCommand(args []string, translator Translator) Command {<|fim▁hole|> if args == nil || len(args) == 0 {
return new(OverviewCommand)
}
count := len(args)
command := strings.ToLower(args[0])
switch command {
case "all":
return NewAllCommand(translator)
case "delete":
if count == 2 { //Delete list
return NewDeleteListCommand(args[1])
} else if count == 3 { //Delete list item
return NewDeleteListEntryCommand(args[1], args[2])
} else { //Do nothing.
return nil
}
case "open":
if count == 2 { //Open all in a list.
return NewOpenListCommand(args[1])
} else if count == 3 { //Open a specific entry.
return NewOpenListEntryCommand(args[1], args[2])
}
case "random":
if count == 1 {
return NewRandomEntryCommand()
} else if count == 2 {
return NewRandomSpecificEntryCommand(args[1])
}
case "echo":
if count == 2 {
return NewEchoEntryCommand(args[1])
}
return NewEchoSpecificEntryCommand(args[1], args[2])
case "copy":
if count == 2 {
return NewCopyEntryCommand(args[1])
}
return NewCopySpecificEntryCommand(args[1], args[2])
default:
return NewCatchAllCommand(args, translator)
}
return nil
}<|fim▁end|> | |
<|file_name|>AppcastReplaceItem.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python<|fim▁hole|>AppcastReplaceItem <path-to-appcast> <old-version> <new-version> <path-to-dmg>
Example: AppcastReplaceItem appcast-release.xml 1.1.4 1.2 Release/build/Adium_1.2.dmg
"""
# Configurable variables.
app_name = 'Adium'
changelog_fmt = 'http://www.adium.im/changelogs/%(version)s.html'
enclosure_fmt = ' <enclosure sparkle:md5Sum="%(md5)s" sparkle:version="%(version)s" url="%(url)s" length="%(file_size)s" type="application/octet-stream"/>\n'
# End of configurable variables.
import xml.etree.cElementTree as ElementTree
import sys
import os
import time
import subprocess
from stat import *
args = dict(zip(('appcast_path', 'old_version', 'version', 'dmg_pathname'), sys.argv[1:]))
try:
appcast_path = args['appcast_path']
old_version = args['old_version']
version = args['version']
dmg_pathname = args['dmg_pathname']
except KeyError:
sys.exit(__doc__.strip())
else:
args['app_name'] = app_name
# Get the length and modification time of the dmg file.
sb = os.stat(dmg_pathname)
file_size = args['file_size'] = sb[ST_SIZE]
dmg_mod_time = time.localtime(sb[ST_MTIME])
# Suffix for the day of the month.
th = (['st', 'nd', 'rd'] + ['th'] * 7) * 4
# GMT offset in hours.
gmt_offset = '%+i' % (-int(time.timezone / 3600),)
# Format, which we must fill in with the above items first.
time_fmt = '%A, %B %dth, %Y %H:%M:%S GMT+0'.replace('th', th[dmg_mod_time.tm_mday - 1]).replace('+0', gmt_offset)
dmg_mod_date = args['dmg_mod_date'] = time.strftime(time_fmt, dmg_mod_time)
openssl_md5 = subprocess.Popen(['openssl', 'md5', dmg_pathname], stdout=subprocess.PIPE)
# Skip the prefix
openssl_md5.stdout.read(len('MD5(') + len(dmg_pathname) + len(')= '))
md5 = args['md5'] = openssl_md5.stdout.read().strip()
exit_status = openssl_md5.wait()
if exit_status != 0: sys.exit('openssl md5 exited with status ' + str(exit_status))
# An MD5 hash is 16 bytes, which is 32 digits hexadecimal.
assert len(md5) == 32, 'MD5 sum is %u bytes' % (len(md5),)
dmg_filename = os.path.split(dmg_pathname)[1]
url = args['url'] = 'http://adiumx.cachefly.net/' + dmg_filename
# Because XML parsing with the standard library is a PITA, we're going to do it the hackish way.
xmlfile = file(appcast_path)
lines = []
is_in_item = False
is_correct_item = False
found_correct_item = False
for line in xmlfile:
if not is_in_item:
if '<item>' in line:
is_in_item = True
else:
if '</item>' in line:
is_in_item = False
is_correct_item = False
elif '<title>' in line:
if '>%(app_name)s %(old_version)s<' % args in line:
line = line.replace(old_version, version)
is_correct_item = found_correct_item = True
elif is_correct_item:
if'<pubDate>' in line:
line = ' <pubDate>%(dmg_mod_date)s</pubDate>\n' % args
elif '<sparkle:releaseNotesLink>' in line:
line = ' <sparkle:releaseNotesLink>%s</sparkle:releaseNotesLink>\n' % (changelog_fmt % args,)
elif '<enclosure' in line:
line = enclosure_fmt % args
lines.append(line)
if not found_correct_item:
sys.exit('No item found for version %(old_version)s' % args)
xmlfile = file(appcast_path, 'w')
xmlfile.writelines(lines)<|fim▁end|> |
"""
Usage: |
<|file_name|>coverage-missing.cc<|end_file_name|><|fim▁begin|>// Test for "sancov.py missing ...".
// First case: coverage from executable. main() is called on every code path.
// RUN: %clangxx_asan -fsanitize-coverage=func,trace-pc-guard %s -o %t -DFOOBAR -DMAIN
// RUN: rm -rf %t-dir
// RUN: mkdir -p %t-dir
// RUN: cd %t-dir
// RUN: %env_asan_opts=coverage=1:coverage_dir=%t-dir %run %t
// RUN: %sancov print *.sancov > main.txt
// RUN: rm *.sancov
// RUN: count 1 < main.txt
// RUN: %env_asan_opts=coverage=1:coverage_dir=%t-dir %run %t x
// RUN: %sancov print *.sancov > foo.txt
// RUN: rm *.sancov
// RUN: count 3 < foo.txt
// RUN: %env_asan_opts=coverage=1:coverage_dir=%t-dir %run %t x x
// RUN: %sancov print *.sancov > bar.txt
// RUN: rm *.sancov
// RUN: count 4 < bar.txt<|fim▁hole|>// lists can be tested for set inclusion with diff + grep.
// RUN: diff bar.txt foo-missing-with-main.txt > %t.log || true
// RUN: not grep "^<" %t.log
// Second case: coverage from DSO.
// cd %t-dir
// RUN: %clangxx_asan -fsanitize-coverage=func,trace-pc-guard %s -o %dynamiclib -DFOOBAR -shared -fPIC
// RUN: %clangxx_asan -fsanitize-coverage=func,trace-pc-guard %s %dynamiclib -o %t -DMAIN
// RUN: cd ..
// RUN: rm -rf %t-dir
// RUN: mkdir -p %t-dir
// RUN: cd %t-dir
// RUN: %env_asan_opts=coverage=1:coverage_dir=%t-dir %run %t x
// RUN: %sancov print %xdynamiclib_filename.*.sancov > foo.txt
// RUN: rm *.sancov
// RUN: count 2 < foo.txt
// RUN: %env_asan_opts=coverage=1:coverage_dir=%t-dir %run %t x x
// RUN: %sancov print %xdynamiclib_filename.*.sancov > bar.txt
// RUN: rm *.sancov
// RUN: count 3 < bar.txt
// RUN: %sancov missing %dynamiclib < foo.txt > foo-missing.txt
// RUN: diff bar.txt foo-missing.txt > %t.log || true
// RUN: not grep "^<" %t.log
// REQUIRES: x86-target-arch
// XFAIL: android
#include <stdio.h>
void foo1();
void foo2();
void bar1();
void bar2();
void bar3();
#if defined(FOOBAR)
void foo1() { fprintf(stderr, "foo1\n"); }
void foo2() { fprintf(stderr, "foo2\n"); }
void bar1() { fprintf(stderr, "bar1\n"); }
void bar2() { fprintf(stderr, "bar2\n"); }
void bar3() { fprintf(stderr, "bar3\n"); }
#endif
#if defined(MAIN)
int main(int argc, char **argv) {
switch (argc) {
case 1:
break;
case 2:
foo1();
foo2();
break;
case 3:
bar1();
bar2();
bar3();
break;
}
}
#endif<|fim▁end|> | // RUN: %sancov missing %t < foo.txt > foo-missing.txt
// RUN: sort main.txt foo-missing.txt -o foo-missing-with-main.txt
// The "missing from foo" set may contain a few bogus PCs from the sanitizer
// runtime, but it must include the entire "bar" code path as a subset. Sorted |
<|file_name|>tokenstream.rs<|end_file_name|><|fim▁begin|>//! # Token Streams
//!
//! `TokenStream`s represent syntactic objects before they are converted into ASTs.
//! A `TokenStream` is, roughly speaking, a sequence of [`TokenTree`]s,
//! which are themselves a single [`Token`] or a `Delimited` subsequence of tokens.
//!
//! ## Ownership
//!
//! `TokenStream`s are persistent data structures constructed as ropes with reference
//! counted-children. In general, this means that calling an operation on a `TokenStream`
//! (such as `slice`) produces an entirely new `TokenStream` from the borrowed reference to
//! the original. This essentially coerces `TokenStream`s into "views" of their subparts,
//! and a borrowed `TokenStream` is sufficient to build an owned `TokenStream` without taking
//! ownership of the original.
use crate::token::{self, DelimToken, Token, TokenKind};
use crate::AttrVec;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{self, Lrc};
use rustc_macros::HashStable_Generic;
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use rustc_span::{Span, DUMMY_SP};
use smallvec::{smallvec, SmallVec};
use std::{fmt, iter, mem};
/// When the main Rust parser encounters a syntax-extension invocation, it
/// parses the arguments to the invocation as a token tree. This is a very
/// loose structure, such that all sorts of different AST fragments can
/// be passed to syntax extensions using a uniform type.
///
/// If the syntax extension is an MBE macro, it will attempt to match its
/// LHS token tree against the provided token tree, and if it finds a
/// match, will transcribe the RHS token tree, splicing in any captured
/// `macro_parser::matched_nonterminals` into the `SubstNt`s it finds.
///
/// The RHS of an MBE macro is the only place `SubstNt`s are substituted.
/// Nothing special happens to misnamed or misplaced `SubstNt`s.
#[derive(Debug, Clone, PartialEq, Encodable, Decodable, HashStable_Generic)]
pub enum TokenTree {
/// A single token.
Token(Token),
/// A delimited sequence of token trees.
Delimited(DelimSpan, DelimToken, TokenStream),
}
#[derive(Copy, Clone)]
pub enum CanSynthesizeMissingTokens {
Yes,
No,
}
// Ensure all fields of `TokenTree` is `Send` and `Sync`.
#[cfg(parallel_compiler)]
fn _dummy()
where
Token: Send + Sync,
DelimSpan: Send + Sync,
DelimToken: Send + Sync,
TokenStream: Send + Sync,
{
}
impl TokenTree {
/// Checks if this `TokenTree` is equal to the other, regardless of span information.
pub fn eq_unspanned(&self, other: &TokenTree) -> bool {
match (self, other) {
(TokenTree::Token(token), TokenTree::Token(token2)) => token.kind == token2.kind,
(TokenTree::Delimited(_, delim, tts), TokenTree::Delimited(_, delim2, tts2)) => {
delim == delim2 && tts.eq_unspanned(&tts2)
}
_ => false,
}
}
/// Retrieves the `TokenTree`'s span.
pub fn span(&self) -> Span {
match self {
TokenTree::Token(token) => token.span,
TokenTree::Delimited(sp, ..) => sp.entire(),
}
}
/// Modify the `TokenTree`'s span in-place.
pub fn set_span(&mut self, span: Span) {
match self {
TokenTree::Token(token) => token.span = span,
TokenTree::Delimited(dspan, ..) => *dspan = DelimSpan::from_single(span),
}
}
pub fn token(kind: TokenKind, span: Span) -> TokenTree {
TokenTree::Token(Token::new(kind, span))
}
/// Returns the opening delimiter as a token tree.
pub fn open_tt(span: DelimSpan, delim: DelimToken) -> TokenTree {
TokenTree::token(token::OpenDelim(delim), span.open)
}
/// Returns the closing delimiter as a token tree.
pub fn close_tt(span: DelimSpan, delim: DelimToken) -> TokenTree {
TokenTree::token(token::CloseDelim(delim), span.close)
}
pub fn uninterpolate(self) -> TokenTree {
match self {
TokenTree::Token(token) => TokenTree::Token(token.uninterpolate().into_owned()),
tt => tt,
}
}
}
impl<CTX> HashStable<CTX> for TokenStream
where
CTX: crate::HashStableContext,
{
fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
for sub_tt in self.trees() {
sub_tt.hash_stable(hcx, hasher);
}
}
}
pub trait CreateTokenStream: sync::Send + sync::Sync {
fn create_token_stream(&self) -> AttrAnnotatedTokenStream;
}
impl CreateTokenStream for AttrAnnotatedTokenStream {
fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
self.clone()
}
}
/// A lazy version of [`TokenStream`], which defers creation
/// of an actual `TokenStream` until it is needed.
/// `Box` is here only to reduce the structure size.
#[derive(Clone)]
pub struct LazyTokenStream(Lrc<Box<dyn CreateTokenStream>>);
impl LazyTokenStream {
pub fn new(inner: impl CreateTokenStream + 'static) -> LazyTokenStream {
LazyTokenStream(Lrc::new(Box::new(inner)))
}
pub fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
self.0.create_token_stream()
}
}
impl fmt::Debug for LazyTokenStream {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "LazyTokenStream({:?})", self.create_token_stream())
}
}
impl<S: Encoder> Encodable<S> for LazyTokenStream {
fn encode(&self, s: &mut S) -> Result<(), S::Error> {
// Used by AST json printing.
Encodable::encode(&self.create_token_stream(), s)
}
}
impl<D: Decoder> Decodable<D> for LazyTokenStream {
fn decode(_d: &mut D) -> Result<Self, D::Error> {
panic!("Attempted to decode LazyTokenStream");
}
}
impl<CTX> HashStable<CTX> for LazyTokenStream {
fn hash_stable(&self, _hcx: &mut CTX, _hasher: &mut StableHasher) {
panic!("Attempted to compute stable hash for LazyTokenStream");
}
}
/// A `AttrAnnotatedTokenStream` is similar to a `TokenStream`, but with extra
/// information about the tokens for attribute targets. This is used
/// during expansion to perform early cfg-expansion, and to process attributes
/// during proc-macro invocations.
#[derive(Clone, Debug, Default, Encodable, Decodable)]
pub struct AttrAnnotatedTokenStream(pub Lrc<Vec<(AttrAnnotatedTokenTree, Spacing)>>);
/// Like `TokenTree`, but for `AttrAnnotatedTokenStream`
#[derive(Clone, Debug, Encodable, Decodable)]
pub enum AttrAnnotatedTokenTree {
Token(Token),
Delimited(DelimSpan, DelimToken, AttrAnnotatedTokenStream),
/// Stores the attributes for an attribute target,
/// along with the tokens for that attribute target.
/// See `AttributesData` for more information
Attributes(AttributesData),
}
impl AttrAnnotatedTokenStream {
pub fn new(tokens: Vec<(AttrAnnotatedTokenTree, Spacing)>) -> AttrAnnotatedTokenStream {
AttrAnnotatedTokenStream(Lrc::new(tokens))
}
/// Converts this `AttrAnnotatedTokenStream` to a plain `TokenStream
/// During conversion, `AttrAnnotatedTokenTree::Attributes` get 'flattened'
/// back to a `TokenStream` of the form `outer_attr attr_target`.
/// If there are inner attributes, they are inserted into the proper
/// place in the attribute target tokens.
pub fn to_tokenstream(&self) -> TokenStream {
let trees: Vec<_> = self
.0
.iter()
.flat_map(|tree| match &tree.0 {
AttrAnnotatedTokenTree::Token(inner) => {
smallvec![(TokenTree::Token(inner.clone()), tree.1)].into_iter()
}
AttrAnnotatedTokenTree::Delimited(span, delim, stream) => smallvec![(
TokenTree::Delimited(*span, *delim, stream.to_tokenstream()),
tree.1,
)]
.into_iter(),
AttrAnnotatedTokenTree::Attributes(data) => {
let mut outer_attrs = Vec::new();
let mut inner_attrs = Vec::new();
for attr in &data.attrs {
match attr.style {
crate::AttrStyle::Outer => {
outer_attrs.push(attr);
}
crate::AttrStyle::Inner => {
inner_attrs.push(attr);
}
}
}
let mut target_tokens: Vec<_> = data
.tokens
.create_token_stream()
.to_tokenstream()
.0
.iter()
.cloned()
.collect();
if !inner_attrs.is_empty() {
let mut found = false;
// Check the last two trees (to account for a trailing semi)
for (tree, _) in target_tokens.iter_mut().rev().take(2) {
if let TokenTree::Delimited(span, delim, delim_tokens) = tree {
// Inner attributes are only supported on extern blocks, functions, impls,
// and modules. All of these have their inner attributes placed at
// the beginning of the rightmost outermost braced group:
                                // e.g. fn foo() { #![my_attr] }
//
// Therefore, we can insert them back into the right location
// without needing to do any extra position tracking.
//
// Note: Outline modules are an exception - they can
// have attributes like `#![my_attr]` at the start of a file.
// Support for custom attributes in this position is not
// properly implemented - we always synthesize fake tokens,
// so we never reach this code.
let mut builder = TokenStreamBuilder::new();
for inner_attr in inner_attrs {
builder.push(inner_attr.tokens().to_tokenstream());
}
builder.push(delim_tokens.clone());
*tree = TokenTree::Delimited(*span, *delim, builder.build());
found = true;
break;
}
}
assert!(
found,
"Failed to find trailing delimited group in: {:?}",
target_tokens
);
}
let mut flat: SmallVec<[_; 1]> = SmallVec::new();
for attr in outer_attrs {
// FIXME: Make this more efficient
flat.extend(attr.tokens().to_tokenstream().0.clone().iter().cloned());
}
flat.extend(target_tokens);
flat.into_iter()
}
})
.collect();
TokenStream::new(trees)
}
}
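// Illustrative sketch of the flattening described above (informal token
// text, not real compiler output): an `Attributes` node holding the attrs
// `#[outer]` and `#![inner]` plus the captured target tokens
// `fn foo() { body(); }` flattens back to the token stream for
// `#[outer] fn foo() { #![inner] body(); }`.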
/// Stores the tokens for an attribute target, along
/// with its attributes.
///
/// This is constructed during parsing when we need to capture
/// tokens.
///
/// For example, `#[cfg(FALSE)] struct Foo {}` would
/// have an `attrs` field containing the `#[cfg(FALSE)]` attr,
/// and a `tokens` field storing the (unparsed) tokens `struct Foo {}`
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct AttributesData {
/// Attributes, both outer and inner.
/// These are stored in the original order that they were parsed in.
pub attrs: AttrVec,
/// The underlying tokens for the attribute target that `attrs`
/// are applied to
pub tokens: LazyTokenStream,
}
/// A `TokenStream` is an abstract sequence of tokens, organized into [`TokenTree`]s.
///
/// The goal is for procedural macros to work with `TokenStream`s and `TokenTree`s
/// instead of a representation of the abstract syntax tree.
/// Today's `TokenTree`s can still contain AST via `token::Interpolated` for
/// backwards compatibility.
#[derive(Clone, Debug, Default, Encodable, Decodable)]
pub struct TokenStream(pub(crate) Lrc<Vec<TreeAndSpacing>>);
pub type TreeAndSpacing = (TokenTree, Spacing);
// `TokenStream` is used a lot. Make sure it doesn't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(TokenStream, 8);
#[derive(Clone, Copy, Debug, PartialEq, Encodable, Decodable)]
pub enum Spacing {
Alone,
Joint,
}
impl TokenStream {
/// Given a `TokenStream` with a `Stream` of only two arguments, return a new `TokenStream`
/// separating the two arguments with a comma for diagnostic suggestions.
pub fn add_comma(&self) -> Option<(TokenStream, Span)> {
// Used to suggest if a user writes `foo!(a b);`
let mut suggestion = None;
let mut iter = self.0.iter().enumerate().peekable();
while let Some((pos, ts)) = iter.next() {
if let Some((_, next)) = iter.peek() {
let sp = match (&ts, &next) {
(_, (TokenTree::Token(Token { kind: token::Comma, .. }), _)) => continue,
(
(TokenTree::Token(token_left), Spacing::Alone),
(TokenTree::Token(token_right), _),
) if ((token_left.is_ident() && !token_left.is_reserved_ident())
|| token_left.is_lit())
&& ((token_right.is_ident() && !token_right.is_reserved_ident())
|| token_right.is_lit()) =>
{
token_left.span
}
((TokenTree::Delimited(sp, ..), Spacing::Alone), _) => sp.entire(),
_ => continue,
};
let sp = sp.shrink_to_hi();
let comma = (TokenTree::token(token::Comma, sp), Spacing::Alone);
suggestion = Some((pos, comma, sp));
}
}
if let Some((pos, comma, sp)) = suggestion {
let mut new_stream = Vec::with_capacity(self.0.len() + 1);
let parts = self.0.split_at(pos + 1);
new_stream.extend_from_slice(parts.0);
new_stream.push(comma);
new_stream.extend_from_slice(parts.1);
return Some((TokenStream::new(new_stream), sp));
}
None
}
}
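// Sketch of the diagnostic suggestion built by `add_comma` (informal): for
// `foo!(a b)`, the returned stream corresponds to `a, b`, and the returned
// span sits right after `a`, so diagnostics can suggest writing `foo!(a, b);`.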
impl From<(AttrAnnotatedTokenTree, Spacing)> for AttrAnnotatedTokenStream {
fn from((tree, spacing): (AttrAnnotatedTokenTree, Spacing)) -> AttrAnnotatedTokenStream {
AttrAnnotatedTokenStream::new(vec![(tree, spacing)])
}
}
impl From<TokenTree> for TokenStream {
fn from(tree: TokenTree) -> TokenStream {
TokenStream::new(vec![(tree, Spacing::Alone)])
}
}
impl From<TokenTree> for TreeAndSpacing {
fn from(tree: TokenTree) -> TreeAndSpacing {
(tree, Spacing::Alone)
}
}
impl iter::FromIterator<TokenTree> for TokenStream {
fn from_iter<I: IntoIterator<Item = TokenTree>>(iter: I) -> Self {
TokenStream::new(iter.into_iter().map(Into::into).collect::<Vec<TreeAndSpacing>>())
}
}
impl Eq for TokenStream {}
impl PartialEq<TokenStream> for TokenStream {
fn eq(&self, other: &TokenStream) -> bool {
self.trees().eq(other.trees())
}
}
impl TokenStream {
pub fn new(streams: Vec<TreeAndSpacing>) -> TokenStream {
TokenStream(Lrc::new(streams))
}
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
pub fn len(&self) -> usize {
self.0.len()
}
pub fn from_streams(mut streams: SmallVec<[TokenStream; 2]>) -> TokenStream {
match streams.len() {
0 => TokenStream::default(),
1 => streams.pop().unwrap(),
_ => {
// We are going to extend the first stream in `streams` with
// the elements from the subsequent streams. This requires
// using `make_mut()` on the first stream, and in practice this
// doesn't cause cloning 99.9% of the time.
//
// One very common use case is when `streams` has two elements,
// where the first stream has any number of elements within
// (often 1, but sometimes many more) and the second stream has
// a single element within.
// Determine how much the first stream will be extended.
// Needed to avoid quadratic blow up from on-the-fly
// reallocations (#57735).
let num_appends = streams.iter().skip(1).map(|ts| ts.len()).sum();
// Get the first stream. If it's `None`, create an empty
// stream.
let mut iter = streams.drain(..);
let mut first_stream_lrc = iter.next().unwrap().0;
// Append the elements to the first stream, after reserving
// space for them.
let first_vec_mut = Lrc::make_mut(&mut first_stream_lrc);
first_vec_mut.reserve(num_appends);
for stream in iter {
first_vec_mut.extend(stream.0.iter().cloned());
}
// Create the final `TokenStream`.
TokenStream(first_stream_lrc)
}
}
}
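// Minimal, self-contained sketch of the copy-on-write pattern used in
// `from_streams` above, assuming `Lrc` is rustc's alias for `Arc`/`Rc`
// (so `std::sync::Arc` behaves the same way):
//
//     use std::sync::Arc;
//
//     let mut first: Arc<Vec<u32>> = Arc::new(vec![1, 2]);
//     // `make_mut` clones the inner Vec only if `first` is shared.
//     Arc::make_mut(&mut first).extend(vec![3, 4]);
//     assert_eq!(*first, vec![1, 2, 3, 4]);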
pub fn trees(&self) -> Cursor {
self.clone().into_trees()
}
pub fn into_trees(self) -> Cursor {
Cursor::new(self)
}
/// Compares two `TokenStream`s, checking equality without regarding span information.
pub fn eq_unspanned(&self, other: &TokenStream) -> bool {
let mut t1 = self.trees();
let mut t2 = other.trees();
for (t1, t2) in iter::zip(&mut t1, &mut t2) {
if !t1.eq_unspanned(&t2) {
return false;
}
}
t1.next().is_none() && t2.next().is_none()
}
pub fn map_enumerated<F: FnMut(usize, &TokenTree) -> TokenTree>(self, mut f: F) -> TokenStream {
TokenStream(Lrc::new(
self.0
.iter()
.enumerate()
.map(|(i, (tree, is_joint))| (f(i, tree), *is_joint))
.collect(),
))
}
}
// 99.5%+ of the time we have 1 or 2 elements in this vector.
#[derive(Clone)]
pub struct TokenStreamBuilder(SmallVec<[TokenStream; 2]>);
impl TokenStreamBuilder {
pub fn new() -> TokenStreamBuilder {
TokenStreamBuilder(SmallVec::new())
}
pub fn push<T: Into<TokenStream>>(&mut self, stream: T) {
let mut stream = stream.into();
// If `self` is not empty and the last tree within the last stream is a
// token tree marked with `Joint`...
if let Some(TokenStream(ref mut last_stream_lrc)) = self.0.last_mut() {
if let Some((TokenTree::Token(last_token), Spacing::Joint)) = last_stream_lrc.last() {
// ...and `stream` is not empty and the first tree within it is
// a token tree...
let TokenStream(ref mut stream_lrc) = stream;
if let Some((TokenTree::Token(token), spacing)) = stream_lrc.first() {
// ...and the two tokens can be glued together...
if let Some(glued_tok) = last_token.glue(&token) {
// ...then do so, by overwriting the last token
// tree in `self` and removing the first token tree
// from `stream`. This requires using `make_mut()`
// on the last stream in `self` and on `stream`,
// and in practice this doesn't cause cloning 99.9%
// of the time.
// Overwrite the last token tree with the merged
// token.
let last_vec_mut = Lrc::make_mut(last_stream_lrc);
*last_vec_mut.last_mut().unwrap() = (TokenTree::Token(glued_tok), *spacing);
// Remove the first token tree from `stream`. (This
// is almost always the only tree in `stream`.)
let stream_vec_mut = Lrc::make_mut(stream_lrc);
stream_vec_mut.remove(0);
// Don't push `stream` if it's empty -- that could
// block subsequent token gluing, by getting
// between two token trees that should be glued
// together.
if !stream.is_empty() {
self.0.push(stream);
}
return;
}
}
}
}
self.0.push(stream);
}
pub fn build(self) -> TokenStream {
TokenStream::from_streams(self.0)
}
}
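// Gluing sketch (informal): if the last pushed stream ends with the token
// `=` marked `Joint` and the next pushed stream starts with `=`, `push`
// merges them into a single `==` token rather than keeping two adjacent
// `=` tokens; `Token::glue` above decides which pairs can merge.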
/// By-reference iterator over a [`TokenStream`].
#[derive(Clone)]
pub struct CursorRef<'t> {
stream: &'t TokenStream,
index: usize,
}
impl<'t> CursorRef<'t> {
fn next_with_spacing(&mut self) -> Option<&'t TreeAndSpacing> {
self.stream.0.get(self.index).map(|tree| {
self.index += 1;
tree
})
}
}
<|fim▁hole|> self.next_with_spacing().map(|(tree, _)| tree)
}
}
/// Owning by-value iterator over a [`TokenStream`].
// FIXME: Many uses of this can be replaced with by-reference iterator to avoid clones.
#[derive(Clone)]
pub struct Cursor {
pub stream: TokenStream,
index: usize,
}
impl Iterator for Cursor {
type Item = TokenTree;
fn next(&mut self) -> Option<TokenTree> {
self.next_with_spacing().map(|(tree, _)| tree)
}
}
impl Cursor {
fn new(stream: TokenStream) -> Self {
Cursor { stream, index: 0 }
}
pub fn next_with_spacing(&mut self) -> Option<TreeAndSpacing> {
if self.index < self.stream.len() {
self.index += 1;
Some(self.stream.0[self.index - 1].clone())
} else {
None
}
}
pub fn index(&self) -> usize {
self.index
}
pub fn append(&mut self, new_stream: TokenStream) {
if new_stream.is_empty() {
return;
}
let index = self.index;
let stream = mem::take(&mut self.stream);
*self = TokenStream::from_streams(smallvec![stream, new_stream]).into_trees();
self.index = index;
}
pub fn look_ahead(&self, n: usize) -> Option<&TokenTree> {
self.stream.0[self.index..].get(n).map(|(tree, _)| tree)
}
}
#[derive(Debug, Copy, Clone, PartialEq, Encodable, Decodable, HashStable_Generic)]
pub struct DelimSpan {
pub open: Span,
pub close: Span,
}
impl DelimSpan {
pub fn from_single(sp: Span) -> Self {
DelimSpan { open: sp, close: sp }
}
pub fn from_pair(open: Span, close: Span) -> Self {
DelimSpan { open, close }
}
pub fn dummy() -> Self {
Self::from_single(DUMMY_SP)
}
pub fn entire(self) -> Span {
self.open.with_hi(self.close.hi())
}
}<|fim▁end|> | impl<'t> Iterator for CursorRef<'t> {
type Item = &'t TokenTree;
fn next(&mut self) -> Option<&'t TokenTree> { |
<|file_name|>p2p_addrfetch.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# Copyright (c) 2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test p2p addr-fetch connections
"""
import time
from test_framework.messages import msg_addr, CAddress, NODE_NETWORK, NODE_WITNESS
from test_framework.p2p import P2PInterface, p2p_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
ADDR = CAddress()
ADDR.time = int(time.time())
ADDR.nServices = NODE_NETWORK | NODE_WITNESS
ADDR.ip = "192.0.0.8"
ADDR.port = 18444
class P2PAddrFetch(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = self.nodes[0]
self.log.info("Connect to an addr-fetch peer")<|fim▁hole|> assert_equal(len(info), 1)
assert_equal(info[0]['connection_type'], 'addr-fetch')
self.log.info("Check that we send getaddr but don't try to sync headers with the addr-fetch peer")
peer.sync_send_with_ping()
with p2p_lock:
assert peer.message_count['getaddr'] == 1
assert peer.message_count['getheaders'] == 0
self.log.info("Check that answering the getaddr with a single address does not lead to disconnect")
# This prevents disconnecting on self-announcements
msg = msg_addr()
msg.addrs = [ADDR]
peer.send_and_ping(msg)
assert_equal(len(node.getpeerinfo()), 1)
self.log.info("Check that answering with larger addr messages leads to disconnect")
msg.addrs = [ADDR] * 2
peer.send_message(msg)
peer.wait_for_disconnect(timeout=5)
self.log.info("Check timeout for addr-fetch peer that does not send addrs")
peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=1, connection_type="addr-fetch")
node.setmocktime(int(time.time()) + 301) # Timeout: 5 minutes
peer.wait_for_disconnect(timeout=5)
if __name__ == '__main__':
P2PAddrFetch().main()<|fim▁end|> | peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="addr-fetch")
info = node.getpeerinfo() |
<|file_name|>item_update.py<|end_file_name|><|fim▁begin|>"""Contains tests for oweb.views.updates.item_update"""
# Python imports
from unittest import skip
# Django imports
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.contrib.auth.models import User
# app imports
from oweb.tests import OWebViewTests
from oweb.models.account import Account
from oweb.models.research import Research
from oweb.models.ship import Ship
from oweb.models.planet import Planet, Moon
from oweb.models.building import Building
from oweb.models.defense import Defense
@override_settings(AUTH_USER_MODEL='auth.User')
class OWebViewsItemUpdateTests(OWebViewTests):
def test_login_required(self):
"""Unauthenticated users should be redirected to oweb:app_login"""
r = self.client.get(reverse('oweb:item_update'))
self.assertRedirects(r,
reverse('oweb:app_login'),
status_code=302,
target_status_code=200)
def test_account_owner(self):
"""Can somebody update an item he doesn't posess?"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
res_pre = Research.objects.filter(account=acc).first()
self.client.login(username='test02', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'research',
'item_id': res_pre.id,
'item_level': res_pre.level + 1 },
HTTP_REFERER=reverse('oweb:account_research',
args=[acc.id]))
self.assertEqual(r.status_code, 403)
self.assertTemplateUsed(r, 'oweb/403.html')
def test_no_post(self):
"""What if no POST data is supplied?"""
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'))
self.assertEqual(r.status_code, 500)
self.assertTemplateUsed(r, 'oweb/500.html')
def test_research_update(self):
"""Does ``item_update()`` correctly update researches?
Basically the Django ORM can be trusted, but since there is some logic
involved in determining the correct field to update, this test is
included
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
res_pre = Research.objects.filter(account=acc).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'research',
'item_id': res_pre.id,
'item_level': res_pre.level + 1 },
HTTP_REFERER=reverse('oweb:account_research',
args=[acc.id]))
self.assertRedirects(r,
reverse('oweb:account_research', args=[acc.id]),
status_code=302,
target_status_code=200)
res_post = Research.objects.get(pk=res_pre.pk)
self.assertEqual(res_pre.level + 1, res_post.level)
def test_ship_update(self):
"""Does ``item_update()`` correctly update ships?
Basically the Django ORM can be trusted, but since there is some logic
involved in determining the correct field to update, this test is
included
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
ship_pre = Ship.objects.filter(account=acc).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'ship',
'item_id': ship_pre.id,
'item_level': ship_pre.count + 1338 },
HTTP_REFERER=reverse('oweb:account_ships',
args=[acc.id]))
self.assertRedirects(r,
reverse('oweb:account_ships', args=[acc.id]),
status_code=302,
target_status_code=200)
ship_post = Ship.objects.get(pk=ship_pre.pk)
self.assertEqual(ship_pre.count + 1338, ship_post.count)
def test_building_update(self):
"""Does ``item_update()`` correctly update buildings?
Basically the Django ORM can be trusted, but since there is some logic
involved in determining the correct field to update, this test is
included
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
p = Planet.objects.filter(account=acc).first()
b_pre = Building.objects.filter(astro_object=p).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'building',
'item_id': b_pre.id,
'item_level': b_pre.level - 1 },
HTTP_REFERER=reverse('oweb:planet_buildings',
args=[p.id]))
self.assertRedirects(r,
reverse('oweb:planet_buildings', args=[p.id]),
status_code=302,
target_status_code=200)
b_post = Building.objects.get(pk=b_pre.pk)
self.assertEqual(b_pre.level - 1, b_post.level)
def test_moon_building_update(self):
"""Does ``item_update()`` correctly update moon buildings?
Basically the Django ORM can be trusted, but since there is some logic
involved in determining the correct field to update, this test is
included
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
p = Planet.objects.filter(account=acc).values_list('id', flat=True)
m = Moon.objects.filter(planet__in=p).first()
b_pre = Building.objects.filter(astro_object=m).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'moon_building',
'item_id': b_pre.id,
'item_level': b_pre.level + 2 },
HTTP_REFERER=reverse('oweb:moon_buildings',
args=[m.id]))<|fim▁hole|> target_status_code=200)
b_post = Building.objects.get(pk=b_pre.pk)
self.assertEqual(b_pre.level + 2, b_post.level)
def test_defense_update(self):
"""Does ``item_update()`` correctly update defense devices?
Basically the Django ORM can be trusted, but since there is some logic
involved in determining the correct field to update, this test is
included
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
p = Planet.objects.filter(account=acc).first()
d_pre = Defense.objects.filter(astro_object=p).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'defense',
'item_id': d_pre.id,
'item_level': d_pre.count - 1 },
HTTP_REFERER=reverse('oweb:planet_defense',
args=[p.id]))
self.assertRedirects(r,
reverse('oweb:planet_defense', args=[p.id]),
status_code=302,
target_status_code=200)
d_post = Defense.objects.get(pk=d_pre.pk)
self.assertEqual(d_pre.count - 1, d_post.count)
def test_moon_defense_update(self):
"""Does ``item_update()`` correctly update moon defense devices?
Basically the Django ORM can be trusted, but since there is some logic
involved in determining the correct field to update, this test is
included
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
p = Planet.objects.filter(account=acc).values_list('id', flat=True)
m = Moon.objects.filter(planet__in=p).first()
d_pre = Defense.objects.filter(astro_object=m).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'moon_defense',
'item_id': d_pre.id,
'item_level': d_pre.count - 10000 },
HTTP_REFERER=reverse('oweb:moon_defense',
args=[m.id]))
self.assertRedirects(r,
reverse('oweb:moon_defense', args=[m.id]),
status_code=302,
target_status_code=200)
d_post = Defense.objects.get(pk=d_pre.pk)
self.assertEqual(0, d_post.count)
def test_unknown_item_type(self):
"""Does ``item_update()`` correctly handle unknown item_types?"""
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={
'item_type': 'foobar',
'item_id': 1,
'item_level': 1
})
self.assertEqual(r.status_code, 500)
self.assertTemplateUsed(r, 'oweb/500.html')<|fim▁end|> | self.assertRedirects(r,
reverse('oweb:moon_buildings', args=[m.id]),
status_code=302, |
<|file_name|>exception.py<|end_file_name|><|fim▁begin|># Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Solum base exception handling.
Includes decorator for re-raising Solum-type exceptions.
"""
import functools
import pecan
import sys
from keystoneclient import exceptions as keystone_exceptions
from oslo.config import cfg
import six
import wsme
from solum.common import safe_utils
from solum.openstack.common import excutils
from solum.openstack.common.gettextutils import _
from solum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='make exception message format errors fatal')
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
def wrap_exception(notifier=None, publisher_id=None, event_type=None,
level=None):
"""This decorator wraps a method to catch any exceptions that may
get thrown. It logs the exception and optionally sends
it to the notification system.
"""
def inner(f):
def wrapped(self, context, *args, **kw):
# Don't store self or context in the payload, it now seems to
# contain confidential information.
try:
return f(self, context, *args, **kw)
except Exception as e:
with excutils.save_and_reraise_exception():
if notifier:
call_dict = safe_utils.getcallargs(f, *args, **kw)
payload = dict(exception=e,
private=dict(args=call_dict)
)
# Use a temp vars so we don't shadow
# our outer definitions.
temp_level = level
if not temp_level:
temp_level = notifier.ERROR
temp_type = event_type
if not temp_type:
# If f has multiple decorators, they must use
# functools.wraps to ensure the name is
# propagated.
temp_type = f.__name__
notifier.notify(context, publisher_id, temp_type,
temp_level, payload)
return functools.wraps(f)(wrapped)
return inner
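# Illustrative usage sketch (the notifier object and method name below are
# hypothetical, not part of this module):
#
#     @wrap_exception(notifier=my_notifier, publisher_id='solum.conductor')
#     def do_something(self, context, resource_id):
#         ...
#
# Any exception raised by do_something is re-raised after a notification is
# sent to my_notifier (ERROR level unless `level`/`event_type` override it).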
def wrap_controller_exception(func):
"""This decorator wraps controllers method to manage wsme exceptions:
a wsme ClientSideError is raised if a SolumException is thrown.
"""
@functools.wraps(func)
def wrapped(*args, **kw):
try:
return func(*args, **kw)
except SolumException as excp:
pecan.response.translatable_error = excp
raise wsme.exc.ClientSideError(six.text_type(excp), excp.code)
return wrapped
def wrap_keystone_exception(func):
"""This decorator wraps keystone exception by throwing Solum specific
exceptions.
"""
@functools.wraps(func)<|fim▁hole|> try:
return func(*args, **kw)
except keystone_exceptions.AuthorizationFailure:
raise AuthorizationFailure(
client=func.__name__, message="reason: %s" % sys.exc_info()[1])
except keystone_exceptions.ClientException:
raise AuthorizationFailure(
client=func.__name__,
message="unexpected keystone client error occurred: %s"
% sys.exc_info()[1])
return wrapped
class SolumException(Exception):
"""Base Solum Exception
To correctly use this class, inherit from it and define
a 'msg_fmt' property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred.")
code = 500
def __init__(self, **kwargs):
self.kwargs = kwargs
if CONF.fatal_exception_format_errors:
assert isinstance(self.msg_fmt, six.text_type)
try:
self.message = self.msg_fmt % kwargs
except KeyError:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'),
extra=dict(
private=dict(
msg=self.msg_fmt,
args=kwargs
)
)
)
if CONF.fatal_exception_format_errors:
raise
def __str__(self):
if six.PY3:
return self.message
return self.message.encode('utf-8')
def __unicode__(self):
return self.message
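# Example subclass following the msg_fmt contract described in the class
# docstring (illustrative names only):
#
#     class ImageNotFound(SolumException):
#         msg_fmt = _("Image %(image_id)s could not be found.")
#         code = 404
#
#     raise ImageNotFound(image_id='abc123')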
class ObjectNotFound(SolumException):
msg_fmt = _("The %(name)s %(id)s could not be found.")
class ObjectNotUnique(SolumException):
msg_fmt = _("The %(name)s already exists.")
class ResourceNotFound(ObjectNotFound):
msg_fmt = _("The %(name)s resource %(id)s could not be found.")
code = 404
class ResourceExists(ObjectNotUnique):
msg_fmt = _("The %(name)s resource already exists.")
code = 409
class BadRequest(SolumException):
msg_fmt = _("The request is malformed. Reason: %(reason)s")
code = 400
class NotImplemented(SolumException):
msg_fmt = _("The requested operation is not implemented.")
code = 501
class AuthorizationFailure(SolumException):
msg_fmt = _("%(client)s connection failed. %(message)s")<|fim▁end|> | def wrapped(*args, **kw): |
<|file_name|>example_config.py<|end_file_name|><|fim▁begin|>import os
BASEDIR = os.path.abspath(os.path.dirname(__file__))
DEBUG = False
##
# Database settings
##
DB_HOST = 'localhost'
DB_NAME = 'scoremodel'
DB_USER = 'scoremodel'
DB_PASS = 'scoremodel'
##
# MySQL SSL connections
##
use_ssl = False
SSL_CA = '/etc/mysql/certs/ca-cert.pem'
SSL_KEY = '/etc/mysql/keys/client-key.pem'
SSL_CERT = '/etc/mysql/certs/client-cert.pem'
##
# Flask-WTF
##
WTF_CSRF_ENABLED = True
SECRET_KEY = 'secret_key'
##
# Log-in
##<|fim▁hole|>##
# Babel
##
BABEL_DEFAULT_LOCALE = 'en'
BABEL_DEFAULT_TIMEZONE = 'UTC'
LANGUAGES = ['nl', 'en']
##
# Uploads
##
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = ('txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif')
MAX_CONTENT_LENGTH = 16 * 1024 * 1024 # 16 MB
##
# Logger
##
LOG_FILENAME = 'logs/scoremodel.log'
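# With the defaults above and use_ssl = False, the URI assembled below
# works out to (sketch):
#   mysql+mysqlconnector://scoremodel:scoremodel@localhost/scoremodel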
if use_ssl is True:
SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://{user}:{passw}@{host}/{db}?ssl_key={ssl_key}&ssl_cert={ssl_cert}'.format(
user=DB_USER, passw=DB_PASS,
host=DB_HOST, db=DB_NAME, ssl_key=SSL_KEY, ssl_cert=SSL_CERT)
else:
SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://{user}:{passw}@{host}/{db}'.format(user=DB_USER, passw=DB_PASS,
host=DB_HOST, db=DB_NAME)<|fim▁end|> | REMEMBER_COOKIE_SECURE = True
REMEMBER_COOKIE_HTTPONLY = True
SESSION_PROTECTION = "strong"
|
<|file_name|>test_core.py<|end_file_name|><|fim▁begin|>#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import multiprocessing
import os
import signal
import unittest
from datetime import timedelta
from time import sleep
from dateutil.relativedelta import relativedelta
from numpy.testing import assert_array_almost_equal
from airflow import DAG, exceptions, settings
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models import DagBag, DagRun, TaskFail, TaskInstance
from airflow.models.baseoperator import BaseOperator
from airflow.operators.bash import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.settings import Session
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from tests.test_utils.config import conf_vars
DEV_NULL = '/dev/null'
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_tests'
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super().__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(self, context):
pass
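# Usage sketch: because 'some_templated_field' is listed in template_fields,
# a value like {'bar': ['baz', '{{ ds }}']} has its '{{ ds }}' rendered to the
# execution date before execute() runs; test_complex_template below relies on
# exactly that behavior.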
class TestCore(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
self.dagbag = DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def tearDown(self):
session = Session()
session.query(DagRun).filter(
DagRun.dag_id == TEST_DAG_ID).delete(
synchronize_session=False)
session.query(TaskInstance).filter(
TaskInstance.dag_id == TEST_DAG_ID).delete(
synchronize_session=False)
session.query(TaskFail).filter(
TaskFail.dag_id == TEST_DAG_ID).delete(
synchronize_session=False)
session.commit()
session.close()
def test_check_operators(self):
conn_id = "sqlite_default"
captain_hook = BaseHook.get_hook(conn_id=conn_id) # quite funny :D
captain_hook.run("CREATE TABLE operator_test_table (a, b)")
captain_hook.run("insert into operator_test_table values (1,2)")
op = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
op = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captain_hook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
msg = 'Invalid arguments were passed to BashOperator (task_id: test_illegal_args).'
with conf_vars({('operators', 'allow_illegal_arguments'): 'True'}):<|fim▁hole|> BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
assert any(msg in str(w) for w in warning.warnings)
def test_illegal_args_forbidden(self):
"""
Tests that operators raise exceptions on illegal arguments when
illegal arguments are not allowed.
"""
with self.assertRaises(AirflowException) as ctx:
BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertIn(
('Invalid arguments were passed to BashOperator '
'(task_id: test_illegal_args).'),
str(ctx.exception))
def test_bash_operator(self):
op = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
op = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command="echo \u2600",
dag=self.dag,
output_encoding='utf-8')
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
op = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
op.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_on_failure_callback(self):
# Annoying workaround for nonlocal not existing in python 2
data = {'called': False}
def check_failure(context, test_case=self):
data['called'] = True
error = context.get('exception')
test_case.assertIsInstance(error, AirflowException)
op = BashOperator(
task_id='check_on_failure_callback',
bash_command="exit 1",
dag=self.dag,
on_failure_callback=check_failure)
self.assertRaises(
exceptions.AirflowException,
op.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
self.assertTrue(data['called'])
def test_dryrun(self):
op = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
op.dry_run()
def test_sqlite(self):
import airflow.providers.sqlite.operators.sqlite
op = airflow.providers.sqlite.operators.sqlite.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
op = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
op.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
op = PythonOperator(
task_id='test_py_op',
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
op = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
op.execute = verify_templated_field
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject:
def __len__(self): # pylint: disable=invalid-length-returned
return NotImplemented
def __bool__(self):
return NotImplemented
op = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
op.resolve_template_files()
def test_task_get_template(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
context = ti.get_template_context()
# DEFAULT DATE is 2015-01-01
self.assertEqual(context['ds'], '2015-01-01')
self.assertEqual(context['ds_nodash'], '20150101')
# next_ds is 2015-01-02 as the dag interval is daily
self.assertEqual(context['next_ds'], '2015-01-02')
self.assertEqual(context['next_ds_nodash'], '20150102')
# prev_ds is 2014-12-31 as the dag interval is daily
self.assertEqual(context['prev_ds'], '2014-12-31')
self.assertEqual(context['prev_ds_nodash'], '20141231')
self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
self.assertEqual(context['ts_nodash'], '20150101T000000')
self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')
self.assertEqual(context['yesterday_ds'], '2014-12-31')
self.assertEqual(context['yesterday_ds_nodash'], '20141231')
self.assertEqual(context['tomorrow_ds'], '2015-01-02')
self.assertEqual(context['tomorrow_ds_nodash'], '20150102')
def test_local_task_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existent",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
from airflow.executors.sequential_executor import SequentialExecutor
TI = TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
proc = multiprocessing.Process(target=job.run)
proc.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
proc.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
op1 = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
op2 = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception: # pylint: disable=broad-except
pass
try:
op2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception: # pylint: disable=broad-except
pass
op1_fails = session.query(TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
op2_fails = session.query(TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
self.assertEqual(0, len(op1_fails))
self.assertEqual(1, len(op2_fails))
self.assertGreaterEqual(sum([f.duration for f in op2_fails]), 3)
def test_externally_triggered_dagrun(self):
TI = TaskInstance
# Create the dagrun between two "scheduled" execution dates of the DAG
execution_date = DEFAULT_DATE + timedelta(days=2)
execution_ds = execution_date.strftime('%Y-%m-%d')
execution_ds_nodash = execution_ds.replace('-', '')
dag = DAG(
TEST_DAG_ID,
default_args=self.args,
schedule_interval=timedelta(weeks=1),
start_date=DEFAULT_DATE)
task = DummyOperator(task_id='test_externally_triggered_dag_context',
dag=dag)
dag.create_dagrun(run_id=DagRun.id_for_date(execution_date),
execution_date=execution_date,
state=State.RUNNING,
external_trigger=True)
task.run(
start_date=execution_date, end_date=execution_date)
ti = TI(task=task, execution_date=execution_date)
context = ti.get_template_context()
# next_ds/prev_ds should be the execution date for manually triggered runs
self.assertEqual(context['next_ds'], execution_ds)
self.assertEqual(context['next_ds_nodash'], execution_ds_nodash)
self.assertEqual(context['prev_ds'], execution_ds)
self.assertEqual(context['prev_ds_nodash'], execution_ds_nodash)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | with self.assertWarns(PendingDeprecationWarning) as warning: |
<|file_name|>resolveContext.d.ts<|end_file_name|><|fim▁begin|>import { PolicyWhen } from "./interface";
import { PathNode } from "../path/node";
import { Resolvable } from "./resolvable";
import { State } from "../state/stateObject";
import { Transition } from "../transition/transition";
import { UIInjector } from "../common/interface";
export declare const NATIVE_INJECTOR_TOKEN: string;
/**
* Encapsulates Dependency Injection for a path of nodes
*
* UI-Router states are organized as a tree.
* A nested state has a path of ancestors to the root of the tree.
* When a state is being activated, each element in the path is wrapped as a [[PathNode]].
* A `PathNode` is a stateful object that holds things like parameters and resolvables for the state being activated.
*
* The ResolveContext closes over the [[PathNode]]s, and provides DI for the last node in the path.
*/
export declare class ResolveContext {
private _path;
_injector: UIInjector;
constructor(_path: PathNode[]);
/** Gets all the tokens found in the resolve context, de-duplicated */
getTokens(): any[];
/**
* Gets the Resolvable that matches the token
*
* Gets the last Resolvable that matches the token in this context, or undefined.
* Throws an error if it doesn't exist in the ResolveContext
*/
getResolvable(token: any): Resolvable;
/**
* Returns a ResolveContext that includes a portion of this one
*
* Given a state, this method creates a new ResolveContext from this one.
* The new context starts at the first node (root) and stops at the node for the `state` parameter.
*
* #### Why<|fim▁hole|> * This method is used to create a narrower context when injecting ancestor nodes.
*
* @example
* `let ABCD = new ResolveContext([A, B, C, D]);`
*
* Given a path `[A, B, C, D]`, where `A`, `B`, `C` and `D` are nodes for states `a`, `b`, `c`, `d`:
* When injecting `D`, `D` should have access to all resolvables from `A`, `B`, `C`, `D`.
* However, `B` should only be able to access resolvables from `A`, `B`.
*
* When resolving for the `B` node, first take the full "To Path" Context `[A,B,C,D]` and limit to the subpath `[A,B]`.
* `let AB = ABCD.subContext(a)`
*/
subContext(state: State): ResolveContext;
/**
* Adds Resolvables to the node that matches the state
*
* This adds a [[Resolvable]] (generally one created on the fly; not declared on a [[StateDeclaration.resolve]] block).
* The resolvable is added to the node matching the `state` parameter.
*
* These new resolvables are not automatically fetched.
* The calling code should either fetch them, fetch something that depends on them,
* or rely on [[resolvePath]] being called when some state is being entered.
*
* Note: each resolvable's [[ResolvePolicy]] is merged with the state's policy, and the global default.
*
* @param newResolvables the new Resolvables
* @param state Used to find the node to put the resolvable on
*/
addResolvables(newResolvables: Resolvable[], state: State): void;
/**
* Returns a promise for an array of resolved path Element promises
*
* @param when
* @param trans
* @returns {Promise<any>|any}
*/
resolvePath(when?: PolicyWhen, trans?: Transition): Promise<{
token: any;
value: any;
}[]>;
injector(): UIInjector;
findNode(resolvable: Resolvable): PathNode;
/**
* Gets the async dependencies of a Resolvable
*
* Given a Resolvable, returns its dependencies as a Resolvable[]
*/
getDependencies(resolvable: Resolvable): Resolvable[];
}<|fim▁end|> | *
* When a transition is created, the nodes in the "To Path" are injected from a ResolveContext.
* A ResolveContext closes over a path of [[PathNode]]s and processes the resolvables.
* The "To State" can inject values from its own resolvables, as well as those from all its ancestor state's (node's). |
<|file_name|>submit.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2
"""
submit.py
~~~~~~~~~
submit code to poj
usage:
./submit.py file_name
file_name format: problemId_xxx.c/cpp
"""
import requests
import sys
import os
import time
from bs4 import BeautifulSoup
s = requests.Session()<|fim▁hole|>except:
print 'read user file failed, use default username, password'
user = 'atupal'
password = ''
proxy_password = ''
http_proxy = {
'http': 'http://atupal:%[email protected]:3000' % proxy_password
}
#def login():
# data = {
# 'user_id1': user,
# 'password1': password,
# 'B1': 'login',
# 'url': '/',
# }
#
# r = s.post('http://poj.org/login', data=data, allow_redirects=0)
# print r
def submit_code():
with open(sys.argv[1]) as fi:
data = {
'Action': 'submit',
'SpaceID': '1',
'JudgeID': password,
'Language': '28',
'ProblemNum': sys.argv[1].split('_')[0],
'Source': fi.read(),
}
r = s.post('http://acm.timus.ru/submit.aspx?space=1', proxies=http_proxy, data=data)
print r
def colorful_print(text, color='red'):
color_dict = {
'red': '\033[31m',
}
print color_dict[color]+text+'\033[0m',
def fetch_result():
while 1:
try:
r = s.get('http://acm.timus.ru/status.aspx?space=1')
soup = BeautifulSoup(r.text)
os.system('cls')
time.sleep(0.2)
for tr in soup('tr')[7:13]:
flag = 0
for td in tr('td'):
if user in td.text:
flag = 1
break
elif 'BoardHome' in td.text:
flag = 2
break
if flag == 2:
continue
for td in tr('td'):
if flag:
colorful_print(td.text)
else:
print td.text,
print
print '-' * 100
time.sleep(1)
except KeyboardInterrupt:
exit(0)
def main():
if len(sys.argv) > 1 and sys.argv[1].lower() != 'status':
#login()
submit_code()
fetch_result()
if __name__ == '__main__':
main()<|fim▁end|> | try:
with open('./user') as fi:
user = fi.readline().strip()
password= fi.readline().strip() |
<|file_name|>ACLRoles.js<|end_file_name|><|fim▁begin|>/**
*
* SugarCRM Community Edition is a customer relationship management program developed by
* SugarCRM, Inc. Copyright (C) 2004-2013 SugarCRM Inc.
*
* SuiteCRM is an extension to SugarCRM Community Edition developed by SalesAgility Ltd.
* Copyright (C) 2011 - 2018 SalesAgility Ltd.
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU Affero General Public License version 3 as published by the
* Free Software Foundation with the addition of the following permission added
* to Section 15 as permitted in Section 7(a): FOR ANY PART OF THE COVERED WORK
* IN WHICH THE COPYRIGHT IS OWNED BY SUGARCRM, SUGARCRM DISCLAIMS THE WARRANTY
* OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
* details.
*
* You should have received a copy of the GNU Affero General Public License along with
* this program; if not, see http://www.gnu.org/licenses or write to the Free
* Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301 USA.
*<|fim▁hole|> * of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Public License version 3.
*
* In accordance with Section 7(b) of the GNU Affero General Public License version 3,
* these Appropriate Legal Notices must retain the display of the "Powered by
* SugarCRM" logo and "Supercharged by SuiteCRM" logo. If the display of the logos is not
* reasonably feasible for technical reasons, the Appropriate Legal Notices must
* display the words "Powered by SugarCRM" and "Supercharged by SuiteCRM".
*/
var aclviewer = function () {
var lastDisplay = '';
return {
view: function (role_id, role_module) {
YAHOO.util.Connect.asyncRequest('POST', 'index.php', {
'success': aclviewer.display,
'failure': aclviewer.failed
}, 'module=ACLRoles&action=EditRole&record=' + role_id + '&category_name=' + role_module);
ajaxStatus.showStatus(SUGAR.language.get('app_strings', 'LBL_REQUEST_PROCESSED'));
},
save: function (form_name) {
var formObject = document.getElementById(form_name);
YAHOO.util.Connect.setForm(formObject);
YAHOO.util.Connect.asyncRequest('POST', 'index.php', {
'success': aclviewer.postSave,
'failure': aclviewer.failed
});
ajaxStatus.showStatus(SUGAR.language.get('app_strings', 'LBL_SAVING'));
},
postSave: function (o) {
SUGAR.util.globalEval(o.responseText);
aclviewer.view(result['role_id'], result['module']);
},
display: function (o) {
aclviewer.lastDisplay = '';
ajaxStatus.flashStatus('Done');
document.getElementById('category_data').innerHTML = o.responseText;
},
failed: function () {
ajaxStatus.flashStatus('Could Not Connect');
},
toggleDisplay: function (id) {
if (aclviewer.lastDisplay != '' && typeof(aclviewer.lastDisplay) != 'undefined') {
aclviewer.hideDisplay(aclviewer.lastDisplay);
}
if (aclviewer.lastDisplay != id) {
aclviewer.showDisplay(id);
aclviewer.lastDisplay = id;
} else {
aclviewer.lastDisplay = '';
}
},
hideDisplay: function (id) {
document.getElementById(id).style.display = 'none';
document.getElementById(id + 'link').style.display = '';
},
showDisplay: function (id) {
document.getElementById(id).style.display = '';
document.getElementById(id + 'link').style.display = 'none';
}
};
}();<|fim▁end|> | * You can contact SugarCRM, Inc. headquarters at 10050 North Wolfe Road,
* SW2-130, Cupertino, CA 95014, USA. or at email address [email protected].
*
* The interactive user interfaces in modified source and object code versions |
<|file_name|>web.py<|end_file_name|><|fim▁begin|>"""ShipToasting web handlers."""
import os
import sys
import atexit
import random
import traceback
import gevent
from flask import Response
from flask import redirect
from flask import render_template
from flask import request
from flask import session
from apscheduler.schedulers.gevent import GeventScheduler
from shiptoasting import app
from shiptoasting import HEARTBEAT
from shiptoasting import requires_logged_in
from shiptoasting.storage import ShipToasts
from shiptoasting.storage import ShipToaster
@app.route("/", methods=["GET"])
def index():
"""Main index. Displays most recent then streams."""
shiptoasts = app.shiptoasts.get_shiptoasts()
return render_template(
"index.html",
shiptoasts=shiptoasts,
last_seen=shiptoasts[0].id if shiptoasts else None,
)
@app.route("/", methods=["POST"])
@requires_logged_in
def add_shiptoast():
"""Accepts the POST form, stores the content."""
post_content = request.form.get("content").strip()
if post_content:
if len(post_content) > 500:
post_content = "{}... and I've said too much.".format(
post_content[:500]
)
posted_authors = app.shiptoasts.add_shiptoast(
post_content,
session["character"]["CharacterName"],<|fim▁hole|> if session["character"]["CharacterID"] not in posted_authors:
# spam filtered, time to calm the fuck down
enhance_your_calm_videos = [
"eCidRemUTKo",
"tYg6nP7yRRk",
"txQ6t4yPIM0",
"EYi5aW1GdUU",
"d-diB65scQU",
]
return redirect("https://www.youtube.com/watch?v={}".format(
random.choice(enhance_your_calm_videos)
))
return redirect("/")
@app.route("/shiptoasts")
def shiptoasts():
"""Returns the shiptoasts stream object."""
last_seen_id = request.args.get("last_seen", "None")
if last_seen_id == "None":
last_seen_id = None
else:
last_seen_id = int(last_seen_id)
return Response(
streaming_shiptoasts(last_seen_id),
mimetype="text/event-stream",
)
def streaming_shiptoasts(last_seen_id):
"""Iterator to asyncly deliver shiptoasts."""
for shiptoast in ShipToaster(last_seen_id).iter():
if shiptoast is HEARTBEAT:
data = HEARTBEAT
else:
data = (
'{id}%{author}%'
'<div class="shiptoaster">'
'<div class="prof_pic"><img src='
'"https://image.eveonline.com/Character/{author_id}_256.jpg" '
'height="256" width="256" alt="{author}" /></div>'
'<div class="author{ccp}">{author}</div>'
'</div>'
'<div class="content">{content}</div>'
'<div class="time">{time:%b %e, %H:%M:%S}</div>'
).format(
ccp=" ccp" * int(shiptoast.author.startswith("CCP ")),
**shiptoast._asdict()
)
yield "data: {}\n\n".format(data)
raise StopIteration
def traceback_formatter(excpt, value, tback):
"""Catches all exceptions and re-formats the traceback raised."""
sys.stdout.write("".join(traceback.format_exception(excpt, value, tback)))
def hook_exceptions():
"""Hooks into the sys module to set our formatter."""
if hasattr(sys.stdout, "fileno"): # when testing, sys.stdout is StringIO
# reopen stdout in non buffered mode
sys.stdout = os.fdopen(sys.stdout.fileno(), "wb", 0)
# set the hook
sys.excepthook = traceback_formatter
def production(*_, **settings):
"""Hooks exceptions and returns the Flask app."""
hook_exceptions()
app.shiptoasts = ShipToasts()
app.shiptoasts.initial_fill()
scheduler = GeventScheduler()
scheduler.add_job(app.shiptoasts.periodic_call, "interval", seconds=30)
cleaner = scheduler.start()
listener = gevent.Greenlet.spawn(app.shiptoasts.listen_for_updates)
atexit.register(cleaner.join, timeout=2)
atexit.register(listener.join, timeout=2)
atexit.register(scheduler.shutdown)
return app
def development():
"""Debug/cmdline entry point."""
production().run(
host="0.0.0.0",
port=8080,
debug=True,
use_reloader=False,
threaded=True,
)
if __name__ == "__main__":
development()<|fim▁end|> | session["character"]["CharacterID"],
)
|
<|file_name|>Datatypes.py<|end_file_name|><|fim▁begin|># Copyright (c) 2016 Iotic Labs Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/py-IoticAgent/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants to hide XSD Datatypes used by Point Values and Properties
These help to describe the data in a feed so the receiving Thing can know what kind of data to expect
See also http://www.w3.org/TR/xmlschema-2/#built-in-datatypes
"""
from __future__ import unicode_literals
BASE64 = 'base64Binary'
'''Represents a sequence of binary octets (bytes) encoded according to RFC 2045,
the standard defining the MIME types (look under "6.8 Base64 Content-Transfer-Encoding").
'''
BOOLEAN = 'boolean'
'''A Boolean true or false value. Representations of true are "true" and "1"; false is denoted as "false" or "0".'''
BYTE = 'byte'
'''A signed 8-bit integer in the range [-128 -> +127]. Derived from the short datatype.'''
UNSIGNED_BYTE = 'unsignedByte'
'''An unsigned 8-bit integer in the range [0, 255]. Derived from the unsignedShort datatype.'''
DATE = 'date'
'''Represents a specific date. The syntax is the same as that for the date part of dateTime,
with an optional time zone indicator. Example: "1889-09-24".
'''
DATETIME = 'dateTime'
'''
Represents a specific instant of time. It has the form YYYY-MM-DDThh:mm:ss followed by an optional time-zone suffix.
`YYYY` is the year, `MM` is the month number, `DD` is the day number,
`hh` the hour in 24-hour format, `mm` the minute, and `ss` the second (a decimal and fraction are allowed for the
seconds part).
The optional zone suffix is either `"Z"` for Universal Coordinated Time (UTC), or a time offset of the form
`"[+|-]hh:mm"`, giving the difference between UTC and local time in hours and minutes.
Example: "2004-10-31T21:40:35.5-07:00" is a time on Halloween 2004 in Mountain Standard time. The equivalent UTC would
be "2004-11-01T04:40:35.5Z".
'''
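# A small usage sketch (not part of this module) producing the "dateTime"
# lexical form described above from a Python datetime:
#
#     from datetime import datetime
#     dt = datetime(2004, 11, 1, 4, 40, 35)
#     dt.strftime('%Y-%m-%dT%H:%M:%S') + 'Z'  # -> '2004-11-01T04:40:35Z'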
DECIMAL = 'decimal'
'''Any base-10 fixed-point number. There must be at least one digit to the left of the decimal point, and a leading "+"
or "-" sign is allowed.
Examples: "42", "-3.14159", "+0.004".
'''
DOUBLE = 'double'
'''A 64-bit floating-point decimal number as specified in the IEEE 754-1985 standard. The external form is the same as
the float datatype.
'''
FLOAT = 'float'
'''A 32-bit floating-point decimal number as specified in the IEEE 754-1985 standard.
Allowable values are the same as in the decimal type, optionally followed by an exponent,
or one of the special values "INF" (positive infinity), "-INF" (negative infinity), or "NaN" (not a number).<|fim▁hole|>The exponent starts with either "e" or "E", optionally followed by a sign, and one or more digits.
Example: "6.0235e-23".
'''
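# Added side note: Python's own float() accepts the same special spellings
# case-insensitively, e.g. float("INF"), float("-INF") and float("NaN").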
INT = 'int'
'''Represents a 32-bit signed integer in the range [-2,147,483,648, 2,147,483,647]. Derived from the long datatype.'''
INTEGER = 'integer'
'''Represents a signed integer. Values may begin with an optional "+" or "-" sign. Derived from the decimal datatype.'''
LONG = 'long'
'''A signed, extended-precision integer; at least 18 digits are guaranteed. Derived from the integer datatype. '''
STRING = 'string'
'''Any sequence of zero or more characters.'''
TIME = 'time'
'''A moment of time that repeats every day. The syntax is the same as that for dateTime,
omitting everything up to and including the separator "T". Examples: "00:00:00" is midnight,
and "13:04:00" is an hour and four minutes after noon.
'''
URI = 'anyURI'
'''
The data must conform to the syntax of a Uniform Resource Identifier (URI), as defined in RFC 2396
as amended by RFC 2732. Example: "http://www.nmt.edu/tcc/"
is the URI for the New Mexico Tech Computer Center's index page.
'''
IRI = 'IRI'
'''Only for use with property API calls. Used to handle properties which require an IRI (URIRef) value.'''<|fim▁end|> | |
<|file_name|>Popover.js<|end_file_name|><|fim▁begin|>"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
<|fim▁hole|> enumerable: true,
get: function get() {
return _uxcorePopover.default;
}
});
var _uxcorePopover = _interopRequireDefault(require("uxcore-popover"));
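// Added note: this compiled module simply re-exports the uxcore-popover package,
// so a consumer could equivalently write (assuming uxcore-popover is installed):
//   import Popover from 'uxcore-popover';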
module.exports = exports.default;<|fim▁end|> | Object.defineProperty(exports, "__esModule", {
value: true
});
Object.defineProperty(exports, "default", { |
<|file_name|>PropertyFilter.java<|end_file_name|><|fim▁begin|>/**
 * Copyright (c) 2005-2010 springside.org.cn
*
* Licensed under the Apache License, Version 2.0 (the "License");
*
* $Id: PropertyFilter.java 1205 2010-09-09 15:12:17Z calvinxiu $
*/
package com.snakerflow.framework.orm;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.lang.StringUtils;
import com.snakerflow.framework.utils.ConvertUtils;
import com.snakerflow.framework.utils.ServletUtils;
import org.springframework.util.Assert;
/**
 * An ORM-independent wrapper for property filter conditions, mainly recording the simple search filters submitted from a page.
*
* @author calvin
 */<|fim▁hole|>	/** Separator marking an OR relationship between multiple properties. */
public static final String OR_SEPARATOR = "_OR_";
	/** Property comparison types. */
public enum MatchType {
EQ, LIKE, LT, GT, LE, GE;
}
	/** Property data types. */
public enum PropertyType {
S(String.class), I(Integer.class), L(Long.class), N(Double.class), D(Date.class), B(Boolean.class);
private Class<?> clazz;
private PropertyType(Class<?> clazz) {
this.clazz = clazz;
}
public Class<?> getValue() {
return clazz;
}
}
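	// For instance (added, illustrative): PropertyType.valueOf("S").getValue() == String.class,
	// which is how the single type-code letter in a filter name is resolved below.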
private MatchType matchType = null;
private Object matchValue = null;
private Class<?> propertyClass = null;
private String[] propertyNames = null;
public PropertyFilter() {
}
/**
	 * @param filterName the filter description string, which encodes the comparison type, the property value type and the list of property names.
	 * e.g. LIKES_NAME_OR_LOGIN_NAME
	 * @param value the value to compare against.
*/
public PropertyFilter(final String filterName, final String value) {
String firstPart = StringUtils.substringBefore(filterName, "_");
String matchTypeCode = StringUtils.substring(firstPart, 0, firstPart.length() - 1);
String propertyTypeCode = StringUtils.substring(firstPart, firstPart.length() - 1, firstPart.length());
try {
matchType = Enum.valueOf(MatchType.class, matchTypeCode);
} catch (RuntimeException e) {
throw new IllegalArgumentException("filter名称" + filterName + "没有按规则编写,无法得到属性比较类型.", e);
}
try {
propertyClass = Enum.valueOf(PropertyType.class, propertyTypeCode).getValue();
} catch (RuntimeException e) {
throw new IllegalArgumentException("filter名称" + filterName + "没有按规则编写,无法得到属性值类型.", e);
}
String propertyNameStr = StringUtils.substringAfter(filterName, "_");
		Assert.isTrue(StringUtils.isNotBlank(propertyNameStr), "Filter name " + filterName + " does not follow the naming rule; the property names cannot be derived.");
propertyNames = StringUtils.splitByWholeSeparator(propertyNameStr, PropertyFilter.OR_SEPARATOR);
this.matchValue = ConvertUtils.convertStringToObject(value, propertyClass);
}
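	// Illustrative example (added): new PropertyFilter("LIKES_name_OR_loginName", "ca")
	// yields matchType LIKE, propertyClass String.class and propertyNames
	// {"name", "loginName"}; an ORM layer would typically render this as
	// name LIKE '%ca%' OR loginName LIKE '%ca%'.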
/**
	 * Builds a list of PropertyFilter objects from an HttpServletRequest; the default filter parameter prefix is "filter".
*
* @see #buildFromHttpRequest(HttpServletRequest, String)
*/
public static List<PropertyFilter> buildFromHttpRequest(final HttpServletRequest request) {
return buildFromHttpRequest(request, "filter");
}
/**
	 * Builds a list of PropertyFilter objects from an HttpServletRequest.
	 * Filter parameters are named as: filterPrefix_MatchTypePropertyType_propertyName.
	 *
	 * e.g.
* filter_EQS_name
* filter_LIKES_name_OR_email
*/
public static List<PropertyFilter> buildFromHttpRequest(final HttpServletRequest request, final String filterPrefix) {
List<PropertyFilter> filterList = new ArrayList<PropertyFilter>();
		// Collect the request parameters carrying the filter prefix and build a map with the prefix stripped.
Map<String, Object> filterParamMap = ServletUtils.getParametersStartingWith(request, filterPrefix + "_");
		// Walk the parameter map and build the PropertyFilter list.
for (Map.Entry<String, Object> entry : filterParamMap.entrySet()) {
String filterName = entry.getKey();
String value = (String) entry.getValue();
			// Skip this filter when the value is blank.
if (StringUtils.isNotBlank(value)) {
PropertyFilter filter = new PropertyFilter(filterName, value);
filterList.add(filter);
}
}
return filterList;
}
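	// Illustrative example (added): a request carrying
	//   ?filter_EQS_loginName=admin&filter_LIKES_name_OR_email=a
	// yields two filters: loginName EQ "admin", and name OR email LIKE "a".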
/**
	 * Returns the class of the comparison value.
*/
public Class<?> getPropertyClass() {
return propertyClass;
}
/**
	 * Returns the match type.
*/
public MatchType getMatchType() {
return matchType;
}
/**
	 * Returns the comparison value.
*/
public Object getMatchValue() {
return matchValue;
}
/**
	 * Returns the list of property names to compare.
*/
public String[] getPropertyNames() {
return propertyNames;
}
/**
	 * Returns the single property name to compare.
*/
public String getPropertyName() {
		Assert.isTrue(propertyNames.length == 1, "There is more than one property in this filter.");
return propertyNames[0];
}
/**
	 * Whether this filter compares multiple properties.
*/
public boolean hasMultiProperties() {
return (propertyNames.length > 1);
}
}<|fim▁end|> | public class PropertyFilter {
|