prompt
large_stringlengths
70
991k
completion
large_stringlengths
0
1.02k
<|file_name|>feed_parse_extractWwwTccedwardsCom.py<|end_file_name|><|fim▁begin|>def extractWwwTccedwardsCom(item): ''' Parser for 'www.tccedwards.com' ''' vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or "preview" in item['title'].lower(): return None<|fim▁hole|> tagmap = [ ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False<|fim▁end|>
<|file_name|>parse_test.go<|end_file_name|><|fim▁begin|>package walnut import ( "fmt" "reflect" "testing" "time" ) var splitTests = []struct { in string out []line err error }{ {"", []line{}, nil}, {"# comment", []line{}, nil}, {"\n\na=1\n\n", []line{{3, 0, "a=1"}}, nil}, {"a=1\nb=2", []line{{1, 0, "a=1"}, {2, 0, "b=2"}}, nil}, {"a=1\na=1", []line{{1, 0, "a=1"}, {2, 0, "a=1"}}, nil}, {"a=1\n b=2", []line{{1, 0, "a=1"}, {2, 1, "b=2"}}, nil}, {"a=1\n\tb=2", []line{{1, 0, "a=1"}, {2, 1, "b=2"}}, nil}, {"a=1\n\t \n\tb=2", []line{{1, 0, "a=1"}, {3, 1, "b=2"}}, nil}, {"\n\t\t\n\n ", []line{}, nil}, {" a=1", nil, fmt.Errorf(errIndent, 1)}, {"a=1\n b=2\n c=3", nil, fmt.Errorf(errIndent, 3)}, {"a=1\n b=2\n\tc=3", nil, fmt.Errorf(errIndent, 3)}, } func TestSplit(t *testing.T) { for _, test := range splitTests { out, err := split([]byte(test.in)) if !eq(out, test.out) || !eq(err, test.err) { t.Errorf("split(%q):", test.in) t.Errorf(" got %+v, %v", out, err) t.Errorf(" want %+v, %v", test.out, test.err) } } } var interpretTests = []struct { in []line out []assignment err error }{ { []line{{3, 0, "a=1"}}, []assignment{{3, "a", "1", int64(1)}}, nil, }, { []line{{1, 0, "b=2"}, {2, 0, "c=3"}}, []assignment{{1, "b", "2", int64(2)}, {2, "c", "3", int64(3)}}, nil, }, { []line{{1, 0, "d"}, {2, 1, "e=4"}}, []assignment{ {2, "d.e", "4", int64(4)}, }, nil, }, { []line{{1, 0, "foo"}, {2, 1, "bar=5"}, {3, 1, "baz=6"}}, []assignment{ {2, "foo.bar", "5", int64(5)}, {3, "foo.baz", "6", int64(6)}, }, nil, }, { []line{{1, 0, "group#snug"}, {3, 1, "key=\"test\"#snug"}}, []assignment{ {3, "group.key", "\"test\"#snug", "test"}, }, nil, }, { []line{{1, 0, "bool = true"}}, []assignment{ {1, "bool", "true", true}, }, nil, }, { []line{{1, 0, "int64 = 12345"}}, []assignment{ {1, "int64", "12345", int64(12345)}, }, nil, }, { []line{{1, 0, "float64 = 123.45"}}, []assignment{ {1, "float64", "123.45", float64(123.45)}, }, nil, }, { []line{{1, 0, "string = \"hello\""}}, []assignment{ {1, "string", 
"\"hello\"", "hello"}, }, nil, },<|fim▁hole|> func() []assignment { raw := "2012-01-02 15:30:28.000000000789 +0000" t, _ := time.Parse("2006-01-02 15:04:05 -0700", raw) return []assignment{{1, "time", raw, t}} }(), nil, }, { []line{{1, 0, "duration = 10m 20s"}}, []assignment{ {1, "duration", "10m 20s", 10*time.Minute + 20*time.Second}, }, nil, }, { []line{{1, 0, "♫ = 123"}}, []assignment{ {1, "♫", "123", int64(123)}, }, nil, }, {[]line{{1, 0, "=1"}}, nil, fmt.Errorf(errKey, 1)}, {[]line{{1, 0, " = 1"}}, nil, fmt.Errorf(errKey, 1)}, {[]line{{1, 0, "== 1"}}, nil, fmt.Errorf(errKey, 1)}, {[]line{{1, 0, "a b = 1"}}, nil, fmt.Errorf(errKey, 1)}, {[]line{{1, 0, "a\tb"}}, nil, fmt.Errorf(errKey, 1)}, {[]line{{1, 0, "a = 0 0"}}, nil, fmt.Errorf(errValue, 1, "0 0")}, {[]line{{1, 0, "a == 0"}}, nil, fmt.Errorf(errValue, 1, "= 0")}, } func TestInterpret(t *testing.T) { for _, test := range interpretTests { out, err := interpret(test.in) if !eq(out, test.out) || !eq(err, test.err) { t.Errorf("interpret(%+v):", test.in) t.Errorf(" got %+v, %v", out, err) t.Errorf(" want %+v, %v", test.out, test.err) } } } var initializeTests = []struct { in []assignment out map[string]interface{} err error }{ { []assignment{{1, "a", "1", int64(1)}}, map[string]interface{}{ "a": int64(1), }, nil, }, { []assignment{{1, "foo.bar", "2", int64(2)}, {1, "foo.baz", "3", int64(3)}}, map[string]interface{}{ "foo.bar": int64(2), "foo.baz": int64(3), }, nil, }, { []assignment{{1, "a", "1", int64(1)}, {2, "a.b", "2", int64(2)}}, nil, fmt.Errorf(errConflict, "a.b", 2, "a", 1), }, { []assignment{{1, "a", "1", int64(1)}, {2, "a", "1", int64(1)}}, nil, fmt.Errorf(errConflict, "a", 2, "a", 1), }, { []assignment{{1, "a.b.c", "1", int64(1)}, {2, "a.b", "2", int64(2)}}, nil, fmt.Errorf(errConflict, "a.b", 2, "a.b.c", 1), }, { []assignment{{1, "a.b", "1", int64(1)}, {2, "a.b.c", "2", int64(2)}}, nil, fmt.Errorf(errConflict, "a.b.c", 2, "a.b", 1), }, } func TestInitialize(t *testing.T) { for _, test := range 
initializeTests { out, err := initialize(test.in) if !eq(out, test.out) || !eq(err, test.err) { t.Errorf("initialize(%+v):", test.in) t.Errorf(" got %+v, %v", out, err) t.Errorf(" want %+v, %v", test.out, test.err) } } } // shorthand for reflect.DeepEqual func eq(a, b interface{}) bool { return reflect.DeepEqual(a, b) }<|fim▁end|>
{ []line{{1, 0, "time = 2012-01-02 15:30:28.000000000789 +0000"}},
<|file_name|>dcf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import sys sys.path.append('/var/www/html/valumodel.com/scripts/dcf') from calc_dcf import calc_dcf def create_dcf(req, tax_rate, growth_rate_1_year_out, sga_of_sales, da_of_sales, capex_of_sales, nwc_of_sales, levered_beta, current_yield, exit_multiple, ticker): assumptions = {} try: assumptions['Tax Rate'] = float(tax_rate)/100.0 assumptions['Growth Rate 1 year out'] = float(growth_rate_1_year_out)/100.0 assumptions['SGA % of sales'] = float(sga_of_sales)/100.0<|fim▁hole|> assumptions['D&A % of sales'] = float(da_of_sales)/100.0 assumptions['CAPEX % of sales'] = float(capex_of_sales)/100.0 assumptions['NWC % of sales'] = float(nwc_of_sales)/100.0 assumptions['Levered Beta'] = float(levered_beta) assumptions['Current Yield'] = float(current_yield)/100.0 assumptions['Exit Multiple'] = float(exit_multiple) except ValueError: return '<!doctype html><html><body><h1>Invalid DCF Input. Please try again.</h1></body></html>' ticker = ticker.split(' ')[0] if not ticker.isalnum(): return '<!doctype html><html><body><h1>Invalid Ticker. Please try again.</h1></body></html>' return calc_dcf(assumptions, ticker.upper())<|fim▁end|>
<|file_name|>local.js<|end_file_name|><|fim▁begin|>/** * This is specifically for the builder where the * dependencies have been resolved and you just want * to access the component.jsons locally. */ var semver = require('semver'); var fs = require('graceful-fs'); var join = require('path').join; var resolve = require('path').resolve; var debug = require('debug')('remotes:local'); var Remote = require('../remote') module.exports = Local Remote.extend(Local) function Local(options) { if (!(this instanceof Local)) return new Local(options) options = Object.create(options || {}); this.out = resolve(options.out || options.dir || 'components') debug('checking local components at %s', this.out); Remote.call(this, options) } Local.prototype.name = 'local'; /** * Local resolution is a little different than other remotes. * In particular, if no `ref` is set, * we check for any version. * * @param {String} repo * @return {this} * @api public */ Local.prototype.resolve = function* (remotes, repo, ref) { debug('resolving local remote'); if (typeof remotes === 'string') { ref = repo; repo = remotes; } else if (Array.isArray(remotes) && !~remotes.indexOf('local')) { // if the current remote is not in this list, // then it's obviously not valid. return;<|fim▁hole|> // none installed if (!folders || !folders.length) return; // no specific version we care about if (!ref) return this; // exact tag version if (~folders.indexOf(ref)) return this; // check for equal semantic versions if (semver.maxSatisfying(folders.filter(valid), ref)) return this; } /** * Get the currently downloaded versions of a repo. 
* * @param {String} repo * @return {Array} folders * @api public */ Local.prototype.folders = function* (repo) { try { var frags = repo.toLowerCase().split('/'); // ignore malformed repos for now if (frags.length !== 2) return; var folder = join(this.out, frags[0], frags[1]); debug('checking folder: %s', folder); var folders = yield readdir(folder); debug('got folders: %s', folders.join(', ')); return folders.filter(noLeadingDot); } catch (err) { if (err.code === 'ENOENT') return; throw err; } } /** * Return the currently downloaded components' semantic versions. * * @param {String} repo * @return {Array} references * @api public */ Local.prototype._versions = function* (repo) { return yield* this.folders(repo); } /** * Return the existing component.json, if any. * @param {String} repo * @param {String} reference * @return {Object} component.json * @api public */ Local.prototype._json = function* (repo, ref) { var body; var filename = join(this.out, repo, ref, 'component.json'); try { body = yield read(filename); } catch (err) { if (err.code === 'ENOENT') return; throw err; } try { return JSON.parse(body); } catch (_err) { throw new Error('JSON parsing error with "' + filename + '"'); } } /** * NOT RELEVANT WITH THIS REMOTE */ Local.prototype._tree = function* () { /* jshint noyield:true */ } function valid(x) { return semver.valid(x, true); } function noLeadingDot(x) { return x[0] !== '.'; } function readdir(root) { return function (done) { fs.readdir(root, done) } } function read(filename) { return function (done) { fs.readFile(filename, 'utf8', done) } }<|fim▁end|>
} var folders = yield* this.folders(repo);
<|file_name|>test_observatory_negotiation.py<|end_file_name|><|fim▁begin|>#from interface.services.icontainer_agent import ContainerAgentClient #from pyon.ion.endpoint import ProcessRPCClient from pyon.public import Container, log, IonObject from pyon.util.containers import DotDict from pyon.util.int_test import IonIntegrationTestCase from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient from ion.services.sa.observatory.observatory_management_service import ObservatoryManagementService from interface.services.sa.iobservatory_management_service import IObservatoryManagementService, ObservatoryManagementServiceClient from interface.services.sa.iinstrument_management_service import InstrumentManagementServiceClient from pyon.util.context import LocalContextMixin from pyon.core.exception import BadRequest, NotFound, Conflict, Inconsistent from pyon.public import RT, PRED #from mock import Mock, patch from pyon.util.unit_test import PyonTestCase from nose.plugins.attrib import attr import unittest from ooi.logging import log from ion.services.sa.test.helpers import any_old class FakeProcess(LocalContextMixin): name = '' @attr('INT', group='sa') @unittest.skip('capabilities not yet available') class TestObservatoryNegotiation(IonIntegrationTestCase): def setUp(self): # Start container self._start_container() self.container.start_rel_from_url('res/deploy/r2deploy.yml')<|fim▁hole|> self.omsclient = ObservatoryManagementServiceClient(node=self.container.node) self.imsclient = InstrumentManagementServiceClient(node=self.container.node) @unittest.skip("TDB") def test_request_resource(self): # L4-CI-SA-RQ-348 : Marine facility shall provide capabilities to define instrument use policies # L4-CI-SA-RQ-115 : Marine facility shall present resource requests to the marine infrastructure # create an observatory with resources including platforms with instruments # create an instrument use policy for one of the defined instruments # request 
access to the instrument that aligns with defined policy, verify that access is granted # request access to the instrument that is in conflict with defined policy, verify that access is NOT granted pass @unittest.skip("TBD") def test_request_config_change(self): # L4-CI-SA-RQ-342 : Marine facility shall present platform configuration change requests to the marine infrastructure # create an observatory with resources including platforms with instruments # request a configuration change to the platform t, verify that the request is submitted to the # Observatory operator and that then access is granted when that operator approves pass<|fim▁end|>
self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
<|file_name|>urls.py<|end_file_name|><|fim▁begin|><|fim▁hole|> from django.conf.urls.defaults import url, patterns from actstream import feeds from actstream import views from django.contrib.auth.decorators import login_required urlpatterns = patterns('actstream.views', # Syndication Feeds url(r'^feed/(?P<content_type_id>\d+)/(?P<object_id>\d+)/atom/$', feeds.AtomObjectActivityFeed(), name='actstream_object_feed_atom'), url(r'^feed/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$', feeds.ObjectActivityFeed(), name='actstream_object_feed'), url(r'^feed/(?P<content_type_id>\d+)/atom/$', feeds.AtomModelActivityFeed(), name='actstream_model_feed_atom'), url(r'^feed/(?P<content_type_id>\d+)/(?P<object_id>\d+)/as/$', feeds.ActivityStreamsObjectActivityFeed(), name='actstream_object_feed_as'), url(r'^feed/(?P<content_type_id>\d+)/$', feeds.ModelActivityFeed(), name='actstream_model_feed'), url(r'^feed/$', feeds.UserActivityFeed(), name='actstream_feed'), url(r'^feed/atom/$', feeds.AtomUserActivityFeed(), name='actstream_feed_atom'), # Follow/Unfollow API url(r'^follow/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$', 'follow_unfollow', name='actstream_follow'), url(r'^follow_all/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$', 'follow_unfollow', {'actor_only': False}, name='actstream_follow_all'), url(r'^unfollow/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$', 'follow_unfollow', {'do_follow': False}, name='actstream_unfollow'), # Follower and Actor lists url(r'^followers/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$', 'followers', name='actstream_followers'), url(r'^actors/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$', 'actor', name='actstream_actor'), url(r'^actors/(?P<content_type_id>\d+)/$', 'model', name='actstream_model'), url(r'^new_wall_post/$', view=login_required (views.new_wall_post), name='new_wall_post'), url(r'^detail/(?P<action_id>\d+)/$', view=login_required(views.detail), name='actstream_detail'), url(r'^(?P<username>[-\w]+)/$', view=login_required 
(views.user), name='actstream_user'), url(r'^$', view=login_required (views.stream), name='actstream'), url(r'^new_group_post', view=login_required (views.new_group_post), name='new_group_post'), )<|fim▁end|>
try: from django.conf.urls import url, patterns except ImportError:
<|file_name|>update_json.rs<|end_file_name|><|fim▁begin|>use toyunda_player::VideoMeta; use subtitles::*; use clap::ArgMatches; use std::path::PathBuf; use std::fs::File; extern crate serde_json; extern crate serde_yaml; <|fim▁hole|>/// false on failure pub fn update_json(args: &ArgMatches) -> bool { if let Some(path) = args.value_of("JSON_FILE") { let json_path = PathBuf::from(path); let yaml_path = json_path.with_extension("yaml"); if (json_path.is_file() && yaml_path.is_file()) { let json_file = File::open(&json_path); let yaml_file = File::open(&yaml_path); match (json_file, yaml_file) { (Ok(json_file), Ok(yaml_file)) => { let video_meta: Result<VideoMeta, _> = serde_yaml::from_reader(&yaml_file); let subs: Result<Subtitles, _> = serde_json::from_reader(&json_file); match (video_meta, subs) { (Ok(video_meta), Ok(mut subs)) => { subs.song_info = video_meta.song_info.clone(); let mut json_file = File::create(&json_path) .expect("Can't open json file for writing"); if let Err(e) = serde_json::to_writer_pretty(&mut json_file, &subs) { println!("Some error occured while generating new subtitles : \ {:?}", e); false } else { true } } (Err(err), _) => { println!("error while parsing video_meta : {:?}", err); false } (_, Err(err)) => { println!("error while parsing subtitles : {:?}", err); false } } } (Err(e), _) => { println!("file `{}` couldn't be opened : {:?}", json_path.display(), e); false } (_, Err(e)) => { println!("file `{}` couldn't be opened : {:?}", yaml_path.display(), e); false } } } else { if json_path.is_file() { println!("file `{}` not found", yaml_path.display()); } else { println!("file `{}` not found", json_path.display()); }; false } } else { println!("A file is required for the subcommand 'update'"); // clap shouldn't let this case happen but never too sure false } }<|fim▁end|>
/// true on success
<|file_name|>IteratorDeclaration.java<|end_file_name|><|fim▁begin|>/* * Copyright 2000-2016 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.codeInspection.util; import com.intellij.psi.*; import com.intellij.psi.controlFlow.DefUseUtil; import com.intellij.psi.search.searches.ReferencesSearch; import com.intellij.psi.util.InheritanceUtil; import com.intellij.psi.util.PsiTreeUtil; import com.intellij.psi.util.PsiUtil; import com.siyeh.ig.psiutils.ExpressionUtils; import one.util.streamex.MoreCollectors; import one.util.streamex.StreamEx; import org.jetbrains.annotations.Contract; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; /** * Represents the iterator which traverses the iterable within the loop * * @author Tagir Valeev */ public class IteratorDeclaration { private final @NotNull PsiLocalVariable myIterator; private final @Nullable PsiExpression myIterable; private final boolean myCollection; private IteratorDeclaration(@NotNull PsiLocalVariable iterator, @Nullable PsiExpression iterable, boolean collection) { myIterator = iterator; myIterable = iterable; myCollection = collection; } @NotNull public PsiLocalVariable getIterator() { return myIterator; } @Nullable public PsiExpression getIterable() { return myIterable; } public boolean isCollection() { return myCollection; } public boolean isHasNextCall(PsiExpression condition) { return isIteratorMethodCall(condition, "hasNext"); } @Nullable public 
PsiElement findOnlyIteratorRef(PsiExpression parent) { PsiElement element = PsiUtil.getVariableCodeBlock(myIterator, null); PsiCodeBlock block = element instanceof PsiCodeBlock ? (PsiCodeBlock)element : PsiTreeUtil.getParentOfType(element, PsiCodeBlock.class); if (block == null) return null; return StreamEx.of(DefUseUtil.getRefs(block, myIterator, myIterator.getInitializer())) .filter(e -> PsiTreeUtil.isAncestor(parent, e, false)) .collect(MoreCollectors.onlyOne()).orElse(null); } public boolean isIteratorMethodCall(PsiElement candidate, String method) { if (!(candidate instanceof PsiMethodCallExpression)) return false; PsiMethodCallExpression call = (PsiMethodCallExpression)candidate; if (call.getArgumentList().getExpressions().length != 0) return false; PsiReferenceExpression expression = call.getMethodExpression(); return method.equals(expression.getReferenceName()) && ExpressionUtils.isReferenceTo(expression.getQualifierExpression(), myIterator); } public PsiVariable getNextElementVariable(PsiStatement statement) { if (!(statement instanceof PsiDeclarationStatement)) return null; PsiDeclarationStatement declaration = (PsiDeclarationStatement)statement; if (declaration.getDeclaredElements().length != 1) return null; PsiElement element = declaration.getDeclaredElements()[0]; if (!(element instanceof PsiLocalVariable)) return null; PsiLocalVariable var = (PsiLocalVariable)element; if (!isIteratorMethodCall(var.getInitializer(), "next")) return null; return var; } @Contract("null -> null") private static IteratorDeclaration extract(PsiStatement statement) { if (!(statement instanceof PsiDeclarationStatement)) return null; PsiDeclarationStatement declaration = (PsiDeclarationStatement)statement; if (declaration.getDeclaredElements().length != 1) return null; PsiElement element = declaration.getDeclaredElements()[0]; if (!(element instanceof PsiLocalVariable)) return null; PsiLocalVariable variable = (PsiLocalVariable)element; PsiExpression initializer = 
variable.getInitializer(); if (!(initializer instanceof PsiMethodCallExpression)) return null; PsiMethodCallExpression call = (PsiMethodCallExpression)initializer; if (call.getArgumentList().getExpressions().length != 0) return null; PsiReferenceExpression methodExpression = call.getMethodExpression(); if (!"iterator".equals(methodExpression.getReferenceName())) return null; PsiMethod method = call.resolveMethod(); if (method == null || !InheritanceUtil.isInheritor(method.getContainingClass(), CommonClassNames.JAVA_LANG_ITERABLE)) return null; boolean isCollection = InheritanceUtil.isInheritor(method.getContainingClass(), CommonClassNames.JAVA_UTIL_COLLECTION); PsiType type = variable.getType(); if (!(type instanceof PsiClassType) || !((PsiClassType)type).rawType().equalsToText(CommonClassNames.JAVA_UTIL_ITERATOR)) return null; return new IteratorDeclaration(variable, methodExpression.getQualifierExpression(), isCollection); } @Nullable private static IteratorDeclaration fromForLoop(PsiForStatement statement) { if (statement.getUpdate() != null) return null;<|fim▁hole|> } @Nullable private static IteratorDeclaration fromWhileLoop(PsiWhileStatement statement) { PsiElement previous = PsiTreeUtil.skipSiblingsBackward(statement, PsiComment.class, PsiWhiteSpace.class); if (!(previous instanceof PsiDeclarationStatement)) return null; IteratorDeclaration declaration = extract((PsiStatement)previous); if (declaration == null || !declaration.isHasNextCall(statement.getCondition())) return null; if (!ReferencesSearch.search(declaration.myIterator, declaration.myIterator.getUseScope()).forEach(ref -> { return PsiTreeUtil.isAncestor(statement, ref.getElement(), true); })) { return null; } return declaration; } /** * Creates {@code IteratorDeclaration} if the loop follows one of these patterns: * * <pre>{@code * Iterator<T> it = iterable.iterator(); * while(it.hasNext()) { * ... 
* } * // And iterator is not reused after the loop * }</pre> * * or * * <pre>{@code * for(Iterator<T> it = iterable.iterator();it.hasNext();) { * ... * } * }</pre> * * @param statement loop to create the {@code IteratorDeclaration} from * @return created IteratorDeclaration or null if the loop pattern is not recognized. */ @Contract("null -> null") public static IteratorDeclaration fromLoop(PsiLoopStatement statement) { if(statement instanceof PsiWhileStatement) { return fromWhileLoop((PsiWhileStatement)statement); } if(statement instanceof PsiForStatement) { return fromForLoop((PsiForStatement)statement); } return null; } }<|fim▁end|>
PsiStatement initialization = statement.getInitialization(); IteratorDeclaration declaration = extract(initialization); if (declaration == null || !declaration.isHasNextCall(statement.getCondition())) return null; return declaration;
<|file_name|>size_hint.rs<|end_file_name|><|fim▁begin|>#![feature(core)] extern crate core; #[cfg(test)] mod tests { use core::slice::SliceExt; use core::slice::IterMut; // fn size_from_ptr<T>(_: *const T) -> usize { // mem::size_of::<T>() // } // macro_rules! slice_offset { // ($ptr:expr, $by:expr) => {{ // let ptr = $ptr; // if size_from_ptr(ptr) == 0 { // ::intrinsics::arith_offset(ptr as *mut i8, $by) as *mut _ // } else { // ptr.offset($by) // } // }}; // } // macro_rules! slice_ref { // ($ptr:expr) => {{ // let ptr = $ptr; // if size_from_ptr(ptr) == 0 { // // Use a non-null pointer value // &mut *(1 as *mut _) // } else { // transmute(ptr) // } // }}; // } // pub unsafe fn from_raw_parts<'a, T>(p: *const T, len: usize) -> &'a [T] { // transmute(RawSlice { data: p, len: len }) // } // macro_rules! make_slice { // ($start: expr, $end: expr) => {{ // let start = $start; // let diff = ($end as usize).wrapping_sub(start as usize); // if size_from_ptr(start) == 0 { // // use a non-null pointer value // unsafe { from_raw_parts(1 as *const _, diff) } // } else { // let len = diff / size_from_ptr(start); // unsafe { from_raw_parts(start, len) } // } // }} // } // impl<T> SliceExt for [T] { // type Item = T; // // #[inline] // fn split_at(&self, mid: usize) -> (&[T], &[T]) { // (&self[..mid], &self[mid..]) // } // // #[inline] // fn iter<'a>(&'a self) -> Iter<'a, T> { // unsafe { // let p = if mem::size_of::<T>() == 0 { // 1 as *const _ // } else { // let p = self.as_ptr(); // assume(!p.is_null()); // p // }; // // Iter { // ptr: p, // end: slice_offset!(p, self.len() as isize), // _marker: marker::PhantomData // } // } // } // // #[inline] // fn split<'a, P>(&'a self, pred: P) -> Split<'a, T, P> where P: FnMut(&T) -> bool { // Split { // v: self, // pred: pred, // finished: false // } // } // // #[inline] // fn splitn<'a, P>(&'a self, n: usize, pred: P) -> SplitN<'a, T, P> where // P: FnMut(&T) -> bool, // { // SplitN { // inner: GenericSplitN { // iter: 
self.split(pred), // count: n, // invert: false // } // } // } // // #[inline] // fn rsplitn<'a, P>(&'a self, n: usize, pred: P) -> RSplitN<'a, T, P> where // P: FnMut(&T) -> bool, // { // RSplitN { // inner: GenericSplitN { // iter: self.split(pred), // count: n, // invert: true // } // } // } // // #[inline] // fn windows(&self, size: usize) -> Windows<T> { // assert!(size != 0); // Windows { v: self, size: size } // } // // #[inline] // fn chunks(&self, size: usize) -> Chunks<T> { // assert!(size != 0); // Chunks { v: self, size: size } // } // // #[inline] // fn get(&self, index: usize) -> Option<&T> { // if index < self.len() { Some(&self[index]) } else { None } // } // // #[inline] // fn first(&self) -> Option<&T> { // if self.is_empty() { None } else { Some(&self[0]) } // } // // #[inline] // fn tail(&self) -> &[T] { &self[1..] } // // #[inline] // fn init(&self) -> &[T] { // &self[..self.len() - 1] // } // // #[inline] // fn last(&self) -> Option<&T> { // if self.is_empty() { None } else { Some(&self[self.len() - 1]) } // } // // #[inline] // unsafe fn get_unchecked(&self, index: usize) -> &T { // transmute(self.repr().data.offset(index as isize)) // } // // #[inline] // fn as_ptr(&self) -> *const T { // self.repr().data // } // // #[unstable(feature = "core")] // fn binary_search_by<F>(&self, mut f: F) -> Result<usize, usize> where // F: FnMut(&T) -> Ordering // { // let mut base : usize = 0; // let mut lim : usize = self.len(); // // while lim != 0 { // let ix = base + (lim >> 1); // match f(&self[ix]) { // Equal => return Ok(ix), // Less => { // base = ix + 1; // lim -= 1; // } // Greater => () // } // lim >>= 1; // } // Err(base) // } // // #[inline] // fn len(&self) -> usize { self.repr().len } // // #[inline] // fn get_mut(&mut self, index: usize) -> Option<&mut T> { // if index < self.len() { Some(&mut self[index]) } else { None } // } // // #[inline] // fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) { // unsafe { // let self2: &mut 
[T] = mem::transmute_copy(&self); // // (ops::IndexMut::index_mut(self, ops::RangeTo { end: mid } ), // ops::IndexMut::index_mut(self2, ops::RangeFrom { start: mid } )) // } // } // // #[inline] // fn iter_mut<'a>(&'a mut self) -> IterMut<'a, T> { // unsafe { // let p = if mem::size_of::<T>() == 0 { // 1 as *mut _ // } else { // let p = self.as_mut_ptr(); // assume(!p.is_null()); // p // }; // // IterMut { // ptr: p, // end: slice_offset!(p, self.len() as isize), // _marker: marker::PhantomData // } // } // } // // #[inline] // fn last_mut(&mut self) -> Option<&mut T> { // let len = self.len(); // if len == 0 { return None; } // Some(&mut self[len - 1]) // } // // #[inline] // fn first_mut(&mut self) -> Option<&mut T> { // if self.is_empty() { None } else { Some(&mut self[0]) } // } // // #[inline] // fn tail_mut(&mut self) -> &mut [T] { // &mut self[1 ..] // } // // #[inline] // fn init_mut(&mut self) -> &mut [T] { // let len = self.len(); // &mut self[.. (len - 1)] // } // // #[inline] // fn split_mut<'a, P>(&'a mut self, pred: P) -> SplitMut<'a, T, P> where P: FnMut(&T) -> bool { // SplitMut { v: self, pred: pred, finished: false } // } // // #[inline] // fn splitn_mut<'a, P>(&'a mut self, n: usize, pred: P) -> SplitNMut<'a, T, P> where // P: FnMut(&T) -> bool // { // SplitNMut { // inner: GenericSplitN { // iter: self.split_mut(pred), // count: n, // invert: false // } // } // } // // #[inline] // fn rsplitn_mut<'a, P>(&'a mut self, n: usize, pred: P) -> RSplitNMut<'a, T, P> where // P: FnMut(&T) -> bool, // { // RSplitNMut { // inner: GenericSplitN { // iter: self.split_mut(pred), // count: n, // invert: true // } // } // } // // #[inline] // fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<T> { // assert!(chunk_size > 0); // ChunksMut { v: self, chunk_size: chunk_size } // }<|fim▁hole|> // // Can't take two mutable loans from one vector, so instead just cast // // them to their raw pointers to do the swap // let pa: *mut T = &mut self[a]; // let pb: 
*mut T = &mut self[b]; // ptr::swap(pa, pb); // } // } // // fn reverse(&mut self) { // let mut i: usize = 0; // let ln = self.len(); // while i < ln / 2 { // // Unsafe swap to avoid the bounds check in safe swap. // unsafe { // let pa: *mut T = self.get_unchecked_mut(i); // let pb: *mut T = self.get_unchecked_mut(ln - i - 1); // ptr::swap(pa, pb); // } // i += 1; // } // } // // #[inline] // unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut T { // transmute((self.repr().data as *mut T).offset(index as isize)) // } // // #[inline] // fn as_mut_ptr(&mut self) -> *mut T { // self.repr().data as *mut T // } // // #[inline] // fn position_elem(&self, x: &T) -> Option<usize> where T: PartialEq { // self.iter().position(|y| *x == *y) // } // // #[inline] // fn rposition_elem(&self, t: &T) -> Option<usize> where T: PartialEq { // self.iter().rposition(|x| *x == *t) // } // // #[inline] // fn contains(&self, x: &T) -> bool where T: PartialEq { // self.iter().any(|elt| *x == *elt) // } // // #[inline] // fn starts_with(&self, needle: &[T]) -> bool where T: PartialEq { // let n = needle.len(); // self.len() >= n && needle == &self[..n] // } // // #[inline] // fn ends_with(&self, needle: &[T]) -> bool where T: PartialEq { // let (m, n) = (self.len(), needle.len()); // m >= n && needle == &self[m-n..] // } // // #[unstable(feature = "core")] // fn binary_search(&self, x: &T) -> Result<usize, usize> where T: Ord { // self.binary_search_by(|p| p.cmp(x)) // } // // #[unstable(feature = "core")] // fn next_permutation(&mut self) -> bool where T: Ord { // // These cases only have 1 permutation each, so we can't do anything. // if self.len() < 2 { return false; } // // // Step 1: Identify the longest, rightmost weakly decreasing part of the vector // let mut i = self.len() - 1; // while i > 0 && self[i-1] >= self[i] { // i -= 1; // } // // // If that is the entire vector, this is the last-ordered permutation. 
// if i == 0 { // return false; // } // // // Step 2: Find the rightmost element larger than the pivot (i-1) // let mut j = self.len() - 1; // while j >= i && self[j] <= self[i-1] { // j -= 1; // } // // // Step 3: Swap that element with the pivot // self.swap(j, i-1); // // // Step 4: Reverse the (previously) weakly decreasing part // self[i..].reverse(); // // true // } // // #[unstable(feature = "core")] // fn prev_permutation(&mut self) -> bool where T: Ord { // // These cases only have 1 permutation each, so we can't do anything. // if self.len() < 2 { return false; } // // // Step 1: Identify the longest, rightmost weakly increasing part of the vector // let mut i = self.len() - 1; // while i > 0 && self[i-1] <= self[i] { // i -= 1; // } // // // If that is the entire vector, this is the first-ordered permutation. // if i == 0 { // return false; // } // // // Step 2: Reverse the weakly increasing part // self[i..].reverse(); // // // Step 3: Find the rightmost element equal to or bigger than the pivot (i-1) // let mut j = self.len() - 1; // while j >= i && self[j-1] < self[i-1] { // j -= 1; // } // // // Step 4: Swap that element with the pivot // self.swap(i-1, j); // // true // } // // #[inline] // fn clone_from_slice(&mut self, src: &[T]) -> usize where T: Clone { // let min = cmp::min(self.len(), src.len()); // let dst = &mut self[.. min]; // let src = &src[.. min]; // for i in 0..min { // dst[i].clone_from(&src[i]); // } // min // } // } // pub struct IterMut<'a, T: 'a> { // ptr: *mut T, // end: *mut T, // _marker: marker::PhantomData<&'a mut T>, // } // macro_rules! 
iterator { // (struct $name:ident -> $ptr:ty, $elem:ty) => { // #[stable(feature = "rust1", since = "1.0.0")] // impl<'a, T> Iterator for $name<'a, T> { // type Item = $elem; // // #[inline] // fn next(&mut self) -> Option<$elem> { // // could be implemented with slices, but this avoids bounds checks // unsafe { // if mem::size_of::<T>() != 0 { // assume(!self.ptr.is_null()); // assume(!self.end.is_null()); // } // if self.ptr == self.end { // None // } else { // let old = self.ptr; // self.ptr = slice_offset!(self.ptr, 1); // Some(slice_ref!(old)) // } // } // } // // #[inline] // fn size_hint(&self) -> (usize, Option<usize>) { // let diff = (self.end as usize).wrapping_sub(self.ptr as usize); // let size = mem::size_of::<T>(); // let exact = diff / (if size == 0 {1} else {size}); // (exact, Some(exact)) // } // // #[inline] // fn count(self) -> usize { // self.size_hint().0 // } // // #[inline] // fn nth(&mut self, n: usize) -> Option<$elem> { // // Call helper method. Can't put the definition here because mut versus const. 
// self.iter_nth(n) // } // // #[inline] // fn last(mut self) -> Option<$elem> { // self.next_back() // } // } // // #[stable(feature = "rust1", since = "1.0.0")] // impl<'a, T> DoubleEndedIterator for $name<'a, T> { // #[inline] // fn next_back(&mut self) -> Option<$elem> { // // could be implemented with slices, but this avoids bounds checks // unsafe { // if mem::size_of::<T>() != 0 { // assume(!self.ptr.is_null()); // assume(!self.end.is_null()); // } // if self.end == self.ptr { // None // } else { // self.end = slice_offset!(self.end, -1); // Some(slice_ref!(self.end)) // } // } // } // } // } // } // iterator!{struct IterMut -> *mut T, &'a mut T} type T = i32; #[test] fn size_hint_test1() { let slice: &mut [T] = &mut [1, 2, 3, 4, 5, 6]; let iter_mut: IterMut<T> = slice.iter_mut(); let (lower, upper): (usize, Option<usize>) = iter_mut.size_hint(); assert_eq!(lower, 6); assert_eq!(upper, Some::<usize>(6)); } #[test] fn size_hint_test2() { let slice: &mut [T] = &mut [1, 2, 3, 4, 5, 6]; let mut iter_mut: IterMut<T> = slice.iter_mut(); assert_eq!(iter_mut.next(), Some::<&mut T>(&mut 1)); let (lower, upper): (usize, Option<usize>) = iter_mut.size_hint(); assert_eq!(lower, 5); assert_eq!(upper, Some::<usize>(5)); } }<|fim▁end|>
// // #[inline] // fn swap(&mut self, a: usize, b: usize) { // unsafe {
<|file_name|>update_machine.py<|end_file_name|><|fim▁begin|># -*- cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains the logic for `aq update machine`.""" import re from aquilon.exceptions_ import ArgumentError from aquilon.aqdb.model import (Chassis, ChassisSlot, Model, Machine, Resource, BundleResource, Share, Filesystem) from aquilon.aqdb.types import CpuType from aquilon.worker.broker import BrokerCommand from aquilon.worker.dbwrappers.hardware_entity import update_primary_ip from aquilon.worker.dbwrappers.interface import set_port_group, generate_ip from aquilon.worker.dbwrappers.location import get_location from aquilon.worker.dbwrappers.resources import (find_resource, get_resource_holder) from aquilon.worker.templates import (PlenaryHostData, PlenaryServiceInstanceToplevel) from aquilon.worker.processes import DSDBRunner _disk_map_re = re.compile(r'^([^/]+)/(?:([^/]+)/)?([^/]+):([^/]+)/(?:([^/]+)/)?([^/]+)$') def parse_remap_disk(old_vmholder, new_vmholder, remap_disk): result = {} if not remap_disk: return result maps = remap_disk.split(",") for map in maps: res = _disk_map_re.match(map) if not res: raise ArgumentError("Invalid disk backend remapping " "specification: '%s'" % map) src_type, src_rg, src_name, dst_type, dst_rg, dst_name = res.groups() src_cls = 
Resource.polymorphic_subclass(src_type, "Invalid resource type") dst_cls = Resource.polymorphic_subclass(dst_type, "Invalid resource type") if dst_cls not in (Share, Filesystem): raise ArgumentError("%s is not a valid virtual disk backend " "resource type." % dst_type) src_backend = find_resource(src_cls, old_vmholder, src_rg, src_name) dst_backend = find_resource(dst_cls, new_vmholder, dst_rg, dst_name) result[src_backend] = dst_backend return result def get_metacluster(holder): if hasattr(holder, "metacluster"): return holder.metacluster # vmhost if hasattr(holder, "cluster") and holder.cluster: return holder.cluster.metacluster else: # TODO vlocal still has clusters, so this case not tested yet. return None def update_disk_backing_stores(dbmachine, old_holder, new_holder, remap_disk): if not old_holder: old_holder = dbmachine.vm_container.holder.holder_object if not new_holder: new_holder = old_holder disk_mapping = parse_remap_disk(old_holder, new_holder, remap_disk) for dbdisk in dbmachine.disks: old_bstore = dbdisk.backing_store if isinstance(old_bstore.holder, BundleResource): resourcegroup = old_bstore.holder.resourcegroup.name else: resourcegroup = None if old_bstore in disk_mapping: new_bstore = disk_mapping[old_bstore] else: new_bstore = find_resource(old_bstore.__class__, new_holder, resourcegroup, old_bstore.name, error=ArgumentError) dbdisk.backing_store = new_bstore def update_interface_bindings(session, logger, dbmachine, autoip): for dbinterface in dbmachine.interfaces: old_pg = dbinterface.port_group if not old_pg: continue old_net = old_pg.network # Suppress the warning about PG mismatch - we'll update the addresses # later set_port_group(session, logger, dbinterface, old_pg.name, check_pg_consistency=False) logger.info("Updated {0:l} to use {1:l}.".format(dbinterface, dbinterface.port_group)) new_net = dbinterface.port_group.network if new_net == old_net or not autoip: dbinterface.check_pg_consistency(logger=logger) continue for addr in 
dbinterface.assignments: if addr.network != old_net: continue new_ip = generate_ip(session, logger, dbinterface, autoip=True, network_environment=old_net.network_environment) for dbdns_rec in addr.dns_records: dbdns_rec.network = new_net dbdns_rec.ip = new_ip old_ip = addr.ip addr.ip = new_ip addr.network = new_net logger.info("Changed {0:l} IP address from {1!s} to {2!s}." .format(dbinterface, old_ip, new_ip)) dbinterface.check_pg_consistency(logger=logger) def move_vm(session, logger, dbmachine, resholder, remap_disk, allow_metacluster_change, autoip, plenaries): old_holder = dbmachine.vm_container.holder.holder_object if resholder: new_holder = resholder.holder_object else: new_holder = old_holder if new_holder != old_holder: old_mc = get_metacluster(old_holder) new_mc = get_metacluster(new_holder) if old_mc != new_mc and not allow_metacluster_change: raise ArgumentError("Moving VMs between metaclusters is " "disabled by default. Use the " "--allow_metacluster_change option to " "override.") plenaries.add(old_holder) plenaries.add(new_holder) dbmachine.vm_container.holder = resholder if new_holder != old_holder or remap_disk: update_disk_backing_stores(dbmachine, old_holder, new_holder, remap_disk) if new_holder != old_holder or autoip: update_interface_bindings(session, logger, dbmachine, autoip) if hasattr(new_holder, 'location_constraint'): dbmachine.location = new_holder.location_constraint else: dbmachine.location = new_holder.hardware_entity.location class CommandUpdateMachine(BrokerCommand): requires_plenaries = True required_parameters = ["machine"] def render(self, session, logger, plenaries, machine, model, vendor, serial, uuid, clear_uuid, chassis, slot, clearchassis, multislot, vmhost, cluster, metacluster, allow_metacluster_change, cpuname, cpuvendor, cpucount, memory, ip, autoip, uri, remap_disk, comments, **arguments): dbmachine = Machine.get_unique(session, machine, compel=True) oldinfo = DSDBRunner.snapshot_hw(dbmachine) old_location = 
dbmachine.location plenaries.add(dbmachine) if dbmachine.vm_container: plenaries.add(dbmachine.vm_container) if dbmachine.host: # Using PlenaryHostData directly, to avoid warnings if the host has # not been configured yet plenaries.add(dbmachine.host, cls=PlenaryHostData) if clearchassis: del dbmachine.chassis_slot[:] if chassis: dbchassis = Chassis.get_unique(session, chassis, compel=True) dbmachine.location = dbchassis.location if slot is None: raise ArgumentError("Option --chassis requires --slot " "information.") self.adjust_slot(session, logger, dbmachine, dbchassis, slot, multislot) elif slot is not None: dbchassis = None for dbslot in dbmachine.chassis_slot: if dbchassis and dbslot.chassis != dbchassis: raise ArgumentError("Machine in multiple chassis, please " "use --chassis argument.") dbchassis = dbslot.chassis if not dbchassis: raise ArgumentError("Option --slot requires --chassis " "information.") self.adjust_slot(session, logger, dbmachine, dbchassis, slot, multislot) dblocation = get_location(session, **arguments) if dblocation: loc_clear_chassis = False for dbslot in dbmachine.chassis_slot: dbcl = dbslot.chassis.location if dbcl != dblocation: if chassis or slot is not None: raise ArgumentError("{0} conflicts with chassis {1!s} " "location {2}." .format(dblocation, dbslot.chassis, dbcl)) else: loc_clear_chassis = True if loc_clear_chassis: del dbmachine.chassis_slot[:] dbmachine.location = dblocation if model: # If overriding model, should probably overwrite default # machine specs as well. dbmodel = Model.get_unique(session, name=model, vendor=vendor, compel=True) if not dbmodel.model_type.isMachineType(): raise ArgumentError("The update_machine command cannot update " "machines of type %s." % dbmodel.model_type)<|fim▁hole|> # We probably could do this by forcing either cluster or # location data to be available as appropriate, but really? # Failing seems reasonable. 
if dbmodel.model_type != dbmachine.model.model_type and \ (dbmodel.model_type.isVirtualMachineType() or dbmachine.model.model_type.isVirtualMachineType()): raise ArgumentError("Cannot change machine from %s to %s." % (dbmachine.model.model_type, dbmodel.model_type)) old_nic_model = dbmachine.model.nic_model new_nic_model = dbmodel.nic_model if old_nic_model != new_nic_model: for iface in dbmachine.interfaces: if iface.model == old_nic_model: iface.model = new_nic_model dbmachine.model = dbmodel if cpuname or cpuvendor: dbcpu = Model.get_unique(session, name=cpuname, vendor=cpuvendor, model_type=CpuType.Cpu, compel=True) dbmachine.cpu_model = dbcpu if cpucount is not None: dbmachine.cpu_quantity = cpucount if memory is not None: dbmachine.memory = memory if serial is not None: dbmachine.serial_no = serial if comments is not None: dbmachine.comments = comments if uuid: q = session.query(Machine) q = q.filter_by(uuid=uuid) existing = q.first() if existing: raise ArgumentError("{0} is already using UUID {1!s}." .format(existing, uuid)) dbmachine.uuid = uuid elif clear_uuid: dbmachine.uuid = None if uri and not dbmachine.model.model_type.isVirtualMachineType(): raise ArgumentError("URI can be specified only for virtual " "machines and the model's type is %s" % dbmachine.model.model_type) if uri is not None: dbmachine.uri = uri # FIXME: For now, if a machine has its interface(s) in a portgroup # this command will need to be followed by an update_interface to # re-evaluate the portgroup for overflow. # It would be better to have --pg and --autopg options to let it # happen at this point. 
if cluster or vmhost or metacluster: if not dbmachine.vm_container: raise ArgumentError("Cannot convert a physical machine to " "virtual.") resholder = get_resource_holder(session, logger, hostname=vmhost, cluster=cluster, metacluster=metacluster, compel=False) move_vm(session, logger, dbmachine, resholder, remap_disk, allow_metacluster_change, autoip, plenaries) elif remap_disk: update_disk_backing_stores(dbmachine, None, None, remap_disk) if ip: if dbmachine.host: for srv in dbmachine.host.services_provided: si = srv.service_instance plenaries.add(si, cls=PlenaryServiceInstanceToplevel) update_primary_ip(session, logger, dbmachine, ip) if dbmachine.location != old_location and dbmachine.host: for vm in dbmachine.host.virtual_machines: plenaries.add(vm) vm.location = dbmachine.location session.flush() # Check if the changed parameters still meet cluster capacity # requiremets if dbmachine.cluster: dbmachine.cluster.validate() if allow_metacluster_change and dbmachine.cluster.metacluster: dbmachine.cluster.metacluster.validate() if dbmachine.host and dbmachine.host.cluster: dbmachine.host.cluster.validate() for dbinterface in dbmachine.interfaces: dbinterface.check_pg_consistency(logger=logger) # The check to make sure a plenary file is not written out for # dummy aurora hardware is within the call to write(). This way # it is consistent without altering (and forgetting to alter) # all the calls to the method. with plenaries.transaction(): dsdb_runner = DSDBRunner(logger=logger) dsdb_runner.update_host(dbmachine, oldinfo) dsdb_runner.commit_or_rollback("Could not update machine in DSDB") return def adjust_slot(self, session, logger, dbmachine, dbchassis, slot, multislot): for dbslot in dbmachine.chassis_slot: # This update is a noop, ignore. # Technically, this could be a request to trim the list down # to just this one slot - in that case --clearchassis will be # required. 
if dbslot.chassis == dbchassis and dbslot.slot_number == slot: return if len(dbmachine.chassis_slot) > 1 and not multislot: raise ArgumentError("Use --multislot to support a machine in more " "than one slot, or --clearchassis to remove " "current chassis slot information.") if not multislot: slots = ", ".join(str(dbslot.slot_number) for dbslot in dbmachine.chassis_slot) logger.info("Clearing {0:l} out of {1:l} slot(s) " "{2}".format(dbmachine, dbchassis, slots)) del dbmachine.chassis_slot[:] q = session.query(ChassisSlot) q = q.filter_by(chassis=dbchassis, slot_number=slot) dbslot = q.first() if dbslot: if dbslot.machine: raise ArgumentError("{0} slot {1} already has machine " "{2}.".format(dbchassis, slot, dbslot.machine.label)) else: dbslot = ChassisSlot(chassis=dbchassis, slot_number=slot) dbmachine.chassis_slot.append(dbslot) return<|fim▁end|>
<|file_name|>StringUtil.cpp<|end_file_name|><|fim▁begin|>/* Copyright (C) 2013 Rainmeter Project Developers * * This Source Code Form is subject to the terms of the GNU General Public * License; either version 2 of the License, or (at your option) any later * version. If a copy of the GPL was not distributed with this file, You can * obtain one at <https://www.gnu.org/licenses/gpl-2.0.html>. */ #include "StdAfx.h" #include "StringUtil.h" namespace { // Is the character a end of sentence punctuation character? // English only? bool IsEOSPunct(wchar_t ch) { return ch == '?' || ch == '!' || ch == '.'; } } <|fim▁hole|> std::string narrowStr; if (str && *str) { if (strLen == -1) { strLen = (int)wcslen(str); } int bufLen = WideCharToMultiByte(cp, 0, str, strLen, nullptr, 0, nullptr, nullptr); if (bufLen > 0) { narrowStr.resize(bufLen); WideCharToMultiByte(cp, 0, str, strLen, &narrowStr[0], bufLen, nullptr, nullptr); } } return narrowStr; } std::wstring Widen(const char* str, int strLen, int cp) { std::wstring wideStr; if (str && *str) { if (strLen == -1) { strLen = (int)strlen(str); } int bufLen = MultiByteToWideChar(cp, 0, str, strLen, nullptr, 0); if (bufLen > 0) { wideStr.resize(bufLen); MultiByteToWideChar(cp, 0, str, strLen, &wideStr[0], bufLen); } } return wideStr; } void ToLowerCase(std::wstring& str) { WCHAR* srcAndDest = &str[0]; int strAndDestLen = (int)str.length(); LCMapString(LOCALE_USER_DEFAULT, LCMAP_LOWERCASE, srcAndDest, strAndDestLen, srcAndDest, strAndDestLen); } void ToUpperCase(std::wstring& str) { WCHAR* srcAndDest = &str[0]; int strAndDestLen = (int)str.length(); LCMapString(LOCALE_USER_DEFAULT, LCMAP_UPPERCASE, srcAndDest, strAndDestLen, srcAndDest, strAndDestLen); } void ToProperCase(std::wstring& str) { WCHAR* srcAndDest = &str[0]; int strAndDestLen = (int)str.length(); LCMapString(LOCALE_USER_DEFAULT, LCMAP_TITLECASE, srcAndDest, strAndDestLen, srcAndDest, strAndDestLen); } void ToSentenceCase(std::wstring& str) { if (!str.empty()) { 
ToLowerCase(str); bool isCapped = false; for (size_t i = 0; i < str.length(); ++i) { if (IsEOSPunct(str[i])) isCapped = false; if (!isCapped && iswalpha(str[i]) != 0) { WCHAR* srcAndDest = &str[i]; LCMapString(LOCALE_USER_DEFAULT, LCMAP_UPPERCASE, srcAndDest, 1, srcAndDest, 1); isCapped = true; } } } } /* ** Escapes reserved PCRE regex metacharacters. */ void EscapeRegExp(std::wstring& str) { size_t start = 0; while ((start = str.find_first_of(L"\\^$|()[{.+*?", start)) != std::wstring::npos) { str.insert(start, L"\\"); start += 2; } } /* ** Escapes reserved URL characters. */ void EncodeUrl(std::wstring& str) { size_t pos = 0; while ((pos = str.find_first_of(L" !*'();:@&=+$,/?#[]", pos)) != std::wstring::npos) { WCHAR buffer[3]; _snwprintf_s(buffer, _countof(buffer), L"%.2X", str[pos]); str[pos] = L'%'; str.insert(pos + 1, buffer); pos += 3; } } /* ** Case insensitive comparison of strings. If equal, strip str2 from str1 and any leading whitespace. */ bool CaseInsensitiveCompareN(std::wstring& str1, const std::wstring& str2) { size_t pos = str2.length(); if (_wcsnicmp(str1.c_str(), str2.c_str(), pos) == 0) { str1 = str1.substr(pos); // remove str2 from str1 str1.erase(0, str1.find_first_not_of(L" \t\r\n")); // remove any leading whitespace return true; } return false; } } // namespace StringUtil<|fim▁end|>
namespace StringUtil { std::string Narrow(const WCHAR* str, int strLen, int cp) {
<|file_name|>pysmile_tests.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python import os import glob import unittest import pysmile import json __author__ = 'Jonathan Hosmer' class PySmileTestDecode(unittest.TestCase): def setUp(self): curdir = os.path.dirname(os.path.abspath(__file__)) self.smile_dir = os.path.join(curdir, 'data', 'smile') self.json_dir = os.path.join(curdir, 'data', 'json') def test_json_org_sample1(self): s = os.path.join(self.smile_dir, 'json-org-sample1.smile') j = os.path.join(self.json_dir, 'json-org-sample1.jsn') b = json.load(open(j, 'rb')) try: a = pysmile.decode(open(s, 'rb').read()) except pysmile.SMILEDecodeError, e: self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1])) else: if isinstance(a, list): self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) elif isinstance(a, dict): self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) else: self.fail('Unexpected Type: {!r}'.format(type(a))) def test_json_org_sample2(self): s = os.path.join(self.smile_dir, 'json-org-sample2.smile') j = os.path.join(self.json_dir, 'json-org-sample2.jsn') b = json.load(open(j, 'rb')) try: a = pysmile.decode(open(s, 'rb').read()) except pysmile.SMILEDecodeError, e: self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1])) else: if isinstance(a, list): self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) elif isinstance(a, dict): self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) else: self.fail('Unexpected Type: {!r}'.format(type(a))) def test_json_org_sample3(self): s = os.path.join(self.smile_dir, 'json-org-sample3.smile') j = os.path.join(self.json_dir, 'json-org-sample3.jsn') b = json.load(open(j, 'rb')) try: a = pysmile.decode(open(s, 'rb').read()) except pysmile.SMILEDecodeError, e: self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1])) else: if isinstance(a, list): self.assertListEqual(a, b, 
'{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) elif isinstance(a, dict): self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) else: self.fail('Unexpected Type: {!r}'.format(type(a))) def test_json_org_sample4(self): s = os.path.join(self.smile_dir, 'json-org-sample4.smile') j = os.path.join(self.json_dir, 'json-org-sample4.jsn') b = json.load(open(j, 'rb')) try: a = pysmile.decode(open(s, 'rb').read()) except pysmile.SMILEDecodeError, e: self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1])) else: if isinstance(a, list): self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) elif isinstance(a, dict): self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) else: self.fail('Unexpected Type: {!r}'.format(type(a))) def test_json_org_sample5(self): s = os.path.join(self.smile_dir, 'json-org-sample5.smile') j = os.path.join(self.json_dir, 'json-org-sample5.jsn') b = json.load(open(j, 'rb')) try: a = pysmile.decode(open(s, 'rb').read()) except pysmile.SMILEDecodeError, e: self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1])) else: if isinstance(a, list): self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) elif isinstance(a, dict): self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) else: self.fail('Unexpected Type: {!r}'.format(type(a))) def test_numbers_int_4k(self): s = os.path.join(self.smile_dir, 'numbers-int-4k.smile') j = os.path.join(self.json_dir, 'numbers-int-4k.jsn') b = json.load(open(j, 'rb')) try: a = pysmile.decode(open(s, 'rb').read()) except pysmile.SMILEDecodeError, e: self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1])) else: if isinstance(a, list): self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) elif isinstance(a, dict): self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) else: self.fail('Unexpected Type: {!r}'.format(type(a))) def 
test_numbers_int_64k(self): s = os.path.join(self.smile_dir, 'numbers-int-64k.smile') j = os.path.join(self.json_dir, 'numbers-int-64k.jsn') b = json.load(open(j, 'rb')) try: a = pysmile.decode(open(s, 'rb').read()) except pysmile.SMILEDecodeError, e: self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1])) else: if isinstance(a, list): self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) elif isinstance(a, dict): self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) else: self.fail('Unexpected Type: {!r}'.format(type(a))) def test_test1(self): s = os.path.join(self.smile_dir, 'test1.smile') j = os.path.join(self.json_dir, 'test1.jsn') b = json.load(open(j, 'rb')) try: a = pysmile.decode(open(s, 'rb').read()) except pysmile.SMILEDecodeError, e: self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1])) else: if isinstance(a, list): self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) elif isinstance(a, dict): self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) else: self.fail('Unexpected Type: {!r}'.format(type(a))) def test_test2(self): s = os.path.join(self.smile_dir, 'test2.smile') j = os.path.join(self.json_dir, 'test2.jsn') b = json.load(open(j, 'rb')) try: a = pysmile.decode(open(s, 'rb').read()) except pysmile.SMILEDecodeError, e: self.fail('Failed to decode:\n{!r}\n{!r}'.format(b, e.args[1])) else: if isinstance(a, list): self.assertListEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) elif isinstance(a, dict): self.assertDictEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) else: self.fail('Unexpected Type: {!r}'.format(type(a))) class PySmileTestEncode(unittest.TestCase): def setUp(self): curdir = os.path.dirname(os.path.abspath(__file__)) self.smile_dir = os.path.join(curdir, 'data', 'smile') self.json_dir = os.path.join(curdir, 'data', 'json') def test_json_org_sample1(self): s = os.path.join(self.smile_dir, 
'json-org-sample1.smile') j = os.path.join(self.json_dir, 'json-org-sample1.jsn') a = pysmile.encode(json.load(open(j, 'rb'))) b = open(s, 'rb').read() self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) def test_json_org_sample2(self): s = os.path.join(self.smile_dir, 'json-org-sample2.smile') j = os.path.join(self.json_dir, 'json-org-sample2.jsn') a = pysmile.encode(json.load(open(j, 'rb'))) b = open(s, 'rb').read()<|fim▁hole|> def test_json_org_sample3(self): s = os.path.join(self.smile_dir, 'json-org-sample3.smile') j = os.path.join(self.json_dir, 'json-org-sample3.jsn') a = pysmile.encode(json.load(open(j, 'rb'))) b = open(s, 'rb').read() self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) def test_json_org_sample4(self): s = os.path.join(self.smile_dir, 'json-org-sample4.smile') j = os.path.join(self.json_dir, 'json-org-sample4.jsn') a = pysmile.encode(json.load(open(j, 'rb'))) b = open(s, 'rb').read() self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) def test_json_org_sample5(self): s = os.path.join(self.smile_dir, 'json-org-sample5.smile') j = os.path.join(self.json_dir, 'json-org-sample5.jsn') a = pysmile.encode(json.load(open(j, 'rb'))) b = open(s, 'rb').read() self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) def test_numbers_int_4k(self): s = os.path.join(self.smile_dir, 'numbers-int-4k.smile') j = os.path.join(self.json_dir, 'numbers-int-4k.jsn') a = pysmile.encode(json.load(open(j, 'rb'))) b = open(s, 'rb').read() self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) def test_numbers_int_64k(self): s = os.path.join(self.smile_dir, 'numbers-int-64k.smile') j = os.path.join(self.json_dir, 'numbers-int-64k.jsn') a = pysmile.encode(json.load(open(j, 'rb'))) b = open(s, 'rb').read() self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) def test_test1(self): s = os.path.join(self.smile_dir, 'test1.smile') j = 
os.path.join(self.json_dir, 'test1.jsn') a = pysmile.encode(json.load(open(j, 'rb'))) b = open(s, 'rb').read() self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) def test_test2(self): s = os.path.join(self.smile_dir, 'test2.smile') j = os.path.join(self.json_dir, 'test2.jsn') a = pysmile.encode(json.load(open(j, 'rb'))) b = open(s, 'rb').read() self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a)) class PySmileTestMisc(unittest.TestCase): def test_1(self): a = [1] b = pysmile.decode(':)\n\x03\xf8\xc2\xf9') self.assertListEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b)) def test_2(self): a = [1, 2] b = pysmile.decode(':)\n\x03\xf8\xc2\xc4\xf9') self.assertListEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b)) def test_3(self): a = [1, 2, {'c': 3}] b = pysmile.decode(':)\n\x03\xf8\xc2\xc4\xfa\x80c\xc6\xfb\xf9') self.assertListEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b)) def test_4(self): a = {'a': 1} b = pysmile.decode(':)\n\x03\xfa\x80a\xc2\xfb') self.assertDictEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b)) def test_5(self): a = {'a': '1', 'b': 2, 'c': [3], 'd': -1, 'e': 4.20} b = pysmile.decode( ':)\n\x03\xfa\x80a@1\x80c\xf8\xc6\xf9\x80b\xc4\x80e(fL\x19\x04\x04\x80d\xc1\xfb') self.assertDictEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b)) def test_6(self): a = {'a': {'b': {'c': {'d': ['e']}}}} b = pysmile.decode( ':)\n\x03\xfa\x80a\xfa\x80b\xfa\x80c\xfa\x80d\xf8@e\xf9\xfb\xfb\xfb\xfb') self.assertDictEqual(a, b, 'Expected:\n{!r}\nGot:\n{!r}'.format(a, b))<|fim▁end|>
self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
<|file_name|>file.dev.js<|end_file_name|><|fim▁begin|><|fim▁hole|>size 5099<|fim▁end|>
version https://git-lfs.github.com/spec/v1 oid sha256:22a8f31fdc015cfce492a52529a1c36e39643310cec60a6331d4a428ac8a4db6
<|file_name|>issue-3675.rs<|end_file_name|><|fim▁begin|>fn main() { println!("{}"<|fim▁hole|> , 111); }<|fim▁end|>
// comment
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). <|fim▁hole|><|fim▁end|>
from . import res_country
<|file_name|>OpServerProxy.cc<|end_file_name|><|fim▁begin|>/* * Copyright (c) 2013 Juniper Networks, Inc. All rights reserved. */ #include "viz_collector.h" #include "viz_constants.h" #include "OpServerProxy.h" #include <tbb/mutex.h> #include <boost/bind.hpp> #include <boost/assign/list_of.hpp> #include "base/util.h" #include "base/logging.h" #include "base/parse_object.h" #include <cstdlib> #include <utility> #include "hiredis/hiredis.h" #include "hiredis/base64.h" #include "hiredis/boostasio.hpp" #include <sandesh/sandesh.h> #include <sandesh/common/vns_types.h> #include <sandesh/common/vns_constants.h> #include "rapidjson/document.h" #include "redis_connection.h" #include "redis_processor_vizd.h" #include "viz_sandesh.h" #include "viz_collector.h" using std::string; using boost::assign::list_of; using boost::system::error_code; class OpServerProxy::OpServerImpl { public: enum RacConnType { RAC_CONN_TYPE_INVALID = 0, RAC_CONN_TYPE_TO_OPS = 1, RAC_CONN_TYPE_FROM_OPS = 2, }; void ToOpsConnUpPostProcess() { processor_cb_proc_fn = boost::bind(&OpServerImpl::processorCallbackProcess, this, _1, _2, _3); to_ops_conn_.get()->SetClientAsyncCmdCb(processor_cb_proc_fn); string module = g_vns_constants.ModuleNames.find(Module::COLLECTOR)->second; VizSandeshContext * vsc = static_cast<VizSandeshContext *>(Sandesh::client_context()); string source; if (vsc) source = vsc->Analytics()->name(); else source = Sandesh::source(); if (!started_) { RedisProcessorExec::SyncDeleteUVEs(redis_ip_, redis_port_, source, module, "", 0); started_=true; } if (collector_) collector_->RedisUpdate(true); } void ToOpsConnUp() { LOG(DEBUG, "ToOpsConnUp.. 
UP"); evm_->io_service()->post(boost::bind(&OpServerProxy::OpServerImpl::ToOpsConnUpPostProcess, this)); } void FromOpsConnUpPostProcess() { analytics_cb_proc_fn = boost::bind(&OpServerImpl::analyticsCallbackProcess, this, _1, _2, _3); from_ops_conn_.get()->SetClientAsyncCmdCb(analytics_cb_proc_fn); from_ops_conn_.get()->RedisAsyncCommand(NULL, "SUBSCRIBE analytics"); } void FromOpsConnUp() { LOG(DEBUG, "FromOpsConnUp.. UP"); evm_->io_service()->post(boost::bind(&OpServerProxy::OpServerImpl::FromOpsConnUpPostProcess, this)); } void RAC_ConnectProcess(RacConnType type) { if (type == RAC_CONN_TYPE_TO_OPS) { LOG(DEBUG, "Retry Connect to FromOpsConn"); to_ops_conn_.get()->RAC_Connect(); } else if (type == RAC_CONN_TYPE_FROM_OPS) { from_ops_conn_.get()->RAC_Connect(); } } void ToOpsConnDown() { LOG(DEBUG, "ToOpsConnDown.. DOWN.. Reconnect.."); collector_->RedisUpdate(false); evm_->io_service()->post(boost::bind(&OpServerProxy::OpServerImpl::RAC_ConnectProcess, this, RAC_CONN_TYPE_TO_OPS)); } void FromOpsConnDown() { LOG(DEBUG, "FromOpsConnDown.. DOWN.. 
Reconnect.."); evm_->io_service()->post(boost::bind(&OpServerProxy::OpServerImpl::RAC_ConnectProcess, this, RAC_CONN_TYPE_FROM_OPS)); } void processorCallbackProcess(const redisAsyncContext *c, void *r, void *privdata) { redisReply *reply = (redisReply*)r; RedisProcessorIf * rpi = NULL; if (privdata) rpi = reinterpret_cast<RedisProcessorIf *>(privdata); if (reply == NULL) { LOG(DEBUG, "NULL Reply...\n"); return; } if (rpi) { rpi->ProcessCallback(reply); } } void analyticsCallbackProcess(const redisAsyncContext *c, void *r, void *privdata) {<|fim▁hole|> LOG(DEBUG, "Received data on analytics channel from REDIS...\n"); if (reply == NULL) { LOG(DEBUG, "NULL Reply...\n"); return; } if (reply->type == REDIS_REPLY_ARRAY) { LOG(DEBUG, "REDIS_REPLY_ARRAY == " << reply->elements); int i; for (i = 0; i < (int)reply->elements; i++) { if (reply->element[i]->type == REDIS_REPLY_STRING) { LOG(DEBUG, "Element" << i << "== " << reply->element[i]->str); } else { LOG(DEBUG, "Element" << i << " type == " << reply->element[i]->type); } } } else if (reply->type == REDIS_REPLY_STRING) { LOG(DEBUG, "REDIS_REPLY_STRING == " << reply->str); return; } else { LOG(DEBUG, "reply->type == " << reply->type); return; } assert(reply->type == REDIS_REPLY_ARRAY); assert(reply->elements == 3); if (!strncmp(reply->element[0]->str, "subscribe", strlen("subscribe"))) { /* nothing to do, return */ return; } assert(!strncmp(reply->element[0]->str, "message", strlen("message"))); assert(!strncmp(reply->element[1]->str, "analytics", strlen("analytics"))); assert(reply->element[2]->type == REDIS_REPLY_STRING); std::string message = base64_decode(reply->element[2]->str); //std::string message(reply->element[2]->str); LOG(DEBUG, "message ==" << reply->element[2]->str); rapidjson::Document document; // Default template parameter uses UTF8 and MemoryPoolAllocator. 
if (document.ParseInsitu<0>(reply->element[2]->str).HasParseError()) { assert(0); } assert(document.HasMember("type")); assert(document["type"].IsString()); assert(document.HasMember("destination")); assert(document["destination"].IsString()); std::string destination(document["destination"].GetString()); assert(document.HasMember("message")); assert(document["message"].IsString()); std::string enc_sandesh(document["message"].GetString()); std::string dec_sandesh = base64_decode(enc_sandesh); //std::string dec_sandesh(enc_sandesh); LOG(DEBUG, "decoded sandesh_message ==" << dec_sandesh); collector_->SendRemote(destination, dec_sandesh); } RedisAsyncConnection *to_ops_conn() { return (to_ops_conn_.get()); } RedisAsyncConnection *from_ops_conn() { return (from_ops_conn_.get()); } OpServerImpl(EventManager *evm, VizCollector *collector, const std::string & redis_ip, unsigned short redis_port) : evm_(evm), collector_(collector), started_(false), analytics_cb_proc_fn(NULL), processor_cb_proc_fn(NULL), redis_ip_(redis_ip), redis_port_(redis_port) { to_ops_conn_.reset(new RedisAsyncConnection(evm, redis_ip, redis_port, boost::bind(&OpServerProxy::OpServerImpl::ToOpsConnUp, this), boost::bind(&OpServerProxy::OpServerImpl::ToOpsConnDown, this))); to_ops_conn_.get()->RAC_Connect(); from_ops_conn_.reset(new RedisAsyncConnection(evm, redis_ip, redis_port, boost::bind(&OpServerProxy::OpServerImpl::FromOpsConnUp, this), boost::bind(&OpServerProxy::OpServerImpl::FromOpsConnDown, this))); from_ops_conn_.get()->RAC_Connect(); } ~OpServerImpl() { } private: /* these are made public, so they are accessed by OpServerProxy */ EventManager *evm_; VizCollector *collector_; int gen_timeout_; bool started_; boost::scoped_ptr<RedisAsyncConnection> to_ops_conn_; boost::scoped_ptr<RedisAsyncConnection> from_ops_conn_; RedisAsyncConnection::ClientAsyncCmdCbFn analytics_cb_proc_fn; RedisAsyncConnection::ClientAsyncCmdCbFn processor_cb_proc_fn; public: std::string redis_ip_; unsigned short 
redis_port_; }; OpServerProxy::OpServerProxy(EventManager *evm, VizCollector *collector, const std::string & redis_ip, unsigned short redis_port, int gen_timeout) : gen_timeout_(gen_timeout) { impl_ = new OpServerImpl(evm, collector, redis_ip, redis_port); } OpServerProxy::~OpServerProxy() { if (impl_) delete impl_; } bool OpServerProxy::UVEUpdate(const std::string &type, const std::string &attr, const std::string &source, const std::string &module, const std::string &key, const std::string &message, int32_t seq, const std::string& agg, const std::string& atyp, int64_t ts) { if ((!impl_->to_ops_conn()) || (!impl_->to_ops_conn()->IsConnUp())) return false; RedisProcessorExec::UVEUpdate(impl_->to_ops_conn(), NULL, type, attr, source, module, key, message, seq, agg, atyp, ts); return true; } bool OpServerProxy::UVEDelete(const std::string &type, const std::string &source, const std::string &module, const std::string &key, int32_t seq) { if ((!impl_->to_ops_conn()) || (!impl_->to_ops_conn()->IsConnUp())) return false; RedisProcessorExec::UVEDelete(impl_->to_ops_conn(), NULL, type, source, module, key, seq); return true; } bool OpServerProxy::GetSeq(const string &source, const string &module, std::map<std::string,int32_t> & seqReply) { if (!impl_->to_ops_conn()) return false; VizSandeshContext * vsc = static_cast<VizSandeshContext *>(Sandesh::client_context()); string coll; if (vsc) coll = vsc->Analytics()->name(); else coll = Sandesh::source(); return RedisProcessorExec::SyncGetSeq(impl_->redis_ip_, impl_->redis_port_, source, module, coll, gen_timeout_, seqReply); } bool OpServerProxy::DeleteUVEs(const string &source, const string &module) { if (!impl_->to_ops_conn()) return false; VizSandeshContext * vsc = static_cast<VizSandeshContext *>(Sandesh::client_context()); string coll; if (vsc) coll = vsc->Analytics()->name(); else coll = Sandesh::source(); return RedisProcessorExec::SyncDeleteUVEs(impl_->redis_ip_, impl_->redis_port_, source, module, coll, gen_timeout_); } 
bool OpServerProxy::GeneratorCleanup(GenCleanupReply gcr) { if (!impl_->to_ops_conn()) return false; GenCleanupReq * dr = new GenCleanupReq(impl_->to_ops_conn(), boost::bind(gcr, _2)); return dr->RedisSend(); } bool OpServerProxy::RefreshGenerator(const std::string &source, const std::string &module) { if ((!impl_->to_ops_conn()) || (!impl_->to_ops_conn()->IsConnUp())) return false; if (!gen_timeout_) return true; VizSandeshContext * vsc = static_cast<VizSandeshContext *>(Sandesh::client_context()); string coll; if (vsc) coll = vsc->Analytics()->name(); else coll = Sandesh::source(); RedisProcessorExec::RefreshGenerator(impl_->to_ops_conn(), source, module, coll, gen_timeout_); return true; } bool OpServerProxy::WithdrawGenerator(const std::string &source, const std::string &module) { if ((!impl_->to_ops_conn()) || (!impl_->to_ops_conn()->IsConnUp())) return false; VizSandeshContext * vsc = static_cast<VizSandeshContext *>(Sandesh::client_context()); string coll; if (vsc) coll = vsc->Analytics()->name(); else coll = Sandesh::source(); RedisProcessorExec::WithdrawGenerator(impl_->to_ops_conn(), source, module, coll); return true; }<|fim▁end|>
redisReply *reply = (redisReply*)r;
<|file_name|>rq.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import import logging<|fim▁hole|> try: from redis import Redis from rq import Queue except ImportError: Redis = None Queue = None from kaneda.exceptions import ImproperlyConfigured from .base import BaseQueue class RQQueue(BaseQueue): """ RQ queue :param queue: queue instance of RQ class. :param redis_url: Redis connection url where RQ will attend the async reporting requests. :param queue_name: name of the queue being used by the RQ worker process. """ settings_namespace = 'RQ' def __init__(self, queue=None, redis_url=None, queue_name='kaneda'): if not Redis: raise ImproperlyConfigured('You need to install redis to use the RQ queue.') if not Queue: raise ImproperlyConfigured('You need to install rq library to use the RQ queue.') if queue: if not isinstance(queue, Queue): raise ImproperlyConfigured('"queue" parameter is not an instance of RQ queue.') self.queue = queue elif redis_url: self.queue = Queue(queue_name, connection=Redis.from_url(redis_url)) else: self.queue = Queue(queue_name, connection=Redis()) def report(self, name, metric, value, tags, id_): try: return self.queue.enqueue('kaneda.tasks.rq.report', name, metric, value, tags, id_) except Exception as e: logger = logging.getLogger(__name__) logger.exception(e)<|fim▁end|>
<|file_name|>disco_test.py<|end_file_name|><|fim▁begin|>"""Tests for certbot.plugins.disco.""" import unittest import mock import pkg_resources import zope.interface from certbot import errors from certbot import interfaces from certbot.plugins import standalone from certbot.plugins import webroot EP_SA = pkg_resources.EntryPoint( "sa", "certbot.plugins.standalone", attrs=("Authenticator",), dist=mock.MagicMock(key="certbot")) EP_WR = pkg_resources.EntryPoint( "wr", "certbot.plugins.webroot", attrs=("Authenticator",), dist=mock.MagicMock(key="certbot")) class PluginEntryPointTest(unittest.TestCase): """Tests for certbot.plugins.disco.PluginEntryPoint.""" def setUp(self): self.ep1 = pkg_resources.EntryPoint( "ep1", "p1.ep1", dist=mock.MagicMock(key="p1")) self.ep1prim = pkg_resources.EntryPoint( "ep1", "p2.ep2", dist=mock.MagicMock(key="p2")) # nested self.ep2 = pkg_resources.EntryPoint( "ep2", "p2.foo.ep2", dist=mock.MagicMock(key="p2")) # project name != top-level package name self.ep3 = pkg_resources.EntryPoint( "ep3", "a.ep3", dist=mock.MagicMock(key="p3")) from certbot.plugins.disco import PluginEntryPoint self.plugin_ep = PluginEntryPoint(EP_SA) def test_entry_point_to_plugin_name(self): from certbot.plugins.disco import PluginEntryPoint names = { self.ep1: "p1:ep1", self.ep1prim: "p2:ep1", self.ep2: "p2:ep2", self.ep3: "p3:ep3", EP_SA: "sa", } for entry_point, name in names.iteritems(): self.assertEqual( name, PluginEntryPoint.entry_point_to_plugin_name(entry_point)) def test_description(self): self.assertEqual( "Automatically use a temporary webserver", self.plugin_ep.description) def test_description_with_name(self): self.plugin_ep.plugin_cls = mock.MagicMock(description="Desc") self.assertEqual( "Desc (sa)", self.plugin_ep.description_with_name) def test_ifaces(self): self.assertTrue(self.plugin_ep.ifaces((interfaces.IAuthenticator,))) self.assertFalse(self.plugin_ep.ifaces((interfaces.IInstaller,))) self.assertFalse(self.plugin_ep.ifaces(( 
interfaces.IInstaller, interfaces.IAuthenticator))) def test__init__(self): self.assertFalse(self.plugin_ep.initialized) self.assertFalse(self.plugin_ep.prepared) self.assertFalse(self.plugin_ep.misconfigured) self.assertFalse(self.plugin_ep.available) self.assertTrue(self.plugin_ep.problem is None) self.assertTrue(self.plugin_ep.entry_point is EP_SA) self.assertEqual("sa", self.plugin_ep.name) self.assertTrue(self.plugin_ep.plugin_cls is standalone.Authenticator) def test_init(self): config = mock.MagicMock() plugin = self.plugin_ep.init(config=config) self.assertTrue(self.plugin_ep.initialized) self.assertTrue(plugin.config is config) # memoize! self.assertTrue(self.plugin_ep.init() is plugin) self.assertTrue(plugin.config is config) # try to give different config self.assertTrue(self.plugin_ep.init(123) is plugin) self.assertTrue(plugin.config is config) self.assertFalse(self.plugin_ep.prepared) self.assertFalse(self.plugin_ep.misconfigured) self.assertFalse(self.plugin_ep.available) def test_verify(self): iface1 = mock.MagicMock(__name__="iface1") iface2 = mock.MagicMock(__name__="iface2") iface3 = mock.MagicMock(__name__="iface3") # pylint: disable=protected-access self.plugin_ep._initialized = plugin = mock.MagicMock() exceptions = zope.interface.exceptions with mock.patch("certbot.plugins." 
"disco.zope.interface") as mock_zope: mock_zope.exceptions = exceptions def verify_object(iface, obj): # pylint: disable=missing-docstring assert obj is plugin assert iface is iface1 or iface is iface2 or iface is iface3 if iface is iface3: raise mock_zope.exceptions.BrokenImplementation(None, None) mock_zope.verify.verifyObject.side_effect = verify_object self.assertTrue(self.plugin_ep.verify((iface1,))) self.assertTrue(self.plugin_ep.verify((iface1, iface2))) self.assertFalse(self.plugin_ep.verify((iface3,))) self.assertFalse(self.plugin_ep.verify((iface1, iface3))) def test_prepare(self): config = mock.MagicMock() self.plugin_ep.init(config=config) self.plugin_ep.prepare() self.assertTrue(self.plugin_ep.prepared) self.assertFalse(self.plugin_ep.misconfigured) # output doesn't matter that much, just test if it runs str(self.plugin_ep) def test_prepare_misconfigured(self): plugin = mock.MagicMock() plugin.prepare.side_effect = errors.MisconfigurationError # pylint: disable=protected-access self.plugin_ep._initialized = plugin self.assertTrue(isinstance(self.plugin_ep.prepare(), errors.MisconfigurationError)) self.assertTrue(self.plugin_ep.prepared) self.assertTrue(self.plugin_ep.misconfigured) self.assertTrue(isinstance(self.plugin_ep.problem, errors.MisconfigurationError)) self.assertTrue(self.plugin_ep.available) def test_prepare_no_installation(self): plugin = mock.MagicMock() plugin.prepare.side_effect = errors.NoInstallationError # pylint: disable=protected-access self.plugin_ep._initialized = plugin self.assertTrue(isinstance(self.plugin_ep.prepare(), errors.NoInstallationError)) self.assertTrue(self.plugin_ep.prepared) self.assertFalse(self.plugin_ep.misconfigured) self.assertFalse(self.plugin_ep.available) def test_prepare_generic_plugin_error(self): plugin = mock.MagicMock() plugin.prepare.side_effect = errors.PluginError # pylint: disable=protected-access self.plugin_ep._initialized = plugin self.assertTrue(isinstance(self.plugin_ep.prepare(), 
errors.PluginError)) self.assertTrue(self.plugin_ep.prepared) self.assertFalse(self.plugin_ep.misconfigured) self.assertFalse(self.plugin_ep.available) def test_repr(self): self.assertEqual("PluginEntryPoint#sa", repr(self.plugin_ep)) class PluginsRegistryTest(unittest.TestCase): """Tests for certbot.plugins.disco.PluginsRegistry.""" def setUp(self): from certbot.plugins.disco import PluginsRegistry self.plugin_ep = mock.MagicMock(name="mock") self.plugin_ep.__hash__.side_effect = TypeError self.plugins = {"mock": self.plugin_ep} self.reg = PluginsRegistry(self.plugins) def test_find_all(self): from certbot.plugins.disco import PluginsRegistry with mock.patch("certbot.plugins.disco.pkg_resources") as mock_pkg: mock_pkg.iter_entry_points.side_effect = [iter([EP_SA]), iter([EP_WR])] plugins = PluginsRegistry.find_all() self.assertTrue(plugins["sa"].plugin_cls is standalone.Authenticator) self.assertTrue(plugins["sa"].entry_point is EP_SA) self.assertTrue(plugins["wr"].plugin_cls is webroot.Authenticator) self.assertTrue(plugins["wr"].entry_point is EP_WR) def test_getitem(self): self.assertEqual(self.plugin_ep, self.reg["mock"]) def test_iter(self): self.assertEqual(["mock"], list(self.reg)) def test_len(self): self.assertEqual(1, len(self.reg)) self.plugins.clear() self.assertEqual(0, len(self.reg)) def test_init(self): self.plugin_ep.init.return_value = "baz" self.assertEqual(["baz"], self.reg.init("bar")) self.plugin_ep.init.assert_called_once_with("bar") def test_filter(self): self.plugins.update({ "foo": "bar", "bar": "foo", "baz": "boo", }) self.assertEqual( {"foo": "bar", "baz": "boo"}, self.reg.filter(lambda p_ep: str(p_ep).startswith("b"))) def test_ifaces(self):<|fim▁hole|> self.assertEqual(self.plugins, self.reg.ifaces()._plugins) self.plugin_ep.ifaces.return_value = False self.assertEqual({}, self.reg.ifaces()._plugins) def test_verify(self): self.plugin_ep.verify.return_value = True # pylint: disable=protected-access self.assertEqual( self.plugins, 
self.reg.verify(mock.MagicMock())._plugins) self.plugin_ep.verify.return_value = False self.assertEqual({}, self.reg.verify(mock.MagicMock())._plugins) def test_prepare(self): self.plugin_ep.prepare.return_value = "baz" self.assertEqual(["baz"], self.reg.prepare()) self.plugin_ep.prepare.assert_called_once_with() def test_available(self): self.plugin_ep.available = True # pylint: disable=protected-access self.assertEqual(self.plugins, self.reg.available()._plugins) self.plugin_ep.available = False self.assertEqual({}, self.reg.available()._plugins) def test_find_init(self): self.assertTrue(self.reg.find_init(mock.Mock()) is None) self.plugin_ep.initalized = True self.assertTrue( self.reg.find_init(self.plugin_ep.init()) is self.plugin_ep) def test_repr(self): self.plugin_ep.__repr__ = lambda _: "PluginEntryPoint#mock" self.assertEqual("PluginsRegistry(PluginEntryPoint#mock)", repr(self.reg)) def test_str(self): self.plugin_ep.__str__ = lambda _: "Mock" self.plugins["foo"] = "Mock" self.assertEqual("Mock\n\nMock", str(self.reg)) self.plugins.clear() self.assertEqual("No plugins", str(self.reg)) if __name__ == "__main__": unittest.main() # pragma: no cover<|fim▁end|>
self.plugin_ep.ifaces.return_value = True # pylint: disable=protected-access
<|file_name|>evisgenericeventbrowsergui.cpp<|end_file_name|><|fim▁begin|>/* ** File: evisgenericeventbrowsergui.cpp ** Author: Peter J. Ersts ( ersts at amnh.org ) ** Creation Date: 2007-03-08 ** ** Copyright ( c ) 2007, American Museum of Natural History. All rights reserved. ** ** This library/program is free software; you can redistribute it ** and/or modify it under the terms of the GNU Library General Public ** License as published by the Free Software Foundation; either ** version 2 of the License, or ( at your option ) any later version. ** ** This library/program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ** Library General Public License for more details. ** ** This work was made possible through a grant by the the John D. and ** Catherine T. MacArthur Foundation. Additionally, this program was prepared by ** the American Museum of Natural History under award No. NA05SEC46391002 ** from the National Oceanic and Atmospheric Administration, U.S. Department ** of Commerce. The statements, findings, conclusions, and recommendations ** are those of the author( s ) and do not necessarily reflect the views of the ** National Oceanic and Atmospheric Administration or the Department of Commerce. 
** **/ #include "evisgenericeventbrowsergui.h" #include "qgsapplication.h" #include "qgsmaprenderer.h" #include "qgsmaptopixel.h" #include "qgsmapcanvas.h" #include "qgsgeometry.h" #include "qgslogger.h" #include "qgspoint.h" #include "qgsfield.h" #include "qgsrectangle.h" #include <QMessageBox> #include <QTreeWidgetItem> #include <QGraphicsScene> #include <QSettings> #include <QPainter> #include <QProcess> #include <QFileDialog> /** * Constructor called when browser is launched from the application plugin tool bar * @param parent - Pointer the to parent QWidget for modality * @param interface - Pointer the the application interface * @param fl - Window flags */ eVisGenericEventBrowserGui::eVisGenericEventBrowserGui( QWidget* parent, QgisInterface* interface, Qt::WFlags fl ) : QDialog( parent, fl ) { setupUi( this ); QSettings settings; restoreGeometry( settings.value( "/eVis/browser-geometry" ).toByteArray() ); mCurrentFeatureIndex = 0; mInterface = interface; mDataProvider = 0; mVectorLayer = 0; mCanvas = 0; mIgnoreEvent = false; if ( initBrowser( ) ) { loadRecord( ); show( ); } else { close( ); } } /** * Constructor called when browser is launched by the eVisEventIdTool * @param parent - Pointer to the parent QWidget for modality * @param canvas - Pointer to the map canvas * @param fl - Window flags */ eVisGenericEventBrowserGui::eVisGenericEventBrowserGui( QWidget* parent, QgsMapCanvas* canvas, Qt::WFlags fl ) : QDialog( parent, fl ) { setupUi( this ); mCurrentFeatureIndex = 0; mInterface = 0; mDataProvider = 0; mVectorLayer = 0; mCanvas = canvas; mIgnoreEvent = false; if ( initBrowser( ) ) { loadRecord( ); show( ); } else { close( ); } } /** * Basic descructor */ eVisGenericEventBrowserGui::~eVisGenericEventBrowserGui( ) { QSettings settings; settings.setValue( "/eVis/browser-geometry", saveGeometry() ); //Clean up, disconnect the highlighting routine and refesh the canvase to clear highlighting symbol if ( 0 != mCanvas ) { disconnect( mCanvas, SIGNAL( 
renderComplete( QPainter * ) ), this, SLOT( renderSymbol( QPainter * ) ) ); mCanvas->refresh( ); } //On close, clear selected feature if ( 0 != mVectorLayer ) { mVectorLayer->removeSelection( false ); } } /** * This method is an extension of the constructor. It was implemented to reduce the amount of code duplicated between the constuctors. */ bool eVisGenericEventBrowserGui::initBrowser( ) { //setup gui setWindowTitle( tr( "Generic Event Browser" ) ); connect( treeEventData, SIGNAL( itemDoubleClicked( QTreeWidgetItem *, int ) ), this, SLOT( launchExternalApplication( QTreeWidgetItem *, int ) ) ); mHighlightSymbol.load( ":/evis/eVisHighlightSymbol.png" ); mPointerSymbol.load( ":/evis/eVisPointerSymbol.png" ); mCompassOffset = 0.0; //Flag to let us know if the browser fully loaded mBrowserInitialized = false; //Initialize some class variables mDefaultEventImagePathField = 0; mDefaultCompassBearingField = 0; mDefaultCompassOffsetField = 0; //initialize Display tab GUI elements pbtnNext->setEnabled( false ); pbtnPrevious->setEnabled( false ); //Set up Attribute display treeEventData->setColumnCount( 2 ); QStringList treeHeaders; treeHeaders << tr( "Field" ) << tr( "Value" ); treeEventData->setHeaderLabels( treeHeaders ); //Initialize Options tab GUI elements cboxEventImagePathField->setEnabled( true ); chkboxEventImagePathRelative->setChecked( false ); chkboxDisplayCompassBearing->setChecked( false ); cboxCompassBearingField->setEnabled( true ); rbtnManualCompassOffset->setChecked( false ); dsboxCompassOffset->setEnabled( true ); dsboxCompassOffset->setValue( 0.0 ); rbtnAttributeCompassOffset->setChecked( false ); cboxCompassOffsetField->setEnabled( true ); chkboxUseOnlyFilename->setChecked( false ); QString myThemePath = QgsApplication::activeThemePath( ); pbtnResetEventImagePathData->setIcon( QIcon( QPixmap( myThemePath + "/mActionDraw.png" ) ) ); pbtnResetCompassBearingData->setIcon( QIcon( QPixmap( myThemePath + "/mActionDraw.png" ) ) ); 
pbtnResetCompassOffsetData->setIcon( QIcon( QPixmap( myThemePath + "/mActionDraw.png" ) ) ); pbtnResetBasePathData->setIcon( QIcon( QPixmap( myThemePath + "/mActionDraw.png" ) ) ); pbtnResetUseOnlyFilenameData->setIcon( QIcon( QPixmap( myThemePath + "/mActionDraw.png" ) ) ); pbtnResetApplyPathRulesToDocs->setIcon( QIcon( QPixmap( myThemePath + "/mActionDraw.png" ) ) ); chkboxSaveEventImagePathData->setChecked( false ); chkboxSaveCompassBearingData->setChecked( false ); chkboxSaveCompassOffsetData->setChecked( false ); chkboxSaveBasePathData->setChecked( false ); chkboxSaveUseOnlyFilenameData->setChecked( false ); //Set up Configure External Application buttons pbtnAddFileType->setIcon( QIcon( QPixmap( myThemePath + "/mActionNewAttribute.png" ) ) ); pbtnDeleteFileType->setIcon( QIcon( QPixmap( myThemePath + "/mActionDeleteAttribute.png" ) ) ); //Check to for interface, not null when launched from plugin toolbar, otherwise expect map canvas if ( 0 != mInterface ) { //check for active layer if ( mInterface->activeLayer( ) ) { //verify that the active layer is a vector layer if ( QgsMapLayer::VectorLayer == mInterface->activeLayer( )->type( ) ) { mVectorLayer = ( QgsVectorLayer* )mInterface->activeLayer( ); mCanvas = mInterface->mapCanvas( ); } else { QMessageBox::warning( this, tr( "Warning" ), tr( "This tool only supports vector data" ) ); return false; } } else { QMessageBox::warning( this, tr( "Warning" ), tr( "No active layers found" ) ); return false; } } //check for map canvas, if map canvas is null, throw error else if ( 0 != mCanvas ) { //check for active layer if ( mCanvas->currentLayer( ) ) { //verify that the active layer is a vector layer if ( QgsMapLayer::VectorLayer == mCanvas->currentLayer( )->type( ) ) { mVectorLayer = ( QgsVectorLayer* )mCanvas->currentLayer( ); } else { QMessageBox::warning( this, tr( "Warning" ), tr( "This tool only supports vector data" ) ); return false; } } else { QMessageBox::warning( this, tr( "Warning" ), tr( "No active layers 
found" ) ); return false; } } else { QMessageBox::warning( this, tr( "Error" ), tr( "Unable to connect to either the map canvas or application interface" ) ); return false; } //Connect rendering routine for highlighting symbols and load symbols connect( mCanvas, SIGNAL( renderComplete( QPainter * ) ), this, SLOT( renderSymbol( QPainter * ) ) ); mDataProvider = mVectorLayer->dataProvider( ); /* * A list of the selected feature ids is made so that we can move forward and backward through * the list. The data providers only have the ability to get one feature at a time or * sequentially move forward through the selected features */ if ( 0 == mVectorLayer->selectedFeatureCount( ) ) //if nothing is selected select everything { mVectorLayer->invertSelection(); mFeatureIds = mVectorLayer->selectedFeaturesIds().toList(); } else //use selected features { mFeatureIds = mVectorLayer->selectedFeaturesIds().toList(); } if ( 0 == mFeatureIds.size() ) return false; //get the first feature in the list so we can set the field in the pulldown menues QgsFeature* myFeature = featureAtId( mFeatureIds.at( mCurrentFeatureIndex ) ); if ( !myFeature ) { QMessageBox::warning( this, tr( "Error" ), tr( "An invalid feature was received during initialization" ) ); return false; } QgsFieldMap myFieldMap = mDataProvider->fields( ); QgsAttributeMap myAttributeMap = myFeature->attributeMap( ); mIgnoreEvent = true; //Ignore indexChanged event when adding items to combo boxes for ( int x = 0; x < myFieldMap.size( ); x++ ) { cboxEventImagePathField->addItem( myFieldMap[x].name( ) ); cboxCompassBearingField->addItem( myFieldMap[x].name( ) ); cboxCompassOffsetField->addItem( myFieldMap[x].name( ) ); if ( myAttributeMap[x].toString( ).contains( QRegExp( "(jpg|jpeg|tif|tiff|gif)", Qt::CaseInsensitive ) ) ) { mDefaultEventImagePathField = x; } if ( myFieldMap[x].name( ).contains( QRegExp( "(comp|bear)", Qt::CaseInsensitive ) ) ) { mDefaultCompassBearingField = x; } if ( myFieldMap[x].name( ).contains( 
QRegExp( "(offset|declination)", Qt::CaseInsensitive ) ) ) { mDefaultCompassOffsetField = x; } } mIgnoreEvent = false; //Set Display tab gui items if ( mFeatureIds.size( ) > 1 ) { pbtnNext->setEnabled( true ); } setWindowTitle( tr( "Event Browser - Displaying records 01 of %1" ).arg( mFeatureIds.size(), 2, 10, QChar( '0' ) ) ); //Set Options tab gui items initOptionsTab( ); //Load file associations into Configure External Applications tab gui items QSettings myQSettings; myQSettings.beginWriteArray( "/eVis/filetypeassociations" ); int myTotalAssociations = myQSettings.childGroups( ).count( ); int myIterator = 0; while ( myIterator < myTotalAssociations ) { myQSettings.setArrayIndex( myIterator ); tableFileTypeAssociations->insertRow( tableFileTypeAssociations->rowCount( ) ); tableFileTypeAssociations->setItem( myIterator, 0, new QTableWidgetItem( myQSettings.value( "extension", "" ).toString( ) ) ); tableFileTypeAssociations->setItem( myIterator, 1, new QTableWidgetItem( myQSettings.value( "application", "" ).toString( ) ) ); myIterator++; } myQSettings.endArray( ); mBrowserInitialized = true; return true; } /** * This method is an extension of the constructor. It was implemented so that it could be called by the GUI at anytime. */ void eVisGenericEventBrowserGui::initOptionsTab( ) { //The base path has to be set first. If not if/when cboxEventImagePathRelative state change slot //will all ways over write the base path with the path to the data source //TODO: Find some better logic to prevent this from happening. 
leBasePath->setText( mConfiguration.basePath( ) ); chkboxUseOnlyFilename->setChecked( mConfiguration.isUseOnlyFilenameSet( ) ); //Set Options tab gui items int myIndex = cboxEventImagePathField->findText( mConfiguration.eventImagePathField( ), Qt::MatchExactly ); if ( -1 != myIndex ) { cboxEventImagePathField->setCurrentIndex( myIndex ); } else { cboxEventImagePathField->setCurrentIndex( mDefaultEventImagePathField ); } chkboxEventImagePathRelative->setChecked( mConfiguration.isEventImagePathRelative( ) ); myIndex = cboxCompassBearingField->findText( mConfiguration.compassBearingField( ), Qt::MatchExactly ); if ( -1 != myIndex ) { cboxCompassBearingField->setCurrentIndex( myIndex ); } else { cboxCompassBearingField->setCurrentIndex( mDefaultCompassBearingField ); } chkboxDisplayCompassBearing->setChecked( mConfiguration.isDisplayCompassBearingSet( ) ); if ( !mConfiguration.isDisplayCompassBearingSet( ) ) { cboxCompassBearingField->setEnabled( false ); } dsboxCompassOffset->setValue( mConfiguration.compassOffset( ) ); myIndex = cboxCompassOffsetField->findText( mConfiguration.compassOffsetField( ), Qt::MatchExactly ); if ( -1 != myIndex ) { cboxCompassOffsetField->setCurrentIndex( myIndex ); } else { loadRecord( ); cboxCompassOffsetField->setCurrentIndex( mDefaultCompassOffsetField ); } if ( mConfiguration.isManualCompassOffsetSet( ) ) { rbtnManualCompassOffset->setChecked( true ); rbtnAttributeCompassOffset->setChecked( false ); } else if ( !mConfiguration.compassOffsetField().isEmpty() ) { rbtnManualCompassOffset->setChecked( false ); rbtnAttributeCompassOffset->setChecked( true ); } else { rbtnManualCompassOffset->setChecked( false ); rbtnAttributeCompassOffset->setChecked( false ); dsboxCompassOffset->setEnabled( false ); cboxCompassOffsetField->setEnabled( false ); } chkboxApplyPathRulesToDocs->setChecked( mConfiguration.isApplyPathRulesToDocsSet( ) ); } void eVisGenericEventBrowserGui::closeEvent( QCloseEvent *event ) { if ( mBrowserInitialized ) { accept( ); 
event->accept( ); } } void eVisGenericEventBrowserGui::accept( ) { QSettings myQSettings; if ( chkboxSaveEventImagePathData->isChecked( ) ) { myQSettings.setValue( "/eVis/eventimagepathfield", cboxEventImagePathField->currentText( ) ); myQSettings.setValue( "/eVis/eventimagepathrelative", chkboxEventImagePathRelative->isChecked( ) ); } if ( chkboxSaveCompassBearingData->isChecked( ) ) { myQSettings.setValue( "/eVis/compassbearingfield", cboxCompassBearingField->currentText( ) ); myQSettings.setValue( "/eVis/displaycompassbearing", chkboxDisplayCompassBearing->isChecked( ) ); } if ( chkboxSaveCompassOffsetData->isChecked( ) ) { myQSettings.setValue( "/eVis/manualcompassoffset", rbtnManualCompassOffset->isChecked( ) ); myQSettings.setValue( "/eVis/compassoffset", dsboxCompassOffset->value( ) ); myQSettings.setValue( "/eVis/attributecompassoffset", rbtnAttributeCompassOffset->isChecked( ) ); myQSettings.setValue( "/eVis/compassoffsetfield", cboxCompassOffsetField->currentText( ) ); } if ( chkboxSaveBasePathData->isChecked( ) ) { myQSettings.setValue( "/eVis/basepath", leBasePath->text( ) ); } if ( chkboxSaveUseOnlyFilenameData->isChecked( ) ) { myQSettings.setValue( "/eVis/useonlyfilename", chkboxUseOnlyFilename->isChecked( ) ); } if ( chkboxSaveApplyPathRulesToDocs->isChecked( ) ) { myQSettings.setValue( "/eVis/applypathrulestodocs", chkboxApplyPathRulesToDocs->isChecked( ) ); } myQSettings.remove( "/eVis/filetypeassociations" ); myQSettings.beginWriteArray( "/eVis/filetypeassociations" ); int myIterator = 0; int myIndex = 0; while ( myIterator < tableFileTypeAssociations->rowCount( ) ) { myQSettings.setArrayIndex( myIndex ); if ( 0 != tableFileTypeAssociations->item( myIterator, 0 ) && 0 != tableFileTypeAssociations->item( myIterator, 1 ) ) { myQSettings.setValue( "extension", tableFileTypeAssociations->item( myIterator, 0 )->text( ) ); myQSettings.setValue( "application", tableFileTypeAssociations->item( myIterator, 1 )->text( ) ); myIndex++; } myIterator++; } 
myQSettings.endArray( ); } /** * Modifies the Event Image Path according to the local and global settings */ void eVisGenericEventBrowserGui::buildEventImagePath( ) { //This if statement is a bit of a hack, have to track down where the 0 is comming from on initalization if ( "0" != mEventImagePath ) { int myImageNameMarker = 0; if ( mEventImagePath.contains( '/' ) ) { myImageNameMarker = mEventImagePath.lastIndexOf( '/' ); } else { myImageNameMarker = mEventImagePath.lastIndexOf( '\\' ); } QString myImageName = mEventImagePath; myImageName.remove( 0, myImageNameMarker + 1 ); if ( mConfiguration.isUseOnlyFilenameSet( ) ) { mEventImagePath = mConfiguration.basePath( ) + myImageName; } else { if ( mConfiguration.isEventImagePathRelative( ) ) { mEventImagePath = mConfiguration.basePath( ) + mEventImagePath; } } } } /** * Chooses which image loading method to use and centers the map canvas on the current feature */ void eVisGenericEventBrowserGui::displayImage( ) { //This if statement is a bit of a hack, have to track down where the 0 is comming from on initalization if ( "0" != mEventImagePath && 0 == displayArea->currentIndex( ) ) { if ( mEventImagePath.startsWith( "http://", Qt::CaseInsensitive ) ) { imageDisplayArea->displayUrlImage( mEventImagePath ); } else { imageDisplayArea->displayImage( mEventImagePath ); } //clear any selection that may be present mVectorLayer->removeSelection( false ); if ( mFeatureIds.size( ) > 0 ) { //select the current feature in the layer mVectorLayer->select( mFeatureIds.at( mCurrentFeatureIndex ), true ); //get a copy of the feature QgsFeature* myFeature = featureAtId( mFeatureIds.at( mCurrentFeatureIndex ) ); if ( 0 == myFeature ) return; QgsPoint myPoint = myFeature->geometry( )->asPoint( ); myPoint = mCanvas->mapRenderer( )->layerToMapCoordinates( mVectorLayer, myPoint ); //keep the extent the same just center the map canvas in the display so our feature is in the middle QgsRectangle myRect( myPoint.x( ) - ( mCanvas->extent( 
).width( ) / 2 ), myPoint.y( ) - ( mCanvas->extent( ).height( ) / 2 ), myPoint.x( ) + ( mCanvas->extent( ).width( ) / 2 ), myPoint.y( ) + ( mCanvas->extent( ).height( ) / 2 ) ); // only change the extents if the point is beyond the current extents to minimise repaints if ( !mCanvas->extent().contains( myPoint ) ) { mCanvas->setExtent( myRect ); } mCanvas->refresh( ); } } } /** * Returns a pointer to the reqested feature with a given featureid * @param id - FeatureId of the feature to find/select */ QgsFeature* eVisGenericEventBrowserGui::featureAtId( QgsFeatureId id ) { //This method was originally necessary because delimited text data provider did not support featureAtId( ) //It has mostly been stripped down now if ( mDataProvider && mFeatureIds.size( ) != 0 ) { if ( !mVectorLayer->featureAtId( id, mFeature, true, true ) ) { return 0; } } return &mFeature; } /** * Display the attrbiutes for the current feature and load the image */ void eVisGenericEventBrowserGui::loadRecord( ) { treeEventData->clear(); //Get a pointer to the current feature QgsFeature* myFeature; myFeature = featureAtId( mFeatureIds.at( mCurrentFeatureIndex ) ); if ( 0 == myFeature ) return; QString myCompassBearingField = cboxCompassBearingField->currentText( ); QString myCompassOffsetField = cboxCompassOffsetField->currentText( ); QString myEventImagePathField = cboxEventImagePathField->currentText( ); QgsFieldMap myFieldMap = mDataProvider->fields( ); QgsAttributeMap myAttributeMap = myFeature->attributeMap( ); //loop through the attributes and display their contents for ( QgsAttributeMap::const_iterator it = myAttributeMap.begin( ); it != myAttributeMap.end( ); ++it ) { QStringList myValues; myValues << myFieldMap[it.key( )].name( ) << it->toString( ); QTreeWidgetItem* myItem = new QTreeWidgetItem( myValues ); if ( myFieldMap[it.key( )].name( ) == myEventImagePathField ) { mEventImagePath = it->toString( ); } if ( myFieldMap[it.key( )].name( ) == myCompassBearingField ) { mCompassBearing = 
it->toDouble( ); } if ( mConfiguration.isAttributeCompassOffsetSet( ) ) { if ( myFieldMap[it.key( )].name( ) == myCompassOffsetField ) { mCompassOffset = it->toDouble( ); } } else { mCompassOffset = 0.0; } //Check to see if the attribute is a know file type int myIterator = 0; while ( myIterator < tableFileTypeAssociations->rowCount( ) ) { if ( tableFileTypeAssociations->item( myIterator, 0 ) && ( it->toString( ).startsWith( tableFileTypeAssociations->item( myIterator, 0 )->text( ) + ":", Qt::CaseInsensitive ) || it->toString( ).endsWith( tableFileTypeAssociations->item( myIterator, 0 )->text( ), Qt::CaseInsensitive ) ) ) { myItem->setBackground( 1, QBrush( QColor( 183, 216, 125, 255 ) ) ); break; } else myIterator++; } treeEventData->addTopLevelItem( myItem ); } //Modify EventImagePath as needed buildEventImagePath( ); //Request the image to be displayed in the browser displayImage( ); } /** * Restore the default configuration options */ void eVisGenericEventBrowserGui::restoreDefaultOptions( ) { chkboxEventImagePathRelative->setChecked( false ); cboxEventImagePathField->setCurrentIndex( mDefaultEventImagePathField ); cboxCompassBearingField->setEnabled( true ); cboxCompassBearingField->setCurrentIndex( mDefaultCompassBearingField ); cboxCompassBearingField->setEnabled( false ); chkboxDisplayCompassBearing->setChecked( false ); cboxCompassOffsetField->setEnabled( true ); cboxCompassOffsetField->setCurrentIndex( mDefaultCompassOffsetField ); cboxCompassOffsetField->setEnabled( false ); rbtnManualCompassOffset->setChecked( true ); dsboxCompassOffset->setValue( 0.0 ); leBasePath->setText( "" ); chkboxUseOnlyFilename->setChecked( false ); chkboxSaveEventImagePathData->setChecked( false ); chkboxSaveCompassBearingData->setChecked( false ); chkboxSaveCompassOffsetData->setChecked( false ); chkboxSaveBasePathData->setChecked( false ); chkboxSaveUseOnlyFilenameData->setChecked( false ); chkboxApplyPathRulesToDocs->setChecked( false ); } /** * Sets the base path to the 
path of the data source */ void eVisGenericEventBrowserGui::setBasePathToDataSource( ) { //Noticed some strangeness here while cleaning up for migration to the QGIS trunk - PJE 2009-07-01 //TODO: The check for windows paths not longer does anything, remove or fix int myPathMarker = 0; bool isWindows = false; QString mySourceUri = mDataProvider->dataSourceUri( ); //Check to see which way the directory symbol goes, I think this is actually unnecessary in qt if ( mySourceUri.contains( '/' ) ) { myPathMarker = mySourceUri.lastIndexOf( '/' ); } else { myPathMarker = mySourceUri.lastIndexOf( '\\' ); } //Strip off the actual filename so we just have path mySourceUri.truncate( myPathMarker + 1 ); //check for duplicate directory symbols when concatinating the two strings if ( isWindows ) { mySourceUri.replace( "\\\\", "\\" ); } else { if ( mySourceUri.startsWith( "http://", Qt::CaseInsensitive ) ) { mySourceUri.replace( "//", "/" ); mySourceUri.replace( "http:/", "http://", Qt::CaseInsensitive ); } else { mySourceUri.replace( "//", "/" ); } } leBasePath->setText( mySourceUri ); } /* * * Public and Private Slots * */ /** * Slot called when a column is clicked in the tree displaying the attribute data * @param theItem - The tree widget item click * @param theColumn - The column that was clicked */ void eVisGenericEventBrowserGui::launchExternalApplication( QTreeWidgetItem * theItem, int theColumn ) { // At this point there is only attribute data with no children, ignore clicks on field name if ( 1 == theColumn ) { int myIterator = 0; bool startsWithExtension = false; while ( myIterator < tableFileTypeAssociations->rowCount( ) ) { if ( theItem->text( theColumn ).startsWith( tableFileTypeAssociations->item( myIterator, 0 )->text( ) + ":", Qt::CaseInsensitive ) ) { startsWithExtension = true; break; } else if ( theItem->text( theColumn ).endsWith( tableFileTypeAssociations->item( myIterator, 0 )->text( ), Qt::CaseInsensitive ) ) { startsWithExtension = false; break; } else 
myIterator++; } if ( myIterator != tableFileTypeAssociations->rowCount( ) ) { QProcess *myProcess = new QProcess( ); QString myApplication = tableFileTypeAssociations->item( myIterator, 1 )->text( ); QString myDocument = theItem->text( theColumn ); if ( startsWithExtension ) { myDocument = theItem->text( theColumn ).remove( tableFileTypeAssociations->item( myIterator, 0 )->text( ) + ":", Qt::CaseInsensitive ); } if ( "" != myApplication ) { if ( mConfiguration.isApplyPathRulesToDocsSet( ) ) { int myDocumentNameMarker = 0; if ( myDocument.contains( '/' ) ) { myDocumentNameMarker = myDocument.lastIndexOf( '/' ); } else { myDocumentNameMarker = myDocument.lastIndexOf( '\\' ); } QString myDocumentName = myDocument; myDocumentName.remove( 0, myDocumentNameMarker + 1 ); if ( mConfiguration.isUseOnlyFilenameSet( ) ) { myDocument = mConfiguration.basePath( ) + myDocumentName; } else { if ( mConfiguration.isEventImagePathRelative( ) ) { myDocument = mConfiguration.basePath( ) + myDocument; } } } myProcess->start( myApplication, QStringList( ) << myDocument ); } } else { QMessageBox::information( this, tr( "Attribute Contents" ), theItem->text( theColumn ) ); } } } /** * Slot called when the restore or save button is click on the options panel * @param state - The new state of the checkbox */ void eVisGenericEventBrowserGui::on_buttonboxOptions_clicked( QAbstractButton* theButton ) { if ( QDialogButtonBox::ResetRole == buttonboxOptions->buttonRole( theButton ) ) { restoreDefaultOptions( ); } else if ( QDialogButtonBox::AcceptRole == buttonboxOptions->buttonRole( theButton ) ) { accept( ); } } /** * Slot called when the state changes for the chkboxApplyPathRulesToDocs check box. 
* @param theState - The new state of the checkbox */ void eVisGenericEventBrowserGui::on_chkboxApplyPathRulesToDocs_stateChanged( int theState ) { Q_UNUSED( theState ); mConfiguration.setApplyPathRulesToDocs( chkboxApplyPathRulesToDocs->isChecked( ) ); } /** * Slot called when the index changes for the cboxEventImagePathField combo box. * @param theIndex - The index of the new selected item */ void eVisGenericEventBrowserGui::on_cboxEventImagePathField_currentIndexChanged( int theIndex ) { Q_UNUSED( theIndex ); if ( !mIgnoreEvent ) { mConfiguration.setEventImagePathField( cboxEventImagePathField->currentText( ) ); QgsFieldMap myFieldMap = mDataProvider->fields( ); QgsFeature* myFeature = featureAtId( mFeatureIds.at( mCurrentFeatureIndex ) ); if ( 0 == myFeature ) return; QgsAttributeMap myAttributeMap = myFeature->attributeMap( ); for ( QgsAttributeMap::const_iterator it = myAttributeMap.begin( ); it != myAttributeMap.end( ); ++it ) { if ( myFieldMap[it.key( )].name( ) == cboxEventImagePathField->currentText( ) ) { mEventImagePath = it->toString( ); } } } } /** * Slot called when the index changes for the cboxCompassBearingField combo box. * @param theIndex - The index of the new selected item */ void eVisGenericEventBrowserGui::on_cboxCompassBearingField_currentIndexChanged( int theIndex ) { Q_UNUSED( theIndex ); if ( !mIgnoreEvent ) { mConfiguration.setCompassBearingField( cboxCompassBearingField->currentText( ) ); QgsFieldMap myFieldMap = mDataProvider->fields( ); QgsFeature* myFeature = featureAtId( mFeatureIds.at( mCurrentFeatureIndex ) ); if ( 0 == myFeature ) return; QgsAttributeMap myAttributeMap = myFeature->attributeMap( ); for ( QgsAttributeMap::const_iterator it = myAttributeMap.begin( ); it != myAttributeMap.end( ); ++it ) { if ( myFieldMap[it.key( )].name( ) == cboxCompassBearingField->currentText( ) ) { mCompassBearing = it->toDouble( ); } } } } /** * Slot called when the index changes for the cboxCompassBearingField combo box. 
* @param theIndex - The index of the new selected item */ void eVisGenericEventBrowserGui::on_cboxCompassOffsetField_currentIndexChanged( int theIndex ) { Q_UNUSED( theIndex ); if ( !mIgnoreEvent ) { mConfiguration.setCompassOffsetField( cboxCompassOffsetField->currentText( ) ); QgsFieldMap myFieldMap = mDataProvider->fields( ); QgsFeature* myFeature = featureAtId( mFeatureIds.at( mCurrentFeatureIndex ) ); if ( 0 == myFeature ) return; QgsAttributeMap myAttributeMap = myFeature->attributeMap( ); for ( QgsAttributeMap::const_iterator it = myAttributeMap.begin( ); it != myAttributeMap.end( ); ++it ) { if ( myFieldMap[it.key( )].name( ) == cboxCompassOffsetField->currentText( ) ) { mCompassOffset = it->toDouble( ); } } } } /** * Slot called when the chkDisplayCompassBearing radio button is toggled * @param theState - The current selection state of the radio button */ void eVisGenericEventBrowserGui::on_chkboxDisplayCompassBearing_stateChanged( int theState ) { Q_UNUSED( theState ); mConfiguration.setDisplayCompassBearing( chkboxDisplayCompassBearing->isChecked( ) ); cboxCompassBearingField->setEnabled( chkboxDisplayCompassBearing->isChecked( ) ); } /** * Slot called when the state changes for the chkboxEventImagePathRelative check box. * @param theState - The new state of the checkbox */ void eVisGenericEventBrowserGui::on_chkboxEventImagePathRelative_stateChanged( int theState ) { Q_UNUSED( theState ); mConfiguration.setEventImagePathRelative( chkboxEventImagePathRelative->isChecked( ) ); if ( chkboxEventImagePathRelative->isChecked( ) && "" == leBasePath->text( ) ) { setBasePathToDataSource( ); } } /** * Slot called when the state changes for the chkboxUseOnlyFilename check box. 
* @param theState - The new state of the checkbox */ void eVisGenericEventBrowserGui::on_chkboxUseOnlyFilename_stateChanged( int theState ) { Q_UNUSED( theState ); mConfiguration.setUseOnlyFilename( chkboxUseOnlyFilename->isChecked( ) ); } /** * Slot called when the tabs in the tabWidget are selected<|fim▁hole|>void eVisGenericEventBrowserGui::on_displayArea_currentChanged( int theCurrentTabIndex ) { //Force redraw when we switching back to the Display tab if ( 0 == theCurrentTabIndex ) { loadRecord( ); } } /** * Slot called when a manual compass offset is entered * @param theValue - The new compass offset */ void eVisGenericEventBrowserGui::on_dsboxCompassOffset_valueChanged( double theValue ) { mConfiguration.setCompassOffset( theValue ); } /** * Slot called the text in leBasePath is set or changed * @param theText - The new base path */ void eVisGenericEventBrowserGui::on_leBasePath_textChanged( QString theText ) { mConfiguration.setBasePath( theText ); } /** * Slot called when the pbtnAddFileType button is clicked - adds a new row to the file associations table */ void eVisGenericEventBrowserGui::on_pbtnAddFileType_clicked( ) { tableFileTypeAssociations->insertRow( tableFileTypeAssociations->rowCount( ) ); } /** * Slot called when the pbtnDeleteFileType button is clicked - removes arow from the file associations table */ void eVisGenericEventBrowserGui::on_pbtnDeleteFileType_clicked( ) { if ( 1 <= tableFileTypeAssociations->rowCount( ) ) { tableFileTypeAssociations->removeRow( tableFileTypeAssociations->currentRow( ) ); } } /** * Slot called when the pbtnNext button is pressed */ void eVisGenericEventBrowserGui::on_pbtnNext_clicked( ) { if ( mCurrentFeatureIndex != mFeatureIds.size( ) - 1 ) { pbtnPrevious->setEnabled( true ); mCurrentFeatureIndex++; setWindowTitle( tr( "Event Browser - Displaying records %1 of %2" ) .arg( mCurrentFeatureIndex + 1, 2, 10, QChar( '0' ) ).arg( mFeatureIds.size(), 2, 10, QChar( '0' ) ) ); loadRecord( ); } if ( mCurrentFeatureIndex 
== mFeatureIds.size( ) - 1 ) { pbtnNext->setEnabled( false ); } } /** * Slot called when the pbtnPrevious button is pressed */ void eVisGenericEventBrowserGui::on_pbtnPrevious_clicked( ) { if ( mCurrentFeatureIndex > 0 ) { pbtnNext->setEnabled( true ); mCurrentFeatureIndex--; setWindowTitle( tr( "Event Browser - Displaying records %1 of %2" ) .arg( mCurrentFeatureIndex + 1, 2, 10, QChar( '0' ) ).arg( mFeatureIds.size(), 2, 10, QChar( '0' ) ) ); loadRecord( ); } if ( mCurrentFeatureIndex == 0 ) { pbtnPrevious->setEnabled( false ); } } void eVisGenericEventBrowserGui::on_pbtnResetApplyPathRulesToDocs_clicked( ) { chkboxApplyPathRulesToDocs->setChecked( false ); } void eVisGenericEventBrowserGui::on_pbtnResetBasePathData_clicked( ) { leBasePath->setText( "" ); if ( chkboxEventImagePathRelative->isChecked( ) ) { setBasePathToDataSource( ); } } void eVisGenericEventBrowserGui::on_pbtnResetCompassBearingData_clicked( ) { cboxCompassBearingField->setEnabled( true ); cboxCompassBearingField->setCurrentIndex( mDefaultCompassBearingField ); cboxCompassBearingField->setEnabled( false ); chkboxDisplayCompassBearing->setChecked( false ); } void eVisGenericEventBrowserGui::on_pbtnResetCompassOffsetData_clicked( ) { cboxCompassOffsetField->setEnabled( true ); cboxCompassOffsetField->setCurrentIndex( mDefaultCompassOffsetField ); cboxCompassOffsetField->setEnabled( false ); rbtnManualCompassOffset->setChecked( true ); dsboxCompassOffset->setValue( 0.0 ); } void eVisGenericEventBrowserGui::on_pbtnResetEventImagePathData_clicked( ) { chkboxEventImagePathRelative->setChecked( false ); cboxEventImagePathField->setCurrentIndex( mDefaultEventImagePathField ); } void eVisGenericEventBrowserGui::on_pbtnResetUseOnlyFilenameData_clicked( ) { chkboxUseOnlyFilename->setChecked( false ); } void eVisGenericEventBrowserGui::on_rbtnManualCompassOffset_toggled( bool theState ) { mConfiguration.setManualCompassOffset( theState ); mConfiguration.setAttributeCompassOffset( !theState ); 
dsboxCompassOffset->setEnabled( theState ); cboxCompassOffsetField->setEnabled( !theState ); } /** * Slot called when an entry in the file associations table is clicked * @param theRow - the row that was clicked * @param theColumn - the column that was clicked */ void eVisGenericEventBrowserGui::on_tableFileTypeAssociations_cellDoubleClicked( int theRow, int theColumn ) { if ( 1 == theColumn ) { QString myApplication = QFileDialog::getOpenFileName( this, tr( "Select Application" ), "", tr( "All ( * )" ) ); if ( "" != myApplication ) { tableFileTypeAssociations->setItem( theRow, theColumn, new QTableWidgetItem( myApplication ) ); } } } /** * This slot is coonnected to the map canvas. When the canvas is done drawing the slot is fired to display thee highlighting symbol * @param thePainter - Pointer to the QPainter object */ void eVisGenericEventBrowserGui::renderSymbol( QPainter* thePainter ) { if ( mFeatureIds.size( ) > 0 && mVectorLayer != 0 ) { //Get a pointer to the current feature QgsFeature* myFeature = featureAtId( mFeatureIds.at( mCurrentFeatureIndex ) ); if ( 0 == myFeature ) return; QgsPoint myPoint = myFeature->geometry( )->asPoint( ); myPoint = mCanvas->mapRenderer( )->layerToMapCoordinates( mVectorLayer, myPoint ); mCanvas->getCoordinateTransform( )->transform( &myPoint ); if ( mConfiguration.isDisplayCompassBearingSet( ) ) { //Make a copy of the pointersymbol and rotate it based on the values in the attribute field QPixmap myTempPixmap( mPointerSymbol.height( ), mPointerSymbol.height( ) ); myTempPixmap.fill( QColor( 255, 255, 255, 0 ) ); QPainter p( &myTempPixmap ); QMatrix wm; wm.translate( myTempPixmap.width( ) / 2, myTempPixmap.height( ) / 2 ); // really center double myBearing = mCompassBearing; if ( mConfiguration.isManualCompassOffsetSet( ) ) { myBearing = mCompassBearing + mConfiguration.compassOffset( ); } else { myBearing = mCompassBearing + mCompassOffset; } if ( myBearing < 0.0 ) { while ( myBearing < 0.0 ) myBearing = 360.0 + myBearing; } 
else if ( myBearing >= 360.0 ) { while ( myBearing >= 360.0 ) myBearing = myBearing - 360.0; } wm.rotate( myBearing ); p.setWorldMatrix( wm ); p.drawPixmap( -mPointerSymbol.width( ) / 2, -mPointerSymbol.height( ) / 2, mPointerSymbol ); int xShift = ( int )myPoint.x( ) - ( myTempPixmap.width( ) / 2 ); int yShift = ( int )myPoint.y( ) - ( myTempPixmap.height( ) / 2 ); thePainter->drawPixmap( xShift, yShift, myTempPixmap ); } else { int xShift = ( int )myPoint.x( ) - ( mHighlightSymbol.width( ) / 2 ); int yShift = ( int )myPoint.y( ) - ( mHighlightSymbol.height( ) / 2 ); thePainter->drawPixmap( xShift, yShift, mHighlightSymbol ); } } }<|fim▁end|>
* @param theCurrentTabIndex - The index of the currently selected tab */
<|file_name|>tkwindow.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 *-* # made for python3! from tkinter import * from tkinter.ttk import *<|fim▁hole|> class TkWindow(): registers = {} def __init__(self, parent, title, width=400, height=300): self.parent = parent #Tk or toplevel self.w = width self.h = height self.make_gui(title) self.loaded() def loaded(self): pass # overload me """register another window to receive a signal""" @classmethod def register(cls, target, signame): if not target in cls.registers: cls.registers[target] = [] cls.registers[target].append(signame) """send a signal to all registered windows""" def send(self, signame, data=None): cls = self.__class__ for targ, sigs in cls.registers.items(): if sigs != None: if signame in sigs: targ.receive(self, signame, data) """receive a signame""" def receive(self, sender, signame, data): print("receive not overloaded but signal registered for <" + signame + "> from <" + str(sender) + "> with <" + str(data) +">") # overload me in your receiving window for your application def make_gui(self, title): self.parent.title(title) Style().configure("TFrame", padding=5) self.frame = Frame(self.parent, width=self.w, height=self.h) def makelabel(self, parent, lcol=0, lrow=0, caption='', **options): entry = Label(parent, text=caption, **options).grid(row=lrow, column=lcol, sticky=NE) return entry """create a multiline text entry field with a label""" def maketext(self, parent, lcol=0, lrow=0, erow=0, ecol=1, caption='', width=None, **options): print(lrow, lcol) if caption != '': Label(parent, text=caption).grid(row=lrow, column=lcol, sticky=NE) entry = Text(parent, **options) if width: entry.config(width=width) entry.grid(row=erow, column=ecol, sticky=W) return entry def makeentry(self, parent, lcol=0, lrow=0, erow=0, ecol=1, caption='', width=None, **options): if caption!='': Label(parent, text=caption).grid(row=lrow, column=lcol, sticky=E) entry = Entry(parent, **options) if width: entry.config(width=width) 
entry.grid(row=erow, column=ecol, sticky=W) return entry def setentryvalue(self, entry, value): entry.delete(0,END) entry.insert(0, value) def settextvalue(self, entry, value): entry.delete(0.0,END); entry.insert(0.0, value); def setbuttontext(self, button, txt): button['text'] = txt def makecombo(self, parent, ccol=1, crow=0, lcol=0, lrow=0, caption='', width=None, **options): if caption!='': Label(parent, text=caption).grid(row=lrow, column=lcol, sticky=E) cbox = Combobox(parent, **options) if width: cbox.config(width=width) cbox.grid(row=crow, column=ccol) return cbox def makecheck(self, parent, ecol=0, erow=0, caption='', **options): cb = Checkbutton(parent, text=caption, **options) cb.grid(row=erow, column=ecol, sticky=W) return cb def makebutton(self, parent, bcol=0, brow=0, caption='Press me', sticky=W, **options): bu = Button(parent, text=caption, **options) bu.grid(row=brow, column=bcol, sticky=sticky) return bu """create a list at the givne position""" def makelist(self, parent, llcol=0, llrow=1, lcol=0, lrow=0, caption='List', elements=[], mode='v', lrowspan=1, lcolspan=1, **options): frame = Frame(parent) frame.grid(row=lrow, column=lcol, rowspan=lrowspan, columnspan=lcolspan) hscroll = vscroll = None if caption!='': Label(parent, text=caption).grid(row=llrow, column=llcol, sticky=W) lb = Listbox(frame, **options) if 'v' in mode: vscroll = Scrollbar(frame, orient=VERTICAL) lb.config(yscrollcommand = vscroll.set) vscroll.config(command=lb.yview) vscroll.pack(side=RIGHT, fill=Y) if 'h' in mode: hscroll = Scrollbar(frame, orient=HROZONTAL) lb.configure(xscrollcommand = hscroll.set) hscroll.config(command = lb.xview) hscroll.pack(side=BOTTOM, fill=X) lb.pack(side=LEFT, fill=BOTH, expand=1) if len(elements)>0: self.setlistelements(elements) return lb def setlistelements(self, lb, elements): lb.delete(0, END) for element in elements: lb.insert(END, element)<|fim▁end|>
<|file_name|>rendercontext.rs<|end_file_name|><|fim▁begin|>extern crate time; use scene::Scene; use color::Color; use trace::trace; // The render context is the data structure // that holds state about the current render. // // It needs to be thread safe. pub struct RenderContext { image: Vec<Color>, samples: Vec<usize>, pub width: usize, pub height: usize, pub rays_cast: u64,<|fim▁hole|>} pub struct RenderIterator { i: usize, pub width: usize, pub height: usize, pub samples: usize, pub chunk_size: usize, } #[derive(Debug, Copy, Clone, PartialEq)] pub struct RenderableChunk { pub xmin: usize, pub xmax: usize, pub ymin: usize, pub ymax: usize, pub supersamples: usize, } pub struct RenderedChunk { pixels: Vec<Color>, samples: Vec<usize>, rays_cast: u64, } fn format_f64(v: f64) -> String { if v > 1000000. { return format!("{:.2}M", v / 1000000.); } if v > 1000. { return format!("{:.2}K", v / 1000.); } return format!("{:.2}", v); } impl RenderContext { pub fn new(start_time: f64, width:usize, height:usize, progressive_render: bool, filename: &str) -> RenderContext { let output_filename = String::from(filename).replace(".json", ".png"); return RenderContext { image: vec![Color::black(); (width*height) as usize], samples: vec![0; (width*height) as usize], width: width, height: height, rays_cast: 0, start_time, progressive_render: progressive_render, pixels_rendered: 0, output_filename, } } pub fn set_pixel(&mut self, x: usize, y: usize, c:Color, samples: usize) { if x >= self.width || y.saturating_mul(self.width).saturating_add(x) >= self.width * self.height { return; } let i:usize = (y*self.width + x) as usize; self.image[i] = self.image[i] + c.ignore_nan(); self.samples[i] = self.samples[i] + samples; self.pixels_rendered += 1; } pub fn apply_chunk(&mut self, c: &RenderableChunk, p: &RenderedChunk){ let mut i = 0; for y in c.ymin .. c.ymax { for x in c.xmin .. 
c.xmax { self.set_pixel(x, y, p.pixels[i], p.samples[i]); i += 1; } } self.rays_cast += p.rays_cast; } pub fn get_pixel(&self, x:usize, y:usize) -> Color { let i = (y*self.width + x) as usize; return self.image[i] / self.samples[i].max(1) as f64; } /* pub fn get_pixel_array(&self) -> Vec<u8> { let len = (self.width * self.height) as usize; let mut out: Vec<u8> = vec![0; len * 3]; for i in 0 .. len { let (r, g, b) = self.image[i].to_u8(); out.push(r); out.push(g); out.push(b); print!("{} {} {} {} {} \n", i, self.image[i], r, g, b); } return out; } */ pub fn print_stats(&self) { let elapsed = time::precise_time_s() - self.start_time; print!("\n==========================================\n"); print!("| Rays Cast: {}\n", self.rays_cast); print!("| Elapsed Time (s): {:.4}\n", elapsed); print!("| Rays per sec: {:.2}\n", self.rays_cast as f64 / elapsed); print!("==========================================\n"); } pub fn print_scene_stats(&self, s: &Scene){ let elapsed = time::precise_time_s() - self.start_time; println!("# ============== Scene ==================="); println!("| Output: {}x{} {} samples", s.image.width, s.image.height, s.render.supersamples); println!("| -> : {}", self.output_filename); println!("|"); println!("| ----------- Scene Objects --------------"); println!("| Parsed in: {:.4}s", elapsed); println!("| Objects: \n {}", s.objects); println!("| - Primitives: {}\n", s.objects .items .iter() .map(|x| x.geometry.primitives()) .fold(0, |acc, x| acc + x)); println!("# ========================================\n"); } pub fn print_progress(&self, s: &Scene){ let elapsed = time::precise_time_s() - self.start_time; println!("- [{:.0}s] {} rays cast ({} RPS), {} Rays per pixel, {}%, {} threads", elapsed, format_f64(self.rays_cast as f64), format_f64(self.rays_cast as f64 / elapsed), format_f64(self.rays_cast as f64 / self.pixels_rendered as f64), format_f64((self.pixels_rendered as f64 / (self.width * self.height * s.render.supersamples as usize) as f64) * 100.), 
rayon::current_num_threads()); } pub fn iter(&self, s: &Scene) -> impl Iterator<Item=RenderableChunk> + '_ { let width = self.width; let height = self.height; let chunk_size = s.render.chunk_size; let chunk_layers = s.render.supersamples / s.render.samples_per_chunk; let samples = s.render.samples_per_chunk; return (0 .. chunk_layers) .map(move |_x| RenderIterator { i: 0, width, height, chunk_size, samples: samples, }).flatten(); } } impl RenderableChunk { pub fn width(&self) -> usize { return self.xmax - self.xmin; } pub fn render(&self, s: &Scene) -> RenderedChunk { let size = self.width() * (self.ymax - self.ymin); let mut pixels: Vec<Color> = Vec::with_capacity(size); let mut samples: Vec<usize> = Vec::with_capacity(size); let mut rays_cast = 0; for y in self.ymin .. self.ymax { for x in self.xmin .. self.xmax { let (cast, psamples, pixel) = render_pixel(x, y, self.supersamples, &s); pixels.push(pixel); samples.push(psamples); rays_cast += cast as u64; } } return RenderedChunk { pixels, samples, rays_cast } } } impl Iterator for RenderIterator { type Item = RenderableChunk; fn next(&mut self) -> Option<RenderableChunk> { if self.i >= self.width * self.height { return None } // From i (pixel index) find current chunk let y = self.i / self.width; let x = self.i % self.width; if self.height - y > self.chunk_size { if self.width - x > self.chunk_size { self.i = self.i + self.chunk_size; return Some(RenderableChunk { xmin: x, xmax: x + self.chunk_size, ymin: y, ymax: y + self.chunk_size, supersamples: self.samples, }); } else { // Increment down a row self.i = (self.i - x) + (self.width * self.chunk_size); // return remainder of x return Some(RenderableChunk { xmin: x , xmax: self.width, ymin: y, ymax: y + self.chunk_size, supersamples: self.samples, }); } } else { if self.width - x > self.chunk_size { self.i = self.i + self.chunk_size; return Some(RenderableChunk { xmin: x, xmax: x + self.chunk_size, ymin: y, ymax: self.height, supersamples: self.samples, }); } 
else { self.i = (self.i - x) + self.chunk_size * self.width; return Some(RenderableChunk { xmin: x , xmax: self.width, ymin: y, ymax: self.height, supersamples: self.samples, }); } } } } fn render_pixel(x: usize, y: usize, max_samples: usize, s: &Scene) -> (u64, usize, Color) { let mut pixel = Color::black(); let mut cast = 0; let mut samples = 0; // Monte-Carlo method: We sample many times and average. for sx in 0..max_samples { for sy in 0..max_samples { let (rays_cast, c) = trace( &s.camera.get_ray( x as f64 / (s.image.width as f64), y as f64 / (s.image.height as f64), sx as f64 / (max_samples as f64) * 1. / (s.image.width as f64), sy as f64 / (max_samples as f64) * 1. / (s.image.height as f64)) , 0, &s); cast = cast + rays_cast; pixel = pixel + c; samples = samples + 1; } } return (cast, samples, pixel) }<|fim▁end|>
pub start_time: f64, pub progressive_render: bool, pub pixels_rendered: u64, pub output_filename: String,
<|file_name|>glrenderer.cpp<|end_file_name|><|fim▁begin|>#include <list> #include <QGLFunctions> #include <QGLBuffer> #include <QPaintDevice> #include <QSharedPointer> #include <QColor> #include <QMap> #include "Scene/transformation.h" #include "Scene/camera.h" #include "Scene/scene.h" #include "Scene/mesh.h" #include "Scene/meshinstance.h" #include "Scene/helpers.h" #include "Math/matrix4.h" #include "glrenderer.h" #include "glutils.h" #include "shader.h" namespace GLDemo { namespace { /** * \internal Class representing GL buffer data for a particular element type. * Used to cache the buffer data associated with a particular mesh. */ class IndexBufferData { public: IndexBufferData(ElementList::ElementType type, const QGLBuffer& buffer, int numIndices) : m_type(type), m_indexData(buffer), m_numIndices(numIndices) { } // Make these public so they can be cheaply accessed from the cache item. ElementList::ElementType m_type; QGLBuffer m_indexData; int m_numIndices; }; typedef std::list<IndexBufferData> IndexDataList; /** * \internal Class for caching GL mesh data once it's been created. */ class CachedMesh { public: CachedMesh(const QString& meshId) : m_meshId(meshId) {} // Make these public so they can be cheaply accessed from the cache item. QString m_meshId; QGLBuffer m_vertexData; IndexDataList m_indexData; }; } /** * \internal * We extend QGLFunctions so we get access to OpenGL functions here. We extend * it publicly so that shaders can access the class without having to create their * own instance of the QGLFunctions class. 
*/ class GLRendererImpl : public QGLFunctions { public: typedef QMap<QString, CachedMesh> MeshDataCache; GLRendererImpl(const GLRendererImpl&); GLRendererImpl& operator=(const GLRendererImpl&); GLRenderer& m_renderer; QPaintDevice& m_paintDevice; MeshDataCache m_meshCache; int m_width; int m_height; bool m_initialized; Matrix4f m_matProj; Matrix4f m_matView; Matrix4f m_matViewInv; Camera* m_camera; GLRendererImpl(GLRenderer& renderer, QPaintDevice& device); ~GLRendererImpl(); void setCamera(Camera* camera); bool process(const MeshInstance& instance); void setupViewport(int x, int y, int width, int height); bool setupMatrices(Scene& scene); bool preRender(Scene& scene); bool renderScene(Scene& scene); bool postRender(Scene& scene); bool initialize(); bool resize(int width, int height); }; /** * */ GLRendererImpl::GLRendererImpl(GLRenderer& renderer, QPaintDevice& device) : m_renderer(renderer), m_paintDevice(device), m_width(device.width()), m_height(device.height()), m_initialized(false), m_camera(0) { } /** * */ GLRendererImpl::~GLRendererImpl() { // We don't need to worry about cleaning up our allocated QGLBuffers, // as the destructor of the QGLBuffer object does this for us, according // to the Qt documentation. } /** * */ bool GLRendererImpl::setupMatrices(Scene& scene) { // Set up our perspective matrix const float aspectRatio = static_cast<float>(m_width) / m_height; const float fov = m_camera->getFieldOfView(); const float zNear = m_camera->getNearPlaneDistance(); const float zFar = m_camera->getFarPlaneDistance(); m_matProj.makePerspectiveProjectionFOV(fov, aspectRatio, zNear, zFar); m_camera->toViewMatrix(m_matView); m_matViewInv = m_matView.inverse(); return true; } /** * \param width Width of the viewport. * \param height Height of the viewport. 
*/ void GLRendererImpl::setupViewport(int x, int y, int width, int height) { glViewport(x, y, width, height); } /** * */ void GLRendererImpl::setCamera(Camera* camera) {<|fim▁hole|> } /** * \param instance The mesh instance to process * * Processes a mesh instance, generating the appropriate OpenGL data structures * if the mesh hasn't before been rendered. */ bool GLRendererImpl::process(const MeshInstance& instance) { PtrMesh ptrMesh = instance.getMesh(); if (ptrMesh.isNull()) { std::cout << "ERROR: Mesh " << instance.instanceName() << " contents are invalid." << std::endl; return false; } Mesh& mesh = *ptrMesh; // For both our vertices and all our primitives, we check to see whether a buffer exists // for our data, and if not, we create one. MeshDataCache::iterator meshIter = m_meshCache.find(mesh.instanceName()); if (m_meshCache.end() == meshIter) { CachedMesh cachedMesh(mesh.instanceName()); // Qt does shallow copy, so we can copy buffers around in "shallow" manner. // Start by creating our vertex data. { QGLBuffer vbo(QGLBuffer::VertexBuffer); vbo.setUsagePattern(QGLBuffer::StreamDraw); if ( !vbo.create() ) { std::cout << "ERROR: Failed to create vertex buffer object." << std::endl; return false; } vbo.bind(); std::vector<Vertex>& vertices = mesh.getVertices(); vbo.allocate(&vertices.front(), vertices.size() * sizeof(Vertex)); cachedMesh.m_vertexData = vbo; } // Now process the elements. std::list<ElementList>& elementLists = mesh.getElementLists(); for (std::list<ElementList>::iterator elIter = elementLists.begin(); elIter != elementLists.end(); ++elIter) { QGLBuffer vbo(QGLBuffer::IndexBuffer); vbo.setUsagePattern(QGLBuffer::StreamDraw); if ( !vbo.create() ) { std::cout << "ERROR: Failed to create index buffer object." 
<< std::endl; return false; } vbo.bind(); std::vector<unsigned>& indices = elIter->getIndices(); vbo.allocate(&indices.front(), indices.size() * sizeof(unsigned)); cachedMesh.m_indexData.push_front( IndexBufferData(elIter->getElementType(), vbo, indices.size()) ); } meshIter = m_meshCache.insert(mesh.instanceName(), cachedMesh); } // Now that we've created the data, we can bind and render it. CachedMesh& glMesh = *meshIter; // Activate the shader const PtrShader& ptrShader = instance.getShader(); if (ptrShader.isNull()) { std::cout << "ERROR: Mesh instance must have a valid shader in order to be rendered." << std::endl; return false; } // Compute our matrices, starting by obtaining the world matrix. Matrix4f matWorld; instance.getWorldTransformation().toMatrix(matWorld); Matrix4f matWorldView(m_matView * matWorld); Matrix4f matWorldViewInvTranspose(matWorldView.inverse().transpose()); Matrix4f matWorldViewProj(m_matProj * matWorldView); if (!ptrShader->activate(m_matView, matWorldView, matWorldViewInvTranspose, matWorldViewProj)) { std::cout << "ERROR: Failed to activate shader." << std::endl; return false; } // Bind our vertex data to the appropriate attribute locations. int stride = 8 * sizeof(GLfloat); glMesh.m_vertexData.bind(); glVertexAttribPointer(GLRenderer::Position, 3, GL_FLOAT, false, stride, 0); glVertexAttribPointer(GLRenderer::Normal, 3, GL_FLOAT, false, stride, (GLvoid*)(3 * sizeof(GLfloat))); glEnableVertexAttribArray(GLRenderer::Position); glEnableVertexAttribArray(GLRenderer::Normal); // Now render each set of elements. 
IndexBufferData* indices = 0; for (std::list<IndexBufferData>::iterator iIter = glMesh.m_indexData.begin(); iIter != glMesh.m_indexData.end(); ++iIter) { indices = &*iIter; indices->m_indexData.bind(); switch (indices->m_type) { case ElementList::TRI_LIST: glDrawElements(indices->m_type, 3 * indices->m_numIndices, GL_UNSIGNED_INT, 0); break; default: std::cout << "ERROR: Unable to render element type " << indices->m_type << std::endl; } } return GL_GOOD_STATE(); } /** * */ bool GLRendererImpl::preRender(Scene& scene) { // Make sure the relevant initialization has taken place prior to performing any rendering. assert(m_initialized); // Ensure we have a camera set if (!m_camera) { return false; } // Need to setup the view and projection matrices in preparation for rendering. if (!setupMatrices(scene)) return false; #ifndef NDEBUG #ifdef Q_OS_OSX // On newer versions of OSX, it's possible (somehow) to create a widget before creating // a main framebuffer. This causes OpenGL to return the GL_FRAMEBUFFER_UNDEFINED // error, and god knows what problems this causes if we continue to render to a context // with no current draw framebuffer. GLenum framebufferStatus = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER); assert(framebufferStatus != GL_FRAMEBUFFER_UNDEFINED); #endif #endif // Clear the buffers in preparation for redrawing the scene. const QColor& bc = QColor(255, 255, 255, 255); glClearColor( bc.redF(), bc.greenF(), bc.blueF(), bc.alphaF() ); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); return GL_GOOD_STATE(); } /** * */ bool GLRendererImpl::renderScene(Scene &scene) { // Currently, we are rendering directly as we process each item in the scene. This is less than ideal, // as what we'd really like to do is sort each item so that we can efficiently render them. The best way // to do this is to generate a queue of items, sorted according to some pre-generated key. if (!scene.getRootNode().draw(&m_renderer)) { std::cout << "ERROR: Failed to draw scene." 
<< std::endl; return false; } return GL_GOOD_STATE(); } /** * */ bool GLRendererImpl::postRender(Scene& scene) { return GL_GOOD_STATE(); } /** * Initializes the OpenGL context - must be called only after the context has been created. * Cannot be invoked from the constructor. * * The core activity involved in the initialisation are ensuring the required OpenGL version * and OpenGL extensions are supported by and ready to use on the current platform. */ bool GLRendererImpl::initialize() { if (m_initialized) return true; initializeGLFunctions(); // Set up our 'permanently' enabled GL states. glEnable(GL_DEPTH_TEST); glEnable(GL_CULL_FACE); glCullFace(GL_BACK); glFrontFace(GL_CCW); m_initialized = true; return GL_GOOD_STATE(); } /** * */ bool GLRendererImpl::resize(int width, int height) { m_width = width; m_height = height; setupViewport(0, 0, width, height); return true; } //=============================// /** * Creates a new GLRenderer, used for drawing a Scene using OpenGL. * * \note While creating a GLRenderer does not require a current OpenGL context, * deleting one does, as any dedicated resources used by the renderer will potentially be * cleaned up at the time it is deleted. */ GLRenderer::GLRenderer(QPaintDevice& device) : m_pImpl(static_cast<GLRendererImpl*>(0)) { m_pImpl = new GLRendererImpl(*this, device); } /** * */ GLRenderer::~GLRenderer() { delete m_pImpl; } /** * \param camera The camera the renderer should use. */ void GLRenderer::setCamera(Camera* camera) { m_pImpl->setCamera(camera); } /** * \return The current camera. */ Camera* GLRenderer::getCamera() { return m_pImpl->m_camera; } /** * \return The current camera. */ const Camera* GLRenderer::getCamera() const { return m_pImpl->m_camera; } /** * \pre The context must have been made current prior to invoking this function. * \return True if the initialization of the renderer was successful. 
* * This function should be called once only after the creation of the renderer, as it requires that * a GL context exists. It will generally be invoked by the initialize event triggered by the GLWidget * parent of the renderer. If the parent of the renderer does not have such an * initialization event, this function will need to be invoked manually. * * No renderering will occur if initialize has not yet been called on the * renderer. */ bool GLRenderer::initialize() { return m_pImpl->initialize(); } /** * \pre The context must have been made current prior to invoking this function. */ bool GLRenderer::renderScene(Scene& scene) { if (!m_pImpl->preRender(scene)) return false; // Make sure we always post-render if we get past a pre-render bool success = true; if (!m_pImpl->renderScene(scene)) success = false; if (!m_pImpl->postRender(scene)) return false; return success; } /** * \pre The context must have been made current prior to invoking this function. * \param width The desired width of the renderer. * \param height The desired height of the renderer. * * \return true if the resize was successful. The resize may fail for a number of reasons, * but usually only if an invalid size is specified, or it is unable to set up the required * off-screen buffers. */ bool GLRenderer::resize(int width, int height) { return m_pImpl->resize(width, height); } /** * */ int GLRenderer::getWidth() const { return m_pImpl->m_width; } /** * */ int GLRenderer::getHeight() const { return m_pImpl->m_height; } /** * */ bool GLRenderer::process(const MeshInstance& instance) { return m_pImpl->process(instance); } }<|fim▁end|>
m_camera = camera;
<|file_name|>bitcoin_ro_RO.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="ro_RO" version="2.1"> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About Vsync</source> <translation>Despre Vsync</translation> </message> <message> <location line="+39"/> <source>&lt;b&gt;Vsync&lt;/b&gt; version</source> <translation>Versiune &lt;b&gt;Vsync&lt;/b&gt;</translation> </message> <message> <location line="+41"/> <source>Copyright © 2009-2014 The Bitcoin developers Copyright © 2012-2014 The NovaCoin developers Copyright © 2014 The Vsync developers</source> <translation>Copyright © 2009-2014 The Bitcoin developers Copyright © 2012-2014 The NovaCoin developers Copyright © 2014 The Vsync developers</translation> </message> <message> <location line="+15"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or &lt;a href=&quot;http://www.opensource.org/licenses/mit-license.php&quot;&gt;http://www.opensource.org/licenses/mit-license.php&lt;/a&gt;. 
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (&lt;a href=&quot;https://www.openssl.org/&quot;&gt;https://www.openssl.org/&lt;/a&gt;) and cryptographic software written by Eric Young (&lt;a href=&quot;mailto:[email protected]&quot;&gt;[email protected]&lt;/a&gt;) and UPnP software written by Thomas Bernard.</source> <translation type="unfinished"/> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>Address Book</source> <translation>Agendă</translation> </message> <message> <location line="+22"/> <source>Double-click to edit address or label</source> <translation>Dublu-click pentru a edita adresa sau eticheta</translation> </message> <message> <location line="+24"/> <source>Create a new address</source> <translation>Creează o adresă nouă</translation> </message> <message> <location line="+10"/> <source>Copy the currently selected address to the system clipboard</source> <translation>Copiază adresa selectată în clipboard</translation> </message> <message> <location line="-7"/> <source>&amp;New Address</source> <translation>Adresă nouă</translation> </message> <message> <location line="-43"/> <source>These are your Vsync addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source> <translation>Acestea sunt adresele Vsync pentru a primi plăți. 
Poate doriți sa dați o adresa noua fiecarui expeditor pentru a putea ține evidența la cine efectuează plăti.</translation> </message> <message> <location line="+53"/> <source>&amp;Copy Address</source> <translation>&amp;Copiază adresa</translation> </message> <message> <location line="+7"/> <source>Show &amp;QR Code</source> <translation>Arată cod &amp;QR</translation> </message> <message> <location line="+7"/> <source>Sign a message to prove you own a Vsync address</source> <translation>Semnează un mesaj pentru a dovedi că dețineti o adresă Vsync</translation> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation>Semnează &amp;Mesajul</translation> </message> <message> <location line="+17"/> <source>Delete the currently selected address from the list</source> <translation>Sterge adresele curent selectate din lista</translation> </message> <message> <location line="-10"/> <source>Verify a message to ensure it was signed with a specified Vsync address</source> <translation>Verifică un mesaj pentru a vă asigura că a fost semnat cu o anumită adresă Vsync</translation> </message> <message> <location line="+3"/> <source>&amp;Verify Message</source> <translation>&amp;Verifică mesajul</translation> </message> <message> <location line="+10"/> <source>&amp;Delete</source> <translation>Ște&amp;rge</translation> </message> <message> <location filename="../addressbookpage.cpp" line="+66"/> <source>Copy &amp;Label</source> <translation>Copiază &amp;eticheta</translation> </message> <message> <location line="+1"/> <source>&amp;Edit</source> <translation>&amp;Editează</translation> </message> <message> <location line="+248"/> <source>Export Address Book Data</source> <translation>Exportă datele din Agendă</translation> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Valori separate prin virgulă (*.csv)</translation> </message> <message> <location line="+13"/> <source>Error 
exporting</source> <translation>Eroare la exportare</translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation>Nu s-a putut scrie în fișier %1.</translation> </message> </context> <context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+145"/> <source>Label</source> <translation>Etichetă</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Adresă</translation> </message> <message> <location line="+36"/> <source>(no label)</source> <translation>(fără etichetă)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation>Dialogul pentru fraza de acces</translation> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation>Introdu fraza de acces</translation> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation>Frază de acces nouă</translation> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation>Repetă noua frază de acces</translation> </message> <message> <location line="+33"/> <source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source> <translation>Servește pentru a dezactiva sendmoneyl atunci când sistemul de operare este compromis. 
Nu oferă nicio garanție reală.</translation> </message> <message> <location line="+3"/> <source>For staking only</source> <translation>Doar pentru staking</translation> </message> <message> <location filename="../askpassphrasedialog.cpp" line="+38"/> <source>Encrypt wallet</source> <translation>Criptează portofelul</translation> </message> <message> <location line="+7"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>Această acțiune necesită fraza ta de acces pentru deblocarea portofelului.</translation> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation>Deblochează portofelul</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>Această acțiune necesită fraza ta de acces pentru decriptarea portofelului.</translation> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation>Decriptează portofelul.</translation> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation>Schimbă fraza de acces</translation> </message> <message> <location line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> <translation>Introdu vechea și noua parolă pentru portofel.</translation> </message> <message> <location line="+45"/> <source>Confirm wallet encryption</source> <translation>Confirmă criptarea portofelului</translation> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR COINS&lt;/b&gt;!</source> <translation>Atentie: Daca encriptezi portofelul si iti uiti parola, &lt;b&gt;VEI PIERDE TOATA MONEDELE&lt;/b&gt;!</translation> </message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation>Sunteţi sigur că doriţi să criptaţi portofelul electronic?</translation> 
</message> <message> <location line="+15"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>IMPORTANT: Orice copie de siguranta facuta in prealabil portofelului dumneavoastra ar trebui inlocuita cu cea generata cel mai recent fisier criptat al portofelului. Pentru siguranta, copiile de siguranta vechi ale portofelului ne-criptat vor deveni inutile de indata ce veti incepe folosirea noului fisier criptat al portofelului.</translation> </message> <message> <location line="+103"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation>Atentie! Caps Lock este pornit</translation> </message> <message> <location line="-133"/> <location line="+60"/> <source>Wallet encrypted</source> <translation>Portofel criptat</translation> </message> <message> <location line="-140"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;ten or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation type="unfinished"/> </message> <message> <location line="+82"/> <source>Vsync will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source> <translation>Vsync se va inchide pentru a termina procesul de encriptie. Amintiți-vă, criptarea portofelul dumneavoastră nu poate proteja pe deplin monedele dvs. 
de a fi furate de infectarea cu malware a computerului.</translation> </message> <message> <location line="+13"/> <location line="+7"/> <location line="+44"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation>Criptarea portofelului a eșuat</translation> </message> <message> <location line="-56"/> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>Criptarea portofelului a eșuat din cauza unei erori interne. Portofelul tău nu a fost criptat.</translation> </message> <message> <location line="+7"/> <location line="+50"/> <source>The supplied passphrases do not match.</source> <translation>Frazele de acces introduse nu se potrivesc.</translation> </message> <message> <location line="-38"/> <source>Wallet unlock failed</source> <translation>Deblocarea portofelului a eșuat</translation> </message> <message> <location line="+1"/> <location line="+12"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>Fraza de acces introdusă pentru decriptarea portofelului a fost incorectă.</translation> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation>Decriptarea portofelului a eșuat</translation> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation>Parola portofelului electronic a fost schimbată.</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+297"/> <source>Sign &amp;message...</source> <translation>Semnează &amp;mesaj...</translation> </message> <message> <location line="-64"/> <source>Show general overview of wallet</source> <translation>Arată o stare generală de ansamblu a portofelului</translation> </message> <message> <location line="+17"/> <source>&amp;Transactions</source> <translation>&amp;Tranzacții</translation> </message> <message> 
<location line="+1"/> <source>Browse transaction history</source> <translation>Răsfoiește istoricul tranzacțiilor</translation> </message> <message> <location line="+5"/> <source>&amp;Address Book</source> <translation>Agendă</translation> </message> <message> <location line="+1"/> <source>Edit the list of stored addresses and labels</source> <translation>Editează lista de adrese si etichete stocate</translation> </message> <message> <location line="-18"/> <source>Show the list of addresses for receiving payments</source> <translation>Arată lista de adrese pentru primire plăți</translation> </message> <message> <location line="+34"/> <source>E&amp;xit</source> <translation>&amp;Ieșire</translation> </message> <message> <location line="+1"/> <source>Quit application</source> <translation>Închide aplicația</translation> </message> <message> <location line="+4"/> <source>Show information about Vsync</source> <translation>Arată informații despre Vsync</translation> </message> <message> <location line="+2"/> <source>About &amp;Qt</source> <translation>Despre &amp;Qt</translation> </message> <message> <location line="+1"/> <source>Show information about Qt</source> <translation>Arată informații despre Qt</translation> </message> <message> <location line="+2"/> <source>&amp;Options...</source> <translation>&amp;Setări...</translation> </message> <message> <location line="+4"/> <source>&amp;Encrypt Wallet...</source> <translation>Criptează portofelul electronic...</translation> </message> <message> <location line="+2"/> <source>&amp;Backup Wallet...</source> <translation>&amp;Fă o copie de siguranță a portofelului...</translation> </message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation>S&amp;chimbă parola...</translation> </message> <message> <location line="+9"/> <source>&amp;Export...</source> <translation>&amp;Exportă</translation> </message> <message> <location line="-55"/> <source>Send coins to a Vsync address</source> 
<translation>Trimite monede către o adresă Vsync</translation> </message> <message> <location line="+39"/> <source>Modify configuration options for Vsync</source> <translation>Modifică opțiuni de configurare pentru Vsync</translation> </message> <message> <location line="+17"/> <source>Export the data in the current tab to a file</source> <translation>Exportă datele din tab-ul curent într-un fișier</translation> </message> <message> <location line="-13"/> <source>Encrypt or decrypt wallet</source> <translation>Criptează sau decriptează portofelul</translation> </message> <message> <location line="+2"/> <source>Backup wallet to another location</source> <translation>Creează o copie de rezervă a portofelului într-o locație diferită</translation> </message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation>Schimbă fraza de acces folosită pentru criptarea portofelului</translation> </message> <message> <location line="+10"/> <source>&amp;Debug window</source> <translation>Fereastră &amp;debug</translation> </message> <message> <location line="+1"/> <source>Open debugging and diagnostic console</source> <translation>Deschide consola de debug și diagnosticare</translation> </message> <message> <location line="-5"/> <source>&amp;Verify message...</source> <translation>&amp;Verifică mesajul...</translation> </message> <message> <location line="-214"/> <location line="+555"/> <source>Vsync</source> <translation>Vsync</translation> </message> <message> <location line="-555"/> <source>Wallet</source> <translation>Portofelul</translation> </message> <message> <location line="+193"/> <source>&amp;About Vsync</source> <translation>Despre Vsync</translation> </message> <message> <location line="+9"/> <source>&amp;Show / Hide</source> <translation>Arata/Ascunde</translation> </message> <message> <location line="+8"/> <source>Unlock wallet</source> <translation>Deblochează portofelul</translation> </message> <message> 
<location line="+1"/> <source>&amp;Lock Wallet</source> <translation>Blochează portofelul</translation> </message> <message> <location line="+1"/> <source>Lock wallet</source> <translation>Blochează portofelul</translation> </message> <message> <location line="+32"/> <source>&amp;File</source> <translation>&amp;Fișier</translation> </message> <message> <location line="+8"/> <source>&amp;Settings</source> <translation>&amp;Setări</translation> </message> <message> <location line="+8"/> <source>&amp;Help</source> <translation>A&amp;jutor</translation> </message> <message> <location line="+17"/> <source>Tabs toolbar</source> <translation>Bara de file</translation> </message> <message> <location line="+46"/> <location line="+9"/> <source>[testnet]</source> <translation>[testnet]</translation> </message> <message> <location line="+0"/> <location line="+58"/> <source>Vsync client</source> <translation>Clientul Vsync</translation> </message> <message numerus="yes"> <location line="+70"/> <source>%n active connection(s) to Vsync network</source> <translation><numerusform>%n conexiune activă la reteaua Vsync</numerusform><numerusform>%n conexiuni active la reteaua Vsync</numerusform><numerusform>%n conexiuni active la reteaua Vsync</numerusform></translation> </message> <message> <location line="+488"/> <source>Staking.&lt;br&gt;Your weight is %1&lt;br&gt;Network weight is %2&lt;br&gt;Expected time to earn reward is %3</source> <translation>Staking. 
&lt;br&gt;Greutatea este %1&lt;br&gt;Greutatea retelei este %2&lt;br&gt;Timp estimat pentru a castiga recompensa este %3</translation> </message> <message> <location line="+6"/> <source>Not staking because wallet is locked</source> <translation>Nu este in modul stake deoarece portofelul este blocat</translation> </message> <message> <location line="+2"/> <source>Not staking because wallet is offline</source> <translation>Nu este in modul stake deoarece portofelul este offline</translation> </message> <message> <location line="+2"/> <source>Not staking because wallet is syncing</source> <translation>Nu este in modul stake deoarece portofelul se sincronizeaza</translation> </message> <message> <location line="+2"/> <source>Not staking because you don&apos;t have mature coins</source> <translation>Nu este in modul stake deoarece nu sunt destule monede maturate</translation> </message> <message> <location line="-812"/> <source>&amp;Dashboard</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>&amp;Receive</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>&amp;Send</source> <translation type="unfinished"/> </message> <message> <location line="+49"/> <source>&amp;Unlock Wallet...</source> <translation>&amp;Deblochează portofelul</translation> </message> <message> <location line="+277"/> <source>Up to date</source> <translation>Actualizat</translation> </message> <message> <location line="+43"/> <source>Catching up...</source> <translation>Se actualizează...</translation> </message> <message> <location line="+113"/> <source>Confirm transaction fee</source> <translation>Confirmă comisinoul tranzacției</translation> </message> <message> <location line="+27"/> <source>Sent transaction</source> <translation>Tranzacție expediată</translation> </message> <message> <location line="+1"/> <source>Incoming transaction</source> <translation>Tranzacție recepționată</translation> </message> <message> 
<location line="+1"/> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation>Data: %1 Suma: %2 Tipul: %3 Adresa: %4 </translation> </message> <message> <location line="+100"/> <location line="+15"/> <source>URI handling</source> <translation>Manipulare URI</translation> </message> <message> <location line="-15"/> <location line="+15"/> <source>URI can not be parsed! This can be caused by an invalid Vsync address or malformed URI parameters.</source> <translation>URI nu poate fi parsatt! Cauza poate fi o adresa Vsync invalidă sau parametrii URI malformați.</translation> </message> <message> <location line="+9"/> <source>Wallet is &lt;b&gt;not encrypted&lt;/b&gt;</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>Portofelul este &lt;b&gt;criptat&lt;/b&gt; iar în momentul de față este &lt;b&gt;deblocat&lt;/b&gt;</translation> </message> <message> <location line="+8"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>Portofelul este &lt;b&gt;criptat&lt;/b&gt; iar în momentul de față este &lt;b&gt;blocat&lt;/b&gt;</translation> </message> <message> <location line="+24"/> <source>Backup Wallet</source> <translation>Fă o copie de siguranță a portofelului</translation> </message> <message> <location line="+0"/> <source>Wallet Data (*.dat)</source> <translation>Date portofel(*.dat)</translation> </message> <message> <location line="+3"/> <source>Backup Failed</source> <translation>Copia de rezerva a esuat</translation> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to the new location.</source> <translation>Eroare la încercarea de a salva datele portofelului în noua locaţie.</translation> </message> <message numerus="yes"> <location line="+91"/> <source>%n second(s)</source> <translation><numerusform>%n 
secundă</numerusform><numerusform>%n secunde</numerusform><numerusform>%n secunde</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n minute(s)</source> <translation><numerusform>%n minut</numerusform><numerusform>%n minute</numerusform><numerusform>%n minute</numerusform></translation> </message> <message numerus="yes"> <location line="-429"/> <location line="+433"/> <source>%n hour(s)</source> <translation><numerusform>%n oră</numerusform><numerusform>%n ore</numerusform><numerusform>%n ore</numerusform></translation> </message> <message> <location line="-456"/> <source>Processed %1 blocks of transaction history.</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+27"/> <location line="+433"/> <source>%n day(s)</source> <translation><numerusform>%n zi</numerusform><numerusform>%n zile</numerusform><numerusform>%n zile</numerusform></translation> </message> <message numerus="yes"> <location line="-429"/> <location line="+6"/> <source>%n week(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+0"/> <source>%1 and %2</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+0"/> <source>%n year(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+5"/> <source>%1 behind</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Last received block was generated %1 ago.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Transactions after this will not yet be visible.</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Error</source> <translation type="unfinished"/> </message> <message> 
<location line="+3"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Information</source> <translation type="unfinished"/> </message> <message> <location line="+69"/> <source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source> <translation type="unfinished"/> </message> <message> <location line="+324"/> <source>Not staking</source> <translation>Not staking</translation> </message> <message> <location filename="../bitcoin.cpp" line="+104"/> <source>A fatal error occurred. Vsync can no longer continue safely and will quit.</source> <translation>A apărut o eroare fatală. Vsync nu mai poate continua în condiții de siguranță și va iesi.</translation> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+119"/> <source>Network Alert</source> <translation>Alertă rețea</translation> </message> </context> <context> <name>CoinControlDialog</name> <message> <location filename="../forms/coincontroldialog.ui" line="+14"/> <source>Coin Control</source> <translation>Controlează moneda</translation> </message> <message> <location line="+31"/> <source>Quantity:</source> <translation>Cantitate:</translation> </message> <message> <location line="+32"/> <source>Bytes:</source> <translation>Octeţi:</translation> </message> <message> <location line="+48"/> <source>Amount:</source> <translation>Sumă:</translation> </message> <message> <location line="+48"/> <source>Fee:</source> <translation>Taxa:</translation> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation>Ieşire minimă: </translation> </message> <message> <location filename="../coincontroldialog.cpp" line="+493"/> <source>no</source> <translation>nu</translation> </message> <message> <location 
filename="../forms/coincontroldialog.ui" line="+51"/> <source>After Fee:</source> <translation>După taxe:</translation> </message> <message> <location line="+35"/> <source>Change:</source> <translation>Schimb:</translation> </message> <message> <location line="+69"/> <source>(un)select all</source> <translation>(de)selectaţi tot</translation> </message> <message> <location line="+13"/> <source>Tree mode</source> <translation>Modul arborescent</translation> </message> <message> <location line="+16"/> <source>List mode</source> <translation>Modul lista</translation> </message> <message> <location line="+45"/> <source>Amount</source> <translation>Sumă</translation> </message> <message> <location line="+5"/> <source>Label</source> <translation>Etichetă</translation> </message> <message> <location line="+5"/> <source>Address</source> <translation>Adresă</translation> </message> <message> <location line="+5"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+5"/> <source>Confirmations</source> <translation>Confirmări</translation> </message> <message> <location line="+3"/> <source>Confirmed</source> <translation>Confirmat</translation> </message> <message> <location line="+5"/> <source>Priority</source> <translation>Prioritate</translation> </message> <message> <location filename="../coincontroldialog.cpp" line="-456"/> <source>Copy address</source> <translation>Copiază adresa</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Copiază eticheta</translation> </message> <message> <location line="+1"/> <location line="+26"/> <source>Copy amount</source> <translation>Copiază suma</translation> </message> <message> <location line="-25"/> <source>Copy transaction ID</source> <translation>Copiază ID tranzacție</translation> </message> <message> <location line="+24"/> <source>Copy quantity</source> <translation>Copiaţi quantitea</translation> </message> <message> <location line="+2"/> 
<source>Copy fee</source> <translation>Copiaţi taxele</translation> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation>Copiaţi după taxe</translation> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation>Copiaţi octeţi</translation> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation>Copiaţi ieşire minimă:</translation> </message> <message> <location line="+1"/> <source>Copy change</source> <translation>Copiaţi schimb</translation> </message> <message> <location line="+423"/> <source>DUST</source> <translation>DUST</translation> </message> <message> <location line="+0"/> <source>yes</source> <translation>da</translation> </message> <message> <location line="+9"/> <source>This label turns red, if the transaction size is bigger than 10000 bytes. This means a fee of at least %1 per kb is required. Can vary +/- 1 Byte per input.</source> <translation>Aceasta eticheta se inroseste daca marimea tranzactiei este mai mare de 10000 bytes. Acest lucru inseamna ca este nevoie de o taxa de cel putin %1 pe kb Poate varia +/- 1 Byte pe imput.</translation> </message> <message> <location line="+1"/> <source>This label turns red, if any recipient receives an amount smaller than %1. This means a fee of at least %2 is required. Amounts below 0.546 times the minimum relay fee are shown as DUST.</source> <translation>Aceasta eticheta se inroseste daca oricare din contacte primeste o suma mai mica decat %1. Acest lucru inseamna ca un comision de cel putin %2 este necesar. Sume mai mici decat 0.546 ori minimul comisionului de relay sunt afisate ca DUST</translation> </message> <message> <location line="+1"/> <source>This label turns red, if the change is smaller than %1. This means a fee of at least %2 is required.</source> <translation>Această eticheta se înroseste dacă schimbul este mai mic de %1. 
Acest lucru înseamnă că o taxă de cel puțin %2 este necesară</translation> </message> <message> <location line="+35"/> <location line="+58"/> <source>(no label)</source> <translation>(fără etichetă)</translation> </message> <message> <location line="-9"/> <source>change from %1 (%2)</source> <translation>schimbă la %1(%2)</translation> </message> <message> <location line="+1"/> <source>(change)</source> <translation>(schimb)</translation> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation>Editează adresa</translation> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation>&amp;Etichetă</translation> </message> <message> <location line="+10"/> <source>The label associated with this address book entry</source> <translation>Eticheta asociată cu această intrare în agendă</translation> </message> <message> <location line="+7"/> <source>&amp;Address</source> <translation>&amp;Adresă</translation> </message> <message> <location line="+10"/> <source>The address associated with this address book entry. This can only be modified for sending addresses.</source> <translation>Adresa asociată cu această intrare în agendă. 
Acest lucru poate fi modificat numai pentru adresele de trimitere.</translation> </message> <message> <location filename="../editaddressdialog.cpp" line="+21"/> <source>New receiving address</source> <translation>Noua adresă de primire</translation> </message> <message> <location line="+4"/> <source>New sending address</source> <translation>Noua adresă de trimitere</translation> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation>Editează adresa de primire</translation> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation>Editează adresa de trimitere</translation> </message> <message> <location line="+76"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation>Adresa introdusă &quot;%1&quot; se află deja în lista de adrese.</translation> </message> <message> <location line="-5"/> <source>The entered address &quot;%1&quot; is not a valid Vsync address.</source> <translation>Adresa introdusă &quot;%1&quot; nu este o adresă Vsync validă</translation> </message> <message> <location line="+10"/> <source>Could not unlock wallet.</source> <translation>Portofelul nu a putut fi deblocat.</translation> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation>Generarea noii chei a eșuat.</translation> </message> </context> <context> <name>GUIUtil::HelpMessageBox</name> <message> <location filename="../guiutil.cpp" line="+426"/> <location line="+12"/> <source>Vsync-Qt</source> <translation>Vsync-Qt</translation> </message> <message> <location line="-12"/> <source>version</source> <translation>versiune</translation> </message> <message> <location line="+2"/> <source>Usage:</source> <translation>Utilizare:</translation> </message> <message> <location line="+1"/> <source>command-line options</source> <translation>Optiuni linie de comanda</translation> </message> <message> <location line="+4"/> <source>UI 
options</source> <translation>Setări UI</translation> </message> <message> <location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation>Setează limba, de exemplu: &quot;de_DE&quot; (inițial: setare locală)</translation> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation>Pornește miniaturizat</translation> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation>Afișează ecran splash la pornire (implicit: 1)</translation> </message> </context> <context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation>Setări</translation> </message> <message> <location line="+16"/> <source>&amp;Main</source> <translation>&amp;Principal</translation> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source> <translation>Comision de tranzacție opțional pe kB, care vă ajută ca tranzacțiile sa fie procesate rapid. Majoritatea tranzactiilor sunt de 1 kB. 
Comision de 0.01 recomandat</translation> </message> <message> <location line="+15"/> <source>Pay transaction &amp;fee</source> <translation>Plăteşte comision pentru tranzacţie &amp;f</translation> </message> <message> <location line="+31"/> <source>Reserved amount does not participate in staking and is therefore spendable at any time.</source> <translation>Suma rezervată nu participă la maturare și, prin urmare, se poate cheltui în orice moment.</translation> </message> <message> <location line="+15"/> <source>Reserve</source> <translation>Rezervă</translation> </message> <message> <location line="+31"/> <source>Automatically start Vsync after logging in to the system.</source> <translation>Pornește Vsync imediat după logarea în sistem</translation> </message> <message> <location line="+3"/> <source>&amp;Start Vsync on system login</source> <translation>&amp;Pornește Vsync la logarea în sistem</translation> </message> <message> <location line="+21"/> <source>&amp;Network</source> <translation>&amp;Rețea</translation> </message> <message> <location line="+6"/> <source>Automatically open the Vsync client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation>Deschide automat portul pentru clientul Vsync pe router. Acest lucru este posibil doar dacă routerul suporta UPnP si este activat</translation> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation>Mapeaza portul folosind &amp;UPnP</translation> </message> <message> <location line="+19"/> <source>Proxy &amp;IP:</source> <translation>Proxy &amp;IP:</translation> </message> <message> <location line="+19"/> <source>IP address of the proxy (e.g. 127.0.0.1)</source> <translation>Adresa IP a proxy-ului(ex. 127.0.0.1)</translation> </message> <message> <location line="+7"/> <source>&amp;Port:</source> <translation>&amp;Port:</translation> </message> <message> <location line="+19"/> <source>Port of the proxy (e.g. 
9050)</source> <translation>Portul pe care se concetează proxy serverul (de exemplu: 9050)</translation> </message> <message> <location line="-57"/> <source>Connect to the Vsync network through a SOCKS5 proxy (e.g. when connecting through Tor).</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS5 proxy:</source> <translation type="unfinished"/> </message> <message> <location line="+90"/> <source>&amp;Window</source> <translation>&amp;Fereastra</translation> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation>Afişează doar un icon in tray la ascunderea ferestrei</translation> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>&amp;M Ascunde în tray în loc de taskbar</translation> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation>Ascunde fereastra în locul părăsirii programului în momentul închiderii ferestrei. Când acestă opţiune e activă, aplicaţia se va opri doar în momentul selectării comenzii Quit din menu.</translation> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation>&amp;i Ascunde fereastra în locul închiderii programului</translation> </message> <message> <location line="+21"/> <source>&amp;Display</source> <translation>&amp;Afişare</translation> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation>Interfata &amp; limba userului</translation> </message> <message> <location line="+13"/> <source>The user interface language can be set here. This setting will take effect after restarting Vsync.</source> <translation>Limba interfeței utilizator poate fi setat aici. 
Această setare va avea efect după repornirea Vsync.</translation> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation>&amp;Unitatea de măsură pentru afişarea sumelor:</translation> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>Alege subdiviziunea folosită la afişarea interfeţei şi la trimiterea de bitcoin.</translation> </message> <message> <location line="+9"/> <source>Whether to show coin control features or not.</source> <translation>Dacă să se afişeze controlul caracteristicilor monedei sau nu.</translation> </message> <message> <location line="+3"/> <source>Display coin &amp;control features (experts only!)</source> <translation>Afiseaza &amp;caracteristiclei de control ale monedei(numai experti!)</translation> </message> <message> <location line="+7"/> <source>Use black visual theme (requires restart)</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;OK</source> <translation>&amp; OK</translation> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation>&amp; Renunta</translation> </message> <message> <location line="+10"/> <source>&amp;Apply</source> <translation>&amp;Aplica</translation> </message> <message> <location filename="../optionsdialog.cpp" line="+47"/> <source>default</source> <translation>Initial</translation> </message> <message> <location line="+147"/> <location line="+9"/> <source>Warning</source> <translation>Avertizare</translation> </message> <message> <location line="-9"/> <location line="+9"/> <source>This setting will take effect after restarting Vsync.</source> <translation>Aceasta setare va avea efect dupa repornirea Vsync.</translation> </message> <message> <location line="+29"/> <source>The supplied proxy address is invalid.</source> <translation>Adresa bitcoin pe care a-ti specificat-o este 
invalida</translation> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+14"/> <source>Form</source> <translation>Form</translation> </message> <message> <location line="+46"/> <location line="+247"/> <source>The displayed information may be out of date. Your wallet automatically synchronizes with the Vsync network after a connection is established, but this process has not completed yet.</source> <translation>Informatia afisata poate fi depasita. Portofel se sincronizează automat cu rețeaua Vsync după ce se stabilește o conexiune, dar acest proces nu s-a finalizat încă.</translation> </message> <message> <location line="-173"/> <source>Stake:</source> <translation>Stake:</translation> </message> <message> <location line="+32"/> <source>Unconfirmed:</source> <translation>Neconfirmat:</translation> </message> <message> <location line="-113"/> <source>Wallet</source> <translation>Portofel</translation> </message> <message> <location line="+49"/> <source>Spendable:</source> <translation>Cheltuibil:</translation> </message> <message> <location line="+16"/> <source>Your current spendable balance</source> <translation>Balanța ta curentă de cheltuieli</translation> </message> <message> <location line="+80"/> <source>Immature:</source> <translation>Nematurizat:</translation> </message> <message> <location line="+13"/> <source>Mined balance that has not yet matured</source> <translation>Balanta minata care nu s-a maturizat inca</translation> </message> <message> <location line="+23"/> <source>Total:</source> <translation>Total:</translation> </message> <message> <location line="+16"/> <source>Your current total balance</source> <translation>Balanța totală curentă</translation> </message> <message> <location line="+50"/> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation>&lt;b&gt;Tranzacții recente&lt;/b&gt;</translation> </message> <message> <location line="-118"/> <source>Total of 
transactions that have yet to be confirmed, and do not yet count toward the current balance</source> <translation>Total al tranzacțiilor care nu au fost confirmate încă și nu contează față de balanța curentă</translation> </message> <message> <location line="-32"/> <source>Total of coins that was staked, and do not yet count toward the current balance</source> <translation>Totalul de monede care au fost in stake si nu sunt numarate in balanta curenta</translation> </message> <message> <location filename="../overviewpage.cpp" line="+116"/> <location line="+1"/> <source>out of sync</source> <translation>Nu este sincronizat</translation> </message> </context> <context> <name>PaymentServer</name> <message> <location filename="../paymentserver.cpp" line="+107"/> <source>Cannot start vsync: click-to-pay handler</source> <translation type="unfinished"/> </message> </context> <context> <name>QRCodeDialog</name> <message> <location filename="../forms/qrcodedialog.ui" line="+14"/> <source>QR Code Dialog</source> <translation>Dialog cod QR</translation> </message> <message> <location line="+59"/> <source>Request Payment</source> <translation>Cerere de plată</translation> </message> <message> <location line="+56"/> <source>Amount:</source> <translation>Cantitate:</translation> </message> <message> <location line="-44"/> <source>Label:</source> <translation>Etichetă</translation> </message> <message> <location line="+19"/> <source>Message:</source> <translation>Mesaj:</translation> </message> <message> <location line="+71"/> <source>&amp;Save As...</source> <translation>&amp;Salvează ca...</translation> </message> <message> <location filename="../qrcodedialog.cpp" line="+62"/> <source>Error encoding URI into QR Code.</source> <translation>Eroare la codarea URl-ului în cod QR.</translation> </message> <message> <location line="+40"/> <source>The entered amount is invalid, please check.</source> <translation>Suma introdusă nu este validă, vă rugăm să verificați.</translation> 
</message> <message> <location line="+23"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation>URI rezultat este prea lung, încearcă să reduci textul pentru etichetă / mesaj.</translation> </message> <message> <location line="+25"/> <source>Save QR Code</source> <translation>Salvează codul QR</translation> </message> <message> <location line="+0"/> <source>PNG Images (*.png)</source> <translation>Imagini PNG(*png)</translation> </message> </context> <context> <name>RPCConsole</name> <message> <location filename="../forms/rpcconsole.ui" line="+46"/> <source>Client name</source> <translation>Nume client</translation> </message> <message> <location line="+10"/> <location line="+23"/> <location line="+26"/> <location line="+23"/> <location line="+23"/> <location line="+36"/> <location line="+53"/> <location line="+23"/> <source>N/A</source> <translation>N/A</translation> </message> <message> <location line="-194"/> <source>Client version</source> <translation>Versiune client</translation> </message> <message> <location line="-45"/> <source>&amp;Information</source> <translation>&amp;Informație</translation> </message> <message> <location line="+68"/> <source>Using OpenSSL version</source> <translation>Foloseste versiunea OpenSSL</translation> </message> <message> <location line="+49"/> <source>Startup time</source> <translation>Durata pornirii</translation> </message> <message> <location line="+29"/> <source>Network</source> <translation>Rețea</translation> </message> <message> <location line="+7"/> <source>Number of connections</source> <translation>Numărul de conexiuni</translation> </message> <message> <location line="+23"/> <source>On testnet</source> <translation>Pe testnet</translation> </message> <message> <location line="+23"/> <source>Block chain</source> <translation>Lanț de blocuri</translation> </message> <message> <location line="+7"/> <source>Current number of blocks</source> <translation>Numărul curent de 
blocuri</translation> </message> <message> <location line="+197"/> <source>&amp;Network Traffic</source> <translation type="unfinished"/> </message> <message> <location line="+52"/> <source>&amp;Clear</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Totals</source> <translation type="unfinished"/> </message> <message> <location line="+64"/> <location filename="../rpcconsole.cpp" line="+352"/> <source>In:</source> <translation type="unfinished"/> </message> <message> <location line="+80"/> <location filename="../rpcconsole.cpp" line="+1"/> <source>Out:</source> <translation type="unfinished"/> </message> <message> <location line="-383"/> <source>Last block time</source> <translation>Data ultimului bloc</translation> </message> <message> <location line="+52"/> <source>&amp;Open</source> <translation>&amp;Deschide</translation> </message> <message> <location line="+16"/> <source>Command-line options</source> <translation>Optiuni linii de comandă</translation> </message> <message> <location line="+7"/> <source>Show the Vsync-Qt help message to get a list with possible Vsync command-line options.</source> <translation>Afișa mesajul de ajutor Vsync-Qt pentru a obține o listă cu posibile opțiuni de linie de comandă Vsync.</translation> </message> <message> <location line="+3"/> <source>&amp;Show</source> <translation>&amp;Arată</translation> </message> <message> <location line="+24"/> <source>&amp;Console</source> <translation>&amp;Consolă</translation> </message> <message> <location line="-237"/> <source>Build date</source> <translation>Construit la data</translation> </message> <message> <location line="-104"/> <source>Vsync - Debug window</source> <translation>Vsync - fereastră depanare</translation> </message> <message> <location line="+25"/> <source>Vsync Core</source> <translation>Vsync Core</translation> </message> <message> <location line="+256"/> <source>Debug log file</source> <translation>Loguri debug</translation> 
</message> <message> <location line="+7"/> <source>Open the Vsync debug log file from the current data directory. This can take a few seconds for large log files.</source> <translation>Deschideti fisierul de depanare Vsync din folderul curent. Acest lucru poate dura cateva secunde pentru fisiere de log mari.</translation> </message> <message> <location line="+102"/> <source>Clear console</source> <translation>Curăță consola</translation> </message> <message> <location filename="../rpcconsole.cpp" line="-28"/> <source>Welcome to the Vsync RPC console.</source> <translation>Bine ati venit la consola Vsync RPC.</translation> </message> <message> <location line="+1"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation>Foloseste sagetile sus si jos pentru a naviga in istoric si &lt;b&gt;Ctrl-L&lt;/b&gt; pentru a curata.</translation> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation>Scrie &lt;b&gt;help&lt;/b&gt; pentru a vedea comenzile disponibile</translation> </message> <message> <location line="+134"/> <source>%1 B</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 KB</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 MB</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 GB</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>%1 m</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>%1 h</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 h %2 m</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location 
filename="../sendcoinsdialog.cpp" line="+179"/> <location line="+5"/> <location line="+5"/> <location line="+5"/> <location line="+6"/> <location line="+5"/> <location line="+5"/> <source>Send Coins</source> <translation>Trimite monede</translation> </message> <message> <location line="+76"/> <source>Coin Control Features</source> <translation>Caracteristici control ale monedei</translation> </message> <message> <location line="+20"/> <source>Inputs...</source> <translation>Intrări</translation> </message> <message> <location line="+7"/> <source>automatically selected</source> <translation>Selectie automatică</translation> </message> <message> <location line="+19"/> <source>Insufficient funds!</source> <translation>Fonduri insuficiente!</translation> </message> <message> <location line="+77"/> <source>Quantity:</source> <translation>Cantitate:</translation> </message> <message> <location line="+22"/> <location line="+35"/> <source>0</source> <translation>0</translation> </message> <message> <location line="-19"/> <source>Bytes:</source> <translation>Octeţi:</translation> </message> <message> <location line="+51"/> <source>Amount:</source> <translation>Sumă:</translation> </message> <message> <location line="+54"/> <source>Fee:</source> <translation>Taxa:</translation> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation>Ieşire minimă: </translation> </message> <message> <location line="+19"/> <source>no</source> <translation>nu</translation> </message> <message> <location line="+32"/> <source>After Fee:</source> <translation>După taxe:</translation> </message> <message> <location line="+35"/> <source>Change</source> <translation>Schimbă:</translation> </message> <message> <location line="+50"/> <source>custom change address</source> <translation>personalizează schimbarea adresei</translation> </message> <message> <location line="+106"/> <source>Send to multiple recipients at once</source> <translation>Trimite simultan către mai 
mulți destinatari</translation> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation>&amp;Adaugă destinatar</translation> </message> <message> <location line="+16"/> <source>Remove all transaction fields</source> <translation>Scoateți toate câmpurile de tranzacții</translation> </message> <message> <location line="+3"/> <source>Clear &amp;All</source> <translation>Șterge &amp;tot</translation> </message> <message> <location line="+24"/> <source>Balance:</source> <translation>Balanță:</translation> </message> <message> <location line="+47"/> <source>Confirm the send action</source> <translation>Confirmă operațiunea de trimitere</translation> </message> <message> <location line="+3"/> <source>S&amp;end</source> <translation>&amp;S Trimite</translation> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-171"/> <source>Enter a Vsync address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation>Introduceți o adresă Vsync (ex: B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</translation> </message> <message> <location line="+15"/> <source>Copy quantity</source> <translation>Copiaţi cantitatea</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Copiază suma</translation> </message> <message> <location line="+1"/> <source>Copy fee</source> <translation>Copiaţi taxele</translation> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation>Copiaţi după taxe</translation> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation>Copiaţi octeţi</translation> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation>Copiaţi ieşire minimă:</translation> </message> <message> <location line="+1"/> <source>Copy change</source> <translation>Copiaţi schimb</translation> </message> <message> <location line="+85"/> <source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source> <translation>&lt;b&gt;%1&lt;/b&gt; 
to %2 (%3)</translation> </message> <message> <location line="+5"/> <source>Confirm send coins</source> <translation>Confirmă trimiterea de monede</translation> </message> <message> <location line="+1"/> <source>Are you sure you want to send %1?</source> <translation>Sunteți sigur că doriți să trimiteți %1?</translation> </message> <message> <location line="+0"/> <source> and </source> <translation>și</translation> </message> <message> <location line="+29"/> <source>The recipient address is not valid, please recheck.</source> <translation>Adresa destinatarului nu este validă, vă rugăm să o verificaţi.</translation> </message> <message> <location line="+5"/> <source>The amount to pay must be larger than 0.</source> <translation>Suma de plată trebuie să fie mai mare decât 0.</translation> </message> <message> <location line="+5"/> <source>The amount exceeds your balance.</source> <translation>Suma depășește soldul contului.</translation> </message> <message> <location line="+5"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>Totalul depășește soldul contului dacă se include și plata comisionului de %1.</translation> </message> <message> <location line="+6"/> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation>S-a descoperit o adresă care figurează de două ori. Expedierea se poate realiza către fiecare adresă doar o singură dată pe operațiune.</translation> </message> <message> <location line="+5"/> <source>Error: Transaction creation failed!</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation>Eroare: tranzacția a fost respinsă. 
Acest lucru s-ar putea întâmpla în cazul în care unele dintre monedele din portofel au fost deja cheltuite, cum si cum ați utilizat o copie a wallet.dat și monedele au fost cheltuite în copie dar nu au fost marcate ca și cheltuite aici.</translation> </message> <message> <location line="+241"/> <source>WARNING: Invalid Vsync address</source> <translation>Atenție: Adresă Vsync invalidă</translation> </message> <message> <location line="+13"/> <source>(no label)</source> <translation>(fără etichetă)</translation> </message> <message> <location line="+4"/> <source>WARNING: unknown change address</source> <translation>ATENTIE: adresa schimb necunoscuta</translation> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+14"/> <source>Form</source> <translation>Formular</translation> </message> <message> <location line="+15"/> <source>A&amp;mount:</source> <translation>Su&amp;mă:</translation> </message> <message> <location line="+13"/> <source>Pay &amp;To:</source> <translation>Plătește că&amp;tre:</translation> </message> <message> <location line="+34"/> <source>The address to send the payment to (e.g. 
B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+60"/> <location filename="../sendcoinsentry.cpp" line="+26"/> <source>Enter a label for this address to add it to your address book</source> <translation>Introdu o etichetă pentru această adresă pentru a fi adăugată în lista ta de adrese</translation> </message> <message> <location line="-78"/> <source>&amp;Label:</source> <translation>&amp;Etichetă:</translation> </message> <message> <location line="+28"/> <source>Choose address from address book</source> <translation>Alegeti adresa din agenda</translation> </message> <message> <location line="+10"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation>Lipește adresa din clipboard</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+7"/> <source>Remove this recipient</source> <translation>Scoateti acest destinatar</translation> </message> <message> <location filename="../sendcoinsentry.cpp" line="+1"/> <source>Enter a Vsync address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation>Introduceți o adresă Vsync(ex:B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</translation> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation>Semnatura- Semneaza/verifica un mesaj</translation> </message> <message> <location line="+13"/> <location line="+124"/> <source>&amp;Sign Message</source> <translation>Semneaza Mesajul</translation> </message> <message> <location line="-118"/> <source>You can sign messages with your addresses to prove you own them. 
Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation>Puteti semna mesaje cu adresa dumneavoastra pentru a demostra ca sunteti proprietarul lor. Aveti grija sa nu semnati nimic vag, deoarece atacurile de tip phishing va pot pacali sa le transferati identitatea. Semnati numai declaratiile detaliate cu care sunteti deacord.</translation> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation>Adresa cu care semnati mesajul(ex. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</translation> </message> <message> <location line="+10"/> <location line="+203"/> <source>Choose an address from the address book</source> <translation>Alegeti o adresa din agenda</translation> </message> <message> <location line="-193"/> <location line="+203"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="-193"/> <source>Paste address from clipboard</source> <translation>Lipiţi adresa copiată in clipboard.</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation>Introduce mesajul pe care vrei sa il semnezi, aici.</translation> </message> <message> <location line="+24"/> <source>Copy the current signature to the system clipboard</source> <translation>Copiaza semnatura curenta in clipboard-ul sistemului</translation> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this Vsync address</source> <translation>Semnează un mesaj pentru a dovedi că dețineti o adresă Vsync</translation> </message> <message> <location line="+17"/> <source>Reset all sign message fields</source> <translation>Reseteaza toate spatiile mesajelor 
semnate.</translation> </message> <message> <location line="+3"/> <location line="+146"/> <source>Clear &amp;All</source> <translation>Şterge &amp;tot</translation> </message> <message> <location line="-87"/> <location line="+70"/> <source>&amp;Verify Message</source> <translation>Verifica mesajul</translation> </message> <message> <location line="-64"/> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation>Introduceti adresa de semnatura, mesajul (asigurati-va ca ati copiat spatiile, taburile etc. exact) si semnatura dedesubt pentru a verifica mesajul. Aveti grija sa nu cititi mai mult in semnatura decat mesajul in sine, pentru a evita sa fiti pacaliti de un atac de tip man-in-the-middle.</translation> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation>Adresa cu care a fost semnat mesajul(ex. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</translation> </message> <message> <location line="+40"/> <source>Verify the message to ensure it was signed with the specified Vsync address</source> <translation>Verifică un mesaj pentru a vă asigura că a fost semnat cu o anumită adresă Vsync</translation> </message> <message> <location line="+17"/> <source>Reset all verify message fields</source> <translation>Reseteaza toate spatiile mesajelor semnate.</translation> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+27"/> <location line="+3"/> <source>Enter a Vsync address (e.g. 
B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation>Introduceți o adresă Vsync(ex:B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</translation> </message> <message> <location line="-2"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation>Click &quot;Semneaza mesajul&quot; pentru a genera semnatura</translation> </message> <message> <location line="+3"/> <source>Enter Vsync signature</source> <translation>Introduceti semnatura Vsync</translation> </message> <message> <location line="+85"/> <location line="+81"/> <source>The entered address is invalid.</source> <translation>Adresa introdusa nu este valida</translation> </message> <message> <location line="-81"/> <location line="+8"/> <location line="+73"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation>Te rugam verifica adresa si introduce-o din nou</translation> </message> <message> <location line="-81"/> <location line="+81"/> <source>The entered address does not refer to a key.</source> <translation>Adresa introdusa nu se refera la o cheie.</translation> </message> <message> <location line="-73"/> <source>Wallet unlock was cancelled.</source> <translation>Deblocarea portofelului a fost intrerupta</translation> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation>Cheia privata pentru adresa introdusa nu este valida.</translation> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation>Semnarea mesajului a esuat</translation> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation>Mesaj Semnat!</translation> </message> <message> <location line="+59"/> <source>The signature could not be decoded.</source> <translation>Aceasta semnatura nu a putut fi decodata</translation> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> 
<translation>Verifica semnatura si incearca din nou</translation> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation>Semnatura nu seamana!</translation> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation>Verificarea mesajului a esuat</translation> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation>Mesaj verificat</translation> </message> </context> <context> <name>TrafficGraphWidget</name> <message> <location filename="../trafficgraphwidget.cpp" line="+75"/> <source>KB/s</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionDesc</name> <message> <location filename="../transactiondesc.cpp" line="+25"/> <source>Open until %1</source> <translation>Deschis până la %1</translation> </message> <message> <location line="+6"/> <source>conflicted</source> <translation>conflictual</translation> </message> <message> <location line="+2"/> <source>%1/offline</source> <translation>%1/deconectat</translation> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation>%1/neconfirmat</translation> </message> <message> <location line="+2"/> <source>%1 confirmations</source> <translation>%1 confirmări</translation> </message> <message> <location line="+17"/> <source>Status</source> <translation>Stare</translation> </message> <message numerus="yes"> <location line="+7"/> <source>, broadcast through %n node(s)</source> <translation><numerusform>, distribuit prin %n nod</numerusform><numerusform>, distribuit prin %n noduri</numerusform><numerusform>, distribuit prin %n de noduri</numerusform></translation> </message> <message> <location line="+4"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+7"/> <source>Source</source> <translation>Sursa</translation> </message> <message> <location line="+0"/> 
<source>Generated</source> <translation>Generat</translation> </message> <message> <location line="+5"/> <location line="+13"/> <source>From</source> <translation>De la</translation> </message> <message> <location line="+1"/> <location line="+19"/> <location line="+58"/> <source>To</source> <translation>Către</translation> </message> <message> <location line="-74"/> <location line="+2"/> <source>own address</source> <translation>Adresa posedata</translation> </message> <message> <location line="-2"/> <source>label</source> <translation>etichetă</translation> </message> <message> <location line="+34"/> <location line="+12"/> <location line="+45"/> <location line="+17"/> <location line="+30"/> <source>Credit</source> <translation>Credit</translation> </message> <message numerus="yes"> <location line="-102"/> <source>matures in %n more block(s)</source> <translation><numerusform>se maturizează în încă %n bloc</numerusform><numerusform>se maturizează în încă %n blocuri</numerusform><numerusform>se maturizează în încă %n de blocuri</numerusform></translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation>nu este acceptat</translation> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+30"/> <source>Debit</source> <translation>Debit</translation> </message> <message> <location line="-39"/> <source>Transaction fee</source> <translation>Comisionul tranzacţiei</translation> </message> <message> <location line="+16"/> <source>Net amount</source> <translation>Suma netă</translation> </message> <message> <location line="+6"/> <source>Message</source> <translation>Mesaj</translation> </message> <message> <location line="+2"/> <source>Comment</source> <translation>Comentarii</translation> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation>ID-ul tranzactiei</translation> </message> <message> <location line="+3"/> <source>Generated coins must mature 
510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation>Monedele generate trebuie să se maturizeze 510 blocuri înainte de a fi cheltuite. Când ați generat acest bloc, a fost trimis la rețea pentru a fi adăugat la lanțul de blocuri. În cazul în care nu reușește să intre în lanț, starea sa se ​​va schimba in &quot;nu a fost acceptat&quot;, și nu va putea fi cheltuit. Acest lucru se poate întâmpla din când în când, dacă un alt nod generează un bloc cu câteva secunde inaintea blocului tau.</translation> </message> <message> <location line="+7"/> <source>Debug information</source> <translation>Informatii pentru debug</translation> </message> <message> <location line="+8"/> <source>Transaction</source> <translation>Tranzacţie</translation> </message> <message> <location line="+5"/> <source>Inputs</source> <translation>Intrari</translation> </message> <message> <location line="+21"/> <source>Amount</source> <translation>Sumă</translation> </message> <message> <location line="+1"/> <source>true</source> <translation>Adevarat!</translation> </message> <message> <location line="+0"/> <source>false</source> <translation>Fals!</translation> </message> <message> <location line="-202"/> <source>, has not been successfully broadcast yet</source> <translation>, nu s-a propagat încă</translation> </message> <message numerus="yes"> <location line="-36"/> <source>Open for %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+67"/> <source>unknown</source> <translation>necunoscut</translation> </message> </context> <context> 
<name>TransactionDescDialog</name> <message> <location filename="../forms/transactiondescdialog.ui" line="+14"/> <source>Transaction details</source> <translation>Detaliile tranzacției</translation> </message> <message> <location line="+6"/> <source>This pane shows a detailed description of the transaction</source> <translation>Acest panou afișează o descriere detaliată a tranzacției</translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+231"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+0"/> <source>Type</source> <translation>Tipul</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Adresa</translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation>Cantitate</translation> </message> <message> <location line="+52"/> <source>Open until %1</source> <translation>Deschis până la %1</translation> </message> <message> <location line="+12"/> <source>Confirmed (%1 confirmations)</source> <translation>Confirmat (%1 confirmări)</translation> </message> <message numerus="yes"> <location line="-15"/> <source>Open for %n more block(s)</source> <translation><numerusform>Deschis pentru încă %1 bloc</numerusform><numerusform>Deschis pentru încă %1 blocuri</numerusform><numerusform>Deschis pentru încă %1 de blocuri</numerusform></translation> </message> <message> <location line="+6"/> <source>Offline</source> <translation>Deconectat</translation> </message> <message> <location line="+3"/> <source>Unconfirmed</source> <translation>Neconfirmat</translation> </message> <message> <location line="+3"/> <source>Confirming (%1 of %2 recommended confirmations)</source> <translation>Confirmare (%1 dintre %2 confirmări recomandate)</translation> </message> <message> <location line="+6"/> <source>Conflicted</source> <translation>Conflictual</translation> </message> <message> 
<location line="+3"/> <source>Immature (%1 confirmations, will be available after %2)</source> <translation>Nematurate(%1 confirmari, vor fi valabile dupa %2)</translation> </message> <message> <location line="+3"/> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation>Acest bloc nu a fost recepționat de niciun alt nod și probabil nu va fi acceptat!</translation> </message> <message> <location line="+3"/> <source>Generated but not accepted</source> <translation>Generat dar neacceptat</translation> </message> <message> <location line="+42"/> <source>Received with</source> <translation>Recepționat cu</translation> </message> <message> <location line="+2"/> <source>Received from</source> <translation>Primit de la</translation> </message> <message> <location line="+3"/> <source>Sent to</source> <translation>Trimis către</translation> </message> <message> <location line="+2"/> <source>Payment to yourself</source> <translation>Plată către tine</translation> </message> <message> <location line="+2"/> <source>Mined</source> <translation>Produs</translation> </message> <message> <location line="+38"/> <source>(n/a)</source> <translation>(n/a)</translation> </message> <message> <location line="+194"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation>Starea tranzacției. 
Treci cu mausul peste acest câmp pentru afișarea numărului de confirmări.</translation> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation>Data și ora la care a fost recepționată tranzacția.</translation> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation>Tipul tranzacției.</translation> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation>Adresa de destinație a tranzacției.</translation> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation>Suma extrasă sau adăugată la sold.</translation> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+54"/> <location line="+17"/> <source>All</source> <translation>Toate</translation> </message> <message> <location line="-16"/> <source>Today</source> <translation>Astăzi</translation> </message> <message> <location line="+1"/> <source>This week</source> <translation>Săptămâna aceasta</translation> </message> <message> <location line="+1"/> <source>This month</source> <translation>Luna aceasta</translation> </message> <message> <location line="+1"/> <source>Last month</source> <translation>Luna trecută</translation> </message> <message> <location line="+1"/> <source>This year</source> <translation>Anul acesta</translation> </message> <message> <location line="+1"/> <source>Range...</source> <translation>Între...</translation> </message> <message> <location line="+12"/> <source>Received with</source> <translation>Recepționat cu</translation> </message> <message> <location line="+2"/> <source>Sent to</source> <translation>Trimis către</translation> </message> <message> <location line="+2"/> <source>To yourself</source> <translation>Către tine</translation> </message> <message> <location line="+1"/> <source>Mined</source> 
<translation>Produs</translation> </message> <message> <location line="+1"/> <source>Other</source> <translation>Altele</translation> </message> <message> <location line="+7"/> <source>Enter address or label to search</source> <translation>Introdu adresa sau eticheta pentru căutare</translation> </message> <message> <location line="+7"/> <source>Min amount</source> <translation>Cantitatea minimă</translation> </message> <message> <location line="+34"/> <source>Copy address</source> <translation>Copiază adresa</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Copiază eticheta</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Copiază suma</translation> </message> <message> <location line="+1"/> <source>Copy transaction ID</source> <translation>Copiază ID tranzacție</translation> </message> <message> <location line="+1"/> <source>Edit label</source> <translation>Editează eticheta</translation> </message> <message> <location line="+1"/> <source>Show transaction details</source> <translation>Arată detaliile tranzacției</translation> </message> <message> <location line="+138"/> <source>Export Transaction Data</source> <translation>Exporta datele trazactiei</translation> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Fișier text cu valori separate prin virgulă (*.csv)</translation> </message> <message> <location line="+8"/> <source>Confirmed</source> <translation>Confirmat</translation> </message> <message> <location line="+1"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+1"/> <source>Type</source> <translation>Tipul</translation> </message> <message> <location line="+1"/> <source>Label</source> <translation>Etichetă</translation> </message> <message> <location line="+1"/> <source>Address</source> <translation>Adresă</translation> </message> <message> <location line="+1"/> 
<source>Amount</source> <translation>Sumă</translation> </message> <message> <location line="+1"/> <source>ID</source> <translation>ID</translation> </message> <message> <location line="+4"/> <source>Error exporting</source> <translation>Eroare la exportare</translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation>Nu s-a putut scrie în fișier %1.</translation> </message> <message> <location line="+100"/> <source>Range:</source> <translation>Interval:</translation> </message> <message> <location line="+8"/> <source>to</source> <translation>către</translation> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+212"/> <source>Sending...</source> <translation>Se trimite...</translation> </message> </context> <context> <name>bitcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+8"/> <source>Vsync version</source> <translation>Versiune Vsync</translation> </message> <message> <location line="+1"/> <source>Usage:</source> <translation>Uz:</translation> </message> <message> <location line="+1"/> <source>Send command to -server or vsyncd</source> <translation>Trimite comanda catre server sau vsyncd</translation> </message> <message> <location line="+1"/> <source>List commands</source> <translation>Listă de comenzi</translation> </message> <message> <location line="+1"/> <source>Get help for a command</source> <translation>Ajutor pentru o comandă</translation> </message> <message> <location line="+1"/> <source>Options:</source> <translation>Setări:</translation> </message> <message> <location line="+2"/> <source>Specify configuration file (default: vsync.conf)</source> <translation>Specifica fisier de configurare(implicit: vsync.conf)</translation> </message> <message> <location line="+1"/> <source>Specify pid file (default: vsyncd.pid)</source> <translation>Speficica fisier pid(implicit: vsync.pid)</translation> </message> 
<message> <location line="+2"/> <source>Specify wallet file (within data directory)</source> <translation>Specifică fișierul wallet (în dosarul de date)</translation> </message> <message> <location line="-1"/> <source>Specify data directory</source> <translation>Specifică dosarul de date</translation> </message> <message> <location line="+163"/> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=vsyncrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;Vsync Alert&quot; [email protected] </source> <translation type="unfinished"/> </message> <message> <location line="-161"/> <source>Set database cache size in megabytes (default: 25)</source> <translation>Setează mărimea cache a bazei de date în megabiți (implicit: 25)</translation> </message> <message> <location line="+1"/> <source>Set database disk log size in megabytes (default: 100)</source> <translation>Setează mărimea cache a bazei de date în megabiți (implicit: 100)</translation> </message> <message> <location line="+5"/> <source>Listen for connections on &lt;port&gt; (default: 18154 or testnet: 25714)</source> <translation>Ascultă pentru conectări pe &lt;port&gt; (implicit: 18154 sau testnet: 25714) </translation> </message> <message> <location line="+1"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation>Menține cel mult &lt;n&gt; conexiuni cu partenerii (implicit: 125)</translation> </message> <message> <location line="+3"/> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation>Conectează-te la nod pentru a obține adresele partenerilor, și apoi deconectează-te</translation> </message> 
<message> <location line="+1"/> <source>Specify your own public address</source> <translation>Specifică adresa ta publică</translation> </message> <message> <location line="+4"/> <source>Bind to given address. Use [host]:port notation for IPv6</source> <translation>Leaga la o adresa data. Utilizeaza notatie [host]:port pt IPv6</translation> </message> <message> <location line="+1"/> <source>Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Always query for peer addresses via DNS lookup (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation>Prag pentru deconectarea partenerilor care nu funcționează corect (implicit: 100)</translation> </message> <message> <location line="+1"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation>Numărul de secunde pentru a preveni reconectarea partenerilor care nu funcționează corect (implicit: 86400)</translation> </message> <message> <location line="+153"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation>A intervenit o eroare in timp ce se seta portul RPC %u pentru ascultare pe IPv4: %s</translation> </message> <message> <location line="-126"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 18155 or testnet: 25715)</source> <translation>Ascultă pentru conexiuni JSON-RPC pe &lt;port&gt; (implicit:18155 sau testnet: 25715)</translation> </message> <message> <location line="-16"/> <source>Accept command line and JSON-RPC commands</source> <translation>Se acceptă comenzi din linia de comandă și comenzi JSON-RPC</translation> </message> <message> <location line="+1"/> <source>Run in the background as a daemon and accept commands</source> 
<translation>Rulează în fundal ca un demon și acceptă comenzi</translation> </message> <message> <location line="+1"/> <source>Use the test network</source> <translation>Utilizează rețeaua de test</translation> </message> <message> <location line="-23"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation>Acceptă conexiuni din afară (implicit: 1 dacă nu se folosește -proxy sau -connect)</translation> </message> <message> <location line="+160"/> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation>A intervenit o eroare in timp ce se seta portul RPC %u pentru ascultare pe IPv6, reintoarcere la IPv4: %s</translation> </message> <message> <location line="-84"/> <source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation>Atentie: setarea -paytxfee este foarte ridicata! Aceasta este taxa tranzactiei pe care o vei plati daca trimiti o tranzactie.</translation> </message> <message> <location line="+46"/> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong Vsync will not work properly.</source> <translation>Atentie: Va rugam verificati ca timpul si data calculatorului sunt corete. Daca timpul este gresit Vsync nu va functiona corect.</translation> </message> <message> <location line="-19"/> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation>Atentie: eroare la citirea fisierului wallet.dat! Toate cheile sunt citite corect, dar datele tranzactiei sau anumite intrari din agenda sunt incorecte sau lipsesc.</translation> </message> <message> <location line="-16"/> <source>Warning: wallet.dat corrupt, data salvaged! 
Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation>Atentie: fisierul wallet.dat este corupt, date salvate! Fisierul original wallet.dat a fost salvat ca wallet.{timestamp}.bak in %s; daca balansul sau tranzactiile sunt incorecte ar trebui sa restaurati dintr-o copie de siguranta. </translation> </message> <message> <location line="-31"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation>Încearcă recuperarea cheilor private dintr-un wallet.dat corupt</translation> </message> <message> <location line="+5"/> <source>Block creation options:</source> <translation>Optiuni creare block</translation> </message> <message> <location line="-66"/> <source>Connect only to the specified node(s)</source> <translation>Conecteaza-te doar la nod(urile) specifice</translation> </message> <message> <location line="+4"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation>Descopera propria ta adresa IP (intial: 1)</translation> </message> <message> <location line="+97"/> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation>Am esuat ascultarea pe orice port. 
Folositi -listen=0 daca vreti asta.</translation> </message> <message> <location line="-2"/> <source>Invalid -tor address: &apos;%s&apos;</source> <translation>Adresa -tor invalida: &apos;%s&apos;</translation> </message> <message> <location line="+4"/> <source>Invalid amount for -reservebalance=&lt;amount&gt;</source> <translation>Suma invalida pentru -reservebalance=&lt;amount&gt;</translation> </message> <message> <location line="-85"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation>Tampon maxim pentru recepție per conexiune, &lt;n&gt;*1000 baiți (implicit: 5000)</translation> </message> <message> <location line="+1"/> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation>Tampon maxim pentru transmitere per conexiune, &lt;n&gt;*1000 baiți (implicit: 1000)</translation> </message> <message> <location line="-16"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation>Efectuează conexiuni doar către nodurile din rețeaua &lt;net&gt; (IPv4, IPv6 sau Tor)</translation> </message> <message> <location line="+30"/> <source>Prepend debug output with timestamp</source> <translation>Ataseaza output depanare cu log de timp</translation> </message> <message> <location line="+36"/> <source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source> <translation>Optiuni SSl (vezi Bitcoin wiki pentru intructiunile de instalare)</translation> </message> <message> <location line="-34"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation>Trimite informațiile trace/debug la consolă în locul fișierului debug.log</translation> </message> <message> <location line="+33"/> <source>Set maximum block size in bytes (default: 250000)</source> <translation>Setează mărimea maxima a blocului în bytes (implicit: 250000)</translation> </message> <message> <location line="-1"/> <source>Set minimum 
block size in bytes (default: 0)</source> <translation>Setează mărimea minimă a blocului în baiți (implicit: 0)</translation> </message> <message> <location line="-33"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation>Micsorati fisierul debug.log la inceperea clientului (implicit: 1 cand nu -debug)</translation> </message> <message> <location line="-41"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation>Specifică intervalul maxim de conectare în milisecunde (implicit: 5000)</translation> </message> <message> <location line="+28"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation>Foloseste UPnP pentru a vedea porturile (initial: 0)</translation> </message> <message> <location line="-1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation>Foloseste UPnP pentru a vedea porturile (initial: 1 cand listezi)</translation> </message> <message><|fim▁hole|> <message> <location line="+45"/> <source>Username for JSON-RPC connections</source> <translation>Utilizator pentru conexiunile JSON-RPC</translation> </message> <message> <location line="+50"/> <source>Verifying database integrity...</source> <translation>Se verifica integritatea bazei de date...</translation> </message> <message> <location line="+43"/> <source>Error: Wallet locked, unable to create transaction!</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Error: Transaction creation failed!</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Error: The transaction was rejected! 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="-15"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Information</source> <translation type="unfinished"/> </message> <message> <location line="-7"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation>Atenție: această versiune este depășită, este necesară actualizarea!</translation> </message> <message> <location line="-23"/> <source>wallet.dat corrupt, salvage failed</source> <translation>wallet.dat corupt, recuperare eșuată</translation> </message> <message> <location line="-55"/> <source>Password for JSON-RPC connections</source> <translation>Parola pentru conexiunile JSON-RPC</translation> </message> <message> <location line="-47"/> <source>Connect through SOCKS5 proxy</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source> <translation>Sincronizează timp cu alte noduri. 
Dezactivează daca timpul de pe sistemul dumneavoastră este precis ex: sincronizare cu NTP (implicit: 1)</translation> </message> <message> <location line="+12"/> <source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source> <translation>Când creați tranzacții, ignorați intrări cu valori mai mici decât aceasta (implicit: 0,01)</translation> </message> <message> <location line="+6"/> <source>Output debugging information (default: 0, supplying &lt;category&gt; is optional)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>If &lt;category&gt; is not supplied, output all debugging information.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>&lt;category&gt; can be:</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly. This is intended for regression testing tools and app development.</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation>Permite conexiuni JSON-RPC de la adresa IP specificată</translation> </message> <message> <location line="+1"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation>Trimite comenzi la nodul care rulează la &lt;ip&gt; (implicit: 127.0.0.1)</translation> </message> <message> <location line="+1"/> <source>Wait for RPC server to start</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Set the number of threads to service RPC calls (default: 4)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation>Execută comanda când cel mai bun bloc se modifică (%s 
în cmd este înlocuit cu hash-ul blocului)</translation> </message> <message> <location line="+3"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation>Executati comanda cand o tranzactie a portofelului se schimba (%s in cmd este inlocuit de TxID)</translation> </message> <message> <location line="+3"/> <source>Require a confirmations for change (default: 0)</source> <translation>Necesita confirmari pentru schimbare (implicit: 0)</translation> </message> <message> <location line="+1"/> <source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source> <translation>Execută o comandă când o alerta relevantâ este primitâ(%s in cmd este înlocuit de mesaj)</translation> </message> <message> <location line="+3"/> <source>Upgrade wallet to latest format</source> <translation>Actualizează portofelul la ultimul format</translation> </message> <message> <location line="+1"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation>Setează mărimea bazinului de chei la &lt;n&gt; (implicit: 100)</translation> </message> <message> <location line="+1"/> <source>Rescan the block chain for missing wallet transactions</source> <translation>Rescanează lanțul de bloc pentru tranzacțiile portofel lipsă</translation> </message> <message> <location line="+3"/> <source>How thorough the block verification is (0-6, default: 1)</source> <translation>Cat de temeinica sa fie verificarea blocurilor( 0-6, implicit: 1)</translation> </message> <message> <location line="+1"/> <source>Imports blocks from external blk000?.dat file</source> <translation>Importă blocuri dintr-un fișier extern blk000?.dat</translation> </message> <message> <location line="+1"/> <source>Keep at most &lt;n&gt; MiB of unconnectable blocks in memory (default: %u)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> 
<translation>Folosește OpenSSL (https) pentru conexiunile JSON-RPC</translation> </message> <message> <location line="+1"/> <source>Server certificate file (default: server.cert)</source> <translation>Certificatul serverului (implicit: server.cert)</translation> </message> <message> <location line="+1"/> <source>Server private key (default: server.pem)</source> <translation>Cheia privată a serverului (implicit: server.pem)</translation> </message> <message> <location line="+5"/> <source>Error: Unsupported argument -socks found. Setting SOCKS version isn&apos;t possible anymore, only SOCKS5 proxies are supported.</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Initialization sanity check failed. Vsync is shutting down.</source> <translation type="unfinished"/> </message> <message> <location line="+20"/> <source>Error loading block database</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>Error: Wallet unlocked for staking only, unable to create transaction.</source> <translation>Eroare: portofel blocat doar pentru staking, tranzactia nu s-a creat.</translation> </message> <message> <location line="-14"/> <source>Error: Disk space is low!</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source> <translation type="unfinished"/> </message> <message> <location line="-135"/> <source>This help message</source> <translation>Acest mesaj de ajutor</translation> </message> <message> <location line="+100"/> <source>Wallet %s resides outside data directory %s.</source> <translation>Portofelul %s este in afara directorului %s</translation> </message> <message> <location line="+46"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation>Nu se poate folosi %s pe acest calculator (eroarea returnată este 
%d, %s)</translation> </message> <message> <location line="-136"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation>Permite căutări DNS pentru -addnode, -seednode și -connect</translation> </message> <message> <location line="+121"/> <source>Loading addresses...</source> <translation>Încarc adrese...</translation> </message> <message> <location line="-10"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation>Eroare la încărcarea wallet.dat: Portofel corupt</translation> </message> <message> <location line="+4"/> <source>Error loading wallet.dat: Wallet requires newer version of Vsync</source> <translation>Eroare la încărcarea wallet.dat: Portofelul necesita o versiune mai noua de Vsync</translation> </message> <message> <location line="+1"/> <source>Wallet needed to be rewritten: restart Vsync to complete</source> <translation>A fost nevoie de rescrierea portofelului: restartați Vsync pentru a finaliza</translation> </message> <message> <location line="+1"/> <source>Error loading wallet.dat</source> <translation>Eroare la încărcarea wallet.dat</translation> </message> <message> <location line="-15"/> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation>Adresa -proxy nevalidă: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation>Rețeaua specificată în -onlynet este necunoscută: &apos;%s&apos;</translation> </message> <message> <location line="+3"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation>Nu se poate rezolva adresa -bind: &apos;%s&apos;</translation> </message> <message> <location line="+2"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation>Nu se poate rezolva adresa -externalip: &apos;%s&apos;</translation> </message> <message> <location line="-22"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> 
<translation>Suma nevalidă pentru -paytxfee=&lt;amount&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+59"/> <source>Sending...</source> <translation>Se trimite...</translation> </message> <message> <location line="+5"/> <source>Invalid amount</source> <translation>Sumă nevalidă</translation> </message> <message> <location line="+1"/> <source>Insufficient funds</source> <translation>Fonduri insuficiente</translation> </message> <message> <location line="-41"/> <source>Loading block index...</source> <translation>Încarc indice bloc...</translation> </message> <message> <location line="-105"/> <source>Add a node to connect to and attempt to keep the connection open</source> <translation>Adaugă un nod la care te poți conecta pentru a menține conexiunea deschisă</translation> </message> <message> <location line="+131"/> <source>Unable to bind to %s on this computer. Vsync is probably already running.</source> <translation>Imposibil de conectat %s pe acest computer. Cel mai probabil Vsync ruleaza</translation> </message> <message> <location line="-108"/> <source>Fee per KB to add to transactions you send</source> <translation>Comision pe kB de adaugat la tranzactiile pe care le trimiti</translation> </message> <message> <location line="+40"/> <source>How many blocks to check at startup (default: 500, 0 = all)</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Acceptable ciphers (default: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Warning: Deprecated argument -debugnet ignored, use -debug=net</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Invalid amount for -mininput=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Suma invalida pentru -mininput=&lt;amount&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+3"/> <source>Cannot 
obtain a lock on data directory %s. Vsync is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Error initializing wallet database environment %s!</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Loading wallet...</source> <translation>Încarc portofel...</translation> </message> <message> <location line="+8"/> <source>Cannot downgrade wallet</source> <translation>Nu se poate retrograda portofelul</translation> </message> <message> <location line="+1"/> <source>Cannot write default address</source> <translation>Nu se poate scrie adresa implicită</translation> </message> <message> <location line="+1"/> <source>Rescanning...</source> <translation>Rescanez...</translation> </message> <message> <location line="+2"/> <source>Done loading</source> <translation>Încărcare terminată</translation> </message> <message> <location line="+33"/> <source>To use the %s option</source> <translation>Pentru a folosi opțiunea %s</translation> </message> <message> <location line="-27"/> <source>Error</source> <translation>Eroare</translation> </message> <message> <location line="+22"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation>Trebuie sa setezi rpcpassword=&lt;password&gt; în fișierul de configurare:⏎ %s⏎ Dacă fișierul nu există, creează-l cu permisiuni de citire doar de către proprietar.</translation> </message> </context> </TS><|fim▁end|>
<location line="-25"/> <source>Use proxy to reach tor hidden services (default: same as -proxy)</source> <translation>Utilizati proxy pentru a ajunge la serviciile tor (implicit: la fel ca proxy)</translation> </message>
<|file_name|>entry.js<|end_file_name|><|fim▁begin|><|fim▁hole|> React.render(<ColorWheel title='ColorWheel' />, document.querySelector('#color-wheel'))<|fim▁end|>
import React from 'react' import ColorWheel from './ColorWheel.jsx'
<|file_name|>resolve_lock_lite.rs<|end_file_name|><|fim▁begin|>// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0. use crate::storage::kv::WriteData; use crate::storage::lock_manager::LockManager; use crate::storage::mvcc::MvccTxn; use crate::storage::txn::commands::{ Command, CommandExt, ReleasedLocks, ResponsePolicy, TypedCommand, WriteCommand, WriteContext, WriteResult, }; use crate::storage::txn::{cleanup, commit, Result}; use crate::storage::{ProcessResult, Snapshot}; use txn_types::{Key, TimeStamp}; command! { /// Resolve locks on `resolve_keys` according to `start_ts` and `commit_ts`. ResolveLockLite: cmd_ty => (), display => "kv::resolve_lock_lite", (), content => { /// The transaction timestamp. start_ts: TimeStamp, /// The transaction commit timestamp. commit_ts: TimeStamp, /// The keys to resolve. resolve_keys: Vec<Key>, } } impl CommandExt for ResolveLockLite { ctx!(); tag!(resolve_lock_lite); ts!(start_ts); command_method!(is_sys_cmd, bool, true); write_bytes!(resolve_keys: multiple); gen_lock!(resolve_keys: multiple); } impl<S: Snapshot, L: LockManager> WriteCommand<S, L> for ResolveLockLite { fn process_write(self, snapshot: S, context: WriteContext<'_, L>) -> Result<WriteResult> { let mut txn = MvccTxn::new( snapshot, self.start_ts, !self.ctx.get_not_fill_cache(), context.concurrency_manager, ); let rows = self.resolve_keys.len(); // ti-client guarantees the size of resolve_keys will not too large, so no necessary // to control the write_size as ResolveLock. let mut released_locks = ReleasedLocks::new(self.start_ts, self.commit_ts); for key in self.resolve_keys { released_locks.push(if !self.commit_ts.is_zero() { commit(&mut txn, key, self.commit_ts)?<|fim▁hole|> } else { cleanup(&mut txn, key, TimeStamp::zero(), false)? 
}); } released_locks.wake_up(context.lock_mgr); context.statistics.add(&txn.take_statistics()); let write_data = WriteData::from_modifies(txn.into_modifies()); Ok(WriteResult { ctx: self.ctx, to_be_write: write_data, rows, pr: ProcessResult::Res, lock_info: None, lock_guards: vec![], response_policy: ResponsePolicy::OnApplied, }) } }<|fim▁end|>
<|file_name|>hotshots_fr.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="fr" version="2.0"> <context> <name/> <message> <location filename="../src/AboutDialog.cpp" line="243"/> <source>release of translation and translator name please</source> <comment>put your name here dear translator and the release of the translation file!!</comment> <translation>traduction 2.0 par xbee</translation> </message> </context> <context> <name>AboutDialog</name> <message> <location filename="../src/AboutDialog.cpp" line="143"/> <source>&lt;a href=&quot;%1&quot;&gt;Check for new release ...&lt;/a&gt;</source> <translation>&lt;a href=&quot;%1&quot;&gt;Voir pour une nouvelle version ...&lt;/a&gt;</translation> </message> <message> <location filename="../src/AboutDialog.cpp" line="160"/> <source>About</source> <translation>A propos</translation> </message> <message> <location filename="../src/AboutDialog.cpp" line="161"/> <source>Preferences</source> <translation>Préférences</translation> </message> <message> <location filename="../src/AboutDialog.cpp" line="162"/> <source>Info</source> <translation>Informations</translation> </message> <message> <location filename="../src/AboutDialog.cpp" line="163"/> <source>Log</source> <translation>Journal</translation> </message> <message> <location filename="../src/AboutDialog.cpp" line="202"/> <source>Qt version %1</source> <translation>Qt version %1</translation> </message> <message> <location filename="../src/AboutDialog.cpp" line="290"/> <source>The %1 file can&apos;t be found, sorry ...</source> <translation>Le fichier %1 n&apos;est pas accessible, désolé ...</translation> </message> <message> <location filename="../src/AboutDialog.cpp" line="314"/> <source>detected language: %1</source> <translation>Langage détecté : %1</translation> </message> <message> <location filename="../src/AboutDialog.cpp" line="319"/> <source>Automatic detection</source> <translation>Détection automatique</translation> </message> 
<message> <location filename="../src/AboutDialog.cpp" line="322"/> <source>Default (no use of translation files)</source> <translation>Défaut (pas de fichier de traduction)</translation> </message> <message> <location filename="../src/AboutDialog.cpp" line="348"/> <source>The application need to restart in order to take into account new translation , Do you want to restart application now?</source> <translation>L&apos;application a besoin de redémarrer pour tenir compte de la nouvelle langue Voulez-vous redémarrer l&apos;application maintenant ?</translation> </message> <message> <location filename="../src/AboutDialog.cpp" line="363"/> <source>Do you want to reset all the preferences to default value? If yes, the application will be relaunch</source> <translation>Voulez-vous effacer toutes les préférences ? Si oui, l&apos;application va redémarrer</translation> </message> <message> <location filename="../src/AboutDialog.cpp" line="384"/> <source>Example: </source> <translation>Exemple :</translation> </message> </context> <context> <name>AboutDialogClass</name> <message> <location filename="../ui/AboutDialog.ui" line="17"/> <location filename="../ui/AboutDialog.ui" line="44"/> <source>About</source> <translation>A propos</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="120"/> <source>Credits</source> <translation>Crédits</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="154"/> <source>Changelog</source> <translation>Journal des modifications</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="199"/> <source>Release Number</source> <translation>Version numéro</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="235"/> <source>Preferences</source> <translation>Préférences</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="249"/> <location filename="../ui/AboutDialog.ui" line="277"/> <source>Startup</source> 
<translation>Démarrage</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="255"/> <source>Language</source> <translation>Langue</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="264"/> <source>Detected language ...</source> <translation>Langue détectée ...</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="283"/> <source> Start in notification tray (minimized)</source> <translation>Démarrer dans la zone de notification (minimisé)</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="290"/> <source>Display splashscreen at startup</source> <translation>Afficher l&apos;écran d&apos;accueil au démarrage</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="297"/> <source>Auto start with system</source> <translation>Démarrage automatique avec le système</translation> </message> <message><|fim▁hole|> </message> <message> <location filename="../ui/AboutDialog.ui" line="337"/> <source>Splashscreen with transparent background</source> <translation>Ecran d&apos;acceuil avec fond transparent</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="368"/> <location filename="../ui/AboutDialog.ui" line="374"/> <source>Snapshot</source> <translation>Capture d&apos;écran</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="380"/> <source>Auto copy new snapshot to clipboard</source> <translation>Copie automatique de la capture d&apos;écran vers le presse-papiers</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="387"/> <source>Inserting current date time into saved filename</source> <translation>Ajouter la date courante au nom du fichier</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="390"/> <source>Insert DateTime in filename</source> <translation>Ajouter la date courante au nom du fichier</translation> 
</message> <message> <location filename="../ui/AboutDialog.ui" line="397"/> <source>Default filename:</source> <translation>Nom par défaut :</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="404"/> <source>Format:</source> <translation>Format :</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="411"/> <source>Template: </source> <translation>Modèle :</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="437"/> <source>Default saving image format</source> <translation>Format de sauvegarde par défaut</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="444"/> <source>Default filename</source> <translation>Nom par défaut</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="461"/> <source>Post effect:</source> <translation>Post-effet :</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="471"/> <source>Rotate snapshot</source> <translation>Tourner la capture d&apos;écran</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="497"/> <source>Angle:</source> <translation>Angle :</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="504"/> <source>Capture window with decoration</source> <translation>Capturer la fenêtre avec sa décoration</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="514"/> <source>Play a sound when action is finished</source> <translation>Jouer un son quand l&apos;action est terminée</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="521"/> <source>Hide interface during screen capture</source> <translation>Cacher l&apos;interface pendant la capture d&apos;écran</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="531"/> <source>Force background color for clipboard/web services</source> <translation>Forcer la couleur de 
fond pour le presse papier/service web</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="557"/> <source>Add mouse cursor to snapshot</source> <translation>Ajouter le pointeur de souris aux captures d&apos;écran</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="564"/> <source>Snapshot Delay (s):</source> <translation>Délai (s) :</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="583"/> <source>Snapshot delay in seconds</source> <translation>Délai en secondes</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="589"/> <source>No delay</source> <translation>Aucun délai</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="602"/> <source>Scale snapshot</source> <translation>Mettre à l&apos;échelle la capture d&apos;écran</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="611"/> <source>%</source> <translation>%</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="621"/> <source>Max width * height</source> <translation>Taille maximale</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="631"/> <source>800</source> <translation>800</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="641"/> <source>600</source> <translation>600</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="682"/> <source>Shortcut/Menu</source> <translation>Raccourcis/Menu</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="691"/> <source>Action performed by double-clicking on system tray icon:</source> <translation>Action effectuée en double cliquant dans la zone de notification :</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="698"/> <source>Shortcut editor</source> <translation>Editeur de raccourcis</translation> </message> 
<message> <location filename="../ui/AboutDialog.ui" line="705"/> <source>Action</source> <translation>Action</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="710"/> <source>Shortcut</source> <translation>Raccourci</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="715"/> <source>Description</source> <translation>Description</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="723"/> <source>Use system screen capture shortcuts (only available for screen grab functions)</source> <translation>Utiliser les raccourcis au niveau système pour les captures d&apos;écran</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="736"/> <source>Warning! Duplicated shortcuts will not be checked.</source> <translation>Attention ! les duplications de raccourcis ne sont pas vérifiées.</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="746"/> <source>System tray additional functions:</source> <translation>Actions supplémentaires de la zone de notification :</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="761"/> <source>Network</source> <translation>Réseau</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="770"/> <source>Default web service</source> <translation>Service Web par défaut</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="796"/> <source>Launch default Internet browser when done</source> <translation>Lancer le navigateur par défaut au final</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="803"/> <source>Copy uploaded file URL to clipboard</source> <translation>Copier l&apos;URL du fichier téléchargé vers le presse-papier</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="812"/> <source>as </source> <translation>comme</translation> </message> <message> 
<location filename="../ui/AboutDialog.ui" line="820"/> <source>No format</source> <translation>Aucun format</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="825"/> <source>Instant Messaging (IM) format</source> <translation>Format de Messagerie Instantannée (IM)</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="830"/> <source>HTML format</source> <translation>Format HTML</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="835"/> <source>BBCode format</source> <translation>Format BBCode</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="840"/> <source>User format</source> <translation>Format définit par l&apos;utilisateur</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="851"/> <source>@url@</source> <translation>@url@</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="863"/> <source>(*) @url@ is the uploaded URL</source> <translation>(*) @url@ is the uploaded URL</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="876"/> <source>Use a proxy server to connect to Internet</source> <translation>Utiliser un Proxy pour se connecter à Internet</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="879"/> <source> use Proxy</source> <translation>Utiliser un Proxy</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="891"/> <source>Host </source> <translation>Serveur</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="898"/> <source>Proxy hostname</source> <translation>Nom du Proxy</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="905"/> <source>Port </source> <translation>Port</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="912"/> <source> Proxy Authentification </source> 
<translation>Authentification sur le serveur Proxy</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="924"/> <source>Username</source> <translation>Nom d&apos;utilisateur</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="931"/> <source>Proxy username</source> <translation>Nom d&apos;utilisateur sur le serveur Proxy</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="938"/> <source>Password</source> <translation>Mot de passe</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="945"/> <source>Proxy password</source> <translation>Mot de passe sur le serveur Proxy</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="958"/> <source>Show/Hide password in interface</source> <translation>Afficher/Cacher le mot de passe dans l&apos;interface</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="961"/> <source>Show password</source> <translation>Afficher le mot de passe</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="984"/> <source>Proxy port</source> <translation>Port du serveur Proxy</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="1000"/> <source>Show dialog before download</source> <translation>Afficher une boite de dialogue avant téléchargement</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="1010"/> <source>Show dialog after download</source> <translation>Afficher une boite de dialogue après téléchargement</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="1022"/> <source>Uploaders</source> <translation>Téléchargeurs</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="1055"/> <source>Infos</source> <translation>Infos</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="1061"/> <source>Supported 
formats (via Qt plugins)</source> <translation>Formats supportés (via greffons Qt)</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="1095"/> <source>Available translations</source> <translation>Traductions disponibles</translation> </message> <message> <location filename="../ui/AboutDialog.ui" line="1134"/> <source>Log</source> <translation>Journal</translation> </message> </context> <context> <name>BaseUploader</name> <message> <location filename="../src/uploaders/BaseUploader.cpp" line="95"/> <source>Image has been resized according to uploading service constraint (%1x%2)</source> <translation>L&apos;image a été redimensionnée par rapport aux contraintes du service web (%1x%2)</translation> </message> <message> <location filename="../src/uploaders/BaseUploader.cpp" line="109"/> <location filename="../src/uploaders/BaseUploader.cpp" line="127"/> <source>Can&apos;t save tmp file %1</source> <translation>Impossible de sauver le fichier temporaire %1</translation> </message> <message> <location filename="../src/uploaders/BaseUploader.cpp" line="118"/> <source>reduce file size according to limit (%1/%2 bytes)</source> <translation>Redimensionnement du fichier par rapport aux limitations (%1/%2 octets)</translation> </message> </context> <context> <name>CanardPCUploader</name> <message> <location filename="../src/uploaders/CanardPCUploader.cpp" line="93"/> <source>%1 transfert end without error.</source> <translation>Le transfert %1 s&apos;est terminé sans erreur.</translation> </message> <message> <location filename="../src/uploaders/CanardPCUploader.cpp" line="97"/> <source>%1 transfert end with error! (%2)</source> <translation>Le transfert %1 s&apos;est terminé avec une erreur! 
(%2)</translation> </message> <message> <location filename="../src/uploaders/CanardPCUploader.cpp" line="160"/> <source>Can&apos;t extract URL from reply</source> <translation>Impossible d&apos;extraire l&apos;URL de la réponse</translation> </message> </context> <context> <name>DefaultUploaderSettingsClass</name> <message> <location filename="../ui/DefaultUploaderSettings.ui" line="14"/> <source>DefaultUploaderSettings</source> <translation>Paramètres par défaut du téléchargeur</translation> </message> <message> <location filename="../ui/DefaultUploaderSettings.ui" line="33"/> <source>No parameter for this uploader</source> <translation>Aucun paramètre pour ce téléchargeur</translation> </message> </context> <context> <name>EditorWidget</name> <message> <location filename="../src/editor/EditorWidget.cpp" line="194"/> <source>Left-click to select item, Ctrl+Left Click then drag to select a group of items</source> <translation>Click gauche pour sélectionner l&apos;élément, Ctrl+Click Gauche puis faire glisser la souris pour sélectionner un groupe d&apos;éléments</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="195"/> <location filename="../src/editor/EditorWidget.cpp" line="203"/> <source>Left-click to set center</source> <translation>Click gauche pour positionner le centre</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="196"/> <location filename="../src/editor/EditorWidget.cpp" line="197"/> <location filename="../src/editor/EditorWidget.cpp" line="200"/> <location filename="../src/editor/EditorWidget.cpp" line="201"/> <location filename="../src/editor/EditorWidget.cpp" line="202"/> <location filename="../src/editor/EditorWidget.cpp" line="205"/> <location filename="../src/editor/EditorWidget.cpp" line="206"/> <location filename="../src/editor/EditorWidget.cpp" line="207"/> <source>Left-click then drag to create the shape</source> <translation>Click gauche puis faire glisser la 
souris pour créer l&apos;élément</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="198"/> <location filename="../src/editor/EditorWidget.cpp" line="199"/> <location filename="../src/editor/EditorWidget.cpp" line="208"/> <source>Creation: left-click to create a point, middle-click to delete the last one and right-click to create a new shape. Modification: select vertex, left-click then drag to move, middle-click to delete and right-click to add a new one</source> <translation>Création: Click-Gauche pour créer un point, Click-Milieu pour effacer le dernier et Click-Droit pour créer une nouvelle figure. Modification: sélectionner un sommet, Click-Gauche puis glisser pour déplacer, Click-Milieu pour effacer et Click-Droit sur segment pour ajouter un point </translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="204"/> <source>Left-click to set image center and drag if you want change the size</source> <translation>Click gauche pour positionner le centre de l&apos;image puis glisser la souris si vous voulez redimensionner l&apos;image</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="209"/> <source>Left-click then drag to create the shape, select the area and press &lt;Enter&gt; to perform the operation</source> <translation>Clic-Gauche puis faire glisser pour créer la figure, la sélectionner et tapez &lt;Entrée&gt; pour effectuer l&apos;opération</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="259"/> <source>There is existing annotation items, what do you want to do?</source> <translation>Il existe des éléments d&apos;annotation, que voulez-vous faire ?</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="260"/> <source>clear annotation items ...</source> <translation>effacer les éléments existants ...</translation> </message> <message> <location 
filename="../src/editor/EditorWidget.cpp" line="261"/> <source>update background image ...</source> <translation>mettre à jour l&apos;image de fond ...</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="297"/> <source>Loading HotShots file failed</source> <translation>Le chargement du fichier HotShots a échoué</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="297"/> <source>Error loading HotShots file %1</source> <translation>Erreur lors du chargement du fichier HotSHots %1</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="351"/> <source>Snapshot editor: </source> <translation>Editeur de captures d&apos;écran :</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="614"/> <source>Scale x%1</source> <translation>Echelle x%1</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="855"/> <source>Clear edit</source> <translation>Effacer l&apos;édition</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="856"/> <source>Clear all the exiting items ? (no possible undo)</source> <translation>Effacer tous les éléments ? 
(pas d&apos;annulation possible)</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="879"/> <source>Clear document</source> <translation>Effacer le document</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="880"/> <source>Create a new document?</source> <translation>Créer un nouveau document ?</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="912"/> <source>Save HotShots File</source> <translation>Sauver le fichier HotShots</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="914"/> <source>HotShots files (*.hot)</source> <translation>Fichiers HotShots (*.hot)</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="946"/> <source>Saving HotShots file failed</source> <translation>La sauvegarde du fichier HotShots a échoué</translation> </message> <message> <location filename="../src/editor/EditorWidget.cpp" line="946"/> <source>Error saving HotShots file %1</source> <translation>Erreur lors de la sauvegarde du fichier HotShots %1</translation> </message> </context> <context> <name>EditorWidgetClass</name> <message> <location filename="../ui/EditorWidget.ui" line="17"/> <source>Snapshot editor</source> <translation>Editeur de snapshot</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="148"/> <location filename="../ui/EditorWidget.ui" line="167"/> <location filename="../ui/EditorWidget.ui" line="186"/> <location filename="../ui/EditorWidget.ui" line="205"/> <location filename="../ui/EditorWidget.ui" line="224"/> <location filename="../ui/EditorWidget.ui" line="243"/> <location filename="../ui/EditorWidget.ui" line="262"/> <location filename="../ui/EditorWidget.ui" line="281"/> <location filename="../ui/EditorWidget.ui" line="300"/> <location filename="../ui/EditorWidget.ui" line="319"/> <location filename="../ui/EditorWidget.ui" 
line="338"/> <location filename="../ui/EditorWidget.ui" line="357"/> <location filename="../ui/EditorWidget.ui" line="376"/> <location filename="../ui/EditorWidget.ui" line="395"/> <location filename="../ui/EditorWidget.ui" line="414"/> <location filename="../ui/EditorWidget.ui" line="433"/> <location filename="../ui/EditorWidget.ui" line="452"/> <location filename="../ui/EditorWidget.ui" line="474"/> <source>...</source> <translation>...</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="496"/> <source>Preview font</source> <translation>Prévisualisation police de caractères</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="533"/> <source>Reset colors</source> <translation>Réinitialiser les couleurs</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="550"/> <source>swap background and foreground colors</source> <translation>Intervertir les couleurs</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="575"/> <source>Foreground and background colors</source> <translation>Couleurs de premier-plan et d&apos;arrière plan</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="602"/> <source>Main actions</source> <translation>Action principales</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="629"/> <source>Edition actions</source> <translation>Actions d&apos;édition</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="663"/> <source>Select item</source> <translation>Sélectionner un élément</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="666"/> <source>Select</source> <translation>Sélectionner</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="669"/> <source>Esc</source> <translation>Echap</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="684"/> 
<source>Text item</source> <translation>Elément texte</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="687"/> <source>Draw text</source> <translation>Tracer un texte</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="702"/> <source>Line item</source> <translation>Elément ligne</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="705"/> <source>Draw a simple line</source> <translation>Tracer une ligne</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="723"/> <source>Encircle item</source> <translation>Elément entourage</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="726"/> <source>Encircle</source> <translation>Entourer</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="744"/> <source>Polyline item</source> <translation>Élément polyligne</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="747"/> <source>Draw a polyline</source> <translation>Tracer une polyligne</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="765"/> <source>Polygon item</source> <translation>Elément polygone</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="768"/> <source>Draw a polygon</source> <translation>Tracer un polygone</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="786"/> <source>Curve item</source> <translation>Elément courbe</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="789"/> <source>Draw a curve</source> <translation>Dessiner une courbe</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="807"/> <source>Crop item</source> <translation>Découpage</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="810"/> <source>Crop background image</source> 
<translation>Couper l&apos;image de fond</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="828"/> <source>Rectangle item</source> <translation>Elément rectangle</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="831"/> <source>Draw a rectangle</source> <translation>Tracer un rectangle</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="849"/> <source>Ellipse item</source> <translation>Elément ellipse</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="852"/> <source>Draw an ellipse</source> <translation>Tracer une ellipse</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="870"/> <source>Arrow item</source> <translation>Elément flèche</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="873"/> <source>Draw an arrow</source> <translation>Tracer une flèche</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="891"/> <source>Tag item</source> <translation>Elément balise</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="894"/> <source>Draw a numbered tag</source> <translation>Tracer une balise</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="909"/> <source>Highlighter item</source> <translation>Mettre en évidence</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="912"/> <source>Highlight</source> <translation>Mettre en évidence</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="930"/> <source>Magnifier item</source> <translation>Loupe</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="933"/> <source>Magnifier</source> <translation>Loupe</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="951"/> <source>Image item</source> <translation>Elément 
image</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="954"/> <source>Add an image</source> <translation>Ajouter une image</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="972"/> <source>Blur item</source> <translation>Flou</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="975"/> <source>Blur</source> <translation>Flou</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="990"/> <source>Font size down</source> <translation>Taille de la police plus petite</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="993"/> <source>Decrease font size</source> <translation>Décroître la taille de la fonte</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1008"/> <source>Font size up</source> <translation>Taille de la police plus grande</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1011"/> <source>Increase font size</source> <translation>Accroïtre la taille de la fonte</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1022"/> <source>aAbB...</source> <translation>aAbB...</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1025"/> <source>Change font</source> <translation>Changer la fonte</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1037"/> <source>Quit</source> <translation>Quitter</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1040"/> <source>Quit editor</source> <translation>Quitter l&apos;éditeur</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1055"/> <source>Load</source> <translation>Charger</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1058"/> <source>Open a file</source> <translation>Ouvrir un fichier</translation> </message> 
<message> <location filename="../ui/EditorWidget.ui" line="1073"/> <location filename="../ui/EditorWidget.ui" line="1076"/> <source>New document</source> <translation>Nouveau document</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1091"/> <source>Import clipboard</source> <translation>Importer une image du presse-papier</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1094"/> <source>Import image from clipboard</source> <translation>Importer une image du presse-papier</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1109"/> <source>Save</source> <translation>Sauver</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1112"/> <source>Save edition</source> <translation>Sauver l&apos;édition</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1127"/> <source>Save as</source> <translation>Sauver sous</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1130"/> <source>Save edition as</source> <translation>Sauver sous</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1142"/> <location filename="../ui/EditorWidget.ui" line="1145"/> <source>Copy to clipboard</source> <translation>Copie vers le presse-papier</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1160"/> <source>Export</source> <translation>Exporter l&apos;image</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1163"/> <source>Export image</source> <translation>Exporter l&apos;image</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1175"/> <source>Upload</source> <translation>Télécharger</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1178"/> <source>Upload the image to the web</source> <translation>Télécharger l&apos;image vers le 
Web</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1190"/> <source>Print</source> <translation>Imprimer</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1205"/> <source>Fit to view</source> <translation>Ajuster à la fenêtre</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1208"/> <source>Scale to fit window</source> <translation>Ajuster à la fenêtre</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1223"/> <location filename="../ui/EditorWidget.ui" line="1226"/> <source>Reset scale</source> <translation>Réinitialiser l&apos;échelle</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1241"/> <source>Undo</source> <translation>Annuler</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1244"/> <source>Undo the last action</source> <translation>Annuler la dernière action</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1259"/> <source>Redo</source> <translation>Refaire</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1262"/> <source>Redo the last undo action</source> <translation>Refaire la dernière action</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1274"/> <source>Add frame</source> <translation>Ajouter un cadre</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1277"/> <source>Add a post effect to background image</source> <translation>Ajouter un effet à l&apos;image de fond</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1299"/> <source>add shadow</source> <translation>Ajouter une ombre</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1302"/> <source>Enable/Disable item&apos;s shadow</source> <translation>Afficher/Cacher l&apos;ombrage des 
éléments</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1317"/> <source>Item up</source> <translation>Element vers le haut</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1320"/> <source>Push selected elements up</source> <translation>Mettre les éléments sélectionnés au dessus</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1335"/> <source>Item down</source> <translation>Elément vers le bas</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1338"/> <source>Push selected elements down</source> <translation>Mettre les éléments sélectionnés en dessous</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1353"/> <source>Duplicate items</source> <translation>Dupliquer les éléments courants</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1356"/> <source>Duplicate selected element</source> <translation>Dupliquer l&apos;élément courant</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1368"/> <source>Clear items</source> <translation>Effacer l&apos;édition</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1371"/> <source>Erase all elements</source> <translation>Effacer tous les éléments</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1386"/> <source>Delete items</source> <translation>Effacer les éléments sélectionnés</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1389"/> <source>Delete selected elements</source> <translation>Effacer les éléments sélectionnés</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1401"/> <source>No post effect</source> <translation>Aucun post-effet</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1413"/> <source>Simple drop 
shadow</source> <translation>Ombre portée simple</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1425"/> <source>Drop shadow with borders</source> <translation>Ombre portée avec bordures</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1437"/> <source>Progressive opacity</source> <translation>Opacité progressive</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1449"/> <source>Torn edge</source> <translation>Bords déchirés</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1461"/> <source>Item to top</source> <translation>Elément en haut</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1464"/> <source>Push selected elements to top</source> <translation>Mettre les éléments sélectionnés en haut</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1476"/> <source>Item to bottom</source> <translation>Elément en bas</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1479"/> <source>Push selected elements to bottom</source> <translation>Mettre les éléments sélectionnés en bas</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1491"/> <location filename="../ui/EditorWidget.ui" line="1494"/> <source>Align group to left</source> <translation>Aligner le groupe à gauche</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1506"/> <location filename="../ui/EditorWidget.ui" line="1509"/> <source>Align group to horizontal center</source> <translation>Centrer horizontalement le groupe</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1521"/> <location filename="../ui/EditorWidget.ui" line="1524"/> <source>Align group to vertical center</source> <translation>Centrer verticalement le groupe</translation> </message> <message> <location 
filename="../ui/EditorWidget.ui" line="1536"/> <location filename="../ui/EditorWidget.ui" line="1539"/> <source>Align group to right</source> <translation>Aligner le groupe à droite</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1551"/> <location filename="../ui/EditorWidget.ui" line="1554"/> <source>Align group to top</source> <translation>Aligner le groupe en haut</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1566"/> <location filename="../ui/EditorWidget.ui" line="1569"/> <source>Align group to bottom</source> <translation>Aligner le groupe en bas</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1581"/> <source>Set alignment</source> <translation>Appliquer un alignement</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1584"/> <source>Set alignment to a group</source> <translation>Appliquer un alignement à un groupe</translation> </message> <message> <location filename="../ui/EditorWidget.ui" line="1599"/> <source>Rescale background image</source> <translation>Retailler l&apos;image de fond</translation> </message> </context> <context> <name>FileParser</name> <message> <location filename="../src/editor/io/FileParser.cpp" line="64"/> <source>Cannot read file %1: (%2)</source> <translation>Impossible de lire %1: (%2)</translation> </message> <message> <location filename="../src/editor/io/FileParser.cpp" line="101"/> <source>Saving failed for project %1: %2</source> <translation>La sauvegarde du projet à échouée %1 : %2</translation> </message> <message> <location filename="../src/editor/io/FileParser.cpp" line="107"/> <source>Cannot save file %1: (%2)</source> <translation>Impossible de sauver le fichier%1: (%2) </translation> </message> <message> <location filename="../src/editor/io/FileParser.cpp" line="122"/> <source>Parse error at line %1, column %2: (%3)</source> <translation>Erreur d&apos;analyse ligne %1, colonne 
%2: (%3) </translation> </message> <message> <location filename="../src/editor/io/FileParser.cpp" line="129"/> <source>The file is not a &quot;Hotshots&quot; file!</source> <translation>Ce n&apos;est pas un fichier HotShots !</translation> </message> <message> <location filename="../src/editor/io/FileParser.cpp" line="134"/> <source>Configuration has been automatically updated!</source> <translation>La configuration a été automatiquement mise à jour !</translation> </message> <message> <location filename="../src/editor/io/FileParser.cpp" line="198"/> <source>Unknown version of hotshots file (%1).</source> <translation>Version inconnue (%1). </translation> </message> </context> <context> <name>FreeImageHostingUploader</name> <message> <location filename="../src/uploaders/FreeImageHostingUploader.cpp" line="84"/> <source>%1 transfert end without error.</source> <translation>Le transfert %1 s&apos;est terminé sans erreur.</translation> </message> <message> <location filename="../src/uploaders/FreeImageHostingUploader.cpp" line="88"/> <source>%1 transfert end with error! (%2)</source> <translation>Le transfert %1 s&apos;est terminé avec une erreur ! (%2)</translation> </message> </context> <context> <name>FreehandGrabber</name> <message> <location filename="../src/FreehandGrabber.cpp" line="150"/> <source>Select a region using the mouse. To take the snapshot, press the Enter key or double click. Press Esc to quit.</source> <translation>Sélectionner une région en utilisant la souris. Pour effectuer la capture d&apos;écran, appuyer sur &lt;Entrée&gt; ou double cliquer. Appuyer sur &lt;Esc&gt; pour quitter.</translation> </message> </context> <context> <name>FtpUploader</name> <message> <location filename="../src/uploaders/FtpUploader.cpp" line="66"/> <source>Unable to connect to the FTP server at %1. Please check that the hostname is correct.</source> <translation>Impossible de se connecter au serveur FTP à %1. 
Merci de vérifier si le nom du serveur est correct.</translation> </message> <message> <location filename="../src/uploaders/FtpUploader.cpp" line="70"/> <source>Logged onto %1.</source> <translation>Connecté sur %1.</translation> </message> <message> <location filename="../src/uploaders/FtpUploader.cpp" line="76"/> <source>Canceled upload of %1</source> <translation>Arrêt du téléchargement de %1</translation> </message> <message> <location filename="../src/uploaders/FtpUploader.cpp" line="79"/> <source>Uploaded successfully onto %1.</source> <translation>Envoi réussi sur %1.</translation> </message> <message> <location filename="../src/uploaders/FtpUploader.cpp" line="91"/> <source>%1 is available</source> <translation>%1 est disponible</translation> </message> <message> <location filename="../src/uploaders/FtpUploader.cpp" line="97"/> <source>%1 is&apos;nt available on server %2</source> <translation>%1 n&apos;est pas accessible sur le serveur %2</translation> </message> <message> <location filename="../src/uploaders/FtpUploader.cpp" line="227"/> <source>%1 transfert end with error!</source> <translation>Le transfert de %1 s&apos;est terminé avec une erreur !</translation> </message> <message> <location filename="../src/uploaders/FtpUploader.cpp" line="229"/> <source>%1 transfert end without error.</source> <translation>Le transfert %1 s&apos;est terminé sans erreur.</translation> </message> </context> <context> <name>FtpUploaderSettingsClass</name> <message> <location filename="../ui/FtpUploaderSettings.ui" line="14"/> <source>FtpUploaderSettings</source> <translation>Paramètres FTP</translation> </message> <message> <location filename="../ui/FtpUploaderSettings.ui" line="20"/> <source>Initial FTP directory</source> <translation>Répertoire de base distant</translation> </message> <message> <location filename="../ui/FtpUploaderSettings.ui" line="84"/> <source>@filename@ is the uploaded file</source> <translation>@filename@ est le fichier téléchargé</translation> 
</message> <message> <location filename="../ui/FtpUploaderSettings.ui" line="91"/> <source>Username:</source> <translation>Nom d&apos;utilisateur :</translation> </message> <message> <location filename="../ui/FtpUploaderSettings.ui" line="98"/> <source>Server:</source> <translation>Serveur :</translation> </message> <message> <location filename="../ui/FtpUploaderSettings.ui" line="105"/> <source>FTP password</source> <translation>Mot de passe FTP</translation> </message> <message> <location filename="../ui/FtpUploaderSettings.ui" line="134"/> <source>Password:</source> <translation>Mot de passe :</translation> </message> <message> <location filename="../ui/FtpUploaderSettings.ui" line="141"/> <source>Directory:</source> <translation>Répertoire :</translation> </message> <message> <location filename="../ui/FtpUploaderSettings.ui" line="148"/> <source>Show/Hide password in interface</source> <translation>Afficher/Cacher le mot de passe dans l&apos;interface</translation> </message> <message> <location filename="../ui/FtpUploaderSettings.ui" line="151"/> <source>Show password</source> <translation>Afficher le mot de passe</translation> </message> <message> <location filename="../ui/FtpUploaderSettings.ui" line="158"/> <source>FTP username</source> <translation>Nom d&apos;utilisateur FTP</translation> </message> <message> <location filename="../ui/FtpUploaderSettings.ui" line="168"/> <source>Define result url</source> <translation>Définir l&apos;URL résultat</translation> </message> <message> <location filename="../ui/FtpUploaderSettings.ui" line="178"/> <source>ftp://whatyouwant/@filename@</source> <translation>ftp://cequevousvoulez/@filename@</translation> </message> <message> <location filename="../ui/FtpUploaderSettings.ui" line="191"/> <source>FTP port</source> <translation>Port FTP</translation> </message> <message> <location filename="../ui/FtpUploaderSettings.ui" line="201"/> <source>FTP Server name</source> <translation>Nom du serveur FTP</translation> 
</message> <message> <location filename="../ui/FtpUploaderSettings.ui" line="211"/> <source>Port:</source> <translation>Port :</translation> </message> </context> <context> <name>GrabManager</name> <message> <location filename="../src/GrabManager.cpp" line="222"/> <source>Some Windows Managers applies restrictions on the region and freehand capture tools, so the output can be a bit buggy. If you have such problem, a workaround can be to choose full screen capture then crop the needed part in the editor.</source> <translation>Certains gestionnaires de fenêtres appliquent des restrictions sur les outils de capture de région et à main levée, de sorte que la sortie peut être un peu bogué. Si vous rencontrez ce problème, une solution peut être de choisir la capture en plein écran, puis couper la partie nécessaire dans l&apos;éditeur.</translation> </message> </context> <context> <name>ImageshackUploader</name> <message> <location filename="../src/uploaders/ImageshackUploader.cpp" line="108"/> <source>%1 transfert end without error.</source> <translation>Le transfert %1 s&apos;est terminé sans erreur.</translation> </message> <message> <location filename="../src/uploaders/ImageshackUploader.cpp" line="112"/> <source>%1 transfert end with error! (%2)</source> <translation>Le transfert %1 s&apos;est terminé avec une erreur ! (%2)</translation> </message> <message> <location filename="../src/uploaders/ImageshackUploader.cpp" line="147"/> <source>%1 service reply with error (%2)</source> <translation>Le service %1 répond avec une erreur (%2)</translation> </message> </context> <context> <name>ImgurUploader</name> <message> <location filename="../src/uploaders/ImgurUploader.cpp" line="92"/> <source>%1 transfert end without error.</source> <translation>Le transfert %1 s&apos;est terminé sans erreur.</translation> </message> <message> <location filename="../src/uploaders/ImgurUploader.cpp" line="96"/> <source>%1 transfert end with error! 
(%2)</source> <translation>Le transfert %1 s&apos;est terminé avec une erreur ! (%2)</translation> </message> <message> <location filename="../src/uploaders/ImgurUploader.cpp" line="116"/> <source>An error occurred during parsing of service response</source> <translation>Une erreur est intervenue lors de la lecture de la réponse du service</translation> </message> </context> <context> <name>MainWindow</name> <message> <location filename="../src/MainWindow.cpp" line="146"/> <source>Save Directory</source> <translation>Répertoire de sauvegarde</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="179"/> <source>Register of %1 uploaders</source> <translation>Enregistrement de %1 téléchargeurs</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="219"/> <source>Can&apos;t create storage location, check why?! (%1)</source> <translation>Impossible de créer le répertoire de stockage, recherchez pourquoi ?! (%1)</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="600"/> <source>No post effect</source> <translation>Aucun post-effet</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="601"/> <source>Simple drop shadow</source> <translation>Ombre portée simple</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="602"/> <source>Drop shadow with borders</source> <translation>Ombre portée avec bordures</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="603"/> <source>Progressive opacity</source> <translation>Opacité progressive</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="604"/> <source>Torn edge</source> <translation>Bords déchirés</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="639"/> <source>Main application</source> <translation>Application principale</translation> </message> <message> <location 
filename="../src/MainWindow.cpp" line="640"/> <source>Annotation editor</source> <translation>Editeur d&apos;annotations</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="875"/> <location filename="../src/MainWindow.cpp" line="905"/> <source>Upload the image to the web</source> <translation>Télécharger l&apos;image vers le Web</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="875"/> <source>No default uploading web service defined!</source> <translation>Aucun service de téléchargement spécifié !</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="895"/> <source>Uploader %1</source> <translation>Téléchargeur %1</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="896"/> <source>The default uploader (%1) does not appear correctly configured, no value given for required parameters!!</source> <translation>Le téléchargeur par défaut (%1) ne semble pas correctement configuré, aucune valeur donnée pour les champs requis !!</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="906"/> <source>Upload image %1 to %2 service?</source> <translation>Envoyé l&apos;image %1 vers le service %2 ?</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="949"/> <source>Print Document</source> <translation>Imprimer le document</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="977"/> <source>Save File</source> <translation>Sauver le fichier</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="979"/> <source>Images (*.%1)</source> <translation>Images (*.%1)</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="1010"/> <source>Save failed</source> <translation>La sauvegarde à échouée</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="1010"/> <source>Error saving file</source> 
<translation>Erreur lors de la sauvegarde</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="1027"/> <source>Systray</source> <translation>Zone de notification</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="1028"/> <source>The program will keep running in the system tray. To terminate the program, choose &lt;b&gt;Quit&lt;/b&gt; in the context menu of the system tray entry.</source> <translation>Le programme va rester actif dans la zone de notification. Pour sortir définitivement, choisir &lt;b&gt;Quitter&lt;/b&gt; dans le menu contextuel de la zone de notification.</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="1352"/> <source>Snapshot was successfully uploaded to %1 </source> <translation>La capture d&apos;écran a été téléchargée avec succès vers %1</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="1354"/> <source>and url copied to clipboard</source> <translation>et l&apos;URL copié vers le presse papier</translation> </message> <message> <location filename="../src/MainWindow.cpp" line="1380"/> <source>message from other instance.</source> <translation>message d&apos;une autre instance.</translation> </message> </context> <context> <name>MainWindowClass</name> <message> <location filename="../ui/MainWindow.ui" line="17"/> <source>MainWindow</source> <translation> </translation> </message> <message> <location filename="../ui/MainWindow.ui" line="34"/> <location filename="../ui/MainWindow.ui" line="98"/> <source>Take a new snapshot</source> <translation>Prendre un nouveau cliché</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="79"/> <source>Uploaded URL</source> <translation>URL téléchargée</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="111"/> <source>Parameters</source> <translation>Paramètres</translation> </message> <message> <location 
filename="../ui/MainWindow.ui" line="120"/> <source>Output path:</source> <translation>Répertoire de sortie :</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="140"/> <source>Cap&amp;ture mode:</source> <translation>Mode de capture :</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="160"/> <source>After snapshot:</source> <translation>Après la capture :</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="168"/> <source>Do nothing</source> <translation>Ne rien faire</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="173"/> <source>Auto save image</source> <translation>Sauver automatiquement l&apos;image</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="178"/> <source>Save image (ask user)</source> <translation>Sauver l&apos;image (demande à l&apos;utilisateur)</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="183"/> <source>Launch editor</source> <translation>Lancer l&apos;éditeur</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="197"/> <source>Snapshot delay in seconds</source> <translation>Délai en secondes</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="203"/> <source>No delay</source> <translation>Aucun délai</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="213"/> <source>Snapshot Delay (s):</source> <translation>Délai (s) :</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="269"/> <source>Quit</source> <translation>Quitter</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="281"/> <source>About</source> <translation>A propos</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="293"/> <source>Preferences</source> <translation>Préférences</translation> </message> <message> <location 
filename="../ui/MainWindow.ui" line="305"/> <source>Save Snapshot</source> <translation>Sauver la capture d&apos;écran</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="308"/> <source>Save snapshot</source> <translation>Sauver la capture d&apos;écran</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="320"/> <source>Show Interface</source> <translation>Afficher l&apos;interface</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="323"/> <source>Show interface</source> <translation>Afficher l&apos;interface</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="332"/> <source>Copy To Clipboard</source> <translation>Copie vers le presse-papier</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="335"/> <source>Copy to clipboard</source> <translation>Copie vers le presse-papier</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="347"/> <source>Open editor</source> <translation>Ouvrir l&apos;éditeur</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="350"/> <source>Open editor and add annotations</source> <translation>Ouvrir l&apos;éditeur et ajouter des annotations</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="362"/> <source>Grab Screen</source> <translation>Capturer l&apos;écran</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="365"/> <source>Grab screen (current)</source> <translation>Capturer l&apos;écran courant</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="377"/> <source>Grab all screens</source> <translation>Capturer tous les écrans</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="380"/> <source>Grab all screens (multi-monitors)</source> <translation>Capturer tous les écrans (multi-moniteurs)</translation> </message> 
<message> <location filename="../ui/MainWindow.ui" line="392"/> <source>Grab Window</source> <translation>Capturer la fenêtre</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="395"/> <source>Grab window</source> <translation>Capturer une fenêtre</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="407"/> <source>Grab Region</source> <translation>Capturer la région</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="410"/> <source>Grab region</source> <translation>Capturer une région</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="422"/> <source>Help</source> <translation>Aide</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="434"/> <source>Upload</source> <translation>Télécharger</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="437"/> <source>Upload the image to the web</source> <translation>Télécharger l&apos;image vers le Web</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="452"/> <location filename="../ui/MainWindow.ui" line="455"/> <source>Grab freehand region</source> <translation>Capturer une région à main levée</translation> </message> <message> <location filename="../ui/MainWindow.ui" line="467"/> <source>Print</source> <translation>Imprimer</translation> </message> </context> <context> <name>PaypalButton</name> <message> <location filename="../src/PaypalButton.cpp" line="177"/> <source>Donation</source> <translation>Support</translation> </message> <message> <location filename="../src/PaypalButton.cpp" line="178"/> <source>Make a donation via Paypal</source> <translation>Faire un don via Paypal</translation> </message> </context> <context> <name>QObject</name> <message> <location filename="../src/AboutDialog.cpp" line="282"/> <source>%1 found</source> <translation>%1 trouvé</translation> </message> <message> <location 
filename="../src/main.cpp" line="59"/> <source>Screenshot management and annotation tool</source> <translation>Gestionnaire de captures d&apos;écran et d&apos;annotations</translation> </message> <message> <location filename="../src/main.cpp" line="60"/> <source>Usage: </source> <translation>Usage : </translation> </message> <message> <location filename="../src/main.cpp" line="60"/> <source>[options] [file]</source> <translation>[options] [fichier]</translation> </message> <message> <location filename="../src/main.cpp" line="62"/> <source>Following options are available:</source> <translation>Les options suivantes sont possibles :</translation> </message> <message> <location filename="../src/main.cpp" line="63"/> <source> --help : displays this help.</source> <translation> --help : Affichage de cette aide.</translation> </message> <message> <location filename="../src/main.cpp" line="64"/> <source> --reset-config : clear the saved preference parameters.</source> <translation> --reset-config : Effacement des paramètres de configuration sauvegardés.</translation> </message> <message> <location filename="../src/main.cpp" line="65"/> <source> --no-singleinstance : enable the use of multiple instance of program (not recommended).</source> <translation> --no-singleinstance : autorise l&apos;utilisation de plusieurs instances du programme (non recommandé).</translation> </message> <message> <location filename="../src/main.cpp" line="66"/> <source> --portable : use settings file location near the executable (for portable use).</source> <translation>--portable : utiliser le fichier de configuration associé à l’exécutable (pour une utilisation portable).</translation> </message> <message> <location filename="../src/main.cpp" line="67"/> <source> file : file to load in the editor, can be a .hot file or an image file.</source> <translation>fichier: fichier à charger dans l&apos;éditeur, peut être un .hot ou un fichier image.</translation> </message> <message> <location 
filename="../src/main.cpp" line="136"/> <source>%1 is already running!!</source> <translation>%1 est déjà en fonctionnement !!</translation> </message> <message> <location filename="../src/MiscFunctions.cpp" line="223"/> <source>Translations path %1</source> <translation>Chemin des traductions %1</translation> </message> <message> <location filename="../src/MiscFunctions.cpp" line="263"/> <source>Setting language to: %1</source> <translation>Positionner le language à : %1</translation> </message> <message> <location filename="../src/MiscFunctions.cpp" line="284"/> <location filename="../src/MiscFunctions.cpp" line="305"/> <source>Successfully loaded data from %1</source> <translation>Chargement réussi des données de %1</translation> </message> <message> <location filename="../src/MiscFunctions.cpp" line="310"/> <source>Failed to load data from %1</source> <translation>Échec de chargement des données de %1</translation> </message> <message> <location filename="../src/editor/io/IOHelper.cpp" line="189"/> <source>stringToVariant(): Error in digit (%1).</source> <translation>stringToVariant(): Erreur sur les nombres (%1).</translation> </message> <message> <location filename="../src/editor/io/IOHelper.cpp" line="266"/> <source>&lt;no image&gt;</source> <translation>&lt;aucune image&gt;</translation> </message> </context> <context> <name>QwwTwoColorIndicator</name> <message> <location filename="../src/3rdparty/wwWidgets/qwwtwocolorindicator.cpp" line="286"/> <source>Choose foreground color</source> <translation>Choisir la couleur de premier plan</translation> </message> <message> <location filename="../src/3rdparty/wwWidgets/qwwtwocolorindicator.cpp" line="298"/> <source>Choose background color</source> <translation>Choisir la couleur d&apos;arrière plan</translation> </message> </context> <context> <name>RegionGrabber</name> <message> <location filename="../src/RegionGrabber.cpp" line="217"/> <source>Select a region using the mouse. 
To take the snapshot, press the Enter key or double click. Press Esc to quit.</source> <translation>Selectionner une région en utilisant la souris. Pour effectuer la capture d&apos;écran, appuyer sur &lt;Entrée&gt; ou double cliquer. Appuyer sur &lt;Esc&gt; pour quitter.</translation> </message> </context> <context> <name>RescaleDialogClass</name> <message> <location filename="../ui/RescaleDialog.ui" line="17"/> <source>Rescale snapshot</source> <translation>Mettre à l&apos;échelle la capture d&apos;écran</translation> </message> <message> <location filename="../ui/RescaleDialog.ui" line="26"/> <source>Original size:</source> <translation>Taille originale :</translation> </message> <message> <location filename="../ui/RescaleDialog.ui" line="53"/> <source>Target size:</source> <translation>Taille cible :</translation> </message> <message> <location filename="../ui/RescaleDialog.ui" line="67"/> <source>Scale snapshot</source> <translation>Mettre à l&apos;échelle la capture d&apos;écran</translation> </message> <message> <location filename="../ui/RescaleDialog.ui" line="76"/> <source>%</source> <translation>%</translation> </message> <message> <location filename="../ui/RescaleDialog.ui" line="86"/> <source>Size</source> <translation>Taille</translation> </message> <message> <location filename="../ui/RescaleDialog.ui" line="141"/> <source>Keep aspect ratio</source> <translation>Conserver l&apos;aspect</translation> </message> </context> <context> <name>WidgetBrush</name> <message> <location filename="../src/editor/widgets/WidgetBrush.cpp" line="182"/> <source>1x1</source> <translation>1x1</translation> </message> <message> <location filename="../src/editor/widgets/WidgetBrush.cpp" line="190"/> <source>Circle</source> <translation>Cercle</translation> </message> <message> <location filename="../src/editor/widgets/WidgetBrush.cpp" line="193"/> <source>Square</source> <translation>Carré</translation> </message> <message> <location 
filename="../src/editor/widgets/WidgetBrush.cpp" line="198"/> <source>Slash</source> <translation>Barre oblique</translation> </message> <message> <location filename="../src/editor/widgets/WidgetBrush.cpp" line="203"/> <source>Backslash</source> <translation>Antislash</translation> </message> </context> <context> <name>WidgetColor</name> <message> <location filename="../src/editor/widgets/WidgetColor.cpp" line="79"/> <source>Black</source> <translation>Noir</translation> </message> <message> <location filename="../src/editor/widgets/WidgetColor.cpp" line="82"/> <source>White</source> <translation>Blanc</translation> </message> <message> <location filename="../src/editor/widgets/WidgetColor.cpp" line="85"/> <source>Red</source> <translation>Rouge</translation> </message> <message> <location filename="../src/editor/widgets/WidgetColor.cpp" line="88"/> <source>Green</source> <translation>Vert</translation> </message> <message> <location filename="../src/editor/widgets/WidgetColor.cpp" line="91"/> <source>Blue</source> <translation>Bleu</translation> </message> <message> <location filename="../src/editor/widgets/WidgetColor.cpp" line="94"/> <source>Cyan</source> <translation>Cyan</translation> </message> <message> <location filename="../src/editor/widgets/WidgetColor.cpp" line="97"/> <source>Magenta</source> <translation>Magenta</translation> </message> <message> <location filename="../src/editor/widgets/WidgetColor.cpp" line="100"/> <source>Orange</source> <translation>Orange</translation> </message> <message> <location filename="../src/editor/widgets/WidgetColor.cpp" line="103"/> <source>Gray</source> <translation>Gris</translation> </message> </context> <context> <name>WidgetDashStyle</name> <message> <location filename="../src/editor/widgets/WidgetDashStyle.cpp" line="104"/> <source>Solid Line</source> <translation>Ligne pleine</translation> </message> <message> <location filename="../src/editor/widgets/WidgetDashStyle.cpp" line="108"/> <source>Dash 
pattern</source> <translation>Motif de pointillé</translation> </message> <message> <location filename="../src/editor/widgets/WidgetDashStyle.cpp" line="112"/> <source>Dot pattern</source> <translation>Motif de points</translation> </message> <message> <location filename="../src/editor/widgets/WidgetDashStyle.cpp" line="116"/> <source>Dash Dot pattern</source> <translation>motif point-trait</translation> </message> <message> <location filename="../src/editor/widgets/WidgetDashStyle.cpp" line="120"/> <source>Dash Dot Dot pattern</source> <translation>Motif point-point-trait</translation> </message> </context> <context> <name>WidgetFillStyle</name> <message> <location filename="../src/editor/widgets/WidgetFillStyle.cpp" line="113"/> <source>No Fill</source> <translation>Aucun remplissage</translation> </message> <message> <location filename="../src/editor/widgets/WidgetFillStyle.cpp" line="117"/> <source>Fill with transparent background color</source> <translation>Remplissage avec couleur translucide</translation> </message> <message> <location filename="../src/editor/widgets/WidgetFillStyle.cpp" line="121"/> <source>Fill with background color</source> <translation>Remplissage avec couleur</translation> </message> </context> </TS><|fim▁end|>
<location filename="../ui/AboutDialog.ui" line="304"/> <source>Reset preferences</source> <translation>Effacer toutes les préférences</translation>
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ extern crate hashglobe; extern crate smallvec; #[cfg(feature = "known_system_malloc")] use hashglobe::alloc; use hashglobe::FailedAllocationError; use smallvec::Array; use smallvec::SmallVec; use std::vec::Vec; pub trait FallibleVec<T> { /// Append |val| to the end of |vec|. Returns Ok(()) on success, /// Err(reason) if it fails, with |reason| describing the failure. fn try_push(&mut self, value: T) -> Result<(), FailedAllocationError>; } ///////////////////////////////////////////////////////////////// // Vec impl<T> FallibleVec<T> for Vec<T> { #[inline(always)] fn try_push(&mut self, val: T) -> Result<(), FailedAllocationError> { #[cfg(feature = "known_system_malloc")] { if self.capacity() == self.len() { try_double_vec(self)?; debug_assert!(self.capacity() > self.len()); }<|fim▁hole|> Ok(()) } } // Double the capacity of |vec|, or fail to do so due to lack of memory. // Returns Ok(()) on success, Err(..) on failure. #[cfg(feature = "known_system_malloc")] #[inline(never)] #[cold] fn try_double_vec<T>(vec: &mut Vec<T>) -> Result<(), FailedAllocationError> { use std::mem; let old_ptr = vec.as_mut_ptr(); let old_len = vec.len(); let old_cap: usize = vec.capacity(); let new_cap: usize = if old_cap == 0 { 4 } else { old_cap .checked_mul(2) .ok_or(FailedAllocationError::new("capacity overflow for Vec"))? 
}; let new_size_bytes = new_cap .checked_mul(mem::size_of::<T>()) .ok_or(FailedAllocationError::new("capacity overflow for Vec"))?; let new_ptr = unsafe { if old_cap == 0 { alloc::alloc(new_size_bytes, 0) } else { alloc::realloc(old_ptr as *mut u8, new_size_bytes) } }; if new_ptr.is_null() { return Err(FailedAllocationError::new( "out of memory when allocating Vec", )); } let new_vec = unsafe { Vec::from_raw_parts(new_ptr as *mut T, old_len, new_cap) }; mem::forget(mem::replace(vec, new_vec)); Ok(()) } ///////////////////////////////////////////////////////////////// // SmallVec impl<T: Array> FallibleVec<T::Item> for SmallVec<T> { #[inline(always)] fn try_push(&mut self, val: T::Item) -> Result<(), FailedAllocationError> { #[cfg(feature = "known_system_malloc")] { if self.capacity() == self.len() { try_double_small_vec(self)?; debug_assert!(self.capacity() > self.len()); } } self.push(val); Ok(()) } } // Double the capacity of |svec|, or fail to do so due to lack of memory. // Returns Ok(()) on success, Err(..) on failure. #[cfg(feature = "known_system_malloc")] #[inline(never)] #[cold] fn try_double_small_vec<T>(svec: &mut SmallVec<T>) -> Result<(), FailedAllocationError> where T: Array, { use std::mem; use std::ptr::copy_nonoverlapping; let old_ptr = svec.as_mut_ptr(); let old_len = svec.len(); let old_cap: usize = svec.capacity(); let new_cap: usize = if old_cap == 0 { 4 } else { old_cap .checked_mul(2) .ok_or(FailedAllocationError::new("capacity overflow for SmallVec"))? }; // This surely shouldn't fail, if |old_cap| was previously accepted as a // valid value. But err on the side of caution. 
let old_size_bytes = old_cap .checked_mul(mem::size_of::<T>()) .ok_or(FailedAllocationError::new("capacity overflow for SmallVec"))?; let new_size_bytes = new_cap .checked_mul(mem::size_of::<T>()) .ok_or(FailedAllocationError::new("capacity overflow for SmallVec"))?; let new_ptr; if svec.spilled() { // There's an old block to free, and, presumably, old contents to // copy. realloc takes care of both aspects. unsafe { new_ptr = alloc::realloc(old_ptr as *mut u8, new_size_bytes); } } else { // There's no old block to free. There may be old contents to copy. unsafe { new_ptr = alloc::alloc(new_size_bytes, 0); if !new_ptr.is_null() && old_size_bytes > 0 { copy_nonoverlapping(old_ptr as *const u8, new_ptr as *mut u8, old_size_bytes); } } } if new_ptr.is_null() { return Err(FailedAllocationError::new( "out of memory when allocating SmallVec", )); } let new_vec = unsafe { Vec::from_raw_parts(new_ptr as *mut T::Item, old_len, new_cap) }; let new_svec = SmallVec::from_vec(new_vec); mem::forget(mem::replace(svec, new_svec)); Ok(()) }<|fim▁end|>
} self.push(val);
<|file_name|>LogDBIngester.py<|end_file_name|><|fim▁begin|><|fim▁hole|>#!/usr/local/bin/python # check python version import sys ver_info = sys.version_info # parse commandlines if ver_info[0] < 3 and ver_info[1] < 7: from optparse import OptionParser parser = OptionParser() parser.add_option("-f", "--file", dest="filename", help="input log file", metavar="LOG_FILE") # parser.add_option("-d", "--directory", dest="dirname", help="input directory with log files", metavar="LOG_DIR") parser.add_option("-t", "--dbtype", dest="dbtype", help="database type", default="mongodb", metavar="DB_TYPE") (options, args) = parser.parse_args(); else: import argparse parser = argparse.ArgumentParser(description="Log to database ingester") parser.add_argument("-f, --file", dest="filename", help="input log file", metavar="LOG_FILE") # parser.add_argument("-d, --directory", dest="dirname", help="input directory with log files", metavar="LOG_DIR") parser.add_argument("-t, --dbtype", dest="dbtype", help="database type", default="mongodb", metavar="DB_TYPE") options = parser.parse_args() print "file {0} ".format(options.filename) # print "dirname {0} ".format(options.dirname) print "dbtype {0}".format(options.dbtype) if options.dbtype == "mongodb": from DBDriver.MongoDBDriver import MongoDBDriver dbingester = MongoDBDriver(); elif options.dbtype == "cassandra": from DBDriver.CassandraDBDriver import CassandraDBDriver dbingester = CassandraDBDriver(); else: print "ERROR: unsupported db type {0}".format(options.dbtype); sys.exit(2); import re # open the file and iterate with open(options.filename) as f: # read the first line line = f.readline() if re.match("v2.1", line): from LogParser.LogParsers import LogParserV2_1 lparser = LogParserV2_1(options.filename) elif re.match("v2", line): from LogParser.LogParsers import LogParserV2 lparser = LogParserV2_1(options.filename) else: print "UNSUPPORTED LOG VERSION: {0}".format(line) sys.exit(1) for line in f: lparser.parseLine(line, 
dbingester)<|fim▁end|>
<|file_name|>JpsProjectSerializationTest.java<|end_file_name|><|fim▁begin|>/* * Copyright 2000-2012 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jetbrains.jps.model.serialization; import com.intellij.openapi.application.PathManager; import com.intellij.openapi.util.io.FileUtil; import com.intellij.testFramework.PlatformTestUtil; import org.jdom.Element; import org.jetbrains.jps.model.JpsDummyElement; import org.jetbrains.jps.model.JpsEncodingConfigurationService; import org.jetbrains.jps.model.JpsEncodingProjectConfiguration; import org.jetbrains.jps.model.artifact.JpsArtifactService; import org.jetbrains.jps.model.java.*; import org.jetbrains.jps.model.library.JpsLibrary; import org.jetbrains.jps.model.library.JpsOrderRootType; import org.jetbrains.jps.model.library.sdk.JpsSdkReference; import org.jetbrains.jps.model.module.*; import org.jetbrains.jps.model.serialization.library.JpsLibraryTableSerializer; import org.jetbrains.jps.model.serialization.module.JpsModuleRootModelSerializer; import java.io.File; import java.io.IOException; import java.util.Collections; import java.util.List; /** * @author nik */ public class JpsProjectSerializationTest extends JpsSerializationTestCase { public static final String SAMPLE_PROJECT_PATH = "/jps/model-serialization/testData/sampleProject"; public void testLoadProject() { loadProject(SAMPLE_PROJECT_PATH); String baseDirPath = getTestDataFileAbsolutePath(SAMPLE_PROJECT_PATH); 
assertTrue(FileUtil.filesEqual(new File(baseDirPath), JpsModelSerializationDataService.getBaseDirectory(myProject))); assertEquals("sampleProjectName", myProject.getName()); List<JpsModule> modules = myProject.getModules(); assertEquals(3, modules.size()); JpsModule main = modules.get(0); assertEquals("main", main.getName()); JpsModule util = modules.get(1); assertEquals("util", util.getName()); JpsModule xxx = modules.get(2); assertEquals("xxx", xxx.getName()); assertTrue(FileUtil.filesEqual(new File(baseDirPath, "util"), JpsModelSerializationDataService.getBaseDirectory(util))); List<JpsLibrary> libraries = myProject.getLibraryCollection().getLibraries(); assertEquals(3, libraries.size()); List<JpsDependencyElement> dependencies = util.getDependenciesList().getDependencies(); assertEquals(4, dependencies.size()); JpsSdkDependency sdkDependency = assertInstanceOf(dependencies.get(0), JpsSdkDependency.class); assertSame(JpsJavaSdkType.INSTANCE, sdkDependency.getSdkType()); JpsSdkReference<?> reference = sdkDependency.getSdkReference(); assertNotNull(reference); assertEquals("1.5", reference.getSdkName()); assertInstanceOf(dependencies.get(1), JpsModuleSourceDependency.class); assertInstanceOf(dependencies.get(2), JpsLibraryDependency.class); assertInstanceOf(dependencies.get(3), JpsLibraryDependency.class); JpsSdkDependency inheritedSdkDependency = assertInstanceOf(main.getDependenciesList().getDependencies().get(0), JpsSdkDependency.class); JpsSdkReference<?> projectSdkReference = inheritedSdkDependency.getSdkReference(); assertNotNull(projectSdkReference); assertEquals("1.6", projectSdkReference.getSdkName()); assertEquals(getUrl("xxx/output"), JpsJavaExtensionService.getInstance().getOutputUrl(xxx, true)); assertEquals(getUrl("xxx/output"), JpsJavaExtensionService.getInstance().getOutputUrl(xxx, false)); } public void testFileBasedProjectNameAndBaseDir() { String relativePath = "/jps/model-serialization/testData/run-configurations/run-configurations.ipr"; String 
absolutePath = getTestDataFileAbsolutePath(relativePath); loadProject(relativePath); assertEquals("run-configurations", myProject.getName()); assertTrue(FileUtil.filesEqual(new File(absolutePath).getParentFile(), JpsModelSerializationDataService.getBaseDirectory(myProject))); } public void testDirectoryBasedProjectName() { loadProject("/jps/model-serialization/testData/run-configurations-dir"); assertEquals("run-configurations-dir", myProject.getName()); } public void testImlUnderDotIdea() { loadProject("/jps/model-serialization/testData/imlUnderDotIdea"); JpsModule module = assertOneElement(myProject.getModules()); JpsModuleSourceRoot root = assertOneElement(module.getSourceRoots()); assertEquals(getUrl("src"), root.getUrl()); } public void testProjectSdkWithoutType() { loadProject("/jps/model-serialization/testData/projectSdkWithoutType/projectSdkWithoutType.ipr"); JpsSdkReference<JpsDummyElement> reference = myProject.getSdkReferencesTable().getSdkReference(JpsJavaSdkType.INSTANCE); assertNotNull(reference); assertEquals("1.6", reference.getSdkName()); } public void testInvalidDependencyScope() { loadProject("/jps/model-serialization/testData/invalidDependencyScope/invalidDependencyScope.ipr"); JpsModule module = assertOneElement(myProject.getModules()); List<JpsDependencyElement> dependencies = module.getDependenciesList().getDependencies(); assertEquals(3, dependencies.size()); JpsJavaDependencyExtension extension = JpsJavaExtensionService.getInstance().getDependencyExtension(dependencies.get(2)); assertNotNull(extension); assertEquals(JpsJavaDependencyScope.COMPILE, extension.getScope()); } public void testDuplicatedModuleLibrary() { loadProject("/jps/model-serialization/testData/duplicatedModuleLibrary/duplicatedModuleLibrary.ipr"); JpsModule module = assertOneElement(myProject.getModules()); List<JpsDependencyElement> dependencies = module.getDependenciesList().getDependencies(); assertEquals(4, dependencies.size()); JpsLibrary lib1 = 
assertInstanceOf(dependencies.get(2), JpsLibraryDependency.class).getLibrary(); assertNotNull(lib1); assertSameElements(lib1.getRootUrls(JpsOrderRootType.COMPILED), getUrl("data/lib1")); JpsLibrary lib2 = assertInstanceOf(dependencies.get(3), JpsLibraryDependency.class).getLibrary(); assertNotSame(lib1, lib2); assertNotNull(lib2); assertSameElements(lib2.getRootUrls(JpsOrderRootType.COMPILED), getUrl("data/lib2")); } public void testDotIdeaUnderDotIdea() { loadProject("/jps/model-serialization/testData/matryoshka/.idea"); JpsJavaProjectExtension extension = JpsJavaExtensionService.getInstance().getProjectExtension(myProject); assertNotNull(extension); assertEquals(getUrl("out"), extension.getOutputUrl()); } public void testLoadEncoding() { loadProject(SAMPLE_PROJECT_PATH); JpsEncodingConfigurationService service = JpsEncodingConfigurationService.getInstance(); assertEquals("UTF-8", service.getProjectEncoding(myModel)); JpsEncodingProjectConfiguration configuration = service.getEncodingConfiguration(myProject); assertNotNull(configuration); assertEquals("UTF-8", configuration.getProjectEncoding()); assertEquals("windows-1251", configuration.getEncoding(new File(getAbsolutePath("util")))); assertEquals("windows-1251", configuration.getEncoding(new File(getAbsolutePath("util/foo/bar/file.txt")))); assertEquals("UTF-8", configuration.getEncoding(new File(getAbsolutePath("other")))); } public void testResourceRoots() { String projectPath = "/jps/model-serialization/testData/resourceRoots/"; loadProject(projectPath + "resourceRoots.ipr"); JpsModule module = assertOneElement(myProject.getModules()); List<JpsModuleSourceRoot> roots = module.getSourceRoots(); assertSame(JavaSourceRootType.SOURCE, roots.get(0).getRootType()); checkResourceRoot(roots.get(1), false, ""); checkResourceRoot(roots.get(2), true, ""); checkResourceRoot(roots.get(3), true, "foo"); doTestSaveModule(module, projectPath + "resourceRoots.iml"); } private static void checkResourceRoot(JpsModuleSourceRoot 
root, boolean forGenerated, String relativeOutput) { assertSame(JavaResourceRootType.RESOURCE, root.getRootType()); JavaResourceRootProperties properties = root.getProperties(JavaResourceRootType.RESOURCE);<|fim▁hole|> assertEquals(forGenerated, properties.isForGeneratedSources()); assertEquals(relativeOutput, properties.getRelativeOutputPath()); } public void testSaveProject() { loadProject(SAMPLE_PROJECT_PATH); List<JpsModule> modules = myProject.getModules(); doTestSaveModule(modules.get(0), SAMPLE_PROJECT_PATH + "/main.iml"); doTestSaveModule(modules.get(1), SAMPLE_PROJECT_PATH + "/util/util.iml"); //tod[nik] remember that test output root wasn't specified and doesn't save it to avoid unnecessary modifications of iml files //doTestSaveModule(modules.get(2), "xxx/xxx.iml"); File[] libs = getFileInSampleProject(".idea/libraries").listFiles(); assertNotNull(libs); for (File libFile : libs) { String libName = FileUtil.getNameWithoutExtension(libFile); JpsLibrary library = myProject.getLibraryCollection().findLibrary(libName); assertNotNull(libName, library); doTestSaveLibrary(libFile, libName, library); } } private void doTestSaveLibrary(File libFile, String libName, JpsLibrary library) { try { Element actual = new Element("library"); JpsLibraryTableSerializer.saveLibrary(library, actual, libName); JpsMacroExpander macroExpander = JpsProjectLoader.createProjectMacroExpander(Collections.<String, String>emptyMap(), getFileInSampleProject("")); Element rootElement = JpsLoaderBase.loadRootElement(libFile, macroExpander); Element expected = rootElement.getChild("library"); PlatformTestUtil.assertElementsEqual(expected, actual); } catch (IOException e) { throw new RuntimeException(e); } } private void doTestSaveModule(JpsModule module, final String moduleFilePath) { try { Element actual = JDomSerializationUtil.createComponentElement("NewModuleRootManager"); JpsModuleRootModelSerializer.saveRootModel(module, actual); File imlFile = new 
File(getTestDataFileAbsolutePath(moduleFilePath)); Element rootElement = loadModuleRootTag(imlFile); Element expected = JDomSerializationUtil.findComponent(rootElement, "NewModuleRootManager"); PlatformTestUtil.assertElementsEqual(expected, actual); } catch (Exception e) { throw new RuntimeException(e); } } public File getFileInSampleProject(String relativePath) { return new File(getTestDataFileAbsolutePath(SAMPLE_PROJECT_PATH + "/" + relativePath)); } public void testLoadIdeaProject() { long start = System.currentTimeMillis(); loadProjectByAbsolutePath(PathManager.getHomePath()); assertTrue(myProject.getModules().size() > 0); System.out.println("JpsProjectSerializationTest: " + myProject.getModules().size() + " modules, " + myProject.getLibraryCollection().getLibraries().size() + " libraries and " + JpsArtifactService.getInstance().getArtifacts(myProject).size() + " artifacts loaded in " + (System.currentTimeMillis() - start) + "ms"); } }<|fim▁end|>
assertNotNull(properties);
<|file_name|>theme-loader.js<|end_file_name|><|fim▁begin|>/** * @file theme loader * * @desc 向每个.vue文件中注入样式相关的变量,不需要手动import * @author echaoo([email protected]) */ /* eslint-disable fecs-no-require, fecs-prefer-destructure */ 'use strict'; const theme = require('../../config/theme'); const loaderUtils = require('loader-utils'); const STYLE_TAG_REG = /(\<style.*?lang="styl(?:us)?".*?\>)([\S\s]*?)(\<\/style\>)/g; <|fim▁hole|> primary: '$blue.darken-2', accent: '$blue.accent-2', secondary: '$grey.darken-3', info: '$blue.base', warning: '$amber.base', error: '$red.accent-2', success: '$green.base' }, materialDesign: { 'bg-color': '#fff', 'fg-color': '#000', 'text-color': '#000', 'primary-text-percent': .87, 'secondary-text-percent': .54, 'disabledORhints-text-percent': .38, 'divider-percent': .12, 'active-icon-percent': .54, 'inactive-icon-percent': .38 } }; // 使用用户定义在config/theme.js中的变量覆盖默认值 let themeColor = Object.assign( {}, defaultVuetifyVariables.themeColor, theme.theme.themeColor ); // 最终输出的stylus hash(themeColor部分) let themeColorTemplate = ` $theme := { primary: ${themeColor.primary} accent: ${themeColor.accent} secondary: ${themeColor.secondary} info: ${themeColor.info} warning: ${themeColor.warning} error: ${themeColor.error} success: ${themeColor.success} } `; let materialDesign = Object.assign( {}, defaultVuetifyVariables.materialDesign, theme.theme.materialDesign ); let materialDesignTemplate = ` $material-custom := { bg-color: ${materialDesign['bg-color']} fg-color: ${materialDesign['fg-color']} text-color: ${materialDesign['text-color']} primary-text-percent: ${materialDesign['primary-text-percent']} secondary-text-percent: ${materialDesign['secondary-text-percent']} disabledORhints-text-percent: ${materialDesign['disabledORhints-text-percent']} divider-percent: ${materialDesign['divider-percent']} active-icon-percent: ${materialDesign['active-icon-percent']} inactive-icon-percent: ${materialDesign['inactive-icon-percent']} } $material-theme := 
$material-custom `; // 引入项目变量和vuetify中使用的颜色变量 let importVariablesTemplate = ` @import '~@/assets/styles/variables'; @import '~vuetify/src/stylus/settings/_colors'; `; let injectedTemplate = importVariablesTemplate + themeColorTemplate + materialDesignTemplate; module.exports = function (source) { this.cacheable(); let options = loaderUtils.getOptions(this); if (options && options.injectInVueFile) { // 向每一个.vue文件的<style>块中注入 return source.replace(STYLE_TAG_REG, `$1${injectedTemplate}$2$3`); } return injectedTemplate + source; };<|fim▁end|>
// 定义在vuetify中默认的两组stylus hash:主题色和material相关 let defaultVuetifyVariables = { themeColor: {
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>import os import runpy from codecs import open from setuptools import setup, find_packages # Based on https://github.com/pypa/sampleproject/blob/master/setup.py # and https://python-packaging-user-guide.readthedocs.org/ here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() VERSION = runpy.run_path(os.path.join(here, "mitmproxy", "version.py"))["VERSION"] setup( name="mitmproxy", version=VERSION, description="An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.", long_description=long_description, url="http://mitmproxy.org", author="Aldo Cortesi", author_email="[email protected]", license="MIT", classifiers=[ "License :: OSI Approved :: MIT License", "Development Status :: 5 - Production/Stable", "Environment :: Console", "Environment :: Console :: Curses", "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: Implementation :: CPython", "Topic :: Security", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: Proxy Servers", "Topic :: Software Development :: Testing" ], packages=find_packages(include=[ "mitmproxy", "mitmproxy.*", "pathod", "pathod.*", ]), include_package_data=True, entry_points={ 'console_scripts': [ "mitmproxy = mitmproxy.tools.main:mitmproxy", "mitmdump = mitmproxy.tools.main:mitmdump", "mitmweb = mitmproxy.tools.main:mitmweb", "pathod = pathod.pathod_cmdline:go_pathod", "pathoc = pathod.pathoc_cmdline:go_pathoc" ] }, # https://packaging.python.org/en/latest/requirements/#install-requires # It is not considered best practice to use 
install_requires to pin dependencies to specific versions. install_requires=[ "blinker>=1.4, <1.5", "click>=6.2, <7", "certifi>=2015.11.20.1", # no semver here - this should always be on the last release! "construct>=2.8, <2.9", "cryptography>=1.3, <1.9", "cssutils>=1.0.1, <1.1", "h2>=3.0, <4",<|fim▁hole|> "html2text>=2016.1.8, <=2016.9.19", "hyperframe>=5.0, <6", "jsbeautifier>=1.6.3, <1.7", "kaitaistruct>=0.6, <0.7", "passlib>=1.6.5, <1.8", "pyasn1>=0.1.9, <0.3", "pyOpenSSL>=16.0, <17.0", "pyparsing>=2.1.3, <2.3", "pyperclip>=1.5.22, <1.6", "requests>=2.9.1, <3", "ruamel.yaml>=0.13.2, <0.15", "tornado>=4.3, <4.5", "urwid>=1.3.1, <1.4", "watchdog>=0.8.3, <0.9", "brotlipy>=0.5.1, <0.7", "sortedcontainers>=1.5.4, <1.6", # transitive from cryptography, we just blacklist here. # https://github.com/pypa/setuptools/issues/861 "setuptools>=11.3, !=29.0.0", ], extras_require={ ':sys_platform == "win32"': [ "pydivert>=2.0.3, <2.1", ], ':sys_platform != "win32"': [ ], 'dev': [ "Flask>=0.10.1, <0.13", "flake8>=3.2.1, <3.4", "mypy>=0.501, <0.502", "rstcheck>=2.2, <4.0", "tox>=2.3, <3", "pytest>=3, <3.1", "pytest-cov>=2.2.1, <3", "pytest-timeout>=1.0.0, <2", "pytest-xdist>=1.14, <2", "pytest-faulthandler>=1.3.0, <2", "sphinx>=1.3.5, <1.6", "sphinx-autobuild>=0.5.2, <0.7", "sphinxcontrib-documentedlist>=0.5.0, <0.7", "sphinx_rtd_theme>=0.1.9, <0.3", ], 'contentviews': [ ], 'examples': [ "beautifulsoup4>=4.4.1, <4.6", "Pillow>=3.2, <4.1", ] } )<|fim▁end|>
<|file_name|>router_config_loader.ts<|end_file_name|><|fim▁begin|>/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import {Compiler, InjectionToken, Injector, NgModuleFactory, NgModuleFactoryLoader} from '@angular/core'; // TODO(i): switch to fromPromise once it's expored in rxjs import {Observable, from, of } from 'rxjs'; import {map, mergeMap} from 'rxjs/operators'; import {LoadChildren, LoadedRouterConfig, Route, standardizeConfig} from './config'; import {flatten, wrapIntoObservable} from './utils/collection'; /** * @docsNotRequired * @experimental */ export const ROUTES = new InjectionToken<Route[][]>('ROUTES'); export class RouterConfigLoader { constructor( private loader: NgModuleFactoryLoader, private compiler: Compiler, private onLoadStartListener?: (r: Route) => void, private onLoadEndListener?: (r: Route) => void) {} load(parentInjector: Injector, route: Route): Observable<LoadedRouterConfig> { if (this.onLoadStartListener) { this.onLoadStartListener(route); } const moduleFactory$ = this.loadModuleFactory(route.loadChildren !); return moduleFactory$.pipe(map((factory: NgModuleFactory<any>) => { if (this.onLoadEndListener) {<|fim▁hole|> this.onLoadEndListener(route); } const module = factory.create(parentInjector); return new LoadedRouterConfig( flatten(module.injector.get(ROUTES)).map(standardizeConfig), module); })); } private loadModuleFactory(loadChildren: LoadChildren): Observable<NgModuleFactory<any>> { if (typeof loadChildren === 'string') { return from(this.loader.load(loadChildren)); } else { return wrapIntoObservable(loadChildren()).pipe(mergeMap((t: any) => { if (t instanceof NgModuleFactory) { return of (t); } else { return from(this.compiler.compileModuleAsync(t)); } })); } } }<|fim▁end|>
<|file_name|>test.py<|end_file_name|><|fim▁begin|>__author__ = 'emre' <|fim▁hole|><|fim▁end|>
print "hello world"
<|file_name|>FlyFi.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- """ FlyFi - Floppy-Fidelity ======= Created to fulfill all your floppy music needs. Created on Tue 06-01-2013_05:17:42+0100 @author: Ricardo (XeN) Band <[email protected]>, Stephan (coon) Thiele <[email protected]> This file is part of FlyFi. FlyFi is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. FlyFi is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with FlyFi. If not, see <http://www.gnu.org/licenses/>. Diese Datei ist Teil von FlyFi. FlyFi ist Freie Software: Sie können es unter den Bedingungen der GNU General Public License, wie von der Free Software Foundation, Version 3 der Lizenz oder (nach Ihrer Option) jeder späteren veröffentlichten Version, weiterverbreiten und/oder modifizieren. FlyFi wird in der Hoffnung, dass es nützlich sein wird, aber OHNE JEDE GEWÄHELEISTUNG, bereitgestellt; sogar ohne die implizite Gewährleistung der MARKTFÄHIGKEIT oder EIGNUNG FÜR EINEN BESTIMMTEN ZWECK. Siehe die GNU General Public License für weitere Details. Sie sollten eine Kopie der GNU General Public License zusammen mit diesem Programm erhalten haben. Wenn nicht, siehe <http://www.gnu.org/licenses/>. FlyFi is using tango icons: <http://tango.freedesktop.org/>. 
""" __author__ = "Ricardo (XeN) Band <[email protected]>, \ Stephan (coon) Thiele <[email protected]>" __copyright__ = "Copyright (C) 2013 Ricardo Band, Stephan Thiele" __revision__ = "$Id$"<|fim▁hole|>from PySide import QtGui from MainWindow import MainWindow def main(): """ create QApp and show MainWindow """ app = QtGui.QApplication(sys.argv) win = MainWindow() win.show() sys.exit(app.exec_()) if __name__ == "__main__": main()<|fim▁end|>
__version__ = "0.1" import sys
<|file_name|>atn-generate-food-web.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 """ Generates a plot and JSON file describing a food web. Files are stored in a directory named based on the species in the food web. If --parent-dir is not specified, the parent directory is determined automatically based on DATA_HOME. """ import os import sys import argparse from atntools import settings from atntools import foodwebs from atntools import util parser = argparse.ArgumentParser(description=globals()['__doc__']) parser.add_argument('--parent-dir', help="Parent directory to use instead of automatically-determined directory under DATA_HOME") parser.add_argument('--figsize', nargs=2, type=int, default=[4, 3], help="Width and height of the food web plot, in inches (combine with --dpi)") parser.add_argument('--dpi', type=int, default=100, help="Image resolution (dots per inch)") subparsers = parser.add_subparsers(dest='subparser_name') # 'generate' sub-command parser_generate = subparsers.add_parser('generate', help="Generate a new food web and save plot and JSON") parser_generate.add_argument('size', type=int, help="Number of species")<|fim▁hole|> # 'regenerate' sub-command parser_regenerate = subparsers.add_parser('regenerate', help="Regenerate files in existing food web directory") parser_regenerate.add_argument('existing_dir', help="Existing food web directory") # 'from-node-ids' sub-command parser_from_node_ids = subparsers.add_parser('from-node-ids', help="Generate plot and JSON from given node IDs") parser_from_node_ids.add_argument('node_ids', nargs='+', type=int, help="List of node IDs") args = parser.parse_args() if not args.subparser_name: # No sub-command given parser.print_usage() sys.exit(1) if args.subparser_name == 'generate': subweb = foodwebs.serengeti_predator_complete_subweb(args.size, args.num_basal_species) node_ids = sorted(subweb.nodes()) food_web_id = '-'.join([str(x) for x in node_ids]) if args.parent_dir is None: food_web_dir = 
util.get_food_web_dir(food_web_id) else: food_web_dir = os.path.join(os.path.expanduser(args.parent_dir), food_web_id) print("Creating food web directory " + food_web_dir) os.makedirs(food_web_dir) elif args.subparser_name == 'regenerate': food_web_dir = os.path.normpath(args.existing_dir) if not os.path.isdir(food_web_dir): print("Error: directory doesn't exist: " + food_web_dir, file=sys.stderr) sys.exit(1) food_web_id = os.path.basename(food_web_dir) node_ids = [int(x) for x in food_web_id.split('-')] serengeti = foodwebs.read_serengeti() subweb = serengeti.subgraph(node_ids) elif args.subparser_name == 'from-node-ids': node_ids = sorted(args.node_ids) serengeti = foodwebs.read_serengeti() subweb = serengeti.subgraph(node_ids) food_web_id = '-'.join([str(x) for x in node_ids]) if args.parent_dir is None: food_web_dir = util.get_food_web_dir(food_web_id) else: food_web_dir = os.path.join(os.path.expanduser(args.parent_dir), food_web_id) print("Creating food web directory " + food_web_dir) os.makedirs(food_web_dir) foodwebs.draw_food_web(subweb, show_legend=True, output_file=os.path.join(food_web_dir, 'foodweb.{}.png'.format(food_web_id)), figsize=args.figsize, dpi=args.dpi) with open(os.path.join(food_web_dir, 'foodweb.{}.json'.format(food_web_id)), 'w') as f: print(foodwebs.food_web_json(subweb), file=f)<|fim▁end|>
parser_generate.add_argument('num_basal_species', type=int, help="Number of basal species")
<|file_name|>gystogra.cpp<|end_file_name|><|fim▁begin|>/* Copyright (c) 1993-2008, Cognitive Technologies All rights reserved. Разрешается повторное распространение и использование как в виде исходного кода, так и в двоичной форме, с изменениями или без, при соблюдении следующих условий: * При повторном распространении исходного кода должны оставаться указанное выше уведомление об авторском праве, этот список условий и последующий отказ от гарантий. * При повторном распространении двоичного кода в документации и/или в других материалах, поставляемых при распространении, должны сохраняться указанная выше информация об авторском праве, этот список условий и последующий отказ от гарантий. * Ни название Cognitive Technologies, ни имена ее сотрудников не могут быть использованы в качестве средства поддержки и/или продвижения продуктов, основанных на этом ПО, без предварительного письменного разрешения. ЭТА ПРОГРАММА ПРЕДОСТАВЛЕНА ВЛАДЕЛЬЦАМИ АВТОРСКИХ ПРАВ И/ИЛИ ДРУГИМИ ЛИЦАМИ "КАК ОНА ЕСТЬ" БЕЗ КАКОГО-ЛИБО ВИДА ГАРАНТИЙ, ВЫРАЖЕННЫХ ЯВНО ИЛИ ПОДРАЗУМЕВАЕМЫХ, ВКЛЮЧАЯ ГАРАНТИИ КОММЕРЧЕСКОЙ ЦЕННОСТИ И ПРИГОДНОСТИ ДЛЯ КОНКРЕТНОЙ ЦЕЛИ, НО НЕ ОГРАНИЧИВАЯСЬ ИМИ. НИ ВЛАДЕЛЕЦ АВТОРСКИХ ПРАВ И НИ ОДНО ДРУГОЕ ЛИЦО, КОТОРОЕ МОЖЕТ ИЗМЕНЯТЬ И/ИЛИ ПОВТОРНО РАСПРОСТРАНЯТЬ ПРОГРАММУ, НИ В КОЕМ СЛУЧАЕ НЕ НЕСЁТ ОТВЕТСТВЕННОСТИ, ВКЛЮЧАЯ ЛЮБЫЕ ОБЩИЕ, СЛУЧАЙНЫЕ, СПЕЦИАЛЬНЫЕ ИЛИ ПОСЛЕДОВАВШИЕ УБЫТКИ, СВЯЗАННЫЕ С ИСПОЛЬЗОВАНИЕМ ИЛИ ПОНЕСЕННЫЕ ВСЛЕДСТВИЕ НЕВОЗМОЖНОСТИ ИСПОЛЬЗОВАНИЯ ПРОГРАММЫ (ВКЛЮЧАЯ ПОТЕРИ ДАННЫХ, ИЛИ ДАННЫЕ, СТАВШИЕ НЕГОДНЫМИ, ИЛИ УБЫТКИ И/ИЛИ ПОТЕРИ ДОХОДОВ, ПОНЕСЕННЫЕ ИЗ-ЗА ДЕЙСТВИЙ ТРЕТЬИХ ЛИЦ И/ИЛИ ОТКАЗА ПРОГРАММЫ РАБОТАТЬ СОВМЕСТНО С ДРУГИМИ ПРОГРАММАМИ, НО НЕ ОГРАНИЧИВАЯСЬ ЭТИМИ СЛУЧАЯМИ), НО НЕ ОГРАНИЧИВАЯСЬ ИМИ, ДАЖЕ ЕСЛИ ТАКОЙ ВЛАДЕЛЕЦ ИЛИ ДРУГОЕ ЛИЦО БЫЛИ ИЗВЕЩЕНЫ О ВОЗМОЖНОСТИ ТАКИХ УБЫТКОВ И ПОТЕРЬ. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Cognitive Technologies nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> /* interface my */ #include "gystogra.h" /* interface our util */ #include "skew1024.h" using namespace cf; /*---------------------------------------------------------------------------*/ Bool MakeTopBotGysts(Rect16 *pRc, int nRc, int32_t Skew, int MaxSize, Un_GYST *pBegGt, Un_GYST *pEndGt) { int MinBeg, MaxBeg, MinEnd, MaxEnd, i, End; long dy, ddy; int32_t x, yBeg, yEnd; int32_t SkewSquar; int *pBegSig, *pEndSig; SkewSquar = Skew * Skew; pBegGt->nElem = nRc; pEndGt->nElem = nRc; pBegSig = pBegGt->Signal; pEndSig = pEndGt->Signal; /* Предельные значения проекций */ x = (pRc[0].left + pRc[0].right + 1) / 2; dy = ((-Skew * x + 0x200) >> 10); yBeg = pRc[0].top; yEnd = pRc[0].bottom; ddy = ((SkewSquar * yBeg + 0x100000) >> 21); yBeg += dy; yBeg -= ddy; ddy = ((SkewSquar * yEnd + 0x100000) >> 21); yEnd += dy; yEnd -= ddy; MinBeg = yBeg; MaxBeg = yBeg; MinEnd = yEnd; MaxEnd = yEnd; for (i = 1; i < nRc; i++) { x = (pRc[i].left + pRc[i].right + 1) / 2; dy = ((-Skew * x + 0x200) >> 10); yBeg = pRc[i].top; yEnd = pRc[i].bottom; ddy = ((SkewSquar * yBeg + 0x100000) >> 21); yBeg += dy; yBeg -= ddy; ddy = ((SkewSquar * yEnd + 0x100000) >> 21); yEnd += dy; yEnd -= ddy; if (MinBeg > yBeg) MinBeg = yBeg; if (MaxBeg < yBeg) MaxBeg = yBeg; if (MinEnd > yEnd) MinEnd = yEnd; if (MaxEnd < yEnd) MaxEnd = yEnd; } if (MaxBeg - MinBeg >= MaxSize) return FALSE; if (MaxEnd - MinEnd >= MaxSize) return FALSE; pBegGt->Shift = MinBeg; pBegGt->End = MaxBeg - MinBeg; pEndGt->Shift = MinEnd; pEndGt->End = MaxEnd - MinEnd; End = pBegGt->End; if (End < pEndGt->End) End = pEndGt->End; for (i = 0; i <= End; i++) { pBegSig[i] = 0; pEndSig[i] = 0; } for (i = 0; i < nRc; i++) { x = (pRc[i].left + pRc[i].right + 1) / 2; dy = ((-Skew * x + 0x200) >> 10); yBeg = pRc[i].top; yEnd = pRc[i].bottom; ddy = ((SkewSquar * yBeg + 0x100000) >> 21); yBeg += dy; yBeg -= ddy; ddy = ((SkewSquar * yEnd + 0x100000) >> 21); yEnd += dy; yEnd -= ddy; pBegSig[yBeg - MinBeg]++; pEndSig[yEnd - MinEnd]++; } 
return TRUE; } /*---------------------------------------------------------------------------*/ Bool MakeLefRigGysts(Rect16 *pRc, int nRc, int32_t Skew, int MaxSize, Un_GYST *pBegGt, Un_GYST *pEndGt) { int MinBeg, MaxBeg, MinEnd, MaxEnd, i, End; long dx, ddx; int32_t y, xBeg, xEnd; int32_t SkewSquar; int *pBegSig, *pEndSig; SkewSquar = Skew * Skew; pBegGt->nElem = nRc; pEndGt->nElem = nRc; pBegSig = pBegGt->Signal; pEndSig = pEndGt->Signal; /* Предельные значения проекций */ y = (pRc[0].top + pRc[0].bottom + 1) / 2; dx = ((-Skew * y + 0x200) >> 10); xBeg = pRc[0].left; xEnd = pRc[0].right; ddx = ((SkewSquar * xBeg + 0x100000) >> 21); xBeg -= dx; xBeg -= ddx; ddx = ((SkewSquar * xEnd + 0x100000) >> 21); xEnd -= dx; xEnd -= ddx; MinBeg = xBeg; MaxBeg = xBeg; MinEnd = xEnd; MaxEnd = xEnd; for (i = 1; i < nRc; i++) { y = (pRc[i].top + pRc[i].bottom + 1) / 2; dx = ((-Skew * y + 0x200) >> 10); xBeg = pRc[i].left; xEnd = pRc[i].right; ddx = ((SkewSquar * xBeg + 0x100000) >> 21); xBeg -= dx; xBeg -= ddx; ddx = ((SkewSquar * xEnd + 0x100000) >> 21); xEnd -= dx; xEnd -= ddx; if (MinBeg > xBeg) MinBeg = xBeg; if (MaxBeg < xBeg) MaxBeg = xBeg; if (MinEnd > xEnd) MinEnd = xEnd; if (MaxEnd < xEnd) MaxEnd = xEnd; } if (MaxBeg - MinBeg >= MaxSize) return FALSE; if (MaxEnd - MinEnd >= MaxSize) return FALSE; pBegGt->Shift = MinBeg; pBegGt->End = MaxBeg - MinBeg; pEndGt->Shift = MinEnd; pEndGt->End = MaxEnd - MinEnd; End = pBegGt->End; if (End < pEndGt->End) End = pEndGt->End; for (i = 0; i <= End; i++) { pBegSig[i] = 0; pEndSig[i] = 0; } for (i = 0; i < nRc; i++) { y = (pRc[i].top + pRc[i].bottom + 1) / 2; dx = ((-Skew * y + 0x200) >> 10); xBeg = pRc[i].left; xEnd = pRc[i].right; ddx = ((SkewSquar * xBeg + 0x100000) >> 21); xBeg -= dx; xBeg -= ddx; ddx = ((SkewSquar * xEnd + 0x100000) >> 21); xEnd -= dx; xEnd -= ddx; pBegSig[xBeg - MinBeg]++; pEndSig[xEnd - MinEnd]++; } return TRUE; } /*---------------------------------------------------------------------------*/ Bool 
MakeTopMidBotGysts(Rect16 *pRc, int nRc, int32_t Skew, int MaxSize, Un_GYST *pBegGt, Un_GYST *pMidGt, Un_GYST *pEndGt) { int MinBeg, MaxBeg, MinMid, MaxMid, MinEnd, MaxEnd, i, End; long dy, ddy; int32_t x, yBeg, yMid, yEnd; int32_t SkewSquar; int *pBegSig, *pMidSig, *pEndSig; SkewSquar = Skew * Skew; pBegGt->nElem = nRc; pMidGt->nElem = nRc; pEndGt->nElem = nRc; pBegSig = pBegGt->Signal; pMidSig = pMidGt->Signal; pEndSig = pEndGt->Signal; /* Предельные значения проекций */ x = (pRc[0].left + pRc[0].right + 1) / 2; dy = ((-Skew * x + 0x200) >> 10); yBeg = pRc[0].top; yMid = (pRc[0].top + pRc[0].bottom + 1) / 2; yEnd = pRc[0].bottom; ddy = ((SkewSquar * yBeg + 0x100000) >> 21); yBeg += dy; yBeg -= ddy; ddy = ((SkewSquar * yMid + 0x100000) >> 21); yMid += dy; yMid -= ddy; ddy = ((SkewSquar * yEnd + 0x100000) >> 21); yEnd += dy; yEnd -= ddy; MinBeg = yBeg; MaxBeg = yBeg; MinMid = yMid; MaxMid = yMid; MinEnd = yEnd; MaxEnd = yEnd; for (i = 1; i < nRc; i++) { x = (pRc[i].left + pRc[i].right + 1) / 2; dy = ((-Skew * x + 0x200) >> 10); yBeg = pRc[i].top; yMid = (pRc[i].top + pRc[i].bottom + 1) / 2; yEnd = pRc[i].bottom; ddy = ((SkewSquar * yBeg + 0x100000) >> 21); yBeg += dy; yBeg -= ddy; ddy = ((SkewSquar * yMid + 0x100000) >> 21); yMid += dy; yMid -= ddy; ddy = ((SkewSquar * yEnd + 0x100000) >> 21); yEnd += dy; yEnd -= ddy; if (MinBeg > yBeg) MinBeg = yBeg; if (MaxBeg < yBeg) MaxBeg = yBeg; if (MinMid > yMid) MinMid = yMid; if (MaxMid < yMid) MaxMid = yMid; if (MinEnd > yEnd) MinEnd = yEnd; if (MaxEnd < yEnd) MaxEnd = yEnd; } if (MaxBeg - MinBeg >= MaxSize) return FALSE; if (MaxMid - MinMid >= MaxSize) return FALSE; if (MaxEnd - MinEnd >= MaxSize) return FALSE; pBegGt->Shift = MinBeg; pBegGt->End = MaxBeg - MinBeg; pMidGt->Shift = MinMid; pMidGt->End = MaxMid - MinMid; pEndGt->Shift = MinEnd; pEndGt->End = MaxEnd - MinEnd; End = pBegGt->End; if (End < pMidGt->End) End = pMidGt->End; if (End < pEndGt->End) End = pEndGt->End; for (i = 0; i <= End; i++) { pBegSig[i] = 0; 
pMidSig[i] = 0; pEndSig[i] = 0; } for (i = 0; i < nRc; i++) { x = (pRc[i].left + pRc[i].right + 1) / 2; dy = ((-Skew * x + 0x200) >> 10); yBeg = pRc[i].top; yMid = (pRc[i].top + pRc[i].bottom + 1) / 2; yEnd = pRc[i].bottom; ddy = ((SkewSquar * yBeg + 0x100000) >> 21); yBeg += dy; yBeg -= ddy; ddy = ((SkewSquar * yMid + 0x100000) >> 21); yMid += dy; yMid -= ddy; ddy = ((SkewSquar * yEnd + 0x100000) >> 21); yEnd += dy; yEnd -= ddy; pBegSig[yBeg - MinBeg]++; pMidSig[yMid - MinMid]++; pEndSig[yEnd - MinEnd]++; } return TRUE; } /*---------------------------------------------------------------------------*/ Bool MakeLefMidRigGysts(Rect16 *pRc, int nRc, int32_t Skew, int MaxSize, Un_GYST *pBegGt, Un_GYST *pMidGt, Un_GYST *pEndGt) { int MinBeg, MaxBeg, MinMid, MaxMid, MinEnd, MaxEnd, i, End; long dx, ddx; int32_t y, xBeg, xMid, xEnd; int32_t SkewSquar; int *pBegSig, *pMidSig, *pEndSig; SkewSquar = Skew * Skew; pBegGt->nElem = nRc; pMidGt->nElem = nRc; pEndGt->nElem = nRc; pBegSig = pBegGt->Signal; pMidSig = pMidGt->Signal; pEndSig = pEndGt->Signal; /* Предельные значения проекций */ y = (pRc[0].top + pRc[0].bottom + 1) / 2; dx = ((-Skew * y + 0x200) >> 10); xBeg = pRc[0].left; xMid = (pRc[0].left + pRc[0].right + 1) / 2; xEnd = pRc[0].right; ddx = ((SkewSquar * xBeg + 0x100000) >> 21); xBeg -= dx; xBeg -= ddx; ddx = ((SkewSquar * xMid + 0x100000) >> 21); xMid -= dx; xMid -= ddx; ddx = ((SkewSquar * xEnd + 0x100000) >> 21); xEnd -= dx; xEnd -= ddx; MinBeg = xBeg; MaxBeg = xBeg; MinMid = xMid; MaxMid = xMid; MinEnd = xEnd; MaxEnd = xEnd; for (i = 1; i < nRc; i++) { y = (pRc[i].top + pRc[i].bottom + 1) / 2; dx = ((-Skew * y + 0x200) >> 10); xBeg = pRc[i].left; xMid = (pRc[i].left + pRc[i].right + 1) / 2; xEnd = pRc[i].right; ddx = ((SkewSquar * xBeg + 0x100000) >> 21); xBeg -= dx; xBeg -= ddx; ddx = ((SkewSquar * xMid + 0x100000) >> 21); xMid -= dx; xMid -= ddx; ddx = ((SkewSquar * xEnd + 0x100000) >> 21); xEnd -= dx; xEnd -= ddx; if (MinBeg > xBeg) MinBeg = xBeg; if (MaxBeg 
< xBeg) MaxBeg = xBeg; if (MinMid > xMid) MinMid = xMid; if (MaxMid < xMid) MaxMid = xMid; if (MinEnd > xEnd) MinEnd = xEnd; if (MaxEnd < xEnd) MaxEnd = xEnd; } if (MaxBeg - MinBeg >= MaxSize) return FALSE; if (MaxMid - MinMid >= MaxSize) return FALSE; if (MaxEnd - MinEnd >= MaxSize) return FALSE; pBegGt->Shift = MinBeg; pBegGt->End = MaxBeg - MinBeg; pMidGt->Shift = MinMid; pMidGt->End = MaxMid - MinMid; pEndGt->Shift = MinEnd; pEndGt->End = MaxEnd - MinEnd; End = pBegGt->End; if (End < pMidGt->End) End = pMidGt->End; if (End < pEndGt->End) End = pEndGt->End; for (i = 0; i <= End; i++) { pBegSig[i] = 0; pMidSig[i] = 0; pEndSig[i] = 0; } for (i = 0; i < nRc; i++) { y = (pRc[i].top + pRc[i].bottom + 1) / 2; dx = ((-Skew * y + 0x200) >> 10); xBeg = pRc[i].left; xMid = (pRc[i].left + pRc[i].right + 1) / 2; xEnd = pRc[i].right; ddx = ((SkewSquar * xBeg + 0x100000) >> 21); xBeg -= dx; xBeg -= ddx; ddx = ((SkewSquar * xMid + 0x100000) >> 21); xMid -= dx; xMid -= ddx; ddx = ((SkewSquar * xEnd + 0x100000) >> 21); xEnd -= dx; xEnd -= ddx; pBegSig[xBeg - MinBeg]++; pMidSig[xMid - MinMid]++; pEndSig[xEnd - MinEnd]++; } return TRUE; } int ScoreComp(const Rect16 *pRcReg, const int32_t Skew, const Rect16 *pRc, const int nRc) { int i, k; Point PosIdeal; k = 0; for (i = 0; i < nRc; i++) { if (pRc[i].right - pRc[i].left < 2) continue; if (pRc[i].right - pRc[i].left > 100) continue; if (pRc[i].bottom - pRc[i].top < 2) continue; if (pRc[i].bottom - pRc[i].top > 100) continue; PosIdeal.rx() = (int) (.5 * (pRc[i].left + pRc[i].right + 1)); PosIdeal.ry() = (int) (.5 * (pRc[i].top + pRc[i].bottom + 1)); PosIdeal.deskew(-Skew); if (PosIdeal.x() > pRcReg->right) continue; if (PosIdeal.x() < pRcReg->left) continue; if (PosIdeal.y() > pRcReg->bottom) continue; if (PosIdeal.y() < pRcReg->top) continue; k++; } return k; } /*---------------------------------------------------------------------------*/ void MakeNormVertGyst(const Rect16 *pRcReg, const int32_t Skew, const Rect16 *pRc, const int 
nRc, int *Sig) { int i, k; Point BegDirIdeal; Point EndDirIdeal; for (i = 0; i < nRc; i++) { if (pRc[i].right - pRc[i].left < 2) continue; if (pRc[i].right - pRc[i].left > 100) continue; if (pRc[i].bottom - pRc[i].top < 2) continue; if (pRc[i].bottom - pRc[i].top > 100) continue; BegDirIdeal.rx() = (int) (.5 * (pRc[i].left + pRc[i].right + 1)); BegDirIdeal.ry() = pRc[i].top; BegDirIdeal.deskew(-Skew); if (BegDirIdeal.x() > pRcReg->right) continue; if (BegDirIdeal.x() < pRcReg->left) continue; if (BegDirIdeal.y() >= pRcReg->bottom) continue; if (BegDirIdeal.y() < pRcReg->top) BegDirIdeal.ry() = pRcReg->top; EndDirIdeal.rx() = (int) (.5 * (pRc[i].left + pRc[i].right + 1)); EndDirIdeal.ry() = pRc[i].bottom; EndDirIdeal.deskew(-Skew); if (EndDirIdeal.y() <= pRcReg->top) continue; if (EndDirIdeal.y() > pRcReg->bottom) EndDirIdeal.ry() = pRcReg->bottom; for (k = BegDirIdeal.y(); k <= EndDirIdeal.y(); k++) Sig[k - pRcReg->top]++; } } /*---------------------------------------------------------------------------*/ Bool MakeVertGysts(Rect16 *pRc, int nRc, int32_t Skew, int Amnist, int MaxSize, Un_GYST *pVerGt, int *pWhatDo) { int MinBeg, MaxBeg, MinEnd, MaxEnd, CurBeg, CurEnd, i, End, k, iFirst; Point BegDirIdeal; Point EndDirIdeal; iFirst = -1; for (i = 0; i < nRc; i++) { if (pWhatDo[i] != 1) continue; iFirst = i; break; } if (iFirst == -1) return FALSE; /* Предельные значения проекций */ BegDirIdeal.rx() = (int) (.5 * (pRc[iFirst].left + pRc[iFirst].right + 1)); BegDirIdeal.ry() = pRc[iFirst].top; BegDirIdeal.deskew(-Skew); MinBeg = BegDirIdeal.y(); MaxBeg = BegDirIdeal.y(); EndDirIdeal.rx() = (int) (.5 * (pRc[iFirst].left + pRc[iFirst].right + 1)); EndDirIdeal.ry() = pRc[iFirst].bottom; EndDirIdeal.deskew(-Skew); MinEnd = EndDirIdeal.y(); MaxEnd = EndDirIdeal.y(); for (i = iFirst + 1; i < nRc; i++) { if (pWhatDo[i] != 1) continue; BegDirIdeal.rx() = (int) (.5 * (pRc[i].left + pRc[i].right + 1)); BegDirIdeal.ry() = pRc[i].top; BegDirIdeal.deskew(-Skew); CurBeg = 
BegDirIdeal.y(); EndDirIdeal.rx() = (int) (.5 * (pRc[i].left + pRc[i].right + 1)); EndDirIdeal.ry() = pRc[i].bottom; EndDirIdeal.deskew(-Skew); CurEnd = EndDirIdeal.y(); if (MinBeg > CurBeg) MinBeg = CurBeg; if (MaxBeg < CurBeg) MaxBeg = CurBeg; if (MinEnd > CurEnd) MinEnd = CurEnd; if (MaxEnd < CurEnd) MaxEnd = CurEnd; } if (MaxBeg - MinBeg >= MaxSize) return FALSE; if (MaxEnd - MinEnd >= MaxSize) return FALSE; if (MinBeg > MinEnd) return FALSE; if (MaxBeg > MaxEnd) return FALSE; pVerGt->Shift = MinBeg; pVerGt->End = MaxEnd - MinBeg; pVerGt->nElem = nRc; End = pVerGt->End; for (i = 0; i <= End; i++) { pVerGt->Signal[i] = 0; } for (i = 0; i < nRc; i++) { if (pWhatDo[i] != 1) continue; BegDirIdeal.rx() = (int) (.5 * (pRc[i].left + pRc[i].right + 1)); BegDirIdeal.ry() = pRc[i].top; BegDirIdeal.deskew(-Skew); CurBeg = BegDirIdeal.y(); EndDirIdeal.rx() = (int) (.5 * (pRc[i].left + pRc[i].right + 1)); EndDirIdeal.ry() = pRc[i].bottom; EndDirIdeal.deskew(-Skew); CurEnd = EndDirIdeal.y(); for (k = CurBeg + Amnist; k <= CurEnd - Amnist; k++)<|fim▁hole|> return TRUE; } /*---------------------------------------------------------------------------*/ void MakeNormHoriGyst(const Rect16 *pRcReg, const int32_t Skew, const Rect16 *pRc, const int nRc, int *Sig) { int i, k; Point BegDirIdeal; Point EndDirIdeal; for (i = 0; i < nRc; i++) { if (pRc[i].right - pRc[i].left < 2) continue; if (pRc[i].right - pRc[i].left > 100) continue; if (pRc[i].bottom - pRc[i].top < 2) continue; if (pRc[i].bottom - pRc[i].top > 100) continue; BegDirIdeal.rx() = pRc[i].left; BegDirIdeal.ry() = (int) (.5 * (pRc[i].top + pRc[i].bottom + 1)); BegDirIdeal.deskew(-Skew); if (BegDirIdeal.y() > pRcReg->bottom) continue; if (BegDirIdeal.y() < pRcReg->top) continue; if (BegDirIdeal.x() >= pRcReg->right) continue; if (BegDirIdeal.x() < pRcReg->left) BegDirIdeal.rx() = pRcReg->left; EndDirIdeal.rx() = pRc[i].right; EndDirIdeal.ry() = (int) (.5 * (pRc[i].top + pRc[i].bottom + 1)); EndDirIdeal.deskew(-Skew); if 
(EndDirIdeal.x() <= pRcReg->left) continue; if (EndDirIdeal.x() > pRcReg->right) EndDirIdeal.rx() = pRcReg->right; for (k = BegDirIdeal.x(); k <= EndDirIdeal.x(); k++) Sig[k - pRcReg->left]++; } } /*---------------------------------------------------------------------------*/ Bool MakeHoriGysts(Rect16 *pRc, int nRc, int32_t Skew, int MaxSize, Un_GYST *pHorGt, int *pWhatDo) { int MinBeg, MaxBeg, MinEnd, MaxEnd, CurBeg, CurEnd, i, End, k, iFirst; Point BegDirIdeal; Point EndDirIdeal; iFirst = -1; for (i = 0; i < nRc; i++) { if (pWhatDo[i] != 1) continue; iFirst = i; break; } if (iFirst == -1) return FALSE; /* Предельные значения проекций */ BegDirIdeal.rx() = pRc[iFirst].left; BegDirIdeal.ry() = (int) (.5 * (pRc[iFirst].top + pRc[iFirst].bottom + 1)); BegDirIdeal.deskew(-Skew); MinBeg = BegDirIdeal.x(); MaxBeg = BegDirIdeal.x(); EndDirIdeal.rx() = pRc[iFirst].right; EndDirIdeal.ry() = (int) (.5 * (pRc[iFirst].top + pRc[iFirst].bottom + 1)); EndDirIdeal.deskew(-Skew); MinEnd = EndDirIdeal.x(); MaxEnd = EndDirIdeal.x(); for (i = iFirst + 1; i < nRc; i++) { if (pWhatDo[i] != 1) continue; BegDirIdeal.rx() = pRc[i].left; BegDirIdeal.ry() = (int) (.5 * (pRc[i].top + pRc[i].bottom + 1)); BegDirIdeal.deskew(-Skew); CurBeg = BegDirIdeal.x(); EndDirIdeal.rx() = pRc[i].right; EndDirIdeal.ry() = (int) (.5 * (pRc[i].top + pRc[i].bottom + 1)); EndDirIdeal.deskew(-Skew); CurEnd = EndDirIdeal.x(); if (MinBeg > CurBeg) MinBeg = CurBeg; if (MaxBeg < CurBeg) MaxBeg = CurBeg; if (MinEnd > CurEnd) MinEnd = CurEnd; if (MaxEnd < CurEnd) MaxEnd = CurEnd; } if (MaxBeg - MinBeg >= MaxSize) return FALSE; if (MaxEnd - MinEnd >= MaxSize) return FALSE; if (MinBeg > MinEnd) return FALSE; if (MaxBeg > MaxEnd) return FALSE; pHorGt->Shift = MinBeg; pHorGt->End = MaxEnd - MinBeg; pHorGt->nElem = nRc; End = pHorGt->End; for (i = 0; i <= End; i++) { pHorGt->Signal[i] = 0; } for (i = 0; i < nRc; i++) { if (pWhatDo[i] != 1) continue; BegDirIdeal.rx() = pRc[i].left; BegDirIdeal.ry() = (int) (.5 * 
(pRc[i].top + pRc[i].bottom + 1)); BegDirIdeal.deskew(-Skew); CurBeg = BegDirIdeal.x(); EndDirIdeal.rx() = pRc[i].right; EndDirIdeal.ry() = (int) (.5 * (pRc[i].top + pRc[i].bottom + 1)); EndDirIdeal.deskew(-Skew); CurEnd = EndDirIdeal.x(); for (k = CurBeg; k <= CurEnd; k++) pHorGt->Signal[k - MinBeg]++; } return TRUE; } /*---------------------------------------------------------------------------*/ Bool MakeHoriSrez(Rect16 *pRcId, int nRc, int BegSrez, int EndSrez, int MaxSize, Un_GYST *pHorGt, int *pWhatDo) { int MinBeg, MaxBeg, MinEnd, MaxEnd, CurBeg, CurEnd, i, End, k, iFirst; iFirst = -1; for (i = 0; i < nRc; i++) { if (pWhatDo[i] != 1) continue; iFirst = i; break; } if (iFirst == -1) return FALSE; /* Предельные значения проекций */ MinBeg = pRcId[iFirst].left; MaxBeg = MinBeg; MinEnd = pRcId[iFirst].right; MaxEnd = MinEnd; for (i = iFirst + 1; i < nRc; i++) { if (pWhatDo[i] != 1) continue; CurBeg = pRcId[i].left; CurEnd = pRcId[i].right; if (MinBeg > CurBeg) MinBeg = CurBeg; if (MaxBeg < CurBeg) MaxBeg = CurBeg; if (MinEnd > CurEnd) MinEnd = CurEnd; if (MaxEnd < CurEnd) MaxEnd = CurEnd; } if (MaxBeg - MinBeg >= MaxSize) return FALSE; if (MaxEnd - MinEnd >= MaxSize) return FALSE; if (MinBeg > MinEnd) return FALSE; if (MaxBeg > MaxEnd) return FALSE; pHorGt->Shift = MinBeg; pHorGt->End = MaxEnd - MinBeg; pHorGt->nElem = nRc; End = pHorGt->End; for (i = 0; i <= End; i++) { pHorGt->Signal[i] = 0; } for (i = 0; i < nRc; i++) { if (pWhatDo[i] != 1) continue; if (pRcId[i].top >= EndSrez) continue; if (pRcId[i].bottom <= BegSrez) continue; CurBeg = pRcId[i].left; CurEnd = pRcId[i].right; for (k = CurBeg; k <= CurEnd; k++) pHorGt->Signal[k - MinBeg]++; } return TRUE; } /*---------------------------------------------------------------------------*/ Bool MakeVertSrez(Rect16 *pRcId, int nRc, int BegSrez, int EndSrez, int MaxSize, Un_GYST *pVerGt, int *pWhatDo) { int MinBeg, MaxBeg, MinEnd, MaxEnd, CurBeg, CurEnd, i, End, k, iFirst; iFirst = -1; for (i = 0; i < nRc; i++) { 
if (pWhatDo[i] != 1) continue; iFirst = i; break; } if (iFirst == -1) return FALSE; /* Предельные значения проекций */ MinBeg = pRcId[iFirst].top; MaxBeg = MinBeg; MinEnd = pRcId[iFirst].bottom; MaxEnd = MinEnd; for (i = iFirst + 1; i < nRc; i++) { if (pWhatDo[i] != 1) continue; CurBeg = pRcId[i].top; CurEnd = pRcId[i].bottom; if (MinBeg > CurBeg) MinBeg = CurBeg; if (MaxBeg < CurBeg) MaxBeg = CurBeg; if (MinEnd > CurEnd) MinEnd = CurEnd; if (MaxEnd < CurEnd) MaxEnd = CurEnd; } if (MaxBeg - MinBeg >= MaxSize) return FALSE; if (MaxEnd - MinEnd >= MaxSize) return FALSE; if (MinBeg > MinEnd) return FALSE; if (MaxBeg > MaxEnd) return FALSE; pVerGt->Shift = MinBeg; pVerGt->End = MaxEnd - MinBeg; pVerGt->nElem = nRc; End = pVerGt->End; for (i = 0; i <= End; i++) { pVerGt->Signal[i] = 0; } for (i = 0; i < nRc; i++) { if (pWhatDo[i] != 1) continue; if (pRcId[i].left >= EndSrez) continue; if (pRcId[i].right <= BegSrez) continue; CurBeg = pRcId[i].top; CurEnd = pRcId[i].bottom; for (k = CurBeg; k <= CurEnd; k++) pVerGt->Signal[k - MinBeg]++; } return TRUE; } /*---------------------------------------------------------------------------*/ Bool FindNextHole(Un_GYST *pDarkGt, int Beg, int End, int *NewBeg, int *NewEnd) { int i; Bool ret; if (Beg > End) return FALSE; ret = FALSE; for (i = Beg; i <= End; i++) { if (i < pDarkGt->Shift) continue; if (i > pDarkGt->Shift + pDarkGt->End) break; if (pDarkGt->Signal[i - pDarkGt->Shift] > 0) continue; *NewBeg = i; ret = TRUE; break; } if (!ret) return FALSE; for (i = *NewBeg; i <= End; i++) { if (i > pDarkGt->Shift + pDarkGt->End) break; if (pDarkGt->Signal[i - pDarkGt->Shift] > 0) break; *NewEnd = i; continue; } return TRUE; } /*---------------------------------------------------------------------------*/ Bool FindNextHoleWithBound(int MaxSig, Un_GYST *pDarkGt, int Beg, int End, int *NewBeg, int *NewEnd, int MinLent) { int i, Beg_C, End_C; Bool ret; if (Beg > End) return FALSE; Beg_C = Beg; if (Beg_C < pDarkGt->Shift) Beg_C = 
pDarkGt->Shift; End_C = End; if (End_C > pDarkGt->Shift + pDarkGt->End) End_C = pDarkGt->Shift + pDarkGt->End; if (Beg_C > End_C) return FALSE; while (Beg_C <= End_C) { ret = FALSE; for (i = Beg_C; i <= End_C; i++) { if (pDarkGt->Signal[i - pDarkGt->Shift] > MaxSig) continue; *NewBeg = i; ret = TRUE; break; } if (!ret) return FALSE; *NewEnd = *NewBeg; for (i = *NewBeg; i <= End_C; i++) { if (pDarkGt->Signal[i - pDarkGt->Shift] > MaxSig) break; *NewEnd = i; continue; } if (*NewEnd - *NewBeg >= MinLent) return TRUE; Beg_C = *NewEnd + 1; } return FALSE; } /*---------------------------------------------------------------------------*/ Bool FindNormNextHoleWithBound(int *pSig, int LenSig, int Beg, int End, int *NewBeg, int *NewEnd, int MaxSig, int MinLent) { int i, Beg_C, End_C; Bool ret; if (Beg > End) return FALSE; Beg_C = Beg; if (Beg_C < 0) Beg_C = 0; End_C = End; if (End_C > LenSig - 1) End_C = LenSig - 1; if (Beg_C > End_C) return FALSE; while (Beg_C <= End_C) { ret = FALSE; for (i = Beg_C; i <= End_C; i++) { if (pSig[i] > MaxSig) continue; *NewBeg = i; ret = TRUE; break; } if (!ret) return FALSE; *NewEnd = *NewBeg; for (i = *NewBeg; i <= End_C; i++) { if (pSig[i] > MaxSig) break; *NewEnd = i; continue; } if (*NewEnd - *NewBeg >= MinLent) return TRUE; Beg_C = *NewEnd + 1; } return FALSE; } /*---------------------------------------------------------------------------*/ Bool FindMainHole(int Beg, int End, int MaxSig, Un_GYST *pOrtGt, int *NewBeg, int *NewEnd, int *NewMax) { int CurBeg, CurEnd, i, BegPos; Bool ret; ret = FindNextHoleWithBound(MaxSig, pOrtGt, Beg, End, &CurBeg, &CurEnd, 0); if (!ret) return FALSE; *NewBeg = CurBeg; *NewEnd = CurEnd; BegPos = *NewEnd + 1; while (1) { ret = FindNextHoleWithBound(MaxSig, pOrtGt, BegPos, End, &CurBeg, &CurEnd, 0); if (!ret) break; BegPos = CurEnd + 1; if (*NewEnd - *NewBeg > CurEnd - CurBeg) continue; *NewBeg = CurBeg; *NewEnd = CurEnd; } *NewMax = pOrtGt->Signal[*NewBeg - pOrtGt->Shift]; for (i = *NewBeg; i <= *NewEnd; 
i++) if (*NewMax < pOrtGt->Signal[i - pOrtGt->Shift]) *NewMax = pOrtGt->Signal[i - pOrtGt->Shift]; return TRUE; } /*---------------------------------------------------------------------------*/<|fim▁end|>
pVerGt->Signal[k - MinBeg]++; }
<|file_name|>NanoInstant.java<|end_file_name|><|fim▁begin|>/* $Id$ * $URL: https://dev.almende.com/svn/abms/coala-common/src/main/java/com/almende/coala/time/NanoInstant.java $ * <|fim▁hole|> * @license * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * * Copyright (c) 2010-2013 Almende B.V. */ package io.coala.time; /** * {@link NanoInstant} has the nano-second as base time unit * * @date $Date: 2014-06-03 14:26:09 +0200 (Tue, 03 Jun 2014) $ * @version $Revision: 296 $ * @author <a href="mailto:[email protected]">Rick</a> */ public class NanoInstant extends AbstractInstant<NanoInstant> { /** */ private static final long serialVersionUID = 1L; /** */ // private static final Logger LOG = LogUtil.getLogger(NanoInstant.class); /** */ // private static final TimeUnit BASE_UNIT = TimeUnit.NANOS; /** */ public static final NanoInstant ZERO = new NanoInstant(null, 0); /** * {@link NanoInstant} constructor * * @param value */ public NanoInstant(final ClockID clockID, final Number value) { super(clockID, value, TimeUnit.NANOS); } // /** // * {@link NanoInstant} constructor // * // * @param value // */ // public NanoInstant(final ClockID clockID, final Number value, // final TimeUnit unit) // { // super(clockID, value, unit); // } // // /** @see Instant#getBaseUnit() */ // @Override // public TimeUnit getBaseUnit() // { // return BASE_UNIT; // } /** @see Instant#toUnit(TimeUnit) */ @Override public NanoInstant toUnit(final TimeUnit unit) { throw new RuntimeException( "Can't convert NanoInstant 
to another TimeUnit"); } /** @see Instant#plus(Number) */ @Override public NanoInstant plus(final Number value) { return new NanoInstant(getClockID(), getValue().doubleValue() + value.doubleValue()); } }<|fim▁end|>
* Part of the EU project Adapt4EE, see http://www.adapt4ee.eu/ *
<|file_name|>document_test.go<|end_file_name|><|fim▁begin|>package document_test<|fim▁hole|>import ( "fmt" "github.com/tummychow/goose/document" _ "github.com/tummychow/goose/document/file" _ "github.com/tummychow/goose/document/sql" "gopkg.in/check.v1" "os" "time" ) type DocumentStoreSuite struct { Store document.DocumentStore } type documentChecker struct { *check.CheckerInfo } var DocumentEquals check.Checker = &documentChecker{ &check.CheckerInfo{Name: "DocumentEquals", Params: []string{"obtained", "Name", "Content"}}, } func init() { if len(os.Getenv("GOOSE_TEST_FILE")) != 0 { fileStore, err := document.NewStore(os.Getenv("GOOSE_TEST_FILE")) if err != nil { fmt.Printf("Could not initialize FileDocumentStore %q, skipping\n(error was: %v)\n", os.Getenv("GOOSE_TEST_FILE"), err) } else { fmt.Printf("Running tests against FileDocumentStore %q\n", os.Getenv("GOOSE_TEST_FILE")) check.Suite(&DocumentStoreSuite{Store: fileStore}) } } if len(os.Getenv("GOOSE_TEST_SQL")) != 0 { sqlStore, err := document.NewStore(os.Getenv("GOOSE_TEST_SQL")) if err != nil { fmt.Printf("Could not initialize SqlDocumentStore %q, skipping\n(error was: %v)\n", os.Getenv("GOOSE_TEST_SQL"), err) } else { fmt.Printf("Running tests against SqlDocumentStore %q\n", os.Getenv("GOOSE_TEST_SQL")) check.Suite(&DocumentStoreSuite{Store: sqlStore}) } } } // Check compares a Document against an expected Name and Content. The Document // is checked for validity, and the Name and Content are then matched. Passing // nil for the Name or Content will cause that comparison to be skipped. 
func (checker *documentChecker) Check(params []interface{}, names []string) (result bool, error string) { if params[0] == nil { return false, "obtained value is nil" } doc, ok := params[0].(document.Document) if !ok { return false, "obtained value is not a Document" } if !document.ValidateName(doc.Name) { return false, "obtained Document has invalid Name" } if len([]byte(doc.Content)) >= document.MAX_CONTENT_SIZE { return false, "obtained Document has oversized Content" } if doc.Timestamp.Location() != time.UTC { return false, "obtained Document has non-UTC Timestamp" } if params[1] != nil { expectedName, ok := params[1].(string) if !ok { return false, "Name is not a string" } if doc.Name != expectedName { return false, "obtained Document has wrong Name" } } if params[2] != nil { expectedContent, ok := params[2].(string) if !ok { return false, "Content is not a string" } if doc.Content != expectedContent { return false, "obtained Document has wrong Content" } } return true, "" } func (s *DocumentStoreSuite) SetUpTest(c *check.C) { s.Store.Clear() } func (s *DocumentStoreSuite) TearDownSuite(c *check.C) { s.Store.Close() } func (s *DocumentStoreSuite) TestEmpty(c *check.C) { _, err := s.Store.Get("/foo/bar") c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, document.NotFoundError{}) docAll, err := s.Store.GetAll("/foo/bar") c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, document.NotFoundError{}) c.Assert(docAll, check.HasLen, 0) children, err := s.Store.GetDescendants("/foo/bar") c.Assert(err, check.IsNil) c.Assert(children, check.HasLen, 0) } func (s *DocumentStoreSuite) TestInvalidNames(c *check.C) { _, err := s.Store.Get("/foo/bar/") c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, document.InvalidNameError{}) docAll, err := s.Store.GetAll("/foo/bar/") c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, document.InvalidNameError{}) c.Assert(docAll, check.HasLen, 0) err = s.Store.Update("/foo/bar/", "foo bar") 
c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, document.InvalidNameError{}) // the empty string is not invalid for GetDescendants _, err = s.Store.GetDescendants("") c.Assert(err, check.IsNil) children, err := s.Store.GetDescendants("/foo/bar/") c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, document.InvalidNameError{}) c.Assert(children, check.HasLen, 0) } func (s *DocumentStoreSuite) TestBasic(c *check.C) { err := s.Store.Update("/foo/bar", "foo bar") c.Assert(err, check.IsNil) doc, err := s.Store.Get("/foo/bar") c.Assert(err, check.IsNil) c.Assert(doc, DocumentEquals, "/foo/bar", "foo bar") } func (s *DocumentStoreSuite) TestMultipleVersions(c *check.C) { err := s.Store.Update("/foo/bar", "the duck quacked") c.Assert(err, check.IsNil) err = s.Store.Update("/foo/bar", "qux and baz oh my") c.Assert(err, check.IsNil) doc, err := s.Store.Get("/foo/bar") c.Assert(err, check.IsNil) c.Assert(doc, DocumentEquals, "/foo/bar", "qux and baz oh my") docAll, err := s.Store.GetAll("/foo/bar") c.Assert(err, check.IsNil) c.Assert(docAll, check.HasLen, 2) c.Assert(docAll[0], DocumentEquals, "/foo/bar", "qux and baz oh my") c.Assert(docAll[1], DocumentEquals, "/foo/bar", "the duck quacked") } func (s *DocumentStoreSuite) TestMultipleDocuments(c *check.C) { err := s.Store.Update("/foo", "foo v1") c.Assert(err, check.IsNil) err = s.Store.Update("/foo", "foo v2") c.Assert(err, check.IsNil) err = s.Store.Update("/foo/bar", "bar v1") c.Assert(err, check.IsNil) err = s.Store.Update("/foo/bar", "bar v2") c.Assert(err, check.IsNil) err = s.Store.Update("/foo/bar/baz", "baz v1") c.Assert(err, check.IsNil) err = s.Store.Update("/foo/bar/baz", "baz v2") c.Assert(err, check.IsNil) doc, err := s.Store.Get("/foo") c.Assert(err, check.IsNil) c.Assert(doc, DocumentEquals, "/foo", "foo v2") doc, err = s.Store.Get("/foo/bar") c.Assert(err, check.IsNil) c.Assert(doc, DocumentEquals, "/foo/bar", "bar v2") doc, err = s.Store.Get("/foo/bar/baz") c.Assert(err, check.IsNil) 
c.Assert(doc, DocumentEquals, "/foo/bar/baz", "baz v2") } func (s *DocumentStoreSuite) TestDescendants(c *check.C) { /* tree structure: * ├─ foo * │ ├─ bar (X) * │ │ └─ baz * │ └─ qux * └─ sf (X) * └─ nu (X) * └─ ab (X) * └─ fa (X) * └─ ur */ err := s.Store.Update("/foo", "lorem ipsum") c.Assert(err, check.IsNil) err = s.Store.Update("/foo", "lorem ipsum two") c.Assert(err, check.IsNil) err = s.Store.Update("/foo/bar/baz", "lorem ipsum") c.Assert(err, check.IsNil) err = s.Store.Update("/foo/qux", "lorem ipsum") c.Assert(err, check.IsNil) err = s.Store.Update("/sf/nu/ab/fa/ur", "lorem ipsum") c.Assert(err, check.IsNil) children, err := s.Store.GetDescendants("") c.Assert(err, check.IsNil) c.Assert(children, check.DeepEquals, []string{"/foo", "/foo/bar/baz", "/foo/qux", "/sf/nu/ab/fa/ur"}) children, err = s.Store.GetDescendants("/foo") c.Assert(err, check.IsNil) c.Assert(children, check.DeepEquals, []string{"/foo/bar/baz", "/foo/qux"}) children, err = s.Store.GetDescendants("/foo/bar") c.Assert(err, check.IsNil) c.Assert(children, check.DeepEquals, []string{"/foo/bar/baz"}) children, err = s.Store.GetDescendants("/sf/nu") c.Assert(err, check.IsNil) c.Assert(children, check.DeepEquals, []string{"/sf/nu/ab/fa/ur"}) }<|fim▁end|>
<|file_name|>derive_on_struct.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. //<|fim▁hole|>// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. extern crate num; #[macro_use] extern crate num_derive; #[derive(Debug, PartialEq, FromPrimitive)] //~ ERROR struct Color { r: u8, g: u8, b: u8, } fn main() {}<|fim▁end|>
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
<|file_name|>AddressViewDAOImpl.java<|end_file_name|><|fim▁begin|>package com.vertabelo.mobileorm.myplaces.orm.gen; public class AddressViewDAOImpl extends com.vertabelo.mobileorm.myplaces.orm.runtime.dao.BaseDAO<AddressView> implements AddressViewDAO { public AddressViewDAOImpl(com.vertabelo.mobileorm.myplaces.orm.runtime.util.SQLiteDataSource dataSource) { super(dataSource); } public AddressViewDAOImpl(com.vertabelo.mobileorm.myplaces.orm.runtime.util.SQLiteDataSource dataSource, com.vertabelo.mobileorm.myplaces.orm.runtime.util.DAOMonitor daoMonitor) { super(dataSource, daoMonitor); } @Override public Class<AddressView> getPojoClass() { return POJO_CLASS; } @Override public com.vertabelo.mobileorm.myplaces.orm.runtime.query.TableExpression getTableExpression() { return TABLE_EXPRESSION; } @Override public com.vertabelo.mobileorm.myplaces.orm.runtime.util.ResultSetHandler getResultSetHandler() { return RESULT_SET_HANDLER; } @Override public java.util.List<AddressView> getAddressViewList() { com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery query = new com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery(TABLE_EXPRESSION); com.vertabelo.mobileorm.myplaces.orm.runtime.dao.SelectObjectListResult<AddressView> selectObjectListResult = select(query, RESULT_SET_HANDLER); return selectObjectListResult.getObjectList(); } @Override public java.util.List<AddressView> getAddressViewList(com.vertabelo.mobileorm.myplaces.orm.runtime.query.AExp orderBy) { com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery query = new com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery(TABLE_EXPRESSION); query.orderBy(orderBy); com.vertabelo.mobileorm.myplaces.orm.runtime.dao.SelectObjectListResult<AddressView> selectObjectListResult = select(query, RESULT_SET_HANDLER); return selectObjectListResult.getObjectList(); } @Override public java.util.List<AddressView> getAddressViewList(com.vertabelo.mobileorm.myplaces.orm.runtime.query.AExp orderBy, 
com.vertabelo.mobileorm.myplaces.orm.runtime.query.OrderByDirection asc) { com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery query = new com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery(TABLE_EXPRESSION); query.orderBy(orderBy, asc); com.vertabelo.mobileorm.myplaces.orm.runtime.dao.SelectObjectListResult<AddressView> selectObjectListResult = select(query, RESULT_SET_HANDLER); return selectObjectListResult.getObjectList(); } @Override public java.util.List<AddressView> getAddressViewList(com.vertabelo.mobileorm.myplaces.orm.runtime.query.LExp where) { com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery query = new com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery(TABLE_EXPRESSION); query.setWhere(where); com.vertabelo.mobileorm.myplaces.orm.runtime.dao.SelectObjectListResult<AddressView> selectObjectListResult = select(query, RESULT_SET_HANDLER); return selectObjectListResult.getObjectList(); } @Override public java.util.List<AddressView> getAddressViewList(com.vertabelo.mobileorm.myplaces.orm.runtime.query.LExp where, com.vertabelo.mobileorm.myplaces.orm.runtime.query.AExp orderBy) { com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery query = new com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery(TABLE_EXPRESSION); query.setWhere(where); query.orderBy(orderBy); com.vertabelo.mobileorm.myplaces.orm.runtime.dao.SelectObjectListResult<AddressView> selectObjectListResult = select(query, RESULT_SET_HANDLER); return selectObjectListResult.getObjectList(); } @Override public java.util.List<AddressView> getAddressViewList(com.vertabelo.mobileorm.myplaces.orm.runtime.query.LExp where, com.vertabelo.mobileorm.myplaces.orm.runtime.query.AExp orderBy, com.vertabelo.mobileorm.myplaces.orm.runtime.query.OrderByDirection asc) { com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery query = new com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery(TABLE_EXPRESSION); query.setWhere(where); 
query.orderBy(orderBy, asc); com.vertabelo.mobileorm.myplaces.orm.runtime.dao.SelectObjectListResult<AddressView> selectObjectListResult = select(query, RESULT_SET_HANDLER); return selectObjectListResult.getObjectList(); } @Override public Long getCount() { com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery query = new com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery(TABLE_EXPRESSION, com.vertabelo.mobileorm.myplaces.orm.runtime.query.AExp.fun("COUNT", com.vertabelo.mobileorm.myplaces.orm.runtime.query.AExp.ASTERISK)); java.util.List<Long> list = select(query, new com.vertabelo.mobileorm.myplaces.orm.runtime.util.handlers.LongResultSetHandler()).getObjectList(); if (list.size() > 1) { throw new RuntimeException("More than one object returned"); } else if (list.size() == 1) { return list.get(0); } else { throw new RuntimeException("Cannot retrieve count() method result"); } } @Override public Long getCount(com.vertabelo.mobileorm.myplaces.orm.runtime.query.LExp where) { com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery query = new com.vertabelo.mobileorm.myplaces.orm.runtime.query.SelectQuery(TABLE_EXPRESSION, com.vertabelo.mobileorm.myplaces.orm.runtime.query.AExp.fun("COUNT", com.vertabelo.mobileorm.myplaces.orm.runtime.query.AExp.ASTERISK)); query.setWhere(where); java.util.List<Long> list = select(query, new com.vertabelo.mobileorm.myplaces.orm.runtime.util.handlers.LongResultSetHandler()).getObjectList(); if (list.size() > 1) { throw new RuntimeException("More than one object returned"); } else if (list.size() == 1) { return list.get(0); } else { throw new RuntimeException("Cannot retrieve count() method result");<|fim▁hole|>}<|fim▁end|>
} }
<|file_name|>isLength.js<|end_file_name|><|fim▁begin|>/** * Used as the [maximum length](http://ecma-international.org/ecma-262/6.0/#sec-number.max_safe_integer) * of an array-like value.<|fim▁hole|> * Checks if `value` is a valid array-like length. * * **Note:** This function is based on [`ToLength`](http://ecma-international.org/ecma-262/6.0/#sec-tolength). * * @private * @param {*} value The value to check. * @returns {boolean} Returns `true` if `value` is a valid length, else `false`. */ function isLength(value) { return typeof value == 'number' && value > -1 && value % 1 == 0 && value <= MAX_SAFE_INTEGER; } export default isLength;<|fim▁end|>
*/ var MAX_SAFE_INTEGER = 9007199254740991; /**
<|file_name|>TileZTC.java<|end_file_name|><|fim▁begin|>package net.gigimoi.zombietc.tile; import net.gigimoi.zombietc.ZombieTC; import net.gigimoi.zombietc.util.IListenerZTC; import net.minecraft.nbt.NBTTagCompound; import net.minecraft.network.NetworkManager; import net.minecraft.network.Packet; import net.minecraft.network.play.server.S35PacketUpdateTileEntity; import net.minecraft.tileentity.TileEntity; /** * Created by gigimoi on 8/8/2014. */<|fim▁hole|> @Override public void updateEntity() { super.updateEntity(); if(!ZombieTC.gameManager.isRegisteredListener(this)) { ZombieTC.gameManager.registerListener(this); } } @Override public Packet getDescriptionPacket() { NBTTagCompound tagCompound = new NBTTagCompound(); writeToNBT(tagCompound); return new S35PacketUpdateTileEntity(xCoord, yCoord, zCoord, 1, tagCompound); } @Override public void onDataPacket(NetworkManager net, S35PacketUpdateTileEntity pkt) { super.onDataPacket(net, pkt); readFromNBT(pkt.func_148857_g()); } }<|fim▁end|>
public abstract class TileZTC extends TileEntity implements IListenerZTC {
<|file_name|>csgsetg.rs<|end_file_name|><|fim▁begin|>#[doc = "Register `CSGSETG` writer"] pub struct W(crate::W<CSGSETG_SPEC>); impl core::ops::Deref for W { type Target = crate::W<CSGSETG_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<CSGSETG_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<CSGSETG_SPEC>) -> Self { W(writer) } } #[doc = "Field `SD0R` writer - DAC0 run bit set"] pub struct SD0R_W<'a> { w: &'a mut W, } impl<'a> SD0R_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } #[doc = "Field `SC0R` writer - CMP0 run bit set"] pub struct SC0R_W<'a> { w: &'a mut W, } impl<'a> SC0R_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1); self.w } } #[doc = "Field `SC0P` writer - CMP0 passive level set"] pub struct SC0P_W<'a> { w: &'a mut W, } impl<'a> SC0P_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 
0x01) << 2); self.w } } #[doc = "Field `SD1R` writer - DAC1 run bit set"] pub struct SD1R_W<'a> { w: &'a mut W, } impl<'a> SD1R_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4); self.w } } #[doc = "Field `SC1R` writer - CMP1 run bit set"] pub struct SC1R_W<'a> { w: &'a mut W, } impl<'a> SC1R_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | ((value as u32 & 0x01) << 5); self.w } } #[doc = "Field `SC1P` writer - CMP1 passive level set"] pub struct SC1P_W<'a> { w: &'a mut W, } impl<'a> SC1P_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u32 & 0x01) << 6); self.w } } #[doc = "Field `SD2R` writer - DAC2 run bit set"] pub struct SD2R_W<'a> { w: &'a mut W, } impl<'a> SD2R_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits 
= (self.w.bits & !(0x01 << 8)) | ((value as u32 & 0x01) << 8); self.w } } #[doc = "Field `SC2R` writer - CMP2 run bit set"] pub struct SC2R_W<'a> { w: &'a mut W, } impl<'a> SC2R_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 9)) | ((value as u32 & 0x01) << 9); self.w } } #[doc = "Field `SC2P` writer - CMP2 passive level set"] pub struct SC2P_W<'a> { w: &'a mut W, } impl<'a> SC2P_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"]<|fim▁hole|> #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 10)) | ((value as u32 & 0x01) << 10); self.w } } impl W { #[doc = "Bit 0 - DAC0 run bit set"] #[inline(always)] pub fn sd0r(&mut self) -> SD0R_W { SD0R_W { w: self } } #[doc = "Bit 1 - CMP0 run bit set"] #[inline(always)] pub fn sc0r(&mut self) -> SC0R_W { SC0R_W { w: self } } #[doc = "Bit 2 - CMP0 passive level set"] #[inline(always)] pub fn sc0p(&mut self) -> SC0P_W { SC0P_W { w: self } } #[doc = "Bit 4 - DAC1 run bit set"] #[inline(always)] pub fn sd1r(&mut self) -> SD1R_W { SD1R_W { w: self } } #[doc = "Bit 5 - CMP1 run bit set"] #[inline(always)] pub fn sc1r(&mut self) -> SC1R_W { SC1R_W { w: self } } #[doc = "Bit 6 - CMP1 passive level set"] #[inline(always)] pub fn sc1p(&mut self) -> SC1P_W { SC1P_W { w: self } } #[doc = "Bit 8 - DAC2 run bit set"] #[inline(always)] pub fn sd2r(&mut self) -> SD2R_W { SD2R_W { w: self } } #[doc = "Bit 9 - CMP2 run bit set"] #[inline(always)] pub fn sc2r(&mut self) -> SC2R_W { SC2R_W { w: self } } #[doc = "Bit 10 - CMP2 passive level set"] 
#[inline(always)] pub fn sc2p(&mut self) -> SC2P_W { SC2P_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Global CSG run bit set\n\nThis register you can [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [csgsetg](index.html) module"] pub struct CSGSETG_SPEC; impl crate::RegisterSpec for CSGSETG_SPEC { type Ux = u32; } #[doc = "`write(|w| ..)` method takes [csgsetg::W](W) writer structure"] impl crate::Writable for CSGSETG_SPEC { type Writer = W; } #[doc = "`reset()` method sets CSGSETG to value 0"] impl crate::Resettable for CSGSETG_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }<|fim▁end|>
#[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
<|file_name|>leafgeometry.rs<|end_file_name|><|fim▁begin|>// Copyright 2016 spatial-rs Developers // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. use num::{Zero, One, Signed, Float, Bounded, ToPrimitive, FromPrimitive, pow}; use std::ops::{MulAssign, AddAssign}; use geometry::{Shapes, Point, LineSegment, Rect}; use std::fmt::Debug; use std::ops::{Deref, DerefMut}; use generic_array::ArrayLength; /// The minimum functionality required to insert leaf geometry into `MbrMap` /// Until the rust compiler allows compile-time generic integers, we'll be using generic_array's `ArrayLength` to specify /// geometry dimensions at compile time. /// /// The parameter `mbr` represents a minimum bounding rectangle. /// An mbr whose corners are at (x1, y1), (x2, y2) will have the corresponding edges: (x1, x2), (y1, y2) pub trait MbrLeafGeometry<P, DIM: ArrayLength<P> + ArrayLength<(P, P)>> { /// The geometry's dimension count fn dim(&self) -> usize; /// Determine the area of the geometry fn area(&self) -> P; /// the minimum extent for a given axis fn min_for_axis(&self, dim: usize) -> P; /// the maximum extent for a given axis fn max_for_axis(&self, dim: usize) -> P; /// Expand the mbr to minimally fit the leaf fn expand_mbr_to_fit(&self, mbr: &mut Rect<P, DIM>); /// Determine the distance from the mbr's center fn distance_from_mbr_center(&self, mbr: &Rect<P, DIM>) -> P; /// Determine if the leaf is completely contained in the mbr fn contained_by_mbr(&self, mbr: &Rect<P, DIM>) -> bool; /// Determine if the leaf overlaps the mbr fn overlapped_by_mbr(&self, mbr: &Rect<P, DIM>) -> bool; /// Determines the leaf area shared with the rectangle. 
/// In cases where the leaf and mbr overlap, but the leaf has no area (point or a line, for example), return 0 fn area_overlapped_with_mbr(&self, mbr: &Rect<P, DIM>) -> P; } impl<P, DIM> MbrLeafGeometry<P, DIM> for Point<P, DIM> where P: Float + Signed + Bounded + MulAssign + AddAssign + ToPrimitive + FromPrimitive + Copy + Debug, DIM: ArrayLength<P> + ArrayLength<(P,P)> { fn dim(&self) -> usize { self.coords.len() } fn area(&self) -> P { Zero::zero() } fn min_for_axis(&self, dim: usize) -> P { *self.coords.get(dim).unwrap() } fn max_for_axis(&self, dim: usize) -> P { *self.coords.get(dim).unwrap() } fn expand_mbr_to_fit(&self, mbr: &mut Rect<P, DIM>) { for (&mut(ref mut x, ref mut y), &z) in izip!(mbr.deref_mut(), self.deref()){ *x = x.min(z); *y = y.max(z); } } fn distance_from_mbr_center(&self, mbr: &Rect<P, DIM>) -> P { let two = FromPrimitive::from_usize(2).unwrap(); let dist: P = izip!(mbr.deref(), self.deref()) .fold(Zero::zero(), |distance, (&(x, y), &z)| distance + pow((((x + y)/two) - z), 2)); dist.sqrt() } fn contained_by_mbr(&self, mbr: &Rect<P, DIM>) -> bool { self.overlapped_by_mbr(mbr) } fn overlapped_by_mbr(&self, mbr: &Rect<P, DIM>) -> bool { for (&(x, y), &z) in izip!(mbr.deref(), self.deref()){ if z < x || y < z { return false; } } true } #[allow(unused_variables)] fn area_overlapped_with_mbr(&self, mbr: &Rect<P, DIM>) -> P { Zero::zero() } } impl<P, DIM> MbrLeafGeometry<P, DIM> for LineSegment<P, DIM> where P: Float + Signed + Bounded + MulAssign + AddAssign + ToPrimitive + FromPrimitive + Copy + Debug, DIM: ArrayLength<P> + ArrayLength<(P,P)> { fn dim(&self) -> usize { self.x.dim() } fn area(&self) -> P { Zero::zero() } fn min_for_axis(&self, dim: usize) -> P { self.x.coords.get(dim).unwrap().min(*self.y.coords.get(dim).unwrap()) } fn max_for_axis(&self, dim: usize) -> P { self.x.coords.get(dim).unwrap().max(*self.y.coords.get(dim).unwrap()) } fn expand_mbr_to_fit(&self, mbr: &mut Rect<P, DIM>) { self.x.expand_mbr_to_fit(mbr); 
self.y.expand_mbr_to_fit(mbr); } fn distance_from_mbr_center(&self, mbr: &Rect<P, DIM>) -> P { let two = FromPrimitive::from_usize(2).unwrap(); let dist: P = izip!(mbr.deref(), self.x.deref(), self.y.deref()) .fold(Zero::zero(), |distance, (&(x1, y1), &x2, &y2)| distance + pow(((x1 + y1)/two - (x2 + y2)/two), 2)); dist.sqrt() } fn contained_by_mbr(&self, mbr: &Rect<P, DIM>) -> bool { self.x.contained_by_mbr(mbr) && self.y.contained_by_mbr(mbr) } fn overlapped_by_mbr(&self, mbr: &Rect<P, DIM>) -> bool { self.x.overlapped_by_mbr(mbr) || self.y.overlapped_by_mbr(mbr) } #[allow(unused_variables)] fn area_overlapped_with_mbr(&self, mbr: &Rect<P, DIM>) -> P { Zero::zero() } } impl<P, DIM> MbrLeafGeometry<P, DIM> for Rect<P, DIM> where P: Float + Signed + Bounded + MulAssign + AddAssign + ToPrimitive + FromPrimitive + Copy + Debug, DIM: ArrayLength<P> + ArrayLength<(P,P)> { fn dim(&self) -> usize { self.edges.len() } fn area(&self) -> P { self.deref() .iter() .fold(One::one(), |area, &(x, y)| area * (y - x)) } fn min_for_axis(&self, dim: usize) -> P { self.edges.get(dim).unwrap().0 } fn max_for_axis(&self, dim: usize) -> P { self.edges.get(dim).unwrap().1 } fn expand_mbr_to_fit(&self, mbr: &mut Rect<P, DIM>) { for (&mut (ref mut x1, ref mut y1), &(x2, y2)) in izip!(mbr.deref_mut(), self.deref()) { *x1 = x1.min(x2); *y1 = y1.max(y2); } } fn distance_from_mbr_center(&self, mbr: &Rect<P, DIM>) -> P { let two = FromPrimitive::from_usize(2).unwrap(); let dist: P = izip!(mbr.deref(), self.deref()) .fold(Zero::zero(), |distance, (&(x1, y1), &(x2, y2))| { distance + pow(((x1 + y1) / two - (x2 + y2) / two), 2) }); dist.sqrt() } fn contained_by_mbr(&self, mbr: &Rect<P, DIM>) -> bool { for (&(x1, y1), &(x2, y2)) in izip!(mbr.deref(), self.deref()) { if x2 < x1 || y1 < y2 { return false; } } true } fn overlapped_by_mbr(&self, mbr: &Rect<P, DIM>) -> bool { for (&(x1, y1), &(x2, y2)) in izip!(mbr.deref(), self.deref()) { if !(x1 < y2) || !(x2 < y1) { return false; } } true } fn 
area_overlapped_with_mbr(&self, mbr: &Rect<P, DIM>) -> P { izip!(mbr.deref(), self.deref()).fold(One::one(), |area, (&(x1, y1), &(x2, y2))| { area * (y1.min(y2) - x1.max(x2)).max(Zero::zero()) }) } } impl<P, DIM> MbrLeafGeometry<P, DIM> for Shapes<P, DIM> where P: Float + Signed + Bounded + MulAssign + AddAssign + ToPrimitive + FromPrimitive + Copy + Debug + Default, DIM: ArrayLength<P> + ArrayLength<(P,P)> { fn dim(&self) -> usize { match *self { Shapes::Point(ref point) => point.dim(), Shapes::LineSegment(ref linesegment) => linesegment.dim(), Shapes::Rect(ref rect) => rect.dim() } } fn area(&self) -> P { match *self {<|fim▁hole|> Shapes::Rect(ref rect) => rect.area() } } fn min_for_axis(&self, dim: usize) -> P { match *self { Shapes::Point(ref point) => point.min_for_axis(dim), Shapes::LineSegment(ref linesegment) => linesegment.min_for_axis(dim), Shapes::Rect(ref rect) => rect.min_for_axis(dim) } } fn max_for_axis(&self, dim: usize) -> P { match *self { Shapes::Point(ref point) => point.max_for_axis(dim), Shapes::LineSegment(ref linesegment) => linesegment.max_for_axis(dim), Shapes::Rect(ref rect) => rect.max_for_axis(dim) } } fn expand_mbr_to_fit(&self, mbr: &mut Rect<P, DIM>) { match *self { Shapes::Point(ref point) => point.expand_mbr_to_fit(mbr), Shapes::LineSegment(ref linesegment) => linesegment.expand_mbr_to_fit(mbr), Shapes::Rect(ref rect) => rect.expand_mbr_to_fit(mbr) } } fn distance_from_mbr_center(&self, mbr: &Rect<P, DIM>) -> P { match *self { Shapes::Point(ref point) => point.distance_from_mbr_center(mbr), Shapes::LineSegment(ref linesegment) => linesegment.distance_from_mbr_center(mbr), Shapes::Rect(ref rect) => rect.distance_from_mbr_center(mbr) } } fn contained_by_mbr(&self, mbr: &Rect<P, DIM>) -> bool { match *self { Shapes::Point(ref point) => point.contained_by_mbr(mbr), Shapes::LineSegment(ref linesegment) => linesegment.contained_by_mbr(mbr), Shapes::Rect(ref rect) => rect.contained_by_mbr(mbr) } } fn overlapped_by_mbr(&self, mbr: &Rect<P, 
DIM>) -> bool { match *self { Shapes::Point(ref point) => point.overlapped_by_mbr(mbr), Shapes::LineSegment(ref linesegment) => linesegment.overlapped_by_mbr(mbr), Shapes::Rect(ref rect) => rect.overlapped_by_mbr(mbr) } } fn area_overlapped_with_mbr(&self, mbr: &Rect<P, DIM>) -> P { match *self { Shapes::Point(ref point) => point.area_overlapped_with_mbr(mbr), Shapes::LineSegment(ref linesegment) => linesegment.area_overlapped_with_mbr(mbr), Shapes::Rect(ref rect) => rect.area_overlapped_with_mbr(mbr) } } } #[cfg(test)] mod tests { use std::ops::Deref; use typenum::consts::U3; use geometry::{Shapes, Point, LineSegment, Rect}; use generic_array::GenericArray; use super::*; const ONE: [f64; 3] = [1.0f64, 1.0f64, 1.0f64]; const ZERO: [f64; 3] = [0.0f64, 0.0f64, 0.0f64]; const NEG_ONE: [f64; 3] = [-1.0f64, -1.0f64, -1.0f64]; const NEG_TWO: [f64; 3] = [-2.0f64, -2.0f64, -2.0f64]; // distance of [0.5, 0.5, 0.5] const EXPECTED_DISTANCE: f64 = 0.86602540378f64; #[test] fn point() { let point: Point<f64, U3> = Point::new(GenericArray::new()); for i in point.deref() { assert_relative_eq!(0.0f64, i); } let zero: Shapes<f64, U3> = Shapes::Point(Point::from_slice(&ZERO)); let one: Shapes<f64, U3> = Shapes::Point(Point::from_slice(&ONE)); let neg_one: Shapes<f64, U3> = Shapes::Point(Point::from_slice(&NEG_ONE)); // Shape tests // dim assert_eq!(ZERO.len(), zero.dim()); // area assert_relative_eq!(0.0f64, zero.area()); // min/max for axis for (i, item) in ZERO.iter().enumerate() { assert_relative_eq!(*item, zero.min_for_axis(i)); assert_relative_eq!(*item, zero.max_for_axis(i)); } let mut bounding_mbr = Rect::max_inverted(); // expand_mbr_to_fit zero.expand_mbr_to_fit(&mut bounding_mbr); one.expand_mbr_to_fit(&mut bounding_mbr); for (i, (x, y)) in izip!(&ZERO, &ONE).enumerate() { assert_relative_eq!(*x, bounding_mbr.min_for_axis(i)); assert_relative_eq!(*y, bounding_mbr.max_for_axis(i)); } // distance_from_mbr_center assert_relative_eq!(EXPECTED_DISTANCE, 
zero.distance_from_mbr_center(&bounding_mbr), max_relative = 0.00000001); // contained_by_mbr assert!(zero.contained_by_mbr(&bounding_mbr)); assert!(one.contained_by_mbr(&bounding_mbr)); assert!(!neg_one.contained_by_mbr(&bounding_mbr)); // overlapped_by_mbr assert!(zero.overlapped_by_mbr(&bounding_mbr)); assert!(one.overlapped_by_mbr(&bounding_mbr)); assert!(!neg_one.overlapped_by_mbr(&bounding_mbr)); // area_overlapped_with_mbr assert_relative_eq!(0.0f64, zero.area_overlapped_with_mbr(&bounding_mbr)); } #[test] fn line_segment() { // contained let zero_one: Shapes<f64, U3> = Shapes::LineSegment(LineSegment::from_slices(&ZERO, &ONE)); // overlap let neg_one_one: Shapes<f64, U3> = Shapes::LineSegment(LineSegment::from_slices(&NEG_ONE, &ONE)); // outside let neg_two_neg_one: Shapes<f64, U3> = Shapes::LineSegment(LineSegment::from_slices(&NEG_TWO, &NEG_ONE)); // Shape tests // dim assert_eq!(ZERO.len(), zero_one.dim()); // area assert_relative_eq!(0.0f64, zero_one.area()); // min/max for axis for (i, (x, y)) in izip!(&ZERO, &ONE).enumerate() { assert_relative_eq!(*x, zero_one.min_for_axis(i)); assert_relative_eq!(*y, zero_one.max_for_axis(i)); } let mut bounding_mbr = Rect::max_inverted(); // expand_mbr_to_fit zero_one.expand_mbr_to_fit(&mut bounding_mbr); for (i, (x, y)) in izip!(&ZERO, &ONE).enumerate() { assert_relative_eq!(*x, bounding_mbr.min_for_axis(i)); assert_relative_eq!(*y, bounding_mbr.max_for_axis(i)); } // distance_from_mbr_center assert_relative_eq!(EXPECTED_DISTANCE, neg_one_one.distance_from_mbr_center(&bounding_mbr), max_relative = 0.00000001); // contained_by_mbr assert!(zero_one.contained_by_mbr(&bounding_mbr)); assert!(!neg_one_one.contained_by_mbr(&bounding_mbr)); assert!(!neg_two_neg_one.contained_by_mbr(&bounding_mbr)); // overlapped_by_mbr assert!(zero_one.overlapped_by_mbr(&bounding_mbr)); assert!(neg_one_one.overlapped_by_mbr(&bounding_mbr)); assert!(!neg_two_neg_one.overlapped_by_mbr(&bounding_mbr)); // area_overlapped_with_mbr 
assert_relative_eq!(0.0f64, zero_one.area_overlapped_with_mbr(&bounding_mbr)); } #[test] fn rect() { let g_one: GenericArray<f64, U3> = arr![f64; 1.0f64, 1.0f64, 1.0f64]; let g_zero: GenericArray<f64, U3> = arr![f64; 0.0f64, 0.0f64, 0.0f64]; let g_neg_one: GenericArray<f64, U3> = arr![f64; -1.0f64, -1.0f64, -1.0f64]; let g_neg_two: GenericArray<f64, U3> = arr![f64; -2.0f64, -2.0f64, -2.0f64]; // contained let zero_one = Rect::from_corners(g_zero.clone(), g_one.clone()); // overlapped let neg_one_one = Rect::from_corners(g_neg_one.clone(), g_one.clone()); // outside let neg_two_neg_one = Rect::from_corners(g_neg_two.clone(), g_neg_one.clone()); // Shape tests // dim assert_eq!(zero_one.len(), zero_one.dim()); // area assert_relative_eq!(1.0f64, zero_one.area()); // min/max for axis for (i, (x, y)) in izip!(&ZERO, &ONE).enumerate() { assert_relative_eq!(*x, zero_one.min_for_axis(i)); assert_relative_eq!(*y, zero_one.max_for_axis(i)); } let mut bounding_mbr = Rect::max_inverted(); // expand_mbr_to_fit zero_one.expand_mbr_to_fit(&mut bounding_mbr); for (i, (x, y)) in izip!(&ZERO, &ONE).enumerate() { assert_relative_eq!(*x, bounding_mbr.min_for_axis(i)); assert_relative_eq!(*y, bounding_mbr.max_for_axis(i)); } // distance_from_mbr_center assert_relative_eq!(EXPECTED_DISTANCE, neg_one_one.distance_from_mbr_center(&bounding_mbr), max_relative = 0.00000001); // contained_by_mbr assert!(zero_one.contained_by_mbr(&bounding_mbr)); assert!(!neg_one_one.contained_by_mbr(&bounding_mbr)); assert!(!neg_two_neg_one.contained_by_mbr(&bounding_mbr)); // overlapped_by_mbr assert!(zero_one.overlapped_by_mbr(&bounding_mbr)); assert!(neg_one_one.overlapped_by_mbr(&bounding_mbr)); assert!(!neg_two_neg_one.overlapped_by_mbr(&bounding_mbr)); // area_overlapped_with_mbr assert_relative_eq!(1.0f64, zero_one.area_overlapped_with_mbr(&bounding_mbr)); assert_relative_eq!(1.0f64, neg_one_one.area_overlapped_with_mbr(&bounding_mbr)); } }<|fim▁end|>
Shapes::Point(ref point) => point.area(), Shapes::LineSegment(ref linesegment) => linesegment.area(),
<|file_name|>arm_linux_androideabi.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. //<|fim▁hole|>// except according to those terms. use target::Target; pub fn target() -> Target { let mut base = super::linux_base::opts(); base.features = "+v7".to_string(); // Many of the symbols defined in compiler-rt are also defined in libgcc. Android // linker doesn't like that by default. base.pre_link_args.push("-Wl,--allow-multiple-definition".to_string()); // FIXME #17437 (and #17448): Android doesn't support position dependent executables anymore. base.position_independent_executables = false; Target { data_layout: "e-p:32:32:32\ -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\ -f32:32:32-f64:64:64\ -v64:64:64-v128:64:128\ -a:0:64-n32".to_string(), llvm_target: "arm-linux-androideabi".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), arch: "arm".to_string(), target_os: "android".to_string(), options: base, } }<|fim▁end|>
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed
<|file_name|>a2Object.hh<|end_file_name|><|fim▁begin|>// Copyright 2016 Martin Fracker, Jr. // All Rights Reserved. // // This project is free software, released under the terms // of the GNU General Public License v3. Please see the // file LICENSE in the root directory or visit // www.gnu.org/licenses/gpl-3.0.en.html for license terms. #pragma once #include "delegate/entity.hh" #include "tween/tween.hh" #include "util/dragger.hh" #include "util/mouseHome.hh" #include "util/tweenable.hh" #include <GL/glut.h> // draws and controls a hexagon on the screen class A2Object : public Entity { public:<|fim▁hole|> virtual void update() override; virtual void draw() override; virtual void keyboardEvent(unsigned char key, Vector2 mousePosition) override; virtual void leftMouse() override; virtual void middleMouse() override; virtual void rightMouse() override; private: Vector2 dimensions; bool spinning; // whether we are spinning bool reverse; // whether spin velocity should be reversed MouseHome mouseHome; Tweenable spinSpeed; // how fast we are spinning GLenum hexagonMode; // openGL mode used when drawing the hexagon Dragger drag; // mechanism for dragging myself void mouseDown(bool reverse); void spinLayer(); void updateSpinSpeed(); void updateSpin(); };<|fim▁end|>
A2Object(int x, int y);
<|file_name|>math.py<|end_file_name|><|fim▁begin|>import nltk PLUS = 'plus' MUL = 'multiplied by' DIV = 'divided by' MIN = 'minus' OPENB = 'open bracket' CLOSEB = 'close bracket' with open('../grammars/math.cfg', 'r') as file: grammar_str = file.read() def validate(text): grammar = nltk.CFG.fromstring(grammar_str) parser = nltk.ChartParser(grammar) trees = parser.parse(list(text)) valid = False answer = math_form = None for tree in trees: addition = tree[4].leaves() operation_string = '' for i in addition: operation_string = operation_string + i p = operation_string.replace(PLUS, '+')\ .replace(MUL, '*') \ .replace(DIV, '/') \ .replace(MIN, '-') \ .replace(OPENB, '(') \ .replace(CLOSEB, ')') math_form= p answer = eval(p) valid = True break return (valid, math_form, answer) def print_validity(text, state, marker = None): if marker is not None: text = 'Input text: ' + text + ' --> ' + marker + ' ' + state + ' Math question ' else: text = 'Input text: ' + text + ' --> ' + state + ' Math question ' print(text) lines = [line.rstrip('\n') for line in open('../test/inputs/math')] for line in lines: print('##################################################################\n') invalid_marker = '??????????' try: result = validate(line) if result[0]:<|fim▁hole|> print_validity(line, "Valid") print(' math form : ' + result[1]) print(' answer : ' + str(result[2])) else: print_validity(line, "Invalid", invalid_marker) except ValueError as ve: print(ve) print_validity(line, "Invalid", invalid_marker) continue<|fim▁end|>
<|file_name|>version.py<|end_file_name|><|fim▁begin|># This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Provide a simple /version handler.""" import handlers import handlers.base as hbase import handlers.response as hresponse import models # pylint: disable=too-many-public-methods class VersionHandler(hbase.BaseHandler): """Handle request to the /version URL. Provide the backend version number in use. """ def __init__(self, application, request, **kwargs): super(VersionHandler, self).__init__(application, request, **kwargs) def execute_get(self, *args, **kwargs): response = hresponse.HandlerResponse() response.result = [ { models.VERSION_FULL_KEY: handlers.__versionfull__, models.VERSION_KEY: handlers.__version__, } ] return response def execute_post(self, *args, **kwargs): return hresponse.HandlerResponse(501) <|fim▁hole|><|fim▁end|>
def execute_delete(self, *args, **kwargs): return hresponse.HandlerResponse(501)
<|file_name|>work-item-comment-wrapper.module.ts<|end_file_name|><|fim▁begin|>import { CommonModule } from '@angular/common'; import { NgModule } from '@angular/core'; <|fim▁hole|>//ngrx stuff import { EffectsModule } from '@ngrx/effects'; import { StoreModule } from '@ngrx/store'; import { CommentEffects } from './../../effects/comment.effects'; import { CommentReducer } from './../../reducers/comment.reducer'; import { CommentState, initialState as initialCommentState } from './../../states/comment.state'; import { WorkItemCommentModule } from './../work-item-comment/work-item-comment.module'; import { WorkItemCommentWrapperComponent } from './work-item-comment-wrapper.component'; @NgModule({ imports: [CommonModule, WorkItemCommentModule], declarations: [WorkItemCommentWrapperComponent], exports: [WorkItemCommentWrapperComponent], }) export class WorkItemCommentWrapperModule {}<|fim▁end|>
<|file_name|>admin.py<|end_file_name|><|fim▁begin|># Copyright (C) 2013 Johnny Vestergaard <[email protected]> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful,<|fim▁hole|># but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import bottle from bottle import get, post, route, static_file, view, HTTPError import shared_state import logging logger = logging.getLogger(__name__) @route('/unauth') def login(): return HTTPError(401, 'Unauthorized') @post('/login') def login(): """Authenticate users""" username = post_get('username') password = post_get('password') logger.info("Authentication attempt with username: [{0}]".format(username)) if shared_state.auth.login(username, password): return "You provided valid credentials" else: return HTTPError(401, 'Invalid credentials') @route('/logout') def logout(): shared_state.auth.logout(success_redirect='/unauth') @route('/admin') @view('admin_page') def admin(): """Only admin users can see this""" shared_state.auth.require(role='admin', fail_redirect='/unauth') return dict( current_user=shared_state.auth.current_user, users=shared_state.auth.list_users(), roles=shared_state.auth.list_roles() ) @post('/create_user') def create_user(): try: shared_state.auth.create_user(postd().username, postd().role, postd().password) return dict(ok=True, msg='') except Exception, e: return dict(ok=False, msg=e.message) @post('/delete_user') def 
delete_user(): try: shared_state.auth.delete_user(post_get('username')) return dict(ok=True, msg='') except Exception, e: return dict(ok=False, msg=e.message) @post('/create_role') def create_role(): try: shared_state.auth.create_role(post_get('role'), post_get('level')) return dict(ok=True, msg='') except Exception, e: return dict(ok=False, msg=e.message) @post('/delete_role') def delete_role(): try: shared_state.auth.delete_role(post_get('role')) return dict(ok=True, msg='') except Exception, e: return dict(ok=False, msg=e.message) def postd(): return bottle.request.forms def post_get(name, default=''): return bottle.request.POST.get(name, default).strip()<|fim▁end|>
<|file_name|>test_pipeline_yml.py<|end_file_name|><|fim▁begin|># Copyright 2015 Joel Granados [email protected] # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import glob from os import path import shutil import textwrap from unittest import TestCase from run_pipeline import maincli from tests import helpers from tests.helpers import ( FILES, PIPELINES_DIR, TMPDIR, ) class PipelineRunTestcase(TestCase): """Tests all example pipeline YAMLs under ./pipelines with run_pipeline""" def setUp(self): self.tmp_in = FILES["timestream_good_images"] self.tmp_out = path.join(TMPDIR, 'out') def _run_pipeline_yaml(self, ymlfile): yml_opts = {'--comp': False, '--conf': False, '--doc': False, '--gui': False, '--help': False, '--logfile': None, '--recalculate': False, '--set': None, '-i': self.tmp_in, '-o': self.tmp_out, '-p': path.join(PIPELINES_DIR, ymlfile), '-s': True, '-t': None, '-v': 0} maincli(yml_opts) def _run_yaml_str(self, ymlstr): # NB: you have to start the 'pipeline:' bit on a new line, indented # correctly, and start the triple-quote string with '"""\', so the # whole string is indented in the same way. 
ymlstr = textwrap.dedent(ymlstr) ymlfile = helpers.make_tmp_file() with open(ymlfile, 'w') as ymlfh: ymlfh.write(ymlstr + '\n') # Extra newline, just in case yml_opts = {'--comp': False, '--conf': False, '--doc': False, '--gui': False, '--help': False, '--logfile': None, '--recalculate': False, '--set': None, '-i': self.tmp_in, '-o': self.tmp_out, '-p': ymlfile, '-s': True, '-t': None, '-v': 0} maincli(yml_opts)<|fim▁hole|> def tearDown(self): if path.isdir(self.tmp_out): shutil.rmtree(self.tmp_out) class TestPipelinesInPLDir(PipelineRunTestcase): """Ensure all demo pipelines work with test dataset""" def test_all_demo_pipelines(self): """Ensure all demo pipelines work with test dataset""" for config in glob.glob(path.join(PIPELINES_DIR, '*.yml')): self._run_pipeline_yaml(config) class TestResizingPipelines(PipelineRunTestcase): """Test the resizing in ResultingImageWriter""" fs = """\ pipeline: - name: imagewrite mess: '---Write image---' outstream: -small size: %s outstreams: - { name: -small } general: visualise: False """ def _test_resize_pl(self, size): self._run_yaml_str(self.fs % size) def test_resize_xy(self): """Test the resizing in ResultingImageWriter with cols x rows""" self._test_resize_pl('[50,30]') self._test_resize_pl('50x30') def test_resize_float(self): """Test the resizing in ResultingImageWriter with scaling factor""" self._test_resize_pl('1.5') self._test_resize_pl('0.5') self._test_resize_pl('0.1') def test_resize_fullsize(self): """Test the resizing in ResultingImageWriter with no resizing""" self._test_resize_pl('1.0') self._test_resize_pl('fullres')<|fim▁end|>
<|file_name|>tagExclusion.ts<|end_file_name|><|fim▁begin|>/** * @license * Copyright 2013 Palantir Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import * as ts from "typescript"; import { Exclusion } from "./exclusion"; export interface ITagExclusionDescriptor { tags?: { content: IContentTags; existence: string[]; }; } export interface IContentTags { [i: string]: string; } export class TagExclusion extends Exclusion<ITagExclusionDescriptor> { private readonly contentTags: IContentTags = this.descriptor.tags === undefined ? {} : this.descriptor.tags.content; private readonly existenceTags = new Set( this.descriptor.tags !== undefined && this.descriptor.tags.existence !== undefined ? 
this.descriptor.tags.existence : undefined, ); public excludes(node: ts.Node) { const documentationNode = this.getDocumentationNode(node); const tagsWithContents = this.parseTagsWithContents(documentationNode.getFullText()); for (const tagWithContent of tagsWithContents) { if (this.existenceTags.has(tagWithContent[0])) { return true; } if (this.contentTags === undefined) { return false; } const matcherBody = this.contentTags[tagWithContent[0]]; if (matcherBody === undefined) { continue; } if (new RegExp(matcherBody).test(tagWithContent[1])) { return true; } } return false; } private getDocumentationNode(node: ts.Node) { if (node.kind === ts.SyntaxKind.VariableDeclaration) { return node.parent; } return node; } private parseTagsWithContents(nodeText: string | undefined): Array<[string, string]> { if (nodeText === undefined) { return []; } const docMatches = nodeText.match(/\/\*\*\s*\n?([^\*]*(\*[^\/])?)*\*\//); if (docMatches === null || docMatches.length === 0) { return []; } const lines = docMatches[0].match(/[\r\n\s]*\*\s*@.*[\r\n\s]/g); if (lines === null) { return []; } return lines.map( (line): [string, string] => { const body = line.substring(line.indexOf("@"));<|fim▁hole|> ); } }<|fim▁end|>
const firstSpaceIndex = body.search(/\s/); return [body.substring(1, firstSpaceIndex), body.substring(firstSpaceIndex).trim()]; },
<|file_name|>createProgramFromFiles.js<|end_file_name|><|fim▁begin|>define([ 'webgl/createProgram', 'webgl/shader/compileShaderFromFile' ], function( createProgram, compileShaderFromFile ) { /** * Creates a program from 2 script tags.<|fim▁hole|> * @param {string} vertexShaderFileName The file name of the vertex shader. * @param {string} fragmentShaderFileName The file name of the fragment shader. * @return {!WebGLProgram} A program */ return function createProgramFromScripts(gl, vertexShaderFileName, fragmentShaderFileName, callback) { var async = !!callback; if(async) { compileShaderFromFile(gl, vertexShaderFileName, 'vertex', function(vertexShader) { compileShaderFromFile(gl, fragmentShaderFileName, 'fragment', function(fragmentShader) { callback(createProgram(gl, vertexShader, fragmentShader)); }); }); } else { return createProgram(gl, compileShaderFromFile(gl, vertexShaderFileName, 'vertex'), compileShaderFromFile(gl, fragmentShaderFileName, 'fragment')); } }; });<|fim▁end|>
* * @param {!WebGLRenderingContext} gl The WebGL Context.
<|file_name|>repo.go<|end_file_name|><|fim▁begin|>// Copyright 2015 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package git import ( "bytes" "container/list" "errors" "os" "path" "path/filepath" "strings" "time" "github.com/Unknwon/com" ) // Repository represents a Git repository. type Repository struct { Path string commitCache *objectCache tagCache *objectCache } const _PRETTY_LOG_FORMAT = `--pretty=format:%H` func (repo *Repository) parsePrettyFormatLogToList(logs []byte) (*list.List, error) { l := list.New() if len(logs) == 0 { return l, nil } parts := bytes.Split(logs, []byte{'\n'}) for _, commitId := range parts { commit, err := repo.GetCommit(string(commitId)) if err != nil { return nil, err } l.PushBack(commit) } return l, nil } type NetworkOptions struct { URL string Timeout time.Duration } // IsRepoURLAccessible checks if given repository URL is accessible. func IsRepoURLAccessible(opts NetworkOptions) bool { cmd := NewCommand("ls-remote", "-q", "-h", opts.URL, "HEAD") if opts.Timeout <= 0 { opts.Timeout = -1 } _, err := cmd.RunTimeout(opts.Timeout) if err != nil { return false } return true } // InitRepository initializes a new Git repository. func InitRepository(repoPath string, bare bool) error { os.MkdirAll(repoPath, os.ModePerm) cmd := NewCommand("init") if bare { cmd.AddArguments("--bare") } _, err := cmd.RunInDir(repoPath) return err } // OpenRepository opens the repository at the given path. 
func OpenRepository(repoPath string) (*Repository, error) { repoPath, err := filepath.Abs(repoPath) if err != nil { return nil, err } else if !isDir(repoPath) { return nil, errors.New("no such file or directory") } return &Repository{ Path: repoPath, commitCache: newObjectCache(), tagCache: newObjectCache(), }, nil } type CloneRepoOptions struct { Mirror bool Bare bool Quiet bool Branch string Timeout time.Duration } // Clone clones original repository to target path. func Clone(from, to string, opts CloneRepoOptions) (err error) { toDir := path.Dir(to) if err = os.MkdirAll(toDir, os.ModePerm); err != nil { return err } cmd := NewCommand("clone") if opts.Mirror { cmd.AddArguments("--mirror") } if opts.Bare { cmd.AddArguments("--bare") } if opts.Quiet { cmd.AddArguments("--quiet") } if len(opts.Branch) > 0 { cmd.AddArguments("-b", opts.Branch) } cmd.AddArguments(from, to) if opts.Timeout <= 0 { opts.Timeout = -1 } _, err = cmd.RunTimeout(opts.Timeout) return err } type FetchRemoteOptions struct { Prune bool Timeout time.Duration } // Fetch fetches changes from remotes without merging. func Fetch(repoPath string, opts FetchRemoteOptions) error { cmd := NewCommand("fetch") if opts.Prune { cmd.AddArguments("--prune") } if opts.Timeout <= 0 { opts.Timeout = -1 } _, err := cmd.RunInDirTimeout(opts.Timeout, repoPath) return err } type PullRemoteOptions struct { All bool Rebase bool Remote string Branch string Timeout time.Duration } // Pull pulls changes from remotes. func Pull(repoPath string, opts PullRemoteOptions) error { cmd := NewCommand("pull") if opts.Rebase { cmd.AddArguments("--rebase") } if opts.All { cmd.AddArguments("--all") } else { cmd.AddArguments(opts.Remote) cmd.AddArguments(opts.Branch) } if opts.Timeout <= 0 { opts.Timeout = -1 } _, err := cmd.RunInDirTimeout(opts.Timeout, repoPath) return err } // Push pushs local commits to given remote branch. 
func Push(repoPath, remote, branch string) error { _, err := NewCommand("push", remote, branch).RunInDir(repoPath) return err } type CheckoutOptions struct { Branch string OldBranch string Timeout time.Duration } // Checkout checkouts a branch func Checkout(repoPath string, opts CheckoutOptions) error { cmd := NewCommand("checkout") if len(opts.OldBranch) > 0 { cmd.AddArguments("-b") } cmd.AddArguments(opts.Branch) if len(opts.OldBranch) > 0 { cmd.AddArguments(opts.OldBranch) } if opts.Timeout <= 0 { opts.Timeout = -1 } _, err := cmd.RunInDirTimeout(opts.Timeout, repoPath) return err } // ResetHEAD resets HEAD to given revision or head of branch. func ResetHEAD(repoPath string, hard bool, revision string) error { cmd := NewCommand("reset") if hard { cmd.AddArguments("--hard") } _, err := cmd.AddArguments(revision).RunInDir(repoPath) return err } // MoveFile moves a file to another file or directory. func MoveFile(repoPath, oldTreeName, newTreeName string) error { _, err := NewCommand("mv").AddArguments(oldTreeName, newTreeName).RunInDir(repoPath) return err } // CountObject represents disk usage report of Git repository. type CountObject struct { Count int64 Size int64 InPack int64 Packs int64 SizePack int64 PrunePackable int64 Garbage int64 SizeGarbage int64 } const ( _STAT_COUNT = "count: " _STAT_SIZE = "size: " _STAT_IN_PACK = "in-pack: " _STAT_PACKS = "packs: "<|fim▁hole|> _STAT_SIZE_GARBAGE = "size-garbage: " ) // GetRepoSize returns disk usage report of repository in given path. 
func GetRepoSize(repoPath string) (*CountObject, error) { cmd := NewCommand("count-objects", "-v") stdout, err := cmd.RunInDir(repoPath) if err != nil { return nil, err } countObject := new(CountObject) for _, line := range strings.Split(stdout, "\n") { switch { case strings.HasPrefix(line, _STAT_COUNT): countObject.Count = com.StrTo(line[7:]).MustInt64() case strings.HasPrefix(line, _STAT_SIZE): countObject.Size = com.StrTo(line[6:]).MustInt64() * 1024 case strings.HasPrefix(line, _STAT_IN_PACK): countObject.InPack = com.StrTo(line[9:]).MustInt64() * 1024 case strings.HasPrefix(line, _STAT_PACKS): countObject.Packs = com.StrTo(line[7:]).MustInt64() case strings.HasPrefix(line, _STAT_SIZE_PACK): countObject.SizePack = com.StrTo(line[11:]).MustInt64() * 1024 case strings.HasPrefix(line, _STAT_PRUNE_PACKABLE): countObject.PrunePackable = com.StrTo(line[16:]).MustInt64() case strings.HasPrefix(line, _STAT_GARBAGE): countObject.Garbage = com.StrTo(line[9:]).MustInt64() case strings.HasPrefix(line, _STAT_SIZE_GARBAGE): countObject.SizeGarbage = com.StrTo(line[14:]).MustInt64() * 1024 } } return countObject, nil }<|fim▁end|>
_STAT_SIZE_PACK = "size-pack: " _STAT_PRUNE_PACKABLE = "prune-packable: " _STAT_GARBAGE = "garbage: "
<|file_name|>indent_list_item.py<|end_file_name|><|fim▁begin|>import re import sublime import sublime_plugin class IndentListItemCommand(sublime_plugin.TextCommand): bullet_pattern = r'([-+*]|([(]?(\d+|#|[a-y]|[A-Y]|[MDCLXVImdclxvi]+))([).]))' bullet_pattern_re = re.compile(bullet_pattern) line_pattern_re = re.compile(r'^\s*' + bullet_pattern) spaces_re = re.compile(r'^\s*') def run(self, edit, reverse=False): for region in self.view.sel(): if region.a != region.b: continue line = self.view.line(region) line_content = self.view.substr(line) new_line = line_content m = self.line_pattern_re.match(new_line) if not m: return # Determine how to indent (tab or spaces) tab_str = self.view.settings().get('tab_size', 4) * ' ' sep_str = ' ' if m.group(4) else '' prev_line = self.view.line(sublime.Region(line.begin() - 1, line.begin() - 1)) prev_line_content = self.view.substr(prev_line) prev_prev_line = self.view.line(sublime.Region(prev_line.begin() - 1, prev_line.begin() - 1)) prev_prev_line_content = self.view.substr(prev_prev_line) if not reverse: # Do the indentation new_line = self.bullet_pattern_re.sub(tab_str + sep_str + r'\1', new_line) # Insert the new item if prev_line_content: new_line = '\n' + new_line <|fim▁hole|> else: if not new_line.startswith(tab_str): continue # Do the unindentation new_line = re.sub(tab_str + sep_str + self.bullet_pattern, r'\1', new_line) # Insert the new item if prev_line_content: new_line = '\n' + new_line else: prev_spaces = self.spaces_re.match(prev_prev_line_content).group(0) spaces = self.spaces_re.match(new_line).group(0) if prev_spaces == spaces: line = sublime.Region(line.begin() - 1, line.end()) endings = ['.', ')'] # Transform the bullet to the next/previous bullet type if self.view.settings().get('list_indent_auto_switch_bullet', True): bullets = self.view.settings().get('list_indent_bullets', ['*', '-', '+']) def change_bullet(m): bullet = m.group(1) try: return bullets[(bullets.index(bullet) + (1 if not reverse else -1)) % 
len(bullets)] except ValueError: pass n = m.group(2) ending = endings[(endings.index(m.group(4)) + (1 if not reverse else -1)) % len(endings)] if n.isdigit(): return '${1:a}' + ending elif n != '#': return '${1:0}' + ending return m.group(2) + ending new_line = self.bullet_pattern_re.sub(change_bullet, new_line) self.view.replace(edit, line, '') self.view.run_command('insert_snippet', {'contents': new_line}) def is_enabled(self): return bool(self.view.score_selector(self.view.sel()[0].a, 'text.restructuredtext'))<|fim▁end|>
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from pip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: from typing import List, Optional __version__ = "20.0.dev0" <|fim▁hole|> For additional details, see https://github.com/pypa/pip/issues/7498. """ from pip._internal.utils.entrypoints import _wrapper return _wrapper(args)<|fim▁end|>
def main(args=None): # type: (Optional[List[str]]) -> int """This is an internal API only meant for use by pip's own console scripts.
<|file_name|>IntroduceTwoStepAggregateRule.java<|end_file_name|><|fim▁begin|>/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.vxquery.compiler.rewriter.rules; import java.util.HashMap; import java.util.Map; import org.apache.commons.lang3.mutable.Mutable; import org.apache.vxquery.functions.BuiltinFunctions; import org.apache.vxquery.functions.BuiltinOperators; import org.apache.hyracks.algebricks.common.exceptions.AlgebricksException;<|fim▁hole|>import org.apache.hyracks.algebricks.core.algebra.base.IOptimizationContext; import org.apache.hyracks.algebricks.core.algebra.base.LogicalExpressionTag; import org.apache.hyracks.algebricks.core.algebra.base.LogicalOperatorTag; import org.apache.hyracks.algebricks.core.algebra.expressions.AbstractFunctionCallExpression; import org.apache.hyracks.algebricks.core.algebra.expressions.AggregateFunctionCallExpression; import org.apache.hyracks.algebricks.core.algebra.functions.FunctionIdentifier; import org.apache.hyracks.algebricks.core.algebra.functions.IFunctionInfo; import org.apache.hyracks.algebricks.core.algebra.operators.logical.AbstractLogicalOperator; import org.apache.hyracks.algebricks.core.algebra.operators.logical.AggregateOperator; import 
org.apache.hyracks.algebricks.core.rewriter.base.IAlgebraicRewriteRule; /** * The rule searches for aggregate operators with an aggregate function * expression that has not been initialized for two step aggregation. * * <pre> * Before * * plan__parent * AGGREGATE( $v : af1( $v1 ) ) * plan__child * * Where af1 is a VXquery aggregate function expression configured for single * step processing and $v1 is defined in plan__child. * * After * * if (af1 == count) aggregate operating settings: * Step 1: count * Step 2: sum * if (af1 == avg) aggregate operating settings: * Step 1: avg-local * Step 2: avg-global * if (af1 in (max, min, sum)) aggregate operating settings: * Step 1: af1 * Step 2: af1 * </pre> * * @author prestonc */ public class IntroduceTwoStepAggregateRule implements IAlgebraicRewriteRule { final Map<FunctionIdentifier, Pair<IFunctionInfo, IFunctionInfo>> AGGREGATE_MAP = new HashMap<FunctionIdentifier, Pair<IFunctionInfo, IFunctionInfo>>(); public IntroduceTwoStepAggregateRule() { AGGREGATE_MAP.put(BuiltinFunctions.FN_AVG_1.getFunctionIdentifier(), new Pair<IFunctionInfo, IFunctionInfo>(BuiltinOperators.AVG_LOCAL, BuiltinOperators.AVG_GLOBAL)); AGGREGATE_MAP.put(BuiltinFunctions.FN_COUNT_1.getFunctionIdentifier(), new Pair<IFunctionInfo, IFunctionInfo>(BuiltinFunctions.FN_COUNT_1, BuiltinFunctions.FN_SUM_1)); AGGREGATE_MAP.put(BuiltinFunctions.FN_MAX_1.getFunctionIdentifier(), new Pair<IFunctionInfo, IFunctionInfo>(BuiltinFunctions.FN_MAX_1, BuiltinFunctions.FN_MAX_1)); AGGREGATE_MAP.put(BuiltinFunctions.FN_MIN_1.getFunctionIdentifier(), new Pair<IFunctionInfo, IFunctionInfo>(BuiltinFunctions.FN_MIN_1, BuiltinFunctions.FN_MIN_1)); AGGREGATE_MAP.put(BuiltinFunctions.FN_SUM_1.getFunctionIdentifier(), new Pair<IFunctionInfo, IFunctionInfo>(BuiltinFunctions.FN_SUM_1, BuiltinFunctions.FN_SUM_1)); } @Override public boolean rewritePre(Mutable<ILogicalOperator> opRef, IOptimizationContext context) throws AlgebricksException { // Check if aggregate function. 
AbstractLogicalOperator op = (AbstractLogicalOperator) opRef.getValue(); if (op.getOperatorTag() != LogicalOperatorTag.AGGREGATE) { return false; } AggregateOperator aggregate = (AggregateOperator) op; if (aggregate.getExpressions().size() == 0) { return false; } Mutable<ILogicalExpression> mutableLogicalExpression = aggregate.getExpressions().get(0); ILogicalExpression logicalExpression = mutableLogicalExpression.getValue(); if (logicalExpression.getExpressionTag() != LogicalExpressionTag.FUNCTION_CALL) { return false; } AbstractFunctionCallExpression functionCall = (AbstractFunctionCallExpression) logicalExpression; if (AGGREGATE_MAP.containsKey(functionCall.getFunctionIdentifier())) { AggregateFunctionCallExpression aggregateFunctionCall = (AggregateFunctionCallExpression) functionCall; if (aggregateFunctionCall.isTwoStep()) { return false; } aggregateFunctionCall.setTwoStep(true); aggregateFunctionCall.setStepOneAggregate(AGGREGATE_MAP.get(functionCall.getFunctionIdentifier()).first); aggregateFunctionCall.setStepTwoAggregate(AGGREGATE_MAP.get(functionCall.getFunctionIdentifier()).second); return true; } return false; } @Override public boolean rewritePost(Mutable<ILogicalOperator> opRef, IOptimizationContext context) { return false; } }<|fim▁end|>
import org.apache.hyracks.algebricks.common.utils.Pair; import org.apache.hyracks.algebricks.core.algebra.base.ILogicalExpression; import org.apache.hyracks.algebricks.core.algebra.base.ILogicalOperator;
<|file_name|>app.module.ts<|end_file_name|><|fim▁begin|>import { NgModule } from '@angular/core'; import { BrowserModule } from '@angular/platform-browser'; import { AppComponent } from './app.component'; import { BlockUIModule } from 'ng-block-ui'; @NgModule({ imports: [<|fim▁hole|> BlockUIModule.forRoot({ message: 'Default Message...' }) ], declarations: [AppComponent], bootstrap: [AppComponent] }) export class AppModule { }<|fim▁end|>
BrowserModule,
<|file_name|>BinaryLink.js<|end_file_name|><|fim▁begin|>// xiNET interaction viewer // Copyright 2013 Rappsilber Laboratory // // This product includes software developed at // the Rappsilber Laboratory (http://www.rappsilberlab.org/). "use strict"; var Config = require('../../controller/Config'); var Link = require('./Link'); var SequenceLink = require('./SequenceLink'); //josh - following are libraries and should be in 'vendor'? // but I don't know how to set up the dependency if its there var Intersection = require('../../controller/Intersection'); var Point2D = require('../../controller/Point2D'); // BinaryLink.js // the class representing a binary interaction BinaryLink.prototype = new Link(); function BinaryLink(id, xlvController, fromI, toI) { this.id = id; this.evidences = d3.map(); this.interactors = [fromI, toI]; this.sequenceLinks = d3.map(); this.ctrl = xlvController; this.ambig = false; //used to avoid some unnecessary manipulation of DOM this.shown = false; } BinaryLink.prototype.initSVG = function() { this.line = document.createElementNS(Config.svgns, "line"); this.highlightLine = document.createElementNS(Config.svgns, "line"); this.thickLine = document.createElementNS(Config.svgns, "line"); this.line.setAttribute("class", "link"); this.line.setAttribute("fill", "none"); this.line.setAttribute("stroke", "black"); this.line.setAttribute("stroke-width", "1"); this.line.setAttribute("stroke-linecap", "round"); this.highlightLine.setAttribute("class", "link"); this.highlightLine.setAttribute("fill", "none"); this.highlightLine.setAttribute("stroke", Config.highlightColour); this.highlightLine.setAttribute("stroke-width", "10"); this.highlightLine.setAttribute("stroke-linecap", "round"); this.highlightLine.setAttribute("stroke-opacity", "0"); this.thickLine.setAttribute("class", "link"); this.thickLine.setAttribute("fill", "none"); this.thickLine.setAttribute("stroke", "lightgray"); this.thickLine.setAttribute("stroke-linecap", "round"); 
this.thickLine.setAttribute("stroke-linejoin", "round"); //set the events for it var self = this; this.line.onmousedown = function(evt) { self.mouseDown(evt); }; this.line.onmouseover = function(evt) { self.mouseOver(evt); }; this.line.onmouseout = function(evt) { self.mouseOut(evt); }; this.line.ontouchstart = function(evt) { self.touchStart(evt); }; this.highlightLine.onmousedown = function(evt) { self.mouseDown(evt); }; this.highlightLine.onmouseover = function(evt) { self.mouseOver(evt); }; this.highlightLine.onmouseout = function(evt) { self.mouseOut(evt); }; this.highlightLine.ontouchstart = function(evt) { self.touchStart(evt); }; this.thickLine.onmousedown = function(evt) { self.mouseDown(evt); }; this.thickLine.onmouseover = function(evt) { self.mouseOver(evt); }; this.thickLine.onmouseout = function(evt) { self.mouseOut(evt); }; this.thickLine.ontouchstart = function(evt) { self.touchStart(evt); }; this.isSelected = false; } ; BinaryLink.prototype.showHighlight = function(show) { if (this.shown) { if (this.notSubLink === true){ this.highlightInteractors(show); } if (show) { //~ this.highlightLine.setAttribute("stroke", xiNET.highlightColour.toRGB()); this.highlightLine.setAttribute("stroke-opacity", "1"); } else { //~ this.highlightLine.setAttribute("stroke", xiNET.selectedColour.toRGB()); //~ if (this.isSelected === false) { this.highlightLine.setAttribute("stroke-opacity", "0"); //~ } } } }; BinaryLink.prototype.check = function() { //~ if (!this.fromInteractor) {//TEMP HACK //~ return false; //~ } if (this.interactors[0].form === 0 && this.interactors[1].form === 0) { //~ this.ambig = true; //~ var filteredEvids = this.getFilteredEvidences(); //~ var evidCount = filteredEvids.length; //~ for (var i = 0; i < evidCount; i++) { //~ var evid = filteredEvids[i]; //~ if (typeof evid.expansion === 'undefined') { //~ this.ambig = false; //~ } //~ } //~ if (evidCount > 0) { //~ //tooltip //~ this.tooltip = /*this.id + ', ' +*/ evidCount + ' experiment'; //~ if 
(evidCount > 1) { //~ this.tooltip += 's'; //~ } //~ this.tooltip += ' ('; //~ var nested_data = d3.nest() //~ .key(function(d) { //~ return d.experiment.detmethod.name; //~ }) //~ .rollup(function(leaves) { //~ return leaves.length; //~ }) //~ .entries(filteredEvids); //~ //~ nested_data.sort(function(a, b) { //~ return b.values - a.values //~ }); //~ var countDetMethods = nested_data.length //~ for (var i = 0; i < countDetMethods; i++) { //~ if (i > 0) { //~ this.tooltip += ', '; //~ } //~ this.tooltip += nested_data[i].values + ' ' + nested_data[i].key; //~ } //~ this.tooltip += ' )'; //~ //thickLine //~ if (evidCount > 1) { //~ this.thickLineShown = true //~ this.w = evidCount * (45 / BinaryLink.maxNoEvidences); //~ } //~ else { //~ // this.thickLineShown = false;//hack //~ this.w = evidCount * (45 / BinaryLink.maxNoEvidences);//hack //~ } //~ //ambig? //~ this.dashedLine(this.ambig); //sequence links will have been hidden previously this.show(); return true; } else {//at least one end was in stick form this.hide(); <|fim▁hole|>BinaryLink.prototype.show = function() { if (this.ctrl.initComplete) { if (!this.shown) { this.shown = true; if (typeof this.line === 'undefined') { this.initSVG(); } this.line.setAttribute("stroke-width", this.ctrl.z * 1); this.highlightLine.setAttribute("stroke-width", this.ctrl.z * 10); this.setLinkCoordinates(this.interactors[0]); this.setLinkCoordinates(this.interactors[1]); if (this.thickLineShown) { this.ctrl.p_pLinksWide.appendChild(this.thickLine); } this.ctrl.highlights.appendChild(this.highlightLine); this.ctrl.p_pLinks.appendChild(this.line); if (this.thickLineShown) { this.thickLine.setAttribute("stroke-width", this.w); } } } }; BinaryLink.prototype.hide = function() { if (this.shown) { this.shown = false; if (this.thickLineShown) { this.ctrl.p_pLinksWide.removeChild(this.thickLine); } this.ctrl.highlights.removeChild(this.highlightLine); this.ctrl.p_pLinks.removeChild(this.line); } }; BinaryLink.prototype.setLinkCoordinates 
= function(interactor) { if (this.shown) {//don't waste time changing DOM if link not visible var pos = interactor.getPosition(); if (interactor.type !== 'complex'){ if (this.interactors[0] === interactor) { this.line.setAttribute("x1", pos[0]); this.line.setAttribute("y1", pos[1]); this.highlightLine.setAttribute("x1", pos[0]); this.highlightLine.setAttribute("y1", pos[1]); if (this.thickLineShown) { this.thickLine.setAttribute("x1", pos[0]); this.thickLine.setAttribute("y1", pos[1]); } } else { this.line.setAttribute("x2", pos[0]); this.line.setAttribute("y2", pos[1]); this.highlightLine.setAttribute("x2", pos[0]); this.highlightLine.setAttribute("y2", pos[1]); if (this.thickLineShown) { this.thickLine.setAttribute("x2", pos[0]); this.thickLine.setAttribute("y2", pos[1]); } } }else {//interactor is a complex var otherEndPos = this.getOtherEnd(interactor).getPosition(); var naryPath = interactor.naryLink.hull; var iPath = new Array(); for (var pi = 0; pi < naryPath.length; pi++) { var p = naryPath[pi]; iPath.push(new Point2D(p[0],p[1])); } var a1 = new Point2D(pos[0], pos[1]); var a2 = new Point2D(otherEndPos[0], otherEndPos[1]); var intersect = Intersection.intersectLinePolygon(a1, a2, iPath); var newPos; if (intersect.points[0]){ newPos = [intersect.points[0].x,intersect.points[0].y]; } else { newPos = pos; } if (this.interactors[0] === interactor) { this.line.setAttribute("x1", newPos[0]); this.line.setAttribute("y1", newPos[1]); this.highlightLine.setAttribute("x1", newPos[0]); this.highlightLine.setAttribute("y1", newPos[1]); if (this.thickLineShown) { this.thickLine.setAttribute("x1", newPos[0]); this.thickLine.setAttribute("y1", newPos[1]); } } else { this.line.setAttribute("x2", newPos[0]); this.line.setAttribute("y2", newPos[1]); this.highlightLine.setAttribute("x2", newPos[0]); this.highlightLine.setAttribute("y2", newPos[1]); if (this.thickLineShown) { this.thickLine.setAttribute("x2", newPos[0]); this.thickLine.setAttribute("y2", newPos[1]); } } } } }; 
BinaryLink.prototype.getOtherEnd = function(interactor) { return ((this.interactors[0] === interactor) ? this.interactors[1] : this.interactors[0]); }; module.exports = BinaryLink;<|fim▁end|>
return false; } };
<|file_name|>AllocationTracker.java<|end_file_name|><|fim▁begin|>// Copyright 2017 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.devtools.build.lib.profiler.memory; import com.google.common.base.Objects; import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; import com.google.common.collect.MapMaker; import com.google.devtools.build.lib.concurrent.ThreadSafety.ConditionallyThreadCompatible; import com.google.devtools.build.lib.concurrent.ThreadSafety.ThreadSafe; import com.google.devtools.build.lib.packages.AspectClass; import com.google.devtools.build.lib.packages.RuleClass; import com.google.devtools.build.lib.packages.RuleFunction; import com.google.devtools.build.lib.syntax.Debug; import com.google.devtools.build.lib.syntax.Location; import com.google.devtools.build.lib.syntax.StarlarkCallable; import com.google.devtools.build.lib.syntax.StarlarkThread; import com.google.monitoring.runtime.instrumentation.Sampler; import com.google.perftools.profiles.ProfileProto.Function; import com.google.perftools.profiles.ProfileProto.Line; import com.google.perftools.profiles.ProfileProto.Profile; import com.google.perftools.profiles.ProfileProto.Sample; import com.google.perftools.profiles.ProfileProto.ValueType; import java.io.FileOutputStream; import java.io.IOException; import java.time.Instant; import java.util.HashMap; import java.util.Map; import java.util.Random; import 
java.util.zip.GZIPOutputStream; import javax.annotation.Nullable; /** Tracks allocations for memory reporting. */ @ConditionallyThreadCompatible @SuppressWarnings("ThreadLocalUsage") // the AllocationTracker is effectively a global public final class AllocationTracker implements Sampler, Debug.ThreadHook { // A mapping from Java thread to StarlarkThread. // Used to effect a hidden StarlarkThread parameter to sampleAllocation. // TODO(adonovan): opt: merge the three different ThreadLocals in use here. private final ThreadLocal<StarlarkThread> starlarkThread = new ThreadLocal<>(); @Override public void onPushFirst(StarlarkThread thread) { starlarkThread.set(thread); } @Override public void onPopLast(StarlarkThread thread) { starlarkThread.remove(); } private static class AllocationSample { @Nullable final RuleClass ruleClass; // Current rule being analysed, if any @Nullable final AspectClass aspectClass; // Current aspect being analysed, if any final ImmutableList<Frame> callstack; // Starlark callstack, if any final long bytes; AllocationSample( @Nullable RuleClass ruleClass, @Nullable AspectClass aspectClass, ImmutableList<Frame> callstack, long bytes) { this.ruleClass = ruleClass; this.aspectClass = aspectClass; this.callstack = callstack; this.bytes = bytes; } } private static class Frame { final String name; final Location loc; @Nullable final RuleFunction ruleFunction; Frame(String name, Location loc, @Nullable RuleFunction ruleFunction) { this.name = name; this.loc = loc; this.ruleFunction = ruleFunction; } } private final Map<Object, AllocationSample> allocations = new MapMaker().weakKeys().makeMap(); private final int samplePeriod; private final int sampleVariance; private boolean enabled = true; /** * Cheap wrapper class for a long. Avoids having to do two thread-local lookups per allocation. 
*/ private static final class LongValue { long value; } private final ThreadLocal<LongValue> currentSampleBytes = ThreadLocal.withInitial(LongValue::new); private final ThreadLocal<Long> nextSampleBytes = ThreadLocal.withInitial(this::getNextSample); private final Random random = new Random(); AllocationTracker(int samplePeriod, int variance) { this.samplePeriod = samplePeriod; this.sampleVariance = variance; } // Called by instrumentation.recordAllocation, which is in turn called // by an instrumented version of the application assembled on the fly // by instrumentation.AllocationInstrumenter. // The instrumenter inserts a call to recordAllocation after every // memory allocation instruction in the original class. // // This function runs within 'new', so is not supposed to allocate memory; // see Sampler interface. In fact it allocates in nearly a dozen places. // TODO(adonovan): suppress reentrant calls by setting a thread-local flag. @Override @ThreadSafe public void sampleAllocation(int count, String desc, Object newObj, long size) { if (!enabled) { return; } @Nullable StarlarkThread thread = starlarkThread.get(); // Calling Debug.getCallStack is a dubious operation here. // First it allocates memory, which breaks the Sampler contract. // Second, the allocation could in principle occur while the thread's // representation invariants are temporarily broken (that is, during // the call to ArrayList.add when pushing a new stack frame). // For now at least, the allocation done by ArrayList.add occurs before // the representation of the ArrayList is changed, so it is safe, // but this is a fragile assumption. ImmutableList<Debug.Frame> callstack = thread != null ? Debug.getCallStack(thread) : ImmutableList.of(); RuleClass ruleClass = CurrentRuleTracker.getRule(); AspectClass aspectClass = CurrentRuleTracker.getAspect(); // Should we bother sampling? 
if (callstack.isEmpty() && ruleClass == null && aspectClass == null) { return; } // Convert the thread's stack right away to our internal form. // It is not safe to inspect Debug.Frame references once the thread resumes, // and keeping StarlarkCallable values live defeats garbage collection. ImmutableList.Builder<Frame> frames = ImmutableList.builderWithExpectedSize(callstack.size()); for (Debug.Frame fr : callstack) { // The frame's PC location is currently not updated at every step, // only at function calls, so the leaf frame's line number may be // slightly off; see the tests. // TODO(b/149023294): remove comment when we move to a compiled representation. StarlarkCallable fn = fr.getFunction(); frames.add( new Frame( fn.getName(), fr.getLocation(), fn instanceof RuleFunction ? (RuleFunction) fn : null)); } // If we start getting stack overflows here, it's because the memory sampling // implementation has changed to call back into the sampling method immediately on // every allocation. Since thread locals can allocate, this can in this case lead // to infinite recursion. This method will then need to be rewritten to not // allocate, or at least not allocate to obtain its sample counters. LongValue bytesValue = currentSampleBytes.get(); long bytes = bytesValue.value + size; if (bytes < nextSampleBytes.get()) { bytesValue.value = bytes; return; } bytesValue.value = 0; nextSampleBytes.set(getNextSample()); allocations.put(newObj, new AllocationSample(ruleClass, aspectClass, frames.build(), bytes)); } private long getNextSample() { return (long) samplePeriod + (sampleVariance > 0 ? (random.nextInt(sampleVariance * 2) - sampleVariance) : 0); } /** A pair of rule/aspect name and the bytes it consumes. */ public static final class RuleBytes { private final String name; private long bytes; public RuleBytes(String name) { this.name = name; } /** The number of bytes total occupied by this rule or aspect class. 
*/ public long getBytes() { return bytes; } public RuleBytes addBytes(long bytes) { this.bytes += bytes; return this; } @Override public String toString() { return String.format("RuleBytes(%s, %d)", name, bytes); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } RuleBytes ruleBytes = (RuleBytes) o; return bytes == ruleBytes.bytes && Objects.equal(name, ruleBytes.name); } @Override public int hashCode() { return Objects.hashCode(name, bytes); } } // If the topmost stack entry is a call to a rule function, returns it. @Nullable private static RuleFunction getRule(AllocationSample sample) { Frame top = Iterables.getLast(sample.callstack, null); return top != null ? top.ruleFunction : null; } /** * Returns the total memory consumption for rules and aspects, keyed by {@link RuleClass#getKey} * or {@link AspectClass#getKey}. */ public void getRuleMemoryConsumption( Map<String, RuleBytes> rules, Map<String, RuleBytes> aspects) { // Make sure we don't track our own allocations enabled = false; System.gc(); // Get loading phase memory for rules. 
for (AllocationSample sample : allocations.values()) { RuleFunction rule = getRule(sample); if (rule != null) { RuleClass ruleClass = rule.getRuleClass(); String key = ruleClass.getKey(); RuleBytes ruleBytes = rules.computeIfAbsent(key, k -> new RuleBytes(ruleClass.getName())); rules.put(key, ruleBytes.addBytes(sample.bytes)); } } // Get analysis phase memory for rules and aspects for (AllocationSample sample : allocations.values()) { if (sample.ruleClass != null) { String key = sample.ruleClass.getKey(); RuleBytes ruleBytes = rules.computeIfAbsent(key, k -> new RuleBytes(sample.ruleClass.getName())); rules.put(key, ruleBytes.addBytes(sample.bytes)); } if (sample.aspectClass != null) { String key = sample.aspectClass.getKey(); RuleBytes ruleBytes = aspects.computeIfAbsent(key, k -> new RuleBytes(sample.aspectClass.getName())); aspects.put(key, ruleBytes.addBytes(sample.bytes)); } } enabled = true; } /** Dumps all Starlark analysis time allocations to a pprof-compatible file. */ public void dumpSkylarkAllocations(String path) throws IOException { // Make sure we don't track our own allocations enabled = false; System.gc(); Profile profile = buildMemoryProfile(); try (GZIPOutputStream outputStream = new GZIPOutputStream(new FileOutputStream(path))) { profile.writeTo(outputStream); outputStream.finish(); } enabled = true; } Profile buildMemoryProfile() { Profile.Builder profile = Profile.newBuilder(); StringTable stringTable = new StringTable(profile); FunctionTable functionTable = new FunctionTable(profile, stringTable); LocationTable locationTable = new LocationTable(profile, functionTable); profile.addSampleType( ValueType.newBuilder() .setType(stringTable.get("memory")) .setUnit(stringTable.get("bytes")) .build()); for (AllocationSample sample : allocations.values()) { // Skip empty callstacks if (sample.callstack.isEmpty()) { continue; } Sample.Builder b = Sample.newBuilder().addValue(sample.bytes); for (Frame fr : sample.callstack.reverse()) { 
b.addLocationId(locationTable.get(fr.loc.file(), fr.name, fr.loc.line())); } profile.addSample(b.build()); } profile.setTimeNanos(Instant.now().getEpochSecond() * 1000000000); return profile.build(); } private static class StringTable { final Profile.Builder profile; final Map<String, Long> table = new HashMap<>(); long index = 0; StringTable(Profile.Builder profile) { this.profile = profile; get(""); // 0 is reserved for the empty string } long get(String str) { return table.computeIfAbsent( str, key -> { profile.addStringTable(key); return index++; }); } } private static class FunctionTable { final Profile.Builder profile; final StringTable stringTable; final Map<String, Long> table = new HashMap<>(); long index = 1; // 0 is reserved FunctionTable(Profile.Builder profile, StringTable stringTable) { this.profile = profile; this.stringTable = stringTable; }<|fim▁hole|> long get(String file, String function) { return table.computeIfAbsent( file + "#" + function, key -> { Function fn = Function.newBuilder() .setId(index) .setFilename(stringTable.get(file)) .setName(stringTable.get(function)) .build(); profile.addFunction(fn); return index++; }); } } private static class LocationTable { final Profile.Builder profile; final FunctionTable functionTable; final Map<String, Long> table = new HashMap<>(); long index = 1; // 0 is reserved LocationTable(Profile.Builder profile, FunctionTable functionTable) { this.profile = profile; this.functionTable = functionTable; } long get(String file, String function, long line) { return table.computeIfAbsent( file + "#" + function + "#" + line, key -> { com.google.perftools.profiles.ProfileProto.Location location = com.google.perftools.profiles.ProfileProto.Location.newBuilder() .setId(index) .addLine( Line.newBuilder() .setFunctionId(functionTable.get(file, function)) .setLine(line) .build()) .build(); profile.addLocation(location); return index++; }); } } }<|fim▁end|>
<|file_name|>15.3.4.5-13.b-4.js<|end_file_name|><|fim▁begin|><|fim▁hole|>/// Copyright (c) 2012 Ecma International. All rights reserved. /// Ecma International makes this code available under the terms and conditions set /// forth on http://hg.ecmascript.org/tests/test262/raw-file/tip/LICENSE (the /// "Use Terms"). Any redistribution of this code must retain the above /// copyright and this notice and otherwise comply with the Use Terms. /** * @path ch15/15.3/15.3.4/15.3.4.5/15.3.4.5-13.b-4.js * @description Function.prototype.bind, 'length' set to remaining number of expected args (target takes 0 args) */ function testcase() { function foo() { } var o = {}; var bf = foo.bind(o); if (bf.length === 0) { return true; } } runTestCase(testcase);<|fim▁end|>
<|file_name|>tsort.py<|end_file_name|><|fim▁begin|>__all__ = ['tsort'] from functools import reduce def tsort(data, smallest_first=False, fewest_edges_first=False, flatten=False): # FIXME: support fewest_edges_first # make copy of data tmp = data.copy() # remove self-references for k, v in tmp.items(): v.discard(k) # initially find vertices that do not point to anything all_vertices = reduce(set.union, tmp.values()) starting_vertices = set(tmp.keys()) empty_vertices = all_vertices - starting_vertices # insert empty vertices for k in empty_vertices: tmp[k] = set() # algorithm starts here sorted_vertices = [] while True: # get all vertices that do not point to anything empty_vertices = {k for k, v in tmp.items() if not v} if not empty_vertices: break # if required, sort by smallest-numbered available vertex first if smallest_first: _empty_vertices = sorted(empty_vertices) else: _empty_vertices = (v for v in empty_vertices) # add current vertices that do not point to any other vertices if flatten: sorted_vertices.extend(_empty_vertices) else: sorted_vertices.append(_empty_vertices) # traverse all vertices and take set difference for # vertices which are not in previously found vertices # that do not point to any other vertices # tmp = { # k: (v - empty_vertices) # for k, v in tmp.items() # if k not in empty_vertices # } for k, v in list(tmp.items()): if k in empty_vertices: del tmp[k] else: tmp[k] = v - empty_vertices<|fim▁hole|> raise ValueError('Cyclic dependencies found') return sorted_vertices if __name__ == '__main__': from pprint import pprint data = { 2: {11}, 9: {11, 8}, 10: {11, 3}, 11: {7, 5}, 8: {7, 3}, } out = tsort(data, smallest_first=True) pprint(out)<|fim▁end|>
if tmp:
<|file_name|>test_alignment.py<|end_file_name|><|fim▁begin|>"""Unit test of the AlignmentScan @author: Kay Kasemir """ from __future__ import print_function import unittest from scan.commands import Set, CommandSequence from scan.alignment import AlignmentScan class AlignmentTest(unittest.TestCase): def testBasics(self):<|fim▁hole|> find_command="FindPeak") cmds = align.createScan() print(CommandSequence(cmds)) self.assertEqual(str(cmds), "[Set('Demo:CS:Scan:Fit:Height', 0), Set('motor_y', 3), Loop('motor_x', 0, 10, 0.5, [ Delay(0.5), Log('signal', 'motor_x'), Script('WriteDataToPV', 'motor_x', 'Demo:CS:Scan:Fit:Data:X'), Script('WriteDataToPV', 'signal', 'Demo:CS:Scan:Fit:Data:Y', '-', '1') ]), Script('FindPeak', 'motor_x', 'signal', '-', '1', 'Demo:CS:Scan:Fit:Pos', 'Demo:CS:Scan:Fit:Height', 'Demo:CS:Scan:Fit:Width')]") if __name__ == "__main__": unittest.main()<|fim▁end|>
align = AlignmentScan("motor_x", 0, 10, 0.5, "seconds", 0.5, "signal", pre=Set("motor_y", 3),
<|file_name|>smartDataActions.tests.ts<|end_file_name|><|fim▁begin|>import { Observable, BehaviorSubject } from 'rxjs'; import { SortDirection } from '../../sorts/index'; import { IFilterWithValue, defaultThrottleLimit, toRequestStream, throttled, unthrottled, toFiltersWithValues, suppressInactiveFilters, toTypesWithValues, toFilterChanges, combineWithSortsAndPaging, toObservableArray, pipe, } from './smartDataActions'; const pagingParams = { pageNumber: 1, pageSize: defaultThrottleLimit, }; describe('smart data source actions', () => { describe('toRequestStream', () => { it('should skip the first event on each throttle change', () => { const filters = [ { type: 'one', subject: new BehaviorSubject('Filter 1'), serialize: () => filters[0].subject, }, ]; const sorts = [ { column: { label: 'col1' }, direction: SortDirection.ascending, }, ]; const throttled$ = new BehaviorSubject(false); const appliedFiltersSpy = sinon.spy(); toRequestStream(throttled$, Observable.of(<any>filters), Observable.of(<any>sorts)).subscribe(appliedFiltersSpy); sinon.assert.notCalled(appliedFiltersSpy); filters[0].subject.next('Filter 1 changed'); let expected: any = { filters: { one: 'Filter 1 changed' }, sorts: [{ column: 'col1', direction: SortDirection.getFullName(SortDirection.ascending) }], paging: pagingParams, }; sinon.assert.calledOnce(appliedFiltersSpy); sinon.assert.calledWith(appliedFiltersSpy, expected); appliedFiltersSpy.reset(); throttled$.next(true); sinon.assert.notCalled(appliedFiltersSpy); filters[0].subject.next('Filter 1 changed again'); expected = { filters: { one: 'Filter 1 changed again' }, sorts: [{ column: 'col1', direction: SortDirection.getFullName(SortDirection.ascending) }], paging: pagingParams, }; sinon.assert.calledOnce(appliedFiltersSpy); sinon.assert.calledWith(appliedFiltersSpy, expected); }); }); describe('throttled', () => { it('should fire an event for every filter that changes whether active or inactive', () => { const filters = [ { type: 'one', 
subject: new BehaviorSubject('Filter 1'), serialize: () => filters[0].subject, }, { type: 'two', subject: new BehaviorSubject('Filter 2'), serialize: () => filters[1].subject, }, { type: 'three', subject: new BehaviorSubject(null), serialize: () => filters[2].subject, }, ]; const sorts = [ { column: { label: 'col1' }, direction: SortDirection.ascending, }, ]; const appliedFiltersSpy = sinon.spy(); throttled(Observable.of(<any>filters), Observable.of(<any>sorts)).subscribe(appliedFiltersSpy); let expected: any = { filters: { one: 'Filter 1', two: 'Filter 2' },<|fim▁hole|> sorts: [{ column: 'col1', direction: SortDirection.getFullName(SortDirection.ascending) }], paging: pagingParams, }; sinon.assert.calledOnce(appliedFiltersSpy); sinon.assert.calledWith(appliedFiltersSpy, expected); appliedFiltersSpy.reset(); filters[2].subject.next('Filter 3'); expected = { filters: { one: 'Filter 1', two: 'Filter 2', three: 'Filter 3' }, sorts: [{ column: 'col1', direction: SortDirection.getFullName(SortDirection.ascending) }], paging: pagingParams, }; sinon.assert.calledOnce(appliedFiltersSpy); sinon.assert.calledWith(appliedFiltersSpy, expected); appliedFiltersSpy.reset(); filters[1].subject.next('Filter 2 changed'); expected = { filters: { one: 'Filter 1', two: 'Filter 2 changed', three: 'Filter 3' }, sorts: [{ column: 'col1', direction: SortDirection.getFullName(SortDirection.ascending) }], paging: pagingParams, }; sinon.assert.calledOnce(appliedFiltersSpy); sinon.assert.calledWith(appliedFiltersSpy, expected); }); it('should fire an event for every sort change', () => { const filters = [ { type: 'one', serialize: () => Observable.of('value1'), }, { type: 'two', serialize: () => Observable.of('value2'), }, ]; const sorts = [ { column: { label: 'col1' }, direction: SortDirection.ascending, }, ]; const sorts$: BehaviorSubject<any> = new BehaviorSubject(sorts); const appliedFiltersSpy = sinon.spy(); throttled(Observable.of(<any>filters), sorts$).subscribe(appliedFiltersSpy); let 
expected = { filters: { one: 'value1', two: 'value2' }, sorts: [{ column: 'col1', direction: SortDirection.getFullName(SortDirection.ascending) }], paging: pagingParams, }; sinon.assert.calledOnce(appliedFiltersSpy); sinon.assert.calledWith(appliedFiltersSpy, expected); appliedFiltersSpy.reset(); const newSorts = [ { column: { label: 'col2' }, direction: SortDirection.ascending, }, ]; sorts$.next(newSorts); expected = { filters: { one: 'value1', two: 'value2' }, sorts: [{ column: 'col2', direction: SortDirection.getFullName(SortDirection.ascending) }], paging: pagingParams, }; sinon.assert.calledOnce(appliedFiltersSpy); sinon.assert.calledWith(appliedFiltersSpy, expected); }); }); describe('unthrottled', () => { it('should fire an event for every active filter that changes', () => { const filters = [ { type: 'one', subject: new BehaviorSubject('Filter 1'), serialize: () => filters[0].subject, }, { type: 'two', subject: new BehaviorSubject('Filter 2'), serialize: () => filters[1].subject, }, { type: 'three', subject: new BehaviorSubject(null), serialize: () => filters[2].subject, }, ]; const sorts = [ { column: { label: 'col1' }, direction: SortDirection.ascending, }, ]; const appliedFiltersSpy = sinon.spy(); unthrottled(Observable.of(filters), Observable.of(<any>sorts)).subscribe(appliedFiltersSpy); let expected = { filters: { one: 'Filter 1', two: 'Filter 2' }, sorts: [{ column: 'col1', direction: SortDirection.getFullName(SortDirection.ascending) }], paging: pagingParams, }; sinon.assert.calledOnce(appliedFiltersSpy); sinon.assert.calledWith(appliedFiltersSpy, expected); appliedFiltersSpy.reset(); filters[2].subject.next('Filter 3'); sinon.assert.notCalled(appliedFiltersSpy); filters[1].subject.next('Filter 2 changed'); expected = { filters: { one: 'Filter 1', two: 'Filter 2 changed' }, sorts: [{ column: 'col1', direction: SortDirection.getFullName(SortDirection.ascending) }], paging: pagingParams, }; sinon.assert.calledOnce(appliedFiltersSpy); 
sinon.assert.calledWith(appliedFiltersSpy, expected); }); it('should suppress sort changes', () => { const filters = [ { type: 'one', serialize: () => Observable.of('value1'), }, { type: 'two', serialize: () => Observable.of('value2'), }, ]; const sorts = [ { column: { label: 'col1' }, direction: SortDirection.ascending, }, ]; const sorts$: BehaviorSubject<any> = new BehaviorSubject(sorts); const appliedFiltersSpy = sinon.spy(); unthrottled(Observable.of(<any>filters), sorts$).subscribe(appliedFiltersSpy); let expected = { filters: { one: 'value1', two: 'value2' }, sorts: [{ column: 'col1', direction: SortDirection.getFullName(SortDirection.ascending) }], paging: pagingParams, }; sinon.assert.calledOnce(appliedFiltersSpy); sinon.assert.calledWith(appliedFiltersSpy, expected); appliedFiltersSpy.reset(); const newSorts = [ { column: { label: 'col2' }, direction: SortDirection.ascending, }, ]; sorts$.next(newSorts); sinon.assert.notCalled(appliedFiltersSpy); }); }); describe('suppressInactiveFilters', () => { it('should drop filters with a null current value', () => { const filters = [ { serialize: () => Observable.of('Filter 1') }, { serialize: () => Observable.of(null) }, ]; const activeFiltersSpy = sinon.spy(); suppressInactiveFilters(Observable.of(<any>filters)).subscribe(activeFiltersSpy); sinon.assert.calledOnce(activeFiltersSpy); sinon.assert.calledWith(activeFiltersSpy, [filters[0]]); }); it('should suppress changes to inactive filters', () => { const activeFilter = { serialize: () => activeFilter.stream, stream: new BehaviorSubject('Filter 1'), }; const inactiveFilter = { serialize: () => inactiveFilter.stream, stream: new BehaviorSubject(null), }; const activeFiltersSpy = sinon.spy(); suppressInactiveFilters(Observable.of(<any>[activeFilter, inactiveFilter])).subscribe(activeFiltersSpy); sinon.assert.calledOnce(activeFiltersSpy); sinon.assert.calledWith(activeFiltersSpy, [activeFilter]); activeFiltersSpy.reset(); inactiveFilter.stream.next('Filter 2'); 
sinon.assert.notCalled(activeFiltersSpy); }); }); describe('toFiltersWithValues', () => { it('should map to an array of filters and values', () => { const filters = [ { serialize: () => Observable.of('Filter 1') }, { serialize: () => Observable.of('Filter 2') }, ]; let filtersWithValues; toFiltersWithValues(Observable.of(<any>filters)).subscribe(result => filtersWithValues = result); expect(filtersWithValues[0].filter).to.equal(filters[0]); expect(filtersWithValues[0].value).to.equal('Filter 1'); expect(filtersWithValues[1].filter).to.equal(filters[1]); expect(filtersWithValues[1].value).to.equal('Filter 2'); }); }); describe('toFilterChanges', () => { it('should map the types and values to an object mapping', () => { const filters = [ { type: 'type1', serialize: () => Observable.of('value1') }, { type: 'type2', serialize: () => Observable.of('value2') }, ]; const activeFilterChanges = sinon.spy(); toFilterChanges(Observable.of(<any>filters)).subscribe(activeFilterChanges); sinon.assert.calledOnce(activeFilterChanges); sinon.assert.calledWith(activeFilterChanges, { type1: 'value1', type2: 'value2' }); }); it('should filter out null values', () => { const filters = [ { type: 'type1', serialize: () => Observable.of('value1') }, { type: 'type2', serialize: () => Observable.of(null) }, ]; const activeFilterChanges = sinon.spy(); toFilterChanges(Observable.of(<any>filters)).subscribe(activeFilterChanges); sinon.assert.calledOnce(activeFilterChanges); sinon.assert.calledWith(activeFilterChanges, { type1: 'value1' }); }); }); describe('toTypesWithValues', () => { it('should map to an array of types and values', () => { const filters = [ { type: 'type1', serialize: () => Observable.of('Value 1') }, { type: 'type2', serialize: () => Observable.of('Value 2') }, ]; let typesWithValues; toTypesWithValues(Observable.of(<any>filters)).subscribe(result => typesWithValues = result); expect(typesWithValues[0].type).to.equal('type1'); expect(typesWithValues[0].value).to.equal('Value 
1'); expect(typesWithValues[1].type).to.equal('type2'); expect(typesWithValues[1].value).to.equal('Value 2'); }); }); describe('combineWithSorts', () => { it('should combine the filter values and sorts into a single event object', () => { const filterValues = { type1: 'value1' }; const sorts = [ { column: { label: 'col1' }, direction: SortDirection.ascending, }, ]; let filtersAndSorts; combineWithSortsAndPaging(Observable.of(filterValues), Observable.of(<any>sorts)).subscribe(result => filtersAndSorts = result); expect(filtersAndSorts.filters).to.equal(filterValues); expect(filtersAndSorts.sorts).to.have.length(1); expect(filtersAndSorts.sorts[0].column).to.equal('col1'); expect(filtersAndSorts.sorts[0].direction).to.equal(SortDirection.getFullName(SortDirection.ascending)); }); }); describe('toObservableArray', () => { it('should map the array to an array of observables using the specified transform', () => { const array = [1, 2, 3]; const transform = item => Observable.of(item); const expectedResult = [Observable.of(1), Observable.of(2), Observable.of(3)]; const observableArray = toObservableArray(array, transform); expect(observableArray).to.deep.equal(expectedResult); }); }); describe('pipe', () => { it('should run the actions in order and pipe the result of each action to the next', () => { const one = sinon.spy(() => 1); const two = sinon.spy(() => 2); const three = sinon.spy(() => 3); const result = pipe(0, [one, two, three]); sinon.assert.calledOnce(one); sinon.assert.calledWith(one, 0); sinon.assert.calledOnce(two); sinon.assert.calledWith(two, 1); sinon.assert.calledOnce(three); sinon.assert.calledWith(three, 2); expect(result).to.equal(3); }); }); });<|fim▁end|>
<|file_name|>casttype.pb.go<|end_file_name|><|fim▁begin|>// Code generated by protoc-gen-gogo. // source: combos/unsafemarshaler/casttype.proto // DO NOT EDIT! /* Package casttype is a generated protocol buffer package. It is generated from these files: combos/unsafemarshaler/casttype.proto It has these top-level messages: Castaway Wilson */ package casttype import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" import _ "github.com/gogo/protobuf/gogoproto" import github_com_gogo_protobuf_test_casttype "github.com/gogo/protobuf/test/casttype" import github_com_gogo_protobuf_protoc_gen_gogo_descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" import compress_gzip "compress/gzip" import bytes "bytes" import io_ioutil "io/ioutil" import strings "strings" import reflect "reflect" import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type Castaway struct { Int32Ptr *int32 `protobuf:"varint,1,opt,name=Int32Ptr,casttype=int32" json:"Int32Ptr,omitempty"` Int32 int32 `protobuf:"varint,2,opt,name=Int32,casttype=int32" json:"Int32"` MyUint64Ptr *github_com_gogo_protobuf_test_casttype.MyUint64Type `protobuf:"varint,3,opt,name=MyUint64Ptr,casttype=github.com/gogo/protobuf/test/casttype.MyUint64Type" json:"MyUint64Ptr,omitempty"` MyUint64 github_com_gogo_protobuf_test_casttype.MyUint64Type `protobuf:"varint,4,opt,name=MyUint64,casttype=github.com/gogo/protobuf/test/casttype.MyUint64Type" json:"MyUint64"` MyFloat32Ptr *github_com_gogo_protobuf_test_casttype.MyFloat32Type `protobuf:"fixed32,5,opt,name=MyFloat32Ptr,casttype=github.com/gogo/protobuf/test/casttype.MyFloat32Type" json:"MyFloat32Ptr,omitempty"` MyFloat32 github_com_gogo_protobuf_test_casttype.MyFloat32Type `protobuf:"fixed32,6,opt,name=MyFloat32,casttype=github.com/gogo/protobuf/test/casttype.MyFloat32Type" json:"MyFloat32"` MyFloat64Ptr *github_com_gogo_protobuf_test_casttype.MyFloat64Type `protobuf:"fixed64,7,opt,name=MyFloat64Ptr,casttype=github.com/gogo/protobuf/test/casttype.MyFloat64Type" json:"MyFloat64Ptr,omitempty"` MyFloat64 github_com_gogo_protobuf_test_casttype.MyFloat64Type `protobuf:"fixed64,8,opt,name=MyFloat64,casttype=github.com/gogo/protobuf/test/casttype.MyFloat64Type" json:"MyFloat64"` MyBytes github_com_gogo_protobuf_test_casttype.Bytes `protobuf:"bytes,9,opt,name=MyBytes,casttype=github.com/gogo/protobuf/test/casttype.Bytes" json:"MyBytes,omitempty"` NormalBytes []byte `protobuf:"bytes,10,opt,name=NormalBytes" json:"NormalBytes,omitempty"` MyUint64S []github_com_gogo_protobuf_test_casttype.MyUint64Type `protobuf:"varint,11,rep,name=MyUint64s,casttype=github.com/gogo/protobuf/test/casttype.MyUint64Type" json:"MyUint64s,omitempty"` MyMap github_com_gogo_protobuf_test_casttype.MyMapType 
`protobuf:"bytes,12,rep,name=MyMap,casttype=github.com/gogo/protobuf/test/casttype.MyMapType" json:"MyMap,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` MyCustomMap map[github_com_gogo_protobuf_test_casttype.MyStringType]github_com_gogo_protobuf_test_casttype.MyUint64Type `protobuf:"bytes,13,rep,name=MyCustomMap,castkey=github.com/gogo/protobuf/test/casttype.MyStringType,castvalue=github.com/gogo/protobuf/test/casttype.MyUint64Type" json:"MyCustomMap,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` MyNullableMap map[github_com_gogo_protobuf_test_casttype.MyInt32Type]*Wilson `protobuf:"bytes,14,rep,name=MyNullableMap,castkey=github.com/gogo/protobuf/test/casttype.MyInt32Type" json:"MyNullableMap,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` MyEmbeddedMap map[github_com_gogo_protobuf_test_casttype.MyInt32Type]Wilson `protobuf:"bytes,15,rep,name=MyEmbeddedMap,castkey=github.com/gogo/protobuf/test/casttype.MyInt32Type" json:"MyEmbeddedMap" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` String_ *github_com_gogo_protobuf_test_casttype.MyStringType `protobuf:"bytes,16,opt,name=String,casttype=github.com/gogo/protobuf/test/casttype.MyStringType" json:"String,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Castaway) Reset() { *m = Castaway{} } func (*Castaway) ProtoMessage() {} func (*Castaway) Descriptor() ([]byte, []int) { return fileDescriptorCasttype, []int{0} } type Wilson struct { Int64 *int64 `protobuf:"varint,1,opt,name=Int64" json:"Int64,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Wilson) Reset() { *m = Wilson{} } func (*Wilson) ProtoMessage() {} func (*Wilson) Descriptor() ([]byte, []int) { return fileDescriptorCasttype, []int{1} } func init() { proto.RegisterType((*Castaway)(nil), "casttype.Castaway") proto.RegisterType((*Wilson)(nil), "casttype.Wilson") } func (this *Castaway) Description() 
(desc *github_com_gogo_protobuf_protoc_gen_gogo_descriptor.FileDescriptorSet) { return CasttypeDescription() } func (this *Wilson) Description() (desc *github_com_gogo_protobuf_protoc_gen_gogo_descriptor.FileDescriptorSet) { return CasttypeDescription() } func CasttypeDescription() (desc *github_com_gogo_protobuf_protoc_gen_gogo_descriptor.FileDescriptorSet) { d := &github_com_gogo_protobuf_protoc_gen_gogo_descriptor.FileDescriptorSet{} var gzipped = []byte{ // 3994 bytes of a gzipped FileDescriptorSet 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x7a, 0x5d, 0x70, 0x1b, 0xd7, 0x75, 0x3f, 0x17, 0x1f, 0x24, 0x70, 0x00, 0x82, 0xcb, 0x4b, 0x5a, 0x86, 0xe8, 0x18, 0xa4, 0x68, 0xcb, 0xa6, 0xed, 0x84, 0xf2, 0x48, 0x94, 0x2c, 0x43, 0x89, 0x3d, 0x00, 0x09, 0x31, 0xd4, 0x10, 0x24, 0xff, 0x4b, 0x32, 0xfe, 0xc8, 0x7f, 0x66, 0xe7, 0x72, 0x71, 0x09, 0xae, 0xb4, 0xd8, 0x45, 0x77, 0x17, 0x92, 0xe1, 0x27, 0x25, 0x6e, 0x9b, 0x49, 0x33, 0xfd, 0xee, 0x4c, 0x13, 0xc7, 0x71, 0xdd, 0xcc, 0xb4, 0x4e, 0xd3, 0xaf, 0xa4, 0x6d, 0xd2, 0x4e, 0x9e, 0xf2, 0x92, 0xd6, 0x4f, 0x9d, 0xe4, 0xad, 0x0f, 0x1d, 0x39, 0x62, 0x3d, 0x53, 0xa7, 0x75, 0x5b, 0xb7, 0xf5, 0x4c, 0x33, 0xf2, 0x4b, 0xe7, 0x7e, 0x2d, 0x16, 0x1f, 0xd4, 0x82, 0xca, 0x24, 0xe9, 0x13, 0x79, 0xcf, 0x3d, 0xbf, 0xdf, 0x9e, 0x7b, 0xee, 0xb9, 0xe7, 0x9c, 0xbd, 0x0b, 0xf8, 0xcc, 0x12, 0xcc, 0xd5, 0x1d, 0xa7, 0x6e, 0x91, 0x33, 0x4d, 0xd7, 0xf1, 0x9d, 0xbd, 0xd6, 0xfe, 0x99, 0x1a, 0xf1, 0x0c, 0xd7, 0x6c, 0xfa, 0x8e, 0xbb, 0xc8, 0x64, 0x68, 0x82, 0x6b, 0x2c, 0x4a, 0x8d, 0xf9, 0x2a, 0x4c, 0x5e, 0x36, 0x2d, 0xb2, 0x12, 0x28, 0x6e, 0x13, 0x1f, 0x5d, 0x84, 0xc4, 0xbe, 0x69, 0x91, 0xbc, 0x32, 0x17, 0x5f, 0xc8, 0x9c, 0x7d, 0x78, 0xb1, 0x07, 0xb4, 0xd8, 0x8d, 0xd8, 0xa2, 0x62, 0x8d, 0x21, 0xe6, 0xdf, 0x49, 0xc0, 0xd4, 0x80, 0x59, 0x84, 0x20, 0x61, 0xe3, 0x06, 0x65, 0x54, 0x16, 0xd2, 0x1a, 0xfb, 0x1f, 0xe5, 0x61, 0xac, 0x89, 0x8d, 0x6b, 0xb8, 0x4e, 0xf2, 0x31, 0x26, 0x96, 0x43, 0x54, 0x00, 0xa8, 0x91, 0x26, 0xb1, 0x6b, 0xc4, 0x36, 
0xda, 0xf9, 0xf8, 0x5c, 0x7c, 0x21, 0xad, 0x85, 0x24, 0xe8, 0x09, 0x98, 0x6c, 0xb6, 0xf6, 0x2c, 0xd3, 0xd0, 0x43, 0x6a, 0x30, 0x17, 0x5f, 0x48, 0x6a, 0x2a, 0x9f, 0x58, 0xe9, 0x28, 0x3f, 0x0a, 0x13, 0x37, 0x08, 0xbe, 0x16, 0x56, 0xcd, 0x30, 0xd5, 0x1c, 0x15, 0x87, 0x14, 0x97, 0x21, 0xdb, 0x20, 0x9e, 0x87, 0xeb, 0x44, 0xf7, 0xdb, 0x4d, 0x92, 0x4f, 0xb0, 0xd5, 0xcf, 0xf5, 0xad, 0xbe, 0x77, 0xe5, 0x19, 0x81, 0xda, 0x69, 0x37, 0x09, 0x2a, 0x41, 0x9a, 0xd8, 0xad, 0x06, 0x67, 0x48, 0x1e, 0xe1, 0xbf, 0x8a, 0xdd, 0x6a, 0xf4, 0xb2, 0xa4, 0x28, 0x4c, 0x50, 0x8c, 0x79, 0xc4, 0xbd, 0x6e, 0x1a, 0x24, 0x3f, 0xca, 0x08, 0x1e, 0xed, 0x23, 0xd8, 0xe6, 0xf3, 0xbd, 0x1c, 0x12, 0x87, 0x96, 0x21, 0x4d, 0x5e, 0xf2, 0x89, 0xed, 0x99, 0x8e, 0x9d, 0x1f, 0x63, 0x24, 0xa7, 0x07, 0xec, 0x22, 0xb1, 0x6a, 0xbd, 0x14, 0x1d, 0x1c, 0xba, 0x00, 0x63, 0x4e, 0xd3, 0x37, 0x1d, 0xdb, 0xcb, 0xa7, 0xe6, 0x94, 0x85, 0xcc, 0xd9, 0x8f, 0x0c, 0x0c, 0x84, 0x4d, 0xae, 0xa3, 0x49, 0x65, 0xb4, 0x06, 0xaa, 0xe7, 0xb4, 0x5c, 0x83, 0xe8, 0x86, 0x53, 0x23, 0xba, 0x69, 0xef, 0x3b, 0xf9, 0x34, 0x23, 0x98, 0xed, 0x5f, 0x08, 0x53, 0x5c, 0x76, 0x6a, 0x64, 0xcd, 0xde, 0x77, 0xb4, 0x9c, 0xd7, 0x35, 0x46, 0x27, 0x60, 0xd4, 0x6b, 0xdb, 0x3e, 0x7e, 0x29, 0x9f, 0x65, 0x11, 0x22, 0x46, 0xf3, 0xff, 0x93, 0x84, 0x89, 0x61, 0x42, 0xec, 0x12, 0x24, 0xf7, 0xe9, 0x2a, 0xf3, 0xb1, 0xe3, 0xf8, 0x80, 0x63, 0xba, 0x9d, 0x38, 0x7a, 0x8f, 0x4e, 0x2c, 0x41, 0xc6, 0x26, 0x9e, 0x4f, 0x6a, 0x3c, 0x22, 0xe2, 0x43, 0xc6, 0x14, 0x70, 0x50, 0x7f, 0x48, 0x25, 0xee, 0x29, 0xa4, 0x9e, 0x87, 0x89, 0xc0, 0x24, 0xdd, 0xc5, 0x76, 0x5d, 0xc6, 0xe6, 0x99, 0x28, 0x4b, 0x16, 0x2b, 0x12, 0xa7, 0x51, 0x98, 0x96, 0x23, 0x5d, 0x63, 0xb4, 0x02, 0xe0, 0xd8, 0xc4, 0xd9, 0xd7, 0x6b, 0xc4, 0xb0, 0xf2, 0xa9, 0x23, 0xbc, 0xb4, 0x49, 0x55, 0xfa, 0xbc, 0xe4, 0x70, 0xa9, 0x61, 0xa1, 0xa7, 0x3b, 0xa1, 0x36, 0x76, 0x44, 0xa4, 0x54, 0xf9, 0x21, 0xeb, 0x8b, 0xb6, 0x5d, 0xc8, 0xb9, 0x84, 0xc6, 0x3d, 0xa9, 0x89, 0x95, 0xa5, 0x99, 0x11, 0x8b, 0x91, 0x2b, 0xd3, 0x04, 0x8c, 0x2f, 
0x6c, 0xdc, 0x0d, 0x0f, 0xd1, 0x43, 0x10, 0x08, 0x74, 0x16, 0x56, 0xc0, 0xb2, 0x50, 0x56, 0x0a, 0x37, 0x70, 0x83, 0xcc, 0x5c, 0x84, 0x5c, 0xb7, 0x7b, 0xd0, 0x34, 0x24, 0x3d, 0x1f, 0xbb, 0x3e, 0x8b, 0xc2, 0xa4, 0xc6, 0x07, 0x48, 0x85, 0x38, 0xb1, 0x6b, 0x2c, 0xcb, 0x25, 0x35, 0xfa, 0xef, 0xcc, 0x53, 0x30, 0xde, 0xf5, 0xf8, 0x61, 0x81, 0xf3, 0x5f, 0x1c, 0x85, 0xe9, 0x41, 0x31, 0x37, 0x30, 0xfc, 0x4f, 0xc0, 0xa8, 0xdd, 0x6a, 0xec, 0x11, 0x37, 0x1f, 0x67, 0x0c, 0x62, 0x84, 0x4a, 0x90, 0xb4, 0xf0, 0x1e, 0xb1, 0xf2, 0x89, 0x39, 0x65, 0x21, 0x77, 0xf6, 0x89, 0xa1, 0xa2, 0x7a, 0x71, 0x9d, 0x42, 0x34, 0x8e, 0x44, 0xcf, 0x40, 0x42, 0xa4, 0x38, 0xca, 0xf0, 0xf8, 0x70, 0x0c, 0x34, 0x16, 0x35, 0x86, 0x43, 0x0f, 0x40, 0x9a, 0xfe, 0xe5, 0xbe, 0x1d, 0x65, 0x36, 0xa7, 0xa8, 0x80, 0xfa, 0x15, 0xcd, 0x40, 0x8a, 0x85, 0x59, 0x8d, 0xc8, 0xd2, 0x10, 0x8c, 0xe9, 0xc6, 0xd4, 0xc8, 0x3e, 0x6e, 0x59, 0xbe, 0x7e, 0x1d, 0x5b, 0x2d, 0xc2, 0x02, 0x26, 0xad, 0x65, 0x85, 0xf0, 0x53, 0x54, 0x86, 0x66, 0x21, 0xc3, 0xa3, 0xd2, 0xb4, 0x6b, 0xe4, 0x25, 0x96, 0x7d, 0x92, 0x1a, 0x0f, 0xd4, 0x35, 0x2a, 0xa1, 0x8f, 0xbf, 0xea, 0x39, 0xb6, 0xdc, 0x5a, 0xf6, 0x08, 0x2a, 0x60, 0x8f, 0x7f, 0xaa, 0x37, 0xf1, 0x3d, 0x38, 0x78, 0x79, 0xbd, 0xb1, 0x38, 0xff, 0xed, 0x18, 0x24, 0xd8, 0x79, 0x9b, 0x80, 0xcc, 0xce, 0x0b, 0x5b, 0x15, 0x7d, 0x65, 0x73, 0xb7, 0xbc, 0x5e, 0x51, 0x15, 0x94, 0x03, 0x60, 0x82, 0xcb, 0xeb, 0x9b, 0xa5, 0x1d, 0x35, 0x16, 0x8c, 0xd7, 0x36, 0x76, 0x2e, 0x2c, 0xa9, 0xf1, 0x00, 0xb0, 0xcb, 0x05, 0x89, 0xb0, 0xc2, 0xb9, 0xb3, 0x6a, 0x12, 0xa9, 0x90, 0xe5, 0x04, 0x6b, 0xcf, 0x57, 0x56, 0x2e, 0x2c, 0xa9, 0xa3, 0xdd, 0x92, 0x73, 0x67, 0xd5, 0x31, 0x34, 0x0e, 0x69, 0x26, 0x29, 0x6f, 0x6e, 0xae, 0xab, 0xa9, 0x80, 0x73, 0x7b, 0x47, 0x5b, 0xdb, 0x58, 0x55, 0xd3, 0x01, 0xe7, 0xaa, 0xb6, 0xb9, 0xbb, 0xa5, 0x42, 0xc0, 0x50, 0xad, 0x6c, 0x6f, 0x97, 0x56, 0x2b, 0x6a, 0x26, 0xd0, 0x28, 0xbf, 0xb0, 0x53, 0xd9, 0x56, 0xb3, 0x5d, 0x66, 0x9d, 0x3b, 0xab, 0x8e, 0x07, 0x8f, 0xa8, 0x6c, 0xec, 0x56, 0xd5, 0x1c, 0x9a, 
0x84, 0x71, 0xfe, 0x08, 0x69, 0xc4, 0x44, 0x8f, 0xe8, 0xc2, 0x92, 0xaa, 0x76, 0x0c, 0xe1, 0x2c, 0x93, 0x5d, 0x82, 0x0b, 0x4b, 0x2a, 0x9a, 0x5f, 0x86, 0x24, 0x8b, 0x2e, 0x84, 0x20, 0xb7, 0x5e, 0x2a, 0x57, 0xd6, 0xf5, 0xcd, 0xad, 0x9d, 0xb5, 0xcd, 0x8d, 0xd2, 0xba, 0xaa, 0x74, 0x64, 0x5a, 0xe5, 0xff, 0xed, 0xae, 0x69, 0x95, 0x15, 0x35, 0x16, 0x96, 0x6d, 0x55, 0x4a, 0x3b, 0x95, 0x15, 0x35, 0x3e, 0x6f, 0xc0, 0xf4, 0xa0, 0x3c, 0x33, 0xf0, 0x64, 0x84, 0xb6, 0x38, 0x76, 0xc4, 0x16, 0x33, 0xae, 0xbe, 0x2d, 0xfe, 0xaa, 0x02, 0x53, 0x03, 0x72, 0xed, 0xc0, 0x87, 0x3c, 0x0b, 0x49, 0x1e, 0xa2, 0xbc, 0xfa, 0x3c, 0x36, 0x30, 0x69, 0xb3, 0x80, 0xed, 0xab, 0x40, 0x0c, 0x17, 0xae, 0xc0, 0xf1, 0x23, 0x2a, 0x30, 0xa5, 0xe8, 0x33, 0xf2, 0x15, 0x05, 0xf2, 0x47, 0x71, 0x47, 0x24, 0x8a, 0x58, 0x57, 0xa2, 0xb8, 0xd4, 0x6b, 0xc0, 0xa9, 0xa3, 0xd7, 0xd0, 0x67, 0xc5, 0x9b, 0x0a, 0x9c, 0x18, 0xdc, 0xa8, 0x0c, 0xb4, 0xe1, 0x19, 0x18, 0x6d, 0x10, 0xff, 0xc0, 0x91, 0xc5, 0xfa, 0x91, 0x01, 0x25, 0x80, 0x4e, 0xf7, 0xfa, 0x4a, 0xa0, 0xc2, 0x35, 0x24, 0x7e, 0x54, 0xb7, 0xc1, 0xad, 0xe9, 0xb3, 0xf4, 0xf3, 0x31, 0xb8, 0x6f, 0x20, 0xf9, 0x40, 0x43, 0x1f, 0x04, 0x30, 0xed, 0x66, 0xcb, 0xe7, 0x05, 0x99, 0xe7, 0xa7, 0x34, 0x93, 0xb0, 0xb3, 0x4f, 0x73, 0x4f, 0xcb, 0x0f, 0xe6, 0xe3, 0x6c, 0x1e, 0xb8, 0x88, 0x29, 0x5c, 0xec, 0x18, 0x9a, 0x60, 0x86, 0x16, 0x8e, 0x58, 0x69, 0x5f, 0xad, 0x7b, 0x12, 0x54, 0xc3, 0x32, 0x89, 0xed, 0xeb, 0x9e, 0xef, 0x12, 0xdc, 0x30, 0xed, 0x3a, 0x4b, 0xc0, 0xa9, 0x62, 0x72, 0x1f, 0x5b, 0x1e, 0xd1, 0x26, 0xf8, 0xf4, 0xb6, 0x9c, 0xa5, 0x08, 0x56, 0x65, 0xdc, 0x10, 0x62, 0xb4, 0x0b, 0xc1, 0xa7, 0x03, 0xc4, 0xfc, 0x17, 0xc6, 0x20, 0x13, 0x6a, 0xeb, 0xd0, 0x29, 0xc8, 0x5e, 0xc5, 0xd7, 0xb1, 0x2e, 0x5b, 0x75, 0xee, 0x89, 0x0c, 0x95, 0x6d, 0x89, 0x76, 0xfd, 0x49, 0x98, 0x66, 0x2a, 0x4e, 0xcb, 0x27, 0xae, 0x6e, 0x58, 0xd8, 0xf3, 0x98, 0xd3, 0x52, 0x4c, 0x15, 0xd1, 0xb9, 0x4d, 0x3a, 0xb5, 0x2c, 0x67, 0xd0, 0x79, 0x98, 0x62, 0x88, 0x46, 0xcb, 0xf2, 0xcd, 0xa6, 0x45, 0x74, 0xfa, 0xf2, 0xe0, 
0xb1, 0x44, 0x1c, 0x58, 0x36, 0x49, 0x35, 0xaa, 0x42, 0x81, 0x5a, 0xe4, 0xa1, 0x55, 0x78, 0x90, 0xc1, 0xea, 0xc4, 0x26, 0x2e, 0xf6, 0x89, 0x4e, 0x7e, 0xa1, 0x85, 0x2d, 0x4f, 0xc7, 0x76, 0x4d, 0x3f, 0xc0, 0xde, 0x41, 0x7e, 0x3a, 0x4c, 0x70, 0x92, 0xea, 0xae, 0x0a, 0xd5, 0x0a, 0xd3, 0x2c, 0xd9, 0xb5, 0x4f, 0x62, 0xef, 0x00, 0x15, 0xe1, 0x04, 0x23, 0xf2, 0x7c, 0xd7, 0xb4, 0xeb, 0xba, 0x71, 0x40, 0x8c, 0x6b, 0x7a, 0xcb, 0xdf, 0xbf, 0x98, 0x7f, 0x20, 0xcc, 0xc0, 0x8c, 0xdc, 0x66, 0x3a, 0xcb, 0x54, 0x65, 0xd7, 0xdf, 0xbf, 0x88, 0xb6, 0x21, 0x4b, 0xf7, 0xa3, 0x61, 0xbe, 0x4c, 0xf4, 0x7d, 0xc7, 0x65, 0xc5, 0x25, 0x37, 0xe0, 0x70, 0x87, 0x9c, 0xb8, 0xb8, 0x29, 0x00, 0x55, 0xa7, 0x46, 0x8a, 0xc9, 0xed, 0xad, 0x4a, 0x65, 0x45, 0xcb, 0x48, 0x96, 0xcb, 0x8e, 0x4b, 0x63, 0xaa, 0xee, 0x04, 0x3e, 0xce, 0xf0, 0x98, 0xaa, 0x3b, 0xd2, 0xc3, 0xe7, 0x61, 0xca, 0x30, 0xf8, 0xb2, 0x4d, 0x43, 0x17, 0x5d, 0xbe, 0x97, 0x57, 0xbb, 0xfc, 0x65, 0x18, 0xab, 0x5c, 0x41, 0x84, 0xb9, 0x87, 0x9e, 0x86, 0xfb, 0x3a, 0xfe, 0x0a, 0x03, 0x27, 0xfb, 0x56, 0xd9, 0x0b, 0x3d, 0x0f, 0x53, 0xcd, 0x76, 0x3f, 0x10, 0x75, 0x3d, 0xb1, 0xd9, 0xee, 0x85, 0x9d, 0x66, 0x6f, 0x6e, 0x2e, 0x31, 0xb0, 0x4f, 0x6a, 0xf9, 0xfb, 0xc3, 0xda, 0xa1, 0x09, 0x74, 0x06, 0x54, 0xc3, 0xd0, 0x89, 0x8d, 0xf7, 0x2c, 0xa2, 0x63, 0x97, 0xd8, 0xd8, 0xcb, 0xcf, 0x86, 0x95, 0x73, 0x86, 0x51, 0x61, 0xb3, 0x25, 0x36, 0x89, 0x1e, 0x87, 0x49, 0x67, 0xef, 0xaa, 0xc1, 0x83, 0x4b, 0x6f, 0xba, 0x64, 0xdf, 0x7c, 0x29, 0xff, 0x30, 0x73, 0xd3, 0x04, 0x9d, 0x60, 0xa1, 0xb5, 0xc5, 0xc4, 0xe8, 0x31, 0x50, 0x0d, 0xef, 0x00, 0xbb, 0x4d, 0x56, 0xdd, 0xbd, 0x26, 0x36, 0x48, 0xfe, 0x34, 0x57, 0xe5, 0xf2, 0x0d, 0x29, 0x46, 0xcf, 0xc3, 0x74, 0xcb, 0x36, 0x6d, 0x9f, 0xb8, 0x4d, 0x97, 0xd0, 0x26, 0x9d, 0x9f, 0xb4, 0xfc, 0x3f, 0x8f, 0x1d, 0xd1, 0x66, 0xef, 0x86, 0xb5, 0xf9, 0xee, 0x6a, 0x53, 0xad, 0x7e, 0xe1, 0x7c, 0x11, 0xb2, 0xe1, 0x4d, 0x47, 0x69, 0xe0, 0xdb, 0xae, 0x2a, 0xb4, 0x86, 0x2e, 0x6f, 0xae, 0xd0, 0xea, 0xf7, 0x62, 0x45, 0x8d, 0xd1, 0x2a, 0xbc, 0xbe, 
0xb6, 0x53, 0xd1, 0xb5, 0xdd, 0x8d, 0x9d, 0xb5, 0x6a, 0x45, 0x8d, 0x3f, 0x9e, 0x4e, 0xbd, 0x3b, 0xa6, 0xde, 0xbc, 0x79, 0xf3, 0x66, 0x6c, 0xfe, 0x7b, 0x31, 0xc8, 0x75, 0x77, 0xbe, 0xe8, 0xe3, 0x70, 0xbf, 0x7c, 0x4d, 0xf5, 0x88, 0xaf, 0xdf, 0x30, 0x5d, 0x16, 0x87, 0x0d, 0xcc, 0x7b, 0xc7, 0xc0, 0x85, 0xd3, 0x42, 0x6b, 0x9b, 0xf8, 0xcf, 0x99, 0x2e, 0x8d, 0xb2, 0x06, 0xf6, 0xd1, 0x3a, 0xcc, 0xda, 0x8e, 0xee, 0xf9, 0xd8, 0xae, 0x61, 0xb7, 0xa6, 0x77, 0x2e, 0x08, 0x74, 0x6c, 0x18, 0xc4, 0xf3, 0x1c, 0x5e, 0x02, 0x02, 0x96, 0x8f, 0xd8, 0xce, 0xb6, 0x50, 0xee, 0xe4, 0xc6, 0x92, 0x50, 0xed, 0xd9, 0xee, 0xf8, 0x51, 0xdb, 0xfd, 0x00, 0xa4, 0x1b, 0xb8, 0xa9, 0x13, 0xdb, 0x77, 0xdb, 0xac, 0x5f, 0x4b, 0x69, 0xa9, 0x06, 0x6e, 0x56, 0xe8, 0xf8, 0xa7, 0xb7, 0x07, 0x61, 0x3f, 0xfe, 0x63, 0x1c, 0xb2, 0xe1, 0x9e, 0x8d, 0xb6, 0xc0, 0x06, 0xcb, 0xcf, 0x0a, 0x3b, 0xbe, 0x0f, 0xdd, 0xb5, 0xc3, 0x5b, 0x5c, 0xa6, 0x89, 0xbb, 0x38, 0xca, 0x3b, 0x29, 0x8d, 0x23, 0x69, 0xd1, 0xa4, 0x07, 0x96, 0xf0, 0xfe, 0x3c, 0xa5, 0x89, 0x11, 0x5a, 0x85, 0xd1, 0xab, 0x1e, 0xe3, 0x1e, 0x65, 0xdc, 0x0f, 0xdf, 0x9d, 0xfb, 0xca, 0x36, 0x23, 0x4f, 0x5f, 0xd9, 0xd6, 0x37, 0x36, 0xb5, 0x6a, 0x69, 0x5d, 0x13, 0x70, 0x74, 0x12, 0x12, 0x16, 0x7e, 0xb9, 0xdd, 0x9d, 0xe2, 0x99, 0x68, 0x58, 0xc7, 0x9f, 0x84, 0xc4, 0x0d, 0x82, 0xaf, 0x75, 0x27, 0x56, 0x26, 0xfa, 0x29, 0x86, 0xfe, 0x19, 0x48, 0x32, 0x7f, 0x21, 0x00, 0xe1, 0x31, 0x75, 0x04, 0xa5, 0x20, 0xb1, 0xbc, 0xa9, 0xd1, 0xf0, 0x57, 0x21, 0xcb, 0xa5, 0xfa, 0xd6, 0x5a, 0x65, 0xb9, 0xa2, 0xc6, 0xe6, 0xcf, 0xc3, 0x28, 0x77, 0x02, 0x3d, 0x1a, 0x81, 0x1b, 0xd4, 0x11, 0x31, 0x14, 0x1c, 0x8a, 0x9c, 0xdd, 0xad, 0x96, 0x2b, 0x9a, 0x1a, 0x0b, 0x6f, 0xaf, 0x07, 0xd9, 0x70, 0xbb, 0xf6, 0xb3, 0x89, 0xa9, 0xef, 0x28, 0x90, 0x09, 0xb5, 0x5f, 0xb4, 0xf0, 0x63, 0xcb, 0x72, 0x6e, 0xe8, 0xd8, 0x32, 0xb1, 0x27, 0x82, 0x02, 0x98, 0xa8, 0x44, 0x25, 0xc3, 0x6e, 0xda, 0xcf, 0xc4, 0xf8, 0xd7, 0x15, 0x50, 0x7b, 0x5b, 0xb7, 0x1e, 0x03, 0x95, 0x9f, 0xab, 0x81, 0xaf, 0x29, 0x90, 0xeb, 0xee, 0xd7, 
0x7a, 0xcc, 0x3b, 0xf5, 0x73, 0x35, 0xef, 0xcb, 0x0a, 0x8c, 0x77, 0x75, 0x69, 0xff, 0xa7, 0xac, 0x7b, 0x35, 0x0e, 0x53, 0x03, 0x70, 0xa8, 0x24, 0xda, 0x59, 0xde, 0x61, 0x7f, 0x6c, 0x98, 0x67, 0x2d, 0xd2, 0x6a, 0xb9, 0x85, 0x5d, 0x5f, 0x74, 0xbf, 0x8f, 0x81, 0x6a, 0xd6, 0x88, 0xed, 0x9b, 0xfb, 0x26, 0x71, 0xc5, 0x2b, 0x38, 0xef, 0x71, 0x27, 0x3a, 0x72, 0xfe, 0x16, 0xfe, 0x51, 0x40, 0x4d, 0xc7, 0x33, 0x7d, 0xf3, 0x3a, 0xd1, 0x4d, 0x5b, 0xbe, 0xaf, 0xd3, 0x9e, 0x37, 0xa1, 0xa9, 0x72, 0x66, 0xcd, 0xf6, 0x03, 0x6d, 0x9b, 0xd4, 0x71, 0x8f, 0x36, 0xcd, 0x7d, 0x71, 0x4d, 0x95, 0x33, 0x81, 0xf6, 0x29, 0xc8, 0xd6, 0x9c, 0x16, 0x6d, 0x1f, 0xb8, 0x1e, 0x4d, 0xb5, 0x8a, 0x96, 0xe1, 0xb2, 0x40, 0x45, 0xf4, 0x77, 0x9d, 0x8b, 0x82, 0xac, 0x96, 0xe1, 0x32, 0xae, 0xf2, 0x28, 0x4c, 0xe0, 0x7a, 0xdd, 0xa5, 0xe4, 0x92, 0x88, 0x37, 0xad, 0xb9, 0x40, 0xcc, 0x14, 0x67, 0xae, 0x40, 0x4a, 0xfa, 0x81, 0x56, 0x33, 0xea, 0x09, 0xbd, 0xc9, 0xaf, 0x6b, 0x62, 0x0b, 0x69, 0x2d, 0x65, 0xcb, 0xc9, 0x53, 0x90, 0x35, 0x3d, 0xbd, 0x73, 0x6f, 0x18, 0x9b, 0x8b, 0x2d, 0xa4, 0xb4, 0x8c, 0xe9, 0x05, 0x17, 0x45, 0xf3, 0x6f, 0xc6, 0x20, 0xd7, 0x7d, 0xef, 0x89, 0x56, 0x20, 0x65, 0x39, 0x06, 0x66, 0x81, 0xc0, 0x2f, 0xdd, 0x17, 0x22, 0xae, 0x4a, 0x17, 0xd7, 0x85, 0xbe, 0x16, 0x20, 0x67, 0xfe, 0x5e, 0x81, 0x94, 0x14, 0xa3, 0x13, 0x90, 0x68, 0x62, 0xff, 0x80, 0xd1, 0x25, 0xcb, 0x31, 0x55, 0xd1, 0xd8, 0x98, 0xca, 0xbd, 0x26, 0xb6, 0x59, 0x08, 0x08, 0x39, 0x1d, 0xd3, 0x7d, 0xb5, 0x08, 0xae, 0xb1, 0x76, 0xd8, 0x69, 0x34, 0x88, 0xed, 0x7b, 0x72, 0x5f, 0x85, 0x7c, 0x59, 0x88, 0xd1, 0x13, 0x30, 0xe9, 0xbb, 0xd8, 0xb4, 0xba, 0x74, 0x13, 0x4c, 0x57, 0x95, 0x13, 0x81, 0x72, 0x11, 0x4e, 0x4a, 0xde, 0x1a, 0xf1, 0xb1, 0x71, 0x40, 0x6a, 0x1d, 0xd0, 0x28, 0xbb, 0x54, 0xbb, 0x5f, 0x28, 0xac, 0x88, 0x79, 0x89, 0x9d, 0xff, 0x81, 0x02, 0x93, 0xb2, 0x81, 0xaf, 0x05, 0xce, 0xaa, 0x02, 0x60, 0xdb, 0x76, 0xfc, 0xb0, 0xbb, 0xfa, 0x43, 0xb9, 0x0f, 0xb7, 0x58, 0x0a, 0x40, 0x5a, 0x88, 0x60, 0xa6, 0x01, 0xd0, 0x99, 0x39, 0xd2, 0x6d, 0xb3, 
0x90, 0x11, 0x97, 0xda, 0xec, 0xcb, 0x08, 0x7f, 0xeb, 0x03, 0x2e, 0xa2, 0x9d, 0x3e, 0x9a, 0x86, 0xe4, 0x1e, 0xa9, 0x9b, 0xb6, 0xb8, 0x6a, 0xe3, 0x03, 0x79, 0x81, 0x97, 0x08, 0x2e, 0xf0, 0xca, 0x9f, 0x86, 0x29, 0xc3, 0x69, 0xf4, 0x9a, 0x5b, 0x56, 0x7b, 0xde, 0x3c, 0xbd, 0x4f, 0x2a, 0x2f, 0x42, 0xa7, 0x3b, 0x7b, 0x43, 0x51, 0xbe, 0x1a, 0x8b, 0xaf, 0x6e, 0x95, 0xbf, 0x1e, 0x9b, 0x59, 0xe5, 0xd0, 0x2d, 0xb9, 0x52, 0x8d, 0xec, 0x5b, 0xc4, 0xa0, 0xd6, 0xc3, 0x1b, 0x8f, 0xc0, 0xc7, 0xea, 0xa6, 0x7f, 0xd0, 0xda, 0x5b, 0x34, 0x9c, 0xc6, 0x99, 0xba, 0x53, 0x77, 0x3a, 0x1f, 0x83, 0xe8, 0x88, 0x0d, 0xd8, 0x7f, 0xe2, 0x83, 0x50, 0x3a, 0x90, 0xce, 0x44, 0x7e, 0x3d, 0x2a, 0x6e, 0xc0, 0x94, 0x50, 0xd6, 0xd9, 0x8d, 0x34, 0xef, 0xc3, 0xd1, 0x5d, 0x6f, 0x25, 0xf2, 0xdf, 0x7c, 0x87, 0x55, 0x3a, 0x6d, 0x52, 0x40, 0xe9, 0x1c, 0xef, 0xd4, 0x8b, 0x1a, 0xdc, 0xd7, 0xc5, 0xc7, 0x8f, 0x26, 0x71, 0x23, 0x18, 0xbf, 0x27, 0x18, 0xa7, 0x42, 0x8c, 0xdb, 0x02, 0x5a, 0x5c, 0x86, 0xf1, 0xe3, 0x70, 0xfd, 0xad, 0xe0, 0xca, 0x92, 0x30, 0xc9, 0x2a, 0x4c, 0x30, 0x12, 0xa3, 0xe5, 0xf9, 0x4e, 0x83, 0xe5, 0xbd, 0xbb, 0xd3, 0xfc, 0xdd, 0x3b, 0xfc, 0xac, 0xe4, 0x28, 0x6c, 0x39, 0x40, 0x15, 0x8b, 0xc0, 0x2e, 0xe1, 0x6b, 0xc4, 0xb0, 0x22, 0x18, 0xde, 0x12, 0x86, 0x04, 0xfa, 0xc5, 0x4f, 0xc1, 0x34, 0xfd, 0x9f, 0xa5, 0xa5, 0xb0, 0x25, 0xd1, 0x77, 0x30, 0xf9, 0x1f, 0xbc, 0xc2, 0x8f, 0xe3, 0x54, 0x40, 0x10, 0xb2, 0x29, 0xb4, 0x8b, 0x75, 0xe2, 0xfb, 0xc4, 0xf5, 0x74, 0x6c, 0x0d, 0x32, 0x2f, 0xf4, 0x06, 0x9b, 0xff, 0xd2, 0x7b, 0xdd, 0xbb, 0xb8, 0xca, 0x91, 0x25, 0xcb, 0x2a, 0xee, 0xc2, 0xfd, 0x03, 0xa2, 0x62, 0x08, 0xce, 0x57, 0x05, 0xe7, 0x74, 0x5f, 0x64, 0x50, 0xda, 0x2d, 0x90, 0xf2, 0x60, 0x2f, 0x87, 0xe0, 0xfc, 0xb2, 0xe0, 0x44, 0x02, 0x2b, 0xb7, 0x94, 0x32, 0x5e, 0x81, 0xc9, 0xeb, 0xc4, 0xdd, 0x73, 0x3c, 0x71, 0x71, 0x30, 0x04, 0xdd, 0x6b, 0x82, 0x6e, 0x42, 0x00, 0xd9, 0x35, 0x02, 0xe5, 0x7a, 0x1a, 0x52, 0xfb, 0xd8, 0x20, 0x43, 0x50, 0x7c, 0x45, 0x50, 0x8c, 0x51, 0x7d, 0x0a, 0x2d, 0x41, 0xb6, 0xee, 0x88, 0xca, 
0x14, 0x0d, 0x7f, 0x5d, 0xc0, 0x33, 0x12, 0x23, 0x28, 0x9a, 0x4e, 0xb3, 0x65, 0xd1, 0xb2, 0x15, 0x4d, 0xf1, 0x7b, 0x92, 0x42, 0x62, 0x04, 0xc5, 0x31, 0xdc, 0xfa, 0x86, 0xa4, 0xf0, 0x42, 0xfe, 0x7c, 0x16, 0x32, 0x8e, 0x6d, 0xb5, 0x1d, 0x7b, 0x18, 0x23, 0x7e, 0x5f, 0x30, 0x80, 0x80, 0x50, 0x82, 0x4b, 0x90, 0x1e, 0x76, 0x23, 0xfe, 0xe0, 0x3d, 0x79, 0x3c, 0xe4, 0x0e, 0xac, 0xc2, 0x84, 0x4c, 0x50, 0xa6, 0x63, 0x0f, 0x41, 0xf1, 0x87, 0x82, 0x22, 0x17, 0x82, 0x89, 0x65, 0xf8, 0xc4, 0xf3, 0xeb, 0x64, 0x18, 0x92, 0x37, 0xe5, 0x32, 0x04, 0x44, 0xb8, 0x72, 0x8f, 0xd8, 0xc6, 0xc1, 0x70, 0x0c, 0x5f, 0x93, 0xae, 0x94, 0x18, 0x4a, 0xb1, 0x0c, 0xe3, 0x0d, 0xec, 0x7a, 0x07, 0xd8, 0x1a, 0x6a, 0x3b, 0xfe, 0x48, 0x70, 0x64, 0x03, 0x90, 0xf0, 0x48, 0xcb, 0x3e, 0x0e, 0xcd, 0xd7, 0xa5, 0x47, 0x42, 0x30, 0x71, 0xf4, 0x3c, 0x9f, 0xdd, 0xcd, 0x1c, 0x87, 0xed, 0x8f, 0xe5, 0xd1, 0xe3, 0xd8, 0x6a, 0x98, 0xf1, 0x12, 0xa4, 0x3d, 0xf3, 0xe5, 0xa1, 0x68, 0xfe, 0x44, 0xee, 0x34, 0x03, 0x50, 0xf0, 0x0b, 0x70, 0x72, 0x60, 0x99, 0x18, 0x82, 0xec, 0x4f, 0x05, 0xd9, 0x89, 0x01, 0xa5, 0x42, 0xa4, 0x84, 0xe3, 0x52, 0xfe, 0x99, 0x4c, 0x09, 0xa4, 0x87, 0x6b, 0x8b, 0x76, 0xf6, 0x1e, 0xde, 0x3f, 0x9e, 0xd7, 0xfe, 0x5c, 0x7a, 0x8d, 0x63, 0xbb, 0xbc, 0xb6, 0x03, 0x27, 0x04, 0xe3, 0xf1, 0xf6, 0xf5, 0x1b, 0x32, 0xb1, 0x72, 0xf4, 0x6e, 0xf7, 0xee, 0x7e, 0x1a, 0x66, 0x02, 0x77, 0xca, 0xa6, 0xd4, 0xd3, 0x1b, 0xb8, 0x39, 0x04, 0xf3, 0x37, 0x05, 0xb3, 0xcc, 0xf8, 0x41, 0x57, 0xeb, 0x55, 0x71, 0x93, 0x92, 0x3f, 0x0f, 0x79, 0x49, 0xde, 0xb2, 0x5d, 0x62, 0x38, 0x75, 0xdb, 0x7c, 0x99, 0xd4, 0x86, 0xa0, 0xfe, 0x8b, 0x9e, 0xad, 0xda, 0x0d, 0xc1, 0x29, 0xf3, 0x1a, 0xa8, 0x41, 0xaf, 0xa2, 0x9b, 0x8d, 0xa6, 0xe3, 0xfa, 0x11, 0x8c, 0x7f, 0x29, 0x77, 0x2a, 0xc0, 0xad, 0x31, 0x58, 0xb1, 0x02, 0x39, 0x36, 0x1c, 0x36, 0x24, 0xff, 0x4a, 0x10, 0x8d, 0x77, 0x50, 0x22, 0x71, 0x18, 0x4e, 0xa3, 0x89, 0xdd, 0x61, 0xf2, 0xdf, 0xb7, 0x64, 0xe2, 0x10, 0x10, 0x91, 0x38, 0xfc, 0x76, 0x93, 0xd0, 0x6a, 0x3f, 0x04, 0xc3, 0xb7, 0x65, 0xe2, 0x90, 
0x18, 0x41, 0x21, 0x1b, 0x86, 0x21, 0x28, 0xfe, 0x5a, 0x52, 0x48, 0x0c, 0x3f, 0x03, 0x13, 0x3d, 0xfd, 0x00, 0x8a, 0xfa, 0xfc, 0x9e, 0xff, 0xcc, 0x07, 0x22, 0x73, 0x74, 0xb7, 0x03, 0xc5, 0x75, 0xba, 0x49, 0xdd, 0x45, 0x3b, 0x9a, 0xec, 0x95, 0x0f, 0x82, 0x7d, 0xea, 0xaa, 0xd9, 0xc5, 0xcb, 0x30, 0xde, 0x55, 0xb0, 0xa3, 0xa9, 0x7e, 0x51, 0x50, 0x65, 0xc3, 0xf5, 0xba, 0x78, 0x1e, 0x12, 0xb4, 0xf8, 0x46, 0xc3, 0x7f, 0x49, 0xc0, 0x99, 0x7a, 0xf1, 0x13, 0x90, 0x92, 0x45, 0x37, 0x1a, 0xfa, 0xcb, 0x02, 0x1a, 0x40, 0x28, 0x5c, 0x16, 0xdc, 0x68, 0xf8, 0xe7, 0x24, 0x5c, 0x42, 0x28, 0x7c, 0x78, 0x17, 0x7e, 0xf7, 0x0b, 0x09, 0x91, 0x34, 0xa5, 0xef, 0x2e, 0xc1, 0x98, 0xa8, 0xb4, 0xd1, 0xe8, 0xcf, 0x8b, 0x87, 0x4b, 0x44, 0xf1, 0x29, 0x48, 0x0e, 0xe9, 0xf0, 0x5f, 0x15, 0x50, 0xae, 0x5f, 0x5c, 0x86, 0x4c, 0xa8, 0xba, 0x46, 0xc3, 0x7f, 0x4d, 0xc0, 0xc3, 0x28, 0x6a, 0xba, 0xa8, 0xae, 0xd1, 0x04, 0xbf, 0x2e, 0x4d, 0x17, 0x08, 0xea, 0x36, 0x59, 0x58, 0xa3, 0xd1, 0xbf, 0x21, 0xbd, 0x2e, 0x21, 0xc5, 0x67, 0x21, 0x1d, 0x24, 0xcb, 0x68, 0xfc, 0x6f, 0x0a, 0x7c, 0x07, 0x43, 0x3d, 0x10, 0x4a, 0xd6, 0xd1, 0x14, 0xbf, 0x25, 0x3d, 0x10, 0x42, 0xd1, 0x63, 0xd4, 0x5b, 0x80, 0xa3, 0x99, 0x7e, 0x5b, 0x1e, 0xa3, 0x9e, 0xfa, 0x4b, 0x77, 0x93, 0xe5, 0xac, 0x68, 0x8a, 0xdf, 0x91, 0xbb, 0xc9, 0xf4, 0xa9, 0x19, 0xbd, 0x15, 0x2d, 0x9a, 0xe3, 0x77, 0xa5, 0x19, 0x3d, 0x05, 0xad, 0xb8, 0x05, 0xa8, 0xbf, 0x9a, 0x45, 0xf3, 0x7d, 0x51, 0xf0, 0x4d, 0xf6, 0x15, 0xb3, 0xe2, 0x73, 0x70, 0x62, 0x70, 0x25, 0x8b, 0x66, 0xfd, 0xd2, 0x07, 0x3d, 0xef, 0x1e, 0xe1, 0x42, 0x56, 0xdc, 0xe9, 0xbc, 0x7b, 0x84, 0xab, 0x58, 0x34, 0xed, 0xab, 0x1f, 0x74, 0xbf, 0x9a, 0x86, 0x8b, 0x58, 0xb1, 0x04, 0xd0, 0x29, 0x20, 0xd1, 0x5c, 0xaf, 0x09, 0xae, 0x10, 0x88, 0x1e, 0x0d, 0x51, 0x3f, 0xa2, 0xf1, 0x5f, 0x91, 0x47, 0x43, 0x20, 0xe8, 0xd1, 0x90, 0xa5, 0x23, 0x1a, 0xfd, 0xba, 0x3c, 0x1a, 0x12, 0x52, 0xbc, 0x04, 0x29, 0xbb, 0x65, 0x59, 0x34, 0xb6, 0xd0, 0xdd, 0x7f, 0x11, 0x93, 0xff, 0xd1, 0x87, 0x02, 0x2c, 0x01, 0xc5, 0xf3, 0x90, 0x24, 0x8d, 
0x3d, 0x52, 0x8b, 0x42, 0xfe, 0xcb, 0x87, 0x32, 0x9f, 0x50, 0xed, 0xe2, 0xb3, 0x00, 0xfc, 0xcd, 0x97, 0x7d, 0x10, 0x89, 0xc0, 0xfe, 0xeb, 0x87, 0xe2, 0x63, 0x7b, 0x07, 0xd2, 0x21, 0xe0, 0x9f, 0xee, 0xef, 0x4e, 0xf0, 0x5e, 0x37, 0x01, 0x7b, 0x5b, 0x7e, 0x1a, 0xc6, 0xae, 0x7a, 0x8e, 0xed, 0xe3, 0x7a, 0x14, 0xfa, 0xdf, 0x04, 0x5a, 0xea, 0x53, 0x87, 0x35, 0x1c, 0x97, 0xf8, 0xb8, 0xee, 0x45, 0x61, 0xff, 0x5d, 0x60, 0x03, 0x00, 0x05, 0x1b, 0xd8, 0xf3, 0x87, 0x59, 0xf7, 0x7f, 0x48, 0xb0, 0x04, 0x50, 0xa3, 0xe9, 0xff, 0xd7, 0x48, 0x3b, 0x0a, 0xfb, 0xbe, 0x34, 0x5a, 0xe8, 0x17, 0x3f, 0x01, 0x69, 0xfa, 0x2f, 0xff, 0x01, 0x4a, 0x04, 0xf8, 0x3f, 0x05, 0xb8, 0x83, 0xa0, 0x4f, 0xf6, 0xfc, 0x9a, 0x6f, 0x46, 0x3b, 0xfb, 0xbf, 0xc4, 0x4e, 0x4b, 0xfd, 0x62, 0x09, 0x32, 0x9e, 0x5f, 0xab, 0xb5, 0x5c, 0x7e, 0x13, 0x17, 0x01, 0xff, 0xef, 0x0f, 0x83, 0x37, 0xd2, 0x00, 0x53, 0x3e, 0x35, 0xf8, 0x72, 0x0d, 0x56, 0x9d, 0x55, 0x87, 0x5f, 0xab, 0xc1, 0x77, 0x26, 0xe0, 0xb4, 0xe1, 0x34, 0xf6, 0x1c, 0xef, 0x0c, 0x4f, 0x28, 0x41, 0x3a, 0x39, 0x23, 0xdd, 0x27, 0xae, 0xc6, 0x02, 0x77, 0xce, 0x1c, 0xef, 0x4e, 0x6d, 0xfe, 0x47, 0xe3, 0x90, 0x5a, 0xc6, 0x9e, 0x8f, 0x6f, 0xe0, 0x36, 0x3a, 0x0d, 0xa9, 0x35, 0xdb, 0x3f, 0x77, 0x76, 0xcb, 0x77, 0xd9, 0x17, 0x95, 0x78, 0x39, 0x7d, 0xe7, 0xd6, 0x6c, 0xd2, 0xa4, 0x32, 0x2d, 0x98, 0x42, 0x0f, 0x41, 0x92, 0xfd, 0xcf, 0x6e, 0x16, 0xe3, 0xe5, 0xf1, 0xb7, 0x6e, 0xcd, 0x8e, 0x74, 0xf4, 0xf8, 0x1c, 0x7a, 0x01, 0x32, 0xd5, 0xf6, 0xae, 0x69, 0xfb, 0x17, 0x96, 0x28, 0x1d, 0x75, 0x40, 0xa2, 0xfc, 0xd4, 0x9d, 0x5b, 0xb3, 0xe7, 0x8e, 0x34, 0x90, 0x96, 0xc5, 0xce, 0xc2, 0x24, 0x9a, 0xfd, 0x10, 0x2f, 0xcc, 0x85, 0x9e, 0x83, 0x94, 0x1c, 0xf2, 0x1b, 0xfa, 0xf2, 0x25, 0x61, 0xc2, 0x3d, 0x71, 0x07, 0x64, 0xe8, 0xff, 0x43, 0xb6, 0xda, 0xbe, 0x6c, 0x39, 0x58, 0xf8, 0x20, 0x39, 0xa7, 0x2c, 0xc4, 0xca, 0x17, 0xef, 0xdc, 0x9a, 0x5d, 0x1a, 0x9a, 0x58, 0xc0, 0x19, 0x73, 0x17, 0x1b, 0x7a, 0x11, 0xd2, 0xc1, 0x98, 0x7d, 0x03, 0x88, 0x95, 0x3f, 0x2e, 0xec, 0xbe, 0x37, 0xfa, 0x0e, 0x5d, 
0xc8, 0x72, 0xee, 0xee, 0xb1, 0x39, 0x65, 0x41, 0xb9, 0x17, 0xcb, 0x85, 0x4f, 0xba, 0xd8, 0x42, 0x96, 0x5f, 0x58, 0x62, 0x1f, 0x1d, 0x94, 0x7b, 0xb5, 0x5c, 0xd0, 0x77, 0xe8, 0xd0, 0x15, 0x18, 0xab, 0xb6, 0xcb, 0x6d, 0x9f, 0x78, 0xec, 0xd7, 0x29, 0xd9, 0xf2, 0x93, 0x77, 0x6e, 0xcd, 0x7e, 0x74, 0x48, 0x56, 0x86, 0xd3, 0x24, 0x01, 0x9a, 0x83, 0xcc, 0x86, 0xe3, 0x36, 0xb0, 0xc5, 0xf9, 0x80, 0x7f, 0x44, 0x09, 0x89, 0xd0, 0x2e, 0x5d, 0x09, 0xdf, 0x6d, 0x8f, 0xfd, 0xb4, 0xfe, 0x27, 0x88, 0xc9, 0x0e, 0x13, 0x32, 0x21, 0x59, 0x6d, 0x57, 0x71, 0x33, 0x9f, 0x65, 0x37, 0xfc, 0x0f, 0x2e, 0x06, 0x08, 0x79, 0xb6, 0x16, 0xd9, 0x3c, 0xfb, 0x15, 0x41, 0x79, 0xe9, 0xce, 0xad, 0xd9, 0x27, 0x87, 0x7e, 0x62, 0x15, 0x37, 0xd9, 0xe3, 0xf8, 0x13, 0xd0, 0xb7, 0x14, 0x7a, 0xb0, 0xf8, 0x15, 0x29, 0x7d, 0xe2, 0x38, 0x7b, 0xe2, 0x43, 0x03, 0x9f, 0x18, 0x68, 0xf1, 0xe7, 0xda, 0x9f, 0x7d, 0xfb, 0x18, 0x2b, 0xe5, 0x6f, 0x36, 0xf4, 0xd1, 0xbf, 0xf2, 0xf6, 0x3d, 0x1f, 0xda, 0xc0, 0x02, 0xf4, 0x8a, 0x02, 0xe3, 0xd5, 0xf6, 0x86, 0xa8, 0xb1, 0xd4, 0xf2, 0x9c, 0xf8, 0x01, 0xf6, 0x20, 0xcb, 0x43, 0x7a, 0xdc, 0xf6, 0x0b, 0x9f, 0x7d, 0x7b, 0xf6, 0xec, 0xd0, 0x46, 0xb0, 0x14, 0xc4, 0x6c, 0xe8, 0x7e, 0x26, 0xfa, 0x1c, 0xb3, 0xa2, 0x42, 0xeb, 0x75, 0x8d, 0xd4, 0xa8, 0x15, 0x13, 0x77, 0xb1, 0x22, 0xa4, 0xc7, 0xad, 0x28, 0xd2, 0xa8, 0xbf, 0x77, 0x4b, 0x42, 0x7c, 0x68, 0x13, 0x46, 0xb9, 0x87, 0xd9, 0x2f, 0xa3, 0xd2, 0xc7, 0x0c, 0xc3, 0xce, 0xe6, 0x68, 0x82, 0x66, 0xe6, 0x22, 0x40, 0x27, 0xc6, 0x90, 0x0a, 0xf1, 0x6b, 0xa4, 0x2d, 0x7e, 0x01, 0x47, 0xff, 0x45, 0xd3, 0x9d, 0x5f, 0x78, 0x2a, 0x0b, 0x09, 0xf1, 0xb3, 0xcd, 0x62, 0xec, 0xa2, 0x32, 0xf3, 0x0c, 0xa8, 0xbd, 0xb1, 0x72, 0x2c, 0xbc, 0x06, 0xa8, 0x7f, 0xc7, 0xc2, 0x0c, 0x49, 0xce, 0xf0, 0x48, 0x98, 0x21, 0x73, 0x56, 0xed, 0xf8, 0xfc, 0x39, 0xd3, 0xf2, 0x1c, 0xbb, 0x8f, 0xb3, 0xd7, 0xff, 0x3f, 0x19, 0xe7, 0x7c, 0x01, 0x46, 0xb9, 0x90, 0xae, 0x65, 0x8d, 0x95, 0x0f, 0x56, 0xe5, 0x34, 0x3e, 0x28, 0xaf, 0xbf, 0x75, 0xbb, 0x30, 0xf2, 0xfd, 0xdb, 0x85, 0x91, 0x7f, 
0xb8, 0x5d, 0x18, 0xf9, 0xe1, 0xed, 0x82, 0xf2, 0xee, 0xed, 0x82, 0xf2, 0xfe, 0xed, 0x82, 0xf2, 0xe3, 0xdb, 0x05, 0xe5, 0xe6, 0x61, 0x41, 0xf9, 0xda, 0x61, 0x41, 0xf9, 0xc6, 0x61, 0x41, 0xf9, 0x9b, 0xc3, 0x82, 0xf2, 0xdd, 0xc3, 0x82, 0xf2, 0xd6, 0x61, 0x61, 0xe4, 0xfb, 0x87, 0x85, 0x91, 0x1f, 0x1e, 0x16, 0x94, 0x77, 0x0f, 0x0b, 0x23, 0xef, 0x1f, 0x16, 0x94, 0x1f, 0x1f, 0x16, 0x94, 0x9b, 0xff, 0x54, 0x18, 0xf9, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xeb, 0x34, 0x72, 0x4f, 0xe4, 0x34, 0x00, 0x00, } r := bytes.NewReader(gzipped) gzipr, err := compress_gzip.NewReader(r) if err != nil { panic(err) } ungzipped, err := io_ioutil.ReadAll(gzipr) if err != nil { panic(err) } if err := github_com_gogo_protobuf_proto.Unmarshal(ungzipped, d); err != nil { panic(err) } return d } func (this *Castaway) VerboseEqual(that interface{}) error { if that == nil { if this == nil { return nil } return fmt.Errorf("that == nil && this != nil") } that1, ok := that.(*Castaway) if !ok { that2, ok := that.(Castaway) if ok { that1 = &that2 } else { return fmt.Errorf("that is not of type *Castaway") } } if that1 == nil { if this == nil { return nil } return fmt.Errorf("that is type *Castaway but is nil && this != nil") } else if this == nil { return fmt.Errorf("that is type *Castaway but is not nil && this == nil") } if this.Int32Ptr != nil && that1.Int32Ptr != nil { if *this.Int32Ptr != *that1.Int32Ptr { return fmt.Errorf("Int32Ptr this(%v) Not Equal that(%v)", *this.Int32Ptr, *that1.Int32Ptr) } } else if this.Int32Ptr != nil { return fmt.Errorf("this.Int32Ptr == nil && that.Int32Ptr != nil") } else if that1.Int32Ptr != nil { return fmt.Errorf("Int32Ptr this(%v) Not Equal that(%v)", this.Int32Ptr, that1.Int32Ptr) } if this.Int32 != that1.Int32 { return fmt.Errorf("Int32 this(%v) Not Equal that(%v)", this.Int32, that1.Int32) } if this.MyUint64Ptr != nil && that1.MyUint64Ptr != nil { if *this.MyUint64Ptr != *that1.MyUint64Ptr { return fmt.Errorf("MyUint64Ptr this(%v) Not Equal that(%v)", 
*this.MyUint64Ptr, *that1.MyUint64Ptr) } } else if this.MyUint64Ptr != nil { return fmt.Errorf("this.MyUint64Ptr == nil && that.MyUint64Ptr != nil") } else if that1.MyUint64Ptr != nil { return fmt.Errorf("MyUint64Ptr this(%v) Not Equal that(%v)", this.MyUint64Ptr, that1.MyUint64Ptr) } if this.MyUint64 != that1.MyUint64 { return fmt.Errorf("MyUint64 this(%v) Not Equal that(%v)", this.MyUint64, that1.MyUint64) } if this.MyFloat32Ptr != nil && that1.MyFloat32Ptr != nil { if *this.MyFloat32Ptr != *that1.MyFloat32Ptr { return fmt.Errorf("MyFloat32Ptr this(%v) Not Equal that(%v)", *this.MyFloat32Ptr, *that1.MyFloat32Ptr) } } else if this.MyFloat32Ptr != nil { return fmt.Errorf("this.MyFloat32Ptr == nil && that.MyFloat32Ptr != nil") } else if that1.MyFloat32Ptr != nil { return fmt.Errorf("MyFloat32Ptr this(%v) Not Equal that(%v)", this.MyFloat32Ptr, that1.MyFloat32Ptr) } if this.MyFloat32 != that1.MyFloat32 { return fmt.Errorf("MyFloat32 this(%v) Not Equal that(%v)", this.MyFloat32, that1.MyFloat32) } if this.MyFloat64Ptr != nil && that1.MyFloat64Ptr != nil { if *this.MyFloat64Ptr != *that1.MyFloat64Ptr { return fmt.Errorf("MyFloat64Ptr this(%v) Not Equal that(%v)", *this.MyFloat64Ptr, *that1.MyFloat64Ptr) } } else if this.MyFloat64Ptr != nil { return fmt.Errorf("this.MyFloat64Ptr == nil && that.MyFloat64Ptr != nil") } else if that1.MyFloat64Ptr != nil { return fmt.Errorf("MyFloat64Ptr this(%v) Not Equal that(%v)", this.MyFloat64Ptr, that1.MyFloat64Ptr) } if this.MyFloat64 != that1.MyFloat64 { return fmt.Errorf("MyFloat64 this(%v) Not Equal that(%v)", this.MyFloat64, that1.MyFloat64) } if !bytes.Equal(this.MyBytes, that1.MyBytes) { return fmt.Errorf("MyBytes this(%v) Not Equal that(%v)", this.MyBytes, that1.MyBytes) } if !bytes.Equal(this.NormalBytes, that1.NormalBytes) { return fmt.Errorf("NormalBytes this(%v) Not Equal that(%v)", this.NormalBytes, that1.NormalBytes) } if len(this.MyUint64S) != len(that1.MyUint64S) { return fmt.Errorf("MyUint64S this(%v) Not Equal 
that(%v)", len(this.MyUint64S), len(that1.MyUint64S)) } for i := range this.MyUint64S { if this.MyUint64S[i] != that1.MyUint64S[i] { return fmt.Errorf("MyUint64S this[%v](%v) Not Equal that[%v](%v)", i, this.MyUint64S[i], i, that1.MyUint64S[i]) } } if len(this.MyMap) != len(that1.MyMap) { return fmt.Errorf("MyMap this(%v) Not Equal that(%v)", len(this.MyMap), len(that1.MyMap)) } for i := range this.MyMap { if this.MyMap[i] != that1.MyMap[i] { return fmt.Errorf("MyMap this[%v](%v) Not Equal that[%v](%v)", i, this.MyMap[i], i, that1.MyMap[i]) } } if len(this.MyCustomMap) != len(that1.MyCustomMap) { return fmt.Errorf("MyCustomMap this(%v) Not Equal that(%v)", len(this.MyCustomMap), len(that1.MyCustomMap)) } for i := range this.MyCustomMap { if this.MyCustomMap[i] != that1.MyCustomMap[i] { return fmt.Errorf("MyCustomMap this[%v](%v) Not Equal that[%v](%v)", i, this.MyCustomMap[i], i, that1.MyCustomMap[i]) } } if len(this.MyNullableMap) != len(that1.MyNullableMap) { return fmt.Errorf("MyNullableMap this(%v) Not Equal that(%v)", len(this.MyNullableMap), len(that1.MyNullableMap)) } for i := range this.MyNullableMap { if !this.MyNullableMap[i].Equal(that1.MyNullableMap[i]) { return fmt.Errorf("MyNullableMap this[%v](%v) Not Equal that[%v](%v)", i, this.MyNullableMap[i], i, that1.MyNullableMap[i]) } } if len(this.MyEmbeddedMap) != len(that1.MyEmbeddedMap) { return fmt.Errorf("MyEmbeddedMap this(%v) Not Equal that(%v)", len(this.MyEmbeddedMap), len(that1.MyEmbeddedMap)) } for i := range this.MyEmbeddedMap { a := this.MyEmbeddedMap[i] b := that1.MyEmbeddedMap[i] if !(&a).Equal(&b) { return fmt.Errorf("MyEmbeddedMap this[%v](%v) Not Equal that[%v](%v)", i, this.MyEmbeddedMap[i], i, that1.MyEmbeddedMap[i]) } } if this.String_ != nil && that1.String_ != nil { if *this.String_ != *that1.String_ { return fmt.Errorf("String_ this(%v) Not Equal that(%v)", *this.String_, *that1.String_) } } else if this.String_ != nil { return fmt.Errorf("this.String_ == nil && that.String_ != nil") 
} else if that1.String_ != nil { return fmt.Errorf("String_ this(%v) Not Equal that(%v)", this.String_, that1.String_) } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return fmt.Errorf("XXX_unrecognized this(%v) Not Equal that(%v)", this.XXX_unrecognized, that1.XXX_unrecognized) } return nil } func (this *Castaway) Equal(that interface{}) bool { if that == nil { if this == nil { return true } return false } that1, ok := that.(*Castaway) if !ok { that2, ok := that.(Castaway) if ok { that1 = &that2 } else { return false } } if that1 == nil { if this == nil { return true } return false } else if this == nil { return false } if this.Int32Ptr != nil && that1.Int32Ptr != nil { if *this.Int32Ptr != *that1.Int32Ptr { return false } } else if this.Int32Ptr != nil { return false } else if that1.Int32Ptr != nil { return false } if this.Int32 != that1.Int32 { return false } if this.MyUint64Ptr != nil && that1.MyUint64Ptr != nil { if *this.MyUint64Ptr != *that1.MyUint64Ptr { return false } } else if this.MyUint64Ptr != nil { return false } else if that1.MyUint64Ptr != nil { return false } if this.MyUint64 != that1.MyUint64 { return false } if this.MyFloat32Ptr != nil && that1.MyFloat32Ptr != nil { if *this.MyFloat32Ptr != *that1.MyFloat32Ptr { return false } } else if this.MyFloat32Ptr != nil { return false } else if that1.MyFloat32Ptr != nil { return false } if this.MyFloat32 != that1.MyFloat32 { return false } if this.MyFloat64Ptr != nil && that1.MyFloat64Ptr != nil { if *this.MyFloat64Ptr != *that1.MyFloat64Ptr { return false } } else if this.MyFloat64Ptr != nil { return false } else if that1.MyFloat64Ptr != nil { return false } if this.MyFloat64 != that1.MyFloat64 { return false } if !bytes.Equal(this.MyBytes, that1.MyBytes) { return false } if !bytes.Equal(this.NormalBytes, that1.NormalBytes) { return false } if len(this.MyUint64S) != len(that1.MyUint64S) { return false } for i := range this.MyUint64S { if this.MyUint64S[i] != that1.MyUint64S[i] { return 
false } } if len(this.MyMap) != len(that1.MyMap) { return false } for i := range this.MyMap { if this.MyMap[i] != that1.MyMap[i] { return false } } if len(this.MyCustomMap) != len(that1.MyCustomMap) { return false } for i := range this.MyCustomMap { if this.MyCustomMap[i] != that1.MyCustomMap[i] { return false } } if len(this.MyNullableMap) != len(that1.MyNullableMap) { return false } for i := range this.MyNullableMap { if !this.MyNullableMap[i].Equal(that1.MyNullableMap[i]) { return false } } if len(this.MyEmbeddedMap) != len(that1.MyEmbeddedMap) { return false } for i := range this.MyEmbeddedMap { a := this.MyEmbeddedMap[i] b := that1.MyEmbeddedMap[i] if !(&a).Equal(&b) { return false } } if this.String_ != nil && that1.String_ != nil { if *this.String_ != *that1.String_ { return false } } else if this.String_ != nil { return false } else if that1.String_ != nil { return false } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } return true } func (this *Wilson) VerboseEqual(that interface{}) error { if that == nil { if this == nil { return nil } return fmt.Errorf("that == nil && this != nil") } that1, ok := that.(*Wilson) if !ok { that2, ok := that.(Wilson) if ok { that1 = &that2 } else { return fmt.Errorf("that is not of type *Wilson") } } if that1 == nil { if this == nil { return nil } return fmt.Errorf("that is type *Wilson but is nil && this != nil") } else if this == nil { return fmt.Errorf("that is type *Wilson but is not nil && this == nil") } if this.Int64 != nil && that1.Int64 != nil { if *this.Int64 != *that1.Int64 { return fmt.Errorf("Int64 this(%v) Not Equal that(%v)", *this.Int64, *that1.Int64) } } else if this.Int64 != nil { return fmt.Errorf("this.Int64 == nil && that.Int64 != nil") } else if that1.Int64 != nil { return fmt.Errorf("Int64 this(%v) Not Equal that(%v)", this.Int64, that1.Int64) } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return fmt.Errorf("XXX_unrecognized this(%v) Not Equal 
that(%v)", this.XXX_unrecognized, that1.XXX_unrecognized) } return nil } func (this *Wilson) Equal(that interface{}) bool { if that == nil { if this == nil { return true } return false } that1, ok := that.(*Wilson) if !ok { that2, ok := that.(Wilson) if ok { that1 = &that2 } else { return false } } if that1 == nil { if this == nil { return true } return false } else if this == nil { return false } if this.Int64 != nil && that1.Int64 != nil { if *this.Int64 != *that1.Int64 { return false } } else if this.Int64 != nil { return false } else if that1.Int64 != nil { return false } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } return true } type CastawayFace interface { Proto() github_com_gogo_protobuf_proto.Message GetInt32Ptr() *int32 GetInt32() int32 GetMyUint64Ptr() *github_com_gogo_protobuf_test_casttype.MyUint64Type GetMyUint64() github_com_gogo_protobuf_test_casttype.MyUint64Type GetMyFloat32Ptr() *github_com_gogo_protobuf_test_casttype.MyFloat32Type GetMyFloat32() github_com_gogo_protobuf_test_casttype.MyFloat32Type GetMyFloat64Ptr() *github_com_gogo_protobuf_test_casttype.MyFloat64Type GetMyFloat64() github_com_gogo_protobuf_test_casttype.MyFloat64Type GetMyBytes() github_com_gogo_protobuf_test_casttype.Bytes GetNormalBytes() []byte GetMyUint64S() []github_com_gogo_protobuf_test_casttype.MyUint64Type GetMyMap() github_com_gogo_protobuf_test_casttype.MyMapType GetMyCustomMap() map[github_com_gogo_protobuf_test_casttype.MyStringType]github_com_gogo_protobuf_test_casttype.MyUint64Type GetMyNullableMap() map[github_com_gogo_protobuf_test_casttype.MyInt32Type]*Wilson GetMyEmbeddedMap() map[github_com_gogo_protobuf_test_casttype.MyInt32Type]Wilson GetString_() *github_com_gogo_protobuf_test_casttype.MyStringType } func (this *Castaway) Proto() github_com_gogo_protobuf_proto.Message { return this } func (this *Castaway) TestProto() github_com_gogo_protobuf_proto.Message { return NewCastawayFromFace(this) } func (this *Castaway) 
GetInt32Ptr() *int32 { return this.Int32Ptr } func (this *Castaway) GetInt32() int32 { return this.Int32 } func (this *Castaway) GetMyUint64Ptr() *github_com_gogo_protobuf_test_casttype.MyUint64Type { return this.MyUint64Ptr } func (this *Castaway) GetMyUint64() github_com_gogo_protobuf_test_casttype.MyUint64Type { return this.MyUint64 } func (this *Castaway) GetMyFloat32Ptr() *github_com_gogo_protobuf_test_casttype.MyFloat32Type { return this.MyFloat32Ptr } func (this *Castaway) GetMyFloat32() github_com_gogo_protobuf_test_casttype.MyFloat32Type { return this.MyFloat32 } func (this *Castaway) GetMyFloat64Ptr() *github_com_gogo_protobuf_test_casttype.MyFloat64Type { return this.MyFloat64Ptr } func (this *Castaway) GetMyFloat64() github_com_gogo_protobuf_test_casttype.MyFloat64Type { return this.MyFloat64 } func (this *Castaway) GetMyBytes() github_com_gogo_protobuf_test_casttype.Bytes { return this.MyBytes } func (this *Castaway) GetNormalBytes() []byte { return this.NormalBytes } func (this *Castaway) GetMyUint64S() []github_com_gogo_protobuf_test_casttype.MyUint64Type { return this.MyUint64S } func (this *Castaway) GetMyMap() github_com_gogo_protobuf_test_casttype.MyMapType { return this.MyMap } func (this *Castaway) GetMyCustomMap() map[github_com_gogo_protobuf_test_casttype.MyStringType]github_com_gogo_protobuf_test_casttype.MyUint64Type { return this.MyCustomMap } func (this *Castaway) GetMyNullableMap() map[github_com_gogo_protobuf_test_casttype.MyInt32Type]*Wilson { return this.MyNullableMap } func (this *Castaway) GetMyEmbeddedMap() map[github_com_gogo_protobuf_test_casttype.MyInt32Type]Wilson { return this.MyEmbeddedMap } func (this *Castaway) GetString_() *github_com_gogo_protobuf_test_casttype.MyStringType { return this.String_ } func NewCastawayFromFace(that CastawayFace) *Castaway { this := &Castaway{} this.Int32Ptr = that.GetInt32Ptr() this.Int32 = that.GetInt32() this.MyUint64Ptr = that.GetMyUint64Ptr() this.MyUint64 = that.GetMyUint64()<|fim▁hole|> 
this.MyFloat32Ptr = that.GetMyFloat32Ptr() this.MyFloat32 = that.GetMyFloat32() this.MyFloat64Ptr = that.GetMyFloat64Ptr() this.MyFloat64 = that.GetMyFloat64() this.MyBytes = that.GetMyBytes() this.NormalBytes = that.GetNormalBytes() this.MyUint64S = that.GetMyUint64S() this.MyMap = that.GetMyMap() this.MyCustomMap = that.GetMyCustomMap() this.MyNullableMap = that.GetMyNullableMap() this.MyEmbeddedMap = that.GetMyEmbeddedMap() this.String_ = that.GetString_() return this } type WilsonFace interface { Proto() github_com_gogo_protobuf_proto.Message GetInt64() *int64 } func (this *Wilson) Proto() github_com_gogo_protobuf_proto.Message { return this } func (this *Wilson) TestProto() github_com_gogo_protobuf_proto.Message { return NewWilsonFromFace(this) } func (this *Wilson) GetInt64() *int64 { return this.Int64 } func NewWilsonFromFace(that WilsonFace) *Wilson { this := &Wilson{} this.Int64 = that.GetInt64() return this } func (this *Castaway) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 20) s = append(s, "&casttype.Castaway{") if this.Int32Ptr != nil { s = append(s, "Int32Ptr: "+valueToGoStringCasttype(this.Int32Ptr, "int32")+",\n") } s = append(s, "Int32: "+fmt.Sprintf("%#v", this.Int32)+",\n") if this.MyUint64Ptr != nil { s = append(s, "MyUint64Ptr: "+valueToGoStringCasttype(this.MyUint64Ptr, "github_com_gogo_protobuf_test_casttype.MyUint64Type")+",\n") } s = append(s, "MyUint64: "+fmt.Sprintf("%#v", this.MyUint64)+",\n") if this.MyFloat32Ptr != nil { s = append(s, "MyFloat32Ptr: "+valueToGoStringCasttype(this.MyFloat32Ptr, "github_com_gogo_protobuf_test_casttype.MyFloat32Type")+",\n") } s = append(s, "MyFloat32: "+fmt.Sprintf("%#v", this.MyFloat32)+",\n") if this.MyFloat64Ptr != nil { s = append(s, "MyFloat64Ptr: "+valueToGoStringCasttype(this.MyFloat64Ptr, "github_com_gogo_protobuf_test_casttype.MyFloat64Type")+",\n") } s = append(s, "MyFloat64: "+fmt.Sprintf("%#v", this.MyFloat64)+",\n") if this.MyBytes != nil { s = append(s, 
"MyBytes: "+valueToGoStringCasttype(this.MyBytes, "github_com_gogo_protobuf_test_casttype.Bytes")+",\n") } if this.NormalBytes != nil { s = append(s, "NormalBytes: "+valueToGoStringCasttype(this.NormalBytes, "byte")+",\n") } if this.MyUint64S != nil { s = append(s, "MyUint64S: "+fmt.Sprintf("%#v", this.MyUint64S)+",\n") } keysForMyMap := make([]string, 0, len(this.MyMap)) for k := range this.MyMap { keysForMyMap = append(keysForMyMap, k) } github_com_gogo_protobuf_sortkeys.Strings(keysForMyMap) mapStringForMyMap := "github_com_gogo_protobuf_test_casttype.MyMapType{" for _, k := range keysForMyMap { mapStringForMyMap += fmt.Sprintf("%#v: %#v,", k, this.MyMap[k]) } mapStringForMyMap += "}" if this.MyMap != nil { s = append(s, "MyMap: "+mapStringForMyMap+",\n") } keysForMyCustomMap := make([]string, 0, len(this.MyCustomMap)) for k := range this.MyCustomMap { keysForMyCustomMap = append(keysForMyCustomMap, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForMyCustomMap) mapStringForMyCustomMap := "map[github_com_gogo_protobuf_test_casttype.MyStringType]github_com_gogo_protobuf_test_casttype.MyUint64Type{" for _, k := range keysForMyCustomMap { mapStringForMyCustomMap += fmt.Sprintf("%#v: %#v,", k, this.MyCustomMap[github_com_gogo_protobuf_test_casttype.MyStringType(k)]) } mapStringForMyCustomMap += "}" if this.MyCustomMap != nil { s = append(s, "MyCustomMap: "+mapStringForMyCustomMap+",\n") } keysForMyNullableMap := make([]int32, 0, len(this.MyNullableMap)) for k := range this.MyNullableMap { keysForMyNullableMap = append(keysForMyNullableMap, int32(k)) } github_com_gogo_protobuf_sortkeys.Int32s(keysForMyNullableMap) mapStringForMyNullableMap := "map[github_com_gogo_protobuf_test_casttype.MyInt32Type]*Wilson{" for _, k := range keysForMyNullableMap { mapStringForMyNullableMap += fmt.Sprintf("%#v: %#v,", k, this.MyNullableMap[github_com_gogo_protobuf_test_casttype.MyInt32Type(k)]) } mapStringForMyNullableMap += "}" if this.MyNullableMap != nil { s = append(s, 
"MyNullableMap: "+mapStringForMyNullableMap+",\n") } keysForMyEmbeddedMap := make([]int32, 0, len(this.MyEmbeddedMap)) for k := range this.MyEmbeddedMap { keysForMyEmbeddedMap = append(keysForMyEmbeddedMap, int32(k)) } github_com_gogo_protobuf_sortkeys.Int32s(keysForMyEmbeddedMap) mapStringForMyEmbeddedMap := "map[github_com_gogo_protobuf_test_casttype.MyInt32Type]Wilson{" for _, k := range keysForMyEmbeddedMap { mapStringForMyEmbeddedMap += fmt.Sprintf("%#v: %#v,", k, this.MyEmbeddedMap[github_com_gogo_protobuf_test_casttype.MyInt32Type(k)]) } mapStringForMyEmbeddedMap += "}" if this.MyEmbeddedMap != nil { s = append(s, "MyEmbeddedMap: "+mapStringForMyEmbeddedMap+",\n") } if this.String_ != nil { s = append(s, "String_: "+valueToGoStringCasttype(this.String_, "github_com_gogo_protobuf_test_casttype.MyStringType")+",\n") } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } s = append(s, "}") return strings.Join(s, "") } func (this *Wilson) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 5) s = append(s, "&casttype.Wilson{") if this.Int64 != nil { s = append(s, "Int64: "+valueToGoStringCasttype(this.Int64, "int64")+",\n") } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } s = append(s, "}") return strings.Join(s, "") } func valueToGoStringCasttype(v interface{}, typ string) string { rv := reflect.ValueOf(v) if rv.IsNil() { return "nil" } pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) } func NewPopulatedCastaway(r randyCasttype, easy bool) *Castaway { this := &Castaway{} if r.Intn(10) != 0 { v1 := int32(r.Int63()) if r.Intn(2) == 0 { v1 *= -1 } this.Int32Ptr = &v1 } this.Int32 = int32(r.Int63()) if r.Intn(2) == 0 { this.Int32 *= -1 } if r.Intn(10) != 0 { v2 := github_com_gogo_protobuf_test_casttype.MyUint64Type(uint64(r.Uint32())) 
this.MyUint64Ptr = &v2 } this.MyUint64 = github_com_gogo_protobuf_test_casttype.MyUint64Type(uint64(r.Uint32())) if r.Intn(10) != 0 { v3 := github_com_gogo_protobuf_test_casttype.MyFloat32Type(r.Float32()) if r.Intn(2) == 0 { v3 *= -1 } this.MyFloat32Ptr = &v3 } this.MyFloat32 = github_com_gogo_protobuf_test_casttype.MyFloat32Type(r.Float32()) if r.Intn(2) == 0 { this.MyFloat32 *= -1 } if r.Intn(10) != 0 { v4 := github_com_gogo_protobuf_test_casttype.MyFloat64Type(r.Float64()) if r.Intn(2) == 0 { v4 *= -1 } this.MyFloat64Ptr = &v4 } this.MyFloat64 = github_com_gogo_protobuf_test_casttype.MyFloat64Type(r.Float64()) if r.Intn(2) == 0 { this.MyFloat64 *= -1 } if r.Intn(10) != 0 { v5 := r.Intn(100) this.MyBytes = make(github_com_gogo_protobuf_test_casttype.Bytes, v5) for i := 0; i < v5; i++ { this.MyBytes[i] = byte(r.Intn(256)) } } if r.Intn(10) != 0 { v6 := r.Intn(100) this.NormalBytes = make([]byte, v6) for i := 0; i < v6; i++ { this.NormalBytes[i] = byte(r.Intn(256)) } } if r.Intn(10) != 0 { v7 := r.Intn(10) this.MyUint64S = make([]github_com_gogo_protobuf_test_casttype.MyUint64Type, v7) for i := 0; i < v7; i++ { this.MyUint64S[i] = github_com_gogo_protobuf_test_casttype.MyUint64Type(uint64(r.Uint32())) } } if r.Intn(10) != 0 { v8 := r.Intn(10) this.MyMap = make(github_com_gogo_protobuf_test_casttype.MyMapType) for i := 0; i < v8; i++ { v9 := randStringCasttype(r) this.MyMap[v9] = uint64(uint64(r.Uint32())) } } if r.Intn(10) != 0 { v10 := r.Intn(10) this.MyCustomMap = make(map[github_com_gogo_protobuf_test_casttype.MyStringType]github_com_gogo_protobuf_test_casttype.MyUint64Type) for i := 0; i < v10; i++ { v11 := github_com_gogo_protobuf_test_casttype.MyStringType(randStringCasttype(r)) this.MyCustomMap[v11] = github_com_gogo_protobuf_test_casttype.MyUint64Type(uint64(r.Uint32())) } } if r.Intn(10) != 0 { v12 := r.Intn(10) this.MyNullableMap = make(map[github_com_gogo_protobuf_test_casttype.MyInt32Type]*Wilson) for i := 0; i < v12; i++ { 
this.MyNullableMap[github_com_gogo_protobuf_test_casttype.MyInt32Type(int32(r.Int31()))] = NewPopulatedWilson(r, easy) } } if r.Intn(10) != 0 { v13 := r.Intn(10) this.MyEmbeddedMap = make(map[github_com_gogo_protobuf_test_casttype.MyInt32Type]Wilson) for i := 0; i < v13; i++ { this.MyEmbeddedMap[github_com_gogo_protobuf_test_casttype.MyInt32Type(int32(r.Int31()))] = *NewPopulatedWilson(r, easy) } } if r.Intn(10) != 0 { v14 := github_com_gogo_protobuf_test_casttype.MyStringType(randStringCasttype(r)) this.String_ = &v14 } if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedCasttype(r, 17) } return this } func NewPopulatedWilson(r randyCasttype, easy bool) *Wilson { this := &Wilson{} if r.Intn(10) != 0 { v15 := int64(r.Int63()) if r.Intn(2) == 0 { v15 *= -1 } this.Int64 = &v15 } if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedCasttype(r, 2) } return this } type randyCasttype interface { Float32() float32 Float64() float64 Int63() int64 Int31() int32 Uint32() uint32 Intn(n int) int } func randUTF8RuneCasttype(r randyCasttype) rune { ru := r.Intn(62) if ru < 10 { return rune(ru + 48) } else if ru < 36 { return rune(ru + 55) } return rune(ru + 61) } func randStringCasttype(r randyCasttype) string { v16 := r.Intn(100) tmps := make([]rune, v16) for i := 0; i < v16; i++ { tmps[i] = randUTF8RuneCasttype(r) } return string(tmps) } func randUnrecognizedCasttype(r randyCasttype, maxFieldNumber int) (dAtA []byte) { l := r.Intn(5) for i := 0; i < l; i++ { wire := r.Intn(4) if wire == 3 { wire = 5 } fieldNumber := maxFieldNumber + r.Intn(100) dAtA = randFieldCasttype(dAtA, r, fieldNumber, wire) } return dAtA } func randFieldCasttype(dAtA []byte, r randyCasttype, fieldNumber int, wire int) []byte { key := uint32(fieldNumber)<<3 | uint32(wire) switch wire { case 0: dAtA = encodeVarintPopulateCasttype(dAtA, uint64(key)) v17 := r.Int63() if r.Intn(2) == 0 { v17 *= -1 } dAtA = encodeVarintPopulateCasttype(dAtA, uint64(v17)) case 1: dAtA = 
encodeVarintPopulateCasttype(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) case 2: dAtA = encodeVarintPopulateCasttype(dAtA, uint64(key)) ll := r.Intn(100) dAtA = encodeVarintPopulateCasttype(dAtA, uint64(ll)) for j := 0; j < ll; j++ { dAtA = append(dAtA, byte(r.Intn(256))) } default: dAtA = encodeVarintPopulateCasttype(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) } return dAtA } func encodeVarintPopulateCasttype(dAtA []byte, v uint64) []byte { for v >= 1<<7 { dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) v >>= 7 } dAtA = append(dAtA, uint8(v)) return dAtA } func (m *Castaway) Size() (n int) { var l int _ = l if m.Int32Ptr != nil { n += 1 + sovCasttype(uint64(*m.Int32Ptr)) } n += 1 + sovCasttype(uint64(m.Int32)) if m.MyUint64Ptr != nil { n += 1 + sovCasttype(uint64(*m.MyUint64Ptr)) } n += 1 + sovCasttype(uint64(m.MyUint64)) if m.MyFloat32Ptr != nil { n += 5 } n += 5 if m.MyFloat64Ptr != nil { n += 9 } n += 9 if m.MyBytes != nil { l = len(m.MyBytes) n += 1 + l + sovCasttype(uint64(l)) } if m.NormalBytes != nil { l = len(m.NormalBytes) n += 1 + l + sovCasttype(uint64(l)) } if len(m.MyUint64S) > 0 { for _, e := range m.MyUint64S { n += 1 + sovCasttype(uint64(e)) } } if len(m.MyMap) > 0 { for k, v := range m.MyMap { _ = k _ = v mapEntrySize := 1 + len(k) + sovCasttype(uint64(len(k))) + 1 + sovCasttype(uint64(v)) n += mapEntrySize + 1 + sovCasttype(uint64(mapEntrySize)) } } if len(m.MyCustomMap) > 0 { for k, v := range m.MyCustomMap { _ = k _ = v mapEntrySize := 1 + len(k) + sovCasttype(uint64(len(k))) + 1 + sovCasttype(uint64(v)) n += mapEntrySize + 1 + sovCasttype(uint64(mapEntrySize)) } } if len(m.MyNullableMap) > 0 { for k, v := range m.MyNullableMap { _ = k _ = v l = 0 if v != nil { l = v.Size() l += 1 + sovCasttype(uint64(l)) } mapEntrySize := 1 + 
sovCasttype(uint64(k)) + l n += mapEntrySize + 1 + sovCasttype(uint64(mapEntrySize)) } } if len(m.MyEmbeddedMap) > 0 { for k, v := range m.MyEmbeddedMap { _ = k _ = v l = v.Size() mapEntrySize := 1 + sovCasttype(uint64(k)) + 1 + l + sovCasttype(uint64(l)) n += mapEntrySize + 1 + sovCasttype(uint64(mapEntrySize)) } } if m.String_ != nil { l = len(*m.String_) n += 2 + l + sovCasttype(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *Wilson) Size() (n int) { var l int _ = l if m.Int64 != nil { n += 1 + sovCasttype(uint64(*m.Int64)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func sovCasttype(x uint64) (n int) { for { n++ x >>= 7 if x == 0 { break } } return n } func sozCasttype(x uint64) (n int) { return sovCasttype(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (this *Castaway) String() string { if this == nil { return "nil" } keysForMyMap := make([]string, 0, len(this.MyMap)) for k := range this.MyMap { keysForMyMap = append(keysForMyMap, k) } github_com_gogo_protobuf_sortkeys.Strings(keysForMyMap) mapStringForMyMap := "github_com_gogo_protobuf_test_casttype.MyMapType{" for _, k := range keysForMyMap { mapStringForMyMap += fmt.Sprintf("%v: %v,", k, this.MyMap[k]) } mapStringForMyMap += "}" keysForMyCustomMap := make([]string, 0, len(this.MyCustomMap)) for k := range this.MyCustomMap { keysForMyCustomMap = append(keysForMyCustomMap, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForMyCustomMap) mapStringForMyCustomMap := "map[github_com_gogo_protobuf_test_casttype.MyStringType]github_com_gogo_protobuf_test_casttype.MyUint64Type{" for _, k := range keysForMyCustomMap { mapStringForMyCustomMap += fmt.Sprintf("%v: %v,", k, this.MyCustomMap[github_com_gogo_protobuf_test_casttype.MyStringType(k)]) } mapStringForMyCustomMap += "}" keysForMyNullableMap := make([]int32, 0, len(this.MyNullableMap)) for k := range this.MyNullableMap { keysForMyNullableMap = append(keysForMyNullableMap, 
int32(k)) } github_com_gogo_protobuf_sortkeys.Int32s(keysForMyNullableMap) mapStringForMyNullableMap := "map[github_com_gogo_protobuf_test_casttype.MyInt32Type]*Wilson{" for _, k := range keysForMyNullableMap { mapStringForMyNullableMap += fmt.Sprintf("%v: %v,", k, this.MyNullableMap[github_com_gogo_protobuf_test_casttype.MyInt32Type(k)]) } mapStringForMyNullableMap += "}" keysForMyEmbeddedMap := make([]int32, 0, len(this.MyEmbeddedMap)) for k := range this.MyEmbeddedMap { keysForMyEmbeddedMap = append(keysForMyEmbeddedMap, int32(k)) } github_com_gogo_protobuf_sortkeys.Int32s(keysForMyEmbeddedMap) mapStringForMyEmbeddedMap := "map[github_com_gogo_protobuf_test_casttype.MyInt32Type]Wilson{" for _, k := range keysForMyEmbeddedMap { mapStringForMyEmbeddedMap += fmt.Sprintf("%v: %v,", k, this.MyEmbeddedMap[github_com_gogo_protobuf_test_casttype.MyInt32Type(k)]) } mapStringForMyEmbeddedMap += "}" s := strings.Join([]string{`&Castaway{`, `Int32Ptr:` + valueToStringCasttype(this.Int32Ptr) + `,`, `Int32:` + fmt.Sprintf("%v", this.Int32) + `,`, `MyUint64Ptr:` + valueToStringCasttype(this.MyUint64Ptr) + `,`, `MyUint64:` + fmt.Sprintf("%v", this.MyUint64) + `,`, `MyFloat32Ptr:` + valueToStringCasttype(this.MyFloat32Ptr) + `,`, `MyFloat32:` + fmt.Sprintf("%v", this.MyFloat32) + `,`, `MyFloat64Ptr:` + valueToStringCasttype(this.MyFloat64Ptr) + `,`, `MyFloat64:` + fmt.Sprintf("%v", this.MyFloat64) + `,`, `MyBytes:` + valueToStringCasttype(this.MyBytes) + `,`, `NormalBytes:` + valueToStringCasttype(this.NormalBytes) + `,`, `MyUint64S:` + fmt.Sprintf("%v", this.MyUint64S) + `,`, `MyMap:` + mapStringForMyMap + `,`, `MyCustomMap:` + mapStringForMyCustomMap + `,`, `MyNullableMap:` + mapStringForMyNullableMap + `,`, `MyEmbeddedMap:` + mapStringForMyEmbeddedMap + `,`, `String_:` + valueToStringCasttype(this.String_) + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s } func (this *Wilson) String() string { if this == nil { return "nil" } s 
:= strings.Join([]string{`&Wilson{`, `Int64:` + valueToStringCasttype(this.Int64) + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s } func valueToStringCasttype(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { return "nil" } pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } func (m *Castaway) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Castaway) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Int32Ptr != nil { dAtA[i] = 0x8 i++ i = encodeVarintCasttype(dAtA, i, uint64(*m.Int32Ptr)) } dAtA[i] = 0x10 i++ i = encodeVarintCasttype(dAtA, i, uint64(m.Int32)) if m.MyUint64Ptr != nil { dAtA[i] = 0x18 i++ i = encodeVarintCasttype(dAtA, i, uint64(*m.MyUint64Ptr)) } dAtA[i] = 0x20 i++ i = encodeVarintCasttype(dAtA, i, uint64(m.MyUint64)) if m.MyFloat32Ptr != nil { dAtA[i] = 0x2d i++ i = encodeFixed32Casttype(dAtA, i, uint32(math.Float32bits(float32(*m.MyFloat32Ptr)))) } dAtA[i] = 0x35 i++ i = encodeFixed32Casttype(dAtA, i, uint32(math.Float32bits(float32(m.MyFloat32)))) if m.MyFloat64Ptr != nil { dAtA[i] = 0x39 i++ i = encodeFixed64Casttype(dAtA, i, uint64(math.Float64bits(float64(*m.MyFloat64Ptr)))) } dAtA[i] = 0x41 i++ i = encodeFixed64Casttype(dAtA, i, uint64(math.Float64bits(float64(m.MyFloat64)))) if m.MyBytes != nil { dAtA[i] = 0x4a i++ i = encodeVarintCasttype(dAtA, i, uint64(len(m.MyBytes))) i += copy(dAtA[i:], m.MyBytes) } if m.NormalBytes != nil { dAtA[i] = 0x52 i++ i = encodeVarintCasttype(dAtA, i, uint64(len(m.NormalBytes))) i += copy(dAtA[i:], m.NormalBytes) } if len(m.MyUint64S) > 0 { for _, num := range m.MyUint64S { dAtA[i] = 0x58 i++ i = encodeVarintCasttype(dAtA, i, uint64(num)) } } if len(m.MyMap) > 0 { for k := range m.MyMap { dAtA[i] = 0x62 i++ v := m.MyMap[k] mapSize := 1 + len(k) + sovCasttype(uint64(len(k))) + 1 + 
sovCasttype(uint64(v)) i = encodeVarintCasttype(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa i++ i = encodeVarintCasttype(dAtA, i, uint64(len(k))) i += copy(dAtA[i:], k) dAtA[i] = 0x10 i++ i = encodeVarintCasttype(dAtA, i, uint64(v)) } } if len(m.MyCustomMap) > 0 { for k := range m.MyCustomMap { dAtA[i] = 0x6a i++ v := m.MyCustomMap[k] mapSize := 1 + len(k) + sovCasttype(uint64(len(k))) + 1 + sovCasttype(uint64(v)) i = encodeVarintCasttype(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa i++ i = encodeVarintCasttype(dAtA, i, uint64(len(k))) i += copy(dAtA[i:], k) dAtA[i] = 0x10 i++ i = encodeVarintCasttype(dAtA, i, uint64(v)) } } if len(m.MyNullableMap) > 0 { for k := range m.MyNullableMap { dAtA[i] = 0x72 i++ v := m.MyNullableMap[k] msgSize := 0 if v != nil { msgSize = v.Size() msgSize += 1 + sovCasttype(uint64(msgSize)) } mapSize := 1 + sovCasttype(uint64(k)) + msgSize i = encodeVarintCasttype(dAtA, i, uint64(mapSize)) dAtA[i] = 0x8 i++ i = encodeVarintCasttype(dAtA, i, uint64(k)) if v != nil { dAtA[i] = 0x12 i++ i = encodeVarintCasttype(dAtA, i, uint64(v.Size())) n1, err := v.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 } } } if len(m.MyEmbeddedMap) > 0 { for k := range m.MyEmbeddedMap { dAtA[i] = 0x7a i++ v := m.MyEmbeddedMap[k] msgSize := 0 if (&v) != nil { msgSize = (&v).Size() msgSize += 1 + sovCasttype(uint64(msgSize)) } mapSize := 1 + sovCasttype(uint64(k)) + msgSize i = encodeVarintCasttype(dAtA, i, uint64(mapSize)) dAtA[i] = 0x8 i++ i = encodeVarintCasttype(dAtA, i, uint64(k)) dAtA[i] = 0x12 i++ i = encodeVarintCasttype(dAtA, i, uint64((&v).Size())) n2, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n2 } } if m.String_ != nil { dAtA[i] = 0x82 i++ dAtA[i] = 0x1 i++ i = encodeVarintCasttype(dAtA, i, uint64(len(*m.String_))) i += copy(dAtA[i:], *m.String_) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *Wilson) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, 
size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Wilson) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Int64 != nil { dAtA[i] = 0x8 i++ i = encodeVarintCasttype(dAtA, i, uint64(*m.Int64)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func encodeFixed64Casttype(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) dAtA[offset+1] = uint8(v >> 8) dAtA[offset+2] = uint8(v >> 16) dAtA[offset+3] = uint8(v >> 24) dAtA[offset+4] = uint8(v >> 32) dAtA[offset+5] = uint8(v >> 40) dAtA[offset+6] = uint8(v >> 48) dAtA[offset+7] = uint8(v >> 56) return offset + 8 } func encodeFixed32Casttype(dAtA []byte, offset int, v uint32) int { dAtA[offset] = uint8(v) dAtA[offset+1] = uint8(v >> 8) dAtA[offset+2] = uint8(v >> 16) dAtA[offset+3] = uint8(v >> 24) return offset + 4 } func encodeVarintCasttype(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } func init() { proto.RegisterFile("combos/unsafemarshaler/casttype.proto", fileDescriptorCasttype) } var fileDescriptorCasttype = []byte{ // 701 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x95, 0xbf, 0x6f, 0xd3, 0x40, 0x14, 0xc7, 0x7d, 0x4d, 0xd3, 0x26, 0x97, 0x06, 0xa2, 0x13, 0x83, 0x55, 0x89, 0xb3, 0xd5, 0xaa, 0xc8, 0x03, 0x24, 0x55, 0x1a, 0x95, 0xaa, 0x20, 0x06, 0x57, 0x45, 0x2a, 0xc2, 0x05, 0x19, 0xaa, 0x0a, 0xc4, 0x72, 0x69, 0xdd, 0x34, 0xc2, 0xb1, 0x23, 0xfb, 0x02, 0xf2, 0x56, 0x95, 0x01, 0x89, 0xbf, 0x84, 0x91, 0x05, 0x89, 0x91, 0xb1, 0x63, 0x47, 0xa6, 0xb4, 0x36, 0x4b, 0xd9, 0x3a, 0x56, 0x99, 0xd0, 0xdd, 0x39, 0xb1, 0xfb, 0x03, 0x94, 0xa6, 0xdb, 0xbd, 0xbb, 0xf7, 0x3e, 0xef, 0x7b, 0xef, 0xde, 0xdd, 0xc1, 0xb9, 0x2d, 0xb7, 0x55, 0x77, 0xfd, 0x4a, 0xc7, 0xf1, 0xc9, 0x8e, 0xd5, 0x22, 0x9e, 0xbf, 0x4b, 0x6c, 0xcb, 0xab, 0x6c, 0x11, 0x9f, 
0xd2, 0xa0, 0x6d, 0x95, 0xdb, 0x9e, 0x4b, 0x5d, 0x94, 0xeb, 0xdb, 0xd3, 0x0f, 0x1a, 0x4d, 0xba, 0xdb, 0xa9, 0x97, 0xb7, 0xdc, 0x56, 0xa5, 0xe1, 0x36, 0xdc, 0x0a, 0x77, 0xa8, 0x77, 0x76, 0xb8, 0xc5, 0x0d, 0x3e, 0x12, 0x81, 0x33, 0x7f, 0x8a, 0x30, 0xb7, 0x42, 0x7c, 0x4a, 0x3e, 0x92, 0x00, 0xcd, 0xc1, 0xdc, 0x9a, 0x43, 0x17, 0xaa, 0x2f, 0xa9, 0x27, 0x03, 0x15, 0x68, 0x19, 0x3d, 0xdf, 0xeb, 0x2a, 0xd9, 0x26, 0x9b, 0x33, 0x07, 0x4b, 0x68, 0x16, 0x66, 0xf9, 0x58, 0x1e, 0xe3, 0x3e, 0xc5, 0x83, 0xae, 0x22, 0x25, 0x7e, 0x62, 0x0d, 0xbd, 0x81, 0x05, 0x23, 0xd8, 0x68, 0x3a, 0x74, 0xb1, 0xc6, 0x70, 0x19, 0x15, 0x68, 0xe3, 0xfa, 0xc3, 0x5e, 0x57, 0x59, 0xf8, 0xa7, 0x40, 0x6a, 0xf9, 0x34, 0xd9, 0x58, 0x3f, 0xfa, 0x75, 0xd0, 0xb6, 0xcc, 0x34, 0x0b, 0x6d, 0xc2, 0x5c, 0xdf, 0x94, 0xc7, 0x39, 0xf7, 0x51, 0x2c, 0x61, 0x24, 0xf6, 0x00, 0x86, 0xde, 0xc1, 0x29, 0x23, 0x78, 0x6a, 0xbb, 0x24, 0xae, 0x41, 0x56, 0x05, 0xda, 0x98, 0xbe, 0xd4, 0xeb, 0x2a, 0xb5, 0xa1, 0xc1, 0x71, 0x38, 0x27, 0x9f, 0xa3, 0xa1, 0xb7, 0x30, 0x3f, 0xb0, 0xe5, 0x09, 0x8e, 0x7e, 0x1c, 0xeb, 0x1e, 0x0d, 0x9f, 0xe0, 0x52, 0xca, 0x45, 0xb9, 0x27, 0x55, 0xa0, 0x81, 0x51, 0x94, 0xc7, 0x35, 0x39, 0x47, 0x4b, 0x29, 0x5f, 0xac, 0xc9, 0x39, 0x8e, 0x1e, 0x51, 0x79, 0x8c, 0x4f, 0x70, 0xe8, 0x19, 0x9c, 0x34, 0x02, 0x3d, 0xa0, 0x96, 0x2f, 0xe7, 0x55, 0xa0, 0x4d, 0xe9, 0xf3, 0xbd, 0xae, 0x72, 0x7f, 0x48, 0x2a, 0x8f, 0x33, 0xfb, 0x00, 0xa4, 0xc2, 0xc2, 0xba, 0xeb, 0xb5, 0x88, 0x2d, 0x78, 0x90, 0xf1, 0xcc, 0xf4, 0x14, 0xda, 0x60, 0x3b, 0x11, 0xa7, 0xed, 0xcb, 0x05, 0x35, 0x73, 0x93, 0x9e, 0x4c, 0x48, 0xa8, 0x09, 0xb3, 0x46, 0x60, 0x90, 0xb6, 0x3c, 0xa5, 0x66, 0xb4, 0x42, 0xf5, 0x6e, 0x79, 0x10, 0xd1, 0xbf, 0x5b, 0x65, 0xbe, 0xbe, 0xea, 0x50, 0x2f, 0xd0, 0x6b, 0xbd, 0xae, 0x32, 0x3f, 0x74, 0x46, 0x83, 0xb4, 0x79, 0x3a, 0x91, 0x01, 0x7d, 0x07, 0xec, 0x62, 0xad, 0x74, 0x7c, 0xea, 0xb6, 0x58, 0xc6, 0x22, 0xcf, 0x38, 0x7b, 0x65, 0xc6, 0x81, 0x97, 0xc8, 0xeb, 0xec, 0x1f, 0x5d, 0x63, 0xa7, 0xaf, 0xa8, 0xd7, 0x74, 0x1a, 0x2c, 0xf5, 0x97, 
0xa3, 0x91, 0x2f, 0xed, 0x40, 0x01, 0xfa, 0x04, 0x60, 0xd1, 0x08, 0xd6, 0x3b, 0xb6, 0x4d, 0xea, 0xb6, 0xc5, 0x94, 0xdf, 0xe2, 0xca, 0xe7, 0xae, 0x54, 0x9e, 0xf2, 0x13, 0xda, 0x17, 0xf7, 0x8f, 0x94, 0xea, 0xd0, 0x22, 0xf8, 0x13, 0xc4, 0x35, 0x9c, 0xcf, 0x89, 0x3e, 0x73, 0x15, 0xab, 0xad, 0xba, 0xb5, 0xbd, 0x6d, 0x6d, 0x33, 0x15, 0xb7, 0xff, 0xa3, 0x22, 0xe5, 0x27, 0x54, 0x2c, 0xb3, 0xae, 0x1f, 0x5d, 0x49, 0x8a, 0x87, 0x5e, 0xc0, 0x09, 0x51, 0x61, 0xb9, 0xa4, 0x02, 0x2d, 0x7f, 0xcd, 0x36, 0x4c, 0x0e, 0xc7, 0x8c, 0x31, 0xd3, 0x4b, 0x10, 0x26, 0x3d, 0x86, 0x4a, 0x30, 0xf3, 0xde, 0x0a, 0xf8, 0x2b, 0x9e, 0x37, 0xd9, 0x10, 0xdd, 0x81, 0xd9, 0x0f, 0xc4, 0xee, 0x58, 0xfc, 0xd5, 0x1e, 0x37, 0x85, 0xb1, 0x3c, 0xb6, 0x04, 0xa6, 0x9f, 0xc0, 0xd2, 0xc5, 0x5e, 0xb9, 0x56, 0xbc, 0x09, 0xd1, 0xe5, 0x13, 0x4b, 0x13, 0xb2, 0x82, 0x70, 0x2f, 0x4d, 0x28, 0x54, 0x4b, 0x49, 0xcd, 0x37, 0x9b, 0xb6, 0xef, 0x3a, 0x97, 0x98, 0x17, 0xeb, 0x7f, 0x33, 0xe6, 0x0c, 0x86, 0x13, 0x62, 0x92, 0xed, 0x65, 0x8d, 0x7f, 0x1f, 0xfc, 0x97, 0x33, 0x85, 0xa1, 0x3f, 0x3f, 0x08, 0xb1, 0x74, 0x18, 0x62, 0xe9, 0x57, 0x88, 0xa5, 0xe3, 0x10, 0x83, 0x93, 0x10, 0x83, 0xd3, 0x10, 0x83, 0xb3, 0x10, 0x83, 0xbd, 0x08, 0x83, 0xaf, 0x11, 0x06, 0xdf, 0x22, 0x0c, 0x7e, 0x44, 0x18, 0xfc, 0x8c, 0x30, 0x38, 0x88, 0xb0, 0x74, 0x18, 0x61, 0xe9, 0x38, 0xc2, 0xe0, 0x24, 0xc2, 0xd2, 0x69, 0x84, 0xc1, 0x59, 0x84, 0xc1, 0xde, 0x6f, 0x2c, 0xfd, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x47, 0x3b, 0xeb, 0xba, 0x07, 0x00, 0x00, }<|fim▁end|>
<|file_name|>match-bindings.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed<|fim▁hole|>#![allow(dead_code)] #![feature(generators)] enum Enum { A(String), B } fn main() { || { loop { if let true = true { match Enum::A(String::new()) { Enum::A(_var) => {} Enum::B => {} } } yield; } }; }<|fim▁end|>
// except according to those terms. // run-pass
<|file_name|>heap-snapshot-summary-show-all.js<|end_file_name|><|fim▁begin|>// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. (async function() { TestRunner.addResult(`Tests Summary view of detailed heap snapshots. The "Show All" button must show all nodes.\n`); await TestRunner.loadTestModule('heap_profiler_test_runner'); await TestRunner.showPanel('heap_profiler'); var instanceCount = 25; function createHeapSnapshot() { return HeapProfilerTestRunner.createHeapSnapshot(instanceCount); } HeapProfilerTestRunner.runHeapSnapshotTestSuite([function testShowAll(next) { HeapProfilerTestRunner.takeAndOpenSnapshot(createHeapSnapshot, step1); function step1() { HeapProfilerTestRunner.switchToView('Summary', step2); } function step2() { var row = HeapProfilerTestRunner.findRow('A'); TestRunner.assertEquals(true, !!row, '"A" row'); HeapProfilerTestRunner.expandRow(row, step3); } function step3(row) { var count = row.data['count']; TestRunner.assertEquals(instanceCount.toString(), count); var buttonsNode = HeapProfilerTestRunner.findButtonsNode(row); TestRunner.assertEquals(true, !!buttonsNode, 'buttons node'); var words = buttonsNode.showAll.textContent.split(' '); for (var i = 0; i < words.length; ++i) { var maybeNumber = parseInt(words[i], 10);<|fim▁hole|> TestRunner.assertEquals( instanceCount - row.dataGrid.defaultPopulateCount(), maybeNumber, buttonsNode.showAll.textContent); } HeapProfilerTestRunner.clickShowMoreButton('showAll', buttonsNode, step4); } function step4(row) { var rowsShown = HeapProfilerTestRunner.countDataRows(row); TestRunner.assertEquals(instanceCount, rowsShown, 'after showAll click'); var buttonsNode = HeapProfilerTestRunner.findButtonsNode(row); TestRunner.assertEquals(false, !!buttonsNode, 'buttons node found when all instances are shown!'); setTimeout(next, 0); } }]); })();<|fim▁end|>
if (!isNaN(maybeNumber))
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>// http://rosettacode.org/wiki/Walk_a_directory/Recursively extern crate regex; use regex::Regex; use std::convert::AsRef; use std::fs; use std::path::Path; fn walk<P>(pth: P, regex: &Regex) where P: AsRef<Path>, { let result = match fs::read_dir(pth) {<|fim▁hole|> Ok(result) => result, Err(_) => return, }; for subp in result.into_iter().flatten() { if let Ok(filename) = subp.path().into_os_string().into_string() { if regex.is_match(&filename) { println!("{}", filename); } walk(filename, regex); } } } fn main() { walk(".", &Regex::new(r".*\.rs").unwrap()); }<|fim▁end|>
<|file_name|>auth.service.js<|end_file_name|><|fim▁begin|>const mongoose = require('mongoose'); const UserModel = mongoose.model('User'); module.exports = { login: (email, password) => {<|fim▁hole|><|fim▁end|>
return UserModel.findOne({email, password}); } };
<|file_name|>canvas_fractal.js<|end_file_name|><|fim▁begin|>var fractal = fractal || {}; fractal.workerPaths = { "mandelbrot": "public/js/mandel.js", }; fractal.Fractal = function (canvas, workerCount) { this.canvas = canvas; this.workerCount = workerCount; this.workerDoneCount = 0; this.ctx = canvas.getContext("2d"); this.width = canvas.width; this.height = canvas.height; this.workerPath = fractal.workerPaths["mandelbrot"]; this.topLeft = new Complex(-1.5, 1.1); this.bottomRight = new Complex(0.8, -1.1); this.maxIter = 1200; var lingrad = this.ctx.createLinearGradient(0, 0, this.width, 0); lingrad.addColorStop(0, '#00f'); lingrad.addColorStop(0.1, '#fa0'); lingrad.addColorStop(0.5, '#ff0'); lingrad.addColorStop(0.7, '#f1b');<|fim▁hole|> lingrad.addColorStop(1, '#fff'); this.ctx.fillStyle = lingrad; this.ctx.fillRect(0, 0, this.width, 2); this.gradientImage = this.ctx.getImageData(0, 0, this.width, 1); this.imgData = this.ctx.getImageData(0, 0, this.width, this.height); this.ondone = null; this.workers = []; }; fractal.Fractal.prototype = function () { var computeRow = function (workerIndex, row) { var args = { action: "computeRow", row: row, workerIndex: workerIndex }; this.workers[workerIndex].postMessage(args); }; var initializeWorker = function (workerIndex) { var drow = (this.bottomRight.imag - this.topLeft.imag) / this.height; var dcol = (this.bottomRight.real - this.topLeft.real) / this.width; var args = { action: "setup", maxIter: this.maxIter, width: this.width, height: this.height, topLeft: this.topLeft, bottomRight: this.bottomRight, drow: drow, dcol: dcol, workerIndex: workerIndex, juliaPoint: this.juliaPoint }; this.workers[workerIndex].postMessage(args); }; var createWorkers = function (workerPath) { var obj = this; var rowData = obj.ctx.createImageData(obj.width, 1); for (var workerIndex = 0; workerIndex < obj.workerCount; workerIndex++) { obj.workers[workerIndex] = new Worker(obj.workerPath); this.workers[workerIndex].onmessage = function (event) { 
if (event.data.logData) { console.log("Worker: " + event.data.logData); } if (event.data.row >= 0) { var wIndex = event.data.workerIndex; for (var index = 0; index < obj.width; index++) { var color = getColor.call(obj, event.data.iterData[index]); var destIndex = 4 * index; rowData.data[destIndex] = color.red; rowData.data[destIndex + 1] = color.green; rowData.data[destIndex + 2] = color.blue; rowData.data[destIndex + 3] = color.alpha; } obj.ctx.putImageData(rowData, 0, event.data.row); if (obj.nextRow < obj.height) { console.log("Worker: " + wIndex, " nextRow: " + obj.nextRow); computeRow.call(obj, wIndex, obj.nextRow); obj.nextRow = obj.nextRow + 1; } else { obj.workerDoneCount++; if (obj.workerDoneCount == obj.workerCount) { var duration = new Date().getTime() - obj.startTime; if (typeof obj.ondone === 'function') { obj.ondone(duration); } } } } }; } }; var getColor = function (iter) { if (iter == this.maxIter) { return { red: 0, green: 0, blue: 0, alpha: 255 }; } var index = (iter % this.gradientImage.width) * 4; return { red: this.gradientImage.data[index], green: this.gradientImage.data[index + 1], blue: this.gradientImage.data[index + 2], alpha: this.gradientImage.data[index + 3] }; }, render = function () { this.startTime = new Date().getTime(); this.workerDoneCount = 0; createWorkers.call(this, this.workerPath); this.nextRow = this.workerCount; for (var workerIndex = 0; workerIndex < this.workerCount; workerIndex++) { initializeWorker.call(this, workerIndex); computeRow.call(this, workerIndex, workerIndex); } } return { render: render }; } (); jQuery(function($) { var fra = new fractal.Fractal(document.getElementById("fractal"), 2); $('#draw-fractal').on('click',function() { fra.render(); }); });<|fim▁end|>
<|file_name|>leaf.js<|end_file_name|><|fim▁begin|>/** * @class PrettyJSON.view.Leaf * @extends Backbone.View * * @author #rbarriga * @version 0.1 * */ PrettyJSON.view.Leaf = Backbone.View.extend({ tagName:'span', data:null, level:0, path:'', type:'string', isLast: true, events: { "mouseover .leaf-container": "mouseover", "mouseout .leaf-container": "mouseout" }, initialize: function(){ this.data = this.options.data; this.level = this.options.level; this.path = this.options.path; this.type = this.getType(); this.isLast = _.isUndefined(this.options.isLast) ? this.isLast : this.options.isLast; this.render(); }, getType: function(){ var m = 'string'; var d = this.data; if(_.isNumber(d)) m = 'number'; else if(_.isBoolean(d)) m = 'boolean'; else if(_.isDate(d)) m = 'date'; return m; }, getState:function(){ var coma = this.isLast ? '': ','; var state = { data: this.data, level: this.level, path: this.path, type: this.type, coma: coma }; return state; }, render: function(){ var state = this.getState();<|fim▁hole|> }, mouseover:function(e){ e.stopPropagation(); this.toggleTdPath(true); var path = this.path + '&nbsp;:&nbsp;<span class="' + this.type +'"><b>' + this.data + '</b></span>'; this.trigger("mouseover",e, path); }, mouseout:function(e){ e.stopPropagation(); this.toggleTdPath(false); this.trigger("mouseout",e); }, getTds:function(){ this.tds = []; var view = this; while (view){ var td = view.parentTd; if(td) this.tds.push(td); view = view.parent; } }, toggleTdPath:function(show){ this.getTds(); _.each(this.tds,function(td){ show ? td.addClass('node-hgl-path'): td.removeClass('node-hgl-path'); },this); } });<|fim▁end|>
this.tpl = _.template(PrettyJSON.tpl.Leaf, state); $(this.el).html(this.tpl); return this;
<|file_name|>backends.py<|end_file_name|><|fim▁begin|>"""This file exists for backwards compatability. Please use the separate backends found in either `djangae.contrib.gauth.datastore.backends` or `djangae.contrib.gauth.sql.backends`. """ import warnings from djangae.contrib.gauth.datastore.backends import AppEngineUserAPIBackend<|fim▁hole|>warnings.warn( 'AppEngineUserAPI is deprecated. Please use the specific backends from gauth.datastore ' 'or gauth.sql instead.' ) class AppEngineUserAPI(AppEngineUserAPIBackend): pass<|fim▁end|>
<|file_name|>generate_testdata.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Generate some standard test data for debugging TensorBoard. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import bisect import math import os import os.path import random import shutil import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf tf.flags.DEFINE_string("target", None, """The directoy where serialized data will be written""") tf.flags.DEFINE_boolean("overwrite", False, """Whether to remove and overwrite TARGET if it already exists.""") FLAGS = tf.flags.FLAGS # Hardcode a start time and reseed so script always generates the same data. _start_time = 0 random.seed(0) def _MakeHistogramBuckets(): v = 1E-12 buckets = [] neg_buckets = [] while v < 1E20: buckets.append(v) neg_buckets.append(-v) v *= 1.1 # Should include DBL_MAX, but won't bother for test data. 
return neg_buckets[::-1] + [0] + buckets def _MakeHistogram(values): """Convert values into a histogram proto using logic from histogram.cc.""" limits = _MakeHistogramBuckets() counts = [0] * len(limits) for v in values: idx = bisect.bisect_left(limits, v) counts[idx] += 1 limit_counts = [(limits[i], counts[i]) for i in xrange(len(limits)) if counts[i]] bucket_limit = [lc[0] for lc in limit_counts] bucket = [lc[1] for lc in limit_counts] sum_sq = sum(v * v for v in values) return tf.HistogramProto(min=min(values), max=max(values), num=len(values), sum=sum(values), sum_squares=sum_sq, bucket_limit=bucket_limit, bucket=bucket) def WriteScalarSeries(writer, tag, f, n=5): """Write a series of scalar events to writer, using f to create values.""" step = 0 wall_time = _start_time for i in xrange(n): v = f(i) value = tf.Summary.Value(tag=tag, simple_value=v) summary = tf.Summary(value=[value]) event = tf.Event(wall_time=wall_time, step=step, summary=summary) writer.add_event(event) step += 1 wall_time += 10 def WriteHistogramSeries(writer, tag, mu_sigma_tuples, n=20): """Write a sequence of normally distributed histograms to writer.""" step = 0 wall_time = _start_time for [mean, stddev] in mu_sigma_tuples: data = [random.normalvariate(mean, stddev) for _ in xrange(n)] histo = _MakeHistogram(data) summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=histo)]) event = tf.Event(wall_time=wall_time, step=step, summary=summary) writer.add_event(event) step += 10 wall_time += 100 def WriteImageSeries(writer, tag, n_images=1): """Write a few dummy images to writer.""" step = 0 session = tf.Session() p = tf.placeholder("uint8", (1, 4, 4, 3)) s = tf.image_summary(tag, p) for _ in xrange(n_images): im = np.random.random_integers(0, 255, (1, 4, 4, 3)) summ = session.run(s, feed_dict={p: im}) writer.add_summary(summ, step) step += 20 session.close() def WriteAudioSeries(writer, tag, n_audio=1): """Write a few dummy audio clips to writer.""" step = 0 session = tf.Session() 
min_frequency_hz = 440 max_frequency_hz = 880 sample_rate = 4000 duration_frames = sample_rate * 0.5 # 0.5 seconds. frequencies_per_run = 1 num_channels = 2 p = tf.placeholder("float32", (frequencies_per_run, duration_frames, num_channels)) s = tf.audio_summary(tag, p, sample_rate) for _ in xrange(n_audio): # Generate a different frequency for each channel to show stereo works. frequencies = np.random.random_integers( min_frequency_hz, max_frequency_hz, size=(frequencies_per_run, num_channels)) tiled_frequencies = np.tile(frequencies, (1, duration_frames)) tiled_increments = np.tile( np.arange(0, duration_frames), (num_channels, 1)).T.reshape( 1, duration_frames * num_channels) tones = np.sin(2.0 * np.pi * tiled_frequencies * tiled_increments / sample_rate) tones = tones.reshape(frequencies_per_run, duration_frames, num_channels) summ = session.run(s, feed_dict={p: tones}) writer.add_summary(summ, step) step += 20 session.close() def GenerateTestData(path): """Generates the test data directory.""" run1_path = os.path.join(path, "run1") os.makedirs(run1_path) writer1 = tf.train.SummaryWriter(run1_path) WriteScalarSeries(writer1, "foo/square", lambda x: x * x) WriteScalarSeries(writer1, "bar/square", lambda x: x * x) WriteScalarSeries(writer1, "foo/sin", math.sin) WriteScalarSeries(writer1, "foo/cos", math.cos) WriteHistogramSeries(writer1, "histo1", [[0, 1], [0.3, 1], [0.5, 1], [0.7, 1], [1, 1]]) WriteImageSeries(writer1, "im1") WriteImageSeries(writer1, "im2") WriteAudioSeries(writer1, "au1") run2_path = os.path.join(path, "run2") os.makedirs(run2_path) writer2 = tf.train.SummaryWriter(run2_path) WriteScalarSeries(writer2, "foo/square", lambda x: x * x * 2) WriteScalarSeries(writer2, "bar/square", lambda x: x * x * 3) WriteScalarSeries(writer2, "foo/cos", lambda x: math.cos(x) * 2) WriteHistogramSeries(writer2, "histo1", [[0, 2], [0.3, 2], [0.5, 2], [0.7, 2], [1, 2]]) WriteHistogramSeries(writer2, "histo2", [[0, 1], [0.3, 1], [0.5, 1], [0.7, 1], [1, 1]]) 
WriteImageSeries(writer2, "im1") WriteAudioSeries(writer2, "au2") graph_def = tf.GraphDef() node1 = graph_def.node.add() node1.name = "a" node1.op = "matmul" node2 = graph_def.node.add() node2.name = "b" node2.op = "matmul" node2.input.extend(["a:0"]) writer1.add_graph(graph_def) node3 = graph_def.node.add() node3.name = "c" node3.op = "matmul" node3.input.extend(["a:0", "b:0"]) writer2.add_graph(graph_def) writer1.close() writer2.close() def main(unused_argv=None): target = FLAGS.target if not target: print("The --target flag is required.") return -1 if os.path.exists(target): if FLAGS.overwrite: if os.path.isdir(target):<|fim▁hole|> shutil.rmtree(target) else: os.remove(target) else: print("Refusing to overwrite target %s without --overwrite" % target) return -2 GenerateTestData(target) if __name__ == "__main__": tf.app.run()<|fim▁end|>
<|file_name|>s3object_test.go<|end_file_name|><|fim▁begin|>package awsat import ( "testing" "os" "path/filepath" "github.com/aws/aws-sdk-go/service/s3" "github.com/wallix/awless/aws/spec" ) func TestS3object(t *testing.T) { t.Run("create", func(t *testing.T) { f, filePath, cleanup := generateTmpFile("body content") defer cleanup() readSeeker, err := awsspec.NewProgressReader(f) if err != nil { t.Fatal(err) } awsspec.ProgressBarFactory = func(*os.File) (*awsspec.ProgressReadSeeker, error) { return readSeeker, nil } t.Run("with filename", func(t *testing.T) { Template("create s3object name=my-s3object file="+filePath+" bucket=any-bucket acl=public-read").Mock(&s3Mock{ PutObjectFunc: func(input *s3.PutObjectInput) (*s3.PutObjectOutput, error) { return &s3.PutObjectOutput{}, nil }}). ExpectInput("PutObject", &s3.PutObjectInput{ ACL: String("public-read"),<|fim▁hole|> Bucket: String("any-bucket"), Key: String("my-s3object"), Body: readSeeker, }).ExpectCommandResult("my-s3object").ExpectCalls("PutObject").Run(t) }) t.Run("no filename", func(t *testing.T) { filename := filepath.Base(filePath) Template("create s3object file="+filePath+" bucket=any-bucket acl=public-read").Mock(&s3Mock{ PutObjectFunc: func(input *s3.PutObjectInput) (*s3.PutObjectOutput, error) { return &s3.PutObjectOutput{}, nil }}). ExpectInput("PutObject", &s3.PutObjectInput{ ACL: String("public-read"), Bucket: String("any-bucket"), Key: String(filename), Body: readSeeker, }).ExpectCommandResult(filename).ExpectCalls("PutObject").Run(t) }) }) t.Run("update", func(t *testing.T) { Template("update s3object name=any-file bucket=other-bucket acl=public-read version=2").Mock(&s3Mock{ PutObjectAclFunc: func(input *s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error) { return &s3.PutObjectAclOutput{}, nil }}). 
ExpectInput("PutObjectAcl", &s3.PutObjectAclInput{ ACL: String("public-read"), Key: String("any-file"), VersionId: String("2"), Bucket: String("other-bucket"), }).ExpectCalls("PutObjectAcl").Run(t) }) t.Run("delete", func(t *testing.T) { Template("delete s3object name=any-file bucket=any-bucket").Mock(&s3Mock{ DeleteObjectFunc: func(input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) { return &s3.DeleteObjectOutput{}, nil }}). ExpectInput("DeleteObject", &s3.DeleteObjectInput{ Key: String("any-file"), Bucket: String("any-bucket"), }).ExpectCalls("DeleteObject").Run(t) }) }<|fim▁end|>
<|file_name|>issue-6458-2.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT.<|fim▁hole|>// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. extern crate debug; fn main() { format!("{:?}", None); //~ ERROR: cannot determine a type for this bounded }<|fim▁end|>
// // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
<|file_name|>hello.rs<|end_file_name|><|fim▁begin|>use std::num::strconv; <|fim▁hole|>}<|fim▁end|>
fn main() { strconv::float_to_str_bytes_common( 1.1_f64, 10u, true, strconv::SignNeg, strconv::DigAll);
<|file_name|>show.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use ast; use ast::{MetaItem, Expr,}; use codemap::Span; use ext::base::{ExtCtxt, Annotatable}; use ext::build::AstBuilder; use ext::deriving::generic::*; use ext::deriving::generic::ty::*; use parse::token; use ptr::P; pub fn expand_deriving_show(cx: &mut ExtCtxt, span: Span, mitem: &MetaItem, item: &Annotatable, push: &mut FnMut(Annotatable)) { // &mut ::std::fmt::Formatter let fmtr = Ptr(Box::new(Literal(path_std!(cx, core::fmt::Formatter))), Borrowed(None, ast::MutMutable)); let trait_def = TraitDef { span: span, attributes: Vec::new(), path: path_std!(cx, core::fmt::Debug), additional_bounds: Vec::new(), generics: LifetimeBounds::empty(), methods: vec![ MethodDef { name: "fmt", generics: LifetimeBounds::empty(),<|fim▁hole|> args: vec!(fmtr), ret_ty: Literal(path_std!(cx, core::fmt::Result)), attributes: Vec::new(), is_unsafe: false, combine_substructure: combine_substructure(Box::new(|a, b, c| { show_substructure(a, b, c) })) } ], associated_types: Vec::new(), }; trait_def.expand(cx, mitem, item, push) } /// We use the debug builders to do the heavy lifting here fn show_substructure(cx: &mut ExtCtxt, span: Span, substr: &Substructure) -> P<Expr> { // build fmt.debug_struct(<name>).field(<fieldname>, &<fieldval>)....build() // or fmt.debug_tuple(<name>).field(&<fieldval>)....build() // based on the "shape". let name = match *substr.fields { Struct(_) => substr.type_ident, EnumMatching(_, v, _) => v.node.name, EnumNonMatchingCollapsed(..) | StaticStruct(..) 
| StaticEnum(..) => { cx.span_bug(span, "nonsensical .fields in `#[derive(Debug)]`") } }; // We want to make sure we have the expn_id set so that we can use unstable methods let span = Span { expn_id: cx.backtrace(), .. span }; let name = cx.expr_lit(span, ast::Lit_::LitStr(token::get_ident(name), ast::StrStyle::CookedStr)); let mut expr = substr.nonself_args[0].clone(); match *substr.fields { Struct(ref fields) | EnumMatching(_, _, ref fields) => { if fields.is_empty() || fields[0].name.is_none() { // tuple struct/"normal" variant expr = cx.expr_method_call(span, expr, token::str_to_ident("debug_tuple"), vec![name]); for field in fields { // Use double indirection to make sure this works for unsized types let field = cx.expr_addr_of(field.span, field.self_.clone()); let field = cx.expr_addr_of(field.span, field); expr = cx.expr_method_call(span, expr, token::str_to_ident("field"), vec![field]); } } else { // normal struct/struct variant expr = cx.expr_method_call(span, expr, token::str_to_ident("debug_struct"), vec![name]); for field in fields { let name = cx.expr_lit(field.span, ast::Lit_::LitStr( token::get_ident(field.name.clone().unwrap()), ast::StrStyle::CookedStr)); // Use double indirection to make sure this works for unsized types let field = cx.expr_addr_of(field.span, field.self_.clone()); let field = cx.expr_addr_of(field.span, field); expr = cx.expr_method_call(span, expr, token::str_to_ident("field"), vec![name, field]); } } } _ => unreachable!() } cx.expr_method_call(span, expr, token::str_to_ident("finish"), vec![]) }<|fim▁end|>
explicit_self: borrowed_explicit_self(),
<|file_name|>SailBooleanExprQuery.java<|end_file_name|><|fim▁begin|>package de.unikiel.inf.comsys.neo4j.inference.sail; /* * #%L * neo4j-sparql-extension * %% * Copyright (C) 2014 Niclas Hoyer * %% * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program. If not, see * <http://www.gnu.org/licenses/gpl-3.0.html>. * #L% */ import de.unikiel.inf.comsys.neo4j.inference.QueryRewriter; import org.openrdf.query.algebra.TupleExpr; import org.openrdf.query.parser.ParsedBooleanQuery; import org.openrdf.repository.sail.SailBooleanQuery; import org.openrdf.repository.sail.SailRepositoryConnection; /** * A subclass of {@link SailBooleanQuery} with a public constructor to * pass in a boolean query containing a tuple expression.<|fim▁hole|> * that is used to create a query from a {@link TupleExpr}. * * @see QueryRewriter */ public class SailBooleanExprQuery extends SailBooleanQuery { public SailBooleanExprQuery(ParsedBooleanQuery booleanQuery, SailRepositoryConnection sailConnection) { super(booleanQuery, sailConnection); } }<|fim▁end|>
* * The original constructor of {@link SailBooleanQuery} is protected, thus * it is not possible to create a new boolean query from a parsed query
<|file_name|>document.py<|end_file_name|><|fim▁begin|>"""DocumentSource scrapes MDN wiki documents.""" import logging import dateutil from .base import DocumentBaseSource logger = logging.getLogger("kuma.scraper") class DocumentSource(DocumentBaseSource): """Coordinate scraping and local cloning of an MDN Document.""" OPTIONS = DocumentBaseSource.STANDARD_DOC_OPTIONS def load_and_validate_existing(self, storage): """Load the document from storage in simple cases.""" just_this_doc = ( not self.translations and self.depth == 0 and self.revisions == 1 ) if not self.force and just_this_doc and self.locale and self.slug: document = storage.get_document(self.locale, self.slug) if document: return True, [] return False, [] def load_prereqs(self, requester, storage): """Load the data needed for a document.""" data = {"needs": []} if self.locale is None and self.slug is None: raise self.SourceError('Not a document path "%s"', self.path) # Load data, gathering further source needs self.load_prereq_parent_topic(storage, data) self.load_prereq_redirect_check(storage, data) if data.get("has_redirect_check"): self.load_prereq_redirect(storage, data) if data.get("is_standard_page"): self.load_prereq_metadata(storage, data) self.load_prereq_english_parent(storage, data) self.load_prereq_history(storage, data) self.load_prereq_children(storage, data) return not data["needs"], data def load_prereq_parent_topic(self, storage, data): """Load the parent topic, if a child page.""" if not self.parent_slug: return # No parent to load parent_topic = storage.get_document(self.locale, self.parent_slug) if parent_topic is None: data["needs"].append(("document", self.parent_path, {})) else: data["parent_topic"] = parent_topic def load_prereq_redirect_check(self, storage, data): """Check the URL for redirects.""" redirect = storage.get_document_redirect(self.locale, self.slug) if redirect is None: data["needs"].append(("document_redirect", self.path, {})) else: data["has_redirect_check"] = True 
data["redirect_to"] = redirect.get("redirect_to") def load_prereq_redirect(self, storage, data): """Load the destination of a redirect.""" data["is_standard_page"] = data.get("has_redirect_check") redirect_to = data.get("redirect_to") if not redirect_to: return # Not a redirect, don't follow # Load the destination page rd_locale, rd_slug = self.locale_and_slug(redirect_to) redirect = storage.get_document(rd_locale, rd_slug) data["is_standard_page"] = False if redirect is None: data["needs"].append(("document", redirect_to, {})) def load_prereq_metadata(self, storage, data): """Load the document metadata.""" meta = storage.get_document_metadata(self.locale, self.slug) if meta is None: data["needs"].append(("document_meta", self.path, self.current_options())) elif "error" in meta: raise self.SourceError("Error getting metadata for %s", self.path) elif meta: data["id"] = meta["id"] data["locale"] = meta["locale"] data["modified"] = dateutil.parser.parse(meta["modified"]) data["slug"] = meta["slug"] data["tags"] = meta["tags"] data["title"] = meta["title"] data["translations"] = meta["translations"] # Redirects don't have UUIDs if "uuid" in meta: data["uuid"] = meta["uuid"] else: logger.warning("No uuid: %s", self.path) def load_prereq_english_parent(self, storage, data): """Load the English parent, if this is a translation.""" if self.locale == "en-US": return # No English parent for English docs if "translations" not in data: return # Metadata not loaded yet # For translations - have we loaded the English document? 
for translation in data["translations"]: if translation["locale"] == "en-US": en_path = self.decode_href(translation["url"]) try: en_locale, en_slug = self.locale_and_slug(en_path) except ValueError: raise self.SourceError( 'Invalid meta for "%s": In translations,' ' invalid path "%s" for "en-US"', self.path, en_path, )<|fim▁hole|> en_doc = storage.get_document(en_locale, en_slug) if en_doc is None: data["needs"].append(("document", en_path, {})) else: data["parent"] = en_doc def load_prereq_history(self, storage, data): """Load the revision history.""" history = storage.get_document_history(self.locale, self.slug) if history is None: data["needs"].append( ("document_history", self.path, {"revisions": self.revisions}) ) elif len(history) == 0: raise self.SourceError('Empty history for document "%s"', self.path) def load_prereq_children(self, storage, data): """Load the document children.""" if self.depth == 0: return children = storage.get_document_children(self.locale, self.slug) if children is None: options = self.current_options() data["needs"].append(("document_children", self.path, options)) def save_data(self, storage, data): """Save the document as a redirect or full document.""" redirect_to = data.get("redirect_to") if redirect_to: # Prepare data for a redirect document doc_data = { "locale": self.locale, "slug": self.slug, "redirect_to": redirect_to, } else: # Prepare data for a full document keys = ( "id", "locale", "modified", "parent", "parent_topic", "slug", "tags", "title", "uuid", ) doc_data = {} for key in keys: if key in data: doc_data[key] = data[key] if doc_data["slug"] != self.slug: logger.warning( 'Meta slug "%s" does not match slug for "%s".', doc_data["slug"], self.path, ) doc_data["slug"] = self.slug if doc_data["locale"] != self.locale: logger.warning( 'Meta locale "%s" does not match locale for "%s".', doc_data["locale"], self.path, ) doc_data["locale"] = self.locale storage.save_document(doc_data) return [("document_current", self.path, 
{"revisions": self.revisions})]<|fim▁end|>
else:
<|file_name|>sparql_select_result.py<|end_file_name|><|fim▁begin|>"""sparql_select_result.py Data structure for storing the results of SPARQL SELECT queries""" __all__ = ["SPARQLSelectResult"] from xml.etree import ElementTree as et class SPARQLSelectResult(object): def __init__(self): self.variables = [] self.results = [] def parse(self, s): tree = et.fromstring(s) head = tree.find("{http://www.w3.org/2005/sparql-results#}head") self.variables = [x.get("name") for x in head.findall("{http://www.w3.org/2005/sparql-results#}variable")] results = tree.find("{http://www.w3.org/2005/sparql-results#}results").findall("{http://www.w3.org/2005/sparql-results#}result") self.results = [] for result in results: d = {} bindings = result.findall("{http://www.w3.org/2005/sparql-results#}binding") for binding in bindings: uri = binding.find("{http://www.w3.org/2005/sparql-results#}uri") if uri is None: literal = binding.find("{http://www.w3.org/2005/sparql-results#}literal") if literal is None: raise InvalidSPARQLSelectResultSyntax("Neither URI or Literal were found") else: d[binding.get("name")] = (literal.text, None, None) else:<|fim▁hole|> def get_variables(self): return self.variables def get_results(self): return self.results<|fim▁end|>
d[binding.get("name")] = uri.text self.results.append(d)
<|file_name|>test_link.py<|end_file_name|><|fim▁begin|>from common_fixtures import * # NOQA def test_link_instance_stop_start(super_client, client, context): target1 = context.create_container(ports=['180', '122/udp']) target2 = context.create_container(ports=['280', '222/udp']) c = context.create_container(instanceLinks={ 'target1_link': target1.id, 'target2_link': target2.id}) assert c.state == 'running' ports = set() for link in c.instanceLinks(): for port in super_client.reload(link).data.fields.ports: ports.add('{}:{}'.format(port.publicPort, port.privatePort)) assert len(ports) > 0 new_ports = set() c = client.wait_success(c.stop()) assert c.state == 'stopped' for link in super_client.reload(c).instanceLinks(): assert len(link.data.fields.ports) == 2 for port in link.data.fields.ports: new_ports.add('{}:{}'.format(port.publicPort, port.privatePort)) assert ports == new_ports new_ports = set() c = client.wait_success(c.start()) assert c.state == 'running' for link in super_client.reload(c).instanceLinks(): assert len(link.data.fields.ports) == 2<|fim▁hole|> assert ports == new_ports def _find_agent_instance_ip(nsp, source): assert source is not None vnet_id = source.nics()[0].vnetId assert vnet_id is not None for agent_instance in nsp.instances(): if agent_instance.nics()[0].vnetId == vnet_id: assert agent_instance.primaryIpAddress is not None return agent_instance.primaryIpAddress assert False, 'Failed to find agent instance for ' + source.id def test_link_create(client, super_client, context): target1 = context.create_container(ports=['180', '122/udp']) target2 = context.create_container(ports=['280', '222/udp']) c = context.create_container(instanceLinks={ 'target1_link': target1.id, 'target2_link': target2.id}) assert c.state == 'running' assert len(c.instanceLinks()) == 2 assert len(target1.targetInstanceLinks()) == 1 assert len(target2.targetInstanceLinks()) == 1 links = c.instanceLinks() names = set([x.linkName for x in links]) assert names == 
set(['target1_link', 'target2_link']) for link in links: link = super_client.reload(link) assert link.state == 'active' assert link.instanceId == c.id ip_address = _find_agent_instance_ip(context.nsp, super_client.reload(c)) if link.linkName == 'target1_link': assert link.targetInstanceId == target1.id assert len(link.data.fields.ports) == 2 for port in link.data.fields.ports: assert port.ipAddress == ip_address assert port.publicPort is not None if port.privatePort == 180: assert port.protocol == 'tcp' elif port.privatePort == 122: assert port.protocol == 'udp' else: assert False if link.linkName == 'target2_link': assert link.targetInstanceId == target2.id assert len(link.data.fields.ports) == 2 for port in link.data.fields.ports: assert port.ipAddress == ip_address assert port.publicPort is not None if port.privatePort == 280: assert port.protocol == 'tcp' elif port.privatePort == 222: assert port.protocol == 'udp' else: assert False def test_link_update(client, context): target1 = context.create_container() target2 = context.create_container() c = context.create_container(instanceLinks={ 'target1_link': target1.id, }) link = c.instanceLinks()[0] assert link.targetInstanceId == target1.id link.targetInstanceId = target2.id link = client.update(link, link) assert link.state == 'updating-active' link = client.wait_success(link) assert link.targetInstanceId == target2.id assert link.state == 'active' def test_link_remove_restore(client, context): target1 = context.create_container() c = client.create_container(imageUuid=context.image_uuid, startOnCreate=False, instanceLinks={ 'target1_link': target1.id}) c = client.wait_success(c) links = c.instanceLinks() assert len(links) == 1 link = links[0] assert link.state == 'inactive' c = client.wait_success(c.start()) link = client.reload(link) assert c.state == 'running' assert link.state == 'active' c = client.wait_success(c.stop()) link = client.reload(link) assert c.state == 'stopped' assert link.state == 'inactive' c 
= client.wait_success(client.delete(c)) link = client.reload(link) assert c.state == 'removed' assert link.state == 'inactive' c = client.wait_success(c.restore()) link = client.reload(link) assert c.state == 'stopped' assert link.state == 'inactive' c = client.wait_success(client.delete(c)) link = client.reload(link) assert c.state == 'removed' assert link.state == 'inactive' c = client.wait_success(c.purge()) link = client.reload(link) assert c.state == 'purged' assert link.state == 'removed' def test_null_links(context): c = context.create_container(instanceLinks={ 'null_link': None }) links = c.instanceLinks() assert len(links) == 1 assert links[0].state == 'active' assert links[0].linkName == 'null_link' assert links[0].targetInstanceId is None def test_link_timeout(super_client, client, context): t = client.create_container(imageUuid=context.image_uuid, startOnCreate=False) c = super_client.create_container(accountId=context.project.id, imageUuid=context.image_uuid, instanceLinks={'t': t.id}, data={'linkWaitTime': 100}) c = client.wait_transitioning(c) assert c.state == 'running' def test_link_remove_instance_restart(client, super_client, context): target1 = context.create_container() c = client.create_container(imageUuid=context.image_uuid, startOnCreate=False, instanceLinks={ 'target1_link': target1.id}) c = client.wait_success(c) links = c.instanceLinks() assert len(links) == 1 link = links[0] assert link.state == 'inactive' c = client.wait_success(c.start()) link = client.reload(link) assert c.state == 'running' assert link.state == 'active' c = client.wait_success(c.stop()) assert c.state == 'stopped' link = client.reload(link) link = super_client.wait_success(link.remove()) assert link.state == 'removed' c = client.wait_success(c.start()) assert c.state == 'running'<|fim▁end|>
for port in link.data.fields.ports: new_ports.add('{}:{}'.format(port.publicPort, port.privatePort))
<|file_name|>capability_list.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2017 David Renshaw and contributors // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. //! List of capabilities. 
use alloc::boxed::Box; use core::marker::PhantomData; use crate::capability::{FromClientHook}; use crate::private::capability::ClientHook; use crate::private::layout::{ListReader, ListBuilder, PointerReader, PointerBuilder, Pointer}; use crate::traits::{FromPointerReader, FromPointerBuilder, IndexMove, ListIter}; use crate::Result; #[derive(Copy, Clone)] pub struct Owned<T> where T: FromClientHook { marker: PhantomData<T>, } impl<'a, T> crate::traits::Owned<'a> for Owned<T> where T: FromClientHook { type Reader = Reader<'a, T>; type Builder = Builder<'a, T>; } pub struct Reader<'a, T> where T: FromClientHook { marker: PhantomData<T>, reader: ListReader<'a> } impl <'a, T> Clone for Reader<'a, T> where T: FromClientHook { fn clone(&self) -> Reader<'a, T> { Reader { marker : self.marker, reader : self.reader } } } impl <'a, T> Copy for Reader<'a, T> where T: FromClientHook {} impl <'a, T> Reader<'a, T> where T: FromClientHook { pub fn len(&self) -> u32 { self.reader.len() } pub fn iter(self) -> ListIter<Reader<'a, T>, Result<T>> { ListIter::new(self, self.len()) } } impl <'a, T> Reader<'a, T> where T: FromClientHook { pub fn reborrow<'b>(&'b self) -> Reader<'b, T> { Reader { reader: self.reader, marker: PhantomData } } } impl <'a, T> FromPointerReader<'a> for Reader<'a, T> where T: FromClientHook { fn get_from_pointer(reader: &PointerReader<'a>, default: Option<&'a [crate::Word]>) -> Result<Reader<'a, T>> { Ok(Reader { reader: reader.get_list(Pointer, default)?, marker: PhantomData }) } } impl <'a, T> Reader<'a, T> where T: FromClientHook { pub fn get(self, index: u32) -> Result<T> { assert!(index < self.len()); Ok(FromClientHook::new(self.reader.get_pointer_element(index).get_capability()?)) } } impl <'a, T> IndexMove<u32, Result<T>> for Reader<'a, T> where T: FromClientHook { fn index_move(&self, index: u32) -> Result<T> { self.get(index) } } pub struct Builder<'a, T> where T: FromClientHook { marker: PhantomData<T>, builder: ListBuilder<'a> } impl <'a, T> 
Builder<'a, T> where T: FromClientHook { pub fn len(&self) -> u32 { self.builder.len() } pub fn into_reader(self) -> Reader<'a, T> { Reader { marker: PhantomData,<|fim▁hole|> reader: self.builder.into_reader(), } } pub fn set(&mut self, index: u32, value: Box<dyn ClientHook>) { assert!(index < self.len()); self.builder.reborrow().get_pointer_element(index).set_capability(value); } } impl <'a, T> Builder<'a, T> where T: FromClientHook { pub fn reborrow<'b>(&'b mut self) -> Builder<'b, T> { Builder { builder: self.builder, marker: PhantomData } } } impl <'a, T> FromPointerBuilder<'a> for Builder<'a, T> where T: FromClientHook { fn init_pointer(builder: PointerBuilder<'a>, size: u32) -> Builder<'a, T> { Builder { marker: PhantomData, builder: builder.init_list(Pointer, size), } } fn get_from_pointer(builder: PointerBuilder<'a>, default: Option<&'a [crate::Word]>) -> Result<Builder<'a, T>> { Ok(Builder { marker: PhantomData, builder: builder.get_list(Pointer, default)? }) } } impl <'a, T> Builder<'a, T> where T: FromClientHook { pub fn get(self, index: u32) -> Result<T> { assert!(index < self.len()); Ok(FromClientHook::new(self.builder.get_pointer_element(index).get_capability()?)) } } impl <'a, T> crate::traits::SetPointerBuilder for Reader<'a, T> where T: FromClientHook { fn set_pointer_builder<'b>(pointer: crate::private::layout::PointerBuilder<'b>, value: Reader<'a, T>, canonicalize: bool) -> Result<()> { pointer.set_list(&value.reader, canonicalize) } } impl <'a, T> ::core::iter::IntoIterator for Reader<'a, T> where T: FromClientHook { type Item = Result<T>; type IntoIter = ListIter<Reader<'a, T>, Self::Item>; fn into_iter(self) -> Self::IntoIter { self.iter() } }<|fim▁end|>
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # flake8: noqa<|fim▁hole|> class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Item', fields=[ ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)), ('text', models.CharField(max_length=32, default='')), ], ), ]<|fim▁end|>
from __future__ import unicode_literals from django.db import models, migrations
<|file_name|>createrpms.py<|end_file_name|><|fim▁begin|># # Script to build RPM's using latest LIS code, it will build the RPM's and copy it to LISISO folder # currently we have two source tree one for RHEL 5 and one for RHEL6 # # import os import sys import shutil import subprocess homedir = os.getcwd() directory = "lis-next" if os.path.exists(directory): shutil.rmtree(directory) def run(cmd): output = subprocess.call(cmd,shell=True) return output # Clean up LISISO direcotry #print "Cleaning up LISISO direcroty" #run("./cleanupISODir.sh") def buildrhel5(): print "Cleaning up LISISO direcroty" # run("./cleanupISODir5.sh") os.makedirs(directory) run("git clone https://github.com/LIS/lis-next") os.chdir(directory+"/hv-rhel5.x/") run("git checkout 4.1.3") run("tar -cvzf lis-next-rh5.tar.gz hv") shutil.copy("lis-next-rh5.tar.gz" , homedir+"/rh5/SOURCES/") os.chdir(homedir) shutil.rmtree(directory) found = False rhel5buildpath = homedir+"/rh5/pbuild" pbuildconfigfile = homedir+"/rh5/pbuild/.pbuild" pbuildMakefile = homedir+"/rh5/pbuild/Makefile" shutil.copy(pbuildconfigfile , "/root/") with open("/root/.pbuild", "a") as file: file.write("logdir: "+homedir+"/BuilgLogDir/rh5") with open(pbuildMakefile, "r") as myfile: for line in myfile : if "homedir=" in line: found = True if not found: with open(pbuildMakefile, "a") as myfile: myfile.write("homedir="+homedir) # Change direcoty to buildpath, before building . os.chdir(rhel5buildpath) # Now clean the destination VM's . 
clean = run("make clean") if clean : print "make clean failed" sys.exit(1) send = run("make send") if send : print "make send failed" sys.exit(1) make = run("make") if make : print "make failed" sys.exit(1) os.remove("/root/.pbuild") os.chdir(homedir) # run("./copyallrpmsrhel5.sh") def buildrhel6(): print "Cleaning up LISISO direcroty" # run("./cleanupISODir6.sh") os.makedirs(directory) run("git clone https://github.com/LIS/lis-next") os.chdir(directory+"/hv-rhel6.x/") run("git checkout 4.1.3") run("tar -cvzf lis-next-rh6.tar.gz hv") shutil.copy("lis-next-rh6.tar.gz" , homedir+"/rh6/SOURCES/") os.chdir(homedir) shutil.rmtree(directory) found = False rhel6buildpath=homedir+"/rh6/pbuild" pbuildconfigfile=homedir+"/rh6/pbuild/.pbuild" shutil.copy(pbuildconfigfile , "/root/") with open("/root/.pbuild", "a") as file: file.write("logdir: "+homedir+"/BuilgLogDir/rh6") pbuildMakefile = homedir+"/rh6/pbuild/Makefile" with open(pbuildMakefile, "r") as myfile: for line in myfile : if "homedir=" in line: found = True if not found: with open(pbuildMakefile, "a") as myfile: myfile.write("homedir="+homedir) # Change direcoty to buildpath, before building . os.chdir(rhel6buildpath) # Now clean the destination VM's . 
clean = run("make clean") if clean : print "make clean failed" sys.exit(1) send = run("make send") if send : print "make send failed" sys.exit(1) make = run("make") if make : print "make failed" sys.exit(1) os.remove("/root/.pbuild") os.chdir(homedir) #run("./copyallrpmsrhel6.sh") def buildrhel7(): print "Cleaning up LISISO direcroty" # run("./cleanupISODir7.sh") os.makedirs(directory) run("git clone https://github.com/LIS/lis-next") os.chdir(directory+"/hv-rhel7.x/") run("git checkout 4.1.3") run("tar -cvzf lis-next-rh7.tar.gz hv") shutil.copy("lis-next-rh7.tar.gz" , homedir+"/rh7/SOURCES/") os.chdir(homedir) shutil.rmtree(directory) found = False rhel7buildpath = homedir+"/rh7/pbuild" pbuildconfigfile = homedir+"/rh7/pbuild/.pbuild" shutil.copy(pbuildconfigfile , "/root/") with open("/root/.pbuild", "a") as file: file.write("logdir: "+homedir+"/BuilgLogDir/rh7") pbuildMakefile = homedir+"/rh7/pbuild/Makefile" with open(pbuildMakefile, "r") as myfile: for line in myfile : if "homedir=" in line: found = True if not found: with open(pbuildMakefile, "a") as myfile:<|fim▁hole|> # Now clean the destination VM's . clean = run("make clean") if clean : print "make clean failed" sys.exit(1) send = run("make send") if send : print "make send failed" sys.exit(1) make = run("make") if make : print "make failed" sys.exit(1) os.remove("/root/.pbuild") os.chdir(homedir) #run("./copyallrpmsrhel7.sh") ### Main entry for script.### def main(argv): for arg in sys.argv: if arg == "rh5": print "RHEL5 Build initializing...." buildrhel5() elif arg == "rh6": print "RHEL6 Build initializing...." buildrhel6() elif arg == "rh7": print "RHEL7 Build initializing...." buildrhel7() elif arg == "all": print "RHEL5 , RHEL6 and RHEL 7 Build initializing...." buildrhel5() buildrhel6() buildrhel7() elif len(sys.argv) == 1: print "USAGE : createrpms.py <rh5 , rh6 or all>" sys.exit(2) # Tar the LISISO directory . 
#run("tar -cvzf lis-rpms-autogen.tar.gz LISISO") if __name__ == "__main__": main(sys.argv[1:])<|fim▁end|>
myfile.write("homedir="+homedir) # Change direcoty to buildpath, before building . os.chdir(rhel7buildpath)
<|file_name|>DogfightErrorState.cpp<|end_file_name|><|fim▁begin|>/* * Copyright 2010-2016 OpenXcom Developers. * * This file is part of OpenXcom. * * OpenXcom is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * OpenXcom is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with OpenXcom. If not, see <http://www.gnu.org/licenses/>. */ #include "DogfightErrorState.h" #include "../Engine/Game.h" #include "../Mod/Mod.h" #include "../Engine/LocalizedText.h" #include "../Interface/TextButton.h" #include "../Interface/Window.h" #include "../Interface/Text.h" #include "../Engine/Options.h" #include "../Savegame/Craft.h" namespace OpenXcom { /** * Initializes all the elements in a Dogfight Error window. * @param game Pointer to the core game. * @param state Pointer to the Geoscape state. * @param msg Error message. 
*/ DogfightErrorState::DogfightErrorState(Craft *craft, const std::wstring &msg) : _craft(craft) { _screen = false; // Create objects _window = new Window(this, 208, 120, 24, 48, POPUP_BOTH); _btnIntercept = new TextButton(180, 12, 38, 128); _btnBase = new TextButton(180, 12, 38, 144); _txtCraft = new Text(198, 16, 29, 63); _txtMessage = new Text(198, 20, 29, 94); // Set palette setInterface("dogfightInfo"); add(_window, "window", "dogfightInfo"); add(_btnIntercept, "button", "dogfightInfo"); add(_btnBase, "button", "dogfightInfo"); add(_txtCraft, "text", "dogfightInfo"); add(_txtMessage, "text", "dogfightInfo"); centerAllSurfaces(); // Set up objects _window->setBackground(_game->getMod()->getSurface("BACK15.SCR")); _btnIntercept->setText(tr("STR_CONTINUE_INTERCEPTION_PURSUIT")); _btnIntercept->onMouseClick((ActionHandler)&DogfightErrorState::btnInterceptClick); _btnIntercept->onKeyboardPress((ActionHandler)&DogfightErrorState::btnInterceptClick, Options::keyCancel); _btnBase->setText(tr("STR_RETURN_TO_BASE")); _btnBase->onMouseClick((ActionHandler)&DogfightErrorState::btnBaseClick); _btnBase->onKeyboardPress((ActionHandler)&DogfightErrorState::btnBaseClick, Options::keyOk); _txtCraft->setAlign(ALIGN_CENTER); _txtCraft->setBig(); _txtCraft->setText(_craft->getName(_game->getLanguage())); _txtMessage->setAlign(ALIGN_CENTER); _txtMessage->setWordWrap(true); _txtMessage->setText(msg); } /** * */ DogfightErrorState::~DogfightErrorState() { }<|fim▁hole|>/** * Closes the window. * @param action Pointer to an action. */ void DogfightErrorState::btnInterceptClick(Action *) { _game->popState(); } /** * Returns the craft to base. * @param action Pointer to an action. */ void DogfightErrorState::btnBaseClick(Action *) { _craft->returnToBase(); _game->popState(); } }<|fim▁end|>
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 """ Unit tests for the Grammar class and for the GrammarAnalyzer class. Tests each grammar in the "grammars" folder against a variety of strings. """ import unittest from grammar import Grammar from grammaranalyzer import GrammarAnalyzer class TestGrammar(unittest.TestCase): def test_nonexistent_file(self): # Ensure no exceptions are thrown. grammar = Grammar("nonexistent.json") self.assertEqual(grammar.get_desc(), "") def test_grammar_load(self): grammar = Grammar("grammars/grammar1.json") self.assertEqual(grammar.get_desc(), "{a^n # b^n | n > 0}") def test_grammar_productions(self): grammar = Grammar("grammars/grammar1.json") # Check start variable productions. rules = grammar.produces("S") self.assertEqual(rules, ["aAb"]) rules = grammar.produces("A") self.assertEqual(rules, ["aAb", "#"]) # Check nonexistent variable productions. rules = grammar.produces("N") self.assertFalse(rules) def test_grammar_rules(self): grammar = Grammar("grammars/grammar1.json") # Check that the correct rules are returned. rule = grammar.get_rule("S", "a") self.assertEqual(rule, "aAb") rule = grammar.get_rule("A", "#") self.assertEqual(rule, "#") # Check nonexistent input symbol. rule = grammar.get_rule("S", "k") self.assertFalse(rule) # Check nonexistent variable. rule = grammar.get_rule("N", "a") self.assertFalse(rule) class TestGrammarAnalyzer(unittest.TestCase):<|fim▁hole|> # Check accepted strings. 
self.assertTrue(grammar_analyzer.test_string("a#b")) self.assertTrue(grammar_analyzer.test_string("aa#bb")) self.assertTrue(grammar_analyzer.test_string("aaa#bbb")) self.assertTrue(grammar_analyzer.test_string("aaaa#bbbb")) self.assertTrue(grammar_analyzer.test_string("aaaaa#bbbbb")) self.assertTrue(grammar_analyzer.test_string("aaaaaa#bbbbbb")) self.assertTrue(grammar_analyzer.test_string("aaaaaaa#bbbbbbb")) self.assertTrue(grammar_analyzer.test_string("aaaaaaaa#bbbbbbbb")) self.assertTrue(grammar_analyzer.test_string("aaaaaaaaa#bbbbbbbbb")) self.assertTrue(grammar_analyzer.test_string("aaaaaaaaaa#bbbbbbbbbb")) # Check rejected strings. self.assertFalse(grammar_analyzer.test_string("xxx")) self.assertFalse(grammar_analyzer.test_string("")) self.assertFalse(grammar_analyzer.test_string("#")) self.assertFalse(grammar_analyzer.test_string("a")) self.assertFalse(grammar_analyzer.test_string("aa#b")) self.assertFalse(grammar_analyzer.test_string("a#bb")) self.assertFalse(grammar_analyzer.test_string("asdf")) self.assertFalse(grammar_analyzer.test_string("aaaa#bbbbbb")) def test_grammar2(self): grammar = Grammar("grammars/grammar2.json") grammar_analyzer = GrammarAnalyzer(grammar) # Check accepted strings. self.assertTrue(grammar_analyzer.test_string("#")) self.assertTrue(grammar_analyzer.test_string("0#0")) self.assertTrue(grammar_analyzer.test_string("1#1")) self.assertTrue(grammar_analyzer.test_string("01#10")) self.assertTrue(grammar_analyzer.test_string("10#01")) self.assertTrue(grammar_analyzer.test_string("010#010")) self.assertTrue(grammar_analyzer.test_string("1111#1111")) self.assertTrue(grammar_analyzer.test_string("010001#100010")) self.assertTrue(grammar_analyzer.test_string("0100011#1100010")) self.assertTrue(grammar_analyzer.test_string("01000101#10100010")) # Check rejected strings. 
self.assertFalse(grammar_analyzer.test_string("xxx")) self.assertFalse(grammar_analyzer.test_string("")) self.assertFalse(grammar_analyzer.test_string("0")) self.assertFalse(grammar_analyzer.test_string("0#1")) self.assertFalse(grammar_analyzer.test_string("1#10")) self.assertFalse(grammar_analyzer.test_string("01#01")) self.assertFalse(grammar_analyzer.test_string("11#111")) self.assertFalse(grammar_analyzer.test_string("111#11")) self.assertFalse(grammar_analyzer.test_string("111#110")) self.assertFalse(grammar_analyzer.test_string("0111#110")) def test_grammar3(self): grammar = Grammar("grammars/grammar3.json") grammar_analyzer = GrammarAnalyzer(grammar) # Check accepted strings. self.assertTrue(grammar_analyzer.test_string("a#b#c#")) self.assertTrue(grammar_analyzer.test_string("a#b#cc#")) self.assertTrue(grammar_analyzer.test_string("a#b#ccc#")) self.assertTrue(grammar_analyzer.test_string("a#b#cccc#")) self.assertTrue(grammar_analyzer.test_string("a#b#ccccc#")) self.assertTrue(grammar_analyzer.test_string("aa#bb#c#")) self.assertTrue(grammar_analyzer.test_string("aa#bb#cc#")) self.assertTrue(grammar_analyzer.test_string("aa#bb#ccc#")) self.assertTrue(grammar_analyzer.test_string("aa#bb#cccc#")) self.assertTrue(grammar_analyzer.test_string("aa#bb#ccccc#")) self.assertTrue(grammar_analyzer.test_string("aaaa#bbbb#c#")) self.assertTrue(grammar_analyzer.test_string("aaaaa#bbbbb#c#")) self.assertTrue(grammar_analyzer.test_string("aaaaa#bbbbb#cc#")) self.assertTrue(grammar_analyzer.test_string("aaaaa#bbbbb#ccc#")) self.assertTrue(grammar_analyzer.test_string("aaaaa#bbbbb#cccc#")) self.assertTrue(grammar_analyzer.test_string("aaaaa#bbbbb#ccccc#")) # Check rejected strings. 
self.assertFalse(grammar_analyzer.test_string("xxx")) self.assertFalse(grammar_analyzer.test_string("")) self.assertFalse(grammar_analyzer.test_string("a")) self.assertFalse(grammar_analyzer.test_string("a#b")) self.assertFalse(grammar_analyzer.test_string("a#b#c")) self.assertFalse(grammar_analyzer.test_string("####")) self.assertFalse(grammar_analyzer.test_string("abcd")) self.assertFalse(grammar_analyzer.test_string("aaaaa#bbb#c#")) self.assertFalse(grammar_analyzer.test_string("aaaaa##ccccc#")) self.assertFalse(grammar_analyzer.test_string("aaaa##ccccc#")) self.assertFalse(grammar_analyzer.test_string("aaa##ccccc#")) def test_grammar4(self): grammar = Grammar("grammars/grammar4.json") grammar_analyzer = GrammarAnalyzer(grammar) # Check accepted strings. self.assertTrue(grammar_analyzer.test_string("a#b#c#d")) self.assertTrue(grammar_analyzer.test_string("aa#bb#c#d")) self.assertTrue(grammar_analyzer.test_string("a#b#cc#dd")) self.assertTrue(grammar_analyzer.test_string("aaa#bbb#c#d")) self.assertTrue(grammar_analyzer.test_string("a#b#ccc#ddd")) self.assertTrue(grammar_analyzer.test_string("aaaa#bbbb#c#d")) self.assertTrue(grammar_analyzer.test_string("a#b#cccc#dddd")) self.assertTrue(grammar_analyzer.test_string("aa#bb#cccc#dddd")) self.assertTrue(grammar_analyzer.test_string("aaa#bbb#cccc#dddd")) self.assertTrue(grammar_analyzer.test_string("aaaa#bbbb#ccccc#ddddd")) self.assertTrue(grammar_analyzer.test_string("a#b#cccccc#dddddd")) self.assertTrue(grammar_analyzer.test_string("aaaaaaa#bbbbbbb#c#d")) # Check rejected strings. 
self.assertFalse(grammar_analyzer.test_string("xxx")) self.assertFalse(grammar_analyzer.test_string("")) self.assertFalse(grammar_analyzer.test_string("#")) self.assertFalse(grammar_analyzer.test_string("a#b#c#")) self.assertFalse(grammar_analyzer.test_string("#b#c#d")) self.assertFalse(grammar_analyzer.test_string("a#bb#c#d")) self.assertFalse(grammar_analyzer.test_string("a#b#c#dd")) self.assertFalse(grammar_analyzer.test_string("a#bb#c#dd")) self.assertFalse(grammar_analyzer.test_string("aa#bb#cc#dd#")) self.assertFalse(grammar_analyzer.test_string("aaa#bbb#ccc#dddd")) self.assertFalse(grammar_analyzer.test_string("aaa#bbb#ccc#dddd##"))<|fim▁end|>
def test_grammar1(self): grammar = Grammar("grammars/grammar1.json") grammar_analyzer = GrammarAnalyzer(grammar)
<|file_name|>UI.js<|end_file_name|><|fim▁begin|>"use strict"; exports.buttonEvent = function(id) { return function(cb) { return function() { var el = document.getElementById(id); el.addEventListener('click', function(ev) { cb(); }); } }; }; exports.keydownEvent = function(el) { return function(cb) { return function() { window.addEventListener('keydown', function(ev) { cb(ev)(); }); } }; }; exports.resizeEvent = function(cb) { return function() { var resizeDelay = 250; // ms delay before running resize logic var resizeTimeout = null; var throttled = function() { if (resizeTimeout) { clearTimeout(resizeTimeout); } resizeTimeout = setTimeout(function() { resizeTimeout = null; cb(exports.windowInnerSize())(); }, resizeDelay); }; window.addEventListener('resize', throttled, false); }; }; exports.windowInnerSize = function() { var w = window.innerWidth; var h = window.innerHeight; return { width: w, height: h }; }; exports.setWindow = function(k) { return function(v) { return function() { window[k] = v; }; }; }; exports.setElementContents = function(el) { return function(html) { return function() { el.innerHTML = html; }; }; }; <|fim▁hole|>exports.initDebugDiv = function(radius) { return function() { var view = document.getElementById("browser"); var div = document.getElementById(debugDivId); if (!div) { div = document.createElement("div"); view.appendChild(div); } div.id = debugDivId; div.style['position'] = "relative"; div.style['left'] = "0.0"; div.style['top'] = "0.0"; div.style['border-radius'] = "50%"; div.style['width'] = (radius * 2.0) + "px"; div.style['height'] = (radius * 2.0) + "px"; div.style['z-index'] = "100"; div.style['backgroundColor'] = "red"; div.style['pointer-events'] = "none"; div.style['display'] = "inline-block"; div.style['visibility'] = "hidden"; div.dataset.radius = radius; return div; }; }; var getDebugDiv = function() { var div = document.getElementById(debugDivId); if (!div) { return initDebugDiv(10.0)(); } else { return div; } }; 
exports.setDebugDivVisibility = function(s) { return function() { var div = getDebugDiv(); div.style['visibility'] = s; }; }; exports.setDebugDivPoint = function(p) { return function() { var div = getDebugDiv(); var r = div.dataset.radius | 1.0; var x = p.x - r; var y = p.y - r * 2.0; // var y = p.y; div.style['left'] = x + "px"; div.style['top'] = y + "px"; }; };<|fim▁end|>
var debugDivId = "debugDiv";
<|file_name|>generate_examples_lib.py<|end_file_name|><|fim▁begin|># Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Generate a series of TensorFlow graphs that become tflite test cases. Usage: generate_examples <output directory> bazel run //tensorflow/lite/testing:generate_examples To more easily debug failures use (or override) the --save_graphdefs flag to place text proto graphdefs into the generated zip files. 
""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import itertools import operator import os import random import re import string import tempfile import traceback import zipfile import numpy as np from six import StringIO from six.moves import xrange # TODO(aselle): Disable GPU for now os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # pylint: disable=g-import-not-at-top import tensorflow as tf from google.protobuf import text_format # TODO(aselle): switch to TensorFlow's resource_loader from tensorflow.lite.testing import generate_examples_report as report_lib from tensorflow.lite.testing import string_util_wrapper from tensorflow.python.framework import test_util from tensorflow.python.framework import graph_util as tf_graph_util from tensorflow.python.ops import rnn from tensorflow.python.ops import array_ops from tensorflow.python.ops import spectral_ops_test_util RANDOM_SEED = 342 TEST_INPUT_DEPTH = 3 # A map from regular expression to bug number. Any test failure with label # matching the expression will be considered due to the corresponding bug. KNOWN_BUGS = { # TOCO doesn't support scalars as input. # Concat doesn't work with a single input tensor r"concat.*num_tensors=1": "67378344", # Transposition in MatMul is not fully supported. "fully_connected.*transpose_a=True": "67586970", # Softmax graphs are too complex. r"softmax.*dim=0": "67749831", # BatchToSpaceND only supports 4D tensors. r"batch_to_space_nd.*input_shape=\[8,2,2,2,1,1\]": "70594733", # Div will use floordiv. r"div.*int32": "72051395", } class Options(object): """All options for example generation.""" def __init__(self): # Directory where the outputs will be go. self.output_path = None # Particular zip to output. self.zip_to_output = None # Path to toco tool. self.toco = None # If a particular model is affected by a known bug count it as a Toco # error. 
self.known_bugs_are_errors = False # Raise an exception if any converter error is encountered. self.ignore_converter_errors = False # Include intermediate graphdefs in the output zip files. self.save_graphdefs = False # Whether the TFLite Flex converter is being used. self.run_with_flex = False # The function to convert a TensorFLow model to TFLite model. # See the document for `toco_convert` function for its required signature. # TODO(ycling): Decouple `toco_convert` function from this module, and # remove the `toco` attribute in this class. self.tflite_convert_function = toco_convert # A map from regular expression to bug number. Any test failure with label # matching the expression will be considered due to the corresponding bug. self.known_bugs = KNOWN_BUGS # A map from names to functions which make test cases. _MAKE_TEST_FUNCTIONS_MAP = {} # A decorator to register the make test functions. # Usage: # All the make_*_test should be registered. Example: # @register_make_test_function() # def make_conv_tests(options): # # ... # If a function is decorated by other decorators, it's required to specify the # name explicitly. Example: # @register_make_test_function(name="make_unidirectional_sequence_lstm_tests") # @test_util.enable_control_flow_v2 # def make_unidirectional_sequence_lstm_tests(options): # # ... def register_make_test_function(name=None): def decorate(function, name=name): if name is None: name = function.__name__ _MAKE_TEST_FUNCTIONS_MAP[name] = function return decorate class ExtraTocoOptions(object): """Additional toco options besides input, output, shape.""" def __init__(self): # Whether to ignore control dependency nodes. self.drop_control_dependency = False # Allow custom ops in the toco conversion. self.allow_custom_ops = False # Rnn states that are used to support rnn / lstm cells. self.rnn_states = None # Split the LSTM inputs from 5 inoputs to 18 inputs for TFLite. 
self.split_tflite_lstm_inputs = None def toco_options(data_types, input_arrays, output_arrays, shapes, extra_toco_options=ExtraTocoOptions()): """Create TOCO options to process a model. Args: data_types: input and inference types used by TOCO. input_arrays: names of the input tensors output_arrays: name of the output tensors shapes: shapes of the input tensors extra_toco_options: additional toco options Returns: the options in a string. """ shape_str = ":".join([",".join(str(y) for y in x) for x in shapes if x]) inference_type = "FLOAT" # TODO(ahentz): if we get multi-input quantization to work we need this # to change if data_types[0] == "QUANTIZED_UINT8": inference_type = "QUANTIZED_UINT8" s = (" --input_data_types=%s" % ",".join(data_types) + " --inference_type=%s" % inference_type + " --input_format=TENSORFLOW_GRAPHDEF" + " --output_format=TFLITE" + " --input_arrays=%s" % ",".join(input_arrays) + " --output_arrays=%s" % ",".join(output_arrays)) if shape_str: s += (" --input_shapes=%s" % shape_str) if extra_toco_options.drop_control_dependency: s += " --drop_control_dependency" if extra_toco_options.allow_custom_ops: s += " --allow_custom_ops" if extra_toco_options.rnn_states: s += (" --rnn_states='" + extra_toco_options.rnn_states + "'") if extra_toco_options.split_tflite_lstm_inputs is not None: if extra_toco_options.split_tflite_lstm_inputs: s += " --split_tflite_lstm_inputs=true" else: s += " --split_tflite_lstm_inputs=false" return s def format_result(t): """Convert a tensor to a format that can be used in test specs.""" if t.dtype.kind not in [np.dtype(np.string_).kind, np.dtype(np.object_).kind]: # Output 9 digits after the point to ensure the precision is good enough. values = ["{:.9f}".format(value) for value in list(t.flatten())] return ",".join(values) else: return string_util_wrapper.SerializeAsHexString(t.flatten()) def write_examples(fp, examples): """Given a list `examples`, write a text format representation. 
The file format is csv like with a simple repeated pattern. We would ike to use proto here, but we can't yet due to interfacing with the Android team using this format. Args: fp: File-like object to write to. examples: Example dictionary consiting of keys "inputs" and "outputs" """ def write_tensor(fp, x): """Write tensor in file format supported by TFLITE example.""" fp.write("dtype,%s\n" % x.dtype) fp.write("shape," + ",".join(map(str, x.shape)) + "\n") fp.write("values," + format_result(x) + "\n") fp.write("test_cases,%d\n" % len(examples)) for example in examples: fp.write("inputs,%d\n" % len(example["inputs"])) for i in example["inputs"]: write_tensor(fp, i) fp.write("outputs,%d\n" % len(example["outputs"])) for i in example["outputs"]: write_tensor(fp, i) def write_test_cases(fp, model_name, examples): """Given a dictionary of `examples`, write a text format representation. The file format is protocol-buffer-like, even though we don't use proto due to the needs of the Android team. Args: fp: File-like object to write to. model_name: Filename where the model was written to, relative to filename. 
examples: Example dictionary consiting of keys "inputs" and "outputs" """ fp.write("load_model: %s\n" % os.path.basename(model_name)) for example in examples: fp.write("reshape {\n") for t in example["inputs"]: fp.write(" input: \"" + ",".join(map(str, t.shape)) + "\"\n") fp.write("}\n") fp.write("invoke {\n") for t in example["inputs"]: fp.write(" input: \"" + format_result(t) + "\"\n") for t in example["outputs"]: fp.write(" output: \"" + format_result(t) + "\"\n") fp.write(" output_shape: \"" + ",".join([str(dim) for dim in t.shape]) + "\"\n") fp.write("}\n") _TF_TYPE_INFO = { tf.float32: (np.float32, "FLOAT"), tf.float16: (np.float16, "FLOAT"), tf.int32: (np.int32, "INT32"), tf.uint8: (np.uint8, "QUANTIZED_UINT8"), tf.int16: (np.int16, "QUANTIZED_INT16"), tf.int64: (np.int64, "INT64"), tf.bool: (np.bool, "BOOL"), tf.string: (np.string_, "STRING"), } def create_tensor_data(dtype, shape, min_value=-100, max_value=100): """Build tensor data spreading the range [min_value, max_value).""" if dtype in _TF_TYPE_INFO: dtype = _TF_TYPE_INFO[dtype][0] if dtype in (tf.float32, tf.float16): value = (max_value-min_value)*np.random.random_sample(shape)+min_value elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16): value = np.random.randint(min_value, max_value+1, shape) elif dtype == tf.bool: value = np.random.choice([True, False], size=shape) elif dtype == np.string_: # Not the best strings, but they will do for some basic testing. 
letters = list(string.ascii_uppercase) return np.random.choice(letters, size=shape).astype(dtype) return np.dtype(dtype).type(value) if np.isscalar(value) else value.astype( dtype) def create_scalar_data(dtype, min_value=-100, max_value=100): """Build scalar tensor data range from min_value to max_value exclusively.""" if dtype in _TF_TYPE_INFO: dtype = _TF_TYPE_INFO[dtype][0] if dtype in (tf.float32, tf.float16): value = (max_value - min_value) * np.random.random() + min_value elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16): value = np.random.randint(min_value, max_value + 1) return np.array(value, dtype=dtype) def freeze_graph(session, outputs): """Freeze the current graph. Args: session: Tensorflow sessions containing the graph outputs: List of output tensors Returns: The frozen graph_def. """ return tf_graph_util.convert_variables_to_constants( session, session.graph.as_graph_def(), [x.op.name for x in outputs]) @register_make_test_function() def make_control_dep_tests(options): """Make a set of tests that use control dependencies.""" test_parameters = [{ "input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]], }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) filter_value = tf.zeros((3, 3, TEST_INPUT_DEPTH, 8), tf.float32) assert_op = tf.assert_greater_equal(input_tensor, input_tensor - 1) with tf.control_dependencies([assert_op]): out = tf.nn.conv2d(input_tensor, filter_value, strides=(1, 1, 1, 1), padding="SAME") return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data(tf.float32, parameters["input_shape"]) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) extra_toco_options = ExtraTocoOptions() extra_toco_options.drop_control_dependency = True make_zip_of_tests( options, test_parameters, build_graph, build_inputs, extra_toco_options, expected_tf_failures=3) def 
toco_convert(options, graph_def, input_tensors, output_tensors, **kwargs): """Convert a model's graph def into a tflite model. NOTE: this currently shells out to the toco binary, but we would like convert to Python API tooling in the future. Args: options: An Options instance. graph_def: A GraphDef object. input_tensors: List of input tensor tuples `(name, shape, type)`. output_tensors: List of output tensors (names). **kwargs: Extra options to be passed. Returns: output tflite model, log_txt from conversion or None, log_txt if it did not convert properly. """ # Convert ophint ops if presented. graph_def = tf.lite.experimental.convert_op_hints_to_stubs( graph_def=graph_def) graph_def_str = graph_def.SerializeToString() extra_toco_options = kwargs.get("extra_toco_options", ExtraTocoOptions()) test_params = kwargs.get("test_params", {}) input_arrays = [x[0] for x in input_tensors] data_types = [_TF_TYPE_INFO[x[2]][1] for x in input_tensors] if test_params.get("fully_quantize", False): with tempfile.NamedTemporaryFile() as graphdef_file: graphdef_file.write(graph_def_str) graphdef_file.flush() input_shapes = get_input_shapes_map(input_tensors) converter = tf.lite.TocoConverter.from_frozen_graph( graphdef_file.name, input_arrays, output_tensors, input_shapes) def representative_dataset(input_tensors): calibration_inputs = [] for _, shape, _ in input_tensors: if shape: dims = [dim.value for dim in shape.dims] calibration_inputs.append( np.random.uniform(-1, 1, tuple(dims)).astype(np.float32)) return calibration_inputs def representative_dataset_gen(): for _ in range(100): yield representative_dataset(input_tensors) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS_INT8 ] converter.representative_dataset = representative_dataset_gen try: tflite_model = converter.convert() return tflite_model, "" except Exception as e: log = "{0}\n{1}".format(str(e), traceback.format_exc()) return None, log else: opts = toco_options( data_types=data_types, 
input_arrays=input_arrays, shapes=[x[1] for x in input_tensors], output_arrays=output_tensors, extra_toco_options=extra_toco_options) with tempfile.NamedTemporaryFile() as graphdef_file, \ tempfile.NamedTemporaryFile() as output_file, \ tempfile.NamedTemporaryFile("w+") as stdout_file: graphdef_file.write(graph_def_str) graphdef_file.flush() # TODO(aselle): Switch this to subprocess at some point. if "pb2lite" in bin_path and options.run_with_flex: opts = ("--input_arrays={0} --output_arrays={1}".format( ",".join(input_arrays), ",".join(output_tensors))) elif options.run_with_flex: opts += " --enable_select_tf_ops --force_select_tf_ops" cmd = ("%s --input_file=%s --output_file=%s %s > %s 2>&1" % (bin_path, graphdef_file.name, output_file.name, opts, stdout_file.name)) exit_code = os.system(cmd) log = ( cmd + "exited with code %d" % exit_code + "\n------------------\n" + stdout_file.read()) return (None if exit_code != 0 else output_file.read()), log def get_input_shapes_map(input_tensors): """Gets a map of input names to shapes. Args: input_tensors: List of input tensor tuples `(name, shape, type)`. Returns: {string : list of integers}. """ input_arrays = [tensor[0] for tensor in input_tensors] input_shapes_list = [] for _, shape, _ in input_tensors: dims = None if shape: dims = [dim.value for dim in shape.dims] input_shapes_list.append(dims) input_shapes = { name: shape for name, shape in zip(input_arrays, input_shapes_list) if shape } return input_shapes def normalize_output_name(output_name): """Remove :0 suffix from tensor names.""" return output_name.split(":")[0] if output_name.endswith( ":0") else output_name # How many test cases we may have in a zip file. Too many test cases will # slow down the test data generation process. 
_MAX_TESTS_PER_ZIP = 500 def make_zip_of_tests(options, test_parameters, make_graph, make_test_inputs, extra_toco_options=ExtraTocoOptions(), use_frozen_graph=False, expected_tf_failures=0): """Helper to make a zip file of a bunch of TensorFlow models. This does a cartestian product of the dictionary of test_parameters and calls make_graph() for each item in the cartestian product set. If the graph is built successfully, then make_test_inputs() is called to build expected input/output value pairs. The model is then converted to tflite with toco, and the examples are serialized with the tflite model into a zip file (2 files per item in the cartesian product set). Args: options: An Options instance. test_parameters: Dictionary mapping to lists for each parameter. e.g. `{"strides": [[1,3,3,1], [1,2,2,1]], "foo": [1.2, 1.3]}` make_graph: function that takes current parameters and returns tuple `[input1, input2, ...], [output1, output2, ...]` make_test_inputs: function taking `curr_params`, `session`, `input_tensors`, `output_tensors` and returns tuple `(input_values, output_values)`. extra_toco_options: Additional toco options. use_frozen_graph: Whether or not freeze graph before toco converter. expected_tf_failures: Number of times tensorflow is expected to fail in executing the input graphs. In some cases it is OK for TensorFlow to fail because the one or more combination of parameters is invalid. Raises: RuntimeError: if there are converter errors that can't be ignored. 
""" zip_path = os.path.join(options.output_path, options.zip_to_output) parameter_count = 0 for parameters in test_parameters: parameter_count += functools.reduce( operator.mul, [len(values) for values in parameters.values()]) if parameter_count > _MAX_TESTS_PER_ZIP: raise RuntimeError( "Too many parameter combinations for generating '%s'.\n" "There are %d combinations while the upper limit is %d.\n" "Having too many combinations will slow down the tests.\n" "Please consider splitting the test into multiple functions.\n" % (zip_path, parameter_count, _MAX_TESTS_PER_ZIP)) # TODO(aselle): Make this allow multiple inputs outputs. archive = zipfile.PyZipFile(zip_path, "w") zip_manifest = [] convert_report = [] toco_errors = 0 processed_labels = set() for parameters in test_parameters: keys = parameters.keys() for curr in itertools.product(*parameters.values()): label = zip_path.replace(".zip", "_") + (",".join( "%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", "")) if label[0] == "/": label = label[1:] if label in processed_labels: # Do not populate data for the same label more than once. It will cause # errors when unzipping. continue processed_labels.add(label) param_dict = dict(zip(keys, curr)) def build_example(label, param_dict_real): """Build the model with parameter values set in param_dict_real. Args: label: Label of the model (i.e. the filename in the zip). param_dict_real: Parameter dictionary (arguments to the factories make_graph and make_test_inputs) Returns: (tflite_model_binary, report) where tflite_model_binary is the serialized flatbuffer as a string and report is a dictionary with keys `toco_log` (log of toco conversion), `tf_log` (log of tf conversion), `toco` (a string of success status of the conversion), `tf` (a string success status of the conversion). 
""" np.random.seed(RANDOM_SEED) report = {"toco": report_lib.NOTRUN, "tf": report_lib.FAILED} # Build graph report["tf_log"] = "" report["toco_log"] = "" tf.reset_default_graph() with tf.device("/cpu:0"): try: inputs, outputs = make_graph(param_dict_real) except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError, ValueError): report["tf_log"] += traceback.format_exc() return None, report sess = tf.Session() try: baseline_inputs, baseline_outputs = (make_test_inputs( param_dict_real, sess, inputs, outputs)) except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError, ValueError): report["tf_log"] += traceback.format_exc() return None, report report["toco"] = report_lib.FAILED report["tf"] = report_lib.SUCCESS # Convert graph to toco input_tensors = [(input_tensor.name.split(":")[0], input_tensor.shape, input_tensor.dtype) for input_tensor in inputs] output_tensors = [normalize_output_name(out.name) for out in outputs] graph_def = freeze_graph( sess, tf.global_variables() + inputs + outputs) if use_frozen_graph else sess.graph_def if "split_tflite_lstm_inputs" in param_dict_real: extra_toco_options.split_tflite_lstm_inputs = param_dict_real[ "split_tflite_lstm_inputs"] tflite_model_binary, toco_log = options.tflite_convert_function( options, graph_def, input_tensors, output_tensors, extra_toco_options=extra_toco_options, test_params=param_dict_real) report["toco"] = (report_lib.SUCCESS if tflite_model_binary is not None else report_lib.FAILED) report["toco_log"] = toco_log if True or options.save_graphdefs: archive.writestr(label + ".pbtxt", text_format.MessageToString(graph_def), zipfile.ZIP_DEFLATED) if tflite_model_binary: archive.writestr(label + ".bin", tflite_model_binary, zipfile.ZIP_DEFLATED) example = {"inputs": baseline_inputs, "outputs": baseline_outputs} example_fp = StringIO() write_examples(example_fp, [example]) archive.writestr(label + ".inputs", example_fp.getvalue(), zipfile.ZIP_DEFLATED) example_fp2 = StringIO() 
write_test_cases(example_fp2, label + ".bin", [example]) archive.writestr(label + "_tests.txt", example_fp2.getvalue(), zipfile.ZIP_DEFLATED) zip_manifest.append(label + "\n") return tflite_model_binary, report _, report = build_example(label, param_dict) if report["toco"] == report_lib.FAILED: ignore_error = False if not options.known_bugs_are_errors: for pattern, bug_number in options.known_bugs.items(): if re.search(pattern, label): print("Ignored converter error due to bug %s" % bug_number) ignore_error = True if not ignore_error: toco_errors += 1 print("-----------------\nconverter error!\n%s\n-----------------\n" % report["toco_log"]) convert_report.append((param_dict, report)) report_io = StringIO() report_lib.make_report_table(report_io, zip_path, convert_report) archive.writestr("report.html", report_io.getvalue()) archive.writestr("manifest.txt", "".join(zip_manifest), zipfile.ZIP_DEFLATED) # Log statistics of what succeeded total_conversions = len(convert_report) tf_success = sum(1 for x in convert_report if x[1]["tf"] == report_lib.SUCCESS) toco_success = sum(1 for x in convert_report if x[1]["toco"] == report_lib.SUCCESS) percent = 0 if tf_success > 0: percent = float(toco_success) / float(tf_success) * 100. tf.logging.info(("Archive %s Considered %d graphs, %d TF evaluated graphs " " and %d TOCO converted graphs (%.1f%%"), zip_path, total_conversions, tf_success, toco_success, percent) tf_failures = parameter_count - tf_success if tf_failures / parameter_count > 0.8: raise RuntimeError(("Test for '%s' is not very useful. 
" "TensorFlow fails in %d percent of the cases.") % (zip_path, int(100 * tf_failures / parameter_count))) if tf_failures != expected_tf_failures: raise RuntimeError(("Expected TF to fail %d times while generating '%s', " "but that happened %d times") % (expected_tf_failures, zip_path, tf_failures)) if not options.ignore_converter_errors and toco_errors > 0: raise RuntimeError( "Found %d errors while generating toco models" % toco_errors) def make_pool_tests(pool_op_in): """Make a set of tests to do average pooling. Args: pool_op_in: TensorFlow pooling operation to test i.e. `tf.nn.avg_pool2d`. Returns: A function representing the true generator (after curried pool_op_in). """ pool_op = pool_op_in def f(options, expected_tf_failures=0): """Actual function that generates examples. Args: options: An Options instance. expected_tf_failures: number of expected tensorflow failures. """ # Chose a set of parameters test_parameters = [{ "ksize": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], "strides": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], # TODO(aselle): should add in a degenerate shape (e.g. [1, 0, 1, 1]). 
"input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]], "padding": ["SAME", "VALID"], "data_format": ["NHWC"], # TODO(aselle): NCHW would be good }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) out = pool_op( input_tensor, ksize=parameters["ksize"], strides=parameters["strides"], data_format=parameters["data_format"], padding=parameters["padding"]) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data(tf.float32, parameters["input_shape"]) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, expected_tf_failures=expected_tf_failures) return f @register_make_test_function() def make_l2_pool_tests(options): make_pool_tests(make_l2_pool)(options, expected_tf_failures=80) @register_make_test_function() def make_avg_pool_tests(options): make_pool_tests(tf.nn.avg_pool)(options, expected_tf_failures=80) @register_make_test_function() def make_max_pool_tests(options): make_pool_tests(tf.nn.max_pool)(options, expected_tf_failures=80) @register_make_test_function() def make_abs_tests(options): """Make a set of tests to do relu.""" # Chose a set of parameters test_parameters = [{ "input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]], }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) out = tf.abs(input_tensor) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data( np.float32, parameters["input_shape"], min_value=-10, max_value=10) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def 
make_elu_tests(options): """Make a set of tests to do (float) tf.nn.elu.""" test_parameters = [ { "input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]], }, ] def build_graph(parameters): """Build the graph for the test case.""" input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) out = tf.nn.elu(input_tensor) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): """Build the inputs for the test case.""" input_values = create_tensor_data( np.float32, parameters["input_shape"], min_value=-4, max_value=10) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_identity_tests(options): """Make a set of tests to do identity.""" # Chose a set of parameters test_parameters = [{ "input_shape": [[], [1], [3, 3]], "use_snapshot": [False, True], }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) # We add the Multiply before Identity just as a walk-around to make the test # pass when input_shape is scalar. # During graph transformation, TOCO will replace the Identity op with # Reshape when input has shape. However, currently TOCO can't distinguish # between missing shape and scalar shape. As a result, when input has scalar # shape, this conversion still fails. # TODO(b/129197312), remove the walk-around code once the bug is fixed. 
input_doubled = input_tensor * 2.0 if parameters["use_snapshot"]: identity_output = array_ops.snapshot(input_doubled) else: identity_output = tf.identity(input_doubled) return [input_tensor], [identity_output] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data( np.float32, parameters["input_shape"], min_value=-4, max_value=10) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_relu_tests(options): """Make a set of tests to do relu.""" # Chose a set of parameters test_parameters = [{ "input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]], }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) out = tf.nn.relu(input_tensor) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data( np.float32, parameters["input_shape"], min_value=-4, max_value=10) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_relu1_tests(options): """Make a set of tests to do relu1.""" # Chose a set of parameters test_parameters = [{ "input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]], }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) # Note that the following is not supported: # out = tf.maximum(-1.0, tf.minimum(input_tensor, 1.0)) out = tf.minimum(1.0, tf.maximum(input_tensor, -1.0)) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data( np.float32, parameters["input_shape"], min_value=-3, 
max_value=10) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_relu6_tests(options): """Make a set of tests to do relu6.""" # Chose a set of parameters test_parameters = [{ "input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]], }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) out = tf.nn.relu(input_tensor) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data( np.float32, parameters["input_shape"], min_value=-3, max_value=10) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_prelu_tests(options): """Make a set of tests to do PReLU.""" test_parameters = [ { # The canonical case for image processing is having a 4D `input` # (NHWC)and `shared_axes`=[1, 2], so the alpha parameter is per # channel. "input_shape": [[1, 10, 10, 3], [3, 3, 3, 3]], "shared_axes": [[1, 2], [1]], }, { # 2D-3D example. Share the 2nd axis. 
"input_shape": [[20, 20], [20, 20, 20]], "shared_axes": [[1]], } ] def build_graph(parameters): """Build the graph for the test case.""" input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) prelu = tf.keras.layers.PReLU(shared_axes=parameters["shared_axes"]) out = prelu(input_tensor) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): """Build the inputs for the test case.""" input_shape = parameters["input_shape"] input_values = create_tensor_data( np.float32, input_shape, min_value=-10, max_value=10) shared_axes = parameters["shared_axes"] alpha_shape = [] for dim in range(1, len(input_shape)): alpha_shape.append(1 if dim in shared_axes else input_shape[dim]) alpha_values = create_tensor_data(np.float32, alpha_shape) # There should be only 1 trainable variable tensor. variables = tf.all_variables() assert len(variables) == 1 sess.run(variables[0].assign(alpha_values)) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, use_frozen_graph=True) @register_make_test_function() def make_leaky_relu_tests(options): """Make a set of tests to do LeakyRelu.""" test_parameters = [ { "input_shape": [[], [1], [5], [1, 10, 10, 3], [3, 3, 3, 3]], "alpha": [0.1, 1.0, 2.0, -0.1, -1.0, -2.0], }, ] def build_graph(parameters): """Build the graph for the test case.""" input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) out = tf.nn.leaky_relu(input_tensor, alpha=parameters["alpha"]) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): """Build the inputs for the test case.""" input_values = create_tensor_data( np.float32, parameters["input_shape"], min_value=-3, max_value=10) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) # 
This function tests various TensorFLow functions that generates Const op, # including `tf.ones`, `tf.zeros` and random functions. @register_make_test_function() def make_constant_tests(options): """Make a set of tests to do constant ops.""" test_parameters = [{ "dtype": [tf.float32, tf.int32], "input_shape": [[], [1], [2], [1, 1, 1, 1], [2, 2, 2, 2]], "constant_is_also_output": [True, False], # This is a regression test for a bug where Toco rejects models with # unread inputs. "has_unread_input": [True, False], }] def build_graph(parameters): dummy_input = tf.placeholder( dtype=parameters["dtype"], name="input1", shape=parameters["input_shape"]) constant = tf.constant( create_tensor_data(parameters["dtype"], parameters["input_shape"])) outputs = [tf.maximum(dummy_input, constant)] if parameters["constant_is_also_output"]: outputs.append(constant) inputs = [dummy_input] if parameters["has_unread_input"]: unread_input = tf.placeholder( dtype=parameters["dtype"], name="unread_input", shape=parameters["input_shape"]) inputs.append(unread_input) return inputs, outputs def build_inputs(parameters, sess, inputs, outputs): dummy_input = np.zeros( parameters["input_shape"], dtype=_TF_TYPE_INFO[parameters["dtype"]][0]) return [dummy_input], sess.run(outputs, feed_dict={inputs[0]: dummy_input}) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) def make_binary_op_tests(options, binary_operator, expected_tf_failures=0): """Make a set of tests to do binary ops with and without broadcast.""" test_parameters = [ # Avoid creating all combinations to keep the test size small. 
{ "dtype": [tf.float32, tf.int32], "input_shape_1": [[1, 3, 4, 3]], "input_shape_2": [[1, 3, 4, 3]], "activation": [True], }, { "dtype": [tf.float32], "input_shape_1": [[5]], "input_shape_2": [[5]], "activation": [False, True], }, { "dtype": [tf.float32, tf.int32, tf.int64], "input_shape_1": [[1, 3, 4, 3]], "input_shape_2": [[3]], "activation": [True, False], }, { "dtype": [tf.float32, tf.int32], "input_shape_1": [[3]], "input_shape_2": [[1, 3, 4, 3]], "activation": [True, False], }, { "dtype": [tf.float32], "input_shape_1": [[]], "input_shape_2": [[]], "activation": [False], }, { "dtype": [tf.float32], "input_shape_1": [[0]], "input_shape_2": [[1]], "activation": [False], } ] def build_graph(parameters): """Builds the graph given the current parameters.""" input1 = tf.placeholder( dtype=parameters["dtype"], name="input1", shape=parameters["input_shape_1"]) input2 = tf.placeholder( dtype=parameters["dtype"], name="input2", shape=parameters["input_shape_2"]) out = binary_operator(input1, input2) if parameters["activation"]: out = tf.nn.relu(out) return [input1, input2], [out] def build_inputs(parameters, sess, inputs, outputs): """Builds operand inputs for op.""" input1 = create_tensor_data(parameters["dtype"], parameters["input_shape_1"]) input2 = create_tensor_data(parameters["dtype"], parameters["input_shape_2"]) return [input1, input2], sess.run( outputs, feed_dict={ inputs[0]: input1, inputs[1]: input2 }) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, expected_tf_failures=expected_tf_failures) def make_reduce_tests(reduce_op, min_value=-10, max_value=10, boolean_tensor_only=False): """Make a set of tests to do reduce operation. Args: reduce_op: TensorFlow reduce operation to test, i.e. `tf.reduce_mean`. min_value: min value for created tensor data. max_value: max value for created tensor data. boolean_tensor_only: If true, will only generate tensor with boolean value. 
Returns: a function representing the true generator with `reduce_op_in` curried. """ def f(options): """Actual function that generates examples.""" test_parameters = [ { "input_dtype": [tf.float32, tf.int32, tf.int64], "input_shape": [[3, 3, 2, 4]], "axis": [ 0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0], [2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1], [-1, 0], [-1, -2, -3], [0, 0, 0], [2, 2, 0], [1, 0, -3, -3] ], "const_axis": [True, False], "keepdims": [True, False], }, { "input_dtype": [tf.float32], "input_shape": [[1, 8, 8, 3]], "axis": [ 0, 1, 2, 3, [1, 2], [0, 3], [1, 2, 3], [0, 1, 2, 3], [3, 2, 1, 0], [3, 1, 0, 2], [2, 0], [3, 0], [3, 1], [1, 0], -1, -2, -3, -4, [0, -2], [2, 3, -1, 0], [3, 1, 2, -3], [3, -4], [2, 2, 2], [2, 2, 3], [-3, -3, -4], [-3, 2, 1] ], "const_axis": [True, False], "keepdims": [True, False], }, { "input_dtype": [tf.float32], "input_shape": [[], [1, 8, 8, 3], [3, 2, 4]], "axis": [[]], # shape is: [0] "const_axis": [False], "keepdims": [True, False], }, { "input_dtype": [tf.float32], "input_shape": [[], [1, 8, 8, 3], [3, 2, 4]], "axis": [None], # shape is: [] "const_axis": [True], "keepdims": [True, False], } ] def build_graph(parameters): """Build the mean op testing graph.""" dtype = parameters["input_dtype"] if boolean_tensor_only: dtype = tf.bool input_tensor = tf.placeholder( dtype=dtype, name="input", shape=parameters["input_shape"]) # Get axis as either a placeholder or constants. if parameters["const_axis"]: axis = parameters["axis"] input_tensors = [input_tensor] else: if isinstance(parameters["axis"], list): shape = [len(parameters["axis"])] else: shape = [] # shape for None or integers. 
axis = tf.placeholder(dtype=tf.int32, name="axis", shape=shape) input_tensors = [input_tensor, axis] out = reduce_op( input_tensor, axis=axis, keepdims=parameters["keepdims"]) return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): dtype = parameters["input_dtype"] if boolean_tensor_only: dtype = tf.bool values = [ create_tensor_data( dtype, parameters["input_shape"], min_value=min_value, max_value=max_value) ] if not parameters["const_axis"]: values.append(np.array(parameters["axis"])) return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) return f @register_make_test_function() def make_mean_tests(options): """Make a set of tests to do mean.""" return make_reduce_tests(tf.reduce_mean)(options) @register_make_test_function() def make_sum_tests(options): """Make a set of tests to do sum.""" return make_reduce_tests(tf.reduce_sum)(options) @register_make_test_function() def make_reduce_prod_tests(options): """Make a set of tests to do prod.""" # set min max value to be -2, 2 to avoid overflow. 
return make_reduce_tests(tf.reduce_prod, -2, 2)(options) @register_make_test_function() def make_reduce_max_tests(options): """Make a set of tests to do max.""" return make_reduce_tests(tf.reduce_max)(options) @register_make_test_function() def make_reduce_min_tests(options): """Make a set of tests to do min.""" return make_reduce_tests(tf.reduce_min)(options) @register_make_test_function() def make_reduce_any_tests(options): """Make a set of tests to do any.""" return make_reduce_tests(tf.reduce_any, boolean_tensor_only=True)(options) @register_make_test_function() def make_exp_tests(options): """Make a set of tests to do exp.""" test_parameters = [{ "input_dtype": [tf.float32], "input_shape": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]], }] def build_graph(parameters): """Build the exp op testing graph.""" input_tensor = tf.placeholder( dtype=parameters["input_dtype"], name="input", shape=parameters["input_shape"]) out = tf.exp(input_tensor) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data(parameters["input_dtype"], parameters["input_shape"], min_value=-100, max_value=9) ] return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_cos_tests(options): """Make a set of tests to do cos.""" test_parameters = [{ "input_dtype": [tf.float32], "input_shape": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]], }] def build_graph(parameters): """Build the cos op testing graph.""" input_tensor = tf.placeholder( dtype=parameters["input_dtype"], name="input", shape=parameters["input_shape"]) out = tf.cos(input_tensor) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data(parameters["input_dtype"], parameters["input_shape"], min_value=-np.pi, max_value=np.pi) ] return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) 
make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_log_softmax_tests(options): """Make a set of tests to do log_softmax.""" test_parameters = [{ "input_dtype": [tf.float32], "input_shape": [[1, 100], [4, 2], [5, 224]], }] def build_graph(parameters): """Build the log_softmax op testing graph.""" input_tensor = tf.placeholder( dtype=parameters["input_dtype"], name="input", shape=parameters["input_shape"]) out = tf.nn.log_softmax(input_tensor) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data( parameters["input_dtype"], parameters["input_shape"], min_value=-100, max_value=9) ] return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_maximum_tests(options): """Make a set of tests to do maximum.""" test_parameters = [{ "input_dtype": [tf.float32], "input_shape_1": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]], "input_shape_2": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]], }] def build_graph(parameters): """Build the maximum op testing graph.""" input_tensor_1 = tf.placeholder( dtype=parameters["input_dtype"], name="input_1", shape=parameters["input_shape_1"]) input_tensor_2 = tf.placeholder( dtype=parameters["input_dtype"], name="input_2", shape=parameters["input_shape_2"]) out = tf.maximum(input_tensor_1, input_tensor_2) return [input_tensor_1, input_tensor_2], [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data(parameters["input_dtype"], parameters["input_shape_1"]), create_tensor_data(parameters["input_dtype"], parameters["input_shape_2"]) ] return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, expected_tf_failures=8) @register_make_test_function() def make_minimum_tests(options): """Make a set 
of tests to do minimum.""" test_parameters = [{ "input_dtype": [tf.float32], "input_shape_1": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]], "input_shape_2": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]], }] def build_graph(parameters): """Build the minimum op testing graph.""" input_tensor_1 = tf.placeholder( dtype=parameters["input_dtype"], name="input_1", shape=parameters["input_shape_1"]) input_tensor_2 = tf.placeholder( dtype=parameters["input_dtype"], name="input_2", shape=parameters["input_shape_2"]) out = tf.minimum(input_tensor_1, input_tensor_2) return [input_tensor_1, input_tensor_2], [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data(parameters["input_dtype"], parameters["input_shape_1"]), create_tensor_data(parameters["input_dtype"], parameters["input_shape_2"]) ] return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, expected_tf_failures=8) def make_binary_op_tests_func(binary_operator): """Return a function that does a test on a binary operator.""" return lambda options: make_binary_op_tests(options, binary_operator) @register_make_test_function() def make_add_tests(options): make_binary_op_tests(options, tf.add) @register_make_test_function() def make_add_n_tests(options): """Make a set of tests for AddN op.""" test_parameters = [ { "dtype": [tf.float32, tf.int32], "input_shape": [[2, 5, 3, 1]], "num_inputs": [2, 3, 4, 5], }, { "dtype": [tf.float32, tf.int32], "input_shape": [[5]], "num_inputs": [2, 3, 4, 5], }, { "dtype": [tf.float32, tf.int32], "input_shape": [[]], "num_inputs": [2, 3, 4, 5], }, ] def build_graph(parameters): """Builds the graph given the current parameters.""" input_tensors = [] for i in range(parameters["num_inputs"]): input_tensors.append( tf.placeholder( dtype=parameters["dtype"], name="input_{}".format(i), shape=parameters["input_shape"])) out = tf.add_n(input_tensors) return input_tensors, [out] 
def build_inputs(parameters, sess, inputs, outputs): """Builds operand inputs for op.""" input_data = [] for i in range(parameters["num_inputs"]): input_data.append( create_tensor_data(parameters["dtype"], parameters["input_shape"])) return input_data, sess.run( outputs, feed_dict={i: d for i, d in zip(inputs, input_data)}) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_div_tests(options): make_binary_op_tests(options, tf.div) @register_make_test_function() def make_sub_tests(options): make_binary_op_tests(options, tf.subtract) @register_make_test_function() def make_mul_tests(options): make_binary_op_tests(options, tf.multiply) @register_make_test_function() def make_pow_tests(options): make_binary_op_tests(options, tf.pow, expected_tf_failures=7) @register_make_test_function() def make_floor_div_tests(options): make_binary_op_tests(options, tf.floor_div) @register_make_test_function() def make_floor_mod_tests(options): make_binary_op_tests(options, tf.floormod) @register_make_test_function() def make_squared_difference_tests(options): make_binary_op_tests(options, tf.squared_difference) @register_make_test_function() def make_gather_tests(options): """Make a set of tests to do gather.""" test_parameters = [ { "params_dtype": [tf.float32, tf.int32, tf.int64], "params_shape": [[10], [1, 2, 20]], "indices_dtype": [tf.int32, tf.int64], "indices_shape": [[3], [5]], "axis": [-1, 0, 1], }, { # TODO(b/123895910): add Nd support for strings. 
"params_dtype": [tf.string], "params_shape": [[8]], "indices_dtype": [tf.int32], "indices_shape": [[3]], "axis": [0], } ] def build_graph(parameters): """Build the gather op testing graph.""" params = tf.placeholder( dtype=parameters["params_dtype"], name="params", shape=parameters["params_shape"]) indices = tf.placeholder( dtype=parameters["indices_dtype"], name="indices", shape=parameters["indices_shape"]) axis = min(len(parameters["params_shape"]), parameters["axis"]) out = tf.gather(params, indices, axis=axis) return [params, indices], [out] def build_inputs(parameters, sess, inputs, outputs): params = create_tensor_data(parameters["params_dtype"], parameters["params_shape"]) indices = create_tensor_data(parameters["indices_dtype"], parameters["indices_shape"], 0, parameters["params_shape"][0] - 1) return [params, indices], sess.run( outputs, feed_dict=dict(zip(inputs, [params, indices]))) # Note that TF can't execute with index=1 and params_shape=[10]. make_zip_of_tests( options, test_parameters, build_graph, build_inputs, expected_tf_failures=12) @register_make_test_function() def make_gather_nd_tests(options): """Make a set of tests to do gather_nd.""" test_parameters = [ { "params_dtype": [tf.float32, tf.int32, tf.int64], "params_shape": [[5, 1]], "indices_dtype": [tf.int32, tf.int64], "indices_shape": [[1, 1]], }, { "params_dtype": [tf.float32, tf.int32, tf.int64], "params_shape": [[5, 5]], "indices_dtype": [tf.int32, tf.int64], "indices_shape": [[2, 1], [2, 2]], }, { "params_dtype": [tf.float32, tf.int32, tf.int64], "params_shape": [[5, 5, 10]], "indices_dtype": [tf.int32, tf.int64], "indices_shape": [[3, 1], [2, 2], [2, 3], [2, 1, 3]], }, ] def build_graph(parameters): """Build the gather_nd op testing graph.""" params = tf.placeholder( dtype=parameters["params_dtype"], name="params", shape=parameters["params_shape"]) indices = tf.placeholder( dtype=parameters["indices_dtype"], name="indices", shape=parameters["indices_shape"]) out = tf.gather_nd(params, 
 indices)
    return [params, indices], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feeds random params and indices that are guaranteed in-range."""
    params = create_tensor_data(parameters["params_dtype"],
                                parameters["params_shape"])
    # Indices are drawn from [0, params_shape[0] - 1] so every gather_nd
    # lookup along the leading axis is valid.
    indices = create_tensor_data(parameters["indices_dtype"],
                                 parameters["indices_shape"], 0,
                                 parameters["params_shape"][0] - 1)
    return [params, indices], sess.run(
        outputs, feed_dict=dict(zip(inputs, [params, indices])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_gather_with_constant_tests(options):
  """Make a set of tests which feed constant params/indices to gather."""

  test_parameters = [{
      "input_shape": [[3]],
      "reference_shape": [[2]],
  }, {
      "input_shape": [[2, 3]],
      "reference_shape": [[2, 3]],
  }]

  def build_graph(parameters):
    """Build a graph where the inputs to Gather are constants."""
    # Only `reference` is a placeholder; both gather operands are constants,
    # which exercises the converter's constant-input path for Gather.
    reference = tf.placeholder(
        dtype=tf.int32, shape=parameters["reference_shape"])
    gather_input = tf.constant(
        create_tensor_data(tf.int32, parameters["input_shape"]))
    gather_indices = tf.constant([0, 1], tf.int32)
    out = tf.equal(reference, tf.gather(gather_input, gather_indices))
    return [reference], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feeds an all-zeros reference tensor (the only placeholder)."""
    reference_values = np.zeros(parameters["reference_shape"], dtype=np.int32)
    return [reference_values], sess.run(
        outputs, feed_dict={inputs[0]: reference_values})

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_embedding_lookup_tests(options):
  """Make a set of tests to do embedding_lookup."""

  test_parameters = [
      {
          "params_dtype": [tf.float32],
          "params_shape": [[10], [10, 10]],
          "ids_dtype": [tf.int32],
          "ids_shape": [[3], [5]],
      },
  ]

  def build_graph(parameters):
    """Build the embedding_lookup op testing graph."""
    params = tf.placeholder(
        dtype=parameters["params_dtype"],
        name="params",
        shape=parameters["params_shape"])
    ids = tf.placeholder(
        dtype=parameters["ids_dtype"],
        name="ids",
        shape=parameters["ids_shape"])
    out = tf.nn.embedding_lookup(params, ids)
    return 
[params, ids], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feeds random params plus ids restricted to valid row indices."""
    params = create_tensor_data(parameters["params_dtype"],
                                parameters["params_shape"])
    # Ids are drawn from [0, params_shape[0] - 1] so every row lookup is valid.
    ids = create_tensor_data(parameters["ids_dtype"],
                             parameters["ids_shape"], 0,
                             parameters["params_shape"][0] - 1)
    return [params, ids], sess.run(
        outputs, feed_dict=dict(zip(inputs, [params, ids])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_global_batch_norm_tests(options):
  """Make a set of tests to do batch_norm_with_global_normalization."""

  test_parameters = [{
      "dtype": [tf.float32],
      "input_shape": [[1, 1, 6, 2], [3, 4, 5, 4]],
      "epsilon": [0.1, 0.0001],
      "scale_after": [True, False],
  }]

  def build_graph(parameters):
    """Build the global batch norm testing graph."""
    input_shape = parameters["input_shape"]
    # Per-channel parameter shape (last dimension of the NHWC input).
    scale_shape = input_shape[3]
    # scale/offset/mean/variance and x are random data baked into the graph
    # as constants; only `input_tensor` below is fed at run time.
    scale = create_tensor_data(parameters["dtype"], scale_shape)
    offset = create_tensor_data(parameters["dtype"], scale_shape)
    mean = create_tensor_data(parameters["dtype"], scale_shape)
    variance = create_tensor_data(parameters["dtype"], scale_shape)
    x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
    x_norm = tf.nn.batch_norm_with_global_normalization(
        x, mean, variance, scale, offset, parameters["epsilon"],
        parameters["scale_after"])
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"], name="input",
        shape=parameters["input_shape"])
    # Add the placeholder so the constant-normalized value reaches an output.
    out = tf.add(input_tensor, x_norm)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feeds a random tensor to the single placeholder input."""
    input_value = create_tensor_data(parameters["dtype"],
                                     parameters["input_shape"])
    return [input_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_fused_batch_norm_tests(options):
  """Make a set of tests to do fused_batch_norm."""

  test_parameters = [{
      "dtype": [tf.float32],
      "input_shape": [[1, 1, 6, 2]],
      "epsilon": [0.001,
0.1], }] def build_graph(parameters): """Build the testing graph for fused batch normalization.""" input_shape = parameters["input_shape"] scale_shape = input_shape[3] scale = create_tensor_data(parameters["dtype"], scale_shape) offset = create_tensor_data(parameters["dtype"], scale_shape) mean = create_tensor_data(parameters["dtype"], scale_shape) variance = create_tensor_data(parameters["dtype"], scale_shape) x = create_tensor_data(parameters["dtype"], parameters["input_shape"]) [x_norm, _, _] = tf.nn.fused_batch_norm( x, scale, offset, mean, variance, parameters["epsilon"], data_format="NHWC", is_training=False) input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) out = tf.add(input_tensor, x_norm) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_value = create_tensor_data(parameters["dtype"], parameters["input_shape"]) return [input_value], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_conv_tests(options): """Make a set of tests to do convolution.""" test_parameters = [ { "input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]], "filter_shape": [[1, 1], [2, 3], [3, 3]], "strides": [[1, 1, 1, 1], [1, 2, 3, 1]], "dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]], "padding": ["SAME", "VALID"], "data_format": ["NHWC"], # TODO(aselle): NCHW would be good "constant_filter": [True, False], "channel_multiplier": [1, 2], "fully_quantize": [False], }, # TODO(b/134702301): The fully_quantize param is just ignored by the MLIR # testing path now, resulting in duplicate tests. Either ignore these # tests or handle it properly in the mlir_convert() function. 
{ "input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]], "filter_shape": [[1, 1], [2, 3], [3, 3]], "strides": [[1, 1, 1, 1], [1, 2, 3, 1]], "dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]], "padding": ["SAME", "VALID"], "data_format": ["NHWC"], # TODO(aselle): NCHW would be good "constant_filter": [True], "channel_multiplier": [1, 2], "fully_quantize": [True], } ] def get_tensor_shapes(parameters): input_shape = parameters["input_shape"] filter_size = parameters["filter_shape"] filter_shape = filter_size + [ input_shape[3], parameters["channel_multiplier"] ] return [input_shape, filter_shape] def build_graph(parameters): """Build a conv graph given `parameters`.""" input_shape, filter_shape = get_tensor_shapes(parameters) input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=input_shape) # Get filter input either as a placeholder or constants. Also get a list of # the input tensors that are represented as placeholders. if parameters["constant_filter"]: filter_input = create_tensor_data( np.float32, filter_shape, min_value=-10, max_value=10) input_tensors = [input_tensor] else: filter_input = tf.placeholder( dtype=tf.float32, name="filter", shape=filter_shape) input_tensors = [input_tensor, filter_input] out = tf.nn.conv2d( input_tensor, filter_input, strides=parameters["strides"], dilations=parameters["dilations"], padding=parameters["padding"], data_format=parameters["data_format"]) return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): # Build list of input values either containing 1 tensor (input) or 2 tensors # (input, filter) based on whether filter is constant or variable input. 
input_shape, filter_shape = get_tensor_shapes(parameters) values = [ create_tensor_data(np.float32, input_shape, min_value=-1, max_value=1) ] if not parameters["constant_filter"]: values.append(create_tensor_data(np.float32, filter_shape)) return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, expected_tf_failures=60) # Note: This is a regression test for a bug (b/122651451) that Toco incorrectly # erases the reduction indices array while it's shared with other ops. @register_make_test_function() def make_l2norm_shared_epsilon_tests(options): """Regression test for a bug (b/122651451).""" # Chose a set of parameters test_parameters = [{ "input_shape": [[5, 7]], "dim": [1], "epsilon": [1e-8], }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) epsilon = tf.constant(parameters["epsilon"]) out1 = tf.nn.l2_normalize(input_tensor, parameters["dim"], epsilon=epsilon) out2 = tf.nn.l2_normalize(input_tensor, parameters["dim"], epsilon=epsilon) out = out1 + out2 return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data( np.float32, parameters["input_shape"], min_value=-4, max_value=10) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) # Note: This is a regression test for a bug (b/112436267) that Toco incorrectly # fuses weights when multiple Conv2D/FULLY_CONNECTED ops share the same constant # weight tensor. 
@register_make_test_function() def make_conv_with_shared_weights_tests(options): """Make a test where 2 Conv ops shared the same constant weight tensor.""" test_parameters = [{ "input_shape": [[1, 10, 10, 3]], "filter_shape": [[3, 3]], "strides": [[1, 1, 1, 1]], "dilations": [[1, 1, 1, 1]], "padding": ["SAME"], "data_format": ["NHWC"], "channel_multiplier": [1], }] def get_tensor_shapes(parameters): input_shape = parameters["input_shape"] filter_size = parameters["filter_shape"] filter_shape = filter_size + [ input_shape[3], parameters["channel_multiplier"] ] return [input_shape, filter_shape] def build_graph(parameters): """Build a conv graph given `parameters`.""" input_shape, filter_shape = get_tensor_shapes(parameters) input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=input_shape) input_tensors = [input_tensor] # Construct a constant weights tensor which will be used by both Conv2D. filter_tensor = tf.constant( create_tensor_data(np.float32, filter_shape), dtype=tf.float32) # Ensure that FuseBinaryIntoFollowingAffine works with an input which # is shared by multiple affine ops. conv_input = input_tensor + 0.1 # Construct 2 Conv2D operations which use exactly the same input and # weights. result1 = tf.nn.conv2d( conv_input, filter_tensor, strides=parameters["strides"], dilations=parameters["dilations"], padding=parameters["padding"], data_format=parameters["data_format"]) result2 = tf.nn.conv2d( conv_input, filter_tensor, strides=parameters["strides"], dilations=parameters["dilations"], padding=parameters["padding"], data_format=parameters["data_format"]) # Add MUL ops after Conv2D ops. These MUL ops should be fused into the # weights of Conv2D. result1 = result1 * 2 result2 = result2 * 3 # Add the 2 results up. 
out = result1 + result2 return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): # Build list of input values either containing 1 tensor (input) or 2 tensors # (input, filter) based on whether filter is constant or variable input. input_shape, unused_filter_shape = get_tensor_shapes(parameters) values = [create_tensor_data(np.float32, input_shape)] return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) # Note: This is a regression test for a bug (b/112303004) that Toco incorrectly # transforms Conv into DepthwiseConv when two Conv ops share the same constant # weight tensor. @register_make_test_function() def make_conv_to_depthwiseconv_with_shared_weights_tests(options): """Make a test where 2 Conv ops shared the same constant weight tensor.""" test_parameters = [{ "input_shape": [[1, 10, 10, 1]], "filter_shape": [[3, 3]], "strides": [[1, 1, 1, 1]], "dilations": [[1, 1, 1, 1]], "padding": ["SAME"], "data_format": ["NHWC"], "channel_multiplier": [3], }] def get_tensor_shapes(parameters): input_shape = parameters["input_shape"] filter_size = parameters["filter_shape"] filter_shape = filter_size + [ input_shape[3], parameters["channel_multiplier"] ] return [input_shape, filter_shape] def build_graph(parameters): """Build a conv graph given `parameters`.""" input_shape, filter_shape = get_tensor_shapes(parameters) input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=input_shape) # Construct a constant weights tensor which will be used by both Conv2D. filter_tensor = tf.constant( create_tensor_data(np.float32, filter_shape), dtype=tf.float32) input_tensors = [input_tensor] # Construct 2 Conv2D operations which use exactly the same input and # weights. 
result1 = tf.nn.conv2d( input_tensor, filter_tensor, strides=parameters["strides"], dilations=parameters["dilations"], padding=parameters["padding"], data_format=parameters["data_format"]) result2 = tf.nn.conv2d( input_tensor, filter_tensor, strides=parameters["strides"], dilations=parameters["dilations"], padding=parameters["padding"], data_format=parameters["data_format"]) # Add the 2 results up. out = result1 + result2 return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): # Build list of input values either containing 1 tensor (input) or 2 tensors # (input, filter) based on whether filter is constant or variable input. input_shape, unused_filter_shape = get_tensor_shapes(parameters) values = [create_tensor_data(np.float32, input_shape)] return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_depthwiseconv_tests(options): """Make a set of tests to do convolution.""" # Tensorflow only supports equal strides test_parameters = [ { "input_shape": [[1, 3, 4, 3], [1, 10, 10, 3]], "filter_size": [[1, 1], [1, 2], [3, 3]], "strides": [[1, 1, 1, 1], [1, 3, 3, 1]], "dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]], "channel_multiplier": [1, 2], "rate": [[1, 1]], "padding": ["SAME", "VALID"], "data_format": ["NHWC"], "constant_filter": [True, False], }, { "input_shape": [[1, 3, 4, 3]], "filter_size": [[1, 1]], "strides": [[1, 1, 2, 1]], # TF needs [1, x, x, 1] "dilations": [[1, 1, 1, 1], [1, 2, 2, 1]], "channel_multiplier": [2], "rate": [[2, 2]], # Only [1, 1] is supported "padding": ["SAME"], "data_format": ["NHWC"], "constant_filter": [True, False], } ] def get_tensor_shapes(parameters): input_shape = parameters["input_shape"] filter_size = parameters["filter_size"] filter_shape = filter_size + [ input_shape[3], parameters["channel_multiplier"] ] return [input_shape, filter_shape] def build_graph(parameters): """Build 
a depthwise conv graph given `parameters`.""" input_shape, filter_shape = get_tensor_shapes(parameters) input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=input_shape) # Get filter input either as a placeholder or constants. Also get a list of # the input tensors that are represented as placeholders. if parameters["constant_filter"]: filter_input = create_tensor_data(np.float32, filter_shape) input_tensors = [input_tensor] else: filter_input = tf.placeholder( dtype=tf.float32, name="filter", shape=filter_shape) input_tensors = [input_tensor, filter_input] out = tf.nn.depthwise_conv2d( input_tensor, filter_input, strides=parameters["strides"], rate=parameters["rate"], padding=parameters["padding"], data_format=parameters["data_format"]) return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): # Build list of input values either containing 1 tensor (input) or 2 tensors # (input, filter) based on whether filter is constant or variable input. input_shape, filter_shape = get_tensor_shapes(parameters) values = [create_tensor_data(np.float32, input_shape)] if not parameters["constant_filter"]: values.append(create_tensor_data(np.float32, filter_shape)) return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, expected_tf_failures=4) @register_make_test_function() def make_split_tests(options): """Make a set of tests to do tf.split.""" test_parameters = [{ "input_shape": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]], "num_or_size_splits": [1, 2, 3, 4, 5], "axis": [0, 1, 2, 3, -4, -3, -2, -1], }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) out = tf.split( input_tensor, parameters["num_or_size_splits"], parameters["axis"]) return [input_tensor], [out[0]] def build_inputs(parameters, sess, inputs, outputs): values = [create_tensor_data(np.float32, parameters["input_shape"])] 

    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=112)


@register_make_test_function()
def make_splitv_tests(options):
  """Make a set of tests to do tf.split with explicit size_splits (SplitV)."""

  test_parameters = [{
      "input_shape": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]],
      # A -1 entry means "infer this split size from the remainder".
      "size_splits": [[2, 2], [1, 3], [4, 2], [5, 3], [-1, 1], [-1, 2],
                      [-1, 4]],
      "axis": [0, 1, 2, 3, -4, -3, -2, -1],
  }]

  def build_graph(parameters):
    """Build the graph; only the first split output is returned."""
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    out = tf.split(input_tensor, parameters["size_splits"],
                   parameters["axis"])
    return [input_tensor], [out[0]]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feeds one random float32 tensor."""
    values = [create_tensor_data(np.float32, parameters["input_shape"])]
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  # Many parameter combinations are invalid for TF (axis out of range or
  # size_splits not matching the dimension), hence the large failure count.
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=158)


@register_make_test_function()
def make_concat_tests(options):
  """Make a set of tests to do concatenation."""

  test_parameters = [{
      "base_shape": [[1, 3, 4, 3], [3, 4]],
      "num_tensors": [1, 2, 3, 4, 5, 6],
      "axis": [0, 1, 2, 3, -3, -2, -1],
      "type": [tf.float32, tf.uint8, tf.int32, tf.int64],
  }]

  def get_shape(parameters, delta):
    """Return a tweaked version of 'base_shape'."""
    # Each input tensor gets a different size along the concat axis so the
    # test covers unequal operand shapes.
    axis = parameters["axis"]
    shape = parameters["base_shape"][:]
    if axis < 0:
      axis += len(shape)
    if axis < len(shape):
      shape[axis] += delta
    return shape

  def build_graph(parameters):
    """Build a concat of `num_tensors` placeholders along `axis`."""
    all_tensors = []
    for n in range(0, parameters["num_tensors"]):
      input_tensor = tf.placeholder(
          dtype=parameters["type"],
          name=("input%d" % n),
          shape=get_shape(parameters, n))
      all_tensors.append(input_tensor)
    out = tf.concat(all_tensors, parameters["axis"])
    return all_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feeds one random tensor per concat operand."""
    all_values = []
    for n in range(0, parameters["num_tensors"]):
      input_values = create_tensor_data(
          parameters["type"],
get_shape(parameters, n)) all_values.append(input_values) return all_values, sess.run( outputs, feed_dict=dict(zip(inputs, all_values))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, expected_tf_failures=60) @register_make_test_function() def make_fully_connected_tests(options): """Make a set of tests to do fully_connected.""" test_parameters = [{ "shape1": [[3, 3]], "shape2": [[3, 3]], "transpose_a": [True, False], "transpose_b": [True, False], "constant_filter": [True, False], }, { "shape1": [[4, 4], [1, 4], [4]], "shape2": [[4, 4], [4, 1], [4]], "transpose_a": [False], "transpose_b": [False], "constant_filter": [True, False], }, { "shape1": [[40, 37]], "shape2": [[37, 40]], "transpose_a": [False], "transpose_b": [False], "constant_filter": [True, False], }, { "shape1": [[40, 37]], "shape2": [[40, 37]], "transpose_a": [False], "transpose_b": [True], "constant_filter": [True, False], }] def build_graph(parameters): """Build a matmul graph given `parameters`.""" input_tensor1 = tf.placeholder(dtype=tf.float32, name="input1", shape=parameters["shape1"]) # Get input_tensor2 either as a placeholder or constants. Also get a list of # the input tensors that are represented as placeholders. if parameters["constant_filter"]: input_tensor2 = create_tensor_data(np.float32, parameters["shape2"]) input_tensors = [input_tensor1] else: input_tensor2 = tf.placeholder( dtype=tf.float32, name="input2", shape=parameters["shape2"]) input_tensors = [input_tensor1, input_tensor2] out = tf.matmul(input_tensor1, input_tensor2, transpose_a=parameters["transpose_a"], transpose_b=parameters["transpose_b"]) return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): # Build list of input values either containing 1 tensor (input_values1) or 2 # tensors (input_values1, input_values2) based on whether the second input # is a constant or variable input. 
values = [create_tensor_data(np.float32, shape=parameters["shape1"])] if not parameters["constant_filter"]: values.append(create_tensor_data(np.float32, parameters["shape2"])) return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, expected_tf_failures=10) @register_make_test_function() def make_l2norm_tests(options): """Make a set of tests to do l2norm.""" # Chose a set of parameters test_parameters = [{ "input_shape": [[5, 7], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]], "dim": [0, 1, 2, 3, [2, 3], -2], "epsilon": [None, 1e-12, 1e-3], }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) if parameters["epsilon"]: out = tf.nn.l2_normalize( input_tensor, parameters["dim"], epsilon=parameters["epsilon"]) else: out = tf.nn.l2_normalize(input_tensor, parameters["dim"]) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data( np.float32, parameters["input_shape"], min_value=-4, max_value=10) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, expected_tf_failures=9) @register_make_test_function() def make_local_response_norm_tests(options): """Make a set of tests to do local_response_norm.""" # Chose a set of parameters test_parameters = [{ "input_shape": [[1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3]], "depth_radius": [None, 0, 1, 3, 5], "bias": [None, 0.3, -0.1], "alpha": [None, 2, -3], "beta": [None, 0.25, 2], }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) out = tf.nn.local_response_normalization( input_tensor, depth_radius=parameters["depth_radius"], bias=parameters["bias"], alpha=parameters["alpha"], beta=parameters["beta"]) 
return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data( np.float32, parameters["input_shape"], min_value=-4, max_value=10) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_pad_tests(options): """Make a set of tests to do pad.""" # TODO(nupurgarg): Add test for tf.uint8. test_parameters = [ # 4D: { "dtype": [tf.int32, tf.int64, tf.float32], "input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]], "paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0], [0, 0], [2, 3]]], "constant_paddings": [True, False], }, # 2D: { "dtype": [tf.int32, tf.int64, tf.float32], "input_shape": [[1, 2]], "paddings": [[[0, 1], [2, 3]]], "constant_paddings": [True, False], }, # 1D: { "dtype": [tf.int32], "input_shape": [[1]], "paddings": [[[1, 2]]], "constant_paddings": [False], }, ] def build_graph(parameters): """Build a pad graph given `parameters`.""" input_tensor = tf.placeholder( dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) # Get paddings as either a placeholder or constants. 
if parameters["constant_paddings"]: paddings = parameters["paddings"] input_tensors = [input_tensor] else: shape = [len(parameters["paddings"]), 2] paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape) input_tensors = [input_tensor, paddings] out = tf.pad(input_tensor, paddings=paddings) return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data(parameters["dtype"], parameters["input_shape"]) ] if not parameters["constant_paddings"]: values.append(np.array(parameters["paddings"])) return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_padv2_tests(options): """Make a set of tests to do padv2.""" # TODO(nupurgarg): Add test for tf.uint8. test_parameters = [ # 4D: { "dtype": [tf.int32, tf.int64, tf.float32], "input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]], "paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0], [0, 0], [2, 3]]], "constant_paddings": [True, False], "constant_values": [0, 2], }, # 2D: { "dtype": [tf.int32, tf.int64, tf.float32], "input_shape": [[1, 2]], "paddings": [[[0, 1], [2, 3]]], "constant_paddings": [True, False], "constant_values": [0, 2], }, # 1D: { "dtype": [tf.int32], "input_shape": [[1]], "paddings": [[[0, 1]]], "constant_paddings": [False], "constant_values": [0, 2], }, ] def build_graph(parameters): """Build a pad graph given `parameters`.""" input_tensor = tf.placeholder( dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) # Get paddings as either a placeholder or constants. 
if parameters["constant_paddings"]: paddings = parameters["paddings"] input_tensors = [input_tensor] else: shape = [len(parameters["paddings"]), 2] paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape) input_tensors = [input_tensor, paddings] out = tf.pad(input_tensor, paddings=paddings, constant_values=parameters["constant_values"]) return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data(parameters["dtype"], parameters["input_shape"]) ] if not parameters["constant_paddings"]: values.append(np.array(parameters["paddings"])) return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_reshape_tests(options): """Make a set of tests to do reshape.""" # All shapes below are suitable for tensors with 420 elements. test_parameters = [{ "dtype": [tf.float32, tf.int32], "input_shape": [[3, 4, 5, 7], [4, 105], [21, 5, 2, 2], [420]], "output_shape": [[15, 28], [420], [1, -1, 5, 7], [-1]], "constant_shape": [True, False], }, { "dtype": [tf.float32], "input_shape": [[1]], "output_shape": [[]], "constant_shape": [True, False], }] def build_graph(parameters): input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) # Get shape as either a placeholder or constants. if parameters["constant_shape"]: output_shape = parameters["output_shape"] input_tensors = [input_tensor] else: # The shape of the shape tensor. 
shape_tensor_shape = [len(parameters["output_shape"])] output_shape = tf.placeholder( dtype=tf.int32, name="output_shape", shape=shape_tensor_shape) input_tensors = [input_tensor, output_shape] out = tf.reshape(input_tensor, shape=output_shape) return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data(parameters["dtype"], parameters["input_shape"]) ] if not parameters["constant_shape"]: values.append(np.array(parameters["output_shape"])) return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_shape_tests(options): """Make a set of tests to do shape.""" test_parameters = [{ "input_dtype": [tf.float32, tf.int32], "input_shape": [[], [0], [1, 1, 1, 3], [2, 3, 4, 5], [5, 5], [10]], "out_type": [tf.int32, tf.int64], }] def build_graph(parameters): """Build the shape op testing graph.""" # Note that we intentionally leave out the shape from the input placeholder # to prevent the Shape operation from being optimized out during conversion. 

    # No `shape` argument on purpose (see the note above): a fully-known
    # static shape would let the converter fold the Shape op away.
    input_value = tf.placeholder(dtype=parameters["input_dtype"], name="input")
    out = tf.shape(input_value, out_type=parameters["out_type"])
    return [input_value], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feeds one random tensor of the parameterized shape."""
    input_value = create_tensor_data(parameters["input_dtype"],
                                     parameters["input_shape"])
    return [input_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_rank_tests(options):
  """Make a set of tests to do rank."""

  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32],
      "input_shape": [[], [0], [1, 1, 1, 3], [2, 3, 4, 5], [5, 5], [10]],
  }]

  def build_graph(parameters):
    """Build the rank op testing graph."""
    # Shape is intentionally omitted from the placeholder so Rank is not
    # constant-folded away during conversion.
    input_value = tf.placeholder(dtype=parameters["input_dtype"], name="input")
    out = tf.rank(input_value)
    return [input_value], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feeds one random tensor of the parameterized shape."""
    input_value = create_tensor_data(parameters["input_dtype"],
                                     parameters["input_shape"])
    return [input_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_one_hot_tests(options):
  """Make a set of tests to do one_hot."""

  test_parameters = [{
      "indices_type": [tf.int32, tf.int64],
      "indices_shape": [[3], [4, 4], [1, 5], [5, 1]],
      "axis": [0, 1],
      "dtype": [tf.int32, tf.int64, tf.float32],
      # When False, on_value/off_value/axis/dtype are left at their defaults.
      "provide_optional_inputs": [True, False],
  }]

  def build_graph(parameters):
    """Build one_hot with or without the optional on/off-value inputs."""
    indices = tf.placeholder(
        dtype=parameters["indices_type"],
        name="indices",
        shape=parameters["indices_shape"])
    depth = tf.placeholder(dtype=tf.int32, name="depth", shape=())

    if not parameters["provide_optional_inputs"]:
      out = tf.one_hot(indices=indices, depth=depth)
      return [indices, depth], [out]

    on_value = tf.placeholder(
        dtype=parameters["dtype"], name="on_value", shape=())
    off_value = tf.placeholder(
        dtype=parameters["dtype"], name="off_value", shape=())
    out = 
tf.one_hot( indices=indices, depth=depth, on_value=on_value, off_value=off_value, axis=parameters["axis"], dtype=parameters["dtype"]) return [indices, depth, on_value, off_value], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = [ create_tensor_data( parameters["indices_type"], shape=parameters["indices_shape"], min_value=-1, max_value=10), create_tensor_data(tf.int32, shape=None, min_value=1, max_value=10), ] if parameters["provide_optional_inputs"]: input_values.append( create_tensor_data( parameters["dtype"], shape=None, min_value=1, max_value=10)) input_values.append( create_tensor_data( parameters["dtype"], shape=None, min_value=-1, max_value=0)) return input_values, sess.run( outputs, feed_dict=dict(zip(inputs, input_values))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_resize_bilinear_tests(options): """Make a set of tests to do resize_bilinear.""" test_parameters = [{ "dtype": [tf.float32, tf.int32], "input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]], "size": [[1, 1], [4, 3], [2, 2], [5, 6]], "align_corners": [None, True, False], }] def build_graph(parameters): input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) out = tf.image.resize_bilinear(input_tensor, size=parameters["size"], align_corners=parameters["align_corners"]) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data(parameters["dtype"], parameters["input_shape"]) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_resize_nearest_neighbor_tests(options): """Make a set of tests to do resize_nearest_neighbor.""" test_parameters = [{ "dtype": [tf.float32, tf.int32], "input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]], "size": [[1, 1], [4, 3], [2, 2], [5, 6]], 
      "align_corners": [False],
  }]

  def build_graph(parameters):
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    out = tf.image.resize_nearest_neighbor(
        input_tensor,
        size=parameters["size"],
        align_corners=parameters["align_corners"])
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_sigmoid_tests(options):
  """Make a set of tests to do sigmoid."""

  test_parameters = [{
      "dtype": [tf.float32],
      "input_shape": [[1, 3, 4, 3], [4], [], [1, 2, 3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
                                  shape=parameters["input_shape"])
    out = tf.sigmoid(input_tensor)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_softmax_tests(options):
  """Make a set of tests to do softmax."""

  test_parameters = [{
      "dtype": [tf.float32],
      "input_shape": [[1, 3, 4, 3], [2, 3]],
      "dim": [-1, 0],
  }, {
      "dtype": [tf.float32],
      "input_shape": [[4, 7]],
      "dim": [-1, 1],
  }]

  def build_graph(parameters):
    input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
                                  shape=parameters["input_shape"])
    # `dim` is the TF-1.x name of the softmax axis argument.
    out = tf.nn.softmax(input_tensor, dim=parameters["dim"])
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    return [input_values], sess.run(
        outputs,
        feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_space_to_depth_tests(options):
  """Make a set of tests to do space_to_depth."""

  test_parameters = [{
      "dtype": [tf.float32, tf.int32, tf.uint8, tf.int64],
      "input_shape": [[2, 12, 24, 1]],
      "block_size": [2, 3, 4],
  }]

  def build_graph(parameters):
    input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
                                  shape=parameters["input_shape"])
    out = tf.space_to_depth(input_tensor, block_size=parameters["block_size"])
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_space_to_batch_nd_tests(options):
  """Make a set of tests to do space_to_batch_nd."""

  # TODO(nupurgarg): Add test for uint8.
  test_parameters = [
      {
          "dtype": [tf.int32, tf.int64, tf.float32],
          "input_shape": [[1, 2, 2, 3], [2, 2, 4, 1]],
          "block_shape": [[1, 3], [2, 2]],
          "paddings": [[[0, 0], [0, 0]], [[0, 0], [2, 0]], [[1, 1], [1, 1]]],
          "constant_block_shape": [True, False],
          "constant_paddings": [True, False],
      },
      {
          "dtype": [tf.float32],
          "input_shape": [[2, 3, 7, 3]],
          "block_shape": [[1, 3], [2, 2]],
          "paddings": [[[0, 0], [2, 0]], [[1, 0], [1, 0]]],
          "constant_block_shape": [True, False],
          "constant_paddings": [True, False],
      },
      # Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
      {
          "dtype": [tf.float32],
          "input_shape": [[1, 4, 4, 4, 1, 1]],
          "block_shape": [[2, 2, 2]],
          "paddings": [[[0, 0], [0, 0], [0, 0]]],
          "constant_block_shape": [True, False],
          "constant_paddings": [True, False],
      },
  ]

  def build_graph(parameters):
    """Build a space_to_batch graph given `parameters`."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    input_tensors = [input_tensor]

    # Get block_shape either as a const or as a placeholder (tensor).
    if parameters["constant_block_shape"]:
      block_shape = parameters["block_shape"]
    else:
      shape = [len(parameters["block_shape"])]
      block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape)
      input_tensors.append(block_shape)

    # Get paddings either as a const or as a placeholder (tensor).
    if parameters["constant_paddings"]:
      paddings = parameters["paddings"]
    else:
      shape = [len(parameters["paddings"]), 2]
      paddings = tf.placeholder(dtype=tf.int32, name="paddings", shape=shape)
      input_tensors.append(paddings)

    out = tf.space_to_batch_nd(input_tensor, block_shape, paddings)
    return input_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    # Only non-constant block_shape/paddings become fed inputs.
    values = [
        create_tensor_data(parameters["dtype"], parameters["input_shape"])
    ]
    if not parameters["constant_block_shape"]:
      values.append(np.array(parameters["block_shape"]))
    if not parameters["constant_paddings"]:
      values.append(np.array(parameters["paddings"]))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=56)


@register_make_test_function()
def make_batch_to_space_nd_tests(options):
  """Make a set of tests to do batch_to_space_nd."""

  test_parameters = [
      {
          "dtype": [tf.float32, tf.int64, tf.int32],
          "input_shape": [[12, 3, 3, 1]],
          "block_shape": [[1, 4], [2, 2], [3, 4]],
          "crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
          "constant_block_shape": [True, False],
          "constant_crops": [True, False],
      },
      # Single batch (no-op)
      {
          "dtype": [tf.float32],
          "input_shape": [[1, 3, 3, 1]],
          "block_shape": [[1, 1]],
          "crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
          "constant_block_shape": [True],
          "constant_crops": [True],
      },
      # Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
      {
          "dtype": [tf.float32],
          "input_shape": [[8, 2, 2, 2, 1, 1]],
          "block_shape": [[2, 2, 2]],
          "crops": [[[0, 0], [0, 0], [0, 0]]],
          "constant_block_shape": [True, False],
          "constant_crops": [True, False],
      },
  ]

  def build_graph(parameters):
    """Build a batch_to_space graph given `parameters`."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    input_tensors = [input_tensor]

    # Get block_shape either as a const or as a placeholder (tensor).
    if parameters["constant_block_shape"]:
      block_shape = parameters["block_shape"]
    else:
      shape = [len(parameters["block_shape"])]
      block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape)
      input_tensors.append(block_shape)

    # Get crops either as a const or as a placeholder (tensor).
    if parameters["constant_crops"]:
      crops = parameters["crops"]
    else:
      shape = [len(parameters["crops"]), 2]
      crops = tf.placeholder(dtype=tf.int32, name="crops", shape=shape)
      input_tensors.append(crops)

    out = tf.batch_to_space_nd(input_tensor, block_shape, crops)
    return input_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    values = [
        create_tensor_data(parameters["dtype"], parameters["input_shape"])
    ]
    if not parameters["constant_block_shape"]:
      values.append(np.array(parameters["block_shape"]))
    if not parameters["constant_crops"]:
      values.append(np.array(parameters["crops"]))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_transpose_tests(options):
  """Make a set of tests to do transpose."""

  # TODO(nupurgarg): Add test for uint8.
  test_parameters = [{
      "dtype": [tf.int32, tf.int64, tf.float32],
      "input_shape": [[2, 2, 3]],
      "perm": [[0, 1, 2], [0, 2, 1]],
      "constant_perm": [True, False],
  }, {
      "dtype": [tf.float32],
      "input_shape": [[1, 2, 3, 4]],
      "perm": [[0, 1, 2, 3], [3, 0, 1, 2]],
      "constant_perm": [True, False],
  }, {
      "dtype": [tf.float32],
      "input_shape": [[1, 2, 3, 4, 5]],
      "perm": [[4, 3, 2, 1, 0]],
      "constant_perm": [True, False],
  }]

  def build_graph(parameters):
    """Build a transpose graph given `parameters`."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])

    if parameters["constant_perm"]:
      perm = parameters["perm"]
      input_tensors = [input_tensor]
    else:
      shape = [len(parameters["perm"]), 2]
      perm = tf.placeholder(dtype=tf.int32, name="perm", shape=shape)
      input_tensors = [input_tensor, perm]

    out = tf.transpose(input_tensor, perm=perm)
    return input_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    values = [
        create_tensor_data(parameters["dtype"], parameters["input_shape"])
    ]
    if not parameters["constant_perm"]:
      values.append(np.array(parameters["perm"]))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=9)


@register_make_test_function()
def make_squeeze_tests(options):
  """Make a set of tests to do squeeze."""

  # Axis lists include duplicates and negative indices on purpose.
  test_parameters = [{
      "dtype": [tf.int32, tf.float32, tf.int64],
      "input_shape": [[1, 2, 1, 3, 1, 4, 1, 1]],
      "axis": [
          None, [], [0, 2], [4, 7], [-1, 0, 2, 0, 7, -6], [1], [2, 3, 2],
          [-1, -2, -4, -6, -8], [0, 2, 4, 6, 7], [7, 6, 4, 2, 0], [6, 6],
          [0, 1, 2, 3, 4, 5, 6, 7], [-2, -3, 1, 0, 7, -5]
      ],
  }, {
      "dtype": [tf.int32, tf.float32, tf.int64],
      "input_shape": [[1]],
      "axis": [None, [], [0], [-1]],
  }, {
      "dtype": [tf.int32, tf.float32, tf.int64],
      "input_shape": [[1, 1, 1, 1, 1]],
      "axis": [None, [], [0], [3, 0], [-2, 0, 3, 2]],
  }]

  def build_graph(parameters):
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    out = tf.squeeze(input_tensor, axis=parameters["axis"])
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=12)


@register_make_test_function()
def make_squeeze_transpose_tests(options):
  """Make a set of tests to do squeeze followed by transpose."""

  test_parameters = [{
      "dtype": [tf.int32, tf.float32, tf.int64],
      "input_shape": [[1, 4, 10, 1]],
      "axis": [[-1], [3]],
  }]

  def build_graph(parameters):
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    out = tf.squeeze(input_tensor, axis=parameters["axis"])
    out = tf.transpose(out, perm=[1, 2])
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=0)


def _make_strided_slice_tests(options, test_parameters,
                              expected_tf_failures=0):
  """Utility function to make strided_slice_tests based on parameters."""

  def build_graph(parameters):
    """Build graph for stride_slice test."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    if parameters["constant_indices"]:
      begin = parameters["begin"]
      end = parameters["end"]
      strides = parameters["strides"]
      tensors = [input_tensor]
    else:
      begin = tf.placeholder(
          dtype=parameters["index_type"],
          name="begin",
          shape=[len(parameters["input_shape"])])
      end = tf.placeholder(
          dtype=parameters["index_type"],
          name="end",
          shape=[len(parameters["input_shape"])])
      strides = (
          tf.placeholder(
              dtype=parameters["index_type"],
              name="strides",
              shape=[len(parameters["input_shape"])])
          if parameters["strides"] is not None else None)
      tensors = [input_tensor, begin, end]
      # strides is optional; only append it when the test provides one.
      if strides is not None:
        tensors.append(strides)
    out = tf.strided_slice(
        input_tensor,
        begin,
        end,
        strides,
        begin_mask=parameters["begin_mask"],
        end_mask=parameters["end_mask"])
    return tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Build inputs for stride_slice test."""
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    index_type = _TF_TYPE_INFO[parameters["index_type"]][0]
    values = [input_values]
    if not parameters["constant_indices"]:
      begin_values = np.array(parameters["begin"]).astype(index_type)
      end_values = np.array(parameters["end"]).astype(index_type)
      stride_values = (
          np.array(parameters["strides"]).astype(index_type)
          if parameters["strides"] is not None else None)
      values.append(begin_values)
      values.append(end_values)
      if stride_values is not None:
        values.append(stride_values)

    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=expected_tf_failures)


@register_make_test_function()
def make_strided_slice_tests(options):
  """Make a set of tests to do strided_slice."""

  # TODO(soroosh): add test/support for uint8.
  test_parameters = [
      # 4-D (basic cases with const/non-const indices).
      {
          "dtype": [tf.float32, tf.int32, tf.int64],
          "index_type": [tf.int32],
          "input_shape": [[12, 2, 2, 5]],
          "strides": [None, [2, 1, 3, 1]],
          "begin": [[0, 0, 0, 0]],
          "end": [[12, 2, 2, 5]],
          "begin_mask": [None],
          "end_mask": [None],
          "shrink_axis_mask": [None],
          "constant_indices": [False, True],
      },
      # 4-D with non-trivial begin & end.
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[12, 2, 2, 5]],
          "begin": [[0, 0, 0, 0], [1, 0, 1, 0]],
          "end": [[8, 2, 2, 3], [12, 2, 2, 5]],
          "strides": [None, [2, 1, 3, 1]],
          "begin_mask": [None, 8],
          "end_mask": [None, 3],
          "shrink_axis_mask": [None, 15, -1],
          "constant_indices": [True],
      },
      # Begin, end, strides dim are different from input shape
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[12, 2, 2, 5]],
          "begin": [[0]],
          "end": [[1]],
          "strides": [None, [1]],
          "begin_mask": [0],
          "end_mask": [0],
          "shrink_axis_mask": [1],
          "constant_indices": [True],
      },
      # 2-D
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[2, 3]],
          "begin": [[0, 0]],
          "end": [[2, 2]],
          "strides": [None, [2, 2]],
          "begin_mask": [None, 1, 2],
          "end_mask": [None, 1, 2],
          "shrink_axis_mask": [None, 1, 2, 3, -1],
          "constant_indices": [False, True],
      },
      # Negative strides
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[2, 3]],
          "begin": [[0, -1]],
          "end": [[2, -3]],
          "strides": [[1, -1]],
          "begin_mask": [None, 1, 2],
          "end_mask": [None, 1, 2],
          "shrink_axis_mask": [None, 1, 2, 3, -1],
          "constant_indices": [False],
      },
  ]
  _make_strided_slice_tests(options, test_parameters, expected_tf_failures=2)


@register_make_test_function()
def make_strided_slice_1d_exhaustive_tests(options):
  """Make a set of exhaustive tests for 1D strided_slice."""

  test_parameters = [
      # 1-D Exhaustive
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[3]],
          "begin": [[-2], [-1], [0], [1], [2]],
          "end": [[-2], [-1], [0], [1], [2]],
          "strides": [[-2], [-1], [1], [2]],
          "begin_mask": [0, 1],
          "end_mask": [0, 1],
          "shrink_axis_mask": [0],
          "constant_indices": [False],
      },
  ]
  _make_strided_slice_tests(options, test_parameters)


# For verifying https://github.com/tensorflow/tensorflow/issues/23599
# TODO(chaomei): refactor the test to cover more cases, like negative stride,
# negative array index etc.
@register_make_test_function()
def make_resolve_constant_strided_slice_tests(options):
  """Make a set of tests to show strided_slice yields incorrect results."""

  test_parameters = [{
      "unused_iteration_counter": [1],
  }]

  def build_graph(parameters):
    """Build the strided_slice op testing graph."""
    del parameters
    input_values = tf.placeholder(dtype=tf.float32, shape=[4, 2])
    data = tf.constant([[0, 1, 2, 3],
                        [4, 5, 6, 7],
                        [8, 9, 10, 11],
                        [12, 13, 14, 15]], tf.float32)
    # The slice data[:, :2] is constant-foldable; the add keeps it live.
    return [input_values], [input_values + data[:, :2]]

  def build_inputs(parameters, sess, inputs, outputs):
    del parameters
    input_values = np.zeros([4, 2], dtype=np.float32)
    return [input_values], sess.run(
        outputs, feed_dict={inputs[0]: input_values})

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_lstm_tests(options):
  """Make a set of tests to do basic Lstm cell."""

  test_parameters = [
      {
          "dtype": [tf.float32],
          "num_batchs": [1],
          "time_step_size": [1],
          "input_vec_size": [3],
          "num_cells": [4],
          "split_tflite_lstm_inputs": [False],
      },
  ]

  def build_graph(parameters):
    """Build a simple graph with BasicLSTMCell."""

    num_batchs = parameters["num_batchs"]
    time_step_size = parameters["time_step_size"]
    input_vec_size = parameters["input_vec_size"]
    num_cells = parameters["num_cells"]
    inputs_after_split = []
    # One placeholder per time step; static_rnn consumes a list of tensors.
    for i in xrange(time_step_size):
      one_timestamp_input = tf.placeholder(
          dtype=parameters["dtype"],
          name="split_{}".format(i),
          shape=[num_batchs, input_vec_size])
      inputs_after_split.append(one_timestamp_input)
    # Currently lstm identifier has a few limitations: only supports
    # forget_bias == 0, inner state activation == tanh.
    # TODO(zhixianyan): Add another test with forget_bias == 1.
    # TODO(zhixianyan): Add another test with relu as activation.
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(
        num_cells, forget_bias=0.0, state_is_tuple=True)
    cell_outputs, _ = rnn.static_rnn(
        lstm_cell, inputs_after_split, dtype=tf.float32)
    out = cell_outputs[-1]
    return inputs_after_split, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed inputs, assign variables, and freeze graph."""

    with tf.variable_scope("", reuse=True):
      kernel = tf.get_variable("rnn/basic_lstm_cell/kernel")
      bias = tf.get_variable("rnn/basic_lstm_cell/bias")
      kernel_values = create_tensor_data(
          parameters["dtype"], [kernel.shape[0], kernel.shape[1]], -1, 1)
      bias_values = create_tensor_data(parameters["dtype"], [bias.shape[0]],
                                       0, 1)
      sess.run(tf.group(kernel.assign(kernel_values),
                        bias.assign(bias_values)))

    num_batchs = parameters["num_batchs"]
    time_step_size = parameters["time_step_size"]
    input_vec_size = parameters["input_vec_size"]
    input_values = []
    for _ in xrange(time_step_size):
      tensor_data = create_tensor_data(parameters["dtype"],
                                       [num_batchs, input_vec_size], 0, 1)
      input_values.append(tensor_data)
    out = sess.run(outputs, feed_dict=dict(zip(inputs, input_values)))
    return input_values, out

  # TODO(zhixianyan): Automatically generate rnn_states for lstm cell.
  extra_toco_options = ExtraTocoOptions()
  # Hand-written RNN state wiring for the converter (see TODO above).
  extra_toco_options.rnn_states = (
      "{state_array:rnn/BasicLSTMCellZeroState/zeros,"
      "back_edge_source_array:rnn/basic_lstm_cell/Add_1,size:4},"
      "{state_array:rnn/BasicLSTMCellZeroState/zeros_1,"
      "back_edge_source_array:rnn/basic_lstm_cell/Mul_2,size:4}")

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      extra_toco_options,
      use_frozen_graph=True)


def make_l2_pool(input_tensor, ksize, strides, padding, data_format):
  """Given an input perform a sequence of TensorFlow ops to produce l2pool."""
  return tf.sqrt(tf.nn.avg_pool(
      tf.square(input_tensor), ksize=ksize, strides=strides,
      padding=padding, data_format=data_format))


@register_make_test_function()
def make_topk_tests(options):
  """Make a set of tests to do topk."""

  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32],
      "input_shape": [[10], [5, 20]],
      "input_k": [None, 1, 3],
  }]

  def build_graph(parameters):
    """Build the topk op testing graph."""
    input_value = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    if parameters["input_k"] is not None:
      k = tf.placeholder(dtype=tf.int32, name="input_k", shape=[])
      inputs = [input_value, k]
    else:
      k = tf.constant(3, name="k")
      inputs = [input_value]
    out = tf.nn.top_k(input_value, k)
    # Only the indices output (out[1]) is tested.
    return inputs, [out[1]]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value = create_tensor_data(parameters["input_dtype"],
                                     parameters["input_shape"])
    if parameters["input_k"] is not None:
      k = np.array(parameters["input_k"], dtype=np.int32)
      return [input_value, k], sess.run(
          outputs, feed_dict=dict(zip(inputs, [input_value, k])))
    else:
      return [input_value], sess.run(
          outputs, feed_dict=dict(zip(inputs, [input_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_arg_min_max_tests(options):
  """Make a set of tests to do arg_max."""

  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32],
      "input_shape": [[], [1, 1, 1, 3], [2, 3, 4, 5], [2, 3, 3], [5, 5],
                      [10]],
      "output_type": [tf.int32, tf.int64],
      "is_arg_max": [True],
  }]

  def build_graph(parameters):
    """Build the arg_min/arg_max op testing graph."""
    input_value = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    # NOTE(review): axis is chosen randomly per generated example.
    axis = random.randint(0, max(len(parameters["input_shape"]) - 1, 0))
    if parameters["is_arg_max"]:
      out = tf.arg_max(input_value, axis,
                       output_type=parameters["output_type"])
    else:
      out = tf.arg_min(input_value, axis,
                       output_type=parameters["output_type"])
    return [input_value], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value = create_tensor_data(parameters["input_dtype"],
                                     parameters["input_shape"])
    return [input_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=4)


@register_make_test_function()
def make_equal_tests(options):
  """Make a set of tests to do equal."""

  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape_pair": [([], []),
                           ([1, 1, 1, 3], [1, 1, 1, 3]),
                           ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                           ([5, 5], [1]), ([10], [2, 4, 10])],
  }]

  def build_graph(parameters):
    """Build the equal op testing graph."""
    input_value1 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input1",
        shape=parameters["input_shape_pair"][0])
    input_value2 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input2",
        shape=parameters["input_shape_pair"][1])
    out = tf.equal(input_value1, input_value2)
    return [input_value1, input_value2], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value1 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_pair"][0])
    input_value2 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_pair"][1])
    return [input_value1, input_value2], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=3)


@register_make_test_function()
def make_not_equal_tests(options):
  """Make a set of tests to do not equal."""

  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
                           ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                           ([5, 5], [1]), ([10], [2, 4, 10])],
  }]

  def build_graph(parameters):
    """Build the not equal op testing graph."""
    input_value1 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input1",
        shape=parameters["input_shape_pair"][0])
    input_value2 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input2",
        shape=parameters["input_shape_pair"][1])
    out = tf.not_equal(input_value1, input_value2)
    return [input_value1, input_value2], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value1 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_pair"][0])
    input_value2 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_pair"][1])
    return [input_value1, input_value2], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=3)


@register_make_test_function()
def make_greater_tests(options):
  """Make a set of tests to do greater."""

  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
                           ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                           ([5, 5], [1]), ([10], [2, 4, 10])],
  }]

  def build_graph(parameters):
    """Build the greater op testing graph."""
    input_value1 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input1",
        shape=parameters["input_shape_pair"][0])
    input_value2 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input2",
        shape=parameters["input_shape_pair"][1])
    out = tf.greater(input_value1, input_value2)
    return [input_value1, input_value2], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value1 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_pair"][0])
    input_value2 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_pair"][1])
    return [input_value1, input_value2], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=3)


@register_make_test_function()
def make_greater_equal_tests(options):
  """Make a set of tests to do greater_equal."""

  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
                           ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                           ([5, 5], [1]), ([10], [2, 4, 10])],
  }]

  def build_graph(parameters):
    """Build the greater_equal op testing graph."""
    input_value1 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input1",
        shape=parameters["input_shape_pair"][0])
    input_value2 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input2",
        shape=parameters["input_shape_pair"][1])
    out = tf.greater_equal(input_value1, input_value2)
    return [input_value1, input_value2], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value1 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_pair"][0])
    input_value2 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_pair"][1])
    return [input_value1, input_value2], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=3)


@register_make_test_function()
def make_less_tests(options):
  """Make a set of tests to do less."""

  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
                           ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                           ([5, 5], [1]), ([10], [2, 4, 10])],
  }]

  def build_graph(parameters):
    """Build the less op testing graph."""
    input_value1 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input1",
        shape=parameters["input_shape_pair"][0])
    input_value2 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input2",
        shape=parameters["input_shape_pair"][1])
    out = tf.less(input_value1, input_value2)
    return [input_value1, input_value2], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value1 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_pair"][0])
    input_value2 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_pair"][1])
    return [input_value1, input_value2], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=3)


@register_make_test_function()
def make_less_equal_tests(options):
  """Make a set of tests to do less_equal."""

  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
                           ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                           ([5, 5], [1]), ([10], [2, 4, 10])],
  }]

  def build_graph(parameters):
    """Build the less_equal op testing graph."""
    input_value1 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input1",
        shape=parameters["input_shape_pair"][0])
    input_value2 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input2",
        shape=parameters["input_shape_pair"][1])
    out = tf.less_equal(input_value1, input_value2)
    return [input_value1, input_value2], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value1 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_pair"][0])
    input_value2 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_pair"][1])
    return [input_value1, input_value2], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=3)


@register_make_test_function()
def make_floor_tests(options):
  """Make a set of tests to do floor."""

  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    """Build the floor op testing graph."""
    input_value = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input1",
        shape=parameters["input_shape"])
    out = tf.floor(input_value)
    return [input_value], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value = create_tensor_data(parameters["input_dtype"],
                                     parameters["input_shape"])
    return [input_value], sess.run(
        outputs, feed_dict={inputs[0]: input_value})

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_ceil_tests(options):
  """Make a set of tests to do ceil."""

  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    """Build the ceil op testing graph."""
    input_value = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input1",
        shape=parameters["input_shape"])
    out = tf.ceil(input_value)
    return [input_value], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value = create_tensor_data(parameters["input_dtype"],
                                     parameters["input_shape"])
    return [input_value], sess.run(
        outputs, feed_dict={inputs[0]: input_value})

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_round_tests(options):
  """Build the round op testing graph."""
  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    """Build the round op testing graph."""
    input_value = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input1",
        shape=parameters["input_shape"])
    out = tf.round(input_value)
    return [input_value], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value = create_tensor_data(parameters["input_dtype"],
                                     parameters["input_shape"])
    return [input_value], sess.run(
        outputs, feed_dict={inputs[0]: input_value})

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_neg_tests(options):
  """Make a set of tests to do neg."""

  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32],
      "input_shape": [[1, 3, 4, 3], [5], []],
  }]

  def build_graph(parameters):
    """Build the neg op testing graph."""
    input_tensor = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    out = tf.negative(input_tensor)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    values = create_tensor_data(parameters["input_dtype"],
                                parameters["input_shape"])
    return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)


@register_make_test_function()
def make_zeros_like_tests(options):
  """Make a set of tests to do zeros_like."""

  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    """Build the zeros_like op testing graph."""
    input_tensor = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    zeros = tf.zeros_like(input_tensor)
    # This maximum node is so that toco can perform the constants-propagation
    # through the above zeros_like, which it can't do if the output of the
    # zeros_like as an output of the whole graphs (graph outputs can't be
    # constants). If toco does not perform such constants-propagation then
    # the resulting tflite graph retains the zeros_like as a Fill op, which
    # is unsupported by TFLite, even as a custom op.
out = tf.maximum(zeros, input_tensor) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): values = create_tensor_data(parameters["input_dtype"], parameters["input_shape"]) return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) def _make_elementwise_tests(op): """Make a set of tests to do element-wise operations.""" def f(options): """Actual function that generates examples.""" test_parameters = [{<|fim▁hole|> "input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]], }] def build_graph(parameters): """Build the unary op testing graph.""" input_value = tf.placeholder( dtype=parameters["input_dtype"], name="input1", shape=parameters["input_shape"]) out = op(input_value) return [input_value], [out] def build_inputs(parameters, sess, inputs, outputs): input_value = create_tensor_data(parameters["input_dtype"], parameters["input_shape"]) return [input_value], sess.run( outputs, feed_dict={inputs[0]: input_value}) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) return f @register_make_test_function() def make_sin_tests(options): """Make a set of tests to do sin.""" return _make_elementwise_tests(tf.sin)(options) @register_make_test_function() def make_log_tests(options): """Make a set of tests to do log.""" return _make_elementwise_tests(tf.log)(options) @register_make_test_function() def make_sqrt_tests(options): """Make a set of tests to do sqrt.""" return _make_elementwise_tests(tf.sqrt)(options) @register_make_test_function() def make_rsqrt_tests(options): """Make a set of tests to do 1/sqrt.""" return _make_elementwise_tests(tf.rsqrt)(options) @register_make_test_function() def make_square_tests(options): """Make a set of tests to do square.""" return _make_elementwise_tests(tf.square)(options) @register_make_test_function() def make_where_tests(options): """Make a set of tests to do where.""" test_parameters = [ { 
"input_dtype": [tf.float32, tf.int32], "input_shape_set": [([1, 2, 3, 4], [1, 2, 3, 4]),], "use_where_v2": [False, True], }, { "input_dtype": [tf.float32, tf.int32], "input_shape_set": [([1, 2, 3, 4], [1, 2, 3, 1]),], "use_where_v2": [True], }, ] def build_graph(parameters): """Build the where op testing graph.""" input_value1 = tf.placeholder( dtype=parameters["input_dtype"], name="input2", shape=parameters["input_shape_set"][0]) input_value2 = tf.placeholder( dtype=parameters["input_dtype"], name="input3", shape=parameters["input_shape_set"][1]) less = tf.less(input_value1, input_value2) where = tf.where_v2 if parameters["use_where_v2"] else tf.where out = where(less, input_value1, input_value2) return [input_value1, input_value2], [out] def build_inputs(parameters, sess, inputs, outputs): input_value1 = create_tensor_data(parameters["input_dtype"], parameters["input_shape_set"][0]) input_value2 = create_tensor_data(parameters["input_dtype"], parameters["input_shape_set"][1]) return [input_value1, input_value2], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_slice_tests(options): """Make a set of tests to do slice.""" # TODO(renjieliu): add test/support for uint8. 
test_parameters = [ # 4-D { "dtype": [tf.float32, tf.int32, tf.int64, tf.string], "index_type": [tf.int32, tf.int64], "input_shape": [[12, 2, 2, 5]], "begin": [[0, 0, 0, 0], [1, 0, 1, 0]], "size": [[8, 2, 2, 3], [11, 2, 1, 5]], }, # 2-D { "dtype": [tf.float32, tf.int32, tf.int64, tf.string], "index_type": [tf.int32, tf.int64], "input_shape": [[2, 3]], "begin": [[0, 0], [1, 0]], "size": [[2, 3], [2, 2]], }, # 4-D with size -1 { "dtype": [tf.float32], "index_type": [tf.int32], "input_shape": [[4, 4, 4, 4]], "begin": [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], "size": [[-1, 1, 1, 1], [1, -1, 1, 1], [1, 1, -1, 1], [1, 1, 1, -1]], }, ] def build_graph(parameters): """Build graph for slice test.""" input_tensor = tf.placeholder( dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) begin = tf.placeholder( dtype=parameters["index_type"], name="begin", shape=[len(parameters["input_shape"])]) size = tf.placeholder( dtype=parameters["index_type"], name="size", shape=[len(parameters["input_shape"])]) tensors = [input_tensor, begin, size] out = tf.slice(input_tensor, begin, size) return tensors, [out] def build_inputs(parameters, sess, inputs, outputs): """Build inputs for slice test.""" input_values = create_tensor_data(parameters["dtype"], parameters["input_shape"]) index_type = _TF_TYPE_INFO[parameters["index_type"]][0] begin_values = np.array(parameters["begin"]).astype(index_type) size_values = np.array(parameters["size"]).astype(index_type) values = [input_values, begin_values, size_values] return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, expected_tf_failures=24) @register_make_test_function() def make_conv2d_transpose_tests(options): """Make a set of tests to do transpose_conv.""" test_parameters = [{ "input_shape": [[1, 50, 54, 3]], "filter_shape": [[1, 1, 8, 3], [1, 2, 8, 3], [1, 3, 8, 3], [1, 4, 8, 3]], "output_shape": [[1, 100, 
108, 8]], "dynamic_output_shape": [True, False], }, { "input_shape": [[1, 16, 1, 512]], "filter_shape": [[4, 1, 512, 512]], "output_shape": [[1, 32, 1, 512]], "dynamic_output_shape": [True, False], }, { "input_shape": [[1, 128, 128, 1]], "filter_shape": [[4, 4, 1, 1]], "output_shape": [[1, 256, 256, 1]], "dynamic_output_shape": [True, False], }] def build_graph(parameters): """Build a transpose_conv graph given `parameters`.""" input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) filter_tensor = tf.placeholder( dtype=tf.float32, name="filter", shape=parameters["filter_shape"]) input_tensors = [input_tensor, filter_tensor] if parameters["dynamic_output_shape"]: output_shape = tf.placeholder(dtype=tf.int32, shape=[4]) input_tensors.append(output_shape) else: output_shape = parameters["output_shape"] out = tf.nn.conv2d_transpose( input_tensor, filter_tensor, output_shape=output_shape, padding="SAME", strides=(1, 2, 2, 1)) return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data(np.float32, parameters["input_shape"]), create_tensor_data(np.float32, parameters["filter_shape"]) ] if parameters["dynamic_output_shape"]: values.append(np.array(parameters["output_shape"])) return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) # Since compute output_shape is fairly complicated for # tf.nn.conv2d_transpose input_sizes argument, so we here first perform a # "conv2d" operation to get the output, then we use the output to feed in # tf.nn.conv2d_backprop_input. # This test will depend on the "conv2d" operation's correctness. 
@register_make_test_function() def make_transpose_conv_tests(options): """Make a set of tests to do transpose_conv.""" # Tensorflow only supports equal strides test_parameters = [{ "input_shape": [[1, 3, 4, 1], [1, 10, 10, 3], [3, 20, 20, 1]], "filter_size": [[1, 1], [1, 2], [3, 3]], "strides": [[1, 1, 1, 1], [1, 3, 3, 1]], "padding": ["SAME", "VALID"], "data_format": ["NHWC"], "channel_multiplier": [1, 2], }] def get_tensor_shapes(parameters): input_shape = parameters["input_shape"] filter_size = parameters["filter_size"] filter_shape = filter_size + [ input_shape[3], parameters["channel_multiplier"] ] return [input_shape, filter_shape] def build_graph(parameters): """Build a transpose_conv graph given `parameters`.""" input_shape, filter_shape = get_tensor_shapes(parameters) input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=input_shape) filter_input = tf.placeholder( dtype=tf.float32, name="filter", shape=filter_shape) conv_outputs = tf.nn.conv2d( input_tensor, filter_input, strides=parameters["strides"], padding=parameters["padding"], data_format=parameters["data_format"]) out = tf.nn.conv2d_backprop_input( input_shape, filter_input, conv_outputs, strides=parameters["strides"], padding=parameters["padding"], data_format=parameters["data_format"]) input_tensors = [input_tensor, filter_input] return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): input_shape, filter_shape = get_tensor_shapes(parameters) values = [ create_tensor_data(np.float32, input_shape), create_tensor_data(np.float32, filter_shape) ] return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_tile_tests(options): """Make a set of tests to do tile.""" test_parameters = [{ "input_dtype": [tf.float32, tf.int32, tf.bool], "input_shape": [[3, 2, 1], [2, 2, 2]], "multiplier_dtype": [tf.int32, tf.int64], "multiplier_shape": [[3]] }] def 
build_graph(parameters): """Build the tile op testing graph.""" input_value = tf.placeholder( dtype=parameters["input_dtype"], shape=parameters["input_shape"], name="input") multiplier_value = tf.placeholder( dtype=parameters["multiplier_dtype"], shape=parameters["multiplier_shape"], name="multiplier") out = tf.tile(input_value, multiplier_value) return [input_value, multiplier_value], [out] def build_inputs(parameters, sess, inputs, outputs): input_value = create_tensor_data(parameters["input_dtype"], parameters["input_shape"]) multipliers_value = create_tensor_data( parameters["multiplier_dtype"], parameters["multiplier_shape"], min_value=0) return [input_value, multipliers_value], sess.run( outputs, feed_dict={ inputs[0]: input_value, inputs[1]: multipliers_value }) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_expand_dims_tests(options): """Make a set of tests to do expand_dims.""" test_parameters = [{ "input_type": [tf.float32, tf.int32], "input_shape": [[5, 4]], "axis_value": [0, 1, 2, -1, -2, -3], "constant_axis": [True, False], }] def build_graph(parameters): """Build the where op testing graph.""" inputs = [] input_value = tf.placeholder( dtype=parameters["input_type"], name="input", shape=parameters["input_shape"]) inputs.append(input_value) if parameters["constant_axis"]: axis_value = tf.constant( parameters["axis_value"], dtype=tf.int32, shape=[1]) else: axis_value = tf.placeholder(dtype=tf.int32, name="axis", shape=[1]) inputs.append(axis_value) out = tf.expand_dims(input_value, axis=axis_value) return inputs, [out] def build_inputs(parameters, sess, inputs, outputs): input_values = [] input_values.append( create_tensor_data(parameters["input_type"], parameters["input_shape"])) if not parameters["constant_axis"]: input_values.append(np.array([parameters["axis_value"]], dtype=np.int32)) return input_values, sess.run( outputs, feed_dict=dict(zip(inputs, input_values))) 
make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_sparse_to_dense_tests(options): """Make a set of tests to do sparse to dense.""" test_parameters = [{ "value_dtype": [tf.float32, tf.int32, tf.int64], "index_dtype": [tf.int32, tf.int64], "value_count": [1, 3, 6, 8], "dense_shape": [[15], [3, 10], [4, 4, 4, 4], [7, 10, 9]], "default_value": [0, -1], "value_is_scalar": [True, False], }] # Return a single value for 1-D dense shape, but a tuple for other shapes. def generate_index(dense_shape): if len(dense_shape) == 1: return np.random.randint(dense_shape[0]) else: index = [] for shape in dense_shape: index.append(np.random.randint(shape)) return tuple(index) def build_graph(parameters): """Build the sparse_to_dense op testing graph.""" dense_shape = parameters["dense_shape"] # Special handle for value_is_scalar case. # value_count must be 1. if parameters["value_is_scalar"] and parameters["value_count"] == 1: value = tf.placeholder( name="value", dtype=parameters["value_dtype"], shape=()) else: value = tf.placeholder( name="value", dtype=parameters["value_dtype"], shape=[parameters["value_count"]]) indices = set() while len(indices) < parameters["value_count"]: indices.add(generate_index(dense_shape)) indices = tf.constant(tuple(indices), dtype=parameters["index_dtype"]) # TODO(renjieliu): Add test for validate_indices case. 
out = tf.sparse_to_dense( indices, dense_shape, value, parameters["default_value"], validate_indices=False) return [value], [out] def build_inputs(parameters, sess, inputs, outputs): if parameters["value_is_scalar"] and parameters["value_count"] == 1: input_value = create_scalar_data(parameters["value_dtype"]) else: input_value = create_tensor_data(parameters["value_dtype"], [parameters["value_count"]]) return [input_value], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_pack_tests(options): """Make a set of tests to do stack.""" test_parameters = [ # Avoid creating all combinations to keep the test size small. { "dtype": [tf.float32], "base_shape": [[3, 4, 3], [3, 4], [5]], "num_tensors": [1, 2, 3, 4, 5, 6], "axis": [0, 1, 2, 3], "additional_shape": [1, 2, 3], }, { "dtype": [tf.int32], "base_shape": [[3, 4, 3], [3, 4], [5]], "num_tensors": [6], "axis": [0, 1, 2, 3], "additional_shape": [1, 2, 3], }, { "dtype": [tf.int64], "base_shape": [[3, 4, 3], [3, 4], [5]], "num_tensors": [5], "axis": [0, 1, 2, 3], "additional_shape": [1, 2, 3], } ] def get_shape(parameters): """Return a tweaked version of 'base_shape'.""" axis = parameters["axis"] shape = parameters["base_shape"][:] if axis < len(shape): shape[axis] += parameters["additional_shape"] return shape def build_graph(parameters): all_tensors = [] for n in range(0, parameters["num_tensors"]): input_tensor = tf.placeholder( dtype=parameters["dtype"], name=("input%d" % n), shape=get_shape(parameters)) all_tensors.append(input_tensor) out = tf.stack(all_tensors, parameters["axis"]) return all_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): all_values = [] for _ in range(0, parameters["num_tensors"]): input_values = create_tensor_data(np.float32, get_shape(parameters)) all_values.append(input_values) return all_values, sess.run( outputs, feed_dict=dict(zip(inputs, 
all_values))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, expected_tf_failures=72) @register_make_test_function() def make_unpack_tests(options): """Make a set of tests to do unstack.""" test_parameters = [{ "base_shape": [[3, 4, 3], [3, 4], [5, 6, 7, 8]], "axis": [0, 1, 2, 3], }] def get_valid_axis(parameters): """Return a tweaked version of 'axis'.""" axis = parameters["axis"] shape = parameters["base_shape"][:] while axis > len(shape) - 1: axis -= 1 return axis def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name=("input"), shape=parameters["base_shape"]) outs = tf.unstack(input_tensor, axis=get_valid_axis(parameters)) return [input_tensor], [outs[0]] def build_inputs(parameters, sess, inputs, outputs): input_value = create_tensor_data(np.float32, shape=parameters["base_shape"]) return [input_value], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_range_tests(options): """Make a set of tests to do range.""" test_parameters = [{ "dtype": [tf.int32, tf.float32], "offset": [10, 100, 1000], "delta": [1, 2, 3, 4, -1, -2, -3, -4], }] def build_graph(parameters): """Build the range op testing graph.""" input_tensor = tf.placeholder( dtype=parameters["dtype"], name=("start"), shape=[]) if parameters["delta"] < 0: offset = parameters["offset"] * -1 else: offset = parameters["offset"] delta = parameters["delta"] limit_tensor = input_tensor + offset delta_tensor = tf.constant(delta, dtype=parameters["dtype"]) out = tf.range(input_tensor, limit_tensor, delta_tensor) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_value = create_scalar_data(parameters["dtype"]) return [input_value], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def 
make_fill_tests(options): """Make a set of tests to do fill.""" test_parameters = [{ "dims_dtype": [tf.int32, tf.int64], "dims_shape": [[], [1], [3], [3, 3]], "value_dtype": [tf.int32, tf.int64, tf.float32], }] def build_graph(parameters): """Build the fill op testing graph.""" input1 = tf.placeholder( dtype=parameters["dims_dtype"], name="dims", shape=parameters["dims_shape"]) input2 = tf.placeholder( dtype=parameters["value_dtype"], name="value", shape=[]) out = tf.fill(input1, input2) return [input1, input2], [out] def build_inputs(parameters, sess, inputs, outputs): input1 = create_tensor_data(parameters["dims_dtype"], parameters["dims_shape"], 1) input2 = create_scalar_data(parameters["value_dtype"]) return [input1, input2], sess.run( outputs, feed_dict=dict(zip(inputs, [input1, input2]))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, expected_tf_failures=12) def _make_logical_tests(op): """Make a set of tests to do logical operations.""" def logical(options, expected_tf_failures=0): """Generate examples.""" test_parameters = [{ "input_shape_pair": [([], []), ([1, 1, 1, 3], [1, 1, 1, 3]), ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]), ([5, 5], [1]), ([10], [2, 4, 10])], }] def build_graph(parameters): """Build the logical testing graph.""" input_value1 = tf.placeholder( dtype=tf.bool, name="input1", shape=parameters["input_shape_pair"][0]) input_value2 = tf.placeholder( dtype=tf.bool, name="input2", shape=parameters["input_shape_pair"][1]) out = op(input_value1, input_value2) return [input_value1, input_value2], [out] def build_inputs(parameters, sess, inputs, outputs): input_value1 = create_tensor_data(tf.bool, parameters["input_shape_pair"][0]) input_value2 = create_tensor_data(tf.bool, parameters["input_shape_pair"][1]) return [input_value1, input_value2], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2]))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, 
expected_tf_failures=expected_tf_failures) return logical @register_make_test_function() def make_logical_or_tests(options): """Make a set of tests to do logical_or.""" return _make_logical_tests(tf.logical_or)(options, expected_tf_failures=1) @register_make_test_function() def make_logical_and_tests(options): """Make a set of tests to do logical_and.""" return _make_logical_tests(tf.logical_and)(options, expected_tf_failures=1) @register_make_test_function() def make_logical_xor_tests(options): """Make a set of tests to do logical_xor. Test logical_not as well. """ return _make_logical_tests(tf.logical_xor)(options, expected_tf_failures=1) @register_make_test_function() def make_mirror_pad_tests(options): """Make a set of tests to do mirror_pad.""" test_parameters = [ { "input_shape": [[2, 3]], "padding_matrix": [[[1, 1], [2, 1]]], "mode": ["REFLECT"], "type": ["const"] }, { "input_shape": [[2, 3]], "padding_matrix": [[[1, 1], [1, 1]]], "mode": ["REFLECT"], "type": ["const"] }, { "input_shape": [[2, 3]], "padding_matrix": [[[1, 1], [2, 1]]], "mode": ["SYMMETRIC"], "type": ["placeholder"] }, { "input_shape": [[2, 3]], "padding_matrix": [[[1, 1], [2, 1]]], "mode": ["REFLECT"], "type": ["placeholder"] }, { "input_shape": [[3]], "padding_matrix": [[[0, 2]]], "mode": ["SYMMETRIC"], "type": ["placeholder"] }, { "input_shape": [[3]], "padding_matrix": [[[0, 2]]], "mode": ["SYMMETRIC"], "type": ["const"] }, { "input_shape": [[3]], "padding_matrix": [[[0, 2]]], "mode": ["REFLECT"], "type": ["const"] }, { "input_shape": [[3, 2, 4, 5]], "padding_matrix": [[[1, 1], [2, 2], [1, 1], [1, 1]]], "mode": ["SYMMETRIC"], "type": ["placeholder"] }, ] def build_graph(parameters): """Build the graph for the test case.""" input_tensor = tf.placeholder( dtype=tf.int32, name="input", shape=parameters["input_shape"]) if parameters["type"] != "const": padding_matrix = tf.placeholder( dtype=tf.int32, name="padding", shape=[len(parameters["input_shape"]), 2]) input_tensors = [input_tensor, 
padding_matrix] else: padding_matrix = tf.constant(np.array(parameters["padding_matrix"])) input_tensors = [input_tensor] output = tf.pad( input_tensor, paddings=padding_matrix, mode=parameters["mode"]) return input_tensors, [output] def build_inputs(parameters, sess, inputs, outputs): input_values = [create_tensor_data(tf.int32, parameters["input_shape"])] if parameters["type"] != "const": input_values.append(np.array(parameters["padding_matrix"])) return input_values, sess.run( outputs, feed_dict=dict(zip(inputs, input_values))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_unroll_batch_matmul_tests(options): """Make a set of tests to test unroll_batch_matmul.""" # The test cases below requires broadcasting support (BatchMatMulV2 semantic), # whis isn't supported as of this change. broadcast_shape_params = [ # Simple broadcast. [(1, 2, 3), (3, 5), False, False], # Empty batch broadcast. [(2, 5, 3), (3, 7), False, False], # Single batch with non-empty batch broadcast. [(1, 5, 3), (4, 3, 7), False, False], # Broadcast both operands [(3, 1, 5, 3), (1, 4, 3, 7), False, False], ] test_parameters = [{ "dtype": [tf.float32], "shape": [ [(2, 2, 3), (2, 3, 2), False, False], [(2, 2, 3), (2, 3, 2), True, True], [(2, 2, 3), (2, 2, 3), False, True], [(2, 2, 3), (2, 2, 3), True, False], [(4, 2, 2, 3), (4, 2, 3, 2), False, False], [(4, 2, 2, 3), (4, 2, 3, 2), True, True], [(4, 2, 2, 3), (4, 2, 2, 3), False, True], [(4, 2, 2, 3), (4, 2, 2, 3), True, False] ] + broadcast_shape_params, # TODO(b/130887442): Improve the forward compatibility tests for every # ops. 
"forward_compatibility_test": [False, True], }] def build_graph(parameters): """Build the batch_matmul op testing graph.""" def _build_graph(): input_tensor1 = tf.placeholder( dtype=parameters["dtype"], shape=parameters["shape"][0]) input_tensor2 = tf.placeholder( dtype=parameters["dtype"], shape=parameters["shape"][1]) # Should be unrolled and replaced with fully_connected ops in the end. out = tf.matmul( input_tensor1, input_tensor2, transpose_a=parameters["shape"][2], transpose_b=parameters["shape"][3]) return [input_tensor1, input_tensor2], [out] if parameters["forward_compatibility_test"]: # This is hardcoded to the date after MatMulV2 is activated. # TODO(b/130887442): Improve the forward compatibility tests for every # ops, and remove the hardcoded date. with tf.compat.forward_compatibility_horizon(2019, 4, 26): return _build_graph() else: return _build_graph() def build_inputs(parameters, sess, inputs, outputs): input_value1 = create_tensor_data( parameters["dtype"], shape=parameters["shape"][0]) input_value2 = create_tensor_data( parameters["dtype"], shape=parameters["shape"][1]) return [input_value1, input_value2], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2]))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_placeholder_with_default_tests(options): """Make a set of tests to test placeholder_with_default.""" test_parameters = [{ "dtype": [tf.float32, tf.int32, tf.int64], }] def build_graph(parameters): """Build the placeholder_with_default testing graph.""" const_node = tf.constant( [1, 2, 2, 0], shape=[2, 2], dtype=parameters["dtype"]) input_tensor = tf.placeholder_with_default( const_node, shape=[2, 2], name="input") out = tf.equal(input_tensor, const_node, name="output") return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): numpy_type = _TF_TYPE_INFO[parameters["dtype"]][0] input_value = np.array([[1, 0], [2, 1]], numpy_type) return 
[input_value], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_unique_tests(options): """Make a set of tests for Unique op.""" test_parameters = [ { "input_shape": [[1]], "index_type": [tf.int32, tf.int64, None], "input_values": [3] }, { "input_shape": [[5]], "index_type": [tf.int32, tf.int64], "input_values": [[3, 2, 1, 2, 3]] }, { "input_shape": [[7]], "index_type": [tf.int32, tf.int64], "input_values": [[1, 1, 1, 1, 1, 1, 1]] }, { "input_shape": [[5]], "index_type": [tf.int32, tf.int64], "input_values": [[3, 2, 1, 0, -1]] }] def build_graph(parameters): """Build the graph for the test case.""" input_tensor = tf.placeholder( dtype=tf.int32, name="input", shape=parameters["input_shape"]) if parameters["index_type"] is None: output = tf.unique(input_tensor) else: output = tf.unique(input_tensor, parameters["index_type"]) return [input_tensor], output def build_inputs(parameters, sess, inputs, outputs): input_values = [create_tensor_data(tf.int32, parameters["input_shape"])] return input_values, sess.run( outputs, feed_dict=dict(zip(inputs, input_values))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_reverse_v2_tests(options): """Make a set of tests to do reverse_v2.""" test_parameters = [{ "base_shape": [[3, 4, 3], [3, 4], [5, 6, 7, 8]], "axis": [0, 1, 2, 3], }] def get_valid_axis(parameters): """Return a tweaked version of 'axis'.""" axis = parameters["axis"] shape = parameters["base_shape"][:] while axis > len(shape) - 1: axis -= 1 return axis def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name=("input"), shape=parameters["base_shape"]) outs = tf.reverse(input_tensor, axis=[get_valid_axis(parameters)]) return [input_tensor], [outs] def build_inputs(parameters, sess, inputs, outputs): input_value = create_tensor_data(np.float32, 
shape=parameters["base_shape"]) return [input_value], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_reverse_sequence_tests(options): """Make a set of tests to do reverse_sequence.""" test_parameters = [ { "input_dtype": [tf.float32, tf.int32, tf.int64], "input_shape": [[8, 4, 5, 5, 6], [4, 4, 3, 5]], "seq_lengths": [[2, 2, 2, 2], [2, 1, 1, 0]], "seq_axis": [0, 3], "batch_axis": [1] }, { "input_dtype": [tf.float32], "input_shape": [[2, 4, 5, 5, 6]], "seq_lengths": [[2, 1]], "seq_axis": [2], "batch_axis": [0] }, { "input_dtype": [tf.float32], "input_shape": [[4, 2]], "seq_lengths": [[3, 1]], "seq_axis": [0], "batch_axis": [1] }] def build_graph(parameters): input_value = tf.placeholder( dtype=parameters["input_dtype"], name="input", shape=parameters["input_shape"]) outs = tf.reverse_sequence( input_value, seq_lengths=parameters["seq_lengths"], batch_axis=parameters["batch_axis"], seq_axis=parameters["seq_axis"]) return [input_value], [outs] def build_inputs(parameters, sess, inputs, outputs): input_value = create_tensor_data(parameters["input_dtype"], parameters["input_shape"]) return [input_value], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_matrix_diag_tests(options): """Make a set of tests for tf.linalg.diag op.""" test_parameters = [ { "input_shape": [[3], [2, 3], [3, 4, 5], [2, 4, 6, 8]], "input_dtype": [tf.int32, tf.float32], }, ] def build_graph(parameters): input_tensor = tf.placeholder( dtype=parameters["input_dtype"], name="input", shape=parameters["input_shape"]) outs = tf.matrix_diag(input_tensor) return [input_tensor], [outs] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data(parameters["input_dtype"], parameters["input_shape"]) return [input_values], sess.run( 
outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_matrix_set_diag_tests(options): """Make a set of tests for tf.linalg.set_diag op.""" test_parameters = [ { "input_diag_shapes": [([3, 3], [3]), ([2, 3], [2]), ([2, 4, 4], [2, 4]), ([3, 4, 5, 6], [3, 4, 5])], "input_dtype": [tf.int32, tf.float32, tf.uint8], }, ] def build_graph(parameters): input_shape = parameters["input_diag_shapes"][0] diag_shape = parameters["input_diag_shapes"][1] input_tensor = tf.placeholder( dtype=parameters["input_dtype"], name="input", shape=input_shape) diag_tensor = tf.placeholder( dtype=parameters["input_dtype"], name="diagonal", shape=diag_shape) outs = tf.matrix_set_diag(input_tensor, diag_tensor) return [input_tensor, diag_tensor], [outs] def build_inputs(parameters, sess, inputs, outputs): input_shape = parameters["input_diag_shapes"][0] diag_shape = parameters["input_diag_shapes"][1] input_values = create_tensor_data(parameters["input_dtype"], input_shape) diag_values = create_tensor_data(parameters["input_dtype"], diag_shape) return [input_values, diag_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values, diag_values]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function() def make_eye_tests(options): """Make a set of tests for tf.eye op.""" test_parameters = [{ "num_rows_shape": [[]], "num_cols_shape": [[]], "batch_shape": [[3], [2, 4], [4, 5, 6], None], "use_num_cols": [True, False], "dtype": [tf.float32, tf.int32], }] def build_graph(parameters): input_tensor0 = tf.placeholder( dtype=tf.int32, name="num_rows", shape=parameters["num_rows_shape"]) input_tensor1 = tf.placeholder( dtype=tf.int32, name="num_columns", shape=parameters["num_cols_shape"]) if parameters["use_num_cols"]: outs = tf.eye( num_rows=input_tensor0, num_columns=input_tensor1, batch_shape=parameters["batch_shape"], 
dtype=parameters["dtype"]) return [input_tensor0, input_tensor1], [outs] else: outs = tf.eye(num_rows=input_tensor0, dtype=parameters["dtype"]) return [input_tensor0], [outs] def build_inputs(parameters, sess, inputs, outputs): input_value0 = create_scalar_data(dtype=np.int32, min_value=1) input_value1 = create_scalar_data(dtype=np.int32, min_value=1) if parameters["use_num_cols"]: return [input_value0, input_value1], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value0, input_value1]))) else: return [input_value0], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value0]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs) @register_make_test_function(name="make_unidirectional_sequence_lstm_tests") @test_util.enable_control_flow_v2 def make_unidirectional_sequence_lstm_tests(options): """Make a set of tests to do unidirectional_sequence_lstm.""" test_parameters = [{ "batch_size": [2, 4, 6], "seq_length": [1, 3], "units": [4, 5], "use_peepholes": [False, True], "is_dynamic_rnn": [False, True] }] def build_graph(parameters): input_values = [] if parameters["is_dynamic_rnn"]: shape = [ parameters["seq_length"], parameters["batch_size"], parameters["units"] ] input_value = tf.placeholder(dtype=tf.float32, name="input", shape=shape) input_values.append(input_value) lstm_cell = tf.lite.experimental.nn.TFLiteLSTMCell( parameters["units"], use_peepholes=parameters["use_peepholes"]) outs, _ = tf.lite.experimental.nn.dynamic_rnn( lstm_cell, input_value, dtype=tf.float32, time_major=True) outs = tf.unstack(outs, axis=1) else: shape = [parameters["batch_size"], parameters["units"]] for i in range(parameters["seq_length"]): input_value = tf.placeholder( dtype=tf.float32, name=("input_%d" % i), shape=shape) input_values.append(input_value) lstm_cell = tf.lite.experimental.nn.TFLiteLSTMCell( parameters["units"], use_peepholes=parameters["use_peepholes"]) outs, _ = tf.nn.static_rnn(lstm_cell, input_values, dtype=tf.float32) real_output = 
tf.zeros([1], dtype=tf.float32) + outs[-1] return input_values, [real_output] def build_inputs(parameters, sess, inputs, outputs): input_values = [] if parameters["is_dynamic_rnn"]: shape = [ parameters["seq_length"], parameters["batch_size"], parameters["units"] ] input_value = create_tensor_data(tf.float32, shape) input_values.append(input_value) else: shape = [parameters["batch_size"], parameters["units"]] for i in range(parameters["seq_length"]): input_value = create_tensor_data(tf.float32, shape) input_values.append(input_value) init = tf.global_variables_initializer() sess.run(init) # Tflite fused kernel takes input as [time, batch, input]. # For static unidirectional sequence lstm, the input is an array sized of # time, and pack the array together, however, for time = 1, the input is # not packed. tflite_input_values = input_values if not parameters["is_dynamic_rnn"] and parameters["seq_length"] == 1: tflite_input_values = [ input_values[0].reshape((1, parameters["batch_size"], parameters["units"])) ] return tflite_input_values, sess.run( outputs, feed_dict=dict(zip(inputs, input_values))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, use_frozen_graph=True) @register_make_test_function(name="make_unidirectional_sequence_rnn_tests") @test_util.enable_control_flow_v2 def make_unidirectional_sequence_rnn_tests(options): """Make a set of tests to do unidirectional_sequence_rnn.""" test_parameters = [{ "batch_size": [2, 4, 6], "seq_length": [1, 3], "units": [4, 5], "is_dynamic_rnn": [False, True] }] def build_graph(parameters): input_values = [] if parameters["is_dynamic_rnn"]: shape = [ parameters["seq_length"], parameters["batch_size"], parameters["units"] ] input_value = tf.placeholder(dtype=tf.float32, name="input", shape=shape) input_values.append(input_value) rnn_cell = tf.lite.experimental.nn.TfLiteRNNCell(parameters["units"]) outs, _ = tf.lite.experimental.nn.dynamic_rnn( rnn_cell, input_value, dtype=tf.float32, time_major=True) 
outs = tf.unstack(outs, axis=1) else: shape = [parameters["batch_size"], parameters["units"]] for i in range(parameters["seq_length"]): input_value = tf.placeholder( dtype=tf.float32, name=("input_%d" % i), shape=shape) input_values.append(input_value) rnn_cell = tf.lite.experimental.nn.TfLiteRNNCell(parameters["units"]) outs, _ = tf.nn.static_rnn(rnn_cell, input_values, dtype=tf.float32) real_output = tf.zeros([1], dtype=tf.float32) + outs[-1] return input_values, [real_output] def build_inputs(parameters, sess, inputs, outputs): input_values = [] if parameters["is_dynamic_rnn"]: shape = [ parameters["seq_length"], parameters["batch_size"], parameters["units"] ] input_value = create_tensor_data(tf.float32, shape) input_values.append(input_value) else: shape = [parameters["batch_size"], parameters["units"]] for i in range(parameters["seq_length"]): input_value = create_tensor_data(tf.float32, shape) input_values.append(input_value) init = tf.global_variables_initializer() sess.run(init) # Tflite fused kernel takes input as [time, batch, input]. # For static unidirectional sequence rnn, the input is an array sized of # time, and pack the array together, however, for time = 1, the input is # not packed. 
tflite_input_values = input_values if not parameters["is_dynamic_rnn"] and parameters["seq_length"] == 1: tflite_input_values = [ input_values[0].reshape((1, parameters["batch_size"], parameters["units"])) ] return tflite_input_values, sess.run( outputs, feed_dict=dict(zip(inputs, input_values))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, use_frozen_graph=True) @register_make_test_function() def make_unfused_gru_tests(options): """Make a set of tests for unfused gru op.""" test_parameters = [{ "units": [2, 5], "batch_size": [1, 2], "time": [3], }] def build_graph(parameters): inputs = [ tf.placeholder(tf.float32, [parameters["batch_size"], parameters["units"]]) for _ in range(parameters["time"]) ] cell_fw = tf.nn.rnn_cell.GRUCell(parameters["units"]) cell_bw = tf.nn.rnn_cell.GRUCell(parameters["units"]) outputs, _, _ = tf.nn.static_bidirectional_rnn( cell_fw, cell_bw, inputs, dtype=tf.float32) return inputs, outputs def build_inputs(parameters, sess, inputs, outputs): input_values = [ create_tensor_data(tf.float32, [parameters["batch_size"], parameters["units"]]) for _ in range(parameters["time"]) ] init = tf.global_variables_initializer() sess.run(init) return input_values, sess.run( outputs, feed_dict=dict(zip(inputs, input_values))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, use_frozen_graph=True) @register_make_test_function() def make_rfft2d_tests(options): """Make a set of tests to do rfft2d.""" test_parameters = [{ "input_dtype": [tf.float32], "input_shape": [[8, 8], [3, 8, 8]], "fft_length": [ None, [4, 4], [4, 8], [8, 4], [8, 8], [8, 16], [16, 8], [16, 16] ] }] def build_graph(parameters): input_value = tf.placeholder( dtype=parameters["input_dtype"], name="input", shape=parameters["input_shape"]) with spectral_ops_test_util.fft_kernel_label_map(): outs = tf.signal.rfft2d(input_value, fft_length=parameters["fft_length"]) return [input_value], [outs] def build_inputs(parameters, sess, inputs, 
outputs): input_value = create_tensor_data(parameters["input_dtype"], parameters["input_shape"]) return [input_value], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value]))) extra_toco_options = ExtraTocoOptions() extra_toco_options.allow_custom_ops = True make_zip_of_tests(options, test_parameters, build_graph, build_inputs, extra_toco_options) # Toco binary path provided by the generate rule. bin_path = None def generate_examples(options): global bin_path def mkdir_if_not_exist(x): if not os.path.isdir(x): os.mkdir(x) if not os.path.isdir(x): raise RuntimeError("Failed to create dir %r" % x) opstest_path = os.path.join(options.output_path) mkdir_if_not_exist(opstest_path) out = options.zip_to_output bin_path = options.toco # Some zip filenames contain a postfix identifying the conversion mode. The # list of valid conversion modes is defined in # generated_test_conversion_modes() in build_def.bzl. test_function = ("make_%s_tests" % (out.replace(".zip", "").replace( "pb2lite", "").replace("toco-flex", "").rstrip("_"))) if test_function not in _MAKE_TEST_FUNCTIONS_MAP: raise RuntimeError("Can't find a test function to create %r. Tried %r" % (out, test_function)) _MAKE_TEST_FUNCTIONS_MAP[test_function](options)<|fim▁end|>
"input_dtype": [tf.float32],
<|file_name|>cost_estimate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # # Author: Joël Grand-Guillaume # Copyright 2013 Camptocamp SA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. #<|fim▁hole|># # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # from openerp.osv import orm from openerp.tools.translate import _ class logistic_requisition_cost_estimate(orm.TransientModel): _inherit = 'logistic.requisition.cost.estimate' def _check_requisition(self, cr, uid, requisition, context=None): """ Check the rules to create a cost estimate from the requisition :returns: list of tuples ('message, 'error_code') """ errors = [] if not requisition.budget_holder_id: error = (_('The requisition must be validated ' 'by the Budget Holder.'), 'NO_BUDGET_VALID') errors.append(error) return errors<|fim▁end|>
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details.
<|file_name|>tasks.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, unicode_literals import time from datetime import timedelta from djcelery_transactions import task from django.utils import timezone from redis_cache import get_redis_connection from .models import CreditAlert, Invitation, Org, TopUpCredits @task(track_started=True, name='send_invitation_email_task') def send_invitation_email_task(invitation_id): invitation = Invitation.objects.get(pk=invitation_id) invitation.send_email() <|fim▁hole|>@task(track_started=True, name='send_alert_email_task') def send_alert_email_task(alert_id): alert = CreditAlert.objects.get(pk=alert_id) alert.send_email() @task(track_started=True, name='check_credits_task') def check_credits_task(): CreditAlert.check_org_credits() @task(track_started=True, name='calculate_credit_caches') def calculate_credit_caches(): """ Repopulates the active topup and total credits for each organization that received messages in the past week. """ # get all orgs that have sent a message in the past week last_week = timezone.now() - timedelta(days=7) # for every org that has sent a message in the past week for org in Org.objects.filter(msgs__created_on__gte=last_week).distinct('pk'): start = time.time() org._calculate_credit_caches() print " -- recalculated credits for %s in %0.2f seconds" % (org.name, time.time() - start) @task(track_started=True, name="squash_topupcredits") def squash_topupcredits(): r = get_redis_connection() key = 'squash_topupcredits' if not r.get(key): with r.lock(key, timeout=900): TopUpCredits.squash_credits()<|fim▁end|>
<|file_name|>stocks.py<|end_file_name|><|fim▁begin|>import numpy as np import pandas as pd from bokeh.plotting import * # Here is some code to read in some stock data from the Yahoo Finance API AAPL = pd.read_csv( "http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000", parse_dates=['Date']) GOOG = pd.read_csv( "http://ichart.yahoo.com/table.csv?s=GOOG&a=0&b=1&c=2000", parse_dates=['Date']) MSFT = pd.read_csv( "http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000", parse_dates=['Date']) IBM = pd.read_csv( "http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000", parse_dates=['Date']) output_file("stocks.html", title="stocks.py example") # EXERCISE: turn on plot hold # EXERCISE: finish this line plot, and add more for the other stocks. Each one should # have a legend, and its own color. line( AAPL['Date'], # x coordinates AAPL['Adj Close'], # y coordinates color='#A6CEE3', # set a color for the line legend='AAPL', # attach a legend label<|fim▁hole|># EXERCISE: style the plot, set a title, lighten the gridlines, etc. # EXERCISE: start a new figure # Here is some code to compute the 30-day moving average for AAPL aapl = AAPL['Adj Close'] aapl_dates = AAPL['Date'] window_size = 30 window = np.ones(window_size)/float(window_size) aapl_avg = np.convolve(aapl, window, 'same') # EXERCISE: plot a scatter of circles for the individual AAPL prices with legend # 'close'. Remember to set the x axis type and tools on the first renderer. # EXERCISE: plot a line of the AAPL moving average data with the legeng 'avg' # EXERCISE: style the plot, set a title, lighten the gridlines, etc. show() # open a browser<|fim▁end|>
x_axis_type = "datetime", # NOTE: only needed on first tools="pan,wheel_zoom,box_zoom,reset,previewsave" # NOTE: only needed on first )
<|file_name|>convert_cldr.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python from gi.repository import Gdk from xml.etree.ElementTree import ElementTree, Element import re ESCAPE_PATTERN = re.compile(r'\\u\{([0-9A-Fa-f]+?)\}') ISO_PATTERN = re.compile(r'[A-E]([0-9]+)') def parse_single_key(value): key = Element('key') uc = 0 if hasattr(__builtins__, 'unichr'): def unescape(m): return chr(int(m.group(1), 16)) else: def unescape(m): return chr(int(m.group(1), 16)) value = ESCAPE_PATTERN.sub(unescape, value) if len(value) > 1: key.set('text', value) uc = ord(value[0]) keyval = Gdk.unicode_to_keyval(uc) name = Gdk.keyval_name(keyval) key.set('name', name) return key def convert(source, tree): root = Element('layout') for index, keymap in enumerate(tree.iter('keyMap')): level = Element('level') rows = {}<|fim▁hole|> if not modifiers: mode = 'default' elif 'shift' in modifiers.split(' ') or 'lock' in modifiers.split(' '): mode = 'latched' else: mode = 'locked' level.set('mode', mode) for _map in keymap.iter('map'): value = _map.get('to') key = parse_single_key(value) iso = _map.get('iso') if not ISO_PATTERN.match(iso): sys.stderr.write('invalid ISO key name: %s\n' % iso) continue if not iso[0] in rows: rows[iso[0]] = [] rows[iso[0]].append((int(iso[1:]), key)) # add attribute to certain keys name = key.get('name') if name == 'space': key.set('align', 'center') key.set('width', '6.0') if name in ('space', 'BackSpace'): key.set('repeatable', 'yes') # add subkeys longPress = _map.get('longPress') if longPress: for value in longPress.split(' '): subkey = parse_single_key(value) key.append(subkey) for k, v in sorted(list(rows.items()), key=lambda x: x[0], reverse=True): row = Element('row') for key in sorted(v, key=lambda x: x): row.append(key[1]) level.append(row) return root def indent(elem, level=0): i = "\n" + level*" " if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " if not elem.tail or not elem.tail.strip(): elem.tail = i for elem in elem: 
indent(elem, level+1) if not elem.tail or not elem.tail.strip(): elem.tail = i else: if level and (not elem.tail or not elem.tail.strip()): elem.tail = i if __name__ == "__main__": import sys if len(sys.argv) != 2: print("supply a CLDR keyboard file") sys.exit(1) source = sys.argv[-1] itree = ElementTree() itree.parse(source) root = convert(source, itree) indent(root) otree = ElementTree(root) if hasattr(sys.stdout, 'buffer'): out = sys.stdout.buffer else: out = sys.stdout otree.write(out, xml_declaration=True, encoding='UTF-8')<|fim▁end|>
root.append(level) level.set('name', 'level%d' % (index+1)) # FIXME: heuristics here modifiers = keymap.get('modifiers')
<|file_name|>JdbcDataTest.java<|end_file_name|><|fim▁begin|>/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.drill.jdbc.test; import java.io.IOException; import java.sql.Connection; import java.sql.Driver; import java.sql.DriverManager; import java.sql.Statement; import java.util.Iterator; import java.util.Map; import java.util.ServiceLoader; import org.apache.calcite.rel.core.JoinRelType; import org.apache.drill.common.logical.LogicalPlan; import org.apache.drill.common.logical.PlanProperties; import org.apache.drill.common.logical.StoragePluginConfig; import org.apache.drill.common.logical.data.Filter; import org.apache.drill.common.logical.data.Join; import org.apache.drill.common.logical.data.Limit; import org.apache.drill.common.logical.data.LogicalOperator; import org.apache.drill.common.logical.data.Order; import org.apache.drill.common.logical.data.Project; import org.apache.drill.common.logical.data.Scan; import org.apache.drill.common.logical.data.Store; import org.apache.drill.common.logical.data.Union; import org.apache.drill.jdbc.JdbcTestBase; import org.apache.drill.categories.JdbcTest; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; import 
com.google.common.base.Charsets; import com.google.common.base.Function; import com.google.common.base.Predicate; import com.google.common.collect.Iterables; import com.google.common.io.Resources; import org.junit.experimental.categories.Category; /** Unit tests for Drill's JDBC driver. */ @Ignore // ignore for now. @Category(JdbcTest.class) public class JdbcDataTest extends JdbcTestBase { private static String MODEL; private static String EXPECTED; @BeforeClass public static void setupFixtures() throws IOException { MODEL = Resources.toString(Resources.getResource("test-models.json"), Charsets.UTF_8); EXPECTED = Resources.toString(Resources.getResource("donuts-output-data.txt"), Charsets.UTF_8); } /** * Command-line utility to execute a logical plan. * * <p> * The forwarding method ensures that the IDE calls this method with the right classpath. * </p> */ public static void main(String[] args) throws Exception { } /** Load driver. */ @Test public void testLoadDriver() throws ClassNotFoundException { Class.forName("org.apache.drill.jdbc.Driver"); } /** * Load the driver using ServiceLoader */ @Test public void testLoadDriverServiceLoader() { ServiceLoader<Driver> sl = ServiceLoader.load(Driver.class); for(Iterator<Driver> it = sl.iterator(); it.hasNext(); ) { Driver driver = it.next(); if (driver instanceof org.apache.drill.jdbc.Driver) { return; } } Assert.fail("org.apache.drill.jdbc.Driver not found using ServiceLoader"); } /** Load driver and make a connection. */ @Test public void testConnect() throws Exception { Class.forName("org.apache.drill.jdbc.Driver"); final Connection connection = DriverManager.getConnection("jdbc:drill:zk=local"); connection.close(); } /** Load driver, make a connection, prepare a statement. 
*/ @Test public void testPrepare() throws Exception { withModel(MODEL, "DONUTS").withConnection(new Function<Connection, Void>() { @Override public Void apply(Connection connection) { try { final Statement statement = connection.prepareStatement("select * from donuts"); statement.close(); return null; } catch (Exception e) { throw new RuntimeException(e); } } }); } /** Simple query against JSON. */ @Test public void testSelectJson() throws Exception { withModel(MODEL, "DONUTS").sql("select * from donuts").returns(EXPECTED); } /** Simple query against EMP table in HR database. */ @Test public void testSelectEmployees() throws Exception { withModel(MODEL, "HR") .sql("select * from employees") .returns( "_MAP={deptId=31, lastName=Rafferty}\n" + "_MAP={deptId=33, lastName=Jones}\n" + "_MAP={deptId=33, lastName=Steinberg}\n" + "_MAP={deptId=34, lastName=Robinson}\n" + "_MAP={deptId=34, lastName=Smith}\n" + "_MAP={lastName=John}\n"); } /** Simple query against EMP table in HR database. */ @Test public void testSelectEmpView() throws Exception { withModel(MODEL, "HR") .sql("select * from emp") .returns( "DEPTID=31; LASTNAME=Rafferty\n" + "DEPTID=33; LASTNAME=Jones\n" + "DEPTID=33; LASTNAME=Steinberg\n" + "DEPTID=34; LASTNAME=Robinson\n" + "DEPTID=34; LASTNAME=Smith\n" + "DEPTID=null; LASTNAME=John\n"); } /** Simple query against EMP table in HR database. */ @Test public void testSelectDept() throws Exception { withModel(MODEL, "HR")<|fim▁hole|> } /** Query with project list. No field references yet. */ @Test public void testProjectConstant() throws Exception { withModel(MODEL, "DONUTS").sql("select 1 + 3 as c from donuts") .returns("C=4\n" + "C=4\n" + "C=4\n" + "C=4\n" + "C=4\n"); } /** Query that projects an element from the map. 
*/ @Test public void testProject() throws Exception { withModel(MODEL, "DONUTS").sql("select _MAP['ppu'] as ppu from donuts") .returns("PPU=0.55\n" + "PPU=0.69\n" + "PPU=0.55\n" + "PPU=0.69\n" + "PPU=1.0\n"); } /** Same logic as {@link #testProject()}, but using a subquery. */ @Test public void testProjectOnSubquery() throws Exception { withModel(MODEL, "DONUTS").sql("select d['ppu'] as ppu from (\n" + " select _MAP as d from donuts)") .returns("PPU=0.55\n" + "PPU=0.69\n" + "PPU=0.55\n" + "PPU=0.69\n" + "PPU=1.0\n"); } /** Checks the logical plan. */ @Test public void testProjectPlan() throws Exception { LogicalPlan plan = withModel(MODEL, "DONUTS") .sql("select _MAP['ppu'] as ppu from donuts") .logicalPlan(); PlanProperties planProperties = plan.getProperties(); Assert.assertEquals("optiq", planProperties.generator.type); Assert.assertEquals("na", planProperties.generator.info); Assert.assertEquals(1, planProperties.version); Assert.assertEquals(PlanProperties.PlanType.APACHE_DRILL_LOGICAL, planProperties.type); Map<String, StoragePluginConfig> seConfigs = plan.getStorageEngines(); StoragePluginConfig config = seConfigs.get("donuts-json"); // Assert.assertTrue(config != null && config instanceof ClasspathRSE.ClasspathRSEConfig); config = seConfigs.get("queue"); // Assert.assertTrue(config != null && config instanceof QueueRSE.QueueRSEConfig); Scan scan = findOnlyOperator(plan, Scan.class); Assert.assertEquals("donuts-json", scan.getStorageEngine()); Project project = findOnlyOperator(plan, Project.class); Assert.assertEquals(1, project.getSelections().size()); Assert.assertEquals(Scan.class, project.getInput().getClass()); Store store = findOnlyOperator(plan, Store.class); Assert.assertEquals("queue", store.getStorageEngine()); Assert.assertEquals("output sink", store.getMemo()); Assert.assertEquals(Project.class, store.getInput().getClass()); } /** * Query with subquery, filter, and projection of one real and one nonexistent field from a map field. 
*/ @Test public void testProjectFilterSubquery() throws Exception { withModel(MODEL, "DONUTS") .sql( "select d['name'] as name, d['xx'] as xx from (\n" + " select _MAP as d from donuts)\n" + "where cast(d['ppu'] as double) > 0.6") .returns("NAME=Raised; XX=null\n" + "NAME=Filled; XX=null\n" + "NAME=Apple Fritter; XX=null\n"); } private static <T extends LogicalOperator> Iterable<T> findOperator(LogicalPlan plan, final Class<T> operatorClazz) { return (Iterable<T>) Iterables.filter(plan.getSortedOperators(), new Predicate<LogicalOperator>() { @Override public boolean apply(LogicalOperator input) { return input.getClass().equals(operatorClazz); } }); } private static <T extends LogicalOperator> T findOnlyOperator(LogicalPlan plan, final Class<T> operatorClazz) { return Iterables.getOnlyElement(findOperator(plan, operatorClazz)); } @Test public void testProjectFilterSubqueryPlan() throws Exception { LogicalPlan plan = withModel(MODEL, "DONUTS") .sql( "select d['name'] as name, d['xx'] as xx from (\n" + " select _MAP['donuts'] as d from donuts)\n" + "where cast(d['ppu'] as double) > 0.6") .logicalPlan(); PlanProperties planProperties = plan.getProperties(); Assert.assertEquals("optiq", planProperties.generator.type); Assert.assertEquals("na", planProperties.generator.info); Assert.assertEquals(1, planProperties.version); Assert.assertEquals(PlanProperties.PlanType.APACHE_DRILL_LOGICAL, planProperties.type); Map<String, StoragePluginConfig> seConfigs = plan.getStorageEngines(); StoragePluginConfig config = seConfigs.get("donuts-json"); // Assert.assertTrue(config != null && config instanceof ClasspathRSE.ClasspathRSEConfig); config = seConfigs.get("queue"); // Assert.assertTrue(config != null && config instanceof QueueRSE.QueueRSEConfig); Scan scan = findOnlyOperator(plan, Scan.class); Assert.assertEquals("donuts-json", scan.getStorageEngine()); Filter filter = findOnlyOperator(plan, Filter.class); Assert.assertTrue(filter.getInput() instanceof Scan); Project[] projects 
= Iterables.toArray(findOperator(plan, Project.class), Project.class); Assert.assertEquals(2, projects.length); Assert.assertEquals(1, projects[0].getSelections().size()); Assert.assertEquals(Filter.class, projects[0].getInput().getClass()); Assert.assertEquals(2, projects[1].getSelections().size()); Assert.assertEquals(Project.class, projects[1].getInput().getClass()); Store store = findOnlyOperator(plan, Store.class); Assert.assertEquals("queue", store.getStorageEngine()); Assert.assertEquals("output sink", store.getMemo()); Assert.assertEquals(Project.class, store.getInput().getClass()); } /** Query that projects one field. (Disabled; uses sugared syntax.) */ @Test @Ignore public void testProjectNestedFieldSugared() throws Exception { withModel(MODEL, "DONUTS").sql("select donuts.ppu from donuts") .returns("C=4\n" + "C=4\n" + "C=4\n" + "C=4\n" + "C=4\n"); } /** Query with filter. No field references yet. */ @Test public void testFilterConstantFalse() throws Exception { withModel(MODEL, "DONUTS").sql("select * from donuts where 3 > 4").returns(""); } @Test public void testFilterConstant() throws Exception { withModel(MODEL, "DONUTS").sql("select * from donuts where 3 < 4").returns(EXPECTED); } @Ignore @Test public void testValues() throws Exception { withModel(MODEL, "DONUTS").sql("values (1)").returns("EXPR$0=1\n"); // Enable when https://issues.apache.org/jira/browse/DRILL-57 fixed // .planContains("store"); } @Test public void testJoin() throws Exception { Join join = withModel(MODEL, "HR") .sql("select * from emp join dept on emp.deptId = dept.deptId") .returnsUnordered("DEPTID=31; LASTNAME=Rafferty; DEPTID0=31; NAME=Sales", "DEPTID=33; LASTNAME=Jones; DEPTID0=33; NAME=Engineering", "DEPTID=33; LASTNAME=Steinberg; DEPTID0=33; NAME=Engineering", "DEPTID=34; LASTNAME=Robinson; DEPTID0=34; NAME=Clerical", "DEPTID=34; LASTNAME=Smith; DEPTID0=34; NAME=Clerical").planContains(Join.class); Assert.assertEquals(JoinRelType.INNER, join.getJoinType()); } @Test public 
void testLeftJoin() throws Exception { Join join = withModel(MODEL, "HR") .sql("select * from emp left join dept on emp.deptId = dept.deptId") .returnsUnordered("DEPTID=31; LASTNAME=Rafferty; DEPTID0=31; NAME=Sales", "DEPTID=33; LASTNAME=Jones; DEPTID0=33; NAME=Engineering", "DEPTID=33; LASTNAME=Steinberg; DEPTID0=33; NAME=Engineering", "DEPTID=34; LASTNAME=Robinson; DEPTID0=34; NAME=Clerical", "DEPTID=34; LASTNAME=Smith; DEPTID0=34; NAME=Clerical", "DEPTID=null; LASTNAME=John; DEPTID0=null; NAME=null").planContains(Join.class); Assert.assertEquals(JoinRelType.LEFT, join.getJoinType()); } /** * Right join is tricky because Drill's "join" operator only supports "left", so we have to flip inputs. */ @Test @Ignore public void testRightJoin() throws Exception { Join join = withModel(MODEL, "HR").sql("select * from emp right join dept on emp.deptId = dept.deptId") .returnsUnordered("xx").planContains(Join.class); Assert.assertEquals(JoinRelType.LEFT, join.getJoinType()); } @Test public void testFullJoin() throws Exception { Join join = withModel(MODEL, "HR") .sql("select * from emp full join dept on emp.deptId = dept.deptId") .returnsUnordered("DEPTID=31; LASTNAME=Rafferty; DEPTID0=31; NAME=Sales", "DEPTID=33; LASTNAME=Jones; DEPTID0=33; NAME=Engineering", "DEPTID=33; LASTNAME=Steinberg; DEPTID0=33; NAME=Engineering", "DEPTID=34; LASTNAME=Robinson; DEPTID0=34; NAME=Clerical", "DEPTID=34; LASTNAME=Smith; DEPTID0=34; NAME=Clerical", "DEPTID=null; LASTNAME=John; DEPTID0=null; NAME=null", "DEPTID=null; LASTNAME=null; DEPTID0=35; NAME=Marketing").planContains(Join.class); Assert.assertEquals(JoinRelType.FULL, join.getJoinType()); } /** * Join on subquery; also tests that if a field of the same name exists in both inputs, both fields make it through * the join. 
*/ @Test public void testJoinOnSubquery() throws Exception { Join join = withModel(MODEL, "HR") .sql( "select * from (\n" + "select deptId, lastname, 'x' as name from emp) as e\n" + " join dept on e.deptId = dept.deptId") .returnsUnordered("DEPTID=31; LASTNAME=Rafferty; NAME=x; DEPTID0=31; NAME0=Sales", "DEPTID=33; LASTNAME=Jones; NAME=x; DEPTID0=33; NAME0=Engineering", "DEPTID=33; LASTNAME=Steinberg; NAME=x; DEPTID0=33; NAME0=Engineering", "DEPTID=34; LASTNAME=Robinson; NAME=x; DEPTID0=34; NAME0=Clerical", "DEPTID=34; LASTNAME=Smith; NAME=x; DEPTID0=34; NAME0=Clerical").planContains(Join.class); Assert.assertEquals(JoinRelType.INNER, join.getJoinType()); } /** Tests that one of the FoodMart tables is present. */ @Test @Ignore public void testFoodMart() throws Exception { withModel(MODEL, "FOODMART") .sql("select * from product_class where cast(_map['product_class_id'] as integer) < 3") .returnsUnordered( "_MAP={product_category=Seafood, product_class_id=2, product_department=Seafood, product_family=Food, product_subcategory=Shellfish}", "_MAP={product_category=Specialty, product_class_id=1, product_department=Produce, product_family=Food, product_subcategory=Nuts}"); } @Test public void testUnionAll() throws Exception { Union union = withModel(MODEL, "HR") .sql("select deptId from dept\n" + "union all\n" + "select deptId from emp") .returnsUnordered("DEPTID=31", "DEPTID=33", "DEPTID=34", "DEPTID=35", "DEPTID=null") .planContains(Union.class); Assert.assertFalse(union.isDistinct()); } @Test public void testUnion() throws Exception { Union union = withModel(MODEL, "HR") .sql("select deptId from dept\n" + "union\n" + "select deptId from emp") .returnsUnordered("DEPTID=31", "DEPTID=33", "DEPTID=34", "DEPTID=35", "DEPTID=null") .planContains(Union.class); Assert.assertTrue(union.isDistinct()); } @Test public void testOrderByDescNullsFirst() throws Exception { // desc nulls last withModel(MODEL, "HR") .sql("select * from emp order by deptId desc nulls first") .returns( 
"DEPTID=null; LASTNAME=John\n" + "DEPTID=34; LASTNAME=Robinson\n" + "DEPTID=34; LASTNAME=Smith\n" + "DEPTID=33; LASTNAME=Jones\n" + "DEPTID=33; LASTNAME=Steinberg\n" + "DEPTID=31; LASTNAME=Rafferty\n") .planContains(Order.class); } @Test public void testOrderByDescNullsLast() throws Exception { // desc nulls first withModel(MODEL, "HR") .sql("select * from emp order by deptId desc nulls last") .returns( "DEPTID=34; LASTNAME=Robinson\n" + "DEPTID=34; LASTNAME=Smith\n" + "DEPTID=33; LASTNAME=Jones\n" + "DEPTID=33; LASTNAME=Steinberg\n" + "DEPTID=31; LASTNAME=Rafferty\n" + "DEPTID=null; LASTNAME=John\n") .planContains(Order.class); } @Test @Ignore public void testOrderByDesc() throws Exception { // desc is implicitly "nulls first" (i.e. null sorted as +inf) // Current behavior is to sort nulls last. This is wrong. withModel(MODEL, "HR") .sql("select * from emp order by deptId desc") .returns( "DEPTID=null; LASTNAME=John\n" + "DEPTID=34; LASTNAME=Robinson\n" + "DEPTID=34; LASTNAME=Smith\n" + "DEPTID=33; LASTNAME=Jones\n" + "DEPTID=33; LASTNAME=Steinberg\n" + "DEPTID=31; LASTNAME=Rafferty\n") .planContains(Order.class); } @Test public void testOrderBy() throws Exception { // no sort order specified is implicitly "asc", and asc is "nulls last" withModel(MODEL, "HR") .sql("select * from emp order by deptId") .returns( "DEPTID=31; LASTNAME=Rafferty\n" + "DEPTID=33; LASTNAME=Jones\n" + "DEPTID=33; LASTNAME=Steinberg\n" + "DEPTID=34; LASTNAME=Robinson\n" + "DEPTID=34; LASTNAME=Smith\n" + "DEPTID=null; LASTNAME=John\n") .planContains(Order.class); } @Test public void testLimit() throws Exception { withModel(MODEL, "HR") .sql("select LASTNAME from emp limit 2") .returns("LASTNAME=Rafferty\n" + "LASTNAME=Jones") .planContains(Limit.class); } @Test public void testLimitOrderBy() throws Exception { TestDataConnection tdc = withModel(MODEL, "HR") .sql("select LASTNAME from emp order by LASTNAME limit 2") .returns("LASTNAME=John\n" + "LASTNAME=Jones"); 
tdc.planContains(Limit.class); tdc.planContains(Order.class); } @Test public void testOrderByWithOffset() throws Exception { withModel(MODEL, "HR") .sql("select LASTNAME from emp order by LASTNAME asc offset 3") .returns("LASTNAME=Robinson\n" + "LASTNAME=Smith\n" + "LASTNAME=Steinberg") .planContains(Limit.class); } @Test public void testOrderByWithOffsetAndFetch() throws Exception { withModel(MODEL, "HR") .sql("select LASTNAME from emp order by LASTNAME asc offset 3 fetch next 2 rows only") .returns("LASTNAME=Robinson\n" + "LASTNAME=Smith") .planContains(Limit.class); } }<|fim▁end|>
.sql("select * from departments") .returns( "_MAP={deptId=31, name=Sales}\n" + "_MAP={deptId=33, name=Engineering}\n" + "_MAP={deptId=34, name=Clerical}\n" + "_MAP={deptId=35, name=Marketing}\n");
<|file_name|>cookie_jar.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import time from datetime import timedelta class CookieJar: def __init__(self, pluginname, account=None): self.cookies = {} self.plugin = pluginname self.account = account def add_cookies(self, clist): for c in clist: name = c.split("\t")[5] self.cookies[name] = c def get_cookies(self): return list(self.cookies.values()) def parse_cookie(self, name): if name in self.cookies: return self.cookies[name].split("\t")[6] else: return None def get_cookie(self, name): return self.parse_cookie(name)<|fim▁hole|> def set_cookie( self, domain, name, value, path="/", exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention ): self.cookies[ name ] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}" def clear(self): self.cookies = {}<|fim▁end|>
<|file_name|>core-help.js<|end_file_name|><|fim▁begin|>// Help functions /* * Return a string with all helper functions whose name contains the 'substring'; * if the 'searchDescription' is true, then also search the function description"); */ function getHelp(substring, searchDescription) { return framework.getJavaScriptHelp(".*(?i:" + substring + ").*", searchDescription); } framework.addJavaScriptHelp("help", "substring, fileName", "output all the helper functions whose name contains the given 'substring'"); function help(substring, fileName) { if (arguments.length > 1) { write(getHelp(substring, false), fileName); } else if (arguments.length > 0) { write(getHelp(substring, false)); } else { write(getHelp("", false)); } } framework.addJavaScriptHelp("apropos", "substring, fileName", "output all the helper functions whose name or description contains the given 'substring'"); function apropos(substring, fileName) { if (arguments.length > 1) { write(getHelp(substring, true), fileName); } else if (arguments.length > 0) { write(getHelp(substring, true)); } else { write(getHelp("", true)); } } framework.addJavaScriptHelp("helpRegex", "regex, fileName", "output all helper functions whose name matches 'regex'"); function helpRegex(regex, fileName) { if (arguments.length > 1) {<|fim▁hole|> write(framework.getJavaScriptHelp(regex, false)); } }<|fim▁end|>
write(framework.getJavaScriptHelp(regex, false), fileName); } else if (arguments.length > 0) {
<|file_name|>rad_util.py<|end_file_name|><|fim▁begin|># Copyright (c) 2007 RADLogic # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """Provide various handy Python functions. Running this script directly will execute the doctests. Functions: int2bin(i, n) -- Convert integer to binary string. bin2int(bin_string) -- Convert binary string to integer. reverse(input_string) -- Reverse a string. transpose(matrix) -- Transpose a list of lists. polygon_area(points_list) -- Calculate the area of an arbitrary polygon. timestamp() -- Return string containing current time stamp. pt2str(point) -- Return prettier string version of point tuple. gcf(a, b) -- Return the greatest common factor of two numbers. lcm(a, b) -- Return the least common multiple of two numbers. permutations(input_list) -- Generate all permutations of a list of items. reduce_fraction(fraction) -- Reduce fraction (num, denom) to simplest form. quantile(l, p) -- Return p quantile of list l. E.g. p=0.25 for q1. 
trim(l) -- Discard values in list more than 1.5*IQR outside IQR. nice_units(value) -- Return value converted to human readable units. uniquify(seq) -- Return sequence with duplicate items in sequence seq removed. reverse_dict(d) -- Return the dictionary with the items as keys and vice-versa. lsb(x, n) -- Return the n least significant bits of x. gray_encode(i) -- Gray encode the given integer. random_vec(bits, max_value=None) -- Return a random binary vector. binary_range(bits) -- Return list of all possible binary numbers width=bits. float_range([start], stop, [step]) -- Return range of floats. find_common_fixes(s1, s2) -- Find common (prefix, suffix) of two strings. is_rotated(seq1, seq2) -- Return true if the list is a rotation of other list. getmodule(obj) -- Return the module that contains the object definition of obj. (use inspect.getmodule instead, though) get_args(argv) -- Store command-line args in a dictionary. This module requires Python >= 2.2 """ __author__ = 'Tim Wegener <[email protected]>' __date__ = '$Date: 2007/03/27 03:15:06 $' __version__ = '$Revision: 0.45 $' __credits__ = """ David Chandler, for polygon area algorithm. (http://www.davidchandler.com/AreaOfAGeneralPolygon.pdf) """ import re import sys import time import random try: True, False except NameError: True, False = (1==1, 0==1) def int2bin(i, n): """Convert decimal integer i to n-bit binary number (string). >>> int2bin(0, 8) '00000000' >>> int2bin(123, 8) '01111011' >>> int2bin(123L, 8) '01111011' >>> int2bin(15, 2) Traceback (most recent call last): ValueError: Value too large for given number of bits. """ hex2bin = {'0': '0000', '1': '0001', '2': '0010', '3': '0011', '4': '0100', '5': '0101', '6': '0110', '7': '0111', '8': '1000', '9': '1001', 'a': '1010', 'b': '1011', 'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111'} # Convert to hex then map each hex digit to binary equivalent. 
result = ''.join([hex2bin[x] for x in hex(i).lower().replace('l','')[2:]]) # Shrink result to appropriate length. # Raise an error if the value is changed by the truncation. if '1' in result[:-n]: raise ValueError("Value too large for given number of bits.") result = result[-n:] # Zero-pad if length longer than mapped result. result = '0'*(n-len(result)) + result return result def bin2int(bin_string): """Convert binary number string to decimal integer. Note: Python > v2 has int(bin_string, 2) >>> bin2int('1111') 15 >>> bin2int('0101') 5 """ ## result = 0 ## bin_list = list(bin_string) ## if len(filter(lambda x: x in ('1','0'), bin_list)) < len(bin_list): ## raise Exception ("bin2int: Error - not a binary number: %s" ## % bin_string) ## bit_list = map(int, bin_list) ## bit_list.reverse() # Make most significant bit have highest index. ## for bit_place in range(len(bit_list)): ## result = result + ((2**bit_place) * bit_list[bit_place]) ## return result return int(bin_string, 2) def reverse(input_string): """Reverse a string. Useful for strings of binary numbers. >>> reverse('abc') 'cba' """ str_list = list(input_string) str_list.reverse() return ''.join(str_list) def transpose(matrix): """Transpose a list of lists. >>> transpose([['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']]) [['a', 'd', 'g'], ['b', 'e', 'h'], ['c', 'f', 'i']] >>> transpose([['a', 'b', 'c'], ['d', 'e', 'f']]) [['a', 'd'], ['b', 'e'], ['c', 'f']] >>> transpose([['a', 'b'], ['d', 'e'], ['g', 'h']]) [['a', 'd', 'g'], ['b', 'e', 'h']] """ result = zip(*matrix) # Convert list of tuples to list of lists. # map is faster than a list comprehension since it is being used with # a built-in function as an argument. result = map(list, result) return result def polygon_area(points_list, precision=100): """Calculate area of an arbitrary polygon using an algorithm from the web. Return the area of the polygon as a positive float. Arguments: points_list -- list of point tuples [(x0, y0), (x1, y1), (x2, y2), ...] 
(Unclosed polygons will be closed automatically. precision -- Internal arithmetic precision (integer arithmetic). >>> polygon_area([(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 0), (0, 0)]) 3.0 Credits: Area of a General Polygon by David Chandler http://www.davidchandler.com/AreaOfAGeneralPolygon.pdf """ # Scale up co-ordinates and convert them to integers. for i in range(len(points_list)): points_list[i] = (int(points_list[i][0] * precision), int(points_list[i][1] * precision)) # Close polygon if not closed. if points_list[-1] != points_list[0]: points_list.append(points_list[0]) # Calculate area. area = 0 for i in range(len(points_list)-1): (x_i, y_i) = points_list[i] (x_i_plus_1, y_i_plus_1) = points_list[i+1] area = area + (x_i_plus_1 * y_i) - (y_i_plus_1 * x_i) area = abs(area / 2) # Unscale area. area = float(area)/(precision**2) return area def timestamp(): """Return string containing current time stamp. Note: In Python 2 onwards can use time.asctime() with no arguments. """ return time.asctime() def pt2str(point): """Return prettier string version of point tuple. >>> pt2str((1.8, 1.9)) '(1.8, 1.9)' """ return "(%s, %s)" % (str(point[0]), str(point[1])) def gcf(a, b, epsilon=1e-16): """Return the greatest common factor of a and b, using Euclidean algorithm. Arguments: a, b -- two numbers If both numbers are integers return an integer result, otherwise return a float result. epsilon -- floats less than this magnitude are considered to be zero (default: 1e-16) Examples: >>> gcf(12, 34) 2 >>> gcf(13.5, 4) 0.5 >>> gcf(-2, 4) 2 >>> gcf(5, 0) 5 By (a convenient) definition: >>> gcf(0, 0) 0 """ result = max(a, b) remainder = min(a, b) while remainder and abs(remainder) > epsilon: new_remainder = result % remainder result = remainder remainder = new_remainder return abs(result) def lcm(a, b, precision=None): """Return the least common multiple of a and b, using the gcf function. Arguments: a, b -- two numbers. 
If both are integers return an integer result, otherwise a return a float result. precision -- scaling factor if a and/or b are floats. >>> lcm(21, 6) 42 >>> lcm(2.5, 3.5) 17.5 >>> str(lcm(1.5e-8, 2.5e-8, precision=1e9)) '7.5e-08' By (an arbitary) definition: >>> lcm(0, 0) 0 """ # Note: Dummy precision argument is for backwards compatibility. # Do the division first. # (See http://en.wikipedia.org/wiki/Least_common_multiple ) denom = gcf(a, b) if denom == 0: result = 0 else: result = a * (b / denom) return result def permutations(input_list): """Return a list containing all permutations of the input list. Note: This is a recursive function. >>> perms = permutations(['a', 'b', 'c']) >>> perms.sort() >>> for perm in perms: ... print perm ['a', 'b', 'c'] ['a', 'c', 'b'] ['b', 'a', 'c'] ['b', 'c', 'a'] ['c', 'a', 'b'] ['c', 'b', 'a'] """ out_lists = [] if len(input_list) > 1: # Extract first item in list. item = input_list[0] # Find all permutations of remainder of list. (Recursive call.) sub_lists = permutations(input_list[1:]) # For every permutation of the sub list... for sub_list in sub_lists: # Insert the extracted first item at every position of the list. for i in range(len(input_list)): new_list = sub_list[:] new_list.insert(i, item) out_lists.append(new_list) else: # Termination condition: only one item in input list. out_lists = [input_list] return out_lists def reduce_fraction(fraction): """Reduce fraction tuple to simplest form. fraction=(num, denom) >>> reduce_fraction((14, 7)) (2, 1) >>> reduce_fraction((-2, 4)) (-1, 2) >>> reduce_fraction((0, 4)) (0, 1) >>> reduce_fraction((4, 0)) (1, 0) """ (numerator, denominator) = fraction common_factor = abs(gcf(numerator, denominator)) result = (numerator/common_factor, denominator/common_factor) return result def quantile(l, p): """Return p quantile of list l. E.g. p=0.25 for q1. 
See: http://rweb.stat.umn.edu/R/library/base/html/quantile.html """ l_sort = l[:] l_sort.sort() n = len(l) r = 1 + ((n - 1) * p) i = int(r) f = r - i if i < n: result = (1-f)*l_sort[i-1] + f*l_sort[i] else: result = l_sort[i-1] return result def trim(l): """Discard values in list more than 1.5*IQR outside IQR. (IQR is inter-quartile-range) This function uses rad_util.quantile 1.5*IQR -- mild outlier 3*IQR -- extreme outlier See: http://wind.cc.whecn.edu/~pwildman/statnew/section_7_-_exploratory_data_analysis.htm """ l_sort = l[:] l_sort.sort() # Calculate medianscore (based on stats.py lmedianscore by Gary Strangman) if len(l_sort) % 2 == 0: # If even number of scores, average middle 2. index = int(len(l_sort) / 2) # Integer division correct median = float(l_sort[index] + l_sort[index-1]) / 2 else: # int divsion gives mid value when count from 0 index = int(len(l_sort) / 2) median = l_sort[index] # Calculate IQR. q1 = quantile(l_sort, 0.25) q3 = quantile(l_sort, 0.75) iqr = q3 - q1 iqr_extra = iqr * 1.5 def in_interval(x, i=iqr_extra, q1=q1, q3=q3): return (x >= q1-i and x <= q3+i) l_trimmed = [x for x in l_sort if in_interval(x)] return l_trimmed def nice_units(value, dp=0, sigfigs=None, suffix='', space=' ', use_extra_prefixes=False, use_full_name=False, mode='si'): """Return value converted to human readable units eg milli, micro, etc. Arguments: value -- number in base units dp -- number of decimal places to display (rounded) sigfigs -- number of significant figures to display (rounded) This overrides dp if set. suffix -- optional unit suffix to append to unit multiplier space -- seperator between value and unit multiplier (default: ' ') use_extra_prefixes -- use hecto, deka, deci and centi as well if set. (default: False) use_full_name -- use full name for multiplier symbol, e.g. milli instead of m (default: False) mode -- 'si' for SI prefixes, 'bin' for binary multipliers (1024, etc.) 
(Default: 'si') SI prefixes from: http://physics.nist.gov/cuu/Units/prefixes.html (Greek mu changed to u.) Binary prefixes based on: http://physics.nist.gov/cuu/Units/binary.html >>> nice_units(2e-11) '20 p' >>> nice_units(2e-11, space='') '20p' """ si_prefixes = {1e24: ('Y', 'yotta'), 1e21: ('Z', 'zetta'), 1e18: ('E', 'exa'), 1e15: ('P', 'peta'), 1e12: ('T', 'tera'), 1e9: ('G', 'giga'), 1e6: ('M', 'mega'), 1e3: ('k', 'kilo'), 1e-3: ('m', 'milli'), 1e-6: ('u', 'micro'), 1e-9: ('n', 'nano'), 1e-12: ('p', 'pico'), 1e-15: ('f', 'femto'), 1e-18: ('a', 'atto'), 1e-21: ('z', 'zepto'), 1e-24: ('y', 'yocto') } if use_extra_prefixes: si_prefixes.update({1e2: ('h', 'hecto'), 1e1: ('da', 'deka'), 1e-1: ('d', 'deci'), 1e-2: ('c', 'centi') }) bin_prefixes = {2**10: ('K', 'kilo'), 2**20: ('M', 'mega'), 2**30: ('G', 'mega'), 2**40: ('T', 'tera'), 2**50: ('P', 'peta'), 2**60: ('E', 'exa') } if mode == 'bin': prefixes = bin_prefixes else: prefixes = si_prefixes prefixes[1] = ('', '') # Unity. # Determine appropriate multiplier. multipliers = prefixes.keys() multipliers.sort() mult = None for i in range(len(multipliers) - 1): lower_mult = multipliers[i] upper_mult = multipliers[i+1] if lower_mult <= value < upper_mult: mult_i = i break if mult is None: if value < multipliers[0]: mult_i = 0 elif value >= multipliers[-1]: mult_i = len(multipliers) - 1 mult = multipliers[mult_i] # Convert value for this multiplier. new_value = value / mult # Deal with special case due to rounding. if sigfigs is None: if mult_i < (len(multipliers) - 1) and\ round(new_value, dp) ==\ round((multipliers[mult_i+1] / mult), dp): mult = multipliers[mult_i + 1] new_value = value / mult # Concatenate multiplier symbol. if use_full_name: label_type = 1 else: label_type = 0 # Round and truncate to appropriate precision. 
if sigfigs is None: str_value = eval('"%.'+str(dp)+'f" % new_value', locals(), {}) else: str_value = eval('"%.'+str(sigfigs)+'g" % new_value', locals(), {}) return str_value + space + prefixes[mult][label_type] + suffix def uniquify(seq, preserve_order=False): """Return sequence with duplicate items in sequence seq removed. The code is based on usenet post by Tim Peters. This code is O(N) if the sequence items are hashable, O(N**2) if not. Peter Bengtsson has a blog post with an empirical comparison of other approaches: http://www.peterbe.com/plog/uniqifiers-benchmark If order is not important and the sequence items are hashable then list(set(seq)) is readable and efficient. If order is important and the sequence items are hashable generator expressions can be used (in py >= 2.4) (useful for large sequences): seen = set() do_something(x for x in seq if x not in seen or seen.add(x)) Arguments: seq -- sequence preserve_order -- if not set the order will be arbitrary Using this option will incur a speed penalty. (default: False) Example showing order preservation: >>> uniquify(['a', 'aa', 'b', 'b', 'ccc', 'ccc', 'd'], preserve_order=True) ['a', 'aa', 'b', 'ccc', 'd'] Example using a sequence of un-hashable items: >>> uniquify([['z'], ['x'], ['y'], ['z']], preserve_order=True) [['z'], ['x'], ['y']] The sorted output or the non-order-preserving approach should equal that of the sorted order-preserving approach output: >>> unordered = uniquify([3, 3, 1, 2], preserve_order=False) >>> unordered.sort() >>> ordered = uniquify([3, 3, 1, 2], preserve_order=True) >>> ordered.sort() >>> ordered [1, 2, 3] >>> int(ordered == unordered) 1 """ try: # Attempt fast algorithm. 
d = {} if preserve_order: # This is based on Dave Kirby's method (f8) noted in the post: # http://www.peterbe.com/plog/uniqifiers-benchmark return [x for x in seq if (x not in d) and not d.__setitem__(x, 0)] else: for x in seq: d[x] = 0 return d.keys() except TypeError: # Have an unhashable object, so use slow algorithm. result = [] app = result.append for x in seq: if x not in result: app(x) return result # Alias to noun form for backward compatibility. unique = uniquify def reverse_dict(d): """Reverse a dictionary so the items become the keys and vice-versa. Note: The results will be arbitrary if the items are not unique. >>> d = reverse_dict({'a': 1, 'b': 2}) >>> d_items = d.items() >>> d_items.sort() >>> d_items [(1, 'a'), (2, 'b')] """ result = {} for key, value in d.items(): result[value] = key return result def lsb(x, n): """Return the n least significant bits of x. >>> lsb(13, 3) 5 """ return x & ((2 ** n) - 1) def gray_encode(i): """Gray encode the given integer.""" return i ^ (i >> 1) def random_vec(bits, max_value=None): """Generate a random binary vector of length bits and given max value.""" vector = "" for _ in range(int(bits / 10) + 1): i = int((2**10) * random.random()) vector += int2bin(i, 10) if max_value and (max_value < 2 ** bits - 1): vector = int2bin((int(vector, 2) / (2 ** bits - 1)) * max_value, bits) return vector[0:bits] def binary_range(bits): """Return a list of all possible binary numbers in order with width=bits. It would be nice to extend it to match the functionality of python's range() built-in function. """ l = [] v = ['0'] * bits toggle = [1] + [0] * bits while toggle[bits] != 1: v_copy = v[:] v_copy.reverse() l.append(''.join(v_copy)) toggle = [1] + [0]*bits i = 0 while i < bits and toggle[i] == 1: if toggle[i]: if v[i] == '0': v[i] = '1' toggle[i+1] = 0 else: v[i] = '0' toggle[i+1] = 1 i += 1 return l def float_range(start, stop=None, step=None): """Return a list containing an arithmetic progression of floats. 
Return a list of floats between 0.0 (or start) and stop with an increment of step. This is in functionality to python's range() built-in function but can accept float increments. As with range(), stop is omitted from the list. """ if stop is None: stop = float(start) start = 0.0 if step is None: step = 1.0 cur = float(start) l = [] while cur < stop: l.append(cur) cur += step return l def find_common_fixes(s1, s2): """Find common (prefix, suffix) of two strings. >>> find_common_fixes('abc', 'def') ('', '') >>> find_common_fixes('abcelephantdef', 'abccowdef') ('abc', 'def') >>> find_common_fixes('abcelephantdef', 'abccow') ('abc', '') >>> find_common_fixes('elephantdef', 'abccowdef') ('', 'def') """ prefix = [] suffix = [] i = 0 common_len = min(len(s1), len(s2)) while i < common_len: if s1[i] != s2[i]: break prefix.append(s1[i]) i += 1 i = 1 while i < (common_len + 1): if s1[-i] != s2[-i]: break suffix.append(s1[-i]) i += 1 suffix.reverse() prefix = ''.join(prefix) suffix = ''.join(suffix) return (prefix, suffix) def is_rotated(seq1, seq2): """Return true if the first sequence is a rotation of the second sequence. >>> seq1 = ['A', 'B', 'C', 'D'] >>> seq2 = ['C', 'D', 'A', 'B'] >>> int(is_rotated(seq1, seq2)) 1 >>> seq2 = ['C', 'D', 'B', 'A'] >>> int(is_rotated(seq1, seq2)) 0 >>> seq1 = ['A', 'B', 'C', 'A'] >>> seq2 = ['A', 'A', 'B', 'C'] >>> int(is_rotated(seq1, seq2)) 1 >>> seq2 = ['A', 'B', 'C', 'A'] >>> int(is_rotated(seq1, seq2)) 1 >>> seq2 = ['A', 'A', 'C', 'B'] >>> int(is_rotated(seq1, seq2)) 0 """ # Do a sanity check. if len(seq1) != len(seq2): return False # Look for occurrences of second sequence head item in first sequence. start_indexes = [] head_item = seq2[0] for index1 in range(len(seq1)): if seq1[index1] == head_item: start_indexes.append(index1) # Check that wrapped sequence matches. 
double_seq1 = seq1 + seq1 for index1 in start_indexes: if double_seq1[index1:index1+len(seq1)] == seq2: return True return False def getmodule(obj): """Return the module that contains the object definition of obj. Note: Use inspect.getmodule instead. Arguments: obj -- python obj, generally a class or a function Examples: A function: >>> module = getmodule(random.choice) >>> module.__name__ 'random' >>> module is random 1 A class: >>> module = getmodule(random.Random) >>> module.__name__ 'random' >>> module is random 1 A class inheriting from a class in another module: (note: The inheriting class must define at least one function.) >>> class MyRandom(random.Random): ... def play(self): ... pass >>> module = getmodule(MyRandom) >>> if __name__ == '__main__': ... name = 'rad_util' ... else: ... name = module.__name__ >>> name 'rad_util' >>> module is sys.modules[__name__] 1 Discussion: This approach is slightly hackish, and won't work in various situations. However, this was the approach recommended by GvR, so it's as good as you'll get. See GvR's post in this thread: http://groups.google.com.au/group/comp.lang.python/browse_thread/thread/966a7bdee07e3b34/c3cab3f41ea84236?lnk=st&q=python+determine+class+module&rnum=4&hl=en#c3cab3f41ea84236 """ if hasattr(obj, 'func_globals'): func = obj else: # Handle classes. func = None for item in obj.__dict__.values(): if hasattr(item, 'func_globals'): func = item break if func is None: raise ValueError("No functions attached to object: %r" % obj) module_name = func.func_globals['__name__'] # Get module. module = sys.modules[module_name] return module def round_grid(value, grid, mode=0): """Round off the given value to the given grid size. 
Arguments: value -- value to be roudne grid -- result must be a multiple of this mode -- 0 nearest, 1 up, -1 down Examples: >>> round_grid(7.5, 5) 10 >>> round_grid(7.5, 5, mode=-1) 5 <|fim▁hole|> 10 >>> round_grid(7.3, 5.0, mode=1) 10.0 """ off_grid = value % grid if mode == 0: add_one = int(off_grid >= (grid / 2.0)) elif mode == 1 and off_grid: add_one = 1 elif mode == -1 and off_grid: add_one = 0 result = ((int(value / grid) + add_one) * grid) return result def get_args(argv): """Store command-line args in a dictionary. -, -- prefixes are removed Items not prefixed with - or -- are stored as a list, indexed by 'args' For options that take a value use --option=value Consider using optparse or getopt (in Python standard library) instead. """ d = {} args = [] for arg in argv: if arg.startswith('-'): parts = re.sub(r'^-+', '', arg).split('=') if len(parts) == 2: d[parts[0]] = parts[1] else: d[parts[0]] = None else: args.append(arg) d['args'] = args return d if __name__ == '__main__': import doctest doctest.testmod(sys.modules['__main__'])<|fim▁end|>
>>> round_grid(7.3, 5, mode=1)
<|file_name|>EditFormat.java<|end_file_name|><|fim▁begin|>/** * The contents of this file are subject to the license and copyright * detailed in the LICENSE file at the root of the source * tree and available online at * * https://github.com/keeps/roda */ package org.roda.wui.client.planning; import java.util.Arrays; import java.util.List; import org.roda.core.data.common.RodaConstants; import org.roda.core.data.exceptions.NotFoundException; import org.roda.core.data.v2.formats.Format; import org.roda.core.data.v2.index.select.SelectedItemsList; import org.roda.wui.client.browse.BrowserService; import org.roda.wui.client.common.UserLogin; import org.roda.wui.client.common.utils.AsyncCallbackUtils; import org.roda.wui.client.common.utils.JavascriptUtils; import org.roda.wui.client.management.MemberManagement; import org.roda.wui.common.client.HistoryResolver; import org.roda.wui.common.client.tools.HistoryUtils; import org.roda.wui.common.client.tools.ListUtils; import org.roda.wui.common.client.widgets.Toast; import com.google.gwt.core.client.GWT; import com.google.gwt.event.dom.client.ClickEvent; import com.google.gwt.uibinder.client.UiBinder; import com.google.gwt.uibinder.client.UiField; import com.google.gwt.uibinder.client.UiHandler; import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.rpc.AsyncCallback; import com.google.gwt.user.client.ui.Button; import com.google.gwt.user.client.ui.Composite; import com.google.gwt.user.client.ui.Widget; import config.i18n.client.ClientMessages; public class EditFormat extends Composite { public static final HistoryResolver RESOLVER = new HistoryResolver() { @Override public void resolve(List<String> historyTokens, final AsyncCallback<Widget> callback) { if (historyTokens.size() == 1) { String formatId = historyTokens.get(0); BrowserService.Util.getInstance().retrieve(Format.class.getName(), formatId, fieldsToReturn, new AsyncCallback<Format>() { @Override public void onFailure(Throwable caught) { 
callback.onFailure(caught); } @Override public void onSuccess(Format format) { EditFormat editFormat = new EditFormat(format); callback.onSuccess(editFormat); } }); } else { HistoryUtils.newHistory(FormatRegister.RESOLVER); callback.onSuccess(null); } } @Override public void isCurrentUserPermitted(AsyncCallback<Boolean> callback) { UserLogin.getInstance().checkRoles(new HistoryResolver[] {MemberManagement.RESOLVER}, false, callback); } @Override public List<String> getHistoryPath() { return ListUtils.concat(FormatRegister.RESOLVER.getHistoryPath(), getHistoryToken()); } @Override public String getHistoryToken() { return "edit_format"; } }; interface MyUiBinder extends UiBinder<Widget, EditFormat> { } private static MyUiBinder uiBinder = GWT.create(MyUiBinder.class); private static ClientMessages messages = GWT.create(ClientMessages.class); private Format format; private static final List<String> fieldsToReturn = Arrays.asList(RodaConstants.INDEX_UUID, RodaConstants.FORMAT_ID, RodaConstants.FORMAT_NAME, RodaConstants.FORMAT_DEFINITION, RodaConstants.FORMAT_CATEGORY, RodaConstants.FORMAT_LATEST_VERSION, RodaConstants.FORMAT_DEVELOPER, RodaConstants.FORMAT_POPULARITY, RodaConstants.FORMAT_INITIAL_RELEASE, RodaConstants.FORMAT_IS_OPEN_FORMAT, RodaConstants.FORMAT_STANDARD, RodaConstants.FORMAT_WEBSITE, RodaConstants.FORMAT_PROVENANCE_INFORMATION, RodaConstants.FORMAT_EXTENSIONS, RodaConstants.FORMAT_MIMETYPES, RodaConstants.FORMAT_PRONOMS, RodaConstants.FORMAT_UTIS, RodaConstants.FORMAT_ALTERNATIVE_DESIGNATIONS, RodaConstants.FORMAT_VERSIONS); @UiField Button buttonApply; @UiField Button buttonRemove; @UiField Button buttonCancel; @UiField(provided = true) FormatDataPanel formatDataPanel; /** * Create a new panel to create a user * * @param user * the user to create */ public EditFormat(Format format) { this.format = format; this.formatDataPanel = new FormatDataPanel(true, true, format);<|fim▁hole|> @Override protected void onLoad() { super.onLoad(); 
JavascriptUtils.stickSidebar(); } @UiHandler("buttonApply") void buttonApplyHandler(ClickEvent e) { if (formatDataPanel.isChanged() && formatDataPanel.isValid()) { String formatId = format.getId(); format = formatDataPanel.getFormat(); format.setId(formatId); BrowserService.Util.getInstance().updateFormat(format, new AsyncCallback<Void>() { @Override public void onFailure(Throwable caught) { errorMessage(caught); } @Override public void onSuccess(Void result) { HistoryUtils.newHistory(ShowFormat.RESOLVER, format.getId()); } }); } else { HistoryUtils.newHistory(ShowFormat.RESOLVER, format.getId()); } } @UiHandler("buttonRemove") void buttonRemoveHandler(ClickEvent e) { BrowserService.Util.getInstance().deleteFormat( new SelectedItemsList<Format>(Arrays.asList(format.getUUID()), Format.class.getName()), new AsyncCallback<Void>() { @Override public void onFailure(Throwable caught) { errorMessage(caught); } @Override public void onSuccess(Void result) { Timer timer = new Timer() { @Override public void run() { HistoryUtils.newHistory(FormatRegister.RESOLVER); } }; timer.schedule(RodaConstants.ACTION_TIMEOUT); } }); } @UiHandler("buttonCancel") void buttonCancelHandler(ClickEvent e) { cancel(); } private void cancel() { HistoryUtils.newHistory(ShowFormat.RESOLVER, format.getId()); } private void errorMessage(Throwable caught) { if (caught instanceof NotFoundException) { Toast.showError(messages.editFormatNotFound(format.getName())); cancel(); } else { AsyncCallbackUtils.defaultFailureTreatment(caught); } } protected void enableApplyButton(boolean enabled) { buttonApply.setVisible(enabled); } }<|fim▁end|>
initWidget(uiBinder.createAndBindUi(this)); }
<|file_name|>check_errors_xlsx.py<|end_file_name|><|fim▁begin|>from openpyxl import load_workbook import os # list of text to search for keyWordList = ['Resume', 'Label', 'Description', 'ClueText', 'Title', 'QTEtitle'] # default path for docs on my PC for sh8 game xlsx documents #docDir = "d:/svn/ue3/SH8Game/Production/Dialogs/" docDir = "d:/svn/ue3/SH8Game/Production/Data/" #docDir = "d:/sh8/xlsx_python_tests/" # output for the log file logFile = 'd:/sh8/xlsx_python_tests/genlog.txt' # searching for INT column ID # returns column serial nubmer def FindBase(sheetName, keyWord): for col in range(1,50): findSpokenCoord = sheetName.cell(row = 1, column = col) findSpokenVal = findSpokenCoord.value if findSpokenVal == keyWord: return col # searching for all localization columns that is present # returns list of columns serial number def FindLoc(sheetName, keyWord): TextColList = [] for col in range(1,100): findSpokenCoord = sheetName.cell(row = 1, column = col) findSpokenVal = findSpokenCoord.value #print findSpokenVal if findSpokenVal: if ('.' 
+ keyWord) in findSpokenVal: TextColList.append(col) return TextColList # comparing INT cell content with localization content # returns string if INT and LOC cell are indentical # returns string if LOC is empty while INT is not def FindAndLog(docPath, keyWordList): # declaring var for storing log logVal = '' <|fim▁hole|> # obtaining list of all sheets in document sheetList = workBook.get_sheet_names() # adding path to log logVal += docPath + '\n' # iterating through key words for keyWord in keyWordList: # iterating through sheets in document for sheet in sheetList: sheetName = workBook[sheet] intColNum = FindBase(sheetName, keyWord) locColNumList = FindLoc(sheetName, keyWord) # checking if INT keyword is present in document if intColNum: # even for comments it is enough length for row in range(4,200): intRowCoord = sheetName.cell(row = row, column = intColNum) # obtaining INT cell value intRowVal = intRowCoord.value # checking if INT cell is not empty if intRowVal: # iterating through LOC columns in list for col in locColNumList: locRowCoord = sheetName.cell(row = row, column = col) # obtaining LOC cell value locRowVal = locRowCoord.value # checking whether LOC cell is duplicate of INT if intRowVal == locRowVal: #convering non ASCII characters #locASCII = str(intRowVal).encode('ascii', 'ignore').decode('ascii') #print intRowVal logVal += str(locRowCoord) + str(intRowVal) + '\n' # checking if LOC cell is empty while INT cell is not elif locRowVal == None: logVal += str(locRowCoord) + ' is empty\n' return logVal # collecting all .xlsxs from supplied path genLog = '' for path, dirs, fileNames in os.walk(docDir): for fileName in fileNames: docPath = os.path.join(path, fileName) # filtering files except .xlsx if '.xlsx' in docPath: # filling log genLog += FindAndLog(docPath, keyWordList) # writing and saving the log file filePath = open(logFile, 'wb') filePath.write(genLog) filePath.close()<|fim▁end|>
workBook = load_workbook(docPath) # for test purposes print docPath
<|file_name|>test_artificial_32_Anscombe_Lag1Trend_0_12_20.py<|end_file_name|><|fim▁begin|>import pyaf.Bench.TS_datasets as tsds import tests.artificial.process_artificial_dataset as art<|fim▁hole|> art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 0, transform = "Anscombe", sigma = 0.0, exog_count = 20, ar_order = 12);<|fim▁end|>
<|file_name|>StructureGenTree.java<|end_file_name|><|fim▁begin|>package Game.generation; import Game.base.BlockBase; import Game.base.World; /** * * @author maximumtech */ public class StructureGenTree extends StructureGenBase { <|fim▁hole|> public void generate(int x, int y) { int height = 13 + rand.nextInt(5); int leavesProg = height / 2; boolean passedLast = false; for (int i = 0; leavesProg > 0 || i < height; i++) { int yy = y + i; if (leavesProg == 0) { leavesProg = 1; } BlockBase block1 = world.getBlock(x, yy); if (i < height - 1 && (block1 == null || block1.canBeReplaced(world, x, y, BlockBase.woodLog))) { world.setBlock(x, yy, BlockBase.woodLog, (short) 1); } if (i > height - 11 && leavesProg > 0) { for (int o = x - leavesProg; o <= x + leavesProg; o++) { BlockBase block2 = world.getBlock(o, yy); if (block2 == null || block2.canBeReplaced(world, x, y, BlockBase.woodLog)) { if (o != x) { world.setBlock(o, yy, BlockBase.leaves); } else if (i >= height - 1) { world.setBlock(o, yy, BlockBase.leaves, (short) 1); } } } //if(leavesProg > 4) { // hitDelim = true; //} //if(hitDelim) { if (rand.nextInt(4) == 0 || passedLast) { leavesProg--; passedLast = false; } else { passedLast = true; } //}else{ // leavesProg++; //} } else if (i == height - 11) { world.setBlock(x + 1, yy, BlockBase.leaves); world.setBlock(x - 1, yy, BlockBase.leaves); } } } }<|fim▁end|>
public StructureGenTree(World world) { super(world); }