Dataset schema (one row per source file):
repo_name : string (lengths 4–116)
path      : string (lengths 4–379)
size      : string (lengths 1–7)
content   : string (lengths 3–1.05M)
license   : string (15 classes)
worldskills/concrete5-worldskills
blocks/page_attribute_display/templates/worldskills_content.php
1701
<?php
defined('C5_EXECUTE') or die('Access Denied.');

$c = \Page::getCurrentPage();
$content = $c->getAttribute($attributeHandle);
?>
<div class="ws-content">
    <?php if (is_object($content) && $content instanceof \Concrete\Core\Entity\File\File): ?>
        <?php
        if ($content->getTypeObject()->isSVG()) {
            $src = $content->getRelativePath();
        } else {
            $thumbnailType = \Concrete\Core\File\Image\Thumbnail\Type\Type::getByHandle('large');
            if (is_object($thumbnailType)) {
                $src = $content->getThumbnailURL($thumbnailType->getBaseVersion());
            } else {
                $src = $content->getRelativePath();
            }
        }
        if (!$src) {
            $src = $content->getURL();
        }
        ?>
        <figure class="mb-4">
            <?php
            if ($linkURL) {
                echo '<a href="' . $linkURL . '" ' . ($openLinkInNewWindow ? 'target="_blank"' : '') . '>';
            }
            echo '<img src="' . h($src) . '" alt="' . h($altText) . '" class="figure-img img-fluid" role="presentation">';
            if ($linkURL) {
                echo '</a>';
            }
            if ($title) {
                echo '<figcaption class="figure-caption">' . h($title) . '</figcaption>';
            }
            ?>
        </figure>
    <?php else: ?>
        <?php
        echo $controller->getOpenTag();
        if ($controller->getTitle()) {
            echo '<span class="ccm-block-page-attribute-display-title">' . $controller->getTitle() . '</span>';
        }
        echo $controller->getContent();
        echo $controller->getCloseTag();
        ?>
    <?php endif; ?>
</div>
mit
danwrong/liquid-inheritance
test/liquid_inheritance_test.rb
3831
$:.unshift(File.join(File.dirname(__FILE__), '../lib'))

require 'rubygems'
require 'test/unit'
require 'shoulda'
require 'redgreen' rescue nil
require 'liquid_inheritance'

class TestFileSystem
  def read_template_file(path)
    if path == 'simple'
      'test'
    elsif path == 'complex'
      %{
        beginning
        {% block thing %}
        rarrgh
        {% endblock %}
        {% block another %}
        bum
        {% endblock %}
        end
      }
    elsif path == 'nested'
      %{
        {% extends 'complex' %}
        {% block thing %}
        from nested
        {% endblock %}
        {% block another %}
        from nested (another)
        {% endblock %}
      }
    else
      %{
        {% extends 'complex' %}
        {% block thing %}
        from nested
        {% endblock %}
      }
    end
  end
end

class LiquidInheritanceTest < Test::Unit::TestCase
  context 'given a template with an extends tag' do
    setup do
      Liquid::Template.file_system = TestFileSystem.new
    end

    should 'output the contents of the extended template' do
      template = Liquid::Template.parse %{
        {% extends 'simple' %}
        {% block thing %}
        yeah
        {% endblock %}
      }
      assert_contains(template.render, /test/)
    end

    should 'render original content of block if no child block given' do
      template = Liquid::Template.parse %{
        {% extends 'complex' %}
      }
      assert_contains(template.render, /rarrgh/)
      assert_contains(template.render, /bum/)
    end

    should 'render child content of block if child block given' do
      template = Liquid::Template.parse %{
        {% extends 'complex' %}
        {% block thing %}
        booyeah
        {% endblock %}
      }
      assert_contains(template.render, /booyeah/)
      assert_contains(template.render, /bum/)
    end

    should 'render child content of blocks if multiple child blocks given' do
      template = Liquid::Template.parse %{
        {% extends 'complex' %}
        {% block thing %}
        booyeah
        {% endblock %}
        {% block another %}
        blurb
        {% endblock %}
      }
      assert_contains(template.render, /booyeah/)
      assert_contains(template.render, /blurb/)
    end

    should 'remember context of child template' do
      template = Liquid::Template.parse %{
        {% extends 'complex' %}
        {% block thing %}
        booyeah
        {% endblock %}
        {% block another %}
        {{ a }}
        {% endblock %}
      }
      res = template.render 'a' => 1234
      assert_contains(res, /booyeah/)
      assert_contains(res, /1234/)
    end

    should 'work with nested templates' do
      template = Liquid::Template.parse %{
        {% extends 'nested' %}
        {% block thing %}
        booyeah
        {% endblock %}
      }
      res = template.render 'a' => 1234
      assert_contains(res, /booyeah/)
      assert_contains(res, /from nested/)
    end

    should 'work with nested templates if middle template skips a block' do
      template = Liquid::Template.parse %{
        {% extends 'nested2' %}
        {% block another %}
        win
        {% endblock %}
      }
      res = template.render
      assert_contains(res, /win/)
    end

    should 'render parent for block.super' do
      template = Liquid::Template.parse %{
        {% extends 'complex' %}
        {% block thing %}
        {{ block.super }}
        {% endblock %}
      }
      res = template.render 'a' => 1234
      assert_contains(res, /rarrgh/)
    end
  end
end
mit
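The test file above doubles as the gem's clearest usage documentation. A minimal usage sketch in the same style, assuming the liquid and liquid_inheritance gems are loaded and a file system such as the TestFileSystem above is registered; the template body and variables are illustrative:

require 'liquid'
require 'liquid_inheritance'

Liquid::Template.file_system = TestFileSystem.new

# A child template extends a parent and overrides its named blocks;
# {{ block.super }} would pull in the parent block's original content.
template = Liquid::Template.parse %{
  {% extends 'complex' %}
  {% block thing %}overridden: {{ a }}{% endblock %}
}
puts template.render('a' => 42)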
arteria/django-compat
setup.py
2374
# -*- encoding: utf-8 -*-
import os
import sys

from setuptools import setup
from setuptools import find_packages

# Make the open function accept encodings in python < 3.x
if sys.version_info[0] < 3:
    import codecs
    open = codecs.open  # pylint: disable=redefined-builtin


# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def get_path(fname):
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), fname)


def read(fname):
    return open(get_path(fname), 'r', encoding='utf8').read()


if sys.argv[-1] == 'genreadme':
    try:
        import pypandoc
        long_description = pypandoc.convert(get_path('README.md'), 'rst')
        long_description = long_description.split('<!---Illegal PyPi RST data -->')[0]
        f = open(get_path('README.rst'), 'w')
        f.write(long_description)
        f.close()
        print("Successfully converted README.md to README.rst")
    except (IOError, ImportError):
        pass
    sys.exit()

try:
    long_description = read('README.rst')
except (OSError, IOError):
    try:
        long_description = read('README.md')
    except (OSError, IOError):
        long_description = ""

setup(
    name="django-compat",
    version="1.0.15",
    author='arteria GmbH',
    author_email="[email protected]",
    packages=find_packages(),
    include_package_data=True,
    description="Forward- and backwards-compatibility layer for Django 1.4, 1.7, 1.8, 1.9, 1.10, and 1.11",
    long_description=long_description,
    license='MIT',
    install_requires=open('requirements.txt').read().splitlines(),
    url="https://github.com/arteria/django-compat",
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Framework :: Django',
        'License :: OSI Approved :: MIT License',
        'Framework :: Django :: 1.4',
        'Framework :: Django :: 1.6',
        'Framework :: Django :: 1.7',
        'Framework :: Django :: 1.8',
        'Framework :: Django :: 1.9',
        'Framework :: Django :: 1.10',
        'Framework :: Django :: 1.11',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
)
mit
SpectralAngel/simoni
src/Indira/SimoniBundle/Resources/public/js/simoni.js
2310
function SIMONI() {
    $('.datepicker').datepicker({
        dateFormat: 'dd/mm/yy',
        changeMonth: true,
        changeYear: true
    });

    $('.colorpicker').colorpicker();

    $('.form-dialog').dialog({
        autoOpen: false
    });

    $('input.datetimepicker').datetimepicker({
        dateFormat: 'dd/mm/yy',
        timeFormat: 'hh:mm',
        changeMonth: true,
        changeYear: true,
        maxDate: 0
    });

    $('.ajax-form').submit(function (event) {
        event.preventDefault();
        var form = $(this);
        var url = form.attr('action');
        var posting = $.post(url, form.serialize()).done(function () {
            location.reload();
        });
    });
}

function addTagForm(collectionHolder, $newLinkLi) {
    // Get the data-prototype explained earlier
    var prototype = collectionHolder.data('prototype');
    // get the new index
    var index = collectionHolder.data('index');
    // Replace '__label__' in the prototype's HTML with
    // a number based on how many items we have
    var newForm = prototype.replace(/__label__/g, ' ' + index);
    // increase the index by one for the next item
    collectionHolder.data('index', index + 1);
    $newLinkLi.before(newForm);
}

function embedForm(collectionHolder, $addImagenLink) {
    // add the "add a tag" anchor and li to the tags ul
    collectionHolder.append($addImagenLink);
    // count the current form inputs we have (e.g. 2), use that as the new
    // index when inserting a new item (e.g. 2)
    collectionHolder.data('index', collectionHolder.find(':input').length + 1);
    $addImagenLink.on('click', function (e) {
        // prevent the link from creating a "#" on the URL
        e.preventDefault();
        // add a new tag form (see next code block)
        addTagForm(collectionHolder, $addImagenLink);
    });
}

function addChildForm(collectionHolder, label) {
    var prototype = collectionHolder.data('prototype');
    // get the new index
    var index = collectionHolder.data('index');
    // Replace '__label__' in the prototype's HTML with
    // a number based on how many items we have
    var newForm = prototype.replace(/__label__/g, ' ' + index);
    // increase the index by one for the next item
    collectionHolder.data('index', index + 1);
    collectionHolder.append(newForm);
}
mit
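A short wiring sketch for the helpers above, assuming jQuery, jQuery UI and the datetimepicker/colorpicker plugins are already on the page; the selectors are illustrative:

$(function () {
    // Initialize the pickers, dialogs and AJAX form handlers once the DOM is ready.
    SIMONI();

    // Hook an "add" link into a Symfony-style form collection whose
    // holder element carries a data-prototype attribute.
    var $holder = $('ul.imagenes');
    var $addLink = $('<li><a href="#">Add</a></li>');
    embedForm($holder, $addLink);
});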
dthree/wat
src/spider/stackoverflow.js
5932
'use strict';

/**
 * Module dependencies.
 */
const _ = require('lodash');
const moment = require('moment');
const chalk = require('chalk');
const util = require('../util');

const stackoverflow = {

  getPage(searchResult, callback) {
    callback = callback || function () {};
    const self = this;
    const questionId = (_.isObject(searchResult)) ? this.parseQuestionId(searchResult) : searchResult;
    self.getJSON(questionId, function (err, page) {
      if (err) {
        callback(err);
        return;
      }
      const question = page.question;
      const answers = page.answers;
      if (answers.length < 1) {
        callback('NO_ANSWERS');
        return;
      }
      let margin = String(_.max(answers, function (answ) {
        return String(answ.score).length;
      }).score).length + 4;
      margin = (String(question.score).length + 4) > margin ? String(question.score).length + 4 : margin;
      const headerLength = String(question.title).length + 2;
      const viewLength = String(question.view_count).length + 8;
      const padding = process.stdout.columns - (headerLength + viewLength);
      const header = ` ${chalk.cyan(question.title)}${self.app.cosmetician.pad('', padding)}${question.view_count} views`;
      const quest = self.formatAnswer(question, margin);
      const title = chalk.yellow('Stack Overflow');
      const hr = self.app.cosmetician.hr(2);
      let result = ` ${title}\n${header}\n\n${quest}\n\n Answers\n ${hr}\n`;
      for (let l = 0; l < answers.length; ++l) {
        result += `${self.formatAnswer(answers[l], margin)}\n`;
        if (l < answers.length - 1) {
          result += `${self.app.cosmetician.pad('', margin) + self.app.cosmetician.hr(margin)}\n`;
        }
      }
      callback(undefined, result);
    });
  },

  parseQuestionId(obj) {
    let res = String(obj.link).split('/questions/')[1];
    if (res) {
      res = String(res).split('/')[0];
      res = (!isNaN(res)) ? res : undefined;
    }
    return res;
  },

  getJSON(questionId, cb) {
    const self = this;
    const result = {};
    let dones = 0;
    let returned = false;
    function handler(err) {
      if (err && !returned) {
        returned = true;
        cb(err);
        return;
      }
      dones++;
      if (dones === 2) {
        cb(undefined, result);
      }
    }
    self.getQuestion(questionId, function (err, questions) {
      result.question = questions;
      handler(err, questions);
    });
    self.getAnswers(questionId, function (err, data) {
      result.answers = data;
      handler(err, data);
    });
  },

  getQuestion(questionId, callback) {
    callback = callback || function () {};
    const url = `http://api.stackexchange.com/2.2/questions/${questionId}?order=desc&sort=votes&site=stackoverflow&filter=!)Ehu.SDh9PeCcJmhDxT60pU1mT_mgvdo9d3mN8WYbPzQzO6Te`;
    util.fetchRemote({
      url,
      gzip: true
    }, function (err, answ) {
      if (!err) {
        let answers;
        let error;
        try {
          answers = JSON.parse(answ);
        } catch (e) {
          error = e;
        }
        if (answers === undefined) {
          callback(error);
          return;
        }
        callback(undefined, (answers.items || [])[0]);
      } else {
        callback(err);
      }
    });
  },

  getAnswers(questionId, callback) {
    callback = callback || function () {};
    const self = this;
    const filter = '!t)I()ziOdWLVHc78tC981)pqWLzTas-';
    const url = `http://api.stackexchange.com/2.2/questions/${questionId}/answers?order=desc&sort=votes&site=stackoverflow&filter=${filter}`;
    util.fetchRemote({
      url,
      gzip: true
    }, function (err, answ) {
      if (!err) {
        let answers;
        let error;
        try {
          answers = JSON.parse(answ);
        } catch (e) {
          error = e;
        }
        if (answers === undefined) {
          callback(error);
          return;
        }
        answers = answers.items || [];
        answers = self.sortAnswers(answers);
        answers = self.filterAnswers(answers);
        callback(undefined, answers);
      } else {
        callback(err);
      }
    });
  },

  sortAnswers(answ) {
    const result = answ.sort(function (a, b) {
      const aScore = (a.is_accepted) ? a.score + 5 : a.score;
      const bScore = (b.is_accepted) ? b.score + 5 : b.score;
      let order = 0;
      if (aScore > bScore) {
        order = -1;
      } else if (aScore < bScore) {
        order = 1;
      }
      return order;
    });
    return result;
  },

  filterAnswers(answers) {
    const results = [];
    let sum = 0;
    let best = 0;
    for (let i = 0; i < answers.length; ++i) {
      const score = answers[i].score;
      best = (score > best) ? score : best;
      sum += score;
    }
    const avg = (sum > 0) ? (sum / answers.length) : 0;
    answers = answers.slice(0, 3);
    for (let i = 0; i < answers.length; ++i) {
      if (answers[i].score >= avg || answers[i].is_accepted === true) {
        results.push(answers[i]);
      }
    }
    return results;
  },

  formatAnswer(answ, margin) {
    margin = margin || 8;
    const accepted = answ.is_accepted || false;
    const markdown = answ.body_markdown;
    const score = answ.score;
    const creation = moment(parseFloat(answ.creation_date) * 1000).format('DD MMM YYYY');
    const owner = answ.owner;
    let scoreSpace = this.app.cosmetician.pad(score, margin - 4, ' ');
    scoreSpace = (accepted === true) ? chalk.green(scoreSpace) : scoreSpace;
    const creator = ` ${scoreSpace} ${chalk.cyan(`${owner.display_name} on ${creation}`)}`;
    const formatted = this.app.cosmetician.tab(this.app.cosmetician.markdownToTerminal(markdown, {
      lineWidth() {
        return process.stdout.columns - margin - 2;
      }
    }), margin - 2);
    return `${creator}\n${formatted}`;
  }
};

module.exports = function (app) {
  stackoverflow.app = app;
  return stackoverflow;
};
mit
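A hedged sketch of driving the spider, assuming an app object that provides the cosmetician helpers the module calls (pad, hr, tab, markdownToTerminal); the question id is illustrative:

const stackoverflow = require('./spider/stackoverflow')(app);

// Accepts either a raw question id or a search-result object with a `.link`.
stackoverflow.getPage('11227809', function (err, text) {
  if (err) {
    console.error(err); // e.g. 'NO_ANSWERS'
    return;
  }
  console.log(text); // question header plus the filtered, score-sorted answers
});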
xtrmstep/ApplyingTddToLegacyCode
ApplyingTddToLegacyCode/Rule7Example/Notifications/EmailNotifier.cs
448
namespace Rule7Example.Notifications
{
    public class EmailNotifier : INotifier
    {
        public void Send(Cart cart)
        {
            var message = CreateMessage(cart);
            SendMessage(message);
        }

        private void SendMessage(string message)
        {
            // send via SMTP
        }

        private string CreateMessage(Cart cart)
        {
            return "Information about cart";
        }
    }
}
mit
chenglei1986/ImageUploadMask
image-upload-mask/src/main/java/imageuploadmask/OvalMask.java
778
package imageuploadmask;

import android.content.Context;
import android.graphics.Canvas;
import android.graphics.Path;
import android.graphics.RectF;
import android.graphics.Region;
import android.util.AttributeSet;

public class OvalMask extends ShapeMask {

    private Path mClipPath = new Path();

    public OvalMask(Context context) {
        this(context, null);
    }

    public OvalMask(Context context, AttributeSet attrs) {
        this(context, attrs, 0);
    }

    public OvalMask(Context context, AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);
    }

    @Override
    protected Path getClipPath() {
        mClipPath.reset();
        mClipPath.addOval(mMaskOvalRect, Path.Direction.CW);
        return mClipPath;
    }
}
mit
icyflash/ucloud-csharp-sdk
UCloudSDK/Models/SecurityGroupRule.cs
1929
namespace UCloudSDK.Models
{
    /// <summary>
    /// UNet firewall rule
    /// </summary>
    public partial class SecurityGroupRule
    {
        /// <summary>
        /// Network protocol
        /// <para>
        /// Enum values: TCP, UDP, ICMP, GRE
        /// </para>
        /// </summary>
        public string Proto { get; set; }

        /// <summary>
        /// Destination port
        /// <para>
        /// Range: [1-65535]; a single port (e.g. 80) or a port range (1-1024) may be given
        /// </para>
        /// </summary>
        public string Dst_port { get; set; }

        /// <summary>
        /// Source address
        /// <para>
        /// A valid IP address in the form 'x.x.x.x/x' or 'x.x.x.x'.
        /// </para>
        /// </summary>
        public string Src_ip { get; set; }

        /// <summary>
        /// Firewall action
        /// <para>
        /// Enum values: ACCEPT: allow through the firewall; DROP: block without returning any response
        /// </para>
        /// </summary>
        public string Action { get; set; }

        /// <summary>
        /// Rule priority
        /// <para>
        /// Enum values: 0 (high), 50 (medium), 100 (low)
        /// </para>
        /// </summary>
        public int Priority { get; set; }

        /// <summary>
        /// Returns the rule as a formatted string.
        /// <para>
        /// Format: Proto|Dst_port|Src_ip|Action|Priority
        /// </para>
        /// </summary>
        /// <returns>
        /// Proto|Dst_port|Src_ip|Action|Priority.
        /// </returns>
        public override string ToString()
        {
            return string.Format("{0}|{1}|{2}|{3}|{4}", Proto, Dst_port, Src_ip, Action, Priority);
        }
    }
}
mit
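A small usage sketch based only on the properties and ToString format defined above:

var rule = new UCloudSDK.Models.SecurityGroupRule
{
    Proto = "TCP",
    Dst_port = "80",
    Src_ip = "0.0.0.0/0",
    Action = "ACCEPT",
    Priority = 50
};

// Prints: TCP|80|0.0.0.0/0|ACCEPT|50
System.Console.WriteLine(rule);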
tbepler/LRPaGe
src/bepler/lrpage/templates/LexerTemplate.java
2857
package bepler.lrpage.templates;

public class LexerTemplate {

    private final java.io.Reader r;
    private boolean next = true;
    private int lineNum = 1;
    private int charNum = 1;
    private final java.util.Deque<Character> buffer = new java.util.LinkedList<Character>();

    public LexerTemplate(java.io.Reader r) {
        this.r = r;
    }

    public LexerTemplate(java.io.InputStream in) {
        this(new java.io.InputStreamReader(in));
    }

    public boolean hasNext() {
        return next;
    }

    public AbstractSyntaxNodeTemplate nextToken() throws java.io.IOException {
        if (!next) {
            throw new RuntimeException("No tokens remaining.");
        }
        java.util.List<java.util.regex.Pattern> pats = getRegexes();
        java.util.List<java.util.regex.Matcher> ms = new java.util.ArrayList<java.util.regex.Matcher>();
        for (java.util.regex.Pattern p : pats) {
            ms.add(p.matcher(""));
        }
        String cur = "";
        boolean fin = false;
        while (!fin) {
            if (!buffer.isEmpty()) {
                //read from the buffer before reading more chars
                //from the reader
                cur = cur + buffer.pop();
            } else {
                int read = r.read();
                if (read == -1) {
                    //the reader is expired, so set fin to true
                    fin = true;
                } else {
                    cur = cur + (char) read;
                }
            }
            fin = fin || !hasMatch(cur, ms);
        }
        //if cur is empty, then return eof and mark lexing done
        if (cur.length() == 0) {
            next = false;
            return eofToken();
        }
        //find the longest match
        for (int end = cur.length(); end >= 0; --end) {
            String sub = cur.substring(0, end);
            for (int i = 0; i < ms.size(); ++i) {
                java.util.regex.Matcher m = ms.get(i);
                m.reset(sub);
                if (m.matches()) {
                    //push the end of cur into the buffer
                    for (int j = end; j < cur.length(); ++j) {
                        buffer.add(cur.charAt(j));
                    }
                    int line = lineNum;
                    int cNum = charNum;
                    //update line and char count
                    for (int j = 0; j < sub.length(); ++j) {
                        if (sub.charAt(j) == '\n') {
                            ++lineNum;
                            charNum = 0;
                        }
                        ++charNum;
                    }
                    //return the token
                    return createToken(i, line, cNum, sub);
                }
            }
        }
        //an error occurred, the string is unmatched
        throw new RuntimeException("Unmatched token: " + cur);
    }

    private boolean hasMatch(String s, java.util.List<java.util.regex.Matcher> ms) {
        for (java.util.regex.Matcher m : ms) {
            m.reset(s);
            if (m.matches() || m.hitEnd()) {
                return true;
            }
        }
        return false;
    }

    //TODO - must be implemented by the code generator
    protected AbstractSyntaxNodeTemplate createToken(int tokenIndex, int line, int pos, String text) {
        //STUB
        return null;
    }

    //TODO - must be implemented by the code generator
    protected java.util.List<java.util.regex.Pattern> getRegexes() {
        //STUB
        return null;
    }

    //TODO - must be implemented by the code generator
    protected AbstractSyntaxNodeTemplate eofToken() {
        //STUB
        return null;
    }

}
mit
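A sketch of the intended token loop, where GeneratedLexer stands for a hypothetical subclass in which the code generator has filled in the three stubbed methods (createToken, getRegexes, eofToken):

// Tokenize a string; nextToken() always yields the longest match and
// finally the EOF token, after which hasNext() returns false.
LexerTemplate lexer = new GeneratedLexer(new java.io.StringReader("input text"));
while (lexer.hasNext()) {
    AbstractSyntaxNodeTemplate token = lexer.nextToken();
    // ... hand the token to the parser ...
}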
rogerio-dfb/Vidly
Vidly/Models/Movie.cs
864
using System;
using System.Collections.Generic;
using System.ComponentModel.DataAnnotations;
using System.Linq;
using System.Web;

namespace Vidly.Models
{
    public class Movie
    {
        public int Id { get; set; }

        [Required]
        [StringLength(255)]
        public string Name { get; set; }

        [Required]
        [Display(Name = "Release Date")]
        public DateTime ReleaseDate { get; set; }

        public DateTime DateAdded { get; set; }

        [Required]
        [Display(Name = "Number In Stock")]
        [Range(1, 20, ErrorMessage = "The Number in Stock must be between 1 and 20.")]
        public byte NumberInStock { get; set; }

        public Genre Genre { get; set; }

        [Display(Name = "Genre")]
        [Required]
        public byte GenreId { get; set; }

        public byte NumberAvailable { get; set; }
    }
}
mit
getnashty/liftr
app/helpers/lifts_helper.rb
1730
module LiftsHelper
  def exercise_select(f)
    options = {}
    today = Time.now.strftime("%m/%d/%Y")
    @exercises.each do |exercise|
      noway = 0
      if exercise.name != "Body Weight" && exercise.muscle_id == @muscleid
        @lifts.each do |lift|
          if lift.user_id == current_user.id && lift.created_at.strftime("%m/%d/%Y") == today && lift.exercise_id == exercise.id && exercise.name != "Body Weight"
            noway = 1
          end
        end
        options[exercise.name] = exercise.id unless noway == 1
      end
    end
    f.select :exercise_id, options
  end

  def optcount
    today = Time.now.strftime("%m/%d/%Y")
    @xcount = 0
    @lcount = 0
    @exercises.each do |exercise|
      if exercise.name != "Body Weight" && exercise.muscle_id == @muscleid
        @xcount = @xcount + 1
        @lifts.each do |lift|
          if lift.user_id == current_user.id && lift.created_at.strftime("%m/%d/%Y") == today && lift.exercise_id == exercise.id && exercise.name != "Body Weight"
            @lcount = @lcount + 1
          end
        end
      end
    end
  end

  def exercise_edit(f)
    options = {}
    today = Time.now.strftime("%m/%d/%Y")
    @exercises.each do |exercise|
      noway = 0
      if exercise.name != "Body Weight"
        @lifts.each do |lift|
          if lift.user_id == current_user.id && lift.created_at.strftime("%m/%d/%Y") == today && lift.exercise_id == exercise.id && exercise.name != "Body Weight"
            noway = 1
          end
        end
        options[exercise.name] = exercise.id unless noway == 1
      end
    end
    f.select :exercise_id, options
  end
end
mit
xZ1mEFx/yii2-multilang
views/translation/adminlte/index.php
2944
<?php

use xz1mefx\adminlte\helpers\Html;
use xz1mefx\multilang\models\SourceMessage;
use yii\grid\ActionColumn;
use yii\grid\GridView;
use yii\widgets\Pjax;

/* @var $this yii\web\View */
/* @var $searchModel \xz1mefx\multilang\models\search\TranslationSearch */
/* @var $dataProvider yii\data\ActiveDataProvider */
/* @var $canUpdate bool */

$this->title = Yii::t('multilang-tools', 'Interface translations');
$this->params['breadcrumbs'][] = $this->title;
$this->params['title'] = $this->title;

?>
<div class="box box-primary">
    <div class="box-header">
        &nbsp;
        <div class="box-tools pull-right">
            <button class="btn btn-box-tool" data-widget="collapse" data-toggle="tooltip" title="Collapse">
                <?= Html::icon('minus', ['prefix' => 'fa fa-']) ?>
            </button>
        </div>
    </div>
    <div class="box-body">
        <div class="box-body-overflow">
            <?php Pjax::begin(); ?>
            <?= GridView::widget([
                'tableOptions' => ['class' => 'table table-striped table-bordered table-hover'],
                'dataProvider' => $dataProvider,
                'filterModel' => $searchModel,
                'columns' => [
                    [
                        'class' => 'yii\grid\SerialColumn',
                        'headerOptions' => ['class' => 'text-center col-xs-1 col-sm-1'],
                        'contentOptions' => ['class' => 'text-center col-xs-1 col-sm-1'],
                    ],
                    [
                        'attribute' => 'category',
                        'filter' => SourceMessage::getCategoriesDrDownList(),
                    ],
                    [
                        'attribute' => 'message',
                        'format' => 'text',
                    ],
                    [
                        'label' => Yii::t('multilang-tools', 'Translation ({language})', ['language' => Yii::$app->language]),
                        'attribute' => 'translate',
                        'content' => function ($model) {
                            /* @var $model \xz1mefx\multilang\models\SourceMessage */
                            return Yii::$app->formatter->asNtext($model->getTranslationByLocal(Yii::$app->language));
                        },
                    ],
                    [
                        'class' => ActionColumn::className(),
                        'visible' => $canUpdate,
                        'headerOptions' => ['class' => 'text-center col-lg-1 col-sm-1'],
                        'contentOptions' => ['class' => 'text-center col-lg-1 col-sm-1'],
                        'template' => '{update}',
                        'visibleButtons' => [
                            'update' => $canUpdate,
                        ],
                    ],
                ],
            ]); ?>
            <?php Pjax::end(); ?>
        </div>
    </div>
</div>
mit
Ostrovski/node-express-ex-boilerplate
domain/services/Counter.js
220
'use strict';

function Counter() {
    this.counter = 0;
    this.dt = new Date();
}

Counter.prototype.count = function() {
    return this.counter++ + ' (I was born at ' + this.dt + ')';
};

module.exports = Counter;
mit
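A minimal usage sketch of the service:

const Counter = require('./domain/services/Counter');

const counter = new Counter();
console.log(counter.count()); // "0 (I was born at <creation date>)"
console.log(counter.count()); // "1 (I was born at <creation date>)"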
duodraco/curso-php-drc
aulas/07-bancos de dados/fromcsvtodb/app/library/DRC/Data/Sale.php
3732
<?php
/**
 * Created by PhpStorm.
 * User: noite
 * Date: 25/03/15
 * Time: 21:58
 */

namespace DRC\Data;

class Sale
{
    protected $id;
    protected $transactionDate;
    protected $product;
    protected $price;
    protected $paymentType;
    protected $name;
    protected $city;
    protected $state;
    protected $country;
    protected $accountCreated;
    protected $lastLogin;
    protected $latitude;
    protected $longitude;

    /** @return mixed */
    public function getId() { return $this->id; }

    /** @param mixed $id */
    public function setId($id) { $this->id = $id; }

    /** @return mixed */
    public function getTransactionDate() { return $this->transactionDate; }

    /** @param mixed $transactionDate */
    public function setTransactionDate($transactionDate) { $this->transactionDate = $transactionDate; }

    /** @return mixed */
    public function getProduct() { return $this->product; }

    /** @param mixed $product */
    public function setProduct($product) { $this->product = $product; }

    /** @return mixed */
    public function getPrice() { return $this->price; }

    /** @param mixed $price */
    public function setPrice($price) { $this->price = $price; }

    /** @return mixed */
    public function getPaymentType() { return $this->paymentType; }

    /** @param mixed $paymentType */
    public function setPaymentType($paymentType) { $this->paymentType = $paymentType; }

    /** @return mixed */
    public function getName() { return $this->name; }

    /** @param mixed $name */
    public function setName($name) { $this->name = $name; }

    /** @return mixed */
    public function getCity() { return $this->city; }

    /** @param mixed $city */
    public function setCity($city) { $this->city = $city; }

    /** @return mixed */
    public function getState() { return $this->state; }

    /** @param mixed $state */
    public function setState($state) { $this->state = $state; }

    /** @return mixed */
    public function getCountry() { return $this->country; }

    /** @param mixed $country */
    public function setCountry($country) { $this->country = $country; }

    /** @return mixed */
    public function getAccountCreated() { return $this->accountCreated; }

    /** @param mixed $accountCreated */
    public function setAccountCreated($accountCreated) { $this->accountCreated = $accountCreated; }

    /** @return mixed */
    public function getLastLogin() { return $this->lastLogin; }

    /** @param mixed $lastLogin */
    public function setLastLogin($lastLogin) { $this->lastLogin = $lastLogin; }

    /** @return mixed */
    public function getLatitude() { return $this->latitude; }

    /** @param mixed $latitude */
    public function setLatitude($latitude) { $this->latitude = $latitude; }

    /** @return mixed */
    public function getLongitude() { return $this->longitude; }

    /** @param mixed $longitude */
    public function setLongitude($longitude) { $this->longitude = $longitude; }
}
mit
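A short hydration sketch using only the accessors defined above; the field values are illustrative CSV data:

$sale = new \DRC\Data\Sale();
$sale->setId(1);
$sale->setProduct('Product1');
$sale->setPrice(1200);
$sale->setPaymentType('Visa');

echo $sale->getProduct(); // Product1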
peta-okechan/emris
src/ObjectData.hpp
22594
//
//  ObjectData.hpp
//  emris
//
//  Created by peta on 2013/12/16.
//  Copyright (c) 2013 peta.okechan.net. All rights reserved.
//

#ifndef EmTetris_Box_h
#define EmTetris_Box_h

#include <vector>
#include <unordered_map>
#include <string>
#include <GL/glut.h>
#include <glm/glm.hpp>
// Required for glm::translate/scale/rotate, glm::pi and glm::make_vec3 used below.
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/constants.hpp>
#include <glm/gtc/type_ptr.hpp>

struct ObjectData
{
public:
    struct BoxChar
    {
        unsigned char character;
        unsigned int width;
        std::vector<glm::vec2> dotData;
    };
    using CharMap = std::unordered_map<unsigned char, BoxChar>;

private:
    static size_t boxVertexCount;
    static size_t bevelBoxVertexCount;
    static size_t backgroundVertexCount;
    static size_t currentObjectVertexCount;
    static CharMap charMap;

public:
    static void UseBox(GLuint const positionAttribId, GLuint const normalAttribId)
    {
        static GLuint vbo = 0;
        if (!vbo) {
            std::vector<float> vdata = {
                // front face
                -0.5f, +0.5f, +0.5f, +0.0f, +0.0f, +1.0f,
                -0.5f, -0.5f, +0.5f, +0.0f, +0.0f, +1.0f,
                +0.5f, +0.5f, +0.5f, +0.0f, +0.0f, +1.0f,
                +0.5f, +0.5f, +0.5f, +0.0f, +0.0f, +1.0f,
                -0.5f, -0.5f, +0.5f, +0.0f, +0.0f, +1.0f,
                +0.5f, -0.5f, +0.5f, +0.0f, +0.0f, +1.0f,
                // back face
                -0.5f, +0.5f, -0.5f, +0.0f, +0.0f, -1.0f,
                +0.5f, +0.5f, -0.5f, +0.0f, +0.0f, -1.0f,
                -0.5f, -0.5f, -0.5f, +0.0f, +0.0f, -1.0f,
                -0.5f, -0.5f, -0.5f, +0.0f, +0.0f, -1.0f,
                +0.5f, +0.5f, -0.5f, +0.0f, +0.0f, -1.0f,
                +0.5f, -0.5f, -0.5f, +0.0f, +0.0f, -1.0f,
                // left face
                -0.5f, +0.5f, -0.5f, -1.0f, +0.0f, +0.0f,
                -0.5f, -0.5f, -0.5f, -1.0f, +0.0f, +0.0f,
                -0.5f, +0.5f, +0.5f, -1.0f, +0.0f, +0.0f,
                -0.5f, +0.5f, +0.5f, -1.0f, +0.0f, +0.0f,
                -0.5f, -0.5f, -0.5f, -1.0f, +0.0f, +0.0f,
                -0.5f, -0.5f, +0.5f, -1.0f, +0.0f, +0.0f,
                // right face
                +0.5f, +0.5f, -0.5f, +1.0f, +0.0f, +0.0f,
                +0.5f, +0.5f, +0.5f, +1.0f, +0.0f, +0.0f,
                +0.5f, -0.5f, -0.5f, +1.0f, +0.0f, +0.0f,
                +0.5f, -0.5f, -0.5f, +1.0f, +0.0f, +0.0f,
                +0.5f, +0.5f, +0.5f, +1.0f, +0.0f, +0.0f,
                +0.5f, -0.5f, +0.5f, +1.0f, +0.0f, +0.0f,
                // top face
                -0.5f, +0.5f, -0.5f, +0.0f, +1.0f, +0.0f,
                -0.5f, +0.5f, +0.5f, +0.0f, +1.0f, +0.0f,
                +0.5f, +0.5f, -0.5f, +0.0f, +1.0f, +0.0f,
                +0.5f, +0.5f, -0.5f, +0.0f, +1.0f, +0.0f,
                -0.5f, +0.5f, +0.5f, +0.0f, +1.0f, +0.0f,
                +0.5f, +0.5f, +0.5f, +0.0f, +1.0f, +0.0f,
                // bottom face
                -0.5f, -0.5f, -0.5f, +0.0f, -1.0f, +0.0f,
                +0.5f, -0.5f, -0.5f, +0.0f, -1.0f, +0.0f,
                -0.5f, -0.5f, +0.5f, +0.0f, -1.0f, +0.0f,
                -0.5f, -0.5f, +0.5f, +0.0f, -1.0f, +0.0f,
                +0.5f, -0.5f, -0.5f, +0.0f, -1.0f, +0.0f,
                +0.5f, -0.5f, +0.5f, +0.0f, -1.0f, +0.0f,
            };
            glGenBuffers(1, &vbo);
            glBindBuffer(GL_ARRAY_BUFFER, vbo);
            glBufferData(GL_ARRAY_BUFFER, sizeof(float) * vdata.size(), vdata.data(), GL_STATIC_DRAW);
            boxVertexCount = vdata.size() / 6;
        } else {
            glBindBuffer(GL_ARRAY_BUFFER, vbo);
        }
        GLsizei stride = sizeof(float) * 6;
        float *ptr = nullptr;
        glEnableVertexAttribArray(positionAttribId);
        glVertexAttribPointer(positionAttribId, 3, GL_FLOAT, GL_FALSE, stride, ptr);
        glEnableVertexAttribArray(normalAttribId);
        glVertexAttribPointer(normalAttribId, 3, GL_FLOAT, GL_FALSE, stride, ptr + 3);
        currentObjectVertexCount = boxVertexCount;
    }

    static void UseBevelBox(GLuint const positionAttribId, GLuint const normalAttribId)
    {
        static GLuint vbo = 0;
        if (!vbo) {
            std::vector<float> vdata = {
                +0.45f, +0.50f, +0.45f, +0.00f, +1.00f, +0.00f,
                +0.45f, +0.50f, -0.45f, +0.00f, +1.00f, +0.00f,
                -0.45f, +0.50f, +0.45f, +0.00f, +1.00f, +0.00f,
                -0.45f, +0.50f, +0.45f, +0.00f, +1.00f, +0.00f,
                +0.45f, +0.50f, -0.45f, +0.00f, +1.00f, +0.00f,
                -0.45f, +0.50f, -0.45f, +0.00f, +1.00f, +0.00f,
                +0.45f, -0.50f, -0.45f, -0.00f, -1.00f, -0.00f,
                +0.45f, -0.50f, +0.45f, -0.00f, -1.00f, -0.00f,
                -0.45f, -0.50f, -0.45f, -0.00f, -1.00f, -0.00f,
                -0.45f, -0.50f, -0.45f, -0.00f, -1.00f, -0.00f,
                +0.45f, -0.50f, +0.45f, -0.00f, -1.00f, -0.00f,
                -0.45f, -0.50f, +0.45f, -0.00f, -1.00f, -0.00f,
                -0.50f, -0.45f, -0.45f, -1.00f, +0.00f, -0.00f,
                -0.50f, -0.45f, +0.45f, -1.00f, +0.00f, -0.00f,
                -0.50f, +0.45f, -0.45f, -1.00f, +0.00f, -0.00f,
                -0.50f, +0.45f, -0.45f, -1.00f, +0.00f, -0.00f,
                -0.50f, -0.45f, +0.45f, -1.00f, +0.00f, -0.00f,
                -0.50f, +0.45f, +0.45f, -1.00f, +0.00f, -0.00f,
                +0.45f, +0.45f, +0.50f, +0.00f, -0.00f, +1.00f,
                -0.45f, +0.45f, +0.50f, +0.00f, -0.00f, +1.00f,
                +0.45f, -0.45f, +0.50f, +0.00f, -0.00f, +1.00f,
                +0.45f, -0.45f, +0.50f, +0.00f, -0.00f, +1.00f,
                -0.45f, +0.45f, +0.50f, +0.00f, -0.00f, +1.00f,
                -0.45f, -0.45f, +0.50f, +0.00f, -0.00f, +1.00f,
                +0.50f, +0.45f, -0.45f, +1.00f, -0.00f, +0.00f,
                +0.50f, +0.45f, +0.45f, +1.00f, -0.00f, +0.00f,
                +0.50f, -0.45f, -0.45f, +1.00f, -0.00f, +0.00f,
                +0.50f, -0.45f, -0.45f, +1.00f, -0.00f, +0.00f,
                +0.50f, +0.45f, +0.45f, +1.00f, -0.00f, +0.00f,
                +0.50f, -0.45f, +0.45f, +1.00f, -0.00f, +0.00f,
                +0.50f, +0.45f, -0.45f, +0.70f, +0.00f, -0.70f,
                +0.50f, -0.45f, -0.45f, +0.70f, +0.00f, -0.70f,
                +0.45f, +0.45f, -0.50f, +0.70f, +0.00f, -0.70f,
                +0.45f, +0.45f, -0.50f, +0.70f, +0.00f, -0.70f,
                +0.50f, -0.45f, -0.45f, +0.70f, +0.00f, -0.70f,
                +0.45f, -0.45f, -0.50f, +0.70f, +0.00f, -0.70f,
                -0.45f, +0.50f, -0.45f, +0.00f, +0.70f, -0.70f,
                +0.45f, +0.50f, -0.45f, +0.00f, +0.70f, -0.70f,
                -0.45f, +0.45f, -0.50f, +0.00f, +0.70f, -0.70f,
                -0.45f, +0.45f, -0.50f, +0.00f, +0.70f, -0.70f,
                +0.45f, +0.50f, -0.45f, +0.00f, +0.70f, -0.70f,
                +0.45f, +0.45f, -0.50f, +0.00f, +0.70f, -0.70f,
                +0.45f, +0.50f, -0.45f, +0.70f, +0.70f, +0.00f,
                +0.45f, +0.50f, +0.45f, +0.70f, +0.70f, +0.00f,
                +0.50f, +0.45f, -0.45f, +0.70f, +0.70f, +0.00f,
                +0.50f, +0.45f, -0.45f, +0.70f, +0.70f, +0.00f,
                +0.45f, +0.50f, +0.45f, +0.70f, +0.70f, +0.00f,
                +0.50f, +0.45f, +0.45f, +0.70f, +0.70f, +0.00f,
                +0.45f, -0.50f, -0.45f, -0.00f, -0.70f, -0.70f,
                -0.45f, -0.50f, -0.45f, -0.00f, -0.70f, -0.70f,
                +0.45f, -0.45f, -0.50f, -0.00f, -0.70f, -0.70f,
                +0.45f, -0.45f, -0.50f, -0.00f, -0.70f, -0.70f,
                -0.45f, -0.50f, -0.45f, -0.00f, -0.70f, -0.70f,
                -0.45f, -0.45f, -0.50f, -0.00f, -0.70f, -0.70f,
                +0.50f, -0.45f, -0.45f, +0.70f, -0.70f, +0.00f,
                +0.50f, -0.45f, +0.45f, +0.70f, -0.70f, +0.00f,
                +0.45f, -0.50f, -0.45f, +0.70f, -0.70f, +0.00f,
                +0.45f, -0.50f, -0.45f, +0.70f, -0.70f, +0.00f,
                +0.50f, -0.45f, +0.45f, +0.70f, -0.70f, +0.00f,
                +0.45f, -0.50f, +0.45f, +0.70f, -0.70f, +0.00f,
                -0.50f, -0.45f, -0.45f, -0.70f, +0.00f, -0.70f,
                -0.50f, +0.45f, -0.45f, -0.70f, +0.00f, -0.70f,
                -0.45f, -0.45f, -0.50f, -0.70f, +0.00f, -0.70f,
                -0.45f, -0.45f, -0.50f, -0.70f, +0.00f, -0.70f,
                -0.50f, +0.45f, -0.45f, -0.70f, +0.00f, -0.70f,
                -0.45f, +0.45f, -0.50f, -0.70f, +0.00f, -0.70f,
                -0.45f, -0.50f, -0.45f, -0.70f, -0.70f, -0.00f,
                -0.45f, -0.50f, +0.45f, -0.70f, -0.70f, -0.00f,
                -0.50f, -0.45f, -0.45f, -0.70f, -0.70f, -0.00f,
                -0.50f, -0.45f, -0.45f, -0.70f, -0.70f, -0.00f,
                -0.45f, -0.50f, +0.45f, -0.70f, -0.70f, -0.00f,
                -0.50f, -0.45f, +0.45f, -0.70f, -0.70f, -0.00f,
                -0.50f, +0.45f, -0.45f, -0.70f, +0.70f, +0.00f,
                -0.50f, +0.45f, +0.45f, -0.70f, +0.70f, +0.00f,
                -0.45f, +0.50f, -0.45f, -0.70f, +0.70f, +0.00f,
                -0.45f, +0.50f, -0.45f, -0.70f, +0.70f, +0.00f,
                -0.50f, +0.45f, +0.45f, -0.70f, +0.70f, +0.00f,
                -0.45f, +0.50f, +0.45f, -0.70f, +0.70f, +0.00f,
                +0.45f, +0.45f, +0.50f, +0.70f, -0.00f, +0.70f,
                +0.45f, -0.45f, +0.50f, +0.70f, -0.00f, +0.70f,
                +0.50f, +0.45f, +0.45f, +0.70f, -0.00f, +0.70f,
                +0.50f, +0.45f, +0.45f, +0.70f, -0.00f, +0.70f,
                +0.45f, -0.45f, +0.50f, +0.70f, -0.00f, +0.70f,
                +0.50f, -0.45f, +0.45f, +0.70f, -0.00f, +0.70f,
                +0.45f, +0.50f, +0.45f, +0.00f, +0.70f, +0.70f,
                -0.45f, +0.50f, +0.45f, +0.00f, +0.70f, +0.70f,
                +0.45f, +0.45f, +0.50f, +0.00f, +0.70f, +0.70f,
                +0.45f, +0.45f, +0.50f, +0.00f, +0.70f, +0.70f,
                -0.45f, +0.50f, +0.45f, +0.00f, +0.70f, +0.70f,
                -0.45f, +0.45f, +0.50f, +0.00f, +0.70f, +0.70f,
                +0.45f, -0.45f, +0.50f, -0.00f, -0.70f, +0.70f,
                -0.45f, -0.45f, +0.50f, -0.00f, -0.70f, +0.70f,
                +0.45f, -0.50f, +0.45f, -0.00f, -0.70f, +0.70f,
                +0.45f, -0.50f, +0.45f, -0.00f, -0.70f, +0.70f,
                -0.45f, -0.45f, +0.50f, -0.00f, -0.70f, +0.70f,
                -0.45f, -0.50f, +0.45f, -0.00f, -0.70f, +0.70f,
                -0.45f, -0.45f, +0.50f, -0.70f, +0.00f, +0.70f,
                -0.45f, +0.45f, +0.50f, -0.70f, +0.00f, +0.70f,
                -0.50f, -0.45f, +0.45f, -0.70f, +0.00f, +0.70f,
                -0.50f, -0.45f, +0.45f, -0.70f, +0.00f, +0.70f,
                -0.45f, +0.45f, +0.50f, -0.70f, +0.00f, +0.70f,
                -0.50f, +0.45f, +0.45f, -0.70f, +0.00f, +0.70f,
                +0.45f, +0.45f, -0.50f, +0.00f, +0.00f, -1.00f,
                +0.45f, -0.45f, -0.50f, +0.00f, +0.00f, -1.00f,
                -0.45f, +0.45f, -0.50f, +0.00f, +0.00f, -1.00f,
                -0.45f, +0.45f, -0.50f, +0.00f, +0.00f, -1.00f,
                +0.45f, -0.45f, -0.50f, +0.00f, +0.00f, -1.00f,
                -0.45f, -0.45f, -0.50f, +0.00f, +0.00f, -1.00f,
                +0.45f, +0.50f, -0.45f, +0.58f, +0.58f, -0.58f,
                +0.50f, +0.45f, -0.45f, +0.58f, +0.58f, -0.58f,
                +0.45f, +0.45f, -0.50f, +0.58f, +0.58f, -0.58f,
                +0.45f, -0.45f, -0.50f, +0.58f, -0.58f, -0.58f,
                +0.50f, -0.45f, -0.45f, +0.58f, -0.58f, -0.58f,
                +0.45f, -0.50f, -0.45f, +0.58f, -0.58f, -0.58f,
                -0.45f, -0.45f, -0.50f, -0.58f, -0.58f, -0.58f,
                -0.45f, -0.50f, -0.45f, -0.58f, -0.58f, -0.58f,
                -0.50f, -0.45f, -0.45f, -0.58f, -0.58f, -0.58f,
                -0.50f, +0.45f, -0.45f, -0.58f, +0.58f, -0.58f,
                -0.45f, +0.50f, -0.45f, -0.58f, +0.58f, -0.58f,
                -0.45f, +0.45f, -0.50f, -0.58f, +0.58f, -0.58f,
                +0.50f, +0.45f, +0.45f, +0.58f, +0.58f, +0.58f,
                +0.45f, +0.50f, +0.45f, +0.58f, +0.58f, +0.58f,
                +0.45f, +0.45f, +0.50f, +0.58f, +0.58f, +0.58f,
                +0.45f, -0.45f, +0.50f, +0.58f, -0.58f, +0.58f,
                +0.45f, -0.50f, +0.45f, +0.58f, -0.58f, +0.58f,
                +0.50f, -0.45f, +0.45f, +0.58f, -0.58f, +0.58f,
                -0.45f, -0.45f, +0.50f, -0.58f, -0.58f, +0.58f,
                -0.50f, -0.45f, +0.45f, -0.58f, -0.58f, +0.58f,
                -0.45f, -0.50f, +0.45f, -0.58f, -0.58f, +0.58f,
                -0.45f, +0.50f, +0.45f, -0.58f, +0.58f, +0.58f,
                -0.50f, +0.45f, +0.45f, -0.58f, +0.58f, +0.58f,
                -0.45f, +0.45f, +0.50f, -0.58f, +0.58f, +0.58f,
            };
            glGenBuffers(1, &vbo);
            glBindBuffer(GL_ARRAY_BUFFER, vbo);
            glBufferData(GL_ARRAY_BUFFER, sizeof(float) * vdata.size(), vdata.data(), GL_STATIC_DRAW);
            bevelBoxVertexCount = vdata.size() / 6;
        } else {
            glBindBuffer(GL_ARRAY_BUFFER, vbo);
        }
        GLsizei stride = sizeof(float) * 6;
        float *ptr = nullptr;
        glEnableVertexAttribArray(positionAttribId);
        glVertexAttribPointer(positionAttribId, 3, GL_FLOAT, GL_FALSE, stride, ptr);
        glEnableVertexAttribArray(normalAttribId);
        glVertexAttribPointer(normalAttribId, 3, GL_FLOAT, GL_FALSE, stride, ptr + 3);
        currentObjectVertexCount = bevelBoxVertexCount;
    }

    static void UseBackground(GLuint const positionAttribId, GLuint const normalAttribId, int const innerWidth = 10, int const innerHeight = 20)
    {
        static GLuint vbo = 0;
        if (!vbo) {
            std::vector<float> const plane = {
                -0.5f, +0.5f, +0.0f, +0.0f, +0.0f, +1.0f,
                -0.5f, -0.5f, +0.0f, +0.0f, +0.0f, +1.0f,
                +0.5f, +0.5f, +0.0f, +0.0f, +0.0f, +1.0f,
                +0.5f, +0.5f, +0.0f, +0.0f, +0.0f, +1.0f,
                -0.5f, -0.5f, +0.0f, +0.0f, +0.0f, +1.0f,
                +0.5f, -0.5f, +0.0f, +0.0f, +0.0f, +1.0f,
            };
            glm::mat4 m;
            std::vector<glm::mat4> transforms;
            m = glm::translate(glm::mat4(), glm::vec3(0, 1.0f, -0.5f));
            m = glm::scale(m, glm::vec3(innerWidth, innerHeight + 2.0f, 1.0f));
            transforms.push_back(m);
            m = glm::translate(glm::mat4(), glm::vec3(-(innerWidth / 2.0f), 1.0f, 0.0f));
            m = glm::scale(m, glm::vec3(1.0f, innerHeight + 2.0f, 1.0f));
            m = glm::rotate(m, glm::pi<float>() / 2.0f, glm::vec3(0, 1, 0));
            transforms.push_back(m);
            m = glm::translate(glm::mat4(), glm::vec3(+(innerWidth / 2.0f), 1.0f, 0.0f));
            m = glm::scale(m, glm::vec3(1.0f, innerHeight + 2.0f, 1.0f));
            m = glm::rotate(m, glm::pi<float>() / 2.0f, glm::vec3(0, -1, 0));
            transforms.push_back(m);
            m = glm::translate(glm::mat4(), glm::vec3(0.0f, -(innerHeight / 2.0f), 0.0f));
            m = glm::scale(m, glm::vec3(innerWidth, 1.0f, 1.0f));
            m = glm::rotate(m, glm::pi<float>() / 2.0f, glm::vec3(-1, 0, 0));
            transforms.push_back(m);
            m = glm::translate(glm::mat4(), glm::vec3(-(innerWidth / 2.0f + 5.0f), 1.0f, 0.5f));
            m = glm::scale(m, glm::vec3(10.0f, innerHeight + 2.0f, 1.0f));
            transforms.push_back(m);
            m = glm::translate(glm::mat4(), glm::vec3(+(innerWidth / 2.0f + 5.0f), 1.0f, 0.5f));
            m = glm::scale(m, glm::vec3(10.0f, innerHeight + 2.0f, 1.0f));
            transforms.push_back(m);
            m = glm::translate(glm::mat4(), glm::vec3(0.0f, -(innerHeight / 2.0f + 2.0f), 0.5f));
            m = glm::scale(m, glm::vec3(innerWidth + 20.0f, 4.0f, 1.0f));
            transforms.push_back(m);
            std::vector<float> vdata;
            for (auto m: transforms) {
                for (int i = 0; i < plane.size() / 6; ++i) {
                    glm::vec3 p = glm::make_vec3(&(plane.data()[i * 6 + 0]));
                    glm::vec3 n = glm::make_vec3(&(plane.data()[i * 6 + 3]));
                    p = glm::vec3(m * glm::vec4(p, 1.0f));
                    n = glm::mat3(m) * n;
                    vdata.push_back(p.x);
                    vdata.push_back(p.y);
                    vdata.push_back(p.z);
                    vdata.push_back(n.x);
                    vdata.push_back(n.y);
                    vdata.push_back(n.z);
                }
            }
            glGenBuffers(1, &vbo);
            glBindBuffer(GL_ARRAY_BUFFER, vbo);
            glBufferData(GL_ARRAY_BUFFER, sizeof(float) * vdata.size(), vdata.data(), GL_STATIC_DRAW);
            backgroundVertexCount = vdata.size() / 6;
        } else {
            glBindBuffer(GL_ARRAY_BUFFER, vbo);
        }
        GLsizei stride = sizeof(float) * 6;
        float *ptr = nullptr;
        glEnableVertexAttribArray(positionAttribId);
        glVertexAttribPointer(positionAttribId, 3, GL_FLOAT, GL_FALSE, stride, ptr);
        glEnableVertexAttribArray(normalAttribId);
        glVertexAttribPointer(normalAttribId, 3, GL_FLOAT, GL_FALSE, stride, ptr + 3);
        currentObjectVertexCount = backgroundVertexCount;
    }

    static void Draw()
    {
        glDrawArrays(GL_TRIANGLES, 0, (GLsizei)currentObjectVertexCount);
    }

    static std::vector<BoxChar> GetBoxChars(std::string const str)
    {
        if (charMap.size() == 0) {
            int const height = 5;
            std::string chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ!? @_0123456789^v<>:.";
            std::vector<std::string> cd = {
                " # !## !###!## !###!###!###!# #!#! #!# #!# !# #!# #!",
                "# # # # # # # # # # # # # # # # # # ## ## ## # ",
                "### ## # # # ### ### # # ### # # ## # # # # # ## ",
                "# # # # # # # # # # # # # # # # # # # # # # # # ",
                "# # ## ### ## ### # ### # # # ### # # ### # # # # ",
                "###!###!###!###!###!###!# #!# #!# # #!# #!# #!###!#!###! ! ###! !",
                "# # # # # # # # # # # # # # # # # # # # # # # # # ## ",
                "# # ### # # ## ### # # # # # # # # # # # # # # ## ",
                "# # # ## # # # # # # # # # # # # # # # # ",
                "### # ## # # ### # ### # # # # # # ### # # #### ### ",
                "###!## !###!###!# #!###!###!###!###!###! # ! # ! # ! # ! ! !",
                "# # # # # # # # # # # # # # ### # # # # ",
                "# # # ### ### ### ### ### # ### ### # # # # # # ##### ##### ",
                "# # # # # # # # # # # # # # ### # # # ",
                "### ### ### ### # ### ### # ### ### # # # # # ",
            };
            auto curId = 0;
            auto curLine = 0;
            auto curCol = 0;
            while (curId < chars.length() && curLine * height < cd.size()) {
                auto curRow = curLine * height;
                auto charEnd = curCol + 1;
                while (cd[curRow][charEnd] != '!') ++charEnd;
                BoxChar bc;
                bc.character = chars[curId];
                bc.width = charEnd - curCol;
                glm::vec2 center(float(bc.width) / 2.0f, float(height) / 2.0f);
                for (int y = 0; y < height; ++y) {
                    for (int x = 0; x < bc.width; ++x) {
                        if (cd[curRow + y][curCol + x] == '#') {
                            auto p = glm::vec2(x + 0.5f, y + 0.5f) - center;
                            bc.dotData.push_back(glm::vec2(p.x, -p.y));
                        }
                    }
                }
                charMap[bc.character] = bc;
                ++curId;
                curCol = charEnd + 1;
                if (curCol >= cd[curRow].length()) {
                    curCol = 0;
                    ++curLine;
                }
            }
        }
        std::vector<BoxChar> boxChars;
        for (auto c: str) {
            boxChars.push_back(charMap[c]);
        }
        return std::move(boxChars);
    }

    enum CenterType
    {
        LEFT_CENTER,
        RIGHT_CENTER,
        CENTER
    };

    static std::vector<glm::mat4> GetStringMatrices(std::string const str, CenterType const centerType = CenterType::CENTER)
    {
        if (str.length() == 0) return std::move(std::vector<glm::mat4>());
        auto bcs = GetBoxChars(str);
        int totalWidth = 0;
        for (auto &bc: bcs) totalWidth += bc.width + 1;
        totalWidth -= 1;
        float offsetX = 0.0f;
        switch (centerType) {
            case CenterType::LEFT_CENTER:
                break;
            case CenterType::RIGHT_CENTER:
                offsetX = -float(totalWidth);
                break;
            case CenterType::CENTER:
                offsetX = -float(totalWidth) * 0.5f;
                break;
        }
        std::vector<glm::mat4> matrices;
        int charX = 0;
        for (auto &bc: bcs) {
            auto width = bc.width;
            glm::vec2 offset(float(charX) + (float(width) / 2.0f) + offsetX, 0.0f);
            for (auto &p: bc.dotData) {
                auto strLocalPos = p + offset;
                glm::mat4 m = glm::translate(glm::mat4(), glm::vec3(strLocalPos, 0.0f));
                matrices.push_back(m);
            }
            charX += width + 1;
        }
        return std::move(matrices);
    }
};

size_t ObjectData::boxVertexCount = 0;
size_t ObjectData::bevelBoxVertexCount = 0;
size_t ObjectData::backgroundVertexCount = 0;
size_t ObjectData::currentObjectVertexCount = 0;
ObjectData::CharMap ObjectData::charMap;

#endif
mit
benbrandt22/genevaGen
svgBindViewbox.js
631
(function () {
    'use strict';

    var app = angular.module('app');

    app.directive('svgBindViewbox', function () {
        return {
            link: function (scope, element, attrs) {
                /*
                 inserts the evaluated value of the "svg-bind-viewbox"
                 attribute into the "viewBox" attribute, making sure to
                 capitalize the "B", as this SVG attribute name is
                 case-sensitive.
                 */
                attrs.$observe('svgBindViewbox', function (value) {
                    element.attr('viewBox', value);
                })
            }
        };
    });
})();
mit
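The directive is applied as an attribute. A sketch of the markup it targets, with the scope values illustrative; the point is that Angular's compiler would otherwise emit a lowercased viewbox attribute, which SVG ignores:

<svg svg-bind-viewbox="0 0 {{width}} {{height}}" width="200" height="100">
    <!-- drawing content -->
</svg>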
ja1cap/shop
src/Weasty/Bundle/CatalogBundle/Feature/FeaturesResourceInterface.php
1194
<?php

namespace Weasty\Bundle\CatalogBundle\Feature;

/**
 * Interface FeaturesResourceInterface
 * @package Weasty\Bundle\CatalogBundle\Feature
 */
interface FeaturesResourceInterface extends \JsonSerializable, \ArrayAccess
{
    /**
     * @return FeatureInterface[]
     */
    public function getFeatures();

    /**
     * @param int $id
     * @return FeatureInterface|null
     */
    public function getFeature($id);

    /**
     * @param FeatureInterface $feature
     * @return $this
     */
    public function addFeature(FeatureInterface $feature);

    /**
     * @param int $id
     * @return $this
     */
    public function removeFeature($id);

    /**
     * @return FeatureGroupInterface[]
     */
    public function getGroups();

    /**
     * @param int $id
     * @return FeatureGroupInterface|null
     */
    public function getGroup($id);

    /**
     * @param FeatureGroupInterface $featureGroup
     * @return $this
     */
    public function addGroup(FeatureGroupInterface $featureGroup);

    /**
     * @param int $id
     * @return $this
     */
    public function removeGroup($id);

    /**
     * @return array
     */
    public function toArray();
}
mit
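A consuming sketch written against the interface only; $resource stands for any implementation:

/* @var $resource \Weasty\Bundle\CatalogBundle\Feature\FeaturesResourceInterface */
foreach ($resource->getFeatures() as $feature) {
    // each $feature implements FeatureInterface
}
foreach ($resource->getGroups() as $group) {
    // each $group implements FeatureGroupInterface
}
$data = $resource->toArray(); // also JSON-serializable via \JsonSerializable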
callstack-io/haul
packages/haul-core/src/preset/utils/applyMultiBundleTweaks.ts
3054
import path from 'path';
import webpack from 'webpack';
import {
  EnvOptions,
  NormalizedBundleConfig,
  NormalizedTemplatesConfig,
} from '../../config/types';
import compileTemplate from './compileTemplate';

export function getBundleFilename(
  env: EnvOptions,
  templatesConfig: NormalizedTemplatesConfig,
  bundleConfig: NormalizedBundleConfig
) {
  return compileTemplate(
    templatesConfig.filename[
      env.bundleTarget === 'server' ? '__server__' : bundleConfig.platform
    ],
    {
      bundleName: bundleConfig.name,
      platform: bundleConfig.platform,
      type: bundleConfig.dll ? 'dll' : bundleConfig.app ? 'app' : 'default',
      mode: bundleConfig.dev ? 'dev' : 'prod',
    }
  );
}

export default function applyMultiBundleTweaks(
  env: EnvOptions,
  templatesConfig: NormalizedTemplatesConfig,
  bundleConfig: NormalizedBundleConfig,
  webpackConfig: webpack.Configuration,
  normalizedBundleConfigs: { [bundleName: string]: NormalizedBundleConfig }
) {
  let bundleFilename = getBundleFilename(env, templatesConfig, bundleConfig);
  let bundleOutputDirectory = webpackConfig.output!.path!;
  if (env.bundleOutput) {
    // `bundleOutput` should be a directory, but for backward-compatibility,
    // we also handle the case with a filename.
    bundleOutputDirectory =
      path.extname(env.bundleOutput) === ''
        ? env.bundleOutput
        : path.dirname(env.bundleOutput);
    bundleOutputDirectory = path.isAbsolute(bundleOutputDirectory)
      ? bundleOutputDirectory
      : path.join(bundleConfig.root, bundleOutputDirectory);
    const targetBundleOutput = path.join(bundleOutputDirectory, bundleFilename);
    webpackConfig.output!.filename = path.relative(
      webpackConfig.output!.path!,
      targetBundleOutput
    );
  } else {
    webpackConfig.output!.filename = bundleFilename;
  }

  if (bundleConfig.dll) {
    webpackConfig.output!.library = bundleConfig.name;
    webpackConfig.output!.libraryTarget = 'this';
    webpackConfig.plugins!.push(
      new webpack.DllPlugin({
        name: bundleConfig.name,
        path: path.join(
          bundleOutputDirectory,
          `${bundleConfig.name}.manifest.json`
        ),
      })
    );
  } else if (bundleConfig.app) {
    webpackConfig.output!.library = bundleConfig.name;
    webpackConfig.output!.libraryTarget = 'this';
  }

  bundleConfig.dependsOn.forEach((dllBundleName: string) => {
    const dllNormalizedBundleConfig = normalizedBundleConfigs[dllBundleName];
    if (!dllNormalizedBundleConfig) {
      throw new Error(
        `Cannot find bundle config for DLL '${dllBundleName}' - make sure it's listed in config before any other bundle depends on it.`
      );
    }
    webpackConfig.plugins!.push(
      new webpack.DllReferencePlugin({
        context: bundleConfig.root,
        manifest: dllNormalizedBundleConfig.external
          ? dllNormalizedBundleConfig.external.manifestPath!
          : path.join(bundleOutputDirectory, `${dllBundleName}.manifest.json`),
        sourceType: 'this',
      })
    );
  });
}
mit
PublicHealthEngland/fingertips-open
FingertipsProfileManager/DIResolver/Properties/AssemblyInfo.cs
1396
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;

// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("DIResolver")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("")]
[assembly: AssemblyProduct("DIResolver")]
[assembly: AssemblyCopyright("Copyright © 2015")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]

// Setting ComVisible to false makes the types in this assembly not visible
// to COM components. If you need to access a type in this assembly from
// COM, set the ComVisible attribute to true on that type.
[assembly: ComVisible(false)]

// The following GUID is for the ID of the typelib if this project is exposed to COM
[assembly: Guid("4ed41376-05cf-469e-9a5d-31f847d11c2f")]

// Version information for an assembly consists of the following four values:
//
//      Major Version
//      Minor Version
//      Build Number
//      Revision
//
// You can specify all the values or you can default the Build and Revision Numbers
// by using the '*' as shown below:
// [assembly: AssemblyVersion("1.0.*")]
[assembly: AssemblyVersion("1.0.0.0")]
[assembly: AssemblyFileVersion("1.0.0.0")]
mit
janvt/Ghost
core/server/data/migrations/versions/4.9/05-fix-missed-mobiledoc-url-transforms.js
3081
const logging = require('@tryghost/logging');
const urlUtils = require('../../../../../shared/url-utils');
const htmlToPlaintext = require('../../../../../shared/html-to-plaintext');
const mobiledocLib = require('../../../../lib/mobiledoc');
const {createTransactionalMigration} = require('../../utils');

// in Ghost versions 4.6.1-4.8.4 the 4.0 migration that transformed URLs had a bug
// that meant urls inside cards in mobiledoc content were not being transformed
//
// if the migrations table indicates an upgrade was made from 3.x to 4.6-4.8 then
// we'll re-run the transforms against post.mobiledoc and re-generate the html
// and plaintext contents

module.exports = createTransactionalMigration(
    async function up(knex) {
        const badVersionUsedFor40Migration = await knex('migrations')
            .where({
                name: '18-transform-urls-absolute-to-transform-ready.js'
            })
            .whereIn('currentVersion', ['4.6', '4.7', '4.8'])
            .first();

        if (!badVersionUsedFor40Migration) {
            logging.info('Skipping transform of mobiledoc URLs - original transform was good');
            return;
        }

        logging.info('Transforming all internal URLs in posts.{mobiledoc,html,plaintext} to transform-ready');

        await knex.transaction(async (trx) => {
            const postIdRows = await knex('posts')
                .transacting(trx)
                .forUpdate()
                .select('id');

            for (const postIdRow of postIdRows) {
                const {id} = postIdRow;
                const [post] = await knex('posts')
                    .transacting(trx)
                    .where({id})
                    .select([
                        'mobiledoc'
                    ]);

                let mobiledoc;
                let html;

                try {
                    mobiledoc = urlUtils.mobiledocToTransformReady(post.mobiledoc, {cardTransformers: mobiledocLib.cards});
                    if (!mobiledoc) {
                        logging.warn(`No mobiledoc for ${id}. Skipping.`);
                        continue;
                    }
                } catch (err) {
                    logging.warn(`Invalid mobiledoc JSON structure for ${id}. Skipping`);
                    continue;
                }

                try {
                    html = mobiledocLib.mobiledocHtmlRenderer.render(JSON.parse(mobiledoc));
                } catch (err) {
                    logging.warn(`Invalid mobiledoc content structure for ${id}, unable to render. Skipping`);
                    continue;
                }

                const plaintext = htmlToPlaintext(html);

                await knex('posts')
                    .transacting(trx)
                    .where({id})
                    .update({
                        mobiledoc,
                        html,
                        plaintext
                    });
            }

            return 'transaction complete';
        });
    },
    async function down() {
        // noop
    }
);
mit
callbackrun/callback-ruby
lib/callback.rb
296
require "callback/api" require "callback/client" require "callback/configuration" require "callback/parser" require "callback/version" module Callback def self.configuration @configuration ||= Configuration.new end def self.configure yield configuration if block_given? end end
mit
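A configuration sketch in the gem's own style; the api_key attribute is hypothetical, standing in for whatever settings Callback::Configuration actually exposes:

require 'callback'

Callback.configure do |config|
  # config.api_key = '...' # hypothetical attribute on Callback::Configuration
end

Callback.configuration # => the memoized Callback::Configuration instance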
awoland/Synergy
src/qt/locale/bitcoin_pl.ts
119760
<?xml version="1.0" ?><!DOCTYPE TS><TS language="pl" version="2.1"> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About synergy</source> <translation>O synergy</translation> </message> <message> <location line="+39"/> <source>&lt;b&gt;synergy&lt;/b&gt; version</source> <translation>&lt;b&gt;synergy&lt;/b&gt; wersja</translation> </message> <message> <location line="+41"/> <source>Copyright © 2009-2014 The Bitcoin developers Copyright © 2012-2014 The NovaCoin developers Copyright © 2014 The synergy developers</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source> <translation> Oprogramowanie eksperymentalne. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</translation> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>Address Book</source> <translation>Książka Adresowa</translation> </message> <message> <location line="+22"/> <source>Double-click to edit address or label</source> <translation>Kliknij dwukrotnie, aby edytować adres lub etykietę</translation> </message> <message> <location line="+27"/> <source>Create a new address</source> <translation>Utwórz nowy adres</translation> </message> <message> <location line="+14"/> <source>Copy the currently selected address to the system clipboard</source> <translation>Skopiuj aktualnie wybrany adres do schowka</translation> </message> <message> <location line="-11"/> <source>&amp;New Address</source> <translation>Nowy Adres</translation> </message> <message> <location line="-46"/> <source>These are your synergy addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source> <translation>Tutaj znajdują się twoje adresy do odbierania wpłat. 
Możesz dodać kolejny adres dla każdego wysyłającego aby określić od kogo pochodzi wpłata.</translation> </message> <message> <location line="+60"/> <source>&amp;Copy Address</source> <translation>&amp;Kopiuj adres</translation> </message> <message> <location line="+11"/> <source>Show &amp;QR Code</source> <translation>Pokaż &amp;Kod QR</translation> </message> <message> <location line="+11"/> <source>Sign a message to prove you own a synergy address</source> <translation>Podpisz wiadomość by udowodnić, że jesteś właścicielem adresu synergy</translation> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation>Podpisz &amp;Wiadomość</translation> </message> <message> <location line="+25"/> <source>Delete the currently selected address from the list</source> <translation>Usuń zaznaczony adres z listy</translation> </message> <message> <location line="-14"/> <source>Verify a message to ensure it was signed with a specified synergy address</source> <translation>Zweryfikuj wiadomość, w celu zapewnienia, że została podpisana z określonego adresu synergy</translation> </message> <message> <location line="+3"/> <source>&amp;Verify Message</source> <translation>&amp;Zweryfikuj wiadomość</translation> </message> <message> <location line="+14"/> <source>&amp;Delete</source> <translation>&amp;Usuń</translation> </message> <message> <location filename="../addressbookpage.cpp" line="+65"/> <source>Copy &amp;Label</source> <translation>Kopiuj &amp;Etykietę</translation> </message> <message> <location line="+2"/> <source>&amp;Edit</source> <translation>&amp;Edytuj</translation> </message> <message> <location line="+250"/> <source>Export Address Book Data</source> <translation>Exportuj Książkę Adresową</translation> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Plik *.CSV (rozdzielany przecinkami)</translation> </message> <message> <location line="+13"/> <source>Error exporting</source> <translation>Błąd exportowania</translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation>Nie mogę zapisać do pliku %1</translation> </message> </context> <context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+144"/> <source>Label</source> <translation>Etykieta</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Adres</translation> </message> <message> <location line="+36"/> <source>(no label)</source> <translation>(bez etykiety)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation>Okienko Hasła</translation> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation>Wpisz hasło</translation> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation>Nowe hasło</translation> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation>Powtórz nowe hasło</translation> </message> <message> <location line="+33"/> <source>Serves to disable the trivial sendmoney when OS account compromised. 
Provides no real security.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>For staking only</source> <translation type="unfinished"/> </message> <message> <location filename="../askpassphrasedialog.cpp" line="+35"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;10 or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>Wprowadź nowe hasło dla portfela.&lt;br/&gt;Proszę użyć hasła składającego się z &lt;b&gt;10 lub więcej losowych znaków&lt;/b&gt; lub &lt;b&gt;ośmiu lub więcej słów&lt;/b&gt;.</translation> </message> <message> <location line="+1"/> <source>Encrypt wallet</source> <translation>Zaszyfruj portfel</translation> </message> <message> <location line="+7"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>Ta operacja wymaga hasła do portfela ażeby odblokować portfel.</translation> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation>Odblokuj portfel</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>Ta operacja wymaga hasła do portfela ażeby odszyfrować portfel.</translation> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation>Odszyfruj portfel</translation> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation>Zmień hasło</translation> </message> <message> <location line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> <translation>Podaj stare i nowe hasło do portfela.</translation> </message> <message> <location line="+46"/> <source>Confirm wallet encryption</source> <translation>Potwierdź szyfrowanie portfela</translation> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR COINS&lt;/b&gt;!</source> <translation>Uwaga: Jeśli zaszyfrujesz swój portfel i zgubisz hasło, wtedy&lt;b&gt;UTRACISZ SWOJE MONETY!&lt;/b&gt;!</translation> </message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation>Jesteś pewien, że chcesz zaszyfrować swój portfel?</translation> </message> <message> <location line="+15"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>WAŻNE: Wszystkie wykonane wcześniej kopie pliku portfela powinny być zamienione na nowe, szyfrowane pliki. Z powodów bezpieczeństwa, poprzednie kopie nieszyfrowanych plików portfela staną się bezużyteczne jak tylko zaczniesz korzystać z nowego, szyfrowanego portfela.</translation> </message> <message> <location line="+103"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation>Uwaga: Klawisz Caps Lock jest włączony</translation> </message> <message> <location line="-133"/> <location line="+60"/> <source>Wallet encrypted</source> <translation>Portfel zaszyfrowany</translation> </message> <message> <location line="-58"/> <source>synergy will close now to finish the encryption process. 
Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+7"/> <location line="+44"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation>Szyfrowanie portfela nie powiodło się</translation> </message> <message> <location line="-56"/> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>Szyfrowanie portfela nie powiodło się z powodu wewnętrznego błędu. Twój portfel nie został zaszyfrowany.</translation> </message> <message> <location line="+7"/> <location line="+50"/> <source>The supplied passphrases do not match.</source> <translation>Podane hasła nie są takie same.</translation> </message> <message> <location line="-38"/> <source>Wallet unlock failed</source> <translation>Odblokowanie portfela nie powiodło się</translation> </message> <message> <location line="+1"/> <location line="+12"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>Wprowadzone hasło do odszyfrowania portfela jest niepoprawne.</translation> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation>Odszyfrowanie portfela nie powiodło się</translation> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation>Hasło portfela zostało pomyślnie zmienione.</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+282"/> <source>Sign &amp;message...</source> <translation>Podpisz wiado&amp;mość...</translation> </message> <message> <location line="+251"/> <source>Synchronizing with network...</source> <translation>Synchronizacja z siecią...</translation> </message> <message> <location line="-319"/> <source>&amp;Overview</source> <translation>P&amp;odsumowanie</translation> </message> <message> <location line="+1"/> <source>Show general overview of wallet</source> <translation>Pokazuje ogólny zarys portfela</translation> </message> <message> <location line="+17"/> <source>&amp;Transactions</source> <translation>&amp;Transakcje</translation> </message> <message> <location line="+1"/> <source>Browse transaction history</source> <translation>Przeglądaj historię transakcji</translation> </message> <message> <location line="+5"/> <source>&amp;Address Book</source> <translation>&amp;Książka Adresowa</translation> </message> <message> <location line="+1"/> <source>Edit the list of stored addresses and labels</source> <translation>Edytuj listę przechowywanych adresów i etykiet</translation> </message> <message> <location line="-13"/> <source>&amp;Receive SNRG</source> <translation>&amp;Odbierz monety</translation> </message> <message> <location line="+1"/> <source>Show the list of addresses for receiving payments</source> <translation>Pokaż listę adresów do odbierania wpłat</translation> </message> <message> <location line="-7"/> <source>&amp;Send SNRG</source> <translation>&amp;Wyślij monety</translation> </message> <message> <location line="+35"/> <source>E&amp;xit</source> <translation>&amp;Zakończ</translation> </message> <message> <location line="+1"/> <source>Quit application</source> <translation>Zamknij program</translation> </message> <message> <location line="+6"/> <source>Show information about synergy</source> <translation>Pokaż informacje 
dotyczące synergy</translation> </message> <message> <location line="+2"/> <source>About &amp;Qt</source> <translation>O &amp;Qt</translation> </message> <message> <location line="+1"/> <source>Show information about Qt</source> <translation>Pokazuje informacje o Qt</translation> </message> <message> <location line="+2"/> <source>&amp;Options...</source> <translation>&amp;Opcje...</translation> </message> <message> <location line="+4"/> <source>&amp;Encrypt Wallet...</source> <translation>Zaszyfruj Portf&amp;el...</translation> </message> <message> <location line="+3"/> <source>&amp;Backup Wallet...</source> <translation>Wykonaj kopię zapasową...</translation> </message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation>&amp;Zmień hasło...</translation> </message> <message numerus="yes"> <location line="+259"/> <source>~%n block(s) remaining</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+6"/> <source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source> <translation>Pobrano %1 z %2 bloków historii transakcji (%3% gotowe).</translation> </message> <message> <location line="-256"/> <source>&amp;Export...</source> <translation>&amp;Eksportuj...</translation> </message> <message> <location line="-64"/> <source>Send SNRG to a synergy address</source> <translation>Wyślij monety na adres synergy</translation> </message> <message> <location line="+47"/> <source>Modify configuration options for synergy</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>Export the data in the current tab to a file</source> <translation type="unfinished"/> </message> <message> <location line="-14"/> <source>Encrypt or decrypt wallet</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Backup wallet to another location</source> <translation>Wykonaj kopię zapasową portfela w innej lokalizacji</translation> </message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation>Zmień hasło użyte do szyfrowania portfela</translation> </message> <message> <location line="+10"/> <source>&amp;Debug window</source> <translation>&amp;Okno debugowania</translation> </message> <message> <location line="+1"/> <source>Open debugging and diagnostic console</source> <translation>Otwórz konsolę debugowania i diagnostyki</translation> </message> <message> <location line="-5"/> <source>&amp;Verify message...</source> <translation>&amp;Zweryfikuj wiadomość...</translation> </message> <message> <location line="-202"/> <source>synergy</source> <translation>synergy</translation> </message> <message> <location line="+0"/> <source>Wallet</source> <translation>Portfel</translation> </message> <message> <location line="+180"/> <source>&amp;About synergy</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>&amp;Show / Hide</source> <translation>&amp;Pokaż / Ukryj</translation> </message> <message> <location line="+9"/> <source>Unlock wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>&amp;Lock Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Lock wallet</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>&amp;File</source> <translation>&amp;Plik</translation> </message> 
<message> <location line="+8"/> <source>&amp;Settings</source> <translation>P&amp;referencje</translation> </message> <message> <location line="+8"/> <source>&amp;Help</source> <translation>Pomo&amp;c</translation> </message> <message> <location line="+12"/> <source>Tabs toolbar</source> <translation>Pasek zakładek</translation> </message> <message> <location line="+8"/> <source>Actions toolbar</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+9"/> <source>[testnet]</source> <translation>[testnet]</translation> </message> <message> <location line="+0"/> <location line="+60"/> <source>synergy client</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+75"/> <source>%n active connection(s) to synergy network</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+40"/> <source>Downloaded %1 blocks of transaction history.</source> <translation type="unfinished"/> </message> <message> <location line="+413"/> <source>Staking.&lt;br&gt;Your weight is %1&lt;br&gt;Network weight is %2&lt;br&gt;Expected time to earn reward is %3</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Not staking because wallet is locked</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because wallet is offline</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because wallet is syncing</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because you don&apos;t have mature coins</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="-403"/> <source>%n second(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="-312"/> <source>About synergy card</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show information about synergy card</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>&amp;Unlock Wallet...</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+297"/> <source>%n minute(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n hour(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+6"/> <source>Up to date</source> <translation>Aktualny</translation> </message> <message> <location line="+7"/> <source>Catching up...</source> <translation>Łapanie bloków...</translation> </message> <message> <location line="+10"/> <source>Last received block was generated %1.</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>This transaction requires a fee based on the services it uses. 
You may send it for a fee of %1 , which rewards all users of the Synergy network as a result of your usage. Do you want to pay this fee?</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirm transaction fee</source> <translation type="unfinished"/> </message> <message> <location line="+27"/> <source>Sent transaction</source> <translation>Transakcja wysłana</translation> </message> <message> <location line="+1"/> <source>Incoming transaction</source> <translation>Transakcja przychodząca</translation> </message> <message> <location line="+1"/> <source>Date: %1
Amount: %2
Type: %3
Address: %4
</source> <translation>Data: %1
Kwota: %2
Typ: %3
Adres: %4
</translation> </message> <message> <location line="+100"/> <location line="+15"/> <source>URI handling</source> <translation type="unfinished"/> </message> <message> <location line="-15"/> <location line="+15"/> <source>URI can not be parsed! This can be caused by an invalid synergy address or malformed URI parameters.</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>Portfel jest &lt;b&gt;zaszyfrowany&lt;/b&gt; i obecnie &lt;b&gt;odblokowany&lt;/b&gt;</translation> </message> <message> <location line="+10"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>Portfel jest &lt;b&gt;zaszyfrowany&lt;/b&gt; i obecnie &lt;b&gt;zablokowany&lt;/b&gt;</translation> </message> <message> <location line="+25"/> <source>Backup Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Wallet Data (*.dat)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Backup Failed</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to the new location.</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+76"/> <source>%n second(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n minute(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n hour(s)</source> <translation><numerusform>%n godzina</numerusform><numerusform>%n godziny</numerusform><numerusform>%n godzin</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s)</source> <translation><numerusform>%n dzień</numerusform><numerusform>%n dni</numerusform><numerusform>%n dni</numerusform></translation> </message> <message> <location line="+18"/> <source>Not staking</source> <translation type="unfinished"/> </message> <message> <location filename="../bitcoin.cpp" line="+109"/> <source>A fatal error occurred. 
synergy can no longer continue safely and will quit.</source> <translation type="unfinished"/> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+90"/> <source>Network Alert</source> <translation>Alert sieci</translation> </message> </context> <context> <name>CoinControlDialog</name> <message> <location filename="../forms/coincontroldialog.ui" line="+14"/> <source>Coin Control</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Quantity:</source> <translation>Ilość:</translation> </message> <message> <location line="+32"/> <source>Bytes:</source> <translation>Bajtów:</translation> </message> <message> <location line="+48"/> <source>Amount:</source> <translation>Kwota:</translation> </message> <message> <location line="+32"/> <source>Priority:</source> <translation>Priorytet:</translation> </message> <message> <location line="+48"/> <source>Fee:</source> <translation>Opłata:</translation> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation type="unfinished"/> </message> <message> <location filename="../coincontroldialog.cpp" line="+551"/> <source>no</source> <translation>nie</translation> </message> <message> <location filename="../forms/coincontroldialog.ui" line="+51"/> <source>After Fee:</source> <translation>Po opłacie:</translation> </message> <message> <location line="+35"/> <source>Change:</source> <translation>Reszta:</translation> </message> <message> <location line="+69"/> <source>(un)select all</source> <translation>Zaznacz/Odznacz wszystko</translation> </message> <message> <location line="+13"/> <source>Tree mode</source> <translation>Widok drzewa</translation> </message> <message> <location line="+16"/> <source>List mode</source> <translation>Widok listy</translation> </message> <message> <location line="+45"/> <source>Amount</source> <translation>Kwota</translation> </message> <message> <location line="+5"/> <source>Label</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Address</source> <translation>Adres</translation> </message> <message> <location line="+5"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+5"/> <source>Confirmations</source> <translation>Potwierdzenia</translation> </message> <message> <location line="+3"/> <source>Confirmed</source> <translation>Potwierdzony</translation> </message> <message> <location line="+5"/> <source>Priority</source> <translation>Priorytet</translation> </message> <message> <location filename="../coincontroldialog.cpp" line="-515"/> <source>Copy address</source> <translation>Kopiuj adres</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Kopiuj etykietę</translation> </message> <message> <location line="+1"/> <location line="+26"/> <source>Copy amount</source> <translation>Kopiuj kwotę</translation> </message> <message> <location line="-25"/> <source>Copy transaction ID</source> <translation>Skopiuj ID transakcji</translation> </message> <message> <location line="+24"/> <source>Copy quantity</source> <translation>Skopiuj ilość</translation> </message> <message> <location line="+2"/> <source>Copy fee</source> <translation>Skopiuj opłatę</translation> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation>Skopiuj ilość po opłacie</translation> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation>Skopiuj 
ilość bajtów</translation> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation>Skopiuj priorytet</translation> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy change</source> <translation>Skopiuj resztę</translation> </message> <message> <location line="+317"/> <source>highest</source> <translation>najwyższa</translation> </message> <message> <location line="+1"/> <source>high</source> <translation>wysoka</translation> </message> <message> <location line="+1"/> <source>medium-high</source> <translation>średnio wysoki</translation> </message> <message> <location line="+1"/> <source>medium</source> <translation>średnia</translation> </message> <message> <location line="+4"/> <source>low-medium</source> <translation>średnio niski</translation> </message> <message> <location line="+1"/> <source>low</source> <translation>niski</translation> </message> <message> <location line="+1"/> <source>lowest</source> <translation>najniższy</translation> </message> <message> <location line="+155"/> <source>DUST</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>yes</source> <translation>tak</translation> </message> <message> <location line="+10"/> <source>This label turns red, if the transaction size is bigger than 10000 bytes. This means a fee of at least %1 per kb is required. Can vary +/- 1 Byte per input.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Transactions with higher priority get more likely into a block. This label turns red, if the priority is smaller than &quot;medium&quot;. This means a fee of at least %1 per kb is required.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if any recipient receives an amount smaller than %1. This means a fee of at least %2 is required. Amounts below 0.546 times the minimum relay fee are shown as DUST.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if the change is smaller than %1. This means a fee of at least %2 is required.</source> <translation type="unfinished"/> </message> <message> <location line="+37"/> <location line="+66"/> <source>(no label)</source> <translation>(bez etykiety)</translation> </message> <message> <location line="-9"/> <source>change from %1 (%2)</source> <translation>reszta z %1 (%2)</translation> </message> <message> <location line="+1"/> <source>(change)</source> <translation>(reszta)</translation> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation>Edytuj adres</translation> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation>&amp;Etykieta</translation> </message> <message> <location line="+10"/> <source>The label associated with this address book entry</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Address</source> <translation>&amp;Adres</translation> </message> <message> <location line="+10"/> <source>The address associated with this address book entry. 
This can only be modified for sending addresses.</source> <translation type="unfinished"/> </message> <message> <location filename="../editaddressdialog.cpp" line="+20"/> <source>New receiving address</source> <translation>Nowy adres odbiorczy</translation> </message> <message> <location line="+4"/> <source>New sending address</source> <translation>Nowy adres wysyłania</translation> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation>Edytuj adres odbioru</translation> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation>Edytuj adres wysyłania</translation> </message> <message> <location line="+76"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation>Wprowadzony adres &quot;%1&quot; już istnieje w książce adresowej.</translation> </message> <message> <location line="-5"/> <source>The entered address &quot;%1&quot; is not a valid synergy address.</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Could not unlock wallet.</source> <translation>Nie można było odblokować portfela.</translation> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation>Tworzenie nowego klucza nie powiodło się.</translation> </message> </context> <context> <name>GUIUtil::HelpMessageBox</name> <message> <location filename="../guiutil.cpp" line="+420"/> <location line="+12"/> <source>synergy-Qt</source> <translation type="unfinished"/> </message> <message> <location line="-12"/> <source>version</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Usage:</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>UI options</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation type="unfinished"/> </message> </context> <context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation>Opcje</translation> </message> <message> <location line="+16"/> <source>&amp;Main</source> <translation>Główne</translation> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. 
Fee 0.01 recommended.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Pay transaction &amp;fee</source> <translation>Płać prowizję za transakcje</translation> </message> <message> <location line="+31"/> <source>Reserved amount does not participate in staking and is therefore spendable at any time.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Reserve</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Automatically start synergy after logging in to the system.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Start synergy on system login</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. The wallet is always detached.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Detach databases at shutdown</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>&amp;Network</source> <translation>&amp;Sieć</translation> </message> <message> <location line="+6"/> <source>Automatically open the synergy client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation>Mapuj port używając &amp;UPnP</translation> </message> <message> <location line="+7"/> <source>Connect to the synergy network through a SOCKS proxy (e.g. when connecting through Tor).</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS proxy:</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Proxy &amp;IP:</source> <translation>Proxy &amp;IP: </translation> </message> <message> <location line="+19"/> <source>IP address of the proxy (e.g. 127.0.0.1)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Port:</source> <translation>&amp;Port:</translation> </message> <message> <location line="+19"/> <source>Port of the proxy (e.g. 9050)</source> <translation>Port proxy (np. 9050)</translation> </message> <message> <location line="+7"/> <source>SOCKS &amp;Version:</source> <translation>Wersja &amp;SOCKS</translation> </message> <message> <location line="+13"/> <source>SOCKS version of the proxy (e.g. 5)</source> <translation>SOCKS wersja serwera proxy (np. 5)</translation> </message> <message> <location line="+36"/> <source>&amp;Window</source> <translation>&amp;Okno</translation> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation>Pokazuj tylko ikonę przy zegarku po zminimalizowaniu okna.</translation> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>&amp;Minimalizuj do paska przy zegarku zamiast do paska zadań</translation> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation>Minimalizuje zamiast zakończyć działanie programu przy zamykaniu okna. 
Kiedy ta opcja jest włączona, program zakończy działanie po wybraniu Zamknij w menu.</translation> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation>M&amp;inimalizuj przy zamknięciu</translation> </message> <message> <location line="+21"/> <source>&amp;Display</source> <translation>&amp;Wyświetlanie</translation> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation>&amp;Język interfejsu użytkownika:</translation> </message> <message> <location line="+13"/> <source>The user interface language can be set here. This setting will take effect after restarting synergy.</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation>&amp;Jednostka pokazywana przy kwocie:</translation> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>Wybierz domyślny podział jednostki pokazywany w interfejsie oraz podczas wysyłania monet.</translation> </message> <message> <location line="+9"/> <source>Whether to show synergy addresses in the transaction list or not.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Display addresses in transaction list</source> <translation>&amp;Wyświetlaj adresy w liście transakcji</translation> </message> <message> <location line="+7"/> <source>Whether to show coin control features or not.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Display coin &amp;control features (experts only!)</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;OK</source> <translation>&amp;OK</translation> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation>&amp;Anuluj</translation> </message> <message> <location line="+10"/> <source>&amp;Apply</source> <translation type="unfinished"/> </message> <message> <location filename="../optionsdialog.cpp" line="+55"/> <source>default</source> <translation>domyślny</translation> </message> <message> <location line="+149"/> <location line="+9"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="-9"/> <location line="+9"/> <source>This setting will take effect after restarting synergy.</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The supplied proxy address is invalid.</source> <translation>Podany adres proxy jest nieprawidłowy.</translation> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+14"/> <source>Form</source> <translation>Formularz</translation> </message> <message> <location line="+33"/> <location line="+231"/> <source>The displayed information may be out of date. 
Your wallet automatically synchronizes with the synergy network after a connection is established, but this process has not completed yet.</source> <translation type="unfinished"/> </message> <message> <location line="-160"/> <source>Stake:</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>Unconfirmed:</source> <translation type="unfinished"/> </message> <message> <location line="-107"/> <source>Wallet</source> <translation>Portfel</translation> </message> <message> <location line="+49"/> <source>Spendable:</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Your current spendable balance</source> <translation>Twoje obecne saldo do wydania</translation> </message> <message> <location line="+71"/> <source>Immature:</source> <translation>Niedojrzałe:</translation> </message> <message> <location line="+13"/> <source>Mined balance that has not yet matured</source> <translation>Saldo wydobycia, które jeszcze nie dojrzało</translation> </message> <message> <location line="+20"/> <source>Total:</source> <translation>Ogółem:</translation> </message> <message> <location line="+16"/> <source>Your current total balance</source> <translation>Twoje obecne saldo całkowite</translation> </message> <message> <location line="+46"/> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation>&lt;b&gt;Ostatnie transakcje&lt;/b&gt;</translation> </message> <message> <location line="-108"/> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location line="-29"/> <source>Total of coins that was staked, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location filename="../overviewpage.cpp" line="+113"/> <location line="+1"/> <source>out of sync</source> <translation>niezsynchronizowany</translation> </message> </context> <context> <name>QRCodeDialog</name> <message> <location filename="../forms/qrcodedialog.ui" line="+14"/> <source>QR Code Dialog</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>Request Payment</source> <translation type="unfinished"/> </message> <message> <location line="+56"/> <source>Amount:</source> <translation type="unfinished"/> </message> <message> <location line="-44"/> <source>Label:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Message:</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;Save As...</source> <translation type="unfinished"/> </message> <message> <location filename="../qrcodedialog.cpp" line="+62"/> <source>Error encoding URI into QR Code.</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>The entered amount is invalid, please check.</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Save QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>PNG Images (*.png)</source> <translation type="unfinished"/> </message> </context> <context> <name>RPCConsole</name> <message> <location filename="../forms/rpcconsole.ui" line="+46"/> <source>Client name</source> <translation>Nazwa 
klienta</translation> </message> <message> <location line="+10"/> <location line="+23"/> <location line="+26"/> <location line="+23"/> <location line="+23"/> <location line="+36"/> <location line="+53"/> <location line="+23"/> <location line="+23"/> <location filename="../rpcconsole.cpp" line="+348"/> <source>N/A</source> <translation>NIEDOSTĘPNE</translation> </message> <message> <location line="-217"/> <source>Client version</source> <translation>Wersja klienta</translation> </message> <message> <location line="-45"/> <source>&amp;Information</source> <translation>&amp;Informacje</translation> </message> <message> <location line="+68"/> <source>Using OpenSSL version</source> <translation>Używana wersja OpenSSL</translation> </message> <message> <location line="+49"/> <source>Startup time</source> <translation>Czas uruchomienia</translation> </message> <message> <location line="+29"/> <source>Network</source> <translation>Sieć</translation> </message> <message> <location line="+7"/> <source>Number of connections</source> <translation>Liczba połączeń</translation> </message> <message> <location line="+23"/> <source>On testnet</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Block chain</source> <translation>Ciąg bloków</translation> </message> <message> <location line="+7"/> <source>Current number of blocks</source> <translation>Aktualna liczba bloków</translation> </message> <message> <location line="+23"/> <source>Estimated total blocks</source> <translation>Szacowana ilość bloków</translation> </message> <message> <location line="+23"/> <source>Last block time</source> <translation>Czas ostatniego bloku</translation> </message> <message> <location line="+52"/> <source>&amp;Open</source> <translation>&amp;Otwórz</translation> </message> <message> <location line="+16"/> <source>Command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Show the synergy-Qt help message to get a list with possible synergy command-line options.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Show</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>&amp;Console</source> <translation>&amp;Konsola</translation> </message> <message> <location line="-260"/> <source>Build date</source> <translation>Data kompilacji</translation> </message> <message> <location line="-104"/> <source>synergy - Debug window</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>synergy Core</source> <translation type="unfinished"/> </message> <message> <location line="+279"/> <source>Debug log file</source> <translation>Plik logowania debugowania</translation> </message> <message> <location line="+7"/> <source>Open the synergy debug log file from the current data directory. 
This can take a few seconds for large log files.</source> <translation type="unfinished"/> </message> <message> <location line="+102"/> <source>Clear console</source> <translation>Wyczyść konsolę</translation> </message> <message> <location filename="../rpcconsole.cpp" line="-33"/> <source>Welcome to the synergy RPC console.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation>Użyj strzałek w górę i w dół do przewijania historii oraz &lt;b&gt;Ctrl-L&lt;/b&gt;, aby wyczyścić ekran.</translation> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation>Wpisz &lt;b&gt;help&lt;/b&gt;, aby uzyskać listę dostępnych komend.</translation> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location filename="../sendcoinsdialog.cpp" line="+182"/> <location line="+5"/> <location line="+5"/> <location line="+5"/> <location line="+6"/> <location line="+5"/> <location line="+5"/> <source>Send Coins</source> <translation>Wyślij Monety</translation> </message> <message> <location line="+76"/> <source>Coin Control Features</source> <translation type="unfinished"/> </message> <message> <location line="+20"/> <source>Inputs...</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>automatically selected</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Insufficient funds!</source> <translation type="unfinished"/> </message> <message> <location line="+77"/> <source>Quantity:</source> <translation>Ilość:</translation> </message> <message> <location line="+22"/> <location line="+35"/> <source>0</source> <translation type="unfinished"/> </message> <message> <location line="-19"/> <source>Bytes:</source> <translation>Bajtów:</translation> </message> <message> <location line="+51"/> <source>Amount:</source> <translation>Kwota:</translation> </message> <message> <location line="+22"/> <location line="+86"/> <location line="+86"/> <location line="+32"/> <source>0.00 hack</source> <translation type="unfinished"/> </message> <message> <location line="-191"/> <source>Priority:</source> <translation>Priorytet:</translation> </message> <message> <location line="+19"/> <source>medium</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Fee:</source> <translation>Opłata:</translation> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>no</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>After Fee:</source> <translation>Po opłacie:</translation> </message> <message> <location line="+35"/> <source>Change</source> <translation type="unfinished"/> </message> <message> <location line="+50"/> <source>custom change address</source> <translation type="unfinished"/> </message> <message> <location line="+106"/> <source>Send to multiple recipients at once</source> <translation>Wyślij do wielu odbiorców na raz</translation> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation>Dodaj Odbio&amp;rcę</translation> </message> <message> <location line="+20"/> <source>Remove all transaction fields</source> <translation type="unfinished"/> 
</message> <message> <location line="+3"/> <source>Clear &amp;All</source> <translation>Wyczyść &amp;wszystko</translation> </message> <message> <location line="+28"/> <source>Balance:</source> <translation>Saldo:</translation> </message> <message> <location line="+16"/> <source>123.456 hack</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Confirm the send action</source> <translation>Potwierdź akcję wysyłania</translation> </message> <message> <location line="+3"/> <source>S&amp;end</source> <translation>Wy&amp;ślij</translation> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-173"/> <source>Enter a synergy address (e.g. synergyfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Copy quantity</source> <translation>Skopiuj ilość</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Kopiuj kwotę</translation> </message> <message> <location line="+1"/> <source>Copy fee</source> <translation>Skopiuj opłatę</translation> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation>Skopiuj ilość po opłacie</translation> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation>Skopiuj ilość bajtów</translation> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation>Skopiuj priorytet</translation> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy change</source> <translation>Skopiuj resztę</translation> </message> <message> <location line="+86"/> <source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirm send coins</source> <translation>Potwierdź wysyłanie monet</translation> </message> <message> <location line="+1"/> <source>Are you sure you want to send %1?</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source> and </source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The recipient address is not valid, please recheck.</source> <translation>Adres odbiorcy jest nieprawidłowy, proszę sprawdzić ponownie.</translation> </message> <message> <location line="+5"/> <source>The amount to pay must be larger than 0.</source> <translation>Kwota do zapłacenia musi być większa od 0.</translation> </message> <message> <location line="+5"/> <source>The amount exceeds your balance.</source> <translation>Kwota przekracza twoje saldo.</translation> </message> <message> <location line="+5"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>Suma przekracza twoje saldo, gdy doliczymy %1 prowizji transakcyjnej.</translation> </message> <message> <location line="+6"/> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation>Znaleziono powtórzony adres, można wysłać tylko raz na każdy adres podczas operacji wysyłania.</translation> </message> <message> <location line="+5"/> <source>Error: Transaction creation failed.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Error: The transaction was rejected. 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="+251"/> <source>WARNING: Invalid synergy address</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>(no label)</source> <translation>(bez etykiety)</translation> </message> <message> <location line="+4"/> <source>WARNING: unknown change address</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+14"/> <source>Form</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>A&amp;mount:</source> <translation>Su&amp;ma:</translation> </message> <message> <location line="+13"/> <source>Pay &amp;To:</source> <translation>Zapłać &amp;dla:</translation> </message> <message> <location line="+24"/> <location filename="../sendcoinsentry.cpp" line="+25"/> <source>Enter a label for this address to add it to your address book</source> <translation>Wprowadź etykietę dla tego adresu by dodać go do książki adresowej</translation> </message> <message> <location line="+9"/> <source>&amp;Label:</source> <translation>&amp;Etykieta:</translation> </message> <message> <location line="+18"/> <source>The address to send the payment to (e.g. synergyfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Choose address from address book</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation>Wklej adres ze schowka</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+7"/> <source>Remove this recipient</source> <translation type="unfinished"/> </message> <message> <location filename="../sendcoinsentry.cpp" line="+1"/> <source>Enter a synergy address (e.g. synergyfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation type="unfinished"/> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation>Podpisy - Podpisz / zweryfikuj wiadomość</translation> </message> <message> <location line="+13"/> <location line="+124"/> <source>&amp;Sign Message</source> <translation>Podpi&amp;sz Wiadomość</translation> </message> <message> <location line="-118"/> <source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation>Możesz podpisywać wiadomości swoimi adresami aby udowodnić, że jesteś ich właścicielem. Uważaj, aby nie podpisywać niczego co wzbudza Twoje podejrzenia, ponieważ ktoś może stosować phishing próbując nakłonić Cię do ich podpisania. Akceptuj i podpisuj tylko w pełni zrozumiałe komunikaty i wiadomości.</translation> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. 
synergyfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <location line="+203"/> <source>Choose an address from the address book</source> <translation type="unfinished"/> </message> <message> <location line="-193"/> <location line="+203"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="-193"/> <source>Paste address from clipboard</source> <translation>Wklej adres ze schowka</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation>Wprowadź wiadomość, którą chcesz podpisać, tutaj</translation> </message> <message> <location line="+24"/> <source>Copy the current signature to the system clipboard</source> <translation>Kopiuje aktualny podpis do schowka systemowego</translation> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this synergy address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all sign message fields</source> <translation>Zresetuj wszystkie pola podpisanej wiadomości</translation> </message> <message> <location line="+3"/> <location line="+146"/> <source>Clear &amp;All</source> <translation>Wyczyść &amp;wszystko</translation> </message> <message> <location line="-87"/> <location line="+70"/> <source>&amp;Verify Message</source> <translation>&amp;Zweryfikuj wiadomość</translation> </message> <message> <location line="-64"/> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation>Wpisz adres podpisu, wiadomość (upewnij się, że dokładnie skopiujesz wszystkie zakończenia linii, spacje, tabulacje itp.) oraz podpis poniżej by sprawdzić wiadomość. Uważaj by nie dodać więcej do podpisu niż do samej podpisywanej wiadomości by uniknąć ataku man-in-the-middle (człowiek pośrodku)</translation> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. synergyfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>Verify the message to ensure it was signed with the specified synergy address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all verify message fields</source> <translation>Resetuje wszystkie pola weryfikacji wiadomości</translation> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+27"/> <location line="+3"/> <source>Enter a synergy address (e.g. 
synergyfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation>Kliknij &quot;Podpisz Wiadomość&quot;, żeby uzyskać podpis</translation> </message> <message> <location line="+3"/> <source>Enter synergy signature</source> <translation type="unfinished"/> </message> <message> <location line="+82"/> <location line="+81"/> <source>The entered address is invalid.</source> <translation>Podany adres jest nieprawidłowy.</translation> </message> <message> <location line="-81"/> <location line="+8"/> <location line="+73"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation>Proszę sprawdzić adres i spróbować ponownie.</translation> </message> <message> <location line="-81"/> <location line="+81"/> <source>The entered address does not refer to a key.</source> <translation>Wprowadzony adres nie odnosi się do klucza.</translation> </message> <message> <location line="-73"/> <source>Wallet unlock was cancelled.</source> <translation>Odblokowanie portfela zostało anulowane.</translation> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation>Klucz prywatny dla podanego adresu nie jest dostępny.</translation> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation>Podpisanie wiadomości nie powiodło się.</translation> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation>Wiadomość podpisana.</translation> </message> <message> <location line="+59"/> <source>The signature could not be decoded.</source> <translation>Podpis nie może zostać zdekodowany.</translation> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> <translation>Sprawdź podpis i spróbuj ponownie.</translation> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation>Podpis nie pasuje do skrótu wiadomości.</translation> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation>Weryfikacja wiadomości nie powiodła się.</translation> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation>Wiadomość zweryfikowana.</translation> </message> </context> <context> <name>TransactionDesc</name> <message> <location filename="../transactiondesc.cpp" line="+19"/> <source>Open until %1</source> <translation>Otwarte do %1</translation> </message> <message numerus="yes"> <location line="-2"/> <source>Open for %n block(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+8"/> <source>conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1/offline</source> <translation>%1/offline</translation> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation>%1/niezatwierdzone</translation> </message> <message> <location line="+2"/> <source>%1 confirmations</source> <translation>%1 potwierdzeń</translation> </message> <message> <location line="+18"/> <source>Status</source> <translation>Status</translation> </message> <message numerus="yes"> <location line="+7"/> <source>, broadcast through %n node(s)</source> 
<translation><numerusform>, emitowany przez %n węzeł</numerusform><numerusform>, emitowany przez %n węzły</numerusform><numerusform>, emitowany przez %n węzłów</numerusform></translation> </message> <message> <location line="+4"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+7"/> <source>Source</source> <translation>Źródło</translation> </message> <message> <location line="+0"/> <source>Generated</source> <translation>Wygenerowano</translation> </message> <message> <location line="+5"/> <location line="+17"/> <source>From</source> <translation>Od</translation> </message> <message> <location line="+1"/> <location line="+22"/> <location line="+58"/> <source>To</source> <translation>Do</translation> </message> <message> <location line="-77"/> <location line="+2"/> <source>own address</source> <translation>własny adres</translation> </message> <message> <location line="-2"/> <source>label</source> <translation>etykieta</translation> </message> <message> <location line="+37"/> <location line="+12"/> <location line="+45"/> <location line="+17"/> <location line="+30"/> <source>Credit</source> <translation>Uznanie</translation> </message> <message numerus="yes"> <location line="-102"/> <source>matures in %n more block(s)</source> <translation><numerusform>dojrzeje za %n kolejny blok</numerusform><numerusform>dojrzeje za %n kolejne bloki</numerusform><numerusform>dojrzeje za %n kolejnych bloków</numerusform></translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation>niezaakceptowane</translation> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+30"/> <source>Debit</source> <translation>Debet</translation> </message> <message> <location line="-39"/> <source>Transaction fee</source> <translation>Prowizja transakcji</translation> </message> <message> <location line="+16"/> <source>Net amount</source> <translation>Kwota netto</translation> </message> <message> <location line="+6"/> <source>Message</source> <translation>Wiadomość</translation> </message> <message> <location line="+2"/> <source>Comment</source> <translation>Komentarz</translation> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation>ID transakcji</translation> </message> <message> <location line="+3"/> <source>Generated coins must mature 510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. 
This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Debug information</source> <translation>Informacje debugowania</translation> </message> <message> <location line="+8"/> <source>Transaction</source> <translation>Transakcja</translation> </message> <message> <location line="+5"/> <source>Inputs</source> <translation>Wejścia</translation> </message> <message> <location line="+23"/> <source>Amount</source> <translation>Kwota</translation> </message> <message> <location line="+1"/> <source>true</source> <translation>prawda</translation> </message> <message> <location line="+0"/> <source>false</source> <translation>fałsz</translation> </message> <message> <location line="-211"/> <source>, has not been successfully broadcast yet</source> <translation>, nie został jeszcze pomyślnie wyemitowany</translation> </message> <message> <location line="+35"/> <source>unknown</source> <translation>nieznany</translation> </message> </context> <context> <name>TransactionDescDialog</name> <message> <location filename="../forms/transactiondescdialog.ui" line="+14"/> <source>Transaction details</source> <translation>Szczegóły transakcji</translation> </message> <message> <location line="+6"/> <source>This pane shows a detailed description of the transaction</source> <translation>Ten panel pokazuje szczegółowy opis transakcji</translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+226"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+0"/> <source>Type</source> <translation>Typ</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Adres</translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation>Kwota</translation> </message> <message> <location line="+60"/> <source>Open until %1</source> <translation>Otwarte do %1</translation> </message> <message> <location line="+12"/> <source>Confirmed (%1 confirmations)</source> <translation>Zatwierdzony (%1 potwierdzeń)</translation> </message> <message numerus="yes"> <location line="-15"/> <source>Open for %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+6"/> <source>Offline</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Unconfirmed</source> <translation>Niepotwierdzone</translation> </message> <message> <location line="+3"/> <source>Confirming (%1 of %2 recommended confirmations)</source> <translation>Potwierdzanie (%1 z %2 rekomendowanych potwierdzeń)</translation> </message> <message> <location line="+6"/> <source>Conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Immature (%1 confirmations, will be available after %2)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation>Ten blok nie został odebrany przez jakikolwiek inny węzeł i prawdopodobnie nie zostanie zaakceptowany!</translation> </message> <message> <location line="+3"/> <source>Generated but not accepted</source> <translation>Wygenerowano, ale nie zaakceptowano</translation> </message> 
<message> <location line="+42"/> <source>Received with</source> <translation>Otrzymane przez</translation> </message> <message> <location line="+2"/> <source>Received from</source> <translation>Odebrano od</translation> </message> <message> <location line="+3"/> <source>Sent to</source> <translation>Wysłano do</translation> </message> <message> <location line="+2"/> <source>Payment to yourself</source> <translation>Płatność do siebie</translation> </message> <message> <location line="+2"/> <source>Mined</source> <translation>Wydobyto</translation> </message> <message> <location line="+38"/> <source>(n/a)</source> <translation>(brak)</translation> </message> <message> <location line="+190"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation>Status transakcji. Najedź na pole, aby zobaczyć liczbę potwierdzeń.</translation> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation>Data i czas odebrania transakcji.</translation> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation>Rodzaj transakcji.</translation> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation>Adres docelowy transakcji.</translation> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation>Kwota usunięta z lub dodana do konta.</translation> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+55"/> <location line="+16"/> <source>All</source> <translation>Wszystko</translation> </message> <message> <location line="-15"/> <source>Today</source> <translation>Dzisiaj</translation> </message> <message> <location line="+1"/> <source>This week</source> <translation>W tym tygodniu</translation> </message> <message> <location line="+1"/> <source>This month</source> <translation>W tym miesiącu</translation> </message> <message> <location line="+1"/> <source>Last month</source> <translation>W zeszłym miesiącu</translation> </message> <message> <location line="+1"/> <source>This year</source> <translation>W tym roku</translation> </message> <message> <location line="+1"/> <source>Range...</source> <translation>Zakres...</translation> </message> <message> <location line="+11"/> <source>Received with</source> <translation>Otrzymane przez</translation> </message> <message> <location line="+2"/> <source>Sent to</source> <translation>Wysłano do</translation> </message> <message> <location line="+2"/> <source>To yourself</source> <translation>Do siebie</translation> </message> <message> <location line="+1"/> <source>Mined</source> <translation>Wydobyto</translation> </message> <message> <location line="+1"/> <source>Other</source> <translation>Inne</translation> </message> <message> <location line="+7"/> <source>Enter address or label to search</source> <translation>Wprowadź adres albo etykietę żeby wyszukać</translation> </message> <message> <location line="+7"/> <source>Min amount</source> <translation>Min suma</translation> </message> <message> <location line="+34"/> <source>Copy address</source> <translation>Kopiuj adres</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Kopiuj etykietę</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Kopiuj kwotę</translation> </message> <message> <location line="+1"/> 
<source>Copy transaction ID</source> <translation>Skopiuj ID transakcji</translation> </message> <message> <location line="+1"/> <source>Edit label</source> <translation>Edytuj etykietę</translation> </message> <message> <location line="+1"/> <source>Show transaction details</source> <translation>Pokaż szczegóły transakcji</translation> </message> <message> <location line="+144"/> <source>Export Transaction Data</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>CSV (rozdzielany przecinkami)</translation> </message> <message> <location line="+8"/> <source>Confirmed</source> <translation>Potwierdzony</translation> </message> <message> <location line="+1"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+1"/> <source>Type</source> <translation>Typ</translation> </message> <message> <location line="+1"/> <source>Label</source> <translation>Etykieta</translation> </message> <message> <location line="+1"/> <source>Address</source> <translation>Adres</translation> </message> <message> <location line="+1"/> <source>Amount</source> <translation>Kwota</translation> </message> <message> <location line="+1"/> <source>ID</source> <translation>ID</translation> </message> <message> <location line="+4"/> <source>Error exporting</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"/> </message> <message> <location line="+100"/> <source>Range:</source> <translation>Zakres:</translation> </message> <message> <location line="+8"/> <source>to</source> <translation>do</translation> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+206"/> <source>Sending...</source> <translation type="unfinished"/> </message> </context> <context> <name>bitcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+33"/> <source>synergy version</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Usage:</source> <translation>Użycie:</translation> </message> <message> <location line="+1"/> <source>Send command to -server or synergyd</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>List commands</source> <translation>Lista poleceń</translation> </message> <message> <location line="+1"/> <source>Get help for a command</source> <translation>Uzyskaj pomoc do polecenia</translation> </message> <message> <location line="+2"/> <source>Options:</source> <translation>Opcje:</translation> </message> <message> <location line="+2"/> <source>Specify configuration file (default: synergy.conf)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Specify pid file (default: synergyd.pid)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Specify wallet file (within data directory)</source> <translation>Określ plik portfela (w obrębie folderu danych)</translation> </message> <message> <location line="-1"/> <source>Specify data directory</source> <translation>Wskaż folder danych</translation> </message> <message> <location line="+2"/> <source>Set database cache size in megabytes (default: 25)</source> <translation>Ustaw rozmiar w megabajtach cache-u bazy danych (domyślnie: 25)</translation> </message> <message> <location line="+1"/> <source>Set database disk log size 
in megabytes (default: 100)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Listen for connections on &lt;port&gt; (default: 15714 or testnet: 25714)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation>Utrzymuj maksymalnie &lt;n&gt; połączeń z peerami (domyślnie: 125)</translation> </message> <message> <location line="+3"/> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation>Podłącz się do węzła aby otrzymać adresy peerów i rozłącz</translation> </message> <message> <location line="+1"/> <source>Specify your own public address</source> <translation>Podaj swój publiczny adres</translation> </message> <message> <location line="+5"/> <source>Bind to given address. Use [host]:port notation for IPv6</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Stake your coins to support network and gain reward (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation>Próg po którym nastąpi rozłączenie nietrzymających się zasad peerów (domyślnie: 100)</translation> </message> <message> <location line="+1"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation>Czas w sekundach, przez jaki nietrzymający się zasad peerzy nie będą mogli ponownie się podłączyć (domyślnie: 86400)</translation> </message> <message> <location line="-44"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation>Wystąpił błąd podczas ustawiania portu RPC %u w tryb nasłuchu: %s</translation> </message> <message> <location line="+51"/> <source>Detach block and address databases. Increases shutdown time (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+109"/> <source>Error: The transaction was rejected. 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source> <translation type="unfinished"/> </message> <message> <location line="-87"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 15715 or testnet: 25715)</source> <translation type="unfinished"/> </message> <message> <location line="-11"/> <source>Accept command line and JSON-RPC commands</source> <translation>Akceptuj linię poleceń oraz polecenia JSON-RPC</translation> </message> <message> <location line="+101"/> <source>Error: Transaction creation failed </source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>Error: Wallet locked, unable to create transaction </source> <translation type="unfinished"/> </message> <message> <location line="-8"/> <source>Importing blockchain data file.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Importing bootstrap blockchain data file.</source> <translation type="unfinished"/> </message> <message> <location line="-88"/> <source>Run in the background as a daemon and accept commands</source> <translation>Uruchom w tle jako daemon i przyjmuj polecenia</translation> </message> <message> <location line="+1"/> <source>Use the test network</source> <translation>Użyj sieci testowej</translation> </message> <message> <location line="-24"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation>Akceptuj połączenia z zewnątrz (domyślnie: 1 jeśli nie ustawiono -proxy lub -connect)</translation> </message> <message> <location line="-38"/> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation>Wystąpił błąd podczas ustawiania portu RPC %u w tryb nasłuchu dla IPv6, korzystam z IPv4: %s</translation> </message> <message> <location line="+117"/> <source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source> <translation type="unfinished"/> </message> <message> <location line="-20"/> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation>Ostrzeżenie: -paytxfee jest bardzo duży. To jest prowizja za transakcje, którą płacisz, gdy wysyłasz monety.</translation> </message> <message> <location line="+61"/> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong synergy will not work properly.</source> <translation type="unfinished"/> </message> <message> <location line="-31"/> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation>Ostrzeżenie: błąd odczytu wallet.dat! 
Wszystkie klucze zostały odczytane, ale może brakować pewnych danych transakcji lub wpisów w książce adresowej lub mogą one być nieprawidłowe.</translation> </message> <message> <location line="-18"/> <source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation>Ostrzeżenie: Odtworzono dane z uszkodzonego pliku wallet.dat! Oryginalny wallet.dat został zapisany jako wallet.{timestamp}.bak w %s; jeśli twoje saldo lub transakcje są niepoprawne powinieneś odtworzyć kopię zapasową.</translation> </message> <message> <location line="-30"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation>Próbuj odzyskać klucze prywatne z uszkodzonego wallet.dat</translation> </message> <message> <location line="+4"/> <source>Block creation options:</source> <translation>Opcje tworzenia bloku:</translation> </message> <message> <location line="-62"/> <source>Connect only to the specified node(s)</source> <translation>Łącz tylko do wskazanego węzła</translation> </message> <message> <location line="+4"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation>Odkryj własny adres IP (domyślnie: 1 kiedy w trybie nasłuchu i brak -externalip )</translation> </message> <message> <location line="+94"/> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation>Próba otwarcia jakiegokolwiek portu nie powiodła się. Użyj -listen=0 jeśli tego chcesz.</translation> </message> <message> <location line="-90"/> <source>Find peers using DNS lookup (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Sync checkpoints policy (default: strict)</source> <translation type="unfinished"/> </message> <message> <location line="+83"/> <source>Invalid -tor address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Invalid amount for -reservebalance=&lt;amount&gt;</source> <translation type="unfinished"/> </message> <message> <location line="-82"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation>Maksymalny bufor odbioru na połączenie, &lt;n&gt;*1000 bajtów (domyślnie: 5000)</translation> </message> <message> <location line="+1"/> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation>Maksymalny bufor wysyłu na połączenie, &lt;n&gt;*1000 bajtów (domyślnie: 1000)</translation> </message> <message> <location line="-16"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation>Łącz z węzłami tylko w sieci &lt;net&gt; (IPv4, IPv6 lub Tor)</translation> </message> <message> <location line="+28"/> <source>Output extra debugging information. 
Implies all other -debug* options</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Output extra network debugging information</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Prepend debug output with timestamp</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source> <translation>Opcje SSL: (odwiedź Bitcoin Wiki w celu uzyskania instrukcji)</translation> </message> <message> <location line="-74"/> <source>Select the version of socks proxy to use (4-5, default: 5)</source> <translation type="unfinished"/> </message> <message> <location line="+41"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation>Wyślij informację/raport do konsoli zamiast do pliku debug.log.</translation> </message> <message> <location line="+1"/> <source>Send trace/debug info to debugger</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>Set maximum block size in bytes (default: 250000)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Set minimum block size in bytes (default: 0)</source> <translation>Ustaw minimalny rozmiar bloku w bajtach (domyślnie: 0)</translation> </message> <message> <location line="-29"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation>Zmniejsz plik debug.log przy starcie programu (domyślnie: 1 jeśli nie użyto -debug)</translation> </message> <message> <location line="-42"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation>Wskaż czas oczekiwania bezczynności połączenia w milisekundach (domyślnie: 5000)</translation> </message> <message> <location line="+109"/> <source>Unable to sign checkpoint, wrong checkpointkey? 
</source> <translation type="unfinished"/> </message> <message> <location line="-80"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation>Używaj UPnP do mapowania portu nasłuchu (domyślnie: 0)</translation> </message> <message> <location line="-1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation>Używaj UPnP do mapowania portu nasłuchu (domyślnie: 1 gdy nasłuchuje)</translation> </message> <message> <location line="-25"/> <source>Use proxy to reach tor hidden services (default: same as -proxy)</source> <translation type="unfinished"/> </message> <message> <location line="+42"/> <source>Username for JSON-RPC connections</source> <translation>Nazwa użytkownika dla połączeń JSON-RPC</translation> </message> <message> <location line="+47"/> <source>Verifying database integrity...</source> <translation type="unfinished"/> </message> <message> <location line="+57"/> <source>WARNING: syncronized checkpoint violation detected, but skipped!</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Warning: Disk space is low!</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation>Uwaga: Ta wersja jest przestarzała, aktualizacja wymagana!</translation> </message> <message> <location line="-48"/> <source>wallet.dat corrupt, salvage failed</source> <translation>wallet.dat uszkodzony, odtworzenie się nie powiodło</translation> </message> <message> <location line="-54"/> <source>Password for JSON-RPC connections</source> <translation>Hasło do połączeń JSON-RPC</translation> </message> <message> <location line="-84"/> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=synergyrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;synergy Alert&quot; [email protected] </source> <translation type="unfinished"/> </message> <message> <location line="+51"/> <source>Find peers using internet relay chat (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Sync time with other nodes. Disable if time on your system is precise e.g. 
syncing with NTP (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation>Przyjmuj połączenia JSON-RPC ze wskazanego adresu IP</translation> </message> <message> <location line="+1"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation>Wysyłaj polecenia do węzła działającego na &lt;ip&gt; (domyślnie: 127.0.0.1)</translation> </message> <message> <location line="+1"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation>Wykonaj polecenie kiedy najlepszy blok ulegnie zmianie (%s w komendzie zastanie zastąpione przez hash bloku)</translation> </message> <message> <location line="+3"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation>Wykonaj polecenie, kiedy transakcja portfela ulegnie zmianie (%s w poleceniu zostanie zastąpione przez TxID)</translation> </message> <message> <location line="+3"/> <source>Require a confirmations for change (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Enforce transaction scripts to use canonical PUSH operators (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Upgrade wallet to latest format</source> <translation>Zaktualizuj portfel do najnowszego formatu.</translation> </message> <message> <location line="+1"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation>Ustaw rozmiar puli kluczy na &lt;n&gt; (domyślnie: 100)</translation> </message> <message> <location line="+1"/> <source>Rescan the block chain for missing wallet transactions</source> <translation>Przeskanuj blok łańcuchów żeby znaleźć zaginione transakcje portfela</translation> </message> <message> <location line="+2"/> <source>How many blocks to check at startup (default: 2500, 0 = all)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>How thorough the block verification is (0-6, default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Imports blocks from external blk000?.dat file</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> <translation>Użyj OpenSSL (https) do połączeń JSON-RPC</translation> </message> <message> <location line="+1"/> <source>Server certificate file (default: server.cert)</source> <translation>Plik certyfikatu serwera (domyślnie: server.cert)</translation> </message> <message> <location line="+1"/> <source>Server private key (default: server.pem)</source> <translation>Klucz prywatny serwera (domyślnie: server.pem)</translation> </message> <message> <location line="+1"/> <source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source> <translation type="unfinished"/> </message> <message> <location line="+53"/> <source>Error: Wallet unlocked for staking only, unable to create 
transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source> <translation type="unfinished"/> </message> <message> <location line="-158"/> <source>This help message</source> <translation>Ta wiadomość pomocy</translation> </message> <message> <location line="+95"/> <source>Wallet %s resides outside data directory %s.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot obtain a lock on data directory %s. synergy is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="-98"/> <source>synergy</source> <translation type="unfinished"/> </message> <message> <location line="+140"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation>Nie można przywiązać %s na tym komputerze (bind returned error %d, %s)</translation> </message> <message> <location line="-130"/> <source>Connect through socks proxy</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation>Zezwól -addnode, -seednode i -connect na łączenie się z serwerem DNS</translation> </message> <message> <location line="+122"/> <source>Loading addresses...</source> <translation>Wczytywanie adresów...</translation> </message> <message> <location line="-15"/> <source>Error loading blkindex.dat</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation>Błąd ładowania wallet.dat: Uszkodzony portfel</translation> </message> <message> <location line="+4"/> <source>Error loading wallet.dat: Wallet requires newer version of synergy</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Wallet needed to be rewritten: restart synergy to complete</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Error loading wallet.dat</source> <translation>Błąd ładowania wallet.dat</translation> </message> <message> <location line="-16"/> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation>Nieprawidłowy adres -proxy: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation>Nieznana sieć w -onlynet: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown -socks proxy version requested: %i</source> <translation>Nieznana wersja proxy w -socks: %i</translation> </message> <message> <location line="+4"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation>Nie można uzyskać adresu -bind: &apos;%s&apos;</translation> </message> <message> <location line="+2"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation>Nie można uzyskać adresu -externalip: &apos;%s&apos;</translation> </message> <message> <location line="-24"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Nieprawidłowa kwota dla -paytxfee=&lt;amount&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+44"/> <source>Error: could not start node</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Sending...</source> 
<translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Invalid amount</source> <translation>Nieprawidłowa kwota</translation> </message> <message> <location line="+1"/> <source>Insufficient funds</source> <translation>Niewystarczające środki</translation> </message> <message> <location line="-34"/> <source>Loading block index...</source> <translation>Ładowanie indeksu bloku...</translation> </message> <message> <location line="-103"/> <source>Add a node to connect to and attempt to keep the connection open</source> <translation>Dodaj węzeł do łączenia się and attempt to keep the connection open</translation> </message> <message> <location line="+122"/> <source>Unable to bind to %s on this computer. synergy is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="-97"/> <source>Fee per KB to add to transactions you send</source> <translation type="unfinished"/> </message> <message> <location line="+55"/> <source>Invalid amount for -mininput=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Loading wallet...</source> <translation>Wczytywanie portfela...</translation> </message> <message> <location line="+8"/> <source>Cannot downgrade wallet</source> <translation>Nie można dezaktualizować portfela</translation> </message> <message> <location line="+1"/> <source>Cannot initialize keypool</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot write default address</source> <translation>Nie można zapisać domyślnego adresu</translation> </message> <message> <location line="+1"/> <source>Rescanning...</source> <translation>Ponowne skanowanie...</translation> </message> <message> <location line="+5"/> <source>Done loading</source> <translation>Wczytywanie zakończone</translation> </message> <message> <location line="-167"/> <source>To use the %s option</source> <translation>Aby użyć opcji %s</translation> </message> <message> <location line="+14"/> <source>Error</source> <translation>Błąd</translation> </message> <message> <location line="+6"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation>Musisz ustawić rpcpassword=&lt;hasło&gt; w pliku configuracyjnym: %s Jeżeli plik nie istnieje, utwórz go z uprawnieniami właściciela-tylko-do-odczytu.</translation> </message> </context> </TS>
mit
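The Qt .ts file above carries three <numerusform> variants per plural message because Polish distinguishes one, few, and many. A minimal standalone Java sketch of the standard gettext/CLDR plural rule for Polish (illustrative class and method names, not part of the repository above) shows which of the three forms a given %n selects:

// Standalone sketch, not Qt code: picks which of the three Polish
// <numerusform> variants applies to a count n, using the standard
// gettext/CLDR plural rule for Polish.
public class PolishPluralForm {
    // 0 = one, 1 = few, 2 = many: the index of the <numerusform> entry
    // a translator substitutes for %n.
    static int formIndex(long n) {
        if (n == 1) return 0;                     // "1 węzeł"
        long mod10 = n % 10, mod100 = n % 100;
        if (mod10 >= 2 && mod10 <= 4 && (mod100 < 12 || mod100 > 14)) {
            return 1;                             // 2-4, 22-24, ... "węzły"
        }
        return 2;                                 // 0, 5-21, 12-14, ... "węzłów"
    }

    public static void main(String[] args) {
        String[] forms = {", emitowany przez %n węzeł",
                          ", emitowany przez %n węzły",
                          ", emitowany przez %n węzłów"};
        for (long n : new long[]{1, 2, 5, 12, 22, 112}) {
            System.out.println(forms[formIndex(n)].replace("%n", Long.toString(n)));
        }
    }
}

Note that 12, 13 and 14 fall in the "many" bucket even though they end in 2-4, which is why the rule checks n % 100 as well as n % 10.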
kramer2/EMR
files/templates_c/%%E3^E33^E336EC2F%%search_control.tpl.php
3513
<?php /* Smarty version 2.6.26, created on 2011-03-16 21:33:31 compiled from search_control.tpl */ ?>
<!-- -->
<form method="GET" name="SearchForm" style="padding: 0px; margin: 0px; vertical-align: middle;">
<?php $_from = $this->_tpl_vars['SearchControl']->GetHiddenValues(); if (!is_array($_from) && !is_object($_from)) { settype($_from, 'array'); }if (count($_from)):
    foreach ($_from as $this->_tpl_vars['Name'] => $this->_tpl_vars['Value']):
?>
    <input type="hidden" name="<?php echo $this->_tpl_vars['Name']; ?>" value="<?php echo $this->_tpl_vars['Value']; ?>" />
<?php endforeach; endif; unset($_from); ?>
    <input type="hidden" name="operation" value="ssearch" />
    <input type="hidden" name="ResetFilter" value="0" />
    <b><?php echo $this->_tpl_vars['Captions']->GetMessageString('SearchFor'); ?>: </b>
    &nbsp;&nbsp;&nbsp;
    <select class="sfilter_comboBox" name="SearchField" id="SearchField">
<?php $_from = $this->_tpl_vars['SearchControl']->GetFieldsToFilter(); if (!is_array($_from) && !is_object($_from)) { settype($_from, 'array'); }if (count($_from)):
    foreach ($_from as $this->_tpl_vars['FieldName'] => $this->_tpl_vars['FieldCaption']):
?>
        <option value="<?php echo $this->_tpl_vars['FieldName']; ?>"<?php if ($this->_tpl_vars['SearchControl']->GetActiveFieldName() == $this->_tpl_vars['FieldName']): ?> selected<?php endif; ?>><?php echo $this->_tpl_vars['FieldCaption']; ?></option>
<?php endforeach; endif; unset($_from); ?>
    </select>
    &nbsp;
    <select class="sfilter_comboBox" name="FilterType" id="FilterType">
<?php $_from = $this->_tpl_vars['SearchControl']->GetFilterTypes(); if (!is_array($_from) && !is_object($_from)) { settype($_from, 'array'); }if (count($_from)):
    foreach ($_from as $this->_tpl_vars['FilterTypeIndex'] => $this->_tpl_vars['FilterTypeName']):
?>
        <option value="<?php echo $this->_tpl_vars['FilterTypeIndex']; ?>"<?php if ($this->_tpl_vars['SearchControl']->GetActiveFilterTypeName() == $this->_tpl_vars['FilterTypeIndex']): ?> selected<?php endif; ?>><?php echo $this->_tpl_vars['FilterTypeName']; ?></option>
<?php endforeach; endif; unset($_from); ?>
    </select>
    &nbsp;&nbsp;&nbsp;&nbsp;
    <input class="sfilter_text" type="text" size="20" name="FilterText" id="FilterText" value="<?php echo $this->_tpl_vars['SearchControl']->GetActiveFilterText(); ?>">
    &nbsp;
    <input type="submit" class="sm_button" value="<?php echo $this->_tpl_vars['Captions']->GetMessageString('ApplySimpleFilter'); ?>">
    &nbsp;
    <input type="button" class="sm_button" value="<?php echo $this->_tpl_vars['Captions']->GetMessageString('ResetSimpleFilter'); ?>" onclick="javascript: document.forms.SearchForm.ResetFilter.value = '1'; document.forms.SearchForm.submit();">
</form>
<script>
<?php if ($this->_tpl_vars['SearchControl']->UseTextHighlight() != ''): ?>
$(document).ready(function(){
<?php $_from = $this->_tpl_vars['SearchControl']->GetHighlightedFields(); if (!is_array($_from) && !is_object($_from)) { settype($_from, 'array'); }if (count($_from)):
    foreach ($_from as $this->_tpl_vars['HighlightFieldName']):
?>
    HighlightTextInGrid('.grid', '<?php echo $this->_tpl_vars['HighlightFieldName']; ?>', '<?php echo $this->_tpl_vars['SearchControl']->GetTextForHighlight(); ?>', '<?php echo $this->_tpl_vars['SearchControl']->GetHighlightOption(); ?>');
<?php endforeach; endif; unset($_from); ?>
});
<?php endif; ?>
</script>
mit
SharpKit/Cs2Java
Runtime/rt/sun/reflect/ConstructorAccessor.cs
306
//***************************************************
//* This file was generated by JSharp
//***************************************************
namespace sun.reflect
{
    public partial interface ConstructorAccessor
    {
        global::System.Object newInstance(global::System.Object[] prm1);
    }
}
mit
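The generated C# interface above mirrors the JDK-internal sun.reflect.ConstructorAccessor, whose single job is to instantiate a class from an argument array. A sketch of the same contract using only the public java.lang.reflect API makes the abstraction concrete; ReflectiveConstructorAccessor is an illustrative name, not a real JDK class.

// Plain-Java sketch of the contract the interface above mirrors: given an
// argument array, produce a new instance. Uses the public reflection API
// rather than the JDK-internal sun.reflect machinery.
import java.lang.reflect.Constructor;

public class ReflectiveConstructorAccessor {
    private final Constructor<?> ctor;

    public ReflectiveConstructorAccessor(Constructor<?> ctor) {
        this.ctor = ctor;
    }

    // Equivalent of newInstance(Object[] prm1) in the interface above.
    public Object newInstance(Object[] args) throws Exception {
        return ctor.newInstance(args);
    }

    public static void main(String[] args) throws Exception {
        Constructor<?> c = StringBuilder.class.getConstructor(String.class);
        Object sb = new ReflectiveConstructorAccessor(c).newInstance(new Object[]{"hello"});
        System.out.println(sb); // prints "hello"
    }
}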
edibleguy/grav
cache/gantry5/g5_helium/compiled/yaml/cbc56e228bfef762f2610e3cf90a3ff7.yaml.php
802
<?php
return [
    '@class' => 'Gantry\\Component\\File\\CompiledYamlFile',
    'filename' => '/var/www/html/grav/user/themes/g5_helium/blueprints/styles/intro.yaml',
    'modified' => 1482802103,
    'data' => [
        'name' => 'Intro Styles',
        'description' => 'Intro section styles for the Helium theme',
        'type' => 'section',
        'form' => [
            'fields' => [
                'background' => [
                    'type' => 'input.colorpicker',
                    'label' => 'Background',
                    'default' => '#f4f5f7'
                ],
                'text-color' => [
                    'type' => 'input.colorpicker',
                    'label' => 'Text',
                    'default' => '#424753'
                ]
            ]
        ]
    ]
];
mit
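The compiled blueprint above records the source file's modification time ('modified' => 1482802103) alongside the parsed data, presumably so the cache layer can decide whether the compiled copy is still valid. A rough Java sketch of that freshness check, with an assumed relative path and the timestamp taken from the record above:

// Sketch of an mtime-based cache check (assumption: the 'modified' field is
// compared against the source file's current mtime). Path is illustrative.
import java.io.IOException;
import java.nio.file.*;
import java.nio.file.attribute.FileTime;

public class MtimeCacheCheck {
    static boolean cacheIsFresh(Path source, long storedMtimeSeconds) throws IOException {
        FileTime current = Files.getLastModifiedTime(source);
        return current.toMillis() / 1000 == storedMtimeSeconds;
    }

    public static void main(String[] args) throws Exception {
        Path source = Paths.get("blueprints/styles/intro.yaml"); // illustrative path
        long stored = 1482802103L; // the 'modified' value recorded in the cache entry above
        if (Files.exists(source)) {
            System.out.println("compiled cache fresh: " + cacheIsFresh(source, stored));
        } else {
            System.out.println("source missing; cache would be rebuilt");
        }
    }
}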
pacho10/Stone-Age
src/stoneAge/main/HuntingGround.java
936
package stoneAge.main;

import stoneAge.player.Player;

public class HuntingGround extends GameBoardElement {

    private static final int FOOD_COEFFICIENT = 2;

    public HuntingGround() {
        super();
    }

    @Override
    public int removeFigure(Figure figure) {
        super.removeFigure(figure);
        // Each removed figure yields one d6 roll.
        int diceValue = (int) ((Math.random() * 6) + 1);
        return diceValue;
    }

    public void removeAllFiguresOfOnePlayer(Player player) {
        int amountOfFoodGained = 0;
        if (player != null) {
            for (int i = 0; i < this.getFigures().size(); i++) {
                if ((this.getFigures().get(i) != null) && (this.getFigures().get(i).getPlayer().equals(player))) {
                    player.takeFigure(this.getFigures().get(i));
                    amountOfFoodGained += removeFigure(this.getFigures().get(i));
                    i--; // the list shrank by one; stay at the same index
                }
            }
            System.out.println(player.getName() + " has gained " + amountOfFoodGained / FOOD_COEFFICIENT + " food.");
            player.gainFood(amountOfFoodGained / FOOD_COEFFICIENT);
        }
    }
}
mit
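In HuntingGround above, every figure removed from the hunt yields one d6 roll, and the roll total converts to food by integer division with FOOD_COEFFICIENT (2), so odd points are simply lost. A standalone sketch of just that arithmetic (class name illustrative, not part of the repository above):

// Standalone demo of the food calculation: sum one d6 per figure,
// then integer-divide by FOOD_COEFFICIENT = 2, discarding remainders.
import java.util.Random;

public class HuntingFoodDemo {
    private static final int FOOD_COEFFICIENT = 2;

    public static void main(String[] args) {
        Random dice = new Random();
        int figures = 3, total = 0;
        for (int i = 0; i < figures; i++) {
            int roll = dice.nextInt(6) + 1; // same range as (int)((Math.random()*6)+1)
            total += roll;
            System.out.println("figure " + (i + 1) + " rolled " + roll);
        }
        // e.g. a total of 7 yields 3 food: the odd point vanishes in integer division
        System.out.println("food gained: " + total / FOOD_COEFFICIENT);
    }
}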
dachengxi/spring1.1.1_source
src/org/springframework/aop/target/AbstractPrototypeBasedTargetSource.java
5266
/* * Copyright 2002-2004 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.aop.target; import java.io.ObjectStreamException; import java.io.Serializable; import org.aopalliance.aop.AspectException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.springframework.aop.TargetSource; import org.springframework.beans.BeansException; import org.springframework.beans.factory.BeanDefinitionStoreException; import org.springframework.beans.factory.BeanFactory; import org.springframework.beans.factory.BeanFactoryAware; import org.springframework.beans.factory.InitializingBean; import org.springframework.beans.factory.config.ConfigurableListableBeanFactory; /** * Base class for dynamic TargetSources that can create new prototype bean * instances to support a pooling or new-instance-per-invocation strategy. * * <p>Such TargetSources must run in a BeanFactory, as it needs to call the * getBean() method to create a new prototype instance. * * <p>PrototypeBasedTargetSources are serializable. This involves disconnecting * the current target and turning into a SingletonTargetSource. * * @author Rod Johnson * @author Juergen Hoeller * @see org.springframework.beans.factory.BeanFactory#getBean */ public abstract class AbstractPrototypeBasedTargetSource implements TargetSource, BeanFactoryAware, InitializingBean, Serializable { protected final Log logger = LogFactory.getLog(getClass()); /** Name of the target bean we will create on each invocation */ private String targetBeanName; /** * BeanFactory that owns this TargetSource. We need to hold onto this * reference so that we can create new prototype instances as necessary. */ private BeanFactory owningBeanFactory; /** Class of the target */ private Class targetClass; /** * Set the name of the target bean in the factory. This bean should be a * prototype, or the same instance will always be obtained from the * factory, resulting in the same behavior as the InvokerInterceptor. * @param targetBeanName name of the target bean in the BeanFactory * that owns this interceptor */ public void setTargetBeanName(String targetBeanName) { this.targetBeanName = targetBeanName; } /** * Return the name of the target bean in the factory. */ public String getTargetBeanName() { return this.targetBeanName; } /** * Set the owning BeanFactory. We need to save a reference so that we can * use the getBean() method on every invocation. 
*/ public void setBeanFactory(BeanFactory beanFactory) throws BeansException { this.owningBeanFactory = beanFactory; // check whether the target bean is defined as prototype if (this.owningBeanFactory.isSingleton(this.targetBeanName)) { throw new BeanDefinitionStoreException( "Cannot use PrototypeTargetSource against a singleton bean: instances would not be independent"); } // determine type of the target bean if (beanFactory instanceof ConfigurableListableBeanFactory) { this.targetClass = ((ConfigurableListableBeanFactory) beanFactory).getBeanDefinition(this.targetBeanName).getBeanClass(); } else { if (logger.isInfoEnabled()) { logger.info("Getting bean with name '" + this.targetBeanName + "' to find class"); } this.targetClass = this.owningBeanFactory.getBean(this.targetBeanName).getClass(); } } public void afterPropertiesSet() { if (this.targetBeanName == null) { throw new IllegalStateException("targetBeanName is required"); } } public Class getTargetClass() { return this.targetClass; } public boolean isStatic() { return false; } /** * Subclasses should use this method to create a new prototype instance. */ protected Object newPrototypeInstance() { if (logger.isInfoEnabled()) { logger.info("Creating new target from bean '" + this.targetBeanName + "'"); } return this.owningBeanFactory.getBean(this.targetBeanName); } /** * Replaces this object with a SingletonTargetSource on serialization. * Protected as otherwise it won't be invoked for subclasses. * (The writeReplace() method must be visible to the class being serialized.) * <p>With this implementation of this method, there is no need to mark * non-serializable fields in this class or subclasses as transient. */ protected Object writeReplace() throws ObjectStreamException { if (logger.isDebugEnabled()) { logger.debug("Disconnecting TargetSource [" + this + "]"); } try { TargetSource disconnectedTargetSource = new SingletonTargetSource(getTarget()); return disconnectedTargetSource; } catch (Exception ex) { throw new AspectException("Can't get target", ex); } } }
mit
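The Javadoc above notes that prototype-based TargetSources serialize by "disconnecting" into a SingletonTargetSource, implemented through writeReplace(). That method is plain java.io machinery: whatever object it returns is written to the stream in place of the original. A self-contained demo of the mechanism, with DynamicSource and FrozenSource as illustrative stand-ins rather than Spring classes:

// Demo of writeReplace(): during serialization the JVM substitutes the
// object returned by writeReplace() for the one actually being written.
import java.io.*;

public class WriteReplaceDemo {
    static class FrozenSource implements Serializable {
        final String target;
        FrozenSource(String target) { this.target = target; }
    }

    static class DynamicSource implements Serializable {
        transient int callCount; // per-invocation state, never serialized

        Object writeReplace() throws ObjectStreamException {
            // "Disconnect": serialize a snapshot of the current target instead.
            return new FrozenSource("target#" + callCount);
        }
    }

    public static void main(String[] args) throws Exception {
        DynamicSource live = new DynamicSource();
        live.callCount = 42;

        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(buf)) {
            out.writeObject(live); // writeReplace() kicks in here
        }
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
            Object restored = in.readObject();
            System.out.println(restored.getClass().getSimpleName()); // FrozenSource
            System.out.println(((FrozenSource) restored).target);    // target#42
        }
    }
}

Because the substitution happens inside the serialization run itself, none of the non-serializable runtime state (here the transient counter; in Spring, the owning BeanFactory) needs to survive the trip.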
BePsvPT-Fork/framework
tests/Database/DatabaseConnectorTest.php
12126
<?php namespace Illuminate\Tests\Database; use PDO; use Mockery as m; use PHPUnit\Framework\TestCase; class DatabaseConnectorTest extends TestCase { public function tearDown() { m::close(); } public function testOptionResolution() { $connector = new \Illuminate\Database\Connectors\Connector; $connector->setDefaultOptions([0 => 'foo', 1 => 'bar']); $this->assertEquals([0 => 'baz', 1 => 'bar', 2 => 'boom'], $connector->getOptions(['options' => [0 => 'baz', 2 => 'boom']])); } /** * @dataProvider mySqlConnectProvider */ public function testMySqlConnectCallsCreateConnectionWithProperArguments($dsn, $config) { $connector = $this->getMockBuilder('Illuminate\Database\Connectors\MySqlConnector')->setMethods(['createConnection', 'getOptions'])->getMock(); $connection = m::mock('PDO'); $connector->expects($this->once())->method('getOptions')->with($this->equalTo($config))->will($this->returnValue(['options'])); $connector->expects($this->once())->method('createConnection')->with($this->equalTo($dsn), $this->equalTo($config), $this->equalTo(['options']))->will($this->returnValue($connection)); $connection->shouldReceive('prepare')->once()->with('set names \'utf8\' collate \'utf8_unicode_ci\'')->andReturn($connection); $connection->shouldReceive('execute')->once(); $connection->shouldReceive('exec')->zeroOrMoreTimes(); $result = $connector->connect($config); $this->assertSame($result, $connection); } public function mySqlConnectProvider() { return [ ['mysql:host=foo;dbname=bar', ['host' => 'foo', 'database' => 'bar', 'collation' => 'utf8_unicode_ci', 'charset' => 'utf8']], ['mysql:host=foo;port=111;dbname=bar', ['host' => 'foo', 'database' => 'bar', 'port' => 111, 'collation' => 'utf8_unicode_ci', 'charset' => 'utf8']], ['mysql:unix_socket=baz;dbname=bar', ['host' => 'foo', 'database' => 'bar', 'port' => 111, 'unix_socket' => 'baz', 'collation' => 'utf8_unicode_ci', 'charset' => 'utf8']], ]; } public function testPostgresConnectCallsCreateConnectionWithProperArguments() { $dsn = 'pgsql:host=foo;dbname=bar;port=111'; $config = ['host' => 'foo', 'database' => 'bar', 'port' => 111, 'charset' => 'utf8']; $connector = $this->getMockBuilder('Illuminate\Database\Connectors\PostgresConnector')->setMethods(['createConnection', 'getOptions'])->getMock(); $connection = m::mock('stdClass'); $connector->expects($this->once())->method('getOptions')->with($this->equalTo($config))->will($this->returnValue(['options'])); $connector->expects($this->once())->method('createConnection')->with($this->equalTo($dsn), $this->equalTo($config), $this->equalTo(['options']))->will($this->returnValue($connection)); $connection->shouldReceive('prepare')->once()->with('set names \'utf8\'')->andReturn($connection); $connection->shouldReceive('execute')->once(); $result = $connector->connect($config); $this->assertSame($result, $connection); } public function testPostgresSearchPathIsSet() { $dsn = 'pgsql:host=foo;dbname=bar'; $config = ['host' => 'foo', 'database' => 'bar', 'schema' => 'public', 'charset' => 'utf8']; $connector = $this->getMockBuilder('Illuminate\Database\Connectors\PostgresConnector')->setMethods(['createConnection', 'getOptions'])->getMock(); $connection = m::mock('stdClass'); $connector->expects($this->once())->method('getOptions')->with($this->equalTo($config))->will($this->returnValue(['options'])); $connector->expects($this->once())->method('createConnection')->with($this->equalTo($dsn), $this->equalTo($config), $this->equalTo(['options']))->will($this->returnValue($connection)); 
$connection->shouldReceive('prepare')->once()->with('set names \'utf8\'')->andReturn($connection); $connection->shouldReceive('prepare')->once()->with('set search_path to "public"')->andReturn($connection); $connection->shouldReceive('execute')->twice(); $result = $connector->connect($config); $this->assertSame($result, $connection); } public function testPostgresSearchPathArraySupported() { $dsn = 'pgsql:host=foo;dbname=bar'; $config = ['host' => 'foo', 'database' => 'bar', 'schema' => ['public', 'user'], 'charset' => 'utf8']; $connector = $this->getMockBuilder('Illuminate\Database\Connectors\PostgresConnector')->setMethods(['createConnection', 'getOptions'])->getMock(); $connection = m::mock('stdClass'); $connector->expects($this->once())->method('getOptions')->with($this->equalTo($config))->will($this->returnValue(['options'])); $connector->expects($this->once())->method('createConnection')->with($this->equalTo($dsn), $this->equalTo($config), $this->equalTo(['options']))->will($this->returnValue($connection)); $connection->shouldReceive('prepare')->once()->with('set names \'utf8\'')->andReturn($connection); $connection->shouldReceive('prepare')->once()->with('set search_path to "public", "user"')->andReturn($connection); $connection->shouldReceive('execute')->twice(); $result = $connector->connect($config); $this->assertSame($result, $connection); } public function testPostgresApplicationNameIsSet() { $dsn = 'pgsql:host=foo;dbname=bar'; $config = ['host' => 'foo', 'database' => 'bar', 'charset' => 'utf8', 'application_name' => 'Laravel App']; $connector = $this->getMockBuilder('Illuminate\Database\Connectors\PostgresConnector')->setMethods(['createConnection', 'getOptions'])->getMock(); $connection = m::mock('stdClass'); $connector->expects($this->once())->method('getOptions')->with($this->equalTo($config))->will($this->returnValue(['options'])); $connector->expects($this->once())->method('createConnection')->with($this->equalTo($dsn), $this->equalTo($config), $this->equalTo(['options']))->will($this->returnValue($connection)); $connection->shouldReceive('prepare')->once()->with('set names \'utf8\'')->andReturn($connection); $connection->shouldReceive('prepare')->once()->with('set application_name to \'Laravel App\'')->andReturn($connection); $connection->shouldReceive('execute')->twice(); $result = $connector->connect($config); $this->assertSame($result, $connection); } public function testSQLiteMemoryDatabasesMayBeConnectedTo() { $dsn = 'sqlite::memory:'; $config = ['database' => ':memory:']; $connector = $this->getMockBuilder('Illuminate\Database\Connectors\SQLiteConnector')->setMethods(['createConnection', 'getOptions'])->getMock(); $connection = m::mock('stdClass'); $connector->expects($this->once())->method('getOptions')->with($this->equalTo($config))->will($this->returnValue(['options'])); $connector->expects($this->once())->method('createConnection')->with($this->equalTo($dsn), $this->equalTo($config), $this->equalTo(['options']))->will($this->returnValue($connection)); $result = $connector->connect($config); $this->assertSame($result, $connection); } public function testSQLiteFileDatabasesMayBeConnectedTo() { $dsn = 'sqlite:'.__DIR__; $config = ['database' => __DIR__]; $connector = $this->getMockBuilder('Illuminate\Database\Connectors\SQLiteConnector')->setMethods(['createConnection', 'getOptions'])->getMock(); $connection = m::mock('stdClass'); $connector->expects($this->once())->method('getOptions')->with($this->equalTo($config))->will($this->returnValue(['options'])); 
$connector->expects($this->once())->method('createConnection')->with($this->equalTo($dsn), $this->equalTo($config), $this->equalTo(['options']))->will($this->returnValue($connection)); $result = $connector->connect($config); $this->assertSame($result, $connection); } public function testSqlServerConnectCallsCreateConnectionWithProperArguments() { $config = ['host' => 'foo', 'database' => 'bar', 'port' => 111]; $dsn = $this->getDsn($config); $connector = $this->getMockBuilder('Illuminate\Database\Connectors\SqlServerConnector')->setMethods(['createConnection', 'getOptions'])->getMock(); $connection = m::mock('stdClass'); $connector->expects($this->once())->method('getOptions')->with($this->equalTo($config))->will($this->returnValue(['options'])); $connector->expects($this->once())->method('createConnection')->with($this->equalTo($dsn), $this->equalTo($config), $this->equalTo(['options']))->will($this->returnValue($connection)); $result = $connector->connect($config); $this->assertSame($result, $connection); } public function testSqlServerConnectCallsCreateConnectionWithOptionalArguments() { $config = ['host' => 'foo', 'database' => 'bar', 'port' => 111, 'readonly' => true, 'charset' => 'utf-8', 'pooling' => false, 'appname' => 'baz']; $dsn = $this->getDsn($config); $connector = $this->getMockBuilder('Illuminate\Database\Connectors\SqlServerConnector')->setMethods(['createConnection', 'getOptions'])->getMock(); $connection = m::mock('stdClass'); $connector->expects($this->once())->method('getOptions')->with($this->equalTo($config))->will($this->returnValue(['options'])); $connector->expects($this->once())->method('createConnection')->with($this->equalTo($dsn), $this->equalTo($config), $this->equalTo(['options']))->will($this->returnValue($connection)); $result = $connector->connect($config); $this->assertSame($result, $connection); } public function testSqlServerConnectCallsCreateConnectionWithPreferredODBC() { if (! in_array('odbc', PDO::getAvailableDrivers())) { $this->markTestSkipped('PHP was compiled without PDO ODBC support.'); } $config = ['odbc' => true, 'odbc_datasource_name' => 'server=localhost;database=test;']; $dsn = $this->getDsn($config); $connector = $this->getMockBuilder('Illuminate\Database\Connectors\SqlServerConnector')->setMethods(['createConnection', 'getOptions'])->getMock(); $connection = m::mock('stdClass'); $connector->expects($this->once())->method('getOptions')->with($this->equalTo($config))->will($this->returnValue(['options'])); $connector->expects($this->once())->method('createConnection')->with($this->equalTo($dsn), $this->equalTo($config), $this->equalTo(['options']))->will($this->returnValue($connection)); $result = $connector->connect($config); $this->assertSame($result, $connection); } protected function getDsn(array $config) { extract($config, EXTR_SKIP); $availableDrivers = PDO::getAvailableDrivers(); if (in_array('odbc', $availableDrivers) && ($config['odbc'] ?? null) === true) { return isset($config['odbc_datasource_name']) ? 'odbc:'.$config['odbc_datasource_name'] : ''; } if (in_array('sqlsrv', $availableDrivers)) { $port = isset($config['port']) ? ','.$port : ''; $appname = isset($config['appname']) ? ';APP='.$config['appname'] : ''; $readonly = isset($config['readonly']) ? ';ApplicationIntent=ReadOnly' : ''; $pooling = (isset($config['pooling']) && $config['pooling'] == false) ? ';ConnectionPooling=0' : ''; return "sqlsrv:Server={$host}{$port};Database={$database}{$readonly}{$pooling}{$appname}"; } else { $port = isset($config['port']) ? 
':'.$port : ''; $appname = isset($config['appname']) ? ';appname='.$config['appname'] : ''; $charset = isset($config['charset']) ? ';charset='.$config['charset'] : ''; return "dblib:host={$host}{$port};dbname={$database}{$charset}{$appname}"; } } }
mit
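The getDsn() helper at the end of the test above mirrors the connector's branching: an odbc: DSN when PDO-ODBC is preferred, sqlsrv:Server=host,port;... when the sqlsrv driver is available, and dblib:host=host:port;... otherwise. A small Java restatement of the two driver-specific shapes (illustrative, not the PHP connector) highlights the detail that is easy to miss: sqlsrv joins host and port with a comma, while dblib uses a colon.

// Illustrative restatement of the DSN shapes the Laravel test exercises.
public class SqlServerDsnDemo {
    static String dsn(String driver, String host, int port, String db) {
        switch (driver) {
            case "sqlsrv":
                // sqlsrv separates host and port with a comma
                return String.format("sqlsrv:Server=%s,%d;Database=%s", host, port, db);
            case "dblib":
                // dblib separates host and port with a colon
                return String.format("dblib:host=%s:%d;dbname=%s", host, port, db);
            default:
                throw new IllegalArgumentException(driver);
        }
    }

    public static void main(String[] args) {
        System.out.println(dsn("sqlsrv", "foo", 111, "bar")); // sqlsrv:Server=foo,111;Database=bar
        System.out.println(dsn("dblib", "foo", 111, "bar"));  // dblib:host=foo:111;dbname=bar
    }
}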
mwveliz/sitio
app/dev/cache/twig/03/0320da792ba87815ee6130528cd7068d73f59dc26aa9c360ee0b590ffef4f52f.php
5229
<?php /* :noticiainterna:edit.html.twig */ class __TwigTemplate_ec2d40d35f34890cd5cde6169313ba0a39f3592be3fc27ed3bcd9b40060dee65 extends Twig_Template { public function __construct(Twig_Environment $env) { parent::__construct($env); // line 1 $this->parent = $this->loadTemplate("base.html.twig", ":noticiainterna:edit.html.twig", 1); $this->blocks = array( 'body' => array($this, 'block_body'), ); } protected function doGetParent(array $context) { return "base.html.twig"; } protected function doDisplay(array $context, array $blocks = array()) { $__internal_c47c5e596e2b6463731f58462153187808952c0b7207e35884491455f2776a94 = $this->env->getExtension("native_profiler"); $__internal_c47c5e596e2b6463731f58462153187808952c0b7207e35884491455f2776a94->enter($__internal_c47c5e596e2b6463731f58462153187808952c0b7207e35884491455f2776a94_prof = new Twig_Profiler_Profile($this->getTemplateName(), "template", ":noticiainterna:edit.html.twig")); $this->parent->display($context, array_merge($this->blocks, $blocks)); $__internal_c47c5e596e2b6463731f58462153187808952c0b7207e35884491455f2776a94->leave($__internal_c47c5e596e2b6463731f58462153187808952c0b7207e35884491455f2776a94_prof); } // line 3 public function block_body($context, array $blocks = array()) { $__internal_1b2185fb28edeeaba7f5aec8edcbef688f1a9518d9eef5a1e9fc8117612d7618 = $this->env->getExtension("native_profiler"); $__internal_1b2185fb28edeeaba7f5aec8edcbef688f1a9518d9eef5a1e9fc8117612d7618->enter($__internal_1b2185fb28edeeaba7f5aec8edcbef688f1a9518d9eef5a1e9fc8117612d7618_prof = new Twig_Profiler_Profile($this->getTemplateName(), "block", "body")); // line 4 echo " <div class=\"row\"> <div class=\"col-xs-12 col-sm-12\tcol-md-12 col-lg-12 \"> <!-- BEGIN SAMPLE FORM PORTLET--> <div class=\"portlet light bordered\"> <div class=\"portlet-title\"> </div> <div class=\"portlet-body form\"> "; // line 11 echo $this->env->getExtension('form')->renderer->renderBlock((isset($context["edit_form"]) ? $context["edit_form"] : $this->getContext($context, "edit_form")), 'form_start'); echo $this->env->getExtension('stfalcon_tinymce')->tinymceInit(); echo " "; // line 12 echo $this->env->getExtension('form')->renderer->searchAndRenderBlock((isset($context["edit_form"]) ? $context["edit_form"] : $this->getContext($context, "edit_form")), 'widget'); echo " <button type=\"reset\" class=\"btn btn-primary\" id=\"clear\">Limpiar</button> <a href=\""; // line 14 echo $this->env->getExtension('routing')->getPath("noticiainterna_index"); echo "\" class=\"btn btn-danger\" role=\"button\">Cancelar</a> <button type=\"submit\" class=\"btn btn-success \">Enviar</button> "; // line 16 echo $this->env->getExtension('form')->renderer->renderBlock((isset($context["edit_form"]) ? 
$context["edit_form"] : $this->getContext($context, "edit_form")), 'form_end'); echo " <br> <div class=\"form-actions\"> </div> </div> </div> <!-- END SAMPLE FORM PORTLET--> </div> </div> "; $__internal_1b2185fb28edeeaba7f5aec8edcbef688f1a9518d9eef5a1e9fc8117612d7618->leave($__internal_1b2185fb28edeeaba7f5aec8edcbef688f1a9518d9eef5a1e9fc8117612d7618_prof); } public function getTemplateName() { return ":noticiainterna:edit.html.twig"; } public function isTraitable() { return false; } public function getDebugInfo() { return array ( 64 => 16, 59 => 14, 54 => 12, 49 => 11, 40 => 4, 34 => 3, 11 => 1,); } } /* {% extends 'base.html.twig' %}*/ /* */ /* {% block body %}*/ /* <div class="row">*/ /* <div class="col-xs-12 col-sm-12 col-md-12 col-lg-12 ">*/ /* <!-- BEGIN SAMPLE FORM PORTLET-->*/ /* <div class="portlet light bordered">*/ /* <div class="portlet-title">*/ /* </div>*/ /* <div class="portlet-body form">*/ /* {{ form_start(edit_form) }}{{ tinymce_init() }}*/ /* {{ form_widget(edit_form) }}*/ /* <button type="reset" class="btn btn-primary" id="clear">Limpiar</button>*/ /* <a href="{{ path('noticiainterna_index') }}" class="btn btn-danger" role="button">Cancelar</a>*/ /* <button type="submit" class="btn btn-success ">Enviar</button>*/ /* {{ form_end(edit_form) }}*/ /* <br>*/ /* <div class="form-actions">*/ /* </div>*/ /* </div>*/ /* </div>*/ /* <!-- END SAMPLE FORM PORTLET-->*/ /* </div>*/ /* </div>*/ /* */ /* {% endblock %}*/ /* */
mit
ocultcoin/ocultcoin
src/init.cpp
35648
// Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2012 The Bitcoin developers // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "txdb.h" #include "walletdb.h" #include "bitcoinrpc.h" #include "net.h" #include "init.h" #include "util.h" #include "ui_interface.h" #include "checkpoints.h" #include <boost/filesystem.hpp> #include <boost/filesystem/fstream.hpp> #include <boost/filesystem/convenience.hpp> #include <boost/interprocess/sync/file_lock.hpp> #include <boost/algorithm/string/predicate.hpp> #include <openssl/crypto.h> #ifndef WIN32 #include <signal.h> #endif using namespace std; using namespace boost; CWallet* pwalletMain; CClientUIInterface uiInterface; std::string strWalletFileName; bool fConfChange; bool fEnforceCanonical; unsigned int nNodeLifespan; unsigned int nDerivationMethodIndex; unsigned int nMinerSleep; bool fUseFastIndex; enum Checkpoints::CPMode CheckpointsMode; ////////////////////////////////////////////////////////////////////////////// // // Shutdown // void ExitTimeout(void* parg) { #ifdef WIN32 MilliSleep(5000); ExitProcess(0); #endif } void StartShutdown() { #ifdef QT_GUI // ensure we leave the Qt main loop for a clean GUI exit (Shutdown() is called in bitcoin.cpp afterwards) uiInterface.QueueShutdown(); #else // Without UI, Shutdown() can simply be started in a new thread NewThread(Shutdown, NULL); #endif } void Shutdown(void* parg) { static CCriticalSection cs_Shutdown; static bool fTaken; // Make this thread recognisable as the shutdown thread RenameThread("OcultCoin-shutoff"); bool fFirstThread = false; { TRY_LOCK(cs_Shutdown, lockShutdown); if (lockShutdown) { fFirstThread = !fTaken; fTaken = true; } } static bool fExit; if (fFirstThread) { fShutdown = true; nTransactionsUpdated++; // CTxDB().Close(); bitdb.Flush(false); StopNode(); bitdb.Flush(true); boost::filesystem::remove(GetPidFile()); UnregisterWallet(pwalletMain); delete pwalletMain; NewThread(ExitTimeout, NULL); MilliSleep(50); printf("OcultCoin exited\n\n"); fExit = true; #ifndef QT_GUI // ensure non-UI client gets exited here, but let Bitcoin-Qt reach 'return 0;' in bitcoin.cpp exit(0); #endif } else { while (!fExit) MilliSleep(500); MilliSleep(100); ExitThread(0); } } void HandleSIGTERM(int) { fRequestShutdown = true; } void HandleSIGHUP(int) { fReopenDebugLog = true; } ////////////////////////////////////////////////////////////////////////////// // // Start // #if !defined(QT_GUI) bool AppInit(int argc, char* argv[]) { bool fRet = false; try { // // Parameters // // If Qt is used, parameters/bitcoin.conf are parsed in qt/bitcoin.cpp's main() ParseParameters(argc, argv); if (!boost::filesystem::is_directory(GetDataDir(false))) { fprintf(stderr, "Error: Specified directory does not exist\n"); Shutdown(NULL); } ReadConfigFile(mapArgs, mapMultiArgs); if (mapArgs.count("-?") || mapArgs.count("--help")) { // First part of help message is specific to bitcoind / RPC client std::string strUsage = _("OcultCoin version") + " " + FormatFullVersion() + "\n\n" + _("Usage:") + "\n" + " OcultCoind [options] " + "\n" + " OcultCoind [options] <command> [params] " + _("Send command to -server or OcultCoind") + "\n" + " OcultCoind [options] help " + _("List commands") + "\n" + " OcultCoind [options] help <command> " + _("Get help for a command") + "\n"; strUsage += "\n" + HelpMessage(); fprintf(stdout, "%s", strUsage.c_str()); return false; } // Command-line RPC for (int i = 1; i < argc; i++) if 
(!IsSwitchChar(argv[i][0]) && !boost::algorithm::istarts_with(argv[i], "OcultCoin:")) fCommandLine = true; if (fCommandLine) { int ret = CommandLineRPC(argc, argv); exit(ret); } fRet = AppInit2(); } catch (std::exception& e) { PrintException(&e, "AppInit()"); } catch (...) { PrintException(NULL, "AppInit()"); } if (!fRet) Shutdown(NULL); return fRet; } extern void noui_connect(); int main(int argc, char* argv[]) { bool fRet = false; // Connect bitcoind signal handlers noui_connect(); fRet = AppInit(argc, argv); if (fRet && fDaemon) return 0; return 1; } #endif bool static InitError(const std::string &str) { uiInterface.ThreadSafeMessageBox(str, _("OcultCoin"), CClientUIInterface::OK | CClientUIInterface::MODAL); return false; } bool static InitWarning(const std::string &str) { uiInterface.ThreadSafeMessageBox(str, _("OcultCoin"), CClientUIInterface::OK | CClientUIInterface::ICON_EXCLAMATION | CClientUIInterface::MODAL); return true; } bool static Bind(const CService &addr, bool fError = true) { if (IsLimited(addr)) return false; std::string strError; if (!BindListenPort(addr, strError)) { if (fError) return InitError(strError); return false; } return true; } // Core-specific options shared between UI and daemon std::string HelpMessage() { string strUsage = _("Options:") + "\n" + " -? " + _("This help message") + "\n" + " -conf=<file> " + _("Specify configuration file (default: OcultCoin.conf)") + "\n" + " -pid=<file> " + _("Specify pid file (default: OcultCoind.pid)") + "\n" + " -datadir=<dir> " + _("Specify data directory") + "\n" + " -wallet=<dir> " + _("Specify wallet file (within data directory)") + "\n" + " -dbcache=<n> " + _("Set database cache size in megabytes (default: 25)") + "\n" + " -dblogsize=<n> " + _("Set database disk log size in megabytes (default: 100)") + "\n" + " -timeout=<n> " + _("Specify connection timeout in milliseconds (default: 5000)") + "\n" + " -proxy=<ip:port> " + _("Connect through socks proxy") + "\n" + " -socks=<n> " + _("Select the version of socks proxy to use (4-5, default: 5)") + "\n" + " -tor=<ip:port> " + _("Use proxy to reach tor hidden services (default: same as -proxy)") + "\n" " -dns " + _("Allow DNS lookups for -addnode, -seednode and -connect") + "\n" + " -port=<port> " + _("Listen for connections on <port> (default: 14973 or testnet: 14972)") + "\n" + " -maxconnections=<n> " + _("Maintain at most <n> connections to peers (default: 125)") + "\n" + " -addnode=<ip> " + _("Add a node to connect to and attempt to keep the connection open") + "\n" + " -connect=<ip> " + _("Connect only to the specified node(s)") + "\n" + " -seednode=<ip> " + _("Connect to a node to retrieve peer addresses, and disconnect") + "\n" + " -externalip=<ip> " + _("Specify your own public address") + "\n" + " -onlynet=<net> " + _("Only connect to nodes in network <net> (IPv4, IPv6 or Tor)") + "\n" + " -discover " + _("Discover own IP address (default: 1 when listening and no -externalip)") + "\n" + " -irc " + _("Find peers using internet relay chat (default: 0 (off)") + "\n" + " -listen " + _("Accept connections from outside (default: 1 if no -proxy or -connect)") + "\n" + " -bind=<addr> " + _("Bind to given address. Use [host]:port notation for IPv6") + "\n" + " -dnsseed " + _("Find peers using DNS lookup (default: 1)") + "\n" + " -staking " + _("Stake your coins to support network and gain reward (default: 1)") + "\n" + " -synctime " + _("Sync time with other nodes. Disable if time on your system is precise e.g. 
syncing with NTP (default: 1)") + "\n" + " -cppolicy " + _("Sync checkpoints policy (default: strict)") + "\n" + " -banscore=<n> " + _("Threshold for disconnecting misbehaving peers (default: 100)") + "\n" + " -bantime=<n> " + _("Number of seconds to keep misbehaving peers from reconnecting (default: 86400)") + "\n" + " -maxreceivebuffer=<n> " + _("Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)") + "\n" + " -maxsendbuffer=<n> " + _("Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)") + "\n" + #ifdef USE_UPNP #if USE_UPNP " -upnp " + _("Use UPnP to map the listening port (default: 1 when listening)") + "\n" + #else " -upnp " + _("Use UPnP to map the listening port (default: 0)") + "\n" + #endif #endif " -detachdb " + _("Detach block and address databases. Increases shutdown time (default: 0)") + "\n" + " -paytxfee=<amt> " + _("Fee per KB to add to transactions you send") + "\n" + " -mininput=<amt> " + _("When creating transactions, ignore inputs with value less than this (default: 0.01)") + "\n" + #ifdef QT_GUI " -server " + _("Accept command line and JSON-RPC commands") + "\n" + #endif #if !defined(WIN32) && !defined(QT_GUI) " -daemon " + _("Run in the background as a daemon and accept commands") + "\n" + #endif " -testnet " + _("Use the test network") + "\n" + " -debug " + _("Output extra debugging information. Implies all other -debug* options") + "\n" + " -debugnet " + _("Output extra network debugging information") + "\n" + " -logtimestamps " + _("Prepend debug output with timestamp") + "\n" + " -shrinkdebugfile " + _("Shrink debug.log file on client startup (default: 1 when no -debug)") + "\n" + " -printtoconsole " + _("Send trace/debug info to console instead of debug.log file") + "\n" + #ifdef WIN32 " -printtodebugger " + _("Send trace/debug info to debugger") + "\n" + #endif " -rpcuser=<user> " + _("Username for JSON-RPC connections") + "\n" + " -rpcpassword=<pw> " + _("Password for JSON-RPC connections") + "\n" + " -rpcport=<port> " + _("Listen for JSON-RPC connections on <port> (default: 24973 or testnet: 24972)") + "\n" + " -rpcallowip=<ip> " + _("Allow JSON-RPC connections from specified IP address") + "\n" + " -rpcconnect=<ip> " + _("Send commands to node running on <ip> (default: 127.0.0.1)") + "\n" + " -blocknotify=<cmd> " + _("Execute command when the best block changes (%s in cmd is replaced by block hash)") + "\n" + " -walletnotify=<cmd> " + _("Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)") + "\n" + " -confchange " + _("Require a confirmations for change (default: 0)") + "\n" + " -enforcecanonical " + _("Enforce transaction scripts to use canonical PUSH operators (default: 1)") + "\n" + " -alertnotify=<cmd> " + _("Execute command when a relevant alert is received (%s in cmd is replaced by message)") + "\n" + " -upgradewallet " + _("Upgrade wallet to latest format") + "\n" + " -keypool=<n> " + _("Set key pool size to <n> (default: 100)") + "\n" + " -rescan " + _("Rescan the block chain for missing wallet transactions") + "\n" + " -salvagewallet " + _("Attempt to recover private keys from a corrupt wallet.dat") + "\n" + " -checkblocks=<n> " + _("How many blocks to check at startup (default: 2500, 0 = all)") + "\n" + " -checklevel=<n> " + _("How thorough the block verification is (0-6, default: 1)") + "\n" + " -loadblock=<file> " + _("Imports blocks from external blk000?.dat file") + "\n" + "\n" + _("Block creation options:") + "\n" + " -blockminsize=<n> " + _("Set minimum block size in bytes 
(default: 0)") + "\n" + " -blockmaxsize=<n> " + _("Set maximum block size in bytes (default: 250000)") + "\n" + " -blockprioritysize=<n> " + _("Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)") + "\n" + "\n" + _("SSL options: (see the Bitcoin Wiki for SSL setup instructions)") + "\n" + " -rpcssl " + _("Use OpenSSL (https) for JSON-RPC connections") + "\n" + " -rpcsslcertificatechainfile=<file.cert> " + _("Server certificate file (default: server.cert)") + "\n" + " -rpcsslprivatekeyfile=<file.pem> " + _("Server private key (default: server.pem)") + "\n" + " -rpcsslciphers=<ciphers> " + _("Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)") + "\n"; return strUsage; } /** Initialize bitcoin. * @pre Parameters should be parsed and config file should be read. */ bool AppInit2() { // ********************************************************* Step 1: setup #ifdef _MSC_VER // Turn off Microsoft heap dump noise _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_WARN, CreateFileA("NUL", GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, 0)); #endif #if _MSC_VER >= 1400 // Disable confusing "helpful" text message on abort, Ctrl-C _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); #endif #ifdef WIN32 // Enable Data Execution Prevention (DEP) // Minimum supported OS versions: WinXP SP3, WinVista >= SP1, Win Server 2008 // A failure is non-critical and needs no further attention! #ifndef PROCESS_DEP_ENABLE // We define this here, because GCCs winbase.h limits this to _WIN32_WINNT >= 0x0601 (Windows 7), // which is not correct. Can be removed, when GCCs winbase.h is fixed! #define PROCESS_DEP_ENABLE 0x00000001 #endif typedef BOOL (WINAPI *PSETPROCDEPPOL)(DWORD); PSETPROCDEPPOL setProcDEPPol = (PSETPROCDEPPOL)GetProcAddress(GetModuleHandleA("Kernel32.dll"), "SetProcessDEPPolicy"); if (setProcDEPPol != NULL) setProcDEPPol(PROCESS_DEP_ENABLE); #endif #ifndef WIN32 umask(077); // Clean shutdown on SIGTERM struct sigaction sa; sa.sa_handler = HandleSIGTERM; sigemptyset(&sa.sa_mask); sa.sa_flags = 0; sigaction(SIGTERM, &sa, NULL); sigaction(SIGINT, &sa, NULL); // Reopen debug.log on SIGHUP struct sigaction sa_hup; sa_hup.sa_handler = HandleSIGHUP; sigemptyset(&sa_hup.sa_mask); sa_hup.sa_flags = 0; sigaction(SIGHUP, &sa_hup, NULL); #endif // ********************************************************* Step 2: parameter interactions nNodeLifespan = GetArg("-addrlifespan", 7); fUseFastIndex = GetBoolArg("-fastindex", true); nMinerSleep = GetArg("-minersleep", 500); CheckpointsMode = Checkpoints::STRICT; std::string strCpMode = GetArg("-cppolicy", "strict"); if(strCpMode == "strict") CheckpointsMode = Checkpoints::STRICT; if(strCpMode == "advisory") CheckpointsMode = Checkpoints::ADVISORY; if(strCpMode == "permissive") CheckpointsMode = Checkpoints::PERMISSIVE; nDerivationMethodIndex = 0; fTestNet = GetBoolArg("-testnet"); //fTestNet = true; if (fTestNet) { SoftSetBoolArg("-irc", true); } if (mapArgs.count("-bind")) { // when specifying an explicit binding address, you want to listen on it // even when -connect or -proxy is specified SoftSetBoolArg("-listen", true); } if (mapArgs.count("-connect") && mapMultiArgs["-connect"].size() > 0) { // when only connecting to trusted nodes, do not seed via DNS, or listen by default SoftSetBoolArg("-dnsseed", false); SoftSetBoolArg("-listen", false); } if (mapArgs.count("-proxy")) { // to protect privacy, do not listen by default if a proxy server is specified SoftSetBoolArg("-listen", false); 
} if (!GetBoolArg("-listen", true)) { // do not map ports or try to retrieve public IP when not listening (pointless) SoftSetBoolArg("-upnp", false); SoftSetBoolArg("-discover", false); } if (mapArgs.count("-externalip")) { // if an explicit public IP is specified, do not try to find others SoftSetBoolArg("-discover", false); } if (GetBoolArg("-salvagewallet")) { // Rewrite just private keys: rescan to find transactions SoftSetBoolArg("-rescan", true); } // ********************************************************* Step 3: parameter-to-internal-flags fDebug = GetBoolArg("-debug"); // -debug implies fDebug* if (fDebug) fDebugNet = true; else fDebugNet = GetBoolArg("-debugnet"); bitdb.SetDetach(GetBoolArg("-detachdb", false)); #if !defined(WIN32) && !defined(QT_GUI) fDaemon = GetBoolArg("-daemon"); #else fDaemon = false; #endif if (fDaemon) fServer = true; else fServer = GetBoolArg("-server"); /* force fServer when running without GUI */ #if !defined(QT_GUI) fServer = true; #endif fPrintToConsole = GetBoolArg("-printtoconsole"); fPrintToDebugger = GetBoolArg("-printtodebugger"); fLogTimestamps = GetBoolArg("-logtimestamps"); if (mapArgs.count("-timeout")) { int nNewTimeout = GetArg("-timeout", 5000); if (nNewTimeout > 0 && nNewTimeout < 600000) nConnectTimeout = nNewTimeout; } if (mapArgs.count("-paytxfee")) { if (!ParseMoney(mapArgs["-paytxfee"], nTransactionFee)) return InitError(strprintf(_("Invalid amount for -paytxfee=<amount>: '%s'"), mapArgs["-paytxfee"].c_str())); if (nTransactionFee > 0.25 * COIN) InitWarning(_("Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.")); } fConfChange = GetBoolArg("-confchange", false); fEnforceCanonical = GetBoolArg("-enforcecanonical", true); if (mapArgs.count("-mininput")) { if (!ParseMoney(mapArgs["-mininput"], nMinimumInputValue)) return InitError(strprintf(_("Invalid amount for -mininput=<amount>: '%s'"), mapArgs["-mininput"].c_str())); } // ********************************************************* Step 4: application initialization: dir lock, daemonize, pidfile, debug log std::string strDataDir = GetDataDir().string(); std::string strWalletFileName = GetArg("-wallet", "wallet.dat"); // strWalletFileName must be a plain filename without a directory if (strWalletFileName != boost::filesystem::basename(strWalletFileName) + boost::filesystem::extension(strWalletFileName)) return InitError(strprintf(_("Wallet %s resides outside data directory %s."), strWalletFileName.c_str(), strDataDir.c_str())); // Make sure only a single Bitcoin process is using the data directory. boost::filesystem::path pathLockFile = GetDataDir() / ".lock"; FILE* file = fopen(pathLockFile.string().c_str(), "a"); // empty lock file; created if it doesn't exist. if (file) fclose(file); static boost::interprocess::file_lock lock(pathLockFile.string().c_str()); if (!lock.try_lock()) return InitError(strprintf(_("Cannot obtain a lock on data directory %s. 
OcultCoin is probably already running."), strDataDir.c_str())); #if !defined(WIN32) && !defined(QT_GUI) if (fDaemon) { // Daemonize pid_t pid = fork(); if (pid < 0) { fprintf(stderr, "Error: fork() returned %d errno %d\n", pid, errno); return false; } if (pid > 0) { CreatePidFile(GetPidFile(), pid); return true; } pid_t sid = setsid(); if (sid < 0) fprintf(stderr, "Error: setsid() returned %d errno %d\n", sid, errno); } #endif if (GetBoolArg("-shrinkdebugfile", !fDebug)) ShrinkDebugFile(); printf("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"); printf("OcultCoin version %s (%s)\n", FormatFullVersion().c_str(), CLIENT_DATE.c_str()); printf("Using OpenSSL version %s\n", SSLeay_version(SSLEAY_VERSION)); if (!fLogTimestamps) printf("Startup time: %s\n", DateTimeStrFormat("%x %H:%M:%S", GetTime()).c_str()); printf("Default data directory %s\n", GetDefaultDataDir().string().c_str()); printf("Used data directory %s\n", strDataDir.c_str()); std::ostringstream strErrors; if (fDaemon) fprintf(stdout, "OcultCoin server starting\n"); int64_t nStart; // ********************************************************* Step 5: verify database integrity uiInterface.InitMessage(_("Verifying database integrity...")); if (!bitdb.Open(GetDataDir())) { string msg = strprintf(_("Error initializing database environment %s!" " To recover, BACKUP THAT DIRECTORY, then remove" " everything from it except for wallet.dat."), strDataDir.c_str()); return InitError(msg); } if (GetBoolArg("-salvagewallet")) { // Recover readable keypairs: if (!CWalletDB::Recover(bitdb, strWalletFileName, true)) return false; } if (filesystem::exists(GetDataDir() / strWalletFileName)) { CDBEnv::VerifyResult r = bitdb.Verify(strWalletFileName, CWalletDB::Recover); if (r == CDBEnv::RECOVER_OK) { string msg = strprintf(_("Warning: wallet.dat corrupt, data salvaged!" " Original wallet.dat saved as wallet.{timestamp}.bak in %s; if" " your balance or transactions are incorrect you should" " restore from a backup."), strDataDir.c_str()); uiInterface.ThreadSafeMessageBox(msg, _("OcultCoin"), CClientUIInterface::OK | CClientUIInterface::ICON_EXCLAMATION | CClientUIInterface::MODAL); } if (r == CDBEnv::RECOVER_FAIL) return InitError(_("wallet.dat corrupt, salvage failed")); } // ********************************************************* Step 6: network initialization int nSocksVersion = GetArg("-socks", 5); if (nSocksVersion != 4 && nSocksVersion != 5) return InitError(strprintf(_("Unknown -socks proxy version requested: %i"), nSocksVersion)); if (mapArgs.count("-onlynet")) { std::set<enum Network> nets; BOOST_FOREACH(std::string snet, mapMultiArgs["-onlynet"]) { enum Network net = ParseNetwork(snet); if (net == NET_UNROUTABLE) return InitError(strprintf(_("Unknown network specified in -onlynet: '%s'"), snet.c_str())); nets.insert(net); } for (int n = 0; n < NET_MAX; n++) { enum Network net = (enum Network)n; if (!nets.count(net)) SetLimited(net); } } #if defined(USE_IPV6) #if ! 
USE_IPV6 else SetLimited(NET_IPV6); #endif #endif CService addrProxy; bool fProxy = false; if (mapArgs.count("-proxy")) { addrProxy = CService(mapArgs["-proxy"], 9050); if (!addrProxy.IsValid()) return InitError(strprintf(_("Invalid -proxy address: '%s'"), mapArgs["-proxy"].c_str())); if (!IsLimited(NET_IPV4)) SetProxy(NET_IPV4, addrProxy, nSocksVersion); if (nSocksVersion > 4) { #ifdef USE_IPV6 if (!IsLimited(NET_IPV6)) SetProxy(NET_IPV6, addrProxy, nSocksVersion); #endif SetNameProxy(addrProxy, nSocksVersion); } fProxy = true; } // -tor can override normal proxy, -notor disables tor entirely if (!(mapArgs.count("-tor") && mapArgs["-tor"] == "0") && (fProxy || mapArgs.count("-tor"))) { CService addrOnion; if (!mapArgs.count("-tor")) addrOnion = addrProxy; else addrOnion = CService(mapArgs["-tor"], 9050); if (!addrOnion.IsValid()) return InitError(strprintf(_("Invalid -tor address: '%s'"), mapArgs["-tor"].c_str())); SetProxy(NET_TOR, addrOnion, 5); SetReachable(NET_TOR); } // see Step 2: parameter interactions for more information about these fNoListen = !GetBoolArg("-listen", true); fDiscover = GetBoolArg("-discover", true); fNameLookup = GetBoolArg("-dns", true); #ifdef USE_UPNP fUseUPnP = GetBoolArg("-upnp", USE_UPNP); #endif bool fBound = false; if (!fNoListen) { std::string strError; if (mapArgs.count("-bind")) { BOOST_FOREACH(std::string strBind, mapMultiArgs["-bind"]) { CService addrBind; if (!Lookup(strBind.c_str(), addrBind, GetListenPort(), false)) return InitError(strprintf(_("Cannot resolve -bind address: '%s'"), strBind.c_str())); fBound |= Bind(addrBind); } } else { struct in_addr inaddr_any; inaddr_any.s_addr = INADDR_ANY; #ifdef USE_IPV6 if (!IsLimited(NET_IPV6)) fBound |= Bind(CService(in6addr_any, GetListenPort()), false); #endif if (!IsLimited(NET_IPV4)) fBound |= Bind(CService(inaddr_any, GetListenPort()), !fBound); } if (!fBound) return InitError(_("Failed to listen on any port. Use -listen=0 if you want this.")); } if (mapArgs.count("-externalip")) { BOOST_FOREACH(string strAddr, mapMultiArgs["-externalip"]) { CService addrLocal(strAddr, GetListenPort(), fNameLookup); if (!addrLocal.IsValid()) return InitError(strprintf(_("Cannot resolve -externalip address: '%s'"), strAddr.c_str())); AddLocal(CService(strAddr, GetListenPort(), fNameLookup), LOCAL_MANUAL); } } if (mapArgs.count("-reservebalance")) // OcultCoin: reserve balance amount { if (!ParseMoney(mapArgs["-reservebalance"], nReserveBalance)) { InitError(_("Invalid amount for -reservebalance=<amount>")); return false; } } if (mapArgs.count("-checkpointkey")) // OcultCoin: checkpoint master priv key { if (!Checkpoints::SetCheckpointPrivKey(GetArg("-checkpointkey", ""))) InitError(_("Unable to sign checkpoint, wrong checkpointkey?\n")); } BOOST_FOREACH(string strDest, mapMultiArgs["-seednode"]) AddOneShot(strDest); // ********************************************************* Step 7: load blockchain if (!bitdb.Open(GetDataDir())) { string msg = strprintf(_("Error initializing database environment %s!" 
" To recover, BACKUP THAT DIRECTORY, then remove" " everything from it except for wallet.dat."), strDataDir.c_str()); return InitError(msg); } if (GetBoolArg("-loadblockindextest")) { CTxDB txdb("r"); txdb.LoadBlockIndex(); PrintBlockTree(); return false; } uiInterface.InitMessage(_("Loading block index...")); printf("Loading block index...\n"); nStart = GetTimeMillis(); if (!LoadBlockIndex()) return InitError(_("Error loading blkindex.dat")); // as LoadBlockIndex can take several minutes, it's possible the user // requested to kill bitcoin-qt during the last operation. If so, exit. // As the program has not fully started yet, Shutdown() is possibly overkill. if (fRequestShutdown) { printf("Shutdown requested. Exiting.\n"); return false; } printf(" block index %15"PRId64"ms\n", GetTimeMillis() - nStart); if (GetBoolArg("-printblockindex") || GetBoolArg("-printblocktree")) { PrintBlockTree(); return false; } if (mapArgs.count("-printblock")) { string strMatch = mapArgs["-printblock"]; int nFound = 0; for (map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.begin(); mi != mapBlockIndex.end(); ++mi) { uint256 hash = (*mi).first; if (strncmp(hash.ToString().c_str(), strMatch.c_str(), strMatch.size()) == 0) { CBlockIndex* pindex = (*mi).second; CBlock block; block.ReadFromDisk(pindex); block.BuildMerkleTree(); block.print(); printf("\n"); nFound++; } } if (nFound == 0) printf("No blocks matching %s were found\n", strMatch.c_str()); return false; } // ********************************************************* Step 8: load wallet uiInterface.InitMessage(_("Loading wallet...")); printf("Loading wallet...\n"); nStart = GetTimeMillis(); bool fFirstRun = true; pwalletMain = new CWallet(strWalletFileName); DBErrors nLoadWalletRet = pwalletMain->LoadWallet(fFirstRun); if (nLoadWalletRet != DB_LOAD_OK) { if (nLoadWalletRet == DB_CORRUPT) strErrors << _("Error loading wallet.dat: Wallet corrupted") << "\n"; else if (nLoadWalletRet == DB_NONCRITICAL_ERROR) { string msg(_("Warning: error reading wallet.dat! 
All keys read correctly, but transaction data" " or address book entries might be missing or incorrect.")); uiInterface.ThreadSafeMessageBox(msg, _("OcultCoin"), CClientUIInterface::OK | CClientUIInterface::ICON_EXCLAMATION | CClientUIInterface::MODAL); } else if (nLoadWalletRet == DB_TOO_NEW) strErrors << _("Error loading wallet.dat: Wallet requires newer version of OcultCoin") << "\n"; else if (nLoadWalletRet == DB_NEED_REWRITE) { strErrors << _("Wallet needed to be rewritten: restart OcultCoin to complete") << "\n"; printf("%s", strErrors.str().c_str()); return InitError(strErrors.str()); } else strErrors << _("Error loading wallet.dat") << "\n"; } if (GetBoolArg("-upgradewallet", fFirstRun)) { int nMaxVersion = GetArg("-upgradewallet", 0); if (nMaxVersion == 0) // the -upgradewallet without argument case { printf("Performing wallet upgrade to %i\n", FEATURE_LATEST); nMaxVersion = CLIENT_VERSION; pwalletMain->SetMinVersion(FEATURE_LATEST); // permanently upgrade the wallet immediately } else printf("Allowing wallet upgrade up to %i\n", nMaxVersion); if (nMaxVersion < pwalletMain->GetVersion()) strErrors << _("Cannot downgrade wallet") << "\n"; pwalletMain->SetMaxVersion(nMaxVersion); } if (fFirstRun) { // Create new keyUser and set as default key RandAddSeedPerfmon(); CPubKey newDefaultKey; if (!pwalletMain->GetKeyFromPool(newDefaultKey, false)) strErrors << _("Cannot initialize keypool") << "\n"; pwalletMain->SetDefaultKey(newDefaultKey); if (!pwalletMain->SetAddressBookName(pwalletMain->vchDefaultKey.GetID(), "")) strErrors << _("Cannot write default address") << "\n"; } printf("%s", strErrors.str().c_str()); printf(" wallet %15"PRId64"ms\n", GetTimeMillis() - nStart); RegisterWallet(pwalletMain); CBlockIndex *pindexRescan = pindexBest; if (GetBoolArg("-rescan")) pindexRescan = pindexGenesisBlock; else { CWalletDB walletdb(strWalletFileName); CBlockLocator locator; if (walletdb.ReadBestBlock(locator)) pindexRescan = locator.GetBlockIndex(); } if (pindexBest != pindexRescan && pindexBest && pindexRescan && pindexBest->nHeight > pindexRescan->nHeight) { uiInterface.InitMessage(_("Rescanning...")); printf("Rescanning last %i blocks (from block %i)...\n", pindexBest->nHeight - pindexRescan->nHeight, pindexRescan->nHeight); nStart = GetTimeMillis(); pwalletMain->ScanForWalletTransactions(pindexRescan, true); printf(" rescan %15"PRId64"ms\n", GetTimeMillis() - nStart); } // ********************************************************* Step 9: import blocks if (mapArgs.count("-loadblock")) { uiInterface.InitMessage(_("Importing blockchain data file.")); BOOST_FOREACH(string strFile, mapMultiArgs["-loadblock"]) { FILE *file = fopen(strFile.c_str(), "rb"); if (file) LoadExternalBlockFile(file); } exit(0); } filesystem::path pathBootstrap = GetDataDir() / "bootstrap.dat"; if (filesystem::exists(pathBootstrap)) { uiInterface.InitMessage(_("Importing bootstrap blockchain data file.")); FILE *file = fopen(pathBootstrap.string().c_str(), "rb"); if (file) { filesystem::path pathBootstrapOld = GetDataDir() / "bootstrap.dat.old"; LoadExternalBlockFile(file); RenameOver(pathBootstrap, pathBootstrapOld); } } // ********************************************************* Step 10: load peers uiInterface.InitMessage(_("Loading addresses...")); printf("Loading addresses...\n"); nStart = GetTimeMillis(); { CAddrDB adb; if (!adb.Read(addrman)) printf("Invalid or missing peers.dat; recreating\n"); } printf("Loaded %i addresses from peers.dat %"PRId64"ms\n", addrman.size(), GetTimeMillis() - nStart); // 
********************************************************* Step 11: start node

    if (!CheckDiskSpace())
        return false;

    RandAddSeedPerfmon();

    //// debug print
    printf("mapBlockIndex.size() = %"PRIszu"\n", mapBlockIndex.size());
    printf("nBestHeight = %d\n", nBestHeight);
    printf("setKeyPool.size() = %"PRIszu"\n", pwalletMain->setKeyPool.size());
    printf("mapWallet.size() = %"PRIszu"\n", pwalletMain->mapWallet.size());
    printf("mapAddressBook.size() = %"PRIszu"\n", pwalletMain->mapAddressBook.size());

    if (!NewThread(StartNode, NULL))
        InitError(_("Error: could not start node"));

    if (fServer)
        NewThread(ThreadRPCServer, NULL);

    // ********************************************************* Step 12: finished

    uiInterface.InitMessage(_("Done loading"));
    printf("Done loading\n");

    if (!strErrors.str().empty())
        return InitError(strErrors.str());

    // Add wallet transactions that aren't already in a block to mapTransactions
    pwalletMain->ReacceptWalletTransactions();

#if !defined(QT_GUI)
    // Loop until process is exit()ed from shutdown() function,
    // called from ThreadRPCServer thread when a "stop" command is received.
    while (1)
        MilliSleep(5000);
#endif

    return true;
}
mit
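Step 4 of AppInit2 above daemonizes with the classic POSIX sequence: a single fork(), the parent writing the child's pid to the pid file and exiting, the child calling setsid() to detach. A minimal Python sketch of the same sequence, with a placeholder pid-file path standing in for OcultCoin's GetPidFile():

import os
import sys

def daemonize(pid_file="daemon.pid"):
    try:
        pid = os.fork()                      # single fork, as in AppInit2's Step 4
    except OSError as err:
        sys.exit("fork() failed: %s" % err)
    if pid > 0:
        with open(pid_file, "w") as handle:  # parent: record the child's pid ...
            handle.write(str(pid))
        sys.exit(0)                          # ... and exit, leaving the child running
    os.setsid()                              # child: new session, detached from the tty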
lenin-anzen/bb-widgets
collection-myportal/src/widget-myportal-newpassword-ng/scripts/controller.js
1640
/**
 * @module widget-myportal-newpassword-ng
 * @name NewpasswordController
 *
 * @description
 * New password
 */
import { E_AUTH, E_CONNECTIVITY } from "lib-bb-model-errors";

const errorMessage = (code) => ({
  [E_AUTH]: "error.load.auth",
  [E_CONNECTIVITY]: "error.load.connectivity",
}[code] || "error.load.unexpected");

export default function NewpasswordController(bus, hooks, widget, model) {
  const $ctrl = this;

  /**
   * AngularJS lifecycle hook used to initialize the controller
   *
   * @name NewpasswordController#$onInit
   * @returns {void}
   */
  const $onInit = () => {
    $ctrl.isLoading = true;

    model.load()
      .then(loaded => {
        $ctrl.items = hooks.itemsFromModel(loaded);
      })
      .catch(error => {
        $ctrl.error = errorMessage(error.code);
        bus.publish("widget-myportal-newpassword-ng.load.failed", { error });
      })
      .then(() => {
        $ctrl.isLoading = false;
      });

    bus.publish("cxp.item.loaded", {
      id: widget.getId(),
    });
  };

  Object.assign($ctrl, {
    $onInit,
    /**
     * @description
     * The value returned from {@link Hooks.processItems} hook.
     * null if the items aren't loaded.
     *
     * @name NewpasswordController#items
     * @type {any}
     */
    items: null,
    /**
     * @description
     * Loading status
     *
     * @name NewpasswordController#isLoading
     * @type {boolean}
     */
    isLoading: false,
    /**
     * @description
     * The error encountered when attempting to fetch from the model
     *
     * @name NewpasswordController#error
     * @type {ModelError}
     */
    error: null,
  });
}
mit
heyfaraday/rustcmb
src/fourier/fft_2d/mod.rs
31
pub mod sphere;
pub mod torus;
mit
opgginc/php-riotapi-request
src/RequestMethod/LolStaticData/ProfileIcons.php
1179
<?php
/**
 * Created by PhpStorm.
 * User: kargnas
 * Date: 2017-07-04
 * Time: 19:11
 */

namespace RiotQuest\RequestMethod\LolStaticData;

use RiotQuest\Constant\EndPoint;
use RiotQuest\Dto\LolStaticData\ProfileIcon\ProfileIconDataDto;
use RiotQuest\RequestMethod\Request;
use RiotQuest\RequestMethod\RequestMethodAbstract;
use GuzzleHttp\Psr7\Response;
use JsonMapper;

class ProfileIcons extends RequestMethodAbstract
{
    public $path = EndPoint::LOL_STATIC_DATA__PROFILE_ICONS;

    /** @var string */
    public $locale, $version;

    public function getRequest()
    {
        $uri = $this->platform->apiScheme . "://" . $this->platform->apiHost . $this->path;
        $query = static::buildParams([
            'locale' => $this->locale,
            'version' => $this->version
        ]);
        if (strlen($query) > 0) {
            $uri .= "?{$query}";
        }
        return $this->getPsr7Request('GET', $uri);
    }

    public function mapping(Response $response)
    {
        $json = \GuzzleHttp\json_decode($response->getBody());
        $mapper = new JsonMapper();
        return $mapper->map($json, new ProfileIconDataDto());
    }
}
mit
damiankaminski/HandyQuery
src/Language/HandyQuery.Language.Tests/Lexing/SearchTrieTests.cs
9922
using System.Collections.Generic; using System.Linq; using FluentAssertions; using HandyQuery.Language.Lexing; using NUnit.Framework; namespace HandyQuery.Language.Tests.Lexing { public class SearchTrieTests { [TestCaseSource(nameof(TestCases))] [TestCaseSource(nameof(TestCasesWithOffset))] public void Should_work_with_basic_test_cases(TestCase testCase) { var trie = SearchTrie<Person>.Create(testCase.CaseSensitive, testCase.Map); var reader = new LexerStringReader(testCase.Query, testCase.StartPosition); var found = trie.TryFind(reader, out var person, out var readLength); found.Should().Be(testCase.ShouldBeFound); person.Should().Be(testCase.ExpectedValue); readLength.Should().Be(testCase.ExpectedReadLength); } private static IEnumerable<TestCase> TestCasesWithOffset { get { foreach (var testCase in TestCases) { const string offsetValue = "OFFSET "; yield return new TestCase() { CaseSensitive = testCase.CaseSensitive, Map = testCase.Map, Query = offsetValue + testCase.Query, StartPosition = testCase.StartPosition + offsetValue.Length, ShouldBeFound = testCase.ShouldBeFound, ExpectedValue = testCase.ExpectedValue, ExpectedReadLength = testCase.ExpectedReadLength }; } } } private static IEnumerable<TestCase> TestCases { get { yield return new TestCase { CaseSensitive = true, Map = new Dictionary<string, Person> { ["test"] = "John" }, Query = "test", ShouldBeFound = true, ExpectedValue = "John", ExpectedReadLength = "test".Length }; yield return new TestCase { CaseSensitive = true, Map = new Dictionary<string, Person> { ["test"] = "John" }, Query = "tes", ShouldBeFound = false, ExpectedReadLength = 0 }; yield return new TestCase { CaseSensitive = true, Map = new Dictionary<string, Person> { ["test"] = "John" }, Query = "testt and even more stuff", ShouldBeFound = true, ExpectedValue = "John", ExpectedReadLength = "test".Length }; yield return new TestCase { CaseSensitive = true, Map = new Dictionary<string, Person> { ["foo"] = "Jane", ["bar"] = "Jack", ["foobar"] = "John", }, Query = "foobar", ShouldBeFound = true, ExpectedValue = "John", ExpectedReadLength = "foobar".Length }; yield return new TestCase { CaseSensitive = true, Map = new Dictionary<string, Person> { ["foo"] = "Jane", ["bar"] = "Jack", ["foo bar"] = "John", }, Query = "foo bar", ShouldBeFound = true, ExpectedValue = "John", ExpectedReadLength = "foo bar".Length }; yield return new TestCase { CaseSensitive = true, Map = new Dictionary<string, Person> { ["foo"] = "Jane", ["bar"] = "Jack", ["foo bar"] = "John", }, Query = "foo \tbar", ShouldBeFound = true, ExpectedValue = "John", ExpectedReadLength = "foo \tbar".Length }; yield return new TestCase { CaseSensitive = true, Map = new Dictionary<string, Person> { ["foo"] = "Jane", ["bar"] = "Jack", ["foobar"] = "John", }, Query = "foo bar", ShouldBeFound = true, ExpectedValue = "Jane", ExpectedReadLength = "foo".Length }; yield return new TestCase { CaseSensitive = true, Map = new Dictionary<string, Person> { ["foo"] = "Jane", ["bar"] = "Jack", ["foo bar"] = "John", }, Query = "foo bar", ShouldBeFound = true, ExpectedValue = "John", ExpectedReadLength = "foo bar".Length }; yield return new TestCase { CaseSensitive = true, Map = new Dictionary<string, Person> { ["foo"] = "Jane", ["bar"] = "Jack", ["foobar"] = "John", }, Query = "foo bar and even more stuff", ShouldBeFound = true, ExpectedValue = "Jane", ExpectedReadLength = "foo".Length }; yield return new TestCase { CaseSensitive = true, Map = new Dictionary<string, Person> { ["foo"] = "Jane", ["bar"] = "Jack", ["foobar"] = "John", 
}, Query = "Foo and even more stuff", ShouldBeFound = false, ExpectedReadLength = 0 }; yield return new TestCase { CaseSensitive = false, Map = new Dictionary<string, Person> { ["foo"] = "Jane", ["bar"] = "Jack", ["foobar"] = "John", }, Query = "Foo and even more stuff", ShouldBeFound = true, ExpectedReadLength = "foo".Length, ExpectedValue = "Jane" }; yield return new TestCase { CaseSensitive = false, Map = new Dictionary<string, Person> { ["foo"] = "Jane", ["bar"] = "Jack", ["foo bar baaaaaaz"] = "John", }, Query = "foo bar baz", ShouldBeFound = true, ExpectedReadLength = "foo".Length, ExpectedValue = "Jane" }; } } public class TestCase { public bool CaseSensitive { get; set; } public IReadOnlyDictionary<string, Person> Map { get; set; } public string Query { get; set; } public int StartPosition { get; set; } public bool ShouldBeFound { get; set; } public Person ExpectedValue { get; set; } public int ExpectedReadLength { get; set; } public override string ToString() { return $"Query: \"{Query}\", " + $"Map: [{string.Join(';', Map.Select(x => $"\"{x.Key}\"=>\"{x.Value}\""))}], " + $"StartPosition: \"{StartPosition}\", " + $"CaseSensitive: {CaseSensitive}, " + $"ShouldBeFound: {ShouldBeFound}, " + $"ExpectedValue: \"{ExpectedValue}\", " + $"ExpectedReadLength: {ExpectedReadLength}"; } } public class Person { public string Name { get; private set; } public static implicit operator Person(string name) { return new Person { Name = name }; } public override string ToString() { return Name; } private bool Equals(Person other) { return string.Equals(Name, other.Name); } public override bool Equals(object obj) { if (ReferenceEquals(null, obj)) return false; if (ReferenceEquals(this, obj)) return true; if (obj.GetType() != this.GetType()) return false; return Equals((Person) obj); } public override int GetHashCode() { return (Name != null ? Name.GetHashCode() : 0); } } } }
mit
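The cases above pin down the contract of SearchTrie.TryFind: the longest registered key wins ("foobar" over "foo"), a whitespace run in the query can stand in for the single space inside a key ("foo \tbar" still finds "foo bar"), matching may begin at an offset, and case sensitivity is a flag. A minimal Python sketch of a trie with exactly those semantics, written against the test expectations only (it is not HandyQuery's actual implementation):

def build_trie(mapping, case_sensitive):
    root = {}
    for key, value in mapping.items():
        node = root
        for ch in (key if case_sensitive else key.lower()):
            node = node.setdefault(ch, {})
        node[None] = value                      # terminal marker carries the payload
    return root

def try_find(trie, query, start=0, case_sensitive=True):
    node, i = trie, start
    best = (False, None, 0)                     # (found, value, read_length)
    while True:
        if None in node:                        # longest completed key so far
            best = (True, node[None], i - start)
        if i >= len(query):
            return best
        ch = query[i] if case_sensitive else query[i].lower()
        if ch.isspace() and ' ' in node:        # whitespace run matches one key space
            while i < len(query) and query[i].isspace():
                i += 1
            node = node[' ']
            continue
        if ch not in node:
            return best
        node = node[ch]
        i += 1

trie = build_trie({'foo': 'Jane', 'bar': 'Jack', 'foo bar': 'John'}, True)
print(try_find(trie, 'foo \tbar and more'))     # (True, 'John', 8)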
yogeshsaroya/new-cdnjs
ajax/libs/yui/3.15.0/widget-htmlparser/widget-htmlparser-min.js
129
version https://git-lfs.github.com/spec/v1
oid sha256:8768137f450a1128d92ba9168bc36e309a98752598e6e3b1281e820bbec93eac
size 1308
mit
lind/partsregister-eventsourcing
src/main/java/ske/part/partsregister/domain/part/FornavnEndretEvent.java
437
package ske.part.partsregister.domain.part;

import ske.eventsourcing.event.DomainEvent;
import ske.eventsourcing.eventstore.EventSourceIdentifier;

public class FornavnEndretEvent extends DomainEvent {

    private final String fornavn;

    public FornavnEndretEvent(EventSourceIdentifier id, String fornavn) {
        super(id);
        this.fornavn = fornavn;
    }

    public String getFornavn() {
        return fornavn;
    }
}
mit
exowanderer/SpitzerDeepLearningNetwork
Python Scripts/spitzer_cal_NALU_predict_orig.py
14840
from multiprocessing import set_start_method, cpu_count set_start_method('forkserver') import os os.environ["OMP_NUM_THREADS"] = str(cpu_count()) # or to whatever you want print('BEGIN BIG COPY PASTE ') # This section is for if/when I copy/paste the code into a ipython sesssion n_resamp = 0 n_trees = 100 core = 'A' # unknown do_std = False do_pca = False do_ica = False do_rfi = False do_gbr = False do_pp = False rand_state = 42 pdb_stop = False n_jobs = -1 sp_fname = '' verbose = True import pandas as pd import numpy as np import pdb import warnings warnings.filterwarnings("ignore") from sklearn.model_selection import train_test_split from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler, MinMaxScaler, minmax_scale from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor#, AdaBoostRegressor, GradientBoostingRegressor from sklearn.decomposition import PCA, FastICA from sklearn.externals import joblib from sklearn.metrics import r2_score import xgboost as xgb from tqdm import tqdm from glob import glob from time import time start0 = time() def setup_features(dataRaw, label='flux', notFeatures=[], pipeline=None, verbose=False, resample=False, returnAll=None): """Example function with types documented in the docstring. For production level usage: All scaling and transformations must be done with respect to the calibration data distributions Args: features (nD-array): Array of input raw features. labels (1D-array): The second parameter. pipeline (int): The first parameter. label_scaler (str): The second parameter. feature_scaler (str): The second parameter. Returns: features_transformed, labels_scaled .. _PEP 484: https://github.com/ExoWanderer/ """ if isinstance(dataRaw,str): dataRaw = pd.read_csv(filename) elif isinstance(dataRaw, dict): dataRaw = pd.DataFrame(dataRaw) elif not isinstance(dataRaw, pd.DataFrame): raise TypeError('The input must be a `pandas.DataFrame` or a `dict` with Equal Size Entries (to convert to df here)') # WHY IS THIS ALLOWED TO NOT HAVE PARENTHESES? 
# assert isinstance(dataRaw, pd.DataFrame), 'The input must be a Pandas DataFrame or Dictionary with Equal Size Entries' inputData = dataRaw.copy() # PLDpixels = pd.DataFrame({key:dataRaw[key] for key in dataRaw.columns if 'pix' in key}) pixCols = [colname for colname in inputData.columns if 'pix' in colname.lower() or 'pld' in colname.lower()] PLDnorm = np.sum(np.array(inputData[pixCols]),axis=1) inputData[pixCols] = (np.array(inputData[pixCols]).T / PLDnorm).T # # Overwrite the PLDpixels entries with the normalized version # for key in dataRaw.columns: # if key in PLDpixels.columns: # inputData[key] = PLDpixels[key] # # Assign the labels n_PLD = len([key for key in dataRaw.keys() if 'err' not in colname.lower() and ('pix' in key.lower() or 'pld' in key.lower())]) input_labels = [colname for colname in dataRaw.columns if colname not in notFeatures and 'err' not in colname.lower()] errors_labels = [colname for colname in dataRaw.columns if colname not in notFeatures and 'err' in colname.lower()] # resampling_inputs = ['flux', 'xpos', 'ypos', 'xfwhm', 'yfwhm', 'bg_flux', 'bmjd', 'np'] + ['pix{}'.format(k) for k in range(1,10)] # resampling_errors = ['fluxerr', 'xerr', 'yerr', 'xerr', 'yerr', 'sigma_bg_flux', 'bmjd_err', 'np_err'] + ['fluxerr']*n_PLD start = time() if resample: print("Resampling ", end=" ") inputData = pd.DataFrame({colname:np.random.normal(dataRaw[colname], dataRaw[colerr]) \ for colname, colerr in tqdm(zip(input_labels, errors_labels), total=len(input_labels)) }) print("took {} seconds".format(time() - start)) else: inputData = pd.DataFrame({colname:dataRaw[colname] for colname in input_labels}) labels = inputData[label].values # explicitly remove the label inputData.drop(label, axis=1, inplace=True) feature_columns = inputData.drop(notFeatures,axis=1).columns.values features = inputData.drop(notFeatures,axis=1).values if verbose: print('Shape of Features Array is', features.shape) if verbose: start = time() # labels_scaled = labels# label_scaler.fit_transform(labels[:,None]).ravel() if label_scaler is not None else labels features_trnsfrmd = pipeline.fit_transform(features) if pipeline is not None else features if verbose: print('took {} seconds'.format(time() - start)) collection = features_trnsfrmd, labels if returnAll == True: collection = features_trnsfrmd, labels, pipeline if returnAll == 'features': collection = features_trnsfrmd if returnAll == 'with raw data': collection.append(dataRaw) return collection def random_forest_wrapper(features, labels, n_trees, n_jobs, grad_boost=False, header='PCA', core_num=0, samp_num=0, loss='quantile', learning_rate=0.1, max_depth=3, subsample=1.0, full_output=False, verbose=False): print('Performing {} Random Forest'.format(header)) features_ = features.copy() labels_ = labels.copy() if grad_boost: rgr = xgb.XGBRegressor( max_depth = max_depth, learning_rate = learning_rate, n_estimators = n_trees, silent = not verbose, n_jobs = n_jobs) # objective='reg:linear', booster='gbtree', # gamma=0, min_child_weight=1, max_delta_step=0, subsample=1, # colsample_bytree=1, colsample_bylevel=1, reg_alpha=0, reg_lambda=1, # scale_pos_weight=1, base_score=0.5, random_state=0, seed=None, # missing=None features, testX, labels, testY = train_test_split(features_, labels_, test_size=0.25) else: rgr = RandomForestRegressor( n_estimators = n_trees , n_jobs = n_jobs , oob_score = True , warm_start = True , verbose = verbose ) if verbose: print('Feature Shape: {}\nLabel Shape: {}'.format(features.shape, labels.shape)) if verbose: start=time() 
rgr.fit(features, labels) rgr_oob = r2_score(testY, rgr.predict(testX)) if grad_boost else rgr.oob_score_ rgr_Rsq = r2_score(labels_, rgr.predict(features_)) test_label = {True:'Test R^2', False:'OOB'} if verbose: print('{} Pretrained Random Forest:\n\t{} Score: \ {:.3f}%\n\tTrain R^2 score: {:.3f}%\ \n\tRuntime: {:.3f} seconds'.format(header, test_label[grad_boost], rgr_oob*100, rgr_Rsq*100, time()-start)) output_savename = 'randForest_{}_approach_{}trees_{}resamp_{}core.save'.format(header, n_trees, samp_num, core_num) print('Storing New File to {}'.format(output_savename)) joblib.dump(rgr, output_savename) if full_output: return rgr if n_jobs == 1: print('WARNING: You are only using 1 core!') # Check if requested to complete more than one operatiion # if so delete old instances files_in_directory = glob('./*') # ## Load CSVs data flux_normalized = ['fluxerr', 'bg_flux', 'sigma_bg_flux', 'flux'] spitzerCalNotFeatures = ['flux', 'fluxerr', 'dn_peak', 'xycov', 't_cernox', 'xerr', 'yerr', 'sigma_bg_flux'] spitzerCalFilename = 'pmap_ch2_0p1s_x4_rmulti_s3_7.csv' if sp_fname == '' else sp_fname spitzerCalRawData = pd.read_csv(spitzerCalFilename) for key in flux_normalized: spitzerCalRawData[key] = spitzerCalRawData[key] / np.median(spitzerCalRawData['flux'].values) spitzerCalRawData['bmjd_err'] = np.median(0.5*np.diff(spitzerCalRawData['bmjd'])) spitzerCalRawData['np_err'] = np.sqrt(spitzerCalRawData['yerr']) for colname in spitzerCalRawData.columns: if 'err' not in colname.lower() and ('pix' in colname.lower() or 'pld' in colname.lower()): spitzerCalRawData[colname+'_err'] = spitzerCalRawData[colname] * spitzerCalRawData['fluxerr'] start = time() print("Transforming Data ", end=" ") operations = [] header = 'GBR' if do_gbr else 'RFI' if do_rfi else 'STD' pipe = Pipeline(operations) if len(operations) else None features, labels, pipe_fitted = setup_features( dataRaw = spitzerCalRawData, pipeline = pipe, verbose = verbose, resample = False, returnAll = True) print('END OF BIG COPY PASTE') print('BEGIN NEW HyperParameter Optimization.') from sklearn.metrics import make_scorer from sklearn.metrics import r2_score, mean_squared_error def rmse(yt,yp): return np.sqrt(mean_squared_error(yt,yp)) grad_boost=False header='XGB' core = 'A' pipe = None features_ = features.copy() labels_ = labels.copy() n_iters = 100 n_jobs = -1 cv = 10 verbose = True # for RSCV silent = True # for XGB random_state = 42 ''' NALU: Nearual Arithmentic Logical Unit NALU uses memory and logic gates to train a unique TF layer to modify the gradients of the weights. This seems to be very smilar to a LSTM layer, but for a non-RNN. This code has been specifically implemented with tensorflow. Code source: https://github.com/grananqvist/NALU-tf Original paper: https://arxiv.org/abs/1808.00508 (Trask et al.) 
''' import numpy as np import tensorflow as tf def nalu(input_layer, num_outputs): """ Neural Arithmetic Logic Unit tesnorflow layer Arguments: input_layer - A Tensor representing previous layer num_outputs - number of ouput units Returns: A tensor representing the output of NALU """ shape = (int(input_layer.shape[-1]), num_outputs) # define variables W_hat = tf.Variable(tf.truncated_normal(shape, stddev=0.02)) M_hat = tf.Variable(tf.truncated_normal(shape, stddev=0.02)) G = tf.Variable(tf.truncated_normal(shape, stddev=0.02)) # operations according to paper W = tf.tanh(W_hat) * tf.sigmoid(M_hat) m = tf.exp(tf.matmul(tf.log(tf.abs(input_layer) + 1e-7), W)) g = tf.sigmoid(tf.matmul(input_layer, G)) a = tf.matmul(input_layer, W) out = g * a + (1 - g) * m return out if __name__ == "__main__": from tqdm import tqdm from argparse import ArgumentParser ap = ArgumentParser() ap.add_argument('-d', '--directory', type=str, required=False, default='nalu_tf_save_dir/saves_{}'.format(time()), help='The tensorflow ckpt save file') ap.add_argument('-nnl', '--n_nalu_layers', type=int, required=False, default=1, help='Whether to use 1 (default), 2, or ... N NALU layers.') ap.add_argument('-nnn', '--n_nalu_neurons', type=int, required=False, default=1, help='How many features on the second NALU layer') ap.add_argument('-ne', '--n_epochs', type=int, required=False, default=200, help='Number of N_EPOCHS to train the network with.') ap.add_argument('-nc', '--n_classes', type=int, required=False, default=1, help='n_classes == 1 for Regression (default); > 1 for Classification.') ap.add_argument('-bs', '--batch_size', type=int, required=False, default=32, help='Batch size: number of samples per batch.') ap.add_argument('-lr', '--learning_rate', type=float, required=False, default=1e-3, help='Learning rate: how fast the optimizer moves up/down the gradient.') ap.add_argument('-ts', '--test_size', type=float, required=False, default=0.75, help='How much to split the train / test ratio') ap.add_argument('-rs', '--random_state', type=int, required=False, default=42, help='Integer value to initialize train/test splitting randomization') args = vars(ap.parse_args()) IMPORT_DIR = args['directory'] N_NALU_LAYERS = args['n_nalu_layers'] N_NALU_NEURONS = args['n_nalu_neurons'] N_CLASSES = args['n_classes'] # = 1 for regression TEST_SIZE = args['test_size'] RANDOM_STATE = args['random_state'] N_EPOCHS = args['n_epochs'] LEARNING_RATE = args['learning_rate'] BATCH_SIZE = args['batch_size'] N_FEATURES = features.shape[-1] idx_train, idx_test = train_test_split(np.arange(labels.size), test_size=0.75, random_state=42) X_data, Y_data = features[idx_test], labels[idx_test][:,None] ''' Construct Load Path ''' print("Loaded model stored in path: {}".format(IMPORT_DIR)) with tf.device("/cpu:0"): # tf.reset_default_graph() # define placeholders and network X = tf.placeholder(tf.float32, shape=[None, N_FEATURES]) Y_true = tf.placeholder(tf.float32, shape=[None, 1]) # Setup NALU Layers nalu_layers = {'nalu0':nalu(X,N_NALU_NEURONS)} for kn in range(1, N_NALU_LAYERS): nalu_layers['nalu{}'.format(kn)] = nalu(nalu_layers['nalu{}'.format(kn-1)], N_NALU_NEURONS) Y_pred = nalu(nalu_layers['nalu{}'.format(N_NALU_LAYERS-1)], N_CLASSES) # N_CLASSES = 1 for regression # loss and train operations loss = tf.nn.l2_loss(Y_pred - Y_true) # NALU uses mse optimizer = tf.train.AdamOptimizer(LEARNING_RATE) train_op = optimizer.minimize(loss) # Add an op to initialize the variables. 
init_op = tf.global_variables_initializer()

        # Add ops to save and restore all the variables.
        saver = tf.train.Saver()

        sess_config = tf.ConfigProto(device_count={"CPU": cpu_count()},
                                     inter_op_parallelism_threads=2,
                                     intra_op_parallelism_threads=1)

        with tf.Session(config=sess_config) as sess:
            # Restore variables from disk.
            saver.restore(sess, IMPORT_DIR)
            print("Model restored.")

            # Check the values of the variables
            ys_pred = Y_pred.eval(feed_dict={X: X_data})
            print("R2_Test: {}".format(r2_score(Y_data, ys_pred)))
mit
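The nalu() layer above follows the Trask et al. construction: W = tanh(W_hat) * sigmoid(M_hat) pushes weights towards {-1, 0, 1}, `a` is the plain additive path, `m` multiplies via log space, and the learned gate g blends the two. A minimal NumPy re-statement of that forward pass, with random (untrained) weights purely to make the shapes and arithmetic concrete:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def nalu_forward(x, w_hat, m_hat, g_weights, eps=1e-7):
    W = np.tanh(w_hat) * sigmoid(m_hat)      # weights biased towards -1, 0, 1
    a = x @ W                                # additive path
    m = np.exp(np.log(np.abs(x) + eps) @ W)  # multiplicative path, via log space
    g = sigmoid(x @ g_weights)               # input-dependent gate
    return g * a + (1.0 - g) * m

rng = np.random.default_rng(42)
x = rng.normal(size=(4, 3))                  # batch of 4 samples, 3 features
shape = (3, 1)                               # 3 inputs -> 1 output (N_CLASSES = 1)
out = nalu_forward(x, rng.normal(size=shape), rng.normal(size=shape),
                   rng.normal(size=shape))
print(out.shape)                             # (4, 1)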
yun2win/yun2win-sdk-desktop
app/main/y2w_openRTC.js
836
const childProcess = require('child_process');
const config = require('../config');

module.exports = function (parms) {
    // var path = '/Users/qs/Desktop/Y2WRTCQuick.app';
    var path = config.RTCPath;
    var parmList = toParmList(parms);
    openFile(path, parmList);
};

function toParmList(parmsDict) {
    var parmList = [];
    for (var key in parmsDict) {
        parmList.push(key + '=' + parmsDict[key]);
    }
    return parmList;
}

function openFile(path, parmList) {
    var cmd = process.platform === 'darwin' ? 'open' : 'start';
    cmd += ' ' + path + ' --args ' + parmList.join(' ');
    childProcess.exec(cmd);
}

function openApp(path, parmList) {
    var list = [path, '--args'];
    parmList.forEach(function (parm) {
        list.push(parm);
    });
    childProcess.execFile(process.execPath, list);
}
mit
mauretto78/simple-event-store-manager
src/Infrastructure/Drivers/Exceptions/DriverConnectionException.php
388
<?php
/**
 * This file is part of the Simple EventStore Manager package.
 *
 * (c) Mauro Cassani<https://github.com/mauretto78>
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */

namespace SimpleEventStoreManager\Infrastructure\Drivers\Exceptions;

class DriverConnectionException extends \Exception
{
}
mit
smallhelm/level-fact-base
src/dbRange.js
674
var promisify = require('./promisify')

module.exports = function dbRange (db, opts, onData, callbackOrig) {
  if (!callbackOrig) {
    callbackOrig = promisify()
  }

  var hasCalledback = false
  function callback (err) {
    if (hasCalledback) return
    hasCalledback = true
    callbackOrig(err)
  }

  if (opts.prefix) {
    opts.gte = opts.prefix
    opts.lte = opts.prefix.concat([void 0])
  }

  var stream = db.createReadStream(opts)
  stream.on('error', callback)
  stream.on('end', callback)

  function stopRange () {
    stream.destroy()
    callback()
  }

  stream.on('data', function (data) {
    onData(data, stopRange)
  })

  return callbackOrig.promise
}
mit
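dbRange above turns a key prefix into an explicit scan range (gte = prefix, lte = prefix + [undefined]) before handing it to createReadStream, the usual trick for prefix scans over an ordered key space. The same idea in a small Python sketch over a sorted list of string keys; the '!'-separated keys and the '\xff' upper-bound sentinel are illustrative stand-ins for level-fact-base's actual key encoding:

import bisect

def scan_prefix(sorted_keys, prefix):
    # Convert the prefix into a half-open range [prefix, prefix + sentinel)
    lo = bisect.bisect_left(sorted_keys, prefix)
    hi = bisect.bisect_left(sorted_keys, prefix + '\xff')
    for key in sorted_keys[lo:hi]:
        yield key

keys = sorted(['eav!e1!a1', 'eav!e1!a2', 'eav!e2!a1', 'vae!a1!v9'])
print(list(scan_prefix(keys, 'eav!e1!')))    # ['eav!e1!a1', 'eav!e1!a2']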
TheOrchid/Platform
resources/views/layouts/browsing.blade.php
151
<div data-controller="browsing" class="mb-3">
    <iframe @foreach($attributes as $key => $value) {{ $key }}='{{$value}}' @endforeach></iframe>
</div>
mit
vanncho/Java-Web-Development-Basics
issueTracker/src/main/java/com/issueTracker/mapper/ModelParserImpl.java
779
package com.issueTracker.mapper;

import org.modelmapper.ModelMapper;
import org.modelmapper.PropertyMap;

import javax.ejb.Stateless;

@Stateless
public class ModelParserImpl implements ModelParser {

    private ModelMapper modelMapper;

    public ModelParserImpl() {
        this.modelMapper = new ModelMapper();
    }

    @Override
    public <S, D> D convert(S source, Class<D> destination) {
        D convertedObject = this.modelMapper.map(source, destination);
        return convertedObject;
    }

    @Override
    public <S, D> D convert(S source, Class<D> destination, PropertyMap<S, D> propertyMap) {
        this.modelMapper.addMappings(propertyMap);
        D convertedObject = this.modelMapper.map(source, destination);
        return convertedObject;
    }
}
mit
Celarix/IronAssembler
IronAssembler/IronAssembler/Data/ParsedStringTable.cs
361
using System.Collections.Generic;
using System.Linq;

namespace IronAssembler.Data
{
    public sealed class ParsedStringTable
    {
        private readonly List<string> strings;

        public IReadOnlyList<string> Strings => strings.AsReadOnly();

        public ParsedStringTable(IEnumerable<string> strings) => this.strings = strings.ToList();
    }
}
mit
pciccio/TamTam
TamTam.Models/TamTam/Result/AggregatedObject.cs
225
using TamTam.Models.OMDb.Result;

namespace TamTam.Models.TamTam.Result
{
    public class AggregatedObject
    {
        public Movie Movie { get; set; }
        public YouTube.Result.Result Video { get; set; }
    }
}
mit
leonardoanalista/java2word
java2word/src/main/java/word/w2004/elements/tableElements/TableFactoryMethod.java
1024
package word.w2004.elements.tableElements;

/**
 * @author leonardo_correa
 *
 * Factory Method for Table Elements: Header, Columns and Footer.
 *
 * Here is the logic to decide which instance to create and return.
 */
public class TableFactoryMethod {

    private static TableFactoryMethod instance;

    private TableFactoryMethod() {
    }

    public static TableFactoryMethod getInstance() {
        if (instance == null) {
            instance = new TableFactoryMethod();
        }
        return instance;
    }

    public ITableItemStrategy getTableItem(TableEle tableEle) {
        if (tableEle == null) {
            return null;
        }
        return getTableEle(tableEle);
    }

    private ITableItemStrategy getTableEle(TableEle tableEle) {
        if (tableEle.getValue().equals("tableDef")) {
            return new TableDefinition();
        } else if (tableEle.getValue().equals("th")) {
            return new TableHeader();
        } else if (tableEle.getValue().equals("td")) {
            return new TableCol();
        } else { // if (tableEle.getValue().equals("tf"))
            return new TableFooter();
        }
    }
}
mit
caleb531/cidr-brewer
setup.py
798
#!/usr/bin/env python
# coding=utf-8

from setuptools import setup


# Get long description (used on PyPI project page)
def get_long_description():
    with open('README.md', 'r') as readme_file:
        return readme_file.read()


setup(
    name='cidr-brewer',
    version='1.0.0',
    description='A CLI utility for working with classless IP addresses',
    long_description=get_long_description(),
    long_description_content_type='text/markdown',
    url='https://github.com/caleb531/cidr-brewer',
    author='Caleb Evans',
    author_email='[email protected]',
    license='MIT',
    keywords='networking ip addresses cidr',
    py_modules=['cidrbrewer'],
    install_requires=[],
    entry_points={
        'console_scripts': [
            'cidr-brewer=cidrbrewer:main'
        ]
    }
)
mit
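setup.py above only packages the cidrbrewer module and registers the cidr-brewer console script; the module itself is not shown here. For a sense of the classless-addressing arithmetic such a utility works with, a short sketch using only the standard-library ipaddress module (not cidr-brewer's own API):

import ipaddress

net = ipaddress.ip_network('192.168.100.0/26')
print(net.netmask)                            # 255.255.255.192
print(net.num_addresses - 2)                  # 62 usable host addresses
for subnet in net.subnets(prefixlen_diff=1):  # split the /26 into two /27 blocks
    print(subnet)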
Sprinkoringo/PMU-Server
Server/Server/Shops/ShopCollection.cs
2028
/*The MIT License (MIT)

Copyright (c) 2014 PMU Staff

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/

using System;
using System.Collections.Generic;
using System.Text;

using PMU.Core;

namespace Server.Shops
{
    public class ShopCollection
    {
        #region Fields

        ListPair<int, Shop> shops;
        int maxShops;

        #endregion Fields

        #region Constructors

        public ShopCollection(int maxShops) {
            if (maxShops == 0) maxShops = 50;
            this.maxShops = maxShops;
            shops = new ListPair<int, Shop>();
        }

        #endregion Constructors

        #region Properties

        public ListPair<int, Shop> Shops {
            get { return shops; }
        }

        public int MaxShops {
            get { return maxShops; }
        }

        #endregion Properties

        #region Indexers

        public Shop this[int index] {
            get { return shops[index]; }
            set { shops[index] = value; }
        }

        #endregion Indexers
    }
}
mit
d53dave/DSLFY-Web
src/main/java/net/d53dev/dslfy/web/config/MongoConfig.java
876
package net.d53dev.dslfy.web.config;

import com.mongodb.Mongo;
import com.mongodb.MongoClient;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.mongodb.config.AbstractMongoConfiguration;
import org.springframework.data.mongodb.gridfs.GridFsTemplate;

/**
 * Created by davidsere on 16/11/15.
 */
@Configuration
public class MongoConfig extends AbstractMongoConfiguration {

    @Bean
    public GridFsTemplate gridFsTemplate() throws Exception {
        return new GridFsTemplate(mongoDbFactory(), mappingMongoConverter());
    }

    @Override
    protected String getDatabaseName() {
        return ConfigConstants.MONGO_DB_NAME;
    }

    @Override
    @Bean
    public Mongo mongo() throws Exception {
        return new MongoClient(ConfigConstants.MONGO_DB_HOST);
    }
}
mit
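The Spring configuration above wires a GridFsTemplate onto the Mongo connection so DSLFY can stream large files in and out of MongoDB. Purely for illustration, an equivalent store-and-read round trip with PyMongo's gridfs package; the host, port, and database name are placeholders (not ConfigConstants' values), and a reachable mongod is assumed:

import gridfs
from pymongo import MongoClient

client = MongoClient('localhost', 27017)
fs = gridfs.GridFS(client['dslfy'])           # GridFS bucket on the target database

file_id = fs.put(b'hello gridfs', filename='greeting.txt')
print(fs.get(file_id).read())                 # b'hello gridfs'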
input-output-hk/etc-client
src/rpcTest/scala/io/iohk/ethereum/rpcTest/TestContracts.scala
3903
package io.iohk.ethereum.rpcTest import akka.util.ByteString import org.bouncycastle.util.encoders.Hex import io.iohk.ethereum.rpcTest.TestData.firstAccount object TestContracts { //https://github.com/rsksmart/rskj/wiki/Deploying-contracts-using-RPC-calls#publishing-a-contract-using-rpc val testContract = "6060604052341561000c57fe5b5b6101598061001c6000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063cfae32171461003b575bfe5b341561004357fe5b61004b6100d4565b604051808060200182810382528381815181526020019150805190602001908083836000831461009a575b80518252602083111561009a57602082019150602081019050602083039250610076565b505050905090810190601f1680156100c65780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b6100dc610119565b604060405190810160405280600381526020017f486921000000000000000000000000000000000000000000000000000000000081525090505b90565b6020604051908101604052806000815250905600a165627a7a72305820ed71008611bb64338581c5758f96e31ac3b0c57e1d8de028b72f0b8173ff93a10029" import io.iohk.ethereum.crypto.kec256 // https://github.com/ethereum/wiki/wiki/JSON-RPC#example-14 val storageContract = "0x60606040525b6104d260006000508190555061162e600160005060003373ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600050819055505b600a8060546000396000f360606040526008565b00" val pos0 = BigInt(1234) val mapPos = "000000000000000000000000" + firstAccount.address.drop(2) + "0000000000000000000000000000000000000000000000000000000000000001" val decoded = Hex.decode(mapPos) val shaPos = BigInt(kec256(decoded)) val mapResult = BigInt(5678) val StorageCodeRuntimeRepresentation = "0x60606040526008565b00" /* contract Counter { uint256 public count = 0; event Increment(address indexed who, uint256 indexed newValue); // declaring event function increment(uint256 newValue) public { Increment(msg.sender, newValue); // logging event count = newValue; } } * * * * */ val counterEventContract = "0x6060604052600060006000505560d68060186000396000f360606040526000357c01000000000000000000000000000000000000000000000000000000009004806306661abd1460415780637cf5dab014606257603f565b005b604c600480505060cd565b6040518082815260200191505060405180910390f35b607660048080359060200190919050506078565b005b803373ffffffffffffffffffffffffffffffffffffffff167fb182275171042022ff972a26edbd0171bccc74463bd22e56dbbeba4e93b7a66860405180905060405180910390a3806000600050819055505b50565b6000600050548156" val readEventContract = "06661abd" + "0000000000000000000000000000000000000000000000000000000000000000" val incrementEventContract = "7cf5dab0" val counterContractEventHash = "0xb182275171042022ff972a26edbd0171bccc74463bd22e56dbbeba4e93b7a668" /* contract Example1 { event Event(uint256 indexed value); function emitEvent(uint256 value) public { // there is no way to interact with this event // in smart contracts Event(value); } } * */ val emitEventContract = "0x606060405260818060106000396000f360606040526000357c0100000000000000000000000000000000000000000000000000000000900480634d43bec9146037576035565b005b604b6004808035906020019091905050604d565b005b807f510e730eb6600b4c67d51768c6996795863364461fee983d92d5e461f209c7cf60405180905060405180910390a25b5056" val emitEvent = "4d43bec9" val emitEventHash = "0x510e730eb6600b4c67d51768c6996795863364461fee983d92d5e461f209c7cf" def writeContract(a: BigInt, funName: String): String = { import io.iohk.ethereum.utils.ByteUtils val asByteString = ByteString(a.toByteArray) funName + Hex.toHexString(ByteUtils.padLeft(asByteString, 32).toArray) } 
def createTopic(s :String): String = { // 0x + padLeft to 64 bytes "0x" + s.reverse.padTo(64, "0").reverse.mkString } }
mit
OmnInfinity/Octothorpe
Properties/AssemblyInfo.cs
1396
using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; // General Information about an assembly is controlled through the following // set of attributes. Change these attribute values to modify the information // associated with an assembly. [assembly: AssemblyTitle("Octothorpe")] [assembly: AssemblyDescription("")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyCompany("")] [assembly: AssemblyProduct("Octothorpe")] [assembly: AssemblyCopyright("Copyright © 2016")] [assembly: AssemblyTrademark("")] [assembly: AssemblyCulture("")] // Setting ComVisible to false makes the types in this assembly not visible // to COM components. If you need to access a type in this assembly from // COM, set the ComVisible attribute to true on that type. [assembly: ComVisible(false)] // The following GUID is for the ID of the typelib if this project is exposed to COM [assembly: Guid("ce99b1ad-3cc3-45f8-89e5-e972d566d65a")] // Version information for an assembly consists of the following four values: // // Major Version // Minor Version // Build Number // Revision // // You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: // [assembly: AssemblyVersion("1.0.*")] [assembly: AssemblyVersion("1.0.0.0")] [assembly: AssemblyFileVersion("1.0.0.0")]
mit
grappachu/core
src/Grappachu.Core.Test/IO/TempFileTests.cs
1861
using System; using System.IO; using Grappachu.Core.IO; using Grappachu.Core.Test.IO.Abstract; using NUnit.Framework; using SharpTestsEx; namespace Grappachu.Core.Test.IO { [TestFixture] public class TempFileTests : GenericFolderBasedTest { [SetUp] public void SetUp() { OnSetUp(); } [TearDown] public void TearDown() { OnTearDown(); } [Test] public void CreateTest() { using (var t = new TempFile(ToString())) { t.Create(); } } [Test] public void Create_should_write_file_on_disk() { using (var t = new TempFile(TestRoot, "tmp")) { t.Create(); File.Exists(t.Path).Should().Be.True(); } } [Test] public void Dispose_should_delete_file() { string path; using (var t = new TempFile(ToString())) { path = t.Path; t.Create(); } File.Exists(path).Should().Be.False(); } [Test] public void TempFile_constructor_should_generate_a_file_path_with_specified_extension() { string ext = "." + Guid.NewGuid().ToString().Substring(0, 3); using (var t = new TempFile(TestRoot, ext)) { File.Exists(t.Path).Should().Be.False(); Path.GetExtension(t.Path).Should().Be.EqualTo(ext); } } [Test] public void TempFile_constructor_should_use_temp_path_by_default() { using (var t = new TempFile()) { t.Path.StartsWith(Path.GetTempPath()).Should().Be.True(); } } } }
mit
SetiZ/SpotIt
gen/com/google/android/gms/R.java
7906
/* AUTO-GENERATED FILE. DO NOT MODIFY. * * This class was automatically generated by the * aapt tool from the resource data it found. It * should not be modified by hand. */ package com.google.android.gms; public final class R { public static final class attr { public static final int adSize = 0x7f010000; public static final int adUnitId = 0x7f010001; public static final int cameraBearing = 0x7f010003; public static final int cameraTargetLat = 0x7f010004; public static final int cameraTargetLng = 0x7f010005; public static final int cameraTilt = 0x7f010006; public static final int cameraZoom = 0x7f010007; public static final int mapType = 0x7f010002; public static final int uiCompass = 0x7f010008; public static final int uiRotateGestures = 0x7f010009; public static final int uiScrollGestures = 0x7f01000a; public static final int uiTiltGestures = 0x7f01000b; public static final int uiZoomControls = 0x7f01000c; public static final int uiZoomGestures = 0x7f01000d; public static final int useViewLifecycle = 0x7f01000e; public static final int zOrderOnTop = 0x7f01000f; } public static final class color { public static final int common_action_bar_splitter = 0x7f050009; public static final int common_signin_btn_dark_text_default = 0x7f050000; public static final int common_signin_btn_dark_text_disabled = 0x7f050002; public static final int common_signin_btn_dark_text_focused = 0x7f050003; public static final int common_signin_btn_dark_text_pressed = 0x7f050001; public static final int common_signin_btn_default_background = 0x7f050008; public static final int common_signin_btn_light_text_default = 0x7f050004; public static final int common_signin_btn_light_text_disabled = 0x7f050006; public static final int common_signin_btn_light_text_focused = 0x7f050007; public static final int common_signin_btn_light_text_pressed = 0x7f050005; public static final int common_signin_btn_text_dark = 0x7f05000e; public static final int common_signin_btn_text_light = 0x7f05000f; } public static final class drawable { public static final int common_signin_btn_icon_dark = 0x7f020057; public static final int common_signin_btn_icon_disabled_dark = 0x7f020058; public static final int common_signin_btn_icon_disabled_focus_dark = 0x7f020059; public static final int common_signin_btn_icon_disabled_focus_light = 0x7f02005a; public static final int common_signin_btn_icon_disabled_light = 0x7f02005b; public static final int common_signin_btn_icon_focus_dark = 0x7f02005c; public static final int common_signin_btn_icon_focus_light = 0x7f02005d; public static final int common_signin_btn_icon_light = 0x7f02005e; public static final int common_signin_btn_icon_normal_dark = 0x7f02005f; public static final int common_signin_btn_icon_normal_light = 0x7f020060; public static final int common_signin_btn_icon_pressed_dark = 0x7f020061; public static final int common_signin_btn_icon_pressed_light = 0x7f020062; public static final int common_signin_btn_text_dark = 0x7f020063; public static final int common_signin_btn_text_disabled_dark = 0x7f020064; public static final int common_signin_btn_text_disabled_focus_dark = 0x7f020065; public static final int common_signin_btn_text_disabled_focus_light = 0x7f020066; public static final int common_signin_btn_text_disabled_light = 0x7f020067; public static final int common_signin_btn_text_focus_dark = 0x7f020068; public static final int common_signin_btn_text_focus_light = 0x7f020069; public static final int common_signin_btn_text_light = 0x7f02006a; public static final int 
common_signin_btn_text_normal_dark = 0x7f02006b; public static final int common_signin_btn_text_normal_light = 0x7f02006c; public static final int common_signin_btn_text_pressed_dark = 0x7f02006d; public static final int common_signin_btn_text_pressed_light = 0x7f02006e; public static final int ic_plusone_medium_off_client = 0x7f020070; public static final int ic_plusone_small_off_client = 0x7f020071; public static final int ic_plusone_standard_off_client = 0x7f020072; public static final int ic_plusone_tall_off_client = 0x7f020073; } public static final class id { public static final int hybrid = 0x7f060004; public static final int none = 0x7f060000; public static final int normal = 0x7f060001; public static final int satellite = 0x7f060002; public static final int terrain = 0x7f060003; } public static final class integer { public static final int google_play_services_version = 0x7f080000; } public static final class string { public static final int auth_client_needs_enabling_title = 0x7f070015; public static final int auth_client_needs_installation_title = 0x7f070016; public static final int auth_client_needs_update_title = 0x7f070017; public static final int auth_client_play_services_err_notification_msg = 0x7f070018; public static final int auth_client_requested_by_msg = 0x7f070019; public static final int auth_client_using_bad_version_title = 0x7f070014; public static final int common_google_play_services_enable_button = 0x7f070006; public static final int common_google_play_services_enable_text = 0x7f070005; public static final int common_google_play_services_enable_title = 0x7f070004; public static final int common_google_play_services_install_button = 0x7f070003; public static final int common_google_play_services_install_text_phone = 0x7f070001; public static final int common_google_play_services_install_text_tablet = 0x7f070002; public static final int common_google_play_services_install_title = 0x7f070000; public static final int common_google_play_services_invalid_account_text = 0x7f07000c; public static final int common_google_play_services_invalid_account_title = 0x7f07000b; public static final int common_google_play_services_network_error_text = 0x7f07000a; public static final int common_google_play_services_network_error_title = 0x7f070009; public static final int common_google_play_services_unknown_issue = 0x7f07000d; public static final int common_google_play_services_unsupported_date_text = 0x7f070010; public static final int common_google_play_services_unsupported_text = 0x7f07000f; public static final int common_google_play_services_unsupported_title = 0x7f07000e; public static final int common_google_play_services_update_button = 0x7f070011; public static final int common_google_play_services_update_text = 0x7f070008; public static final int common_google_play_services_update_title = 0x7f070007; public static final int common_signin_button_text = 0x7f070012; public static final int common_signin_button_text_long = 0x7f070013; } public static final class styleable { public static final int[] AdsAttrs = { 0x7f010000, 0x7f010001 }; public static final int AdsAttrs_adSize = 0; public static final int AdsAttrs_adUnitId = 1; public static final int[] MapAttrs = { 0x7f010002, 0x7f010003, 0x7f010004, 0x7f010005, 0x7f010006, 0x7f010007, 0x7f010008, 0x7f010009, 0x7f01000a, 0x7f01000b, 0x7f01000c, 0x7f01000d, 0x7f01000e, 0x7f01000f }; public static final int MapAttrs_cameraBearing = 1; public static final int MapAttrs_cameraTargetLat = 2; public static final int 
MapAttrs_cameraTargetLng = 3; public static final int MapAttrs_cameraTilt = 4; public static final int MapAttrs_cameraZoom = 5; public static final int MapAttrs_mapType = 0; public static final int MapAttrs_uiCompass = 6; public static final int MapAttrs_uiRotateGestures = 7; public static final int MapAttrs_uiScrollGestures = 8; public static final int MapAttrs_uiTiltGestures = 9; public static final int MapAttrs_uiZoomControls = 10; public static final int MapAttrs_uiZoomGestures = 11; public static final int MapAttrs_useViewLifecycle = 12; public static final int MapAttrs_zOrderOnTop = 13; } }
mit
mpOzelot/Unity
src/GitHub.Api/NewTaskSystem/TaskCanceledExceptions.cs
473
using System; using System.Threading.Tasks; namespace GitHub.Unity { class DependentTaskFailedException : TaskCanceledException { public DependentTaskFailedException(ITask task, Exception ex) : base(ex.InnerException != null ? ex.InnerException.Message : ex.Message, ex.InnerException ?? ex) {} } class ProcessException : TaskCanceledException { public ProcessException(ITask process) : base(process.Errors) { } } }
mit
kinshuk4/MoocX
misc/deep_learning_notes/Proj_Molecular_Simulation/scratchs/basic_tensor_operators.py
1024
import tensorflow as tf import numpy as np from termcolor import cprint, colored as c deep_features = tf.constant([ [0, 1], [1, 1.5], [1.5, 2], [-1, 0], [-1, 1.5], [-1.5, 2], [-1, 0], [-1, 1.5], [-1.5, 2] ]) labels = tf.constant([ [1., 0, 0], [1., 0, 0], [1., 0, 0], [0, 1., 0], [0, 1., 0], [0, 1., 0], [0, 0, 1.], [0, 0, 1.], [0, 0, 1.] ]) features_expanded = tf.reshape(deep_features, shape=[-1, 2, 1]) label_expanded = tf.reshape(labels, shape=[-1, 1, 3]) samples_per_label = tf.reduce_sum( label_expanded, reduction_indices=[0] ) centroids = \ tf.reduce_sum( tf.reshape(deep_features, shape=[-1, 2, 1]) * \ label_expanded, reduction_indices=[0] ) / samples_per_label spread = tf.reduce_mean( tf.square( features_expanded * label_expanded - tf.reshape(centroids, shape=[1, 2, 3]) ) ) with tf.Session() as sess: spread_output = sess.run([spread]) cprint(c(spread_output, 'red'))
mit
Karnix/The-SSN-App
app/src/main/java/karnix/the/ssn/app/activity/dining/DiningMenuAdapter.java
6721
package karnix.the.ssn.app.activity.dining; import android.content.Context; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ImageView; import android.widget.TextView; import com.afollestad.sectionedrecyclerview.SectionedRecyclerViewAdapter; import com.afollestad.sectionedrecyclerview.SectionedViewHolder; import java.util.ArrayList; import java.util.List; import karnix.the.ssn.app.model.DatabaseHandler; import karnix.the.ssn.app.utils.LogHelper; import karnix.the.ssn.ssnmachan.R; public class DiningMenuAdapter extends SectionedRecyclerViewAdapter<DiningMenuAdapter.DiningMenuViewHolder> { private static final String TAG = LogHelper.makeLogTag(DiningMenuAdapter.class); private Context context; private int is; private DatabaseHandler databaseHandler; private String[] messSections = {"Breakfast", "Lunch", "Snacks", "Dinner"}; private String[] storesSections = {"Fresh Juice", "Milk Shakes", "Soda", "Tea", "Coffee", "Others", "Noodles", "Puff", "Corn", "Kulfi", "Sandwich", "Omlette", "Rice", "Chappathi", "Paratha", "Chat"}; private String[] snowCubeSections = {"Softies Cone", "Delights", "Sundae Mania", "Sundae & Fantasies", "Jelly Jellose", "Snow Specials", "Ice Cream Shakes", "Sandwiches", "Crispy Snacks", "Wraps", "Sweet Corn", "Pop Corn"}; private String[] tuttiSections = {"South Indian", "Snack", "Manchurian", "Indian", "Parotha", "Chinese", "Biryani"}; private List<List<List<String>>> messMenuList; private List<List<List<String>>> storesMenuList; private List<List<List<String>>> snowCubeList; private List<List<List<String>>> tuttiList; public DiningMenuAdapter(Context context, String place, String day) { this.context = context; if (place.contains("Mess")) this.is = 0; if (place.contains("Canteen")) this.is = 1; if (place.contains("Snow")) this.is = 2; if (place.contains("Tutti")) this.is = 3; databaseHandler = new DatabaseHandler(context); messMenuList = new ArrayList<>(); storesMenuList = new ArrayList<>(); snowCubeList = new ArrayList<>(); tuttiList = new ArrayList<>(); if (is == 0) for (String messSection : messSections) messMenuList.add(databaseHandler.getMessMenu(place, day, messSection)); else if (is == 1) for (String storesSection : storesSections) storesMenuList.add(databaseHandler.getStoresMenu(place, storesSection)); else if (is == 2) for (String snowCubeSection : snowCubeSections) snowCubeList.add(databaseHandler.getSnowMenu(place, snowCubeSection)); else for (String tuttiSection : tuttiSections) tuttiList.add(databaseHandler.getSnowMenu(place, tuttiSection)); } @Override public int getSectionCount() { if (is == 0) return messSections.length; else if (is == 1) return storesSections.length; else if (is == 2) return snowCubeSections.length; else return tuttiSections.length; } @Override public int getItemCount(int section) { if (is == 0) return messMenuList.get(section).size(); else if (is == 1) return storesMenuList.get(section).size(); else if (is == 2) return snowCubeList.get(section).size(); else return tuttiList.get(section).size(); } @Override public void onBindHeaderViewHolder(DiningMenuViewHolder holder, int section, boolean expanded) { if (is == 0) holder.title.setText(messSections[section]); else if (is == 1) holder.title.setText(storesSections[section]); else if (is == 2) holder.title.setText(snowCubeSections[section]); else holder.title.setText(tuttiSections[section]); holder.caret.setImageResource(expanded ? 
R.drawable.ic_collapse : R.drawable.ic_expand); } @Override public void onBindViewHolder(DiningMenuViewHolder holder, int section, int relativePosition, int absolutePosition) { List<List<String>> menuList; if (is == 0) { menuList = messMenuList.get(section); if (menuList.get(relativePosition).get(1).equals("Veg")) holder.title.setTextColor(context.getResources().getColor(R.color.open)); else holder.title.setTextColor(context.getResources().getColor(R.color.close)); holder.price.setVisibility(View.GONE); } else if (is == 1) menuList = storesMenuList.get(section); else if (is == 2) menuList = snowCubeList.get(section); else menuList = tuttiList.get(section); holder.title.setText(menuList.get(relativePosition).get(0)); holder.price.setText(menuList.get(relativePosition).get(1)); } @Override public int getItemViewType(int section, int relativePosition, int absolutePosition) { if (section == 1) { return 0; // VIEW_TYPE_HEADER is -2, VIEW_TYPE_ITEM is -1. You can return 0 or greater. } return super.getItemViewType(section, relativePosition, absolutePosition); } @Override public DiningMenuViewHolder onCreateViewHolder(ViewGroup parent, int viewType) { int layout; switch (viewType) { case VIEW_TYPE_HEADER: layout = R.layout.item_dining_header; break; case VIEW_TYPE_ITEM: layout = R.layout.item_dining_item; break; default: layout = R.layout.item_dining_item; } View v = LayoutInflater.from(parent.getContext()).inflate(layout, parent, false); return new DiningMenuViewHolder(v, this); } static class DiningMenuViewHolder extends SectionedViewHolder implements View.OnClickListener { final TextView title; final TextView price; final ImageView caret; final DiningMenuAdapter adapter; DiningMenuViewHolder(View itemView, DiningMenuAdapter adapter) { super(itemView); this.title = (TextView) itemView.findViewById(R.id.title); this.price = (TextView) itemView.findViewById(R.id.textView_itemDining_price); this.caret = (ImageView) itemView.findViewById(R.id.caret); this.adapter = adapter; itemView.setOnClickListener(this); } @Override public void onClick(View view) { if (isHeader()) adapter.toggleSectionExpanded(getRelativePosition().section()); } } }
mit
kaaaaang/ews-java-api
src/main/java/microsoft/exchange/webservices/data/ItemGroup.java
1767
/************************************************************************** * copyright file="ItemGroup.java" company="Microsoft" * Copyright (c) Microsoft Corporation. All rights reserved. * * Defines the ItemGroup.java. **************************************************************************/ package microsoft.exchange.webservices.data; import java.util.ArrayList; import java.util.Collection; import java.util.List; /** * Represents a group of items as returned by grouped item search operations. * * @param <TItem> * the generic type */ public final class ItemGroup<TItem extends Item> { /** The group index. */ private String groupIndex; /** The items. */ private Collection<TItem> items; /** * Initializes a new instance of the class. * * @param groupIndex * the group index * @param items * the items */ protected ItemGroup(String groupIndex, List<TItem> items) { EwsUtilities.EwsAssert(groupIndex != null, "ItemGroup.ctor", "groupIndex is null"); EwsUtilities .EwsAssert(items != null, "ItemGroup.ctor", "items is null"); this.groupIndex = groupIndex; this.items = new ArrayList<TItem>(items); } /** * Gets an index identifying the group. * * @return the group index */ public String getGroupIndex() { return this.groupIndex; } /** * Sets an index identifying the group. */ private void setGroupIndex(String value) { this.groupIndex = value; } /** * Gets a collection of the items in this group. * * @return the items */ public Collection<TItem> getItems() { return this.items; } /** * Sets a collection of the items in this group. */ private void setItems(Collection<TItem> value) { this.items = value; } }
mit
paine1690/cdp4j
src/main/java/io/webfolder/cdp/command/Inspector.java
1498
/** * The MIT License * Copyright © 2017 WebFolder OÜ * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package io.webfolder.cdp.command; import io.webfolder.cdp.annotation.Domain; import io.webfolder.cdp.annotation.Experimental; @Experimental @Domain("Inspector") public interface Inspector { /** * Enables inspector domain notifications. */ void enable(); /** * Disables inspector domain notifications. */ void disable(); }
mit
ilzayas/arbeitsplan
src/Proyecto/TaskBundle/Entity/Client.php
3267
<?php namespace Proyecto\TaskBundle\Entity; use Doctrine\ORM\Mapping as ORM; /** * Client * * @ORM\Table() * @ORM\Entity(repositoryClass="Proyecto\TaskBundle\Entity\ClientRepository") */ class Client { /** * @var integer * * @ORM\Column(name="id", type="integer") * @ORM\Id * @ORM\GeneratedValue(strategy="AUTO") */ private $id; /** * @var string * * @ORM\Column(name="name", type="string", length=60) */ private $name; /** * @var string * * @ORM\Column(name="color", type="string", length=255) */ private $color; /** * @ORM\Column(name="is_active", type="boolean") */ private $isActive; /** * @ORM\ManyToOne(targetEntity="Proyecto\SecurityBundle\Entity\UserProject") * @ORM\JoinColumn(name="user_id", referencedColumnName="id") */ private $userProject; /* * @ORM\OneToMany(targetEntity="Proyecto\TaskBundle\Entity\Task", mappedBy="client") */ private $tasks; /** * @ORM\OneToMany(targetEntity="Proyecto\TaskBundle\Entity\UserTaskClient", mappedBy="client") */ private $userTaskClient; public function __toString() { return $this->name; } /** * Get id * * @return integer */ public function getId() { return $this->id; } /** * Set name * * @param string $name * @return Client */ public function setName($name) { $this->name = $name; return $this; } /** * Get name * * @return string */ public function getName() { return $this->name; } /** * Set color * * @param string $color * @return Client */ public function setColor($color) { $this->color = $color; return $this; } /** * Get color * * @return string */ public function getColor() { return $this->color; } /** * Set userProject * * @param string $userProject * @return Client */ public function setUserProject($userProject) { $this->userProject = $userProject; return $this; } /** * Get userProject * * @return string */ public function getUserProject() { return $this->userProject; } /** * Set userProject * * @param string $userTask * @return Task */ public function setUserTask($userTask, $user) { foreach ($userTask as $userT) { $this->addUserTask($userT, $user); } $this->userTaskClient = $userTask; return $this; } /** * Get userTask * * @return string */ public function getUserTask() { return $this->userTaskClient; } /** * Set isActive * * @param boolean $isActive * @return Client */ public function setIsActive($isActive) { $this->isActive = $isActive; return $this; } /** * Get isActive * * @return boolean */ public function getIsActive() { return $this->isActive; } }
mit
adamspe/odata-resource-file
models/Img.js
6563
var mongoose = require('mongoose'), schema = mongoose.Schema({ fileName: { type: String, trim: true, required: true}, contentType: { type: String, trim: true}, formats: [{ format: {type: String, required: true, default: 'original'}, file: {type: mongoose.Schema.Types.ObjectId, required: true, ref: 'File' } }] }), File = require('./File'), q = require('q'), lwip = require('lwip'), debug = require('debug')('odata-resource-file'); module.exports = function(config) { config = config||{}; config.collection = config.collection||'Image'; schema.set('collection', config.collection); function removeFile(fid) { var def = q.defer(); debug('removeFile',fid); File.findById(fid).exec(function(err,obj){ if(err || !obj) { return def.reject(err); } debug('removing file %s/%s',obj._id,obj.filename); obj.remove(function(err,obj){ if(err) { return def.reject(err); } debug('removed file %s/%s',obj._id,obj.filename); def.resolve(obj); }); }); return def.promise; } function cleanupImg(img){ debug('cleanupImg',img); q.all(img.formats.map(function(f){ return removeFile(f.file); })).done(function(values) { debug('remove results',values.map(function(f){ return f._id; })); }); } function formatImg(original,format) { var def = q.defer(), partsRegex = /^([^\.]+)\.(.*)$/, fileName = original.get('filename'), prefix = fileName.replace(partsRegex,'$1'), extension = fileName.replace(partsRegex,'$2'), type = extension.toLowerCase(), imgBuffers = []; original.getReadStream() .on('data',function(buffer){ imgBuffers.push(buffer); }) .on('end',function(){ lwip.open(Buffer.concat(imgBuffers),type,function(err,image){ if(err) { return def.reject(err); } var batch = image.batch(); format.transformations.forEach(function(txf){ debug('applying trasnformation',txf); batch = batch[txf.fn].apply(batch,txf.args); }); debug('writing %s to buffer with type %s',format.format,type); batch.toBuffer(type,function(err,buffer){ if(err) { return def.reject(err); } def.resolve({ format: format.format, file: { filename: prefix+'_'+format.format+'.'+extension, mimetype: original.contentType, data: buffer } }); }); }); }); return def.promise; } schema.pre('save',function(next){ var thisImg = this; function handleErr(msg,err) { cleanupImg(thisImg); debug('error: %s',msg,err); return next(err); } var originals = this.formats.filter(function(f){ return f.format === 'original'; }), original = originals.length ? originals[0] : undefined; if(!original){ return handleErr('no original',new Error('original not defined')); } if(!config.formats) { debug('saving image %s (original only)', this._id); return next(); } // https://www.npmjs.com/package/lwip File.findById(original.file).exec(function(err,obj){ if(err) { return handleErr('error finding original',err); } q.all(config.formats.map(function(format){ return formatImg(obj,format); })).then(function(translated){ debug('translated',translated); q.all(translated.map(function(tx){ var def = q.defer(); File.storeData(null,tx.file,function(err,f){ if(err) { console.error(err); return def.reject(err); } def.resolve(f); }); return def.promise; })).then(function(files){ translated.forEach(function(format,i){ thisImg.formats.push({ format: format.format, file: files[i] }); }); next(); },next); },next); }); }); schema.post('remove',cleanupImg); var ImageModel = mongoose.model('Image',schema); /** * static utility function for creating a unique file name * for a given format. * * @param {string} original The original file name. * @param {string} format The name of the format. 
* @returns {string} A new filename with the format inserted. */ ImageModel.fileNameFormat = function(original,format){ var partsRegex = /^([^\.]+)\.(.*)$/; return original.replace(partsRegex,'$1')+ '_'+format+'.'+ original.replace(partsRegex,'$2'); }; /** * static utility function for creating a new Image. * * @param {object} file multer input. * @param {Function} callback function(err,image) */ ImageModel.newImage = function(file,callback) { File.storeFile(undefined,file,function(err,f){ if(err) { return Resource.sendError(res,500,'create failure',err); } (new ImageModel({ fileName: f.filename, contentType: f.contentType, formats: [{file: f._id}] })).save(function(err,img){ if(err) { f.remove(/* best effort at cleanup */); return callback(err); } callback(null,img); }); }); }; return ImageModel; };
mit
aszczesn/rma-iqutech
application/modules/diy_customer_type/controllers/Diy_customer_type.php
2207
<?php if (!defined('BASEPATH')) exit('No direct script access allowed'); class Diy_customer_type extends MX_Controller { function __construct() { parent::__construct(); } function index(){ $groups = $this->get('id')->result(); echo '<pre>'; var_dump($groups); echo '</pre>'; } function get($order_by) { $this->load->model('mdl_diy_customer_type'); $query = $this->mdl_diy_customer_type->get($order_by); return $query; } function get_with_limit($limit, $offset, $order_by) { $this->load->model('mdl_diy_customer_type'); $query = $this->mdl_diy_customer_type->get_with_limit($limit, $offset, $order_by); return $query; } function get_where($id) { $this->load->model('mdl_diy_customer_type'); $query = $this->mdl_diy_customer_type->get_where($id); return $query; } function get_where_custom($col, $value) { $this->load->model('mdl_diy_customer_type'); $query = $this->mdl_diy_customer_type->get_where_custom($col, $value); return $query; } function _insert($data) { $this->load->model('mdl_diy_customer_type'); $this->mdl_diy_customer_type->_insert($data); } function _update($id, $data) { $this->load->model('mdl_diy_customer_type'); $this->mdl_diy_customer_type->_update($id, $data); } function _delete($id) { $this->load->model('mdl_diy_customer_type'); $this->mdl_diy_customer_type->_delete($id); } function count_where($column, $value) { $this->load->model('mdl_diy_customer_type'); $count = $this->mdl_diy_customer_type->count_where($column, $value); return $count; } function get_max() { $this->load->model('mdl_diy_customer_type'); $max_id = $this->mdl_diy_customer_type->get_max(); return $max_id; } function _custom_query($mysql_query) { $this->load->model('mdl_diy_customer_type'); $query = $this->mdl_diy_customer_type->_custom_query($mysql_query); return $query; } }
mit
api-platform/core
tests/Serializer/ItemNormalizerTest.php
14494
<?php /* * This file is part of the API Platform project. * * (c) Kévin Dunglas <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ declare(strict_types=1); namespace ApiPlatform\Tests\Serializer; use ApiPlatform\Core\Api\IriConverterInterface; use ApiPlatform\Core\Api\ResourceClassResolverInterface; use ApiPlatform\Core\Metadata\Property\Factory\PropertyMetadataFactoryInterface; use ApiPlatform\Core\Metadata\Property\Factory\PropertyNameCollectionFactoryInterface; use ApiPlatform\Core\Metadata\Property\PropertyNameCollection; use ApiPlatform\Core\Tests\ProphecyTrait; use ApiPlatform\DataTransformer\DataTransformerInterface; use ApiPlatform\Exception\InvalidArgumentException; use ApiPlatform\Metadata\ApiProperty; use ApiPlatform\Serializer\ItemNormalizer; use ApiPlatform\Tests\Fixtures\TestBundle\Dto\OutputDto; use ApiPlatform\Tests\Fixtures\TestBundle\Entity\Dummy; use PHPUnit\Framework\TestCase; use Prophecy\Argument; use Symfony\Component\Serializer\Exception\NotNormalizableValueException; use Symfony\Component\Serializer\Normalizer\DenormalizerInterface; use Symfony\Component\Serializer\Normalizer\NormalizerInterface; use Symfony\Component\Serializer\SerializerInterface; /** * @author Kévin Dunglas <[email protected]> * @group legacy */ class ItemNormalizerTest extends TestCase { use ProphecyTrait; /** * @group legacy */ public function testSupportNormalization() { $std = new \stdClass(); $dummy = new Dummy(); $dummy->setDescription('hello'); $propertyNameCollectionFactoryProphecy = $this->prophesize(PropertyNameCollectionFactoryInterface::class); $propertyMetadataFactoryProphecy = $this->prophesize(PropertyMetadataFactoryInterface::class); $iriConverterProphecy = $this->prophesize(IriConverterInterface::class); $resourceClassResolverProphecy = $this->prophesize(ResourceClassResolverInterface::class); $resourceClassResolverProphecy->isResourceClass(Dummy::class)->willReturn(true); $resourceClassResolverProphecy->isResourceClass(\stdClass::class)->willReturn(false); $normalizer = new ItemNormalizer( $propertyNameCollectionFactoryProphecy->reveal(), $propertyMetadataFactoryProphecy->reveal(), $iriConverterProphecy->reveal(), $resourceClassResolverProphecy->reveal() ); $this->assertTrue($normalizer->supportsNormalization($dummy)); $this->assertTrue($normalizer->supportsNormalization($dummy)); $this->assertFalse($normalizer->supportsNormalization($std)); $this->assertTrue($normalizer->supportsDenormalization($dummy, Dummy::class)); $this->assertTrue($normalizer->supportsDenormalization($dummy, Dummy::class)); $this->assertFalse($normalizer->supportsDenormalization($std, \stdClass::class)); $this->assertTrue($normalizer->hasCacheableSupportsMethod()); } public function testNormalize() { $dummy = new Dummy(); $dummy->setName('hello'); $propertyNameCollection = new PropertyNameCollection(['name']); $propertyNameCollectionFactoryProphecy = $this->prophesize(PropertyNameCollectionFactoryInterface::class); $propertyNameCollectionFactoryProphecy->create(Dummy::class, [])->willReturn($propertyNameCollection); $propertyMetadata = (new ApiProperty())->withReadable(true); $propertyMetadataFactoryProphecy = $this->prophesize(PropertyMetadataFactoryInterface::class); $propertyMetadataFactoryProphecy->create(Dummy::class, 'name', [])->willReturn($propertyMetadata); $iriConverterProphecy = $this->prophesize(IriConverterInterface::class); 
$iriConverterProphecy->getIriFromItem($dummy)->willReturn('/dummies/1'); $resourceClassResolverProphecy = $this->prophesize(ResourceClassResolverInterface::class); $resourceClassResolverProphecy->getResourceClass($dummy, null)->willReturn(Dummy::class); $serializerProphecy = $this->prophesize(SerializerInterface::class); $serializerProphecy->willImplement(NormalizerInterface::class); $serializerProphecy->normalize('hello', null, Argument::type('array'))->willReturn('hello'); $normalizer = new ItemNormalizer( $propertyNameCollectionFactoryProphecy->reveal(), $propertyMetadataFactoryProphecy->reveal(), $iriConverterProphecy->reveal(), $resourceClassResolverProphecy->reveal(), null, null, null, null, false, null, [], null ); $normalizer->setSerializer($serializerProphecy->reveal()); $this->assertEquals(['name' => 'hello'], $normalizer->normalize($dummy, null, ['resources' => []])); } public function testDenormalize() { $context = ['resource_class' => Dummy::class, 'api_allow_update' => true]; $propertyNameCollection = new PropertyNameCollection(['name']); $propertyNameCollectionFactoryProphecy = $this->prophesize(PropertyNameCollectionFactoryInterface::class); $propertyNameCollectionFactoryProphecy->create(Dummy::class, [])->willReturn($propertyNameCollection)->shouldBeCalled(); $propertyMetadata = (new ApiProperty())->withReadable(true)->withWritable(true); $propertyMetadataFactoryProphecy = $this->prophesize(PropertyMetadataFactoryInterface::class); $propertyMetadataFactoryProphecy->create(Dummy::class, 'name', [])->willReturn($propertyMetadata)->shouldBeCalled(); $iriConverterProphecy = $this->prophesize(IriConverterInterface::class); $resourceClassResolverProphecy = $this->prophesize(ResourceClassResolverInterface::class); $resourceClassResolverProphecy->getResourceClass(null, Dummy::class)->willReturn(Dummy::class); $serializerProphecy = $this->prophesize(SerializerInterface::class); $serializerProphecy->willImplement(DenormalizerInterface::class); $normalizer = new ItemNormalizer( $propertyNameCollectionFactoryProphecy->reveal(), $propertyMetadataFactoryProphecy->reveal(), $iriConverterProphecy->reveal(), $resourceClassResolverProphecy->reveal(), null, null, null, null, false, null, [], null ); $normalizer->setSerializer($serializerProphecy->reveal()); $this->assertInstanceOf(Dummy::class, $normalizer->denormalize(['name' => 'hello'], Dummy::class, null, $context)); } public function testDenormalizeWithIri() { $context = ['resource_class' => Dummy::class, 'api_allow_update' => true]; $propertyNameCollection = new PropertyNameCollection(['name']); $propertyNameCollectionFactoryProphecy = $this->prophesize(PropertyNameCollectionFactoryInterface::class); $propertyNameCollectionFactoryProphecy->create(Dummy::class, [])->willReturn($propertyNameCollection)->shouldBeCalled(); $propertyMetadata = (new ApiProperty())->withReadable(true)->withWritable(true); $propertyMetadataFactoryProphecy = $this->prophesize(PropertyMetadataFactoryInterface::class); $propertyMetadataFactoryProphecy->create(Dummy::class, 'id', [])->willReturn($propertyMetadata)->shouldBeCalled(); $propertyMetadataFactoryProphecy->create(Dummy::class, 'name', [])->willReturn($propertyMetadata)->shouldBeCalled(); $iriConverterProphecy = $this->prophesize(IriConverterInterface::class); $iriConverterProphecy->getItemFromIri('/dummies/12', ['resource_class' => Dummy::class, 'api_allow_update' => true, 'fetch_data' => true])->shouldBeCalled(); $resourceClassResolverProphecy = $this->prophesize(ResourceClassResolverInterface::class); 
$resourceClassResolverProphecy->getResourceClass(null, Dummy::class)->willReturn(Dummy::class); $serializerProphecy = $this->prophesize(SerializerInterface::class); $serializerProphecy->willImplement(DenormalizerInterface::class); $normalizer = new ItemNormalizer( $propertyNameCollectionFactoryProphecy->reveal(), $propertyMetadataFactoryProphecy->reveal(), $iriConverterProphecy->reveal(), $resourceClassResolverProphecy->reveal(), null, null, null, null, false, null, [], null ); $normalizer->setSerializer($serializerProphecy->reveal()); $this->assertInstanceOf(Dummy::class, $normalizer->denormalize(['id' => '/dummies/12', 'name' => 'hello'], Dummy::class, null, $context)); } public function testDenormalizeWithIdAndUpdateNotAllowed() { $this->expectException(NotNormalizableValueException::class); $this->expectExceptionMessage('Update is not allowed for this operation.'); $context = ['resource_class' => Dummy::class, 'api_allow_update' => false]; $propertyNameCollectionFactoryProphecy = $this->prophesize(PropertyNameCollectionFactoryInterface::class); $propertyMetadataFactoryProphecy = $this->prophesize(PropertyMetadataFactoryInterface::class); $iriConverterProphecy = $this->prophesize(IriConverterInterface::class); $resourceClassResolverProphecy = $this->prophesize(ResourceClassResolverInterface::class); $serializerProphecy = $this->prophesize(SerializerInterface::class); $serializerProphecy->willImplement(DenormalizerInterface::class); $normalizer = new ItemNormalizer( $propertyNameCollectionFactoryProphecy->reveal(), $propertyMetadataFactoryProphecy->reveal(), $iriConverterProphecy->reveal(), $resourceClassResolverProphecy->reveal(), null, null, null, null, false, null, [], null ); $normalizer->setSerializer($serializerProphecy->reveal()); $normalizer->denormalize(['id' => '12', 'name' => 'hello'], Dummy::class, null, $context); } public function testDenormalizeWithIdAndNoResourceClass() { $context = []; $propertyNameCollection = new PropertyNameCollection(['id', 'name']); $propertyNameCollectionFactoryProphecy = $this->prophesize(PropertyNameCollectionFactoryInterface::class); $propertyNameCollectionFactoryProphecy->create(Dummy::class, [])->willReturn($propertyNameCollection)->shouldBeCalled(); $propertyMetadata = (new ApiProperty())->withReadable(true)->withWritable(true); $propertyMetadataFactoryProphecy = $this->prophesize(PropertyMetadataFactoryInterface::class); $propertyMetadataFactoryProphecy->create(Dummy::class, 'id', [])->willReturn($propertyMetadata)->shouldBeCalled(); $propertyMetadataFactoryProphecy->create(Dummy::class, 'name', [])->willReturn($propertyMetadata)->shouldBeCalled(); $iriConverterProphecy = $this->prophesize(IriConverterInterface::class); $resourceClassResolverProphecy = $this->prophesize(ResourceClassResolverInterface::class); $resourceClassResolverProphecy->getResourceClass(null, Dummy::class)->willReturn(Dummy::class); $serializerProphecy = $this->prophesize(SerializerInterface::class); $serializerProphecy->willImplement(DenormalizerInterface::class); $normalizer = new ItemNormalizer( $propertyNameCollectionFactoryProphecy->reveal(), $propertyMetadataFactoryProphecy->reveal(), $iriConverterProphecy->reveal(), $resourceClassResolverProphecy->reveal(), null, null, null, null, false, null, [], null ); $normalizer->setSerializer($serializerProphecy->reveal()); $object = $normalizer->denormalize(['id' => '42', 'name' => 'hello'], Dummy::class, null, $context); $this->assertInstanceOf(Dummy::class, $object); $this->assertSame('42', $object->getId()); 
$this->assertSame('hello', $object->getName()); } public function testNormalizeWithDataTransformers() { $dummy = new Dummy(); $dummy->setName('hello'); $output = new OutputDto(); $propertyNameCollection = new PropertyNameCollection(['baz']); $propertyNameCollectionFactoryProphecy = $this->prophesize(PropertyNameCollectionFactoryInterface::class); $propertyMetadata = new ApiProperty(null, null, true); $propertyMetadataFactoryProphecy = $this->prophesize(PropertyMetadataFactoryInterface::class); $iriConverterProphecy = $this->prophesize(IriConverterInterface::class); $resourceClassResolverProphecy = $this->prophesize(ResourceClassResolverInterface::class); $resourceClassResolverProphecy->getResourceClass($output, null)->willThrow(InvalidArgumentException::class); $serializerProphecy = $this->prophesize(SerializerInterface::class); $serializerProphecy->willImplement(NormalizerInterface::class); $serializerProphecy->normalize(Argument::type(OutputDto::class), null, Argument::type('array'))->willReturn(['baz' => 'hello'])->shouldBeCalled(); $dataTransformer = $this->prophesize(DataTransformerInterface::class); $dataTransformer->supportsTransformation($dummy, OutputDto::class, Argument::any())->shouldBeCalled()->willReturn(true); $dataTransformer->transform($dummy, OutputDto::class, Argument::any())->shouldBeCalled()->willReturn($output); $normalizer = new ItemNormalizer( $propertyNameCollectionFactoryProphecy->reveal(), $propertyMetadataFactoryProphecy->reveal(), $iriConverterProphecy->reveal(), $resourceClassResolverProphecy->reveal(), null, null, null, null, false, null, [$dataTransformer->reveal()], null ); $normalizer->setSerializer($serializerProphecy->reveal()); $this->assertEquals(['baz' => 'hello'], $normalizer->normalize($dummy, null, ['resources' => [], 'output' => ['class' => OutputDto::class]])); } }
mit
gmhewett/bookshelf
www/Bookshelf/Bookshelf.Tests/Services/LibraryServiceTests.cs
1006
namespace Bookshelf.Tests.Services { using System; using Bookshelf.Models; using Bookshelf.Services; using Microsoft.VisualStudio.TestTools.UnitTesting; using Moq; [TestClass] public class LibraryServiceTests { private readonly Mock<BookshelfDbContext> bookshelfDbContext = new Mock<BookshelfDbContext>(); [TestMethod] [ExpectedException(typeof(ArgumentNullException))] public void LibraryService_BookshelfDbContextIsNull() { var given = new Prior { HasBookshelfDbContext = false }; this.CreateLibraryService(given); } private LibraryService CreateLibraryService(Prior given) { return new LibraryService( given.HasBookshelfDbContext ? this.bookshelfDbContext.Object : null); } internal class Prior { public bool HasBookshelfDbContext { get; set; } = true; } } }
mit
Mostafa-Samir/klyng
tests/benchmarks/tasks/pi/mpi.cpp
1422
#include <mpi.h> #include <ctime> #include <cstdio> #include "../../utilis/cputime.h" double approx_pi(double from, double to) { double pi = 0.0; double dx = 0.000000002; for(double x = from; x < to ; x += dx) { pi += 4.0 / (1 + x * x); } return pi * dx; } int main(int argc, char* argv[]) { int size, rank; MPI_Status status; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if(rank == 0) { double interval_size = 1.0 / size; for(int p = 1; p < size; ++p) { double range[2]; range[0] = p * interval_size; range[1] = (p + 1) * interval_size; MPI_Send(&range, 2, MPI_DOUBLE, p, 0, MPI_COMM_WORLD); } double local_pi = approx_pi(0, interval_size); for(int p = 1; p < size; ++p) { double other_pi = 0.0; MPI_Recv(&other_pi, 1, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); local_pi += other_pi; } printf("%.3f\n", local_pi); } else { double range[2]; MPI_Recv(&range, 2, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &status); double local_pi = approx_pi(range[0], range[1]); MPI_Send(&local_pi, 1, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD); } printf("cputime:%.3f\n", getcputime()); MPI_Finalize(); return 0; }
mit
drcloud/arx
arx/sources/git.py
1610
# from sh import Command, chmod, cp, curl, mkdir # import uritools from ..decorators import schemes from .core import onepath, oneurl, SourceURL, twopaths class Git(SourceURL): """Git respositories as Arx sources. These sources have directory nature by default but do support fragments to indicate that only certain files or directories should be extracted. Query parametes can be used to indicate a particular branch, tag or SHA: .. code:: # Try to find a branch or a tag called beta. git+ssh://abc.example.com/web/server.git?beta # Same as above. git+ssh://abc.example.com/web/server.git?ref=beta # Find a SHA beginning with 0abc3df. git+ssh://abc.example.com/web/server.git?0abc3df # Same as above. git+ssh://abc.example.com/web/server.git?ref=0abc3df The ``git+ssh://`` and ``git+http://`` schemes are passed, minus the leading ``git+``, to ``git``. As discussed in :class:`~arx.sources.files.File`, ``git+file:///`` URLs can point to repositories in home or the project directory using ``/@/~`` or ``/@/.`` or ``/@/..``. To reference the Git repository local to the manifest, use: ``git+file:///@/.``. """ @oneurl @schemes('git+file', 'git+https', 'git+https', 'git+ssh') def __init__(self, url): self.url = url @onepath def cache(self, cache): raise NotImplementedError() @twopaths def place(self, cache, path): raise NotImplementedError() @onepath def run(self, cache, args=[]): raise NotImplementedError()
mit
arussellsaw/influxdb
cmd/influxd/run/server_test.go
285073
package run_test import ( "fmt" "net/http" "net/url" "strconv" "strings" "testing" "time" "github.com/influxdata/influxdb/coordinator" "github.com/influxdata/influxdb/models" ) // Ensure that HTTP responses include the InfluxDB version. func TestServer_HTTPResponseVersion(t *testing.T) { version := "v1234" s := OpenServerWithVersion(NewConfig(), version) defer s.Close() resp, _ := http.Get(s.URL() + "/query") got := resp.Header.Get("X-Influxdb-Version") if got != version { t.Errorf("Server responded with incorrect version, exp %s, got %s", version, got) } } // Ensure the database commands work. func TestServer_DatabaseCommands(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() test := tests.load(t, "database_commands") for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_DropAndRecreateDatabase(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() test := tests.load(t, "drop_and_recreate_database") if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { t.Fatal(err) } for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_DropDatabaseIsolated(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() test := tests.load(t, "drop_database_isolated") if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { t.Fatal(err) } if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicyInfo("rp1", 1, 0)); err != nil { t.Fatal(err) } for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_DeleteSeries(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() test := tests.load(t, "delete_series") if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { t.Fatal(err) } for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_DropAndRecreateSeries(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() test := tests.load(t, "drop_and_recreate_series") if err := 
s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { t.Fatal(err) } for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } // Re-write data and test again. retest := tests.load(t, "drop_and_recreate_series_retest") for i, query := range retest.queries { if i == 0 { if err := retest.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_DropSeriesFromRegex(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() test := tests.load(t, "drop_series_from_regex") if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { t.Fatal(err) } for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure retention policy commands work. func TestServer_RetentionPolicyCommands(t *testing.T) { t.Parallel() c := NewConfig() c.Meta.RetentionAutoCreate = false s := OpenServer(c) defer s.Close() test := tests.load(t, "retention_policy_commands") // Create a database. if _, err := s.MetaClient.CreateDatabase(test.database()); err != nil { t.Fatal(err) } for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the autocreation of retention policy works. func TestServer_DatabaseRetentionPolicyAutoCreate(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() test := tests.load(t, "retention_policy_auto_create") for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure user commands work. func TestServer_UserCommands(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() // Create a database. 
if _, err := s.MetaClient.CreateDatabase("db0"); err != nil { t.Fatal(err) } test := Test{ queries: []*Query{ &Query{ name: "show users, no actual users", command: `SHOW USERS`, exp: `{"results":[{"series":[{"columns":["user","admin"]}]}]}`, }, &Query{ name: `create user`, command: "CREATE USER jdoe WITH PASSWORD '1337'", exp: `{"results":[{}]}`, }, &Query{ name: "show users, 1 existing user", command: `SHOW USERS`, exp: `{"results":[{"series":[{"columns":["user","admin"],"values":[["jdoe",false]]}]}]}`, }, &Query{ name: "grant all priviledges to jdoe", command: `GRANT ALL PRIVILEGES TO jdoe`, exp: `{"results":[{}]}`, }, &Query{ name: "show users, existing user as admin", command: `SHOW USERS`, exp: `{"results":[{"series":[{"columns":["user","admin"],"values":[["jdoe",true]]}]}]}`, }, &Query{ name: "grant DB privileges to user", command: `GRANT READ ON db0 TO jdoe`, exp: `{"results":[{}]}`, }, &Query{ name: "revoke all privileges", command: `REVOKE ALL PRIVILEGES FROM jdoe`, exp: `{"results":[{}]}`, }, &Query{ name: "bad create user request", command: `CREATE USER 0xBAD WITH PASSWORD pwd1337`, exp: `{"error":"error parsing query: found 0, expected identifier at line 1, char 13"}`, }, &Query{ name: "bad create user request, no name", command: `CREATE USER WITH PASSWORD pwd1337`, exp: `{"error":"error parsing query: found WITH, expected identifier at line 1, char 13"}`, }, &Query{ name: "bad create user request, no password", command: `CREATE USER jdoe`, exp: `{"error":"error parsing query: found EOF, expected WITH at line 1, char 18"}`, }, &Query{ name: "drop user", command: `DROP USER jdoe`, exp: `{"results":[{}]}`, }, &Query{ name: "make sure user was dropped", command: `SHOW USERS`, exp: `{"results":[{"series":[{"columns":["user","admin"]}]}]}`, }, &Query{ name: "delete non existing user", command: `DROP USER noone`, exp: `{"results":[{"error":"user not found"}]}`, }, }, } for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(fmt.Sprintf("command: %s - err: %s", query.command, query.Error(err))) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server can create a single point via line protocol with float type and read it back. func TestServer_Write_LineProtocol_Float(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { t.Fatal(err) } now := now() if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=1.0 `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { t.Fatal(err) } else if exp := ``; exp != res { t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) } // Verify the data was written. if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { t.Fatal(err) } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) } } // Ensure the server can create a single point via line protocol with bool type and read it back. 
func TestServer_Write_LineProtocol_Bool(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { t.Fatal(err) } now := now() if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=true `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { t.Fatal(err) } else if exp := ``; exp != res { t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) } // Verify the data was written. if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { t.Fatal(err) } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",true]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) } } // Ensure the server can create a single point via line protocol with string type and read it back. func TestServer_Write_LineProtocol_String(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { t.Fatal(err) } now := now() if res, err := s.Write("db0", "rp0", `cpu,host=server01 value="disk full" `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { t.Fatal(err) } else if exp := ``; exp != res { t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) } // Verify the data was written. if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { t.Fatal(err) } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s","disk full"]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) } } // Ensure the server can create a single point via line protocol with integer type and read it back. func TestServer_Write_LineProtocol_Integer(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { t.Fatal(err) } now := now() if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=100 `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { t.Fatal(err) } else if exp := ``; exp != res { t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) } // Verify the data was written. if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { t.Fatal(err) } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) } } // Ensure the server returns a partial write response when some points fail to parse. Also validate that // the successfully parsed points can be queried. 
func TestServer_Write_LineProtocol_Partial(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { t.Fatal(err) } now := now() points := []string{ "cpu,host=server01 value=100 " + strconv.FormatInt(now.UnixNano(), 10), "cpu,host=server01 value=NaN " + strconv.FormatInt(now.UnixNano(), 20), "cpu,host=server01 value=NaN " + strconv.FormatInt(now.UnixNano(), 30), } if res, err := s.Write("db0", "rp0", strings.Join(points, "\n"), nil); err == nil { t.Fatal("expected error. got nil", err) } else if exp := ``; exp != res { t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) } else if exp := "partial write"; !strings.Contains(err.Error(), exp) { t.Fatalf("unexpected error: exp\nexp: %v\ngot: %v", exp, err) } // Verify the data was written. if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { t.Fatal(err) } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) } } // Ensure the server can query with default databases (via param) and default retention policy func TestServer_Query_DefaultDBAndRP(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf(`cpu value=1.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano())}, } test.addQueries([]*Query{ &Query{ name: "default db and rp", params: url.Values{"db": []string{"db0"}}, command: `SELECT * FROM cpu GROUP BY *`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`, }, &Query{ name: "default rp exists", command: `show retention policies ON db0`, exp: `{"results":[{"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["default","0","168h0m0s",1,false],["rp0","0","168h0m0s",1,true]]}]}]}`, }, &Query{ name: "default rp", command: `SELECT * FROM db0..cpu GROUP BY *`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`, }, &Query{ name: "default dp", params: url.Values{"db": []string{"db0"}}, command: `SELECT * FROM rp0.cpu GROUP BY *`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`, }, }...) if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server can have a database with multiple measurements. 
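
// Aside: the init-then-iterate block above repeats verbatim in nearly every
// test in this file. A consolidating helper would look roughly like the
// sketch below (hypothetical, not part of the original file; Server, Test,
// and Query are the helper types this suite already declares and uses):
func runTestQueries(t *testing.T, s *Server, test *Test) {
	if err := test.init(s); err != nil {
		t.Fatalf("test init failed: %s", err)
	}
	for _, query := range test.queries {
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}
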
func TestServer_Query_Multiple_Measurements(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig())
	defer s.Close()

	// Make sure we do writes for measurements that will span across shards
	writes := []string{
		fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
		fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()),
	}

	test := NewTest("db0", "rp0")
	test.writes = Writes{
		&Write{data: strings.Join(writes, "\n")},
	}

	test.addQueries([]*Query{
		&Query{
			name:    "measurement in one shard but not another shouldn't panic server",
			command: `SELECT host,value FROM db0.rp0.cpu`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["2000-01-01T00:00:00Z","server01",100]]}]}]}`,
		},
		&Query{
			name:    "measurement in one shard but not another shouldn't panic server",
			command: `SELECT host,value FROM db0.rp0.cpu GROUP BY host`,
			exp:     `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","host","value"],"values":[["2000-01-01T00:00:00Z","server01",100]]}]}]}`,
		},
	}...)

	if err := test.init(s); err != nil {
		t.Fatalf("test init failed: %s", err)
	}

	for _, query := range test.queries {
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}

// Ensure the server correctly supports data with identical tag values.
func TestServer_Query_IdenticalTagValues(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig())
	defer s.Close()

	writes := []string{
		fmt.Sprintf("cpu,t1=val1 value=1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
		fmt.Sprintf("cpu,t2=val2 value=2 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()),
		fmt.Sprintf("cpu,t1=val2 value=3 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:02:00Z").UnixNano()),
	}

	test := NewTest("db0", "rp0")
	test.writes = Writes{
		&Write{data: strings.Join(writes, "\n")},
	}

	test.addQueries([]*Query{
		&Query{
			name:    "measurements with identical tag values - SELECT *, with GROUP BY *",
			command: `SELECT * FROM db0.rp0.cpu GROUP BY *`,
			exp:     `{"results":[{"series":[{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]},{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]}]}]}`,
		},
		&Query{
			name:    "measurements with identical tag values - SELECT value, with GROUP BY t1,t2",
			command: `SELECT value FROM db0.rp0.cpu GROUP BY t1,t2`,
			exp:     `{"results":[{"series":[{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]},{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]}]}]}`,
		},
		&Query{
			name:    "measurements with identical tag values - SELECT value, no GROUP BY",
			command: `SELECT value FROM db0.rp0.cpu`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1],["2000-01-01T00:01:00Z",2],["2000-01-01T00:02:00Z",3]]}]}]}`,
		},
	}...)
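	// Note the expected JSON above: under GROUP BY, a series that lacks one of
	// the grouped tags still reports that tag, with an empty-string value
	// (e.g. "t1":""), so points tagged only with t2 form their own series.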
if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server can handle a query that involves accessing no shards. func TestServer_Query_NoShards(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() now := now() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: `cpu,host=server01 value=1 ` + strconv.FormatInt(now.UnixNano(), 10)}, } test.addQueries([]*Query{ &Query{ name: "selecting value should succeed", command: `SELECT value FROM db0.rp0.cpu WHERE time < now() - 1d`, exp: `{"results":[{}]}`, }, }...) if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server can query a non-existent field func TestServer_Query_NonExistent(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() now := now() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: `cpu,host=server01 value=1 ` + strconv.FormatInt(now.UnixNano(), 10)}, } test.addQueries([]*Query{ &Query{ name: "selecting value should succeed", command: `SELECT value FROM db0.rp0.cpu`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "selecting non-existent should succeed", command: `SELECT foo FROM db0.rp0.cpu`, exp: `{"results":[{}]}`, }, }...) 
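	// Note: selecting a field that was never written ("foo") is not an error;
	// the expected response above is the empty result set {"results":[{}]}.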
if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server can perform basic math func TestServer_Query_Math(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() now := now() writes := []string{ "float value=42 " + strconv.FormatInt(now.UnixNano(), 10), "integer value=42i " + strconv.FormatInt(now.UnixNano(), 10), } test := NewTest("db", "rp") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "SELECT multiple of float value", command: `SELECT value * 2 from db.rp.float`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT multiple of float value", command: `SELECT 2 * value from db.rp.float`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT multiple of integer value", command: `SELECT value * 2 from db.rp.integer`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT float multiple of integer value", command: `SELECT value * 2.0 from db.rp.integer`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT square of float value", command: `SELECT value * value from db.rp.float`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT square of integer value", command: `SELECT value * value from db.rp.integer`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT square of integer, float value", command: `SELECT value * value,float from db.rp.integer`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value_value","float"],"values":[["%s",1764,null]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT square of integer value with alias", command: `SELECT value * value as square from db.rp.integer`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","square"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT sum of aggregates", command: `SELECT max(value) + min(value) from db.rp.integer`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","max_min"],"values":[["1970-01-01T00:00:00Z",84]]}]}]}`), }, &Query{ name: "SELECT square of enclosed integer value", command: `SELECT ((value) * (value)) from db.rp.integer`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "SELECT square of enclosed integer value", command: `SELECT (value * value) from db.rp.integer`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), }, }...) 
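	// Sanity check for the expectations above: both points carry value=42, so
	// value*2 = 84, value*value = 42*42 = 1764, and max(value)+min(value) over
	// a single point is 42+42 = 84. Mixed int/float math (value * 2.0) also
	// yields 84.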
if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server can query with the count aggregate function func TestServer_Query_Count(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() now := now() test := NewTest("db0", "rp0") writes := []string{ `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10), `ram value1=1.0,value2=2.0 ` + strconv.FormatInt(now.UnixNano(), 10), } test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } hour_ago := now.Add(-time.Hour).UTC() test.addQueries([]*Query{ &Query{ name: "selecting count(value) should succeed", command: `SELECT count(value) FROM db0.rp0.cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, }, &Query{ name: "selecting count(value) with where time should return result", command: fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE time >= '%s'`, hour_ago.Format(time.RFC3339Nano)), exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["%s",1]]}]}]}`, hour_ago.Format(time.RFC3339Nano)), }, &Query{ name: "selecting count(value) with filter that excludes all results should return 0", command: fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE value=100 AND time >= '%s'`, hour_ago.Format(time.RFC3339Nano)), exp: `{"results":[{}]}`, }, &Query{ name: "selecting count(value1) with matching filter against value2 should return correct result", command: fmt.Sprintf(`SELECT count(value1) FROM db0.rp0.ram WHERE value2=2 AND time >= '%s'`, hour_ago.Format(time.RFC3339Nano)), exp: fmt.Sprintf(`{"results":[{"series":[{"name":"ram","columns":["time","count"],"values":[["%s",1]]}]}]}`, hour_ago.Format(time.RFC3339Nano)), }, &Query{ name: "selecting count(value1) with non-matching filter against value2 should return correct result", command: fmt.Sprintf(`SELECT count(value1) FROM db0.rp0.ram WHERE value2=3 AND time >= '%s'`, hour_ago.Format(time.RFC3339Nano)), exp: `{"results":[{}]}`, }, &Query{ name: "selecting count(*) should error", command: `SELECT count(*) FROM db0.rp0.cpu`, exp: `{"error":"error parsing query: expected field argument in count()"}`, }, }...) if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server can query with Now(). 
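
// Aside (hypothetical helper, not part of the original suite): the count
// tests above build absolute time bounds by formatting a time.Time with
// RFC3339Nano, the same layout the server echoes back in result timestamps:
func exampleTimeBoundQuery(since time.Time) string {
	return fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE time >= '%s'`, since.Format(time.RFC3339Nano))
}
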
func TestServer_Query_Now(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() now := now() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10)}, } test.addQueries([]*Query{ &Query{ name: "where with time < now() should work", command: `SELECT * FROM db0.rp0.cpu where time < now()`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["%s","server01",1]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "where with time < now() and GROUP BY * should work", command: `SELECT * FROM db0.rp0.cpu where time < now() GROUP BY *`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), }, &Query{ name: "where with time > now() should return an empty result", command: `SELECT * FROM db0.rp0.cpu where time > now()`, exp: `{"results":[{}]}`, }, &Query{ name: "where with time > now() with GROUP BY * should return an empty result", command: `SELECT * FROM db0.rp0.cpu where time > now() GROUP BY *`, exp: `{"results":[{}]}`, }, }...) if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server can query with epoch precisions. func TestServer_Query_EpochPrecision(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() now := now() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10)}, } test.addQueries([]*Query{ &Query{ name: "nanosecond precision", command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, params: url.Values{"epoch": []string{"n"}}, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()), }, &Query{ name: "microsecond precision", command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, params: url.Values{"epoch": []string{"u"}}, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Microsecond)), }, &Query{ name: "millisecond precision", command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, params: url.Values{"epoch": []string{"ms"}}, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Millisecond)), }, &Query{ name: "second precision", command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, params: url.Values{"epoch": []string{"s"}}, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Second)), }, &Query{ name: "minute precision", command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, params: url.Values{"epoch": []string{"m"}}, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Minute)), }, &Query{ name: "hour precision", command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, params: url.Values{"epoch": []string{"h"}}, exp: 
fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Hour)), }, }...) if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server works with tag queries. func TestServer_Query_Tags(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() now := now() writes := []string{ fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", now.UnixNano()), fmt.Sprintf("cpu,host=server02 value=50,core=2 %d", now.Add(1).UnixNano()), fmt.Sprintf("cpu1,host=server01,region=us-west value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), fmt.Sprintf("cpu1,host=server02 value=200 %d", mustParseTime(time.RFC3339Nano, "2010-02-28T01:03:37.703820946Z").UnixNano()), fmt.Sprintf("cpu1,host=server03 value=300 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), fmt.Sprintf("cpu2,host=server01 value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), fmt.Sprintf("cpu2 value=200 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), fmt.Sprintf("cpu3,company=acme01 value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), fmt.Sprintf("cpu3 value=200 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), fmt.Sprintf("status_code,url=http://www.example.com value=404 %d", mustParseTime(time.RFC3339Nano, "2015-07-22T08:13:54.929026672Z").UnixNano()), fmt.Sprintf("status_code,url=https://influxdb.com value=418 %d", mustParseTime(time.RFC3339Nano, "2015-07-22T09:52:24.914395083Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "tag without field should return error", command: `SELECT host FROM db0.rp0.cpu`, exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, skip: true, // FIXME(benbjohnson): tags should stream as values }, &Query{ name: "field with tag should succeed", command: `SELECT host, value FROM db0.rp0.cpu`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["%s","server01",100],["%s","server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), }, &Query{ name: "field with tag and GROUP BY should succeed", command: `SELECT host, value FROM db0.rp0.cpu GROUP BY host`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","host","value"],"values":[["%s","server01",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","host","value"],"values":[["%s","server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), }, &Query{ name: "field with two tags should succeed", command: `SELECT host, value, core FROM db0.rp0.cpu`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","host","value","core"],"values":[["%s","server01",100,4],["%s","server02",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), }, &Query{ name: "field with two tags and GROUP BY should succeed", command: `SELECT host, value, core 
FROM db0.rp0.cpu GROUP BY host`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","host","value","core"],"values":[["%s","server01",100,4]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","host","value","core"],"values":[["%s","server02",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), }, &Query{ name: "select * with tags should succeed", command: `SELECT * FROM db0.rp0.cpu`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","core","host","value"],"values":[["%s",4,"server01",100],["%s",2,"server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), }, &Query{ name: "select * with tags with GROUP BY * should succeed", command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","core","value"],"values":[["%s",4,100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","core","value"],"values":[["%s",2,50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), }, &Query{ name: "group by tag", command: `SELECT value FROM db0.rp0.cpu GROUP by host`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), }, &Query{ name: "single field (EQ tag value1)", command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01'`, exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, }, &Query{ name: "single field (2 EQ tags)", command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' AND region = 'us-west'`, exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, }, &Query{ name: "single field (OR different tags)", command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server03' OR region = 'us-west'`, exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, }, &Query{ name: "single field (OR with non-existent tag value)", command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' OR host = 'server66'`, exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, }, &Query{ name: "single field (OR with all tag values)", command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' OR host = 'server02' OR host = 'server03'`, exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, }, &Query{ name: "single field (1 EQ and 1 NEQ tag)", command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' AND region != 'us-west'`, exp: `{"results":[{}]}`, }, &Query{ name: "single field (EQ tag value2)", command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server02'`, exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200]]}]}]}`, }, &Query{ name: "single field (NEQ tag value1)", command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01'`, exp: 
`{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300]]}]}]}`, }, &Query{ name: "single field (NEQ tag value1 AND NEQ tag value2)", command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' AND host != 'server02'`, exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",300]]}]}]}`, }, &Query{ name: "single field (NEQ tag value1 OR NEQ tag value2)", command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' OR host != 'server02'`, // Yes, this is always true, but that's the point. exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, }, &Query{ name: "single field (NEQ tag value1 AND NEQ tag value2 AND NEQ tag value3)", command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' AND host != 'server02' AND host != 'server03'`, exp: `{"results":[{}]}`, }, &Query{ name: "single field (NEQ tag value1, point without any tags)", command: `SELECT value FROM db0.rp0.cpu2 WHERE host != 'server01'`, exp: `{"results":[{"series":[{"name":"cpu2","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200]]}]}]}`, }, &Query{ name: "single field (NEQ tag value1, point without any tags)", command: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme01/`, exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200]]}]}]}`, }, &Query{ name: "single field (regex tag match)", command: `SELECT value FROM db0.rp0.cpu3 WHERE company =~ /acme01/`, exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, }, &Query{ name: "single field (regex tag match)", command: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme[23]/`, exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, }, &Query{ name: "single field (regex tag match with escaping)", command: `SELECT value FROM db0.rp0.status_code WHERE url !~ /https\:\/\/influxdb\.com/`, exp: `{"results":[{"series":[{"name":"status_code","columns":["time","value"],"values":[["2015-07-22T08:13:54.929026672Z",404]]}]}]}`, }, &Query{ name: "single field (regex tag match with escaping)", command: `SELECT value FROM db0.rp0.status_code WHERE url =~ /https\:\/\/influxdb\.com/`, exp: `{"results":[{"series":[{"name":"status_code","columns":["time","value"],"values":[["2015-07-22T09:52:24.914395083Z",418]]}]}]}`, }, }...) if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server correctly queries with an alias. 
func TestServer_Query_Alias(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() writes := []string{ fmt.Sprintf("cpu value=1i,steps=3i %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf("cpu value=2i,steps=4i %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "baseline query - SELECT * FROM db0.rp0.cpu", command: `SELECT * FROM db0.rp0.cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","steps","value"],"values":[["2000-01-01T00:00:00Z",3,1],["2000-01-01T00:01:00Z",4,2]]}]}]}`, }, &Query{ name: "basic query with alias - SELECT steps, value as v FROM db0.rp0.cpu", command: `SELECT steps, value as v FROM db0.rp0.cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","steps","v"],"values":[["2000-01-01T00:00:00Z",3,1],["2000-01-01T00:01:00Z",4,2]]}]}]}`, }, &Query{ name: "double aggregate sum - SELECT sum(value), sum(steps) FROM db0.rp0.cpu", command: `SELECT sum(value), sum(steps) FROM db0.rp0.cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","sum_1"],"values":[["1970-01-01T00:00:00Z",3,7]]}]}]}`, }, &Query{ name: "double aggregate sum reverse order - SELECT sum(steps), sum(value) FROM db0.rp0.cpu", command: `SELECT sum(steps), sum(value) FROM db0.rp0.cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","sum_1"],"values":[["1970-01-01T00:00:00Z",7,3]]}]}]}`, }, &Query{ name: "double aggregate sum with alias - SELECT sum(value) as sumv, sum(steps) as sums FROM db0.rp0.cpu", command: `SELECT sum(value) as sumv, sum(steps) as sums FROM db0.rp0.cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sumv","sums"],"values":[["1970-01-01T00:00:00Z",3,7]]}]}]}`, }, &Query{ name: "double aggregate with same value - SELECT sum(value), mean(value) FROM db0.rp0.cpu", command: `SELECT sum(value), mean(value) FROM db0.rp0.cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",3,1.5]]}]}]}`, }, &Query{ name: "double aggregate with same value and same alias - SELECT mean(value) as mv, max(value) as mv FROM db0.rp0.cpu", command: `SELECT mean(value) as mv, max(value) as mv FROM db0.rp0.cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","mv","mv"],"values":[["1970-01-01T00:00:00Z",1.5,2]]}]}]}`, }, &Query{ name: "double aggregate with non-existent field - SELECT mean(value), max(foo) FROM db0.rp0.cpu", command: `SELECT mean(value), max(foo) FROM db0.rp0.cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","mean","max"],"values":[["1970-01-01T00:00:00Z",1.5,null]]}]}]}`, }, }...) if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server will succeed and error for common scenarios. 
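
// Aside (hypothetical, not part of the original suite): every exp string in
// this file is the raw JSON body of a query response. The shape under
// comparison decodes roughly as follows; per-statement errors nest inside
// "results", while parse errors appear as a top-level "error":
type exampleQueryResponse struct {
	Results []struct {
		Series []struct {
			Name    string            `json:"name"`
			Tags    map[string]string `json:"tags"`
			Columns []string          `json:"columns"`
			Values  [][]interface{}   `json:"values"`
		} `json:"series"`
		Err string `json:"error"`
	} `json:"results"`
	Err string `json:"error"`
}
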
func TestServer_Query_Common(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig())
	defer s.Close()

	now := now()

	test := NewTest("db0", "rp0")
	test.writes = Writes{
		&Write{data: fmt.Sprintf("cpu,host=server01 value=1 %s", strconv.FormatInt(now.UnixNano(), 10))},
	}

	test.addQueries([]*Query{
		&Query{
			name:    "selecting from a non-existent database should error",
			command: `SELECT value FROM db1.rp0.cpu`,
			exp:     `{"results":[{"error":"database not found: db1"}]}`,
		},
		&Query{
			name:    "selecting from a non-existent retention policy should error",
			command: `SELECT value FROM db0.rp1.cpu`,
			exp:     `{"results":[{"error":"retention policy not found: rp1"}]}`,
		},
		&Query{
			name:    "selecting a valid measurement and field should succeed",
			command: `SELECT value FROM db0.rp0.cpu`,
			exp:     fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)),
		},
		&Query{
			name:    "explicitly selecting time and a valid measurement and field should succeed",
			command: `SELECT time,value FROM db0.rp0.cpu`,
			exp:     fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)),
		},
		&Query{
			name:    "selecting a measurement that doesn't exist should result in empty set",
			command: `SELECT value FROM db0.rp0.idontexist`,
			exp:     `{"results":[{}]}`,
		},
		&Query{
			name:    "selecting a field that doesn't exist should result in empty set",
			command: `SELECT idontexist FROM db0.rp0.cpu`,
			exp:     `{"results":[{}]}`,
		},
		&Query{
			name:    "selecting wildcard without specifying a database should error",
			command: `SELECT * FROM cpu`,
			exp:     `{"results":[{"error":"database name required"}]}`,
		},
		&Query{
			name:    "selecting explicit field without specifying a database should error",
			command: `SELECT value FROM cpu`,
			exp:     `{"results":[{"error":"database name required"}]}`,
		},
	}...)

	if err := test.init(s); err != nil {
		t.Fatalf("test init failed: %s", err)
	}

	for _, query := range test.queries {
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}

// Ensure the server can query two points.
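
// Aside (hypothetical helper, not part of the original suite): writes in
// these tests append a base-10 nanosecond timestamp, always built with
// strconv.FormatInt(t.UnixNano(), 10). Pulled out, the pattern is just:
func exampleNanoTimestamp(t time.Time) string {
	return strconv.FormatInt(t.UnixNano(), 10)
}
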
func TestServer_Query_SelectTwoPoints(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() now := now() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf("cpu value=100 %s\ncpu value=200 %s", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10))}, } test.addQueries( &Query{ name: "selecting two points should result in two points", command: `SELECT * FROM db0.rp0.cpu`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), }, &Query{ name: "selecting two points with GROUP BY * should result in two points", command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), }, ) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server can query two negative points. func TestServer_Query_SelectTwoNegativePoints(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() now := now() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf("cpu value=-100 %s\ncpu value=-200 %s", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10))}, } test.addQueries(&Query{ name: "selecting two negative points should succeed", command: `SELECT * FROM db0.rp0.cpu`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",-100],["%s",-200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), }) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server can query with relative time. func TestServer_Query_SelectRelativeTime(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() now := now() yesterday := yesterday() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf("cpu,host=server01 value=100 %s\ncpu,host=server01 value=200 %s", strconv.FormatInt(yesterday.UnixNano(), 10), strconv.FormatInt(now.UnixNano(), 10))}, } test.addQueries([]*Query{ &Query{ name: "single point with time pre-calculated for past time queries yesterday", command: `SELECT * FROM db0.rp0.cpu where time >= '` + yesterday.Add(-1*time.Minute).Format(time.RFC3339Nano) + `' GROUP BY *`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, yesterday.Format(time.RFC3339Nano), now.Format(time.RFC3339Nano)), }, &Query{ name: "single point with time pre-calculated for relative time queries now", command: `SELECT * FROM db0.rp0.cpu where time >= now() - 1m GROUP BY *`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",200]]}]}]}`, now.Format(time.RFC3339Nano)), }, }...) 
	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}

// Ensure the server can handle various simple derivative queries.
func TestServer_Query_SelectRawDerivative(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig())
	defer s.Close()

	test := NewTest("db0", "rp0")
	test.writes = Writes{
		&Write{data: fmt.Sprintf("cpu value=210 1278010021000000000\ncpu value=10 1278010022000000000")},
	}

	test.addQueries([]*Query{
		&Query{
			name:    "calculate single derivative",
			command: `SELECT derivative(value) from db0.rp0.cpu`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-200]]}]}]}`,
		},
		&Query{
			name:    "calculate derivative with unit",
			command: `SELECT derivative(value, 10s) from db0.rp0.cpu`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-2000]]}]}]}`,
		},
	}...)

	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}

// Ensure the server can handle various simple non_negative_derivative queries.
func TestServer_Query_SelectRawNonNegativeDerivative(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig())
	defer s.Close()

	test := NewTest("db0", "rp0")
	test.writes = Writes{
		&Write{data: fmt.Sprintf(`cpu value=10 1278010021000000000
cpu value=15 1278010022000000000
cpu value=10 1278010023000000000
cpu value=20 1278010024000000000
`)},
	}

	test.addQueries([]*Query{
		&Query{
			name:    "calculate single non_negative_derivative",
			command: `SELECT non_negative_derivative(value) from db0.rp0.cpu`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","non_negative_derivative"],"values":[["2010-07-01T18:47:02Z",5],["2010-07-01T18:47:04Z",10]]}]}]}`,
		},
		&Query{
			name:    "calculate non_negative_derivative with unit",
			command: `SELECT non_negative_derivative(value, 10s) from db0.rp0.cpu`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","non_negative_derivative"],"values":[["2010-07-01T18:47:02Z",50],["2010-07-01T18:47:04Z",100]]}]}]}`,
		},
	}...)

	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}

// Ensure the server can handle various group by time derivative queries.
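
// Aside (hypothetical sketch, not part of the original suite): the raw
// derivative expectations above follow directly from
// (current - previous) / elapsed * unit. For cpu going 210 -> 10 over one
// second, that is -200 with the default 1s unit and -2000 with a 10s unit:
func exampleDerivative(prev, cur float64, elapsed, unit time.Duration) float64 {
	return (cur - prev) / float64(elapsed) * float64(unit)
}
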
func TestServer_Query_SelectGroupByTimeDerivative(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 cpu value=15 1278010021000000000 cpu value=20 1278010022000000000 cpu value=25 1278010023000000000 `)}, } test.addQueries([]*Query{ &Query{ name: "calculate derivative of count with unit default (2s) group by time", command: `SELECT derivative(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of count with unit 4s group by time", command: `SELECT derivative(count(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",4],["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of mean with unit default (2s) group by time", command: `SELECT derivative(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, &Query{ name: "calculate derivative of mean with unit 4s group by time", command: `SELECT derivative(mean(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, }, &Query{ name: "calculate derivative of median with unit default (2s) group by time", command: `SELECT derivative(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, &Query{ name: "calculate derivative of median with unit 4s group by time", command: `SELECT derivative(median(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, }, &Query{ name: "calculate derivative of sum with unit default (2s) group by time", command: `SELECT derivative(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, }, &Query{ name: "calculate derivative of sum with unit 4s group by time", command: `SELECT derivative(sum(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",40]]}]}]}`, }, &Query{ name: "calculate derivative of first with unit default (2s) group by time", command: `SELECT derivative(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, &Query{ name: "calculate derivative of first with unit 4s group by time", command: `SELECT derivative(first(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, }, &Query{ name: "calculate derivative of last with unit default (2s) group by time", command: `SELECT derivative(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, &Query{ name: "calculate derivative of last with unit 4s group by time", command: `SELECT derivative(last(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, }, &Query{ name: "calculate derivative of min with unit default (2s) group by time", command: `SELECT derivative(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, &Query{ name: "calculate derivative of min with unit 4s group by time", command: `SELECT derivative(min(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, }, &Query{ name: "calculate derivative of max with unit default (2s) group by time", command: `SELECT derivative(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, &Query{ name: "calculate derivative of max with unit 4s group by time", command: `SELECT derivative(max(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, }, &Query{ name: "calculate derivative of percentile with unit default (2s) group by time", command: `SELECT derivative(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, &Query{ name: "calculate derivative of percentile with unit 4s group by time", command: `SELECT derivative(percentile(value, 50), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, }, }...) 
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server can handle various group by time derivative queries. func TestServer_Query_SelectGroupByTimeDerivativeWithFill(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 cpu value=20 1278010021000000000 `)}, } test.addQueries([]*Query{ &Query{ name: "calculate derivative of count with unit default (2s) group by time with fill 0", command: `SELECT derivative(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",-2]]}]}]}`, }, &Query{ name: "calculate derivative of count with unit 4s group by time with fill 0", command: `SELECT derivative(count(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",4],["2010-07-01T18:47:02Z",-4]]}]}]}`, }, &Query{ name: "calculate derivative of count with unit default (2s) group by time with fill previous", command: `SELECT derivative(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of count with unit 4s group by time with fill previous", command: `SELECT derivative(count(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of mean with unit default (2s) group by time with fill 0", command: `SELECT derivative(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",-15]]}]}]}`, }, &Query{ name: "calculate derivative of mean with unit 4s group by time with fill 0", command: `SELECT derivative(mean(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",-30]]}]}]}`, }, &Query{ name: "calculate derivative of mean with unit default (2s) group by time with fill previous", command: `SELECT derivative(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of mean with unit 4s group by time with fill 
previous", command: `SELECT derivative(mean(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of median with unit default (2s) group by time with fill 0", command: `SELECT derivative(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",-15]]}]}]}`, }, &Query{ name: "calculate derivative of median with unit 4s group by time with fill 0", command: `SELECT derivative(median(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",-30]]}]}]}`, }, &Query{ name: "calculate derivative of median with unit default (2s) group by time with fill previous", command: `SELECT derivative(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of median with unit 4s group by time with fill previous", command: `SELECT derivative(median(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of sum with unit default (2s) group by time with fill 0", command: `SELECT derivative(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",-30]]}]}]}`, }, &Query{ name: "calculate derivative of sum with unit 4s group by time with fill 0", command: `SELECT derivative(sum(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",60],["2010-07-01T18:47:02Z",-60]]}]}]}`, }, &Query{ name: "calculate derivative of sum with unit default (2s) group by time with fill previous", command: `SELECT derivative(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of sum with unit 4s group by time with fill previous", command: `SELECT derivative(sum(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of first with unit default (2s) group by time with fill 0", 
command: `SELECT derivative(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, }, &Query{ name: "calculate derivative of first with unit 4s group by time with fill 0", command: `SELECT derivative(first(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, }, &Query{ name: "calculate derivative of first with unit default (2s) group by time with fill previous", command: `SELECT derivative(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of first with unit 4s group by time with fill previous", command: `SELECT derivative(first(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of last with unit default (2s) group by time with fill 0", command: `SELECT derivative(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, }, &Query{ name: "calculate derivative of last with unit 4s group by time with fill 0", command: `SELECT derivative(last(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",40],["2010-07-01T18:47:02Z",-40]]}]}]}`, }, &Query{ name: "calculate derivative of last with unit default (2s) group by time with fill previous", command: `SELECT derivative(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of last with unit 4s group by time with fill previous", command: `SELECT derivative(last(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of min with unit default (2s) group by time with fill 0", command: `SELECT derivative(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, }, &Query{ name: "calculate derivative of min with unit 4s group by time with fill 0", command: 
`SELECT derivative(min(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, }, &Query{ name: "calculate derivative of min with unit default (2s) group by time with fill previous", command: `SELECT derivative(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of min with unit 4s group by time with fill previous", command: `SELECT derivative(min(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of max with unit default (2s) group by time with fill 0", command: `SELECT derivative(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, }, &Query{ name: "calculate derivative of max with unit 4s group by time with fill 0", command: `SELECT derivative(max(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",40],["2010-07-01T18:47:02Z",-40]]}]}]}`, }, &Query{ name: "calculate derivative of max with unit default (2s) group by time with fill previous", command: `SELECT derivative(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of max with unit 4s group by time with fill previous", command: `SELECT derivative(max(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of percentile with unit default (2s) group by time with fill 0", command: `SELECT derivative(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, }, &Query{ name: "calculate derivative of percentile with unit 4s group by time with fill 0", command: `SELECT derivative(percentile(value, 50), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, }, &Query{ name: "calculate derivative of percentile with unit default (2s) group by time 
with fill previous", command: `SELECT derivative(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate derivative of percentile with unit 4s group by time with fill previous", command: `SELECT derivative(percentile(value, 50), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server can handle various group by time difference queries. func TestServer_Query_SelectGroupByTimeDifference(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 cpu value=15 1278010021000000000 cpu value=20 1278010022000000000 cpu value=25 1278010023000000000 `)}, } test.addQueries([]*Query{ &Query{ name: "calculate difference of count", command: `SELECT difference(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate difference of mean", command: `SELECT difference(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, &Query{ name: "calculate difference of median", command: `SELECT difference(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, &Query{ name: "calculate difference of sum", command: `SELECT difference(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, }, &Query{ name: "calculate difference of first", command: `SELECT difference(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, &Query{ name: "calculate difference of last", command: `SELECT difference(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, &Query{ name: "calculate difference of min", command: `SELECT difference(min(value)) from db0.rp0.cpu where time >= '2010-07-01 
18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, &Query{ name: "calculate difference of max", command: `SELECT difference(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, &Query{ name: "calculate difference of percentile", command: `SELECT difference(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server can handle various group by time difference queries. func TestServer_Query_SelectGroupByTimeDifferenceWithFill(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 cpu value=20 1278010021000000000 `)}, } test.addQueries([]*Query{ &Query{ name: "calculate difference of count with fill 0", command: `SELECT difference(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",-2]]}]}]}`, }, &Query{ name: "calculate difference of count with fill previous", command: `SELECT difference(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate difference of mean with fill 0", command: `SELECT difference(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",-15]]}]}]}`, }, &Query{ name: "calculate difference of mean with fill previous", command: `SELECT difference(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate difference of median with fill 0", command: `SELECT difference(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",-15]]}]}]}`, }, &Query{ name: "calculate difference of median with fill previous", command: `SELECT difference(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' 
group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate difference of sum with fill 0", command: `SELECT difference(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",-30]]}]}]}`, }, &Query{ name: "calculate difference of sum with fill previous", command: `SELECT difference(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate difference of first with fill 0", command: `SELECT difference(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, }, &Query{ name: "calculate difference of first with fill previous", command: `SELECT difference(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate difference of last with fill 0", command: `SELECT difference(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, }, &Query{ name: "calculate difference of last with fill previous", command: `SELECT difference(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate difference of min with fill 0", command: `SELECT difference(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, }, &Query{ name: "calculate difference of min with fill previous", command: `SELECT difference(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate difference of max with fill 0", command: `SELECT difference(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, }, &Query{ name: "calculate difference of max with fill previous", command: `SELECT difference(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and 
time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, &Query{ name: "calculate difference of percentile with fill 0", command: `SELECT difference(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, }, &Query{ name: "calculate difference of percentile with fill previous", command: `SELECT difference(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server can handle various group by time moving average queries. func TestServer_Query_SelectGroupByTimeMovingAverage(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 cpu value=15 1278010021000000000 cpu value=20 1278010022000000000 cpu value=25 1278010023000000000 cpu value=30 1278010024000000000 cpu value=35 1278010025000000000 `)}, } test.addQueries([]*Query{ &Query{ name: "calculate moving average of count", command: `SELECT moving_average(count(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",1],["2010-07-01T18:47:02Z",2],["2010-07-01T18:47:04Z",2]]}]}]}`, }, &Query{ name: "calculate moving average of mean", command: `SELECT moving_average(mean(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",17.5],["2010-07-01T18:47:04Z",27.5]]}]}]}`, }, &Query{ name: "calculate moving average of median", command: `SELECT moving_average(median(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",17.5],["2010-07-01T18:47:04Z",27.5]]}]}]}`, }, &Query{ name: "calculate moving average of sum", command: `SELECT moving_average(sum(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",35],["2010-07-01T18:47:04Z",55]]}]}]}`, }, &Query{ name: "calculate moving average of first", command: `SELECT moving_average(first(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, }, &Query{ name: "calculate moving average of last", command: `SELECT moving_average(last(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",20],["2010-07-01T18:47:04Z",30]]}]}]}`, }, &Query{ name: "calculate moving average of min", command: `SELECT moving_average(min(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, }, &Query{ name: "calculate moving average of max", command: `SELECT moving_average(max(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",20],["2010-07-01T18:47:04Z",30]]}]}]}`, }, &Query{ name: "calculate moving average of percentile", command: `SELECT moving_average(percentile(value, 50), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Ensure the server can handle various group by time moving average queries. 
func TestServer_Query_SelectGroupByTimeMovingAverageWithFill(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 cpu value=15 1278010021000000000 cpu value=30 1278010024000000000 cpu value=35 1278010025000000000 `)}, } test.addQueries([]*Query{ &Query{ name: "calculate moving average of count with fill 0", command: `SELECT moving_average(count(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",1],["2010-07-01T18:47:02Z",1],["2010-07-01T18:47:04Z",1]]}]}]}`, }, &Query{ name: "calculate moving average of count with fill previous", command: `SELECT moving_average(count(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",2],["2010-07-01T18:47:04Z",2]]}]}]}`, }, &Query{ name: "calculate moving average of mean with fill 0", command: `SELECT moving_average(mean(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",6.25],["2010-07-01T18:47:02Z",6.25],["2010-07-01T18:47:04Z",16.25]]}]}]}`, }, &Query{ name: "calculate moving average of mean with fill previous", command: `SELECT moving_average(mean(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",12.5],["2010-07-01T18:47:04Z",22.5]]}]}]}`, }, &Query{ name: "calculate moving average of median with fill 0", command: `SELECT moving_average(median(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",6.25],["2010-07-01T18:47:02Z",6.25],["2010-07-01T18:47:04Z",16.25]]}]}]}`, }, &Query{ name: "calculate moving average of median with fill previous", command: `SELECT moving_average(median(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",12.5],["2010-07-01T18:47:04Z",22.5]]}]}]}`, }, &Query{ name: "calculate moving average of sum with fill 0", command: `SELECT moving_average(sum(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",12.5],["2010-07-01T18:47:02Z",12.5],["2010-07-01T18:47:04Z",32.5]]}]}]}`, }, &Query{ name: "calculate moving average of sum with fill previous", command: `SELECT moving_average(sum(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",25],["2010-07-01T18:47:04Z",45]]}]}]}`, }, &Query{ name: "calculate moving average of first with fill 0", command: `SELECT moving_average(first(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",5],["2010-07-01T18:47:02Z",5],["2010-07-01T18:47:04Z",15]]}]}]}`, }, &Query{ name: "calculate moving average of first with fill previous", command: `SELECT moving_average(first(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",10],["2010-07-01T18:47:04Z",20]]}]}]}`, }, &Query{ name: "calculate moving average of last with fill 0", command: `SELECT moving_average(last(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",7.5],["2010-07-01T18:47:02Z",7.5],["2010-07-01T18:47:04Z",17.5]]}]}]}`, }, &Query{ name: "calculate moving average of last with fill previous", command: `SELECT moving_average(last(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, }, &Query{ name: "calculate moving average of min with fill 0", command: `SELECT moving_average(min(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",5],["2010-07-01T18:47:02Z",5],["2010-07-01T18:47:04Z",15]]}]}]}`, }, &Query{ name: "calculate moving average of min with fill previous", command: `SELECT moving_average(min(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",10],["2010-07-01T18:47:04Z",20]]}]}]}`, }, &Query{ name: "calculate moving average of max with fill 0", command: `SELECT moving_average(max(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",7.5],["2010-07-01T18:47:02Z",7.5],["2010-07-01T18:47:04Z",17.5]]}]}]}`, }, &Query{ name: "calculate moving average of max with fill previous", command: `SELECT moving_average(max(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, }, &Query{ name: "calculate moving average of percentile with fill 0", command: `SELECT moving_average(percentile(value, 50), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time 
<= '2010-07-01 18:47:05' group by time(2s) fill(0)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",5],["2010-07-01T18:47:02Z",5],["2010-07-01T18:47:04Z",15]]}]}]}`, }, &Query{ name: "calculate moving average of percentile with fill previous", command: `SELECT moving_average(percentile(value, 50), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",10],["2010-07-01T18:47:04Z",20]]}]}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // mergeMany ensures that when merging many series together and some of them have a different number // of points than others in a group by interval the results are correct func TestServer_Query_MergeMany(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() // set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } test := NewTest("db0", "rp0") writes := []string{} for i := 1; i < 11; i++ { for j := 1; j < 5+i%3; j++ { data := fmt.Sprintf(`cpu,host=server_%d value=22 %d`, i, time.Unix(int64(j), int64(0)).UTC().UnixNano()) writes = append(writes, data) } } test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "GROUP by time", command: `SELECT count(value) FROM db0.rp0.cpu WHERE time >= '1970-01-01T00:00:01Z' AND time <= '1970-01-01T00:00:06Z' GROUP BY time(1s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:01Z",10],["1970-01-01T00:00:02Z",10],["1970-01-01T00:00:03Z",10],["1970-01-01T00:00:04Z",10],["1970-01-01T00:00:05Z",7],["1970-01-01T00:00:06Z",3]]}]}]}`, }, &Query{ skip: true, name: "GROUP by tag - FIXME issue #2875", command: `SELECT count(value) FROM db0.rp0.cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:00Z' group by host`, exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server03"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, }, &Query{ name: "GROUP by field", command: `SELECT count(value) FROM db0.rp0.cpu group by value`, exp: `{"results":[{"series":[{"name":"cpu","tags":{"value":""},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",50]]}]}]}`, }, }...) 
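// The nested write loop above yields 10 series whose point counts vary with
// i%3 (4, 5, or 6 points each), so the per-interval counts taper off: all 10
// series cover seconds 1-4, 7 of them reach second 5, and only 3 reach second 6.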
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_SLimitAndSOffset(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() // set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } test := NewTest("db0", "rp0") writes := []string{} for i := 1; i < 10; i++ { data := fmt.Sprintf(`cpu,region=us-east,host=server-%d value=%d %d`, i, i, time.Unix(int64(i), int64(0)).UnixNano()) writes = append(writes, data) } test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "SLIMIT 2 SOFFSET 1", command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 1`, exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-2","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-3","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, }, &Query{ name: "SLIMIT 2 SOFFSET 3", command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 3`, exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-4","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-5","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, }, &Query{ name: "SLIMIT 3 SOFFSET 8", command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 3 SOFFSET 8`, exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-9","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, }, }...) 
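// SLIMIT/SOFFSET paginate over series rather than points: with nine server-N
// series, SOFFSET 8 leaves a single series even though SLIMIT asks for three.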
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Regex(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`cpu1,host=server01 value=10 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), fmt.Sprintf(`cpu2,host=server01 value=20 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), fmt.Sprintf(`cpu3,host=server01 value=30 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "default db and rp", command: `SELECT * FROM /cpu[13]/`, params: url.Values{"db": []string{"db0"}}, exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","host","value"],"values":[["2015-02-28T01:03:36.703820946Z","server01",10]]},{"name":"cpu3","columns":["time","host","value"],"values":[["2015-02-28T01:03:36.703820946Z","server01",30]]}]}]}`, }, &Query{ name: "default db and rp with GROUP BY *", command: `SELECT * FROM /cpu[13]/ GROUP BY *`, params: url.Values{"db": []string{"db0"}}, exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, }, &Query{ name: "specifying db and rp", command: `SELECT * FROM db0.rp0./cpu[13]/ GROUP BY *`, exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, }, &Query{ name: "default db and specified rp", command: `SELECT * FROM rp0./cpu[13]/ GROUP BY *`, params: url.Values{"db": []string{"db0"}}, exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, }, &Query{ name: "specified db and default rp", command: `SELECT * FROM db0../cpu[13]/ GROUP BY *`, exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, }, }...) 
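// Each query selects measurements via the regex /cpu[13]/, which matches cpu1
// and cpu3 but not cpu2; the variants differ only in how db0/rp0 are supplied
// (URL params, fully qualified measurement names, or a mix of the two).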
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Aggregates_Int(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join([]string{ fmt.Sprintf(`int value=45 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), }, "\n")}, } test.addQueries([]*Query{ // int64 &Query{ name: "stddev with just one point - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT STDDEV(value) FROM int`, exp: `{"results":[{"series":[{"name":"int","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Aggregates_IntMax(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join([]string{ fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), }, "\n")}, } test.addQueries([]*Query{ &Query{ name: "large mean and stddev - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT MEAN(value), STDDEV(value) FROM intmax`, exp: `{"results":[{"series":[{"name":"intmax","columns":["time","mean","stddev"],"values":[["1970-01-01T00:00:00Z",` + maxInt64() + `,0]]}]}]}`, }, }...) 
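// Two points at the maximum int64 value: the mean must come back as that same
// value (a naive int64 sum would overflow) and the stddev of identical points
// must be 0.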
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Aggregates_IntMany(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join([]string{ fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), }, "\n")}, } test.addQueries([]*Query{ &Query{ name: "mean and stddev - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT MEAN(value), STDDEV(value) FROM intmany WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","mean","stddev"],"values":[["2000-01-01T00:00:00Z",5,2.138089935299395]]}]}]}`, }, &Query{ name: "first - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT FIRST(value) FROM intmany`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, }, &Query{ name: "first - int - epoch ms", params: url.Values{"db": []string{"db0"}, "epoch": []string{"ms"}}, command: `SELECT FIRST(value) FROM intmany`, exp: fmt.Sprintf(`{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[[%d,2]]}]}]}`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()/int64(time.Millisecond)), }, &Query{ name: "last - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT LAST(value) FROM intmany`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:01:10Z",9]]}]}]}`, }, &Query{ name: "spread - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT SPREAD(value) FROM intmany`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","spread"],"values":[["1970-01-01T00:00:00Z",7]]}]}]}`, }, &Query{ name: "median - even count - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT MEDIAN(value) FROM intmany`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4.5]]}]}]}`, }, &Query{ name: "median - odd count - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT MEDIAN(value) FROM intmany where time < '2000-01-01T00:01:10Z'`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, }, &Query{ name: "distinct as call - int", 
params: url.Values{"db": []string{"db0"}}, command: `SELECT DISTINCT(value) FROM intmany`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",2],["1970-01-01T00:00:00Z",4],["1970-01-01T00:00:00Z",5],["1970-01-01T00:00:00Z",7],["1970-01-01T00:00:00Z",9]]}]}]}`, }, &Query{ name: "distinct alt syntax - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT DISTINCT value FROM intmany`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",2],["1970-01-01T00:00:00Z",4],["1970-01-01T00:00:00Z",5],["1970-01-01T00:00:00Z",7],["1970-01-01T00:00:00Z",9]]}]}]}`, }, &Query{ name: "distinct select tag - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT DISTINCT(host) FROM intmany`, exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, skip: true, // FIXME(benbjohnson): should be allowed, need to stream tag values }, &Query{ name: "distinct alt select tag - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT DISTINCT host FROM intmany`, exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, skip: true, // FIXME(benbjohnson): should be allowed, need to stream tag values }, &Query{ name: "count distinct - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT COUNT(DISTINCT value) FROM intmany`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, }, &Query{ name: "count distinct as call - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT COUNT(DISTINCT(value)) FROM intmany`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, }, &Query{ name: "count distinct select tag - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT COUNT(DISTINCT host) FROM intmany`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, skip: true, // FIXME(benbjohnson): stream tag values }, &Query{ name: "count distinct as call select tag - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT COUNT(DISTINCT host) FROM intmany`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, skip: true, // FIXME(benbjohnson): stream tag values }, }...) 
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Aggregates_IntMany_GroupBy(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join([]string{ fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), }, "\n")}, } test.addQueries([]*Query{ &Query{ name: "max order by time with time specified group by 10s", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(10s)`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:00Z",7],["2000-01-01T00:01:10Z",9]]}]}]}`, }, &Query{ name: "max order by time without time specified group by 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, }, &Query{ name: "max order by time with time specified group by 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, }, &Query{ name: "min order by time without time specified group by 15s", params: url.Values{"db": []string{"db0"}}, command: `SELECT min(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, }, &Query{ name: "min order by time with time specified group by 15s", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, 
min(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, }, &Query{ name: "first order by time without time specified group by 15s", params: url.Values{"db": []string{"db0"}}, command: `SELECT first(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, }, &Query{ name: "first order by time with time specified group by 15s", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, first(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, }, &Query{ name: "last order by time without time specified group by 15s", params: url.Values{"db": []string{"db0"}}, command: `SELECT last(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, }, &Query{ name: "last order by time with time specified group by 15s", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, last(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, }, }...) 
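// Each aggregate above is queried twice, with and without an explicit `time`
// in the select list; both forms must report the same bucket start times and
// the same values.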
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Aggregates_IntMany_OrderByDesc(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join([]string{ fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), }, "\n")}, } test.addQueries([]*Query{ &Query{ name: "aggregate order by time desc", params: url.Values{"db": []string{"db0"}}, command: `SELECT max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:00Z' group by time(10s) order by time desc`, exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:01:00Z",7],["2000-01-01T00:00:50Z",5],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:00Z",2]]}]}]}`, }, }...) 
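// ORDER BY time DESC must reverse the bucket order; the upper bound of
// 00:01:00 also excludes the value 9 written at 00:01:10, so the newest bucket
// reports max 7.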
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Aggregates_IntOverlap(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join([]string{ fmt.Sprintf(`intoverlap,region=us-east value=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`intoverlap,region=us-east value=30 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), fmt.Sprintf(`intoverlap,region=us-west value=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`intoverlap,region=us-east otherVal=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), }, "\n")}, } test.addQueries([]*Query{ /* &Query{ name: "aggregation with no interval - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT count(value) FROM intoverlap WHERE time = '2000-01-01 00:00:00'`, exp: `{"results":[{"series":[{"name":"intoverlap","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, }, &Query{ name: "sum - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT SUM(value) FROM intoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`, exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["2000-01-01T00:00:10Z",30]]}]}]}`, }, */&Query{ name: "aggregation with a null field value - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT SUM(value) FROM intoverlap GROUP BY region`, exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, }, &Query{ name: "multiple aggregations - int", params: url.Values{"db": []string{"db0"}}, command: `SELECT SUM(value), MEAN(value) FROM intoverlap GROUP BY region`, exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",50,25]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",100,100]]}]}]}`, }, &Query{ skip: true, name: "multiple aggregations with division - int FIXME issue #2879", params: url.Values{"db": []string{"db0"}}, command: `SELECT sum(value), mean(value), sum(value) / mean(value) as div FROM intoverlap GROUP BY region`, exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean","div"],"values":[["1970-01-01T00:00:00Z",50,25,2]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",100,100,1]]}]}]}`, }, }...) 
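// The fourth point writes only otherVal, so SUM(value) has to skip that null
// value field: us-east sums 20+30=50 and us-west stays 100. The division query
// remains skipped pending issue #2879.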
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Aggregates_FloatSingle(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join([]string{ fmt.Sprintf(`floatsingle value=45.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), }, "\n")}, } test.addQueries([]*Query{ &Query{ name: "stddev with just one point - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT STDDEV(value) FROM floatsingle`, exp: `{"results":[{"series":[{"name":"floatsingle","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Aggregates_FloatMany(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join([]string{ fmt.Sprintf(`floatmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`floatmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), fmt.Sprintf(`floatmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), fmt.Sprintf(`floatmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), fmt.Sprintf(`floatmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), fmt.Sprintf(`floatmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), fmt.Sprintf(`floatmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), fmt.Sprintf(`floatmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), }, "\n")}, } test.addQueries([]*Query{ &Query{ name: "mean and stddev - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT MEAN(value), STDDEV(value) FROM floatmany WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`, exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","mean","stddev"],"values":[["2000-01-01T00:00:00Z",5,2.138089935299395]]}]}]}`, }, &Query{ name: "first - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT FIRST(value) FROM floatmany`, exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, }, &Query{ name: "last - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT LAST(value) FROM floatmany`, exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","last"],"values":[["2000-01-01T00:01:10Z",9]]}]}]}`, }, &Query{ name: "spread - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT SPREAD(value) FROM floatmany`, exp: 
`{"results":[{"series":[{"name":"floatmany","columns":["time","spread"],"values":[["1970-01-01T00:00:00Z",7]]}]}]}`, }, &Query{ name: "median - even count - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT MEDIAN(value) FROM floatmany`, exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4.5]]}]}]}`, }, &Query{ name: "median - odd count - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT MEDIAN(value) FROM floatmany where time < '2000-01-01T00:01:10Z'`, exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, }, &Query{ name: "distinct as call - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT DISTINCT(value) FROM floatmany`, exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",2],["1970-01-01T00:00:00Z",4],["1970-01-01T00:00:00Z",5],["1970-01-01T00:00:00Z",7],["1970-01-01T00:00:00Z",9]]}]}]}`, }, &Query{ name: "distinct alt syntax - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT DISTINCT value FROM floatmany`, exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",2],["1970-01-01T00:00:00Z",4],["1970-01-01T00:00:00Z",5],["1970-01-01T00:00:00Z",7],["1970-01-01T00:00:00Z",9]]}]}]}`, }, &Query{ name: "distinct select tag - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT DISTINCT(host) FROM floatmany`, exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, skip: true, // FIXME(benbjohnson): show be allowed, stream tag values }, &Query{ name: "distinct alt select tag - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT DISTINCT host FROM floatmany`, exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, skip: true, // FIXME(benbjohnson): show be allowed, stream tag values }, &Query{ name: "count distinct - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT COUNT(DISTINCT value) FROM floatmany`, exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, }, &Query{ name: "count distinct as call - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT COUNT(DISTINCT(value)) FROM floatmany`, exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, }, &Query{ name: "count distinct select tag - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT COUNT(DISTINCT host) FROM floatmany`, exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, skip: true, // FIXME(benbjohnson): stream tag values }, &Query{ name: "count distinct as call select tag - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT COUNT(DISTINCT host) FROM floatmany`, exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, skip: true, // FIXME(benbjohnson): stream tag values }, }...) 
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Aggregates_FloatOverlap(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join([]string{ fmt.Sprintf(`floatoverlap,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`floatoverlap,region=us-east value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), fmt.Sprintf(`floatoverlap,region=us-west value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`floatoverlap,region=us-east otherVal=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), }, "\n")}, } test.addQueries([]*Query{ &Query{ name: "aggregation with no interval - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT count(value) FROM floatoverlap WHERE time = '2000-01-01 00:00:00'`, exp: `{"results":[{"series":[{"name":"floatoverlap","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, }, &Query{ name: "sum - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT SUM(value) FROM floatoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`, exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:10Z",30]]}]}]}`, }, &Query{ name: "aggregation with a null field value - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT SUM(value) FROM floatoverlap GROUP BY region`, exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, }, &Query{ name: "multiple aggregations - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT SUM(value), MEAN(value) FROM floatoverlap GROUP BY region`, exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",50,25]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",100,100]]}]}]}`, }, &Query{ name: "multiple aggregations with division - float", params: url.Values{"db": []string{"db0"}}, command: `SELECT sum(value) / mean(value) as div FROM floatoverlap GROUP BY region`, exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",2]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, }, }...) 
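// Sanity check for the division query above: per region, sum(value)/mean(value)
// reduces to the count of non-null points, so us-east yields 50/25 = 2 and
// us-west 100/100 = 1 — the same identity the skipped integer variant
// (FIXME issue #2879) documents for int fields.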
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Aggregates_GroupByOffset(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join([]string{ fmt.Sprintf(`offset,region=us-east,host=serverA value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`offset,region=us-east,host=serverB value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), fmt.Sprintf(`offset,region=us-west,host=serverC value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), }, "\n")}, } test.addQueries([]*Query{ &Query{ name: "group by offset - standard", params: url.Values{"db": []string{"db0"}}, command: `SELECT sum(value) FROM "offset" WHERE time >= '1999-12-31T23:59:55Z' AND time < '2000-01-01T00:00:15Z' GROUP BY time(10s, 5s) FILL(0)`, exp: `{"results":[{"series":[{"name":"offset","columns":["time","sum"],"values":[["1999-12-31T23:59:55Z",120],["2000-01-01T00:00:05Z",30]]}]}]}`, }, &Query{ name: "group by offset - misaligned time", params: url.Values{"db": []string{"db0"}}, command: `SELECT sum(value) FROM "offset" WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:20Z' GROUP BY time(10s, 5s) FILL(0)`, exp: `{"results":[{"series":[{"name":"offset","columns":["time","sum"],"values":[["1999-12-31T23:59:55Z",120],["2000-01-01T00:00:05Z",30],["2000-01-01T00:00:15Z",0]]}]}]}`, }, &Query{ name: "group by offset - negative time", params: url.Values{"db": []string{"db0"}}, command: `SELECT sum(value) FROM "offset" WHERE time >= '1999-12-31T23:59:55Z' AND time < '2000-01-01T00:00:15Z' GROUP BY time(10s, -5s) FILL(0)`, exp: `{"results":[{"series":[{"name":"offset","columns":["time","sum"],"values":[["1999-12-31T23:59:55Z",120],["2000-01-01T00:00:05Z",30]]}]}]}`, }, &Query{ name: "group by offset - modulo", params: url.Values{"db": []string{"db0"}}, command: `SELECT sum(value) FROM "offset" WHERE time >= '1999-12-31T23:59:55Z' AND time < '2000-01-01T00:00:15Z' GROUP BY time(10s, 35s) FILL(0)`, exp: `{"results":[{"series":[{"name":"offset","columns":["time","sum"],"values":[["1999-12-31T23:59:55Z",120],["2000-01-01T00:00:05Z",30]]}]}]}`, }, }...) 
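// Illustrative sketch (not part of the original test; semantics assumed from the
// expectations above): a GROUP BY time(interval, offset) bucket containing
// timestamp t starts at t minus ((t - offset) mod interval). Offsets of 5s, -5s,
// and 35s are congruent modulo the 10s interval, which is why the standard,
// negative, and modulo cases above all expect identical bucket boundaries.
bucketStart := func(t, interval, offset int64) int64 {
	r := (t - offset) % interval
	if r < 0 {
		r += interval // Go's % keeps the sign of the dividend; normalize to [0, interval)
	}
	return t - r
}
_ = bucketStart // kept unreferenced; shown only to document the alignment rule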
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Aggregates_Load(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join([]string{ fmt.Sprintf(`load,region=us-east,host=serverA value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`load,region=us-east,host=serverB value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), fmt.Sprintf(`load,region=us-west,host=serverC value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), }, "\n")}, } test.addQueries([]*Query{ &Query{ name: "group by multiple dimensions", params: url.Values{"db": []string{"db0"}}, command: `SELECT sum(value) FROM load GROUP BY region, host`, exp: `{"results":[{"series":[{"name":"load","tags":{"host":"serverA","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",20]]},{"name":"load","tags":{"host":"serverB","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",30]]},{"name":"load","tags":{"host":"serverC","region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, }, &Query{ name: "group by multiple dimensions", params: url.Values{"db": []string{"db0"}}, command: `SELECT sum(value)*2 FROM load`, exp: `{"results":[{"series":[{"name":"load","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",300]]}]}]}`, }, &Query{ name: "group by multiple dimensions", params: url.Values{"db": []string{"db0"}}, command: `SELECT sum(value)/2 FROM load`, exp: `{"results":[{"series":[{"name":"load","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",75]]}]}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Aggregates_CPU(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join([]string{ fmt.Sprintf(`cpu,region=uk,host=serverZ,service=redis value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), fmt.Sprintf(`cpu,region=uk,host=serverZ,service=mysql value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), }, "\n")}, } test.addQueries([]*Query{ &Query{ name: "aggregation with WHERE and AND", params: url.Values{"db": []string{"db0"}}, command: `SELECT sum(value) FROM cpu WHERE region='uk' AND host='serverZ'`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]}]}]}`, }, }...) 
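// Descriptive note: both points above share the 00:00:03 timestamp but belong to
// different series (service=redis vs service=mysql); the WHERE clause matches
// both series, so SUM merges them into 20 + 30 = 50.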
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Aggregates_String(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join([]string{ fmt.Sprintf(`stringdata value="first" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), fmt.Sprintf(`stringdata value="last" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:04Z").UnixNano()), }, "\n")}, } test.addQueries([]*Query{ // strings &Query{ name: "STDDEV on string data - string", params: url.Values{"db": []string{"db0"}}, command: `SELECT STDDEV(value) FROM stringdata`, exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator }, &Query{ name: "MEAN on string data - string", params: url.Values{"db": []string{"db0"}}, command: `SELECT MEAN(value) FROM stringdata`, exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator }, &Query{ name: "MEDIAN on string data - string", params: url.Values{"db": []string{"db0"}}, command: `SELECT MEDIAN(value) FROM stringdata`, exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator }, &Query{ name: "COUNT on string data - string", params: url.Values{"db": []string{"db0"}}, command: `SELECT COUNT(value) FROM stringdata`, exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator }, &Query{ name: "FIRST on string data - string", params: url.Values{"db": []string{"db0"}}, command: `SELECT FIRST(value) FROM stringdata`, exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","first"],"values":[["2000-01-01T00:00:03Z","first"]]}]}]}`, skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator }, &Query{ name: "LAST on string data - string", params: url.Values{"db": []string{"db0"}}, command: `SELECT LAST(value) FROM stringdata`, exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","last"],"values":[["2000-01-01T00:00:04Z","last"]]}]}]}`, skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator }, }...) 
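// All of the string-aggregate cases above are currently skipped; they document
// the intended results once non-float var ref expressions are supported in the
// cursor iterator (see the FIXME note on each query).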
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_AggregateSelectors(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`network,host=server01,region=west,core=1 rx=10i,tx=20i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`network,host=server02,region=west,core=2 rx=40i,tx=50i,core=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), fmt.Sprintf(`network,host=server03,region=east,core=3 rx=40i,tx=55i,core=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), fmt.Sprintf(`network,host=server04,region=east,core=4 rx=40i,tx=60i,core=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), fmt.Sprintf(`network,host=server05,region=west,core=1 rx=50i,tx=70i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), fmt.Sprintf(`network,host=server06,region=east,core=2 rx=50i,tx=40i,core=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), fmt.Sprintf(`network,host=server07,region=west,core=3 rx=70i,tx=30i,core=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), fmt.Sprintf(`network,host=server08,region=east,core=4 rx=90i,tx=10i,core=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), fmt.Sprintf(`network,host=server09,region=east,core=1 rx=5i,tx=4i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:20Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "baseline", params: url.Values{"db": []string{"db0"}}, command: `SELECT * FROM network`, exp: `{"results":[{"series":[{"name":"network","columns":["time","core","core_1","host","region","rx","tx"],"values":[["2000-01-01T00:00:00Z",2,"1","server01","west",10,20],["2000-01-01T00:00:10Z",3,"2","server02","west",40,50],["2000-01-01T00:00:20Z",4,"3","server03","east",40,55],["2000-01-01T00:00:30Z",1,"4","server04","east",40,60],["2000-01-01T00:00:40Z",2,"1","server05","west",50,70],["2000-01-01T00:00:50Z",3,"2","server06","east",50,40],["2000-01-01T00:01:00Z",4,"3","server07","west",70,30],["2000-01-01T00:01:10Z",1,"4","server08","east",90,10],["2000-01-01T00:01:20Z",2,"1","server09","east",5,4]]}]}]}`, }, &Query{ name: "max - baseline 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",90]]}]}]}`, }, &Query{ name: "max - baseline 30s - epoch ms", params: url.Values{"db": []string{"db0"}, "epoch": []string{"ms"}}, command: `SELECT max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: fmt.Sprintf( 
`{"results":[{"series":[{"name":"network","columns":["time","max"],"values":[[%d,40],[%d,50],[%d,90]]}]}]}`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()/int64(time.Millisecond), mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()/int64(time.Millisecond), mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()/int64(time.Millisecond), ), }, &Query{ name: "max - tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT tx, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","max"],"values":[["2000-01-01T00:00:00Z",50,40],["2000-01-01T00:00:30Z",70,50],["2000-01-01T00:01:00Z",10,90]]}]}]}`, }, &Query{ name: "max - time", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",90]]}]}]}`, }, &Query{ name: "max - time and tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, tx, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","max"],"values":[["2000-01-01T00:00:00Z",50,40],["2000-01-01T00:00:30Z",70,50],["2000-01-01T00:01:00Z",10,90]]}]}]}`, }, &Query{ name: "min - baseline 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",5]]}]}]}`, }, &Query{ name: "min - tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT tx, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","min"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",4,5]]}]}]}`, }, &Query{ name: "min - time", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",5]]}]}]}`, }, &Query{ name: "min - time and tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, tx, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","min"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",4,5]]}]}]}`, }, &Query{ name: "max,min - baseline 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT max(rx), min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: 
`{"results":[{"series":[{"name":"network","columns":["time","max","min"],"values":[["2000-01-01T00:00:00Z",40,10],["2000-01-01T00:00:30Z",50,40],["2000-01-01T00:01:00Z",90,5]]}]}]}`, }, &Query{ name: "first - baseline 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",70]]}]}]}`, }, &Query{ name: "first - tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, tx, first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","first"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",30,70]]}]}]}`, }, &Query{ name: "first - time", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",70]]}]}]}`, }, &Query{ name: "first - time and tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, tx, first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","first"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",30,70]]}]}]}`, }, &Query{ name: "last - baseline 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",5]]}]}]}`, }, &Query{ name: "last - tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT tx, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","last"],"values":[["2000-01-01T00:00:00Z",55,40],["2000-01-01T00:00:30Z",40,50],["2000-01-01T00:01:00Z",4,5]]}]}]}`, }, &Query{ name: "last - time", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",5]]}]}]}`, }, &Query{ name: "last - time and tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, tx, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","last"],"values":[["2000-01-01T00:00:00Z",55,40],["2000-01-01T00:00:30Z",40,50],["2000-01-01T00:01:00Z",4,5]]}]}]}`, }, &Query{ name: "count - baseline 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT count(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= 
'2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:30Z",3],["2000-01-01T00:01:00Z",3]]}]}]}`, }, &Query{ name: "count - time", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, count(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, }, &Query{ name: "count - tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT tx, count(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, }, &Query{ name: "distinct - baseline 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","distinct"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70],["2000-01-01T00:01:00Z",90],["2000-01-01T00:01:00Z",5]]}]}]}`, }, &Query{ name: "distinct - time", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"error":"error parsing query: aggregate function distinct() can not be combined with other functions or fields"}`, }, &Query{ name: "distinct - tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT tx, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"error":"error parsing query: aggregate function distinct() can not be combined with other functions or fields"}`, }, &Query{ name: "mean - baseline 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT mean(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",30],["2000-01-01T00:00:30Z",46.666666666666664],["2000-01-01T00:01:00Z",55]]}]}]}`, }, &Query{ name: "mean - time", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, mean(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, }, &Query{ name: "mean - tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT tx, mean(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, }, &Query{ name: "median - baseline 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT median(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","median"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70]]}]}]}`, }, &Query{ name: "median - time", params: url.Values{"db": []string{"db0"}}, command: 
`SELECT time, median(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, }, &Query{ name: "median - tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT tx, median(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, }, &Query{ name: "spread - baseline 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","spread"],"values":[["2000-01-01T00:00:00Z",30],["2000-01-01T00:00:30Z",10],["2000-01-01T00:01:00Z",85]]}]}]}`, }, &Query{ name: "spread - time", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, }, &Query{ name: "spread - tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT tx, spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, }, &Query{ name: "stddev - baseline 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT stddev(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","stddev"],"values":[["2000-01-01T00:00:00Z",17.320508075688775],["2000-01-01T00:00:30Z",5.773502691896258],["2000-01-01T00:01:00Z",44.44097208657794]]}]}]}`, }, &Query{ name: "stddev - time", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, stddev(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, }, &Query{ name: "stddev - tx", params: url.Values{"db": []string{"db0"}}, command: `SELECT tx, stddev(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, }, &Query{ name: "percentile - baseline 30s", params: url.Values{"db": []string{"db0"}}, command: `SELECT percentile(rx, 75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","percentile"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70]]}]}]}`, }, &Query{ name: "percentile - time", params: url.Values{"db": []string{"db0"}}, command: `SELECT time, percentile(rx, 75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","percentile"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70]]}]}]}`, }, &Query{ name: "percentile - tx", params: url.Values{"db": []string{"db0"}}, 
command: `SELECT tx, percentile(rx, 75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","percentile"],"values":[["2000-01-01T00:00:00Z",50,40],["2000-01-01T00:00:30Z",70,50],["2000-01-01T00:01:00Z",30,70]]}]}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_TopInt(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ // cpu data with overlapping duplicate values // hour 0 fmt.Sprintf(`cpu,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`cpu,host=server02 value=3.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), fmt.Sprintf(`cpu,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), // hour 1 fmt.Sprintf(`cpu,host=server04 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), fmt.Sprintf(`cpu,host=server05 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:10Z").UnixNano()), fmt.Sprintf(`cpu,host=server06 value=6.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:20Z").UnixNano()), // hour 2 fmt.Sprintf(`cpu,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), fmt.Sprintf(`cpu,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:10Z").UnixNano()), // memory data // hour 0 fmt.Sprintf(`memory,host=a,service=redis value=1000i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`memory,host=b,service=mysql value=2000i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`memory,host=b,service=redis value=1500i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), // hour 1 fmt.Sprintf(`memory,host=a,service=redis value=1001i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), fmt.Sprintf(`memory,host=b,service=mysql value=2001i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), fmt.Sprintf(`memory,host=b,service=redis value=1501i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), // hour 2 fmt.Sprintf(`memory,host=a,service=redis value=1002i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), fmt.Sprintf(`memory,host=b,service=mysql value=2002i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), fmt.Sprintf(`memory,host=b,service=redis value=1502i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "top - cpu", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, 1) FROM cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T02:00:10Z",9]]}]}]}`, }, &Query{ name: 
"top - cpu - 2 values", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, 2) FROM cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, }, &Query{ name: "top - cpu - 3 values - sorts on tie properly", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, 3) FROM cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, }, &Query{ name: "top - cpu - with tag", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, host, 2) FROM cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top","host"],"values":[["2000-01-01T01:00:10Z",7,"server05"],["2000-01-01T02:00:10Z",9,"server08"]]}]}]}`, }, &Query{ name: "top - cpu - 3 values with limit 2", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, 3) FROM cpu limit 2`, exp: `{"error":"error parsing query: limit (3) in top function can not be larger than the LIMIT (2) in the select statement"}`, }, &Query{ name: "top - cpu - hourly", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T01:00:00Z",7],["2000-01-01T02:00:00Z",9]]}]}]}`, }, &Query{ name: "top - cpu - 2 values hourly", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, 2) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`, }, &Query{ name: "top - cpu - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, 3) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:00Z",2],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T01:00:00Z",5],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`, }, &Query{ name: "top - memory - 2 values, two tags", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, 2), host, service FROM memory`, exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T01:00:00Z",2001,"b","mysql"],["2000-01-01T02:00:00Z",2002,"b","mysql"]]}]}]}`, }, &Query{ name: "top - memory - host tag with limit 2", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, host, 2) FROM memory`, exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host"],"values":[["2000-01-01T02:00:00Z",2002,"b"],["2000-01-01T02:00:00Z",1002,"a"]]}]}]}`, }, &Query{ name: "top - memory - host tag with limit 2, service tag in select", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, host, 2), service FROM memory`, exp: 
`{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`, }, &Query{ name: "top - memory - service tag with limit 2, host tag in select", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, service, 2), host FROM memory`, exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","service","host"],"values":[["2000-01-01T02:00:00Z",2002,"mysql","b"],["2000-01-01T02:00:00Z",1502,"redis","b"]]}]}]}`, }, &Query{ name: "top - memory - host and service tag with limit 2", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, host, service, 2) FROM memory`, exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1502,"b","redis"]]}]}]}`, }, &Query{ name: "top - memory - host tag with limit 2 with service tag in select", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, host, 2), service FROM memory`, exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`, }, &Query{ name: "top - memory - host and service tag with limit 3", params: url.Values{"db": []string{"db0"}}, command: `SELECT TOP(value, host, service, 3) FROM memory`, exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1502,"b","redis"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`, }, // TODO // - Test that specifiying fields or tags in the function will rewrite the query to expand them to the fields // - Test that a field can be used in the top function // - Test that asking for a field will come back before a tag if they have the same name for a tag and a field // - Test that `select top(value, host, 2)` when there is only one value for `host` it will only bring back one value // - Test that `select top(value, host, 4) from foo where time > now() - 1d and time < now() group by time(1h)` and host is unique in some time buckets that it returns only the unique ones, and not always 4 values }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Test various aggregates when different series only have data for the same timestamp. 
func TestServer_Query_Aggregates_IdenticalTime(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`series,host=a value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`series,host=b value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`series,host=c value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`series,host=d value=4 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`series,host=e value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`series,host=f value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`series,host=g value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`series,host=h value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`series,host=i value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "last from multiple series with identical timestamp", params: url.Values{"db": []string{"db0"}}, command: `SELECT last(value) FROM "series"`, exp: `{"results":[{"series":[{"name":"series","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",5]]}]}]}`, repeat: 100, }, &Query{ name: "first from multiple series with identical timestamp", params: url.Values{"db": []string{"db0"}}, command: `SELECT first(value) FROM "series"`, exp: `{"results":[{"series":[{"name":"series","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",5]]}]}]}`, repeat: 100, }, }...) 
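// Descriptive note: every series above shares a single timestamp, so first() and
// last() must break the tie deterministically across series; repeat: 100 makes
// the runner below re-execute each query (query.repeat+1 times in total) to
// surface any nondeterministic ordering, e.g. from map iteration.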
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } for n := 0; n <= query.repeat; n++ { if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } } // This tests that a GROUP BY query observes the time range you asked for, but only puts values in the buckets that match the time range func TestServer_Query_GroupByTimeCutoffs(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`cpu value=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`cpu value=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), fmt.Sprintf(`cpu value=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:05Z").UnixNano()), fmt.Sprintf(`cpu value=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:08Z").UnixNano()), fmt.Sprintf(`cpu value=5i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:09Z").UnixNano()), fmt.Sprintf(`cpu value=6i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "sum all time", params: url.Values{"db": []string{"db0"}}, command: `SELECT SUM(value) FROM cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",21]]}]}]}`, }, &Query{ name: "sum all time grouped by time 5s", params: url.Values{"db": []string{"db0"}}, command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`, }, &Query{ name: "sum all time grouped by time 5s missing first point", params: url.Values{"db": []string{"db0"}}, command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:01Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`, }, &Query{ name: "sum all time grouped by time 5s missing first points (null for bucket)", params: url.Values{"db": []string{"db0"}}, command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:02Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`, }, &Query{ name: "sum all time grouped by time 5s missing last point - 2 time intervals", params: url.Values{"db": []string{"db0"}}, command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:09Z' group by time(5s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",12]]}]}]}`, }, &Query{ name: "sum all time grouped by time 5s missing last 2 points - 2 time intervals", params:
url.Values{"db": []string{"db0"}}, command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:08Z' group by time(5s)`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",7]]}]}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Write_Precision(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []struct { write string params url.Values }{ { write: fmt.Sprintf("cpu_n0_precision value=1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").UnixNano()), }, { write: fmt.Sprintf("cpu_n1_precision value=1.1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").UnixNano()), params: url.Values{"precision": []string{"n"}}, }, { write: fmt.Sprintf("cpu_u_precision value=100 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Microsecond).UnixNano()/int64(time.Microsecond)), params: url.Values{"precision": []string{"u"}}, }, { write: fmt.Sprintf("cpu_ms_precision value=200 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Millisecond).UnixNano()/int64(time.Millisecond)), params: url.Values{"precision": []string{"ms"}}, }, { write: fmt.Sprintf("cpu_s_precision value=300 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Second).UnixNano()/int64(time.Second)), params: url.Values{"precision": []string{"s"}}, }, { write: fmt.Sprintf("cpu_m_precision value=400 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Minute).UnixNano()/int64(time.Minute)), params: url.Values{"precision": []string{"m"}}, }, { write: fmt.Sprintf("cpu_h_precision value=500 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Hour).UnixNano()/int64(time.Hour)), params: url.Values{"precision": []string{"h"}}, }, } test := NewTest("db0", "rp0") test.addQueries([]*Query{ &Query{ name: "point with nanosecond precision time - no precision specified on write", command: `SELECT * FROM cpu_n0_precision`, params: url.Values{"db": []string{"db0"}}, exp: `{"results":[{"series":[{"name":"cpu_n0_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012345Z",1]]}]}]}`, }, &Query{ name: "point with nanosecond precision time", command: `SELECT * FROM cpu_n1_precision`, params: url.Values{"db": []string{"db0"}}, exp: `{"results":[{"series":[{"name":"cpu_n1_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012345Z",1.1]]}]}]}`, }, &Query{ name: "point with microsecond precision time", command: `SELECT * FROM cpu_u_precision`, params: url.Values{"db": []string{"db0"}}, exp: `{"results":[{"series":[{"name":"cpu_u_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012Z",100]]}]}]}`, }, &Query{ name: "point with millisecond precision time", command: `SELECT * FROM cpu_ms_precision`, params: url.Values{"db": []string{"db0"}}, exp: 
`{"results":[{"series":[{"name":"cpu_ms_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789Z",200]]}]}]}`, }, &Query{ name: "point with second precision time", command: `SELECT * FROM cpu_s_precision`, params: url.Values{"db": []string{"db0"}}, exp: `{"results":[{"series":[{"name":"cpu_s_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56Z",300]]}]}]}`, }, &Query{ name: "point with minute precision time", command: `SELECT * FROM cpu_m_precision`, params: url.Values{"db": []string{"db0"}}, exp: `{"results":[{"series":[{"name":"cpu_m_precision","columns":["time","value"],"values":[["2000-01-01T12:34:00Z",400]]}]}]}`, }, &Query{ name: "point with hour precision time", command: `SELECT * FROM cpu_h_precision`, params: url.Values{"db": []string{"db0"}}, exp: `{"results":[{"series":[{"name":"cpu_h_precision","columns":["time","value"],"values":[["2000-01-01T12:00:00Z",500]]}]}]}`, }, }...) // we are doing writes that require parameter changes, so we are fighting the test harness a little to make this happen properly for _, w := range writes { test.writes = Writes{ &Write{data: w.write}, } test.params = w.params test.initialized = false if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Wildcards(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`wildcard,region=us-east value=10 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`wildcard,region=us-east valx=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), fmt.Sprintf(`wildcard,region=us-east value=30,valx=40 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), fmt.Sprintf(`wgroup,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`wgroup,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), fmt.Sprintf(`wgroup,region=us-west value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), fmt.Sprintf(`m1,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`m2,host=server01 field=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "wildcard", params: url.Values{"db": []string{"db0"}}, command: `SELECT * FROM wildcard`, exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","region","value","valx"],"values":[["2000-01-01T00:00:00Z","us-east",10,null],["2000-01-01T00:00:10Z","us-east",null,20],["2000-01-01T00:00:20Z","us-east",30,40]]}]}]}`, }, &Query{ name: "wildcard with group by", params: url.Values{"db": []string{"db0"}}, command: `SELECT * FROM wildcard GROUP BY *`, exp: 
`{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, }, &Query{ name: "GROUP BY queries", params: url.Values{"db": []string{"db0"}}, command: `SELECT mean(value) FROM wgroup GROUP BY *`, exp: `{"results":[{"series":[{"name":"wgroup","tags":{"region":"us-east"},"columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",15]]},{"name":"wgroup","tags":{"region":"us-west"},"columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",30]]}]}]}`, }, &Query{ name: "GROUP BY queries with time", params: url.Values{"db": []string{"db0"}}, command: `SELECT mean(value) FROM wgroup WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:00Z' GROUP BY *,TIME(1m)`, exp: `{"results":[{"series":[{"name":"wgroup","tags":{"region":"us-east"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",15]]},{"name":"wgroup","tags":{"region":"us-west"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",30]]}]}]}`, }, &Query{ name: "wildcard and field in select", params: url.Values{"db": []string{"db0"}}, command: `SELECT value, * FROM wildcard`, exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","value","region","value_1","valx"],"values":[["2000-01-01T00:00:00Z",10,"us-east",10,null],["2000-01-01T00:00:10Z",null,"us-east",null,20],["2000-01-01T00:00:20Z",30,"us-east",30,40]]}]}]}`, }, &Query{ name: "field and wildcard in select", params: url.Values{"db": []string{"db0"}}, command: `SELECT value, * FROM wildcard`, exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","value","region","value_1","valx"],"values":[["2000-01-01T00:00:00Z",10,"us-east",10,null],["2000-01-01T00:00:10Z",null,"us-east",null,20],["2000-01-01T00:00:20Z",30,"us-east",30,40]]}]}]}`, }, &Query{ name: "field and wildcard in group by", params: url.Values{"db": []string{"db0"}}, command: `SELECT * FROM wildcard GROUP BY region, *`, exp: `{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, }, &Query{ name: "wildcard and field in group by", params: url.Values{"db": []string{"db0"}}, command: `SELECT * FROM wildcard GROUP BY *, region`, exp: `{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, }, &Query{ name: "wildcard with multiple measurements", params: url.Values{"db": []string{"db0"}}, command: `SELECT * FROM m1, m2`, exp: `{"results":[{"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`, }, &Query{ name: "wildcard with multiple measurements via regex", params: url.Values{"db": []string{"db0"}}, command: `SELECT * FROM /^m.*/`, exp: `{"results":[{"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`, }, &Query{ name: "wildcard with multiple measurements via regex and limit", params: 
url.Values{"db": []string{"db0"}}, command: `SELECT * FROM db0../^m.*/ LIMIT 2`, exp: `{"results":[{"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_WildcardExpansion(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`wildcard,region=us-east,host=A value=10,cpu=80 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`wildcard,region=us-east,host=B value=20,cpu=90 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), fmt.Sprintf(`wildcard,region=us-west,host=B value=30,cpu=70 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), fmt.Sprintf(`wildcard,region=us-east,host=A value=40,cpu=60 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), fmt.Sprintf(`dupnames,region=us-east,day=1 value=10,day=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`dupnames,region=us-east,day=2 value=20,day=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), fmt.Sprintf(`dupnames,region=us-west,day=3 value=30,day=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "wildcard", params: url.Values{"db": []string{"db0"}}, command: `SELECT * FROM wildcard`, exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","cpu","host","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, }, &Query{ name: "no wildcard in select", params: url.Values{"db": []string{"db0"}}, command: `SELECT cpu, host, region, value FROM wildcard`, exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","cpu","host","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, }, &Query{ name: "no wildcard in select, preserve column order", params: url.Values{"db": []string{"db0"}}, command: `SELECT host, cpu, region, value FROM wildcard`, exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","host","cpu","region","value"],"values":[["2000-01-01T00:00:00Z","A",80,"us-east",10],["2000-01-01T00:00:10Z","B",90,"us-east",20],["2000-01-01T00:00:20Z","B",70,"us-west",30],["2000-01-01T00:00:30Z","A",60,"us-east",40]]}]}]}`, }, &Query{ name: "no wildcard with alias", params: url.Values{"db": []string{"db0"}}, command: `SELECT cpu as c, host as h, region, value FROM wildcard`, exp: 
`{"results":[{"series":[{"name":"wildcard","columns":["time","c","h","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, }, &Query{ name: "duplicate tag and field key", command: `SELECT * FROM dupnames`, params: url.Values{"db": []string{"db0"}}, exp: `{"results":[{"series":[{"name":"dupnames","columns":["time","day","day_1","region","value"],"values":[["2000-01-01T00:00:00Z",3,"1","us-east",10],["2000-01-01T00:00:10Z",2,"2","us-east",20],["2000-01-01T00:00:20Z",1,"3","us-west",30]]}]}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_AcrossShardsAndFields(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`cpu load=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`cpu load=200 %d`, mustParseTime(time.RFC3339Nano, "2010-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`cpu core=4 %d`, mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "two results for cpu", params: url.Values{"db": []string{"db0"}}, command: `SELECT load FROM cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2000-01-01T00:00:00Z",100],["2010-01-01T00:00:00Z",200]]}]}]}`, }, &Query{ name: "two results for cpu, multi-select", params: url.Values{"db": []string{"db0"}}, command: `SELECT core,load FROM cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core","load"],"values":[["2000-01-01T00:00:00Z",null,100],["2010-01-01T00:00:00Z",null,200],["2015-01-01T00:00:00Z",4,null]]}]}]}`, }, &Query{ name: "two results for cpu, wildcard select", params: url.Values{"db": []string{"db0"}}, command: `SELECT * FROM cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core","load"],"values":[["2000-01-01T00:00:00Z",null,100],["2010-01-01T00:00:00Z",null,200],["2015-01-01T00:00:00Z",4,null]]}]}]}`, }, &Query{ name: "one result for core", params: url.Values{"db": []string{"db0"}}, command: `SELECT core FROM cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2015-01-01T00:00:00Z",4]]}]}]}`, }, &Query{ name: "empty result set from non-existent field", params: url.Values{"db": []string{"db0"}}, command: `SELECT foo FROM cpu`, exp: `{"results":[{}]}`, }, }...) 
	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}

func TestServer_Query_Where_Fields(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig())
	defer s.Close()

	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
		t.Fatal(err)
	}
	if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
		t.Fatal(err)
	}

	writes := []string{
		fmt.Sprintf(`cpu alert_id="alert",tenant_id="tenant",_cust="johnson brothers" %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()),
		fmt.Sprintf(`cpu alert_id="alert",tenant_id="tenant",_cust="johnson brothers" %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()),
		fmt.Sprintf(`cpu load=100.0,core=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()),
		fmt.Sprintf(`cpu load=80.0,core=2 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:01:02Z").UnixNano()),
		fmt.Sprintf(`clicks local=true %d`, mustParseTime(time.RFC3339Nano, "2014-11-10T23:00:01Z").UnixNano()),
		fmt.Sprintf(`clicks local=false %d`, mustParseTime(time.RFC3339Nano, "2014-11-10T23:00:02Z").UnixNano()),
	}

	test := NewTest("db0", "rp0")
	test.writes = Writes{
		&Write{data: strings.Join(writes, "\n")},
	}

	test.addQueries([]*Query{
		// non type specific
		&Query{
			name:    "missing measurement with group by",
			params:  url.Values{"db": []string{"db0"}},
			command: `SELECT load from missing group by *`,
			exp:     `{"results":[{}]}`,
		},

		// string
		&Query{
			name:    "single string field",
			params:  url.Values{"db": []string{"db0"}},
			command: `SELECT alert_id FROM cpu WHERE alert_id='alert'`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert"]]}]}]}`,
		},
		&Query{
			name:    "string AND query, all fields in SELECT",
			params:  url.Values{"db": []string{"db0"}},
			command: `SELECT alert_id,tenant_id,_cust FROM cpu WHERE alert_id='alert' AND tenant_id='tenant'`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id","tenant_id","_cust"],"values":[["2015-02-28T01:03:36.703820946Z","alert","tenant","johnson brothers"]]}]}]}`,
		},
		&Query{
			name:    "string AND query, all fields in SELECT, one in parenthesis",
			params:  url.Values{"db": []string{"db0"}},
			command: `SELECT alert_id,tenant_id FROM cpu WHERE alert_id='alert' AND (tenant_id='tenant')`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id","tenant_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert","tenant"]]}]}]}`,
		},
		&Query{
			name:    "string underscored field",
			params:  url.Values{"db": []string{"db0"}},
			command: `SELECT alert_id FROM cpu WHERE _cust='johnson brothers'`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert"]]}]}]}`,
		},
		&Query{
			name:    "string no match",
			params:  url.Values{"db": []string{"db0"}},
			command: `SELECT alert_id FROM cpu WHERE _cust='acme'`,
			exp:     `{"results":[{}]}`,
		},

		// float64
		&Query{
			name:    "float64 GT no match",
			params:  url.Values{"db": []string{"db0"}},
			command: `select load from cpu where load > 100`,
			exp:     `{"results":[{}]}`,
		},
		&Query{
			name:    "float64 GTE match one",
			params:  url.Values{"db": []string{"db0"}},
			command: `select load from cpu where load >= 100`,
			exp:
`{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`, }, &Query{ name: "float64 EQ match upper bound", params: url.Values{"db": []string{"db0"}}, command: `select load from cpu where load = 100`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`, }, &Query{ name: "float64 LTE match two", params: url.Values{"db": []string{"db0"}}, command: `select load from cpu where load <= 100`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100],["2009-11-10T23:01:02Z",80]]}]}]}`, }, &Query{ name: "float64 GT match one", params: url.Values{"db": []string{"db0"}}, command: `select load from cpu where load > 99`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`, }, &Query{ name: "float64 EQ no match", params: url.Values{"db": []string{"db0"}}, command: `select load from cpu where load = 99`, exp: `{"results":[{}]}`, }, &Query{ name: "float64 LT match one", params: url.Values{"db": []string{"db0"}}, command: `select load from cpu where load < 99`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:01:02Z",80]]}]}]}`, }, &Query{ name: "float64 LT no match", params: url.Values{"db": []string{"db0"}}, command: `select load from cpu where load < 80`, exp: `{"results":[{}]}`, }, &Query{ name: "float64 NE match one", params: url.Values{"db": []string{"db0"}}, command: `select load from cpu where load != 100`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:01:02Z",80]]}]}]}`, }, // int64 &Query{ name: "int64 GT no match", params: url.Values{"db": []string{"db0"}}, command: `select core from cpu where core > 4`, exp: `{"results":[{}]}`, }, &Query{ name: "int64 GTE match one", params: url.Values{"db": []string{"db0"}}, command: `select core from cpu where core >= 4`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`, }, &Query{ name: "int64 EQ match upper bound", params: url.Values{"db": []string{"db0"}}, command: `select core from cpu where core = 4`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`, }, &Query{ name: "int64 LTE match two ", params: url.Values{"db": []string{"db0"}}, command: `select core from cpu where core <= 4`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4],["2009-11-10T23:01:02Z",2]]}]}]}`, }, &Query{ name: "int64 GT match one", params: url.Values{"db": []string{"db0"}}, command: `select core from cpu where core > 3`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`, }, &Query{ name: "int64 EQ no match", params: url.Values{"db": []string{"db0"}}, command: `select core from cpu where core = 3`, exp: `{"results":[{}]}`, }, &Query{ name: "int64 LT match one", params: url.Values{"db": []string{"db0"}}, command: `select core from cpu where core < 3`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:01:02Z",2]]}]}]}`, }, &Query{ name: "int64 LT no match", params: url.Values{"db": []string{"db0"}}, command: `select core from cpu where core < 2`, exp: `{"results":[{}]}`, }, &Query{ name: "int64 NE match one", params: url.Values{"db": []string{"db0"}}, command: `select core from cpu 
where core != 4`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:01:02Z",2]]}]}]}`, }, // bool &Query{ name: "bool EQ match true", params: url.Values{"db": []string{"db0"}}, command: `select local from clicks where local = true`, exp: `{"results":[{"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:01Z",true]]}]}]}`, }, &Query{ name: "bool EQ match false", params: url.Values{"db": []string{"db0"}}, command: `select local from clicks where local = false`, exp: `{"results":[{"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:02Z",false]]}]}]}`, }, &Query{ name: "bool NE match one", params: url.Values{"db": []string{"db0"}}, command: `select local from clicks where local != true`, exp: `{"results":[{"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:02Z",false]]}]}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_Where_With_Tags(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`where_events,tennant=paul foo="bar" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), fmt.Sprintf(`where_events,tennant=paul foo="baz" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), fmt.Sprintf(`where_events,tennant=paul foo="bat" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()), fmt.Sprintf(`where_events,tennant=todd foo="bar" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()), fmt.Sprintf(`where_events,tennant=david foo="bap" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "tag field and time", params: url.Values{"db": []string{"db0"}}, command: `select foo from where_events where (tennant = 'paul' OR tennant = 'david') AND time > 1s AND (foo = 'bar' OR foo = 'baz' OR foo = 'bap')`, exp: `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:06Z","bap"]]}]}]}`, }, &Query{ name: "tag or field", params: url.Values{"db": []string{"db0"}}, command: `select foo from where_events where tennant = 'paul' OR foo = 'bar'`, exp: `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:04Z","bat"],["2009-11-10T23:00:05Z","bar"]]}]}]}`, }, &Query{ name: "non-existant tag and field", params: url.Values{"db": []string{"db0"}}, command: `select foo from where_events where tenant != 'paul' AND foo = 'bar'`, exp: `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:05Z","bar"]]}]}]}`, }, &Query{ name: "non-existant tag or field", params: url.Values{"db": []string{"db0"}}, command: 
`select foo from where_events where tenant != 'paul' OR foo = 'bar'`,
			exp: `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:04Z","bat"],["2009-11-10T23:00:05Z","bar"],["2009-11-10T23:00:06Z","bap"]]}]}]}`,
		},
		&Query{
			name:    "where on tag that should be double quoted but isn't",
			params:  url.Values{"db": []string{"db0"}},
			command: `show series where data-center = 'foo'`,
			exp:     `{"results":[{"error":"invalid tag comparison operator"}]}`,
		},
		&Query{
			name:    "where comparing tag and field",
			params:  url.Values{"db": []string{"db0"}},
			command: `select foo from where_events where tennant != foo`,
			exp:     `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:04Z","bat"],["2009-11-10T23:00:05Z","bar"],["2009-11-10T23:00:06Z","bap"]]}]}]}`,
		},
		&Query{
			name:    "where comparing tag and tag",
			params:  url.Values{"db": []string{"db0"}},
			command: `select foo from where_events where tennant = tennant`,
			exp:     `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:04Z","bat"],["2009-11-10T23:00:05Z","bar"],["2009-11-10T23:00:06Z","bap"]]}]}]}`,
		},
	}...)

	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}

func TestServer_Query_With_EmptyTags(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig())
	defer s.Close()

	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
		t.Fatal(err)
	}
	if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
		t.Fatal(err)
	}

	writes := []string{
		fmt.Sprintf(`cpu value=1 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()),
		fmt.Sprintf(`cpu,host=server01 value=2 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()),
	}

	test := NewTest("db0", "rp0")
	test.writes = Writes{
		&Write{data: strings.Join(writes, "\n")},
	}

	test.addQueries([]*Query{
		&Query{
			name:    "where empty tag",
			params:  url.Values{"db": []string{"db0"}},
			command: `select value from cpu where host = ''`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:02Z",1]]}]}]}`,
		},
		&Query{
			name:    "where not empty tag",
			params:  url.Values{"db": []string{"db0"}},
			command: `select value from cpu where host != ''`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:03Z",2]]}]}]}`,
		},
		&Query{
			name:    "where regex all",
			params:  url.Values{"db": []string{"db0"}},
			command: `select value from cpu where host =~ /.*/`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:02Z",1],["2009-11-10T23:00:03Z",2]]}]}]}`,
		},
		&Query{
			name:    "where regex none",
			params:  url.Values{"db": []string{"db0"}},
			command: `select value from cpu where host !~ /.*/`,
			exp:     `{"results":[{}]}`,
		},
		&Query{
			name:    "where regex at least one char",
			params:  url.Values{"db": []string{"db0"}},
			command: `select value from cpu where host =~ /.+/`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:03Z",2]]}]}]}`,
		},
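
		// Note: a series written without the host tag is indexed with host
		// equal to the empty string, which is why /.*/ above matches both
		// series while /.+/ and the queries below split them by whether the
		// tag is actually present.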
&Query{ name: "where regex not at least one char", params: url.Values{"db": []string{"db0"}}, command: `select value from cpu where host !~ /.+/`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:02Z",1]]}]}]}`, }, &Query{ name: "group by empty tag", params: url.Values{"db": []string{"db0"}}, command: `select value from cpu group by host`, exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":""},"columns":["time","value"],"values":[["2009-11-10T23:00:02Z",1]]},{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2009-11-10T23:00:03Z",2]]}]}]}`, }, &Query{ name: "group by missing tag", params: url.Values{"db": []string{"db0"}}, command: `select value from cpu group by region`, exp: `{"results":[{"series":[{"name":"cpu","tags":{"region":""},"columns":["time","value"],"values":[["2009-11-10T23:00:02Z",1],["2009-11-10T23:00:03Z",2]]}]}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_LimitAndOffset(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`limited,tennant=paul foo=2 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), fmt.Sprintf(`limited,tennant=paul foo=3 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), fmt.Sprintf(`limited,tennant=paul foo=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()), fmt.Sprintf(`limited,tennant=todd foo=5 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "limit on points", params: url.Values{"db": []string{"db0"}}, command: `select foo from "limited" LIMIT 2`, exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3]]}]}]}`, }, &Query{ name: "limit higher than the number of data points", params: url.Values{"db": []string{"db0"}}, command: `select foo from "limited" LIMIT 20`, exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4],["2009-11-10T23:00:05Z",5]]}]}]}`, }, &Query{ name: "limit and offset", params: url.Values{"db": []string{"db0"}}, command: `select foo from "limited" LIMIT 2 OFFSET 1`, exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4]]}]}]}`, }, &Query{ name: "limit + offset equal to total number of points", params: url.Values{"db": []string{"db0"}}, command: `select foo from "limited" LIMIT 3 OFFSET 3`, exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`, }, &Query{ name: "limit - offset higher than number of points", command: `select foo from "limited" LIMIT 2 OFFSET 20`, exp: `{"results":[{}]}`, params: url.Values{"db": []string{"db0"}}, }, 
&Query{ name: "limit on points with group by time", command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2`, exp: `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: "limit higher than the number of data points with group by time", command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 20`, exp: `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4],["2009-11-10T23:00:05Z",5]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: "limit and offset with group by time", command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2 OFFSET 1`, exp: `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: "limit + offset equal to the number of points with group by time", command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 3 OFFSET 3`, exp: `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: "limit - offset higher than number of points with group by time", command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2 OFFSET 20`, exp: `{"results":[{}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: "limit - group by tennant", command: `select foo from "limited" group by tennant limit 1`, exp: `{"results":[{"series":[{"name":"limited","tags":{"tennant":"paul"},"columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2]]},{"name":"limited","tags":{"tennant":"todd"},"columns":["time","foo"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: "limit and offset - group by tennant", command: `select foo from "limited" group by tennant limit 1 offset 1`, exp: `{"results":[{"series":[{"name":"limited","tags":{"tennant":"paul"},"columns":["time","foo"],"values":[["2009-11-10T23:00:03Z",3]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, }...) 
	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}

func TestServer_Query_Fill(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig())
	defer s.Close()

	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
		t.Fatal(err)
	}
	if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
		t.Fatal(err)
	}

	writes := []string{
		fmt.Sprintf(`fills val=3 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()),
		fmt.Sprintf(`fills val=5 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()),
		fmt.Sprintf(`fills val=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()),
		fmt.Sprintf(`fills val=10 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:16Z").UnixNano()),
	}

	test := NewTest("db0", "rp0")
	test.writes = Writes{
		&Write{data: strings.Join(writes, "\n")},
	}

	test.addQueries([]*Query{
		&Query{
			name:    "fill with value",
			command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(1)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",10]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "fill with value, WHERE all values match condition",
			command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val < 50 group by time(5s) FILL(1)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",10]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "fill with value, WHERE no values match condition",
			command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val > 50 group by time(5s) FILL(1)`,
			exp:     `{"results":[{}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "fill with previous",
			command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(previous)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",4],["2009-11-10T23:00:15Z",10]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "fill with none, i.e. clear out nulls",
			command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(none)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:15Z",10]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "fill defaults to null",
			command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",null],["2009-11-10T23:00:15Z",10]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "fill defaults to 0 for count",
			command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",0],["2009-11-10T23:00:15Z",1]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "fill none drops 0s for count",
			command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) fill(none)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:15Z",1]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "fill previous overwrites 0s for count",
			command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) fill(previous)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",1]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
	}...)

	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}

func TestServer_Query_Chunk(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig())
	defer s.Close()

	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
		t.Fatal(err)
	}
	if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
		t.Fatal(err)
	}

	writes := make([]string, 10001) // 10,000 is the default chunking size, even when no chunking requested.
	expectedValues := make([]string, len(writes))
	for i := 0; i < len(writes); i++ {
		writes[i] = fmt.Sprintf(`cpu value=%d %d`, i, time.Unix(0, int64(i)).UnixNano())
		expectedValues[i] = fmt.Sprintf(`["%s",%d]`, time.Unix(0, int64(i)).UTC().Format(time.RFC3339Nano), i)
	}
	expected := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[%s]}]}]}`, strings.Join(expectedValues, ","))

	test := NewTest("db0", "rp0")
	test.writes = Writes{
		&Write{data: strings.Join(writes, "\n")},
	}

	test.addQueries([]*Query{
		&Query{
			name:    "SELECT all values, no chunking",
			command: `SELECT value FROM cpu`,
			exp:     expected,
			params:  url.Values{"db": []string{"db0"}},
		},
	}...)
	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}

func TestServer_Query_DropAndRecreateMeasurement(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig())
	defer s.Close()

	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
		t.Fatal(err)
	}
	if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
		t.Fatal(err)
	}
	if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
		t.Fatal(err)
	}
	if err := s.MetaClient.SetDefaultRetentionPolicy("db1", "rp0"); err != nil {
		t.Fatal(err)
	}

	writes := strings.Join([]string{
		fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
		fmt.Sprintf(`memory,host=serverB,region=uswest val=33.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()),
	}, "\n")

	test := NewTest("db0", "rp0")
	test.writes = Writes{
		&Write{data: writes},
		&Write{db: "db1", data: writes},
	}

	test.addQueries([]*Query{
		&Query{
			name:    "verify cpu measurement exists in db1",
			command: `SELECT * FROM cpu`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`,
			params:  url.Values{"db": []string{"db1"}},
		},
		&Query{
			name:    "Drop Measurement, series tags preserved tests",
			command: `SHOW MEASUREMENTS`,
			exp:     `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["memory"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "show series",
			command: `SHOW SERIES`,
			exp:     `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=serverA,region=uswest"],["memory,host=serverB,region=uswest"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "ensure we can query for memory with both tags",
			command: `SELECT * FROM memory where region='uswest' and host='serverB' GROUP BY *`,
			exp:     `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "drop measurement cpu",
			command: `DROP MEASUREMENT cpu`,
			exp:     `{"results":[{}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "verify measurements in DB that we deleted a measurement from",
			command: `SHOW MEASUREMENTS`,
			exp:     `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["memory"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "verify series",
			command: `SHOW SERIES`,
			exp:     `{"results":[{"series":[{"columns":["key"],"values":[["memory,host=serverB,region=uswest"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "verify cpu measurement is gone",
			command: `SELECT * FROM cpu`,
			exp:     `{"results":[{}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "verify cpu measurement is NOT gone from other DB",
			command: `SELECT * FROM cpu`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`,
			params:  url.Values{"db": []string{"db1"}},
		},
		&Query{
			name:    "verify selecting from a tag 'host' still works",
			command: `SELECT * FROM memory where host='serverB' GROUP BY *`,
			exp:     `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "verify selecting from a tag 'region' still works",
			command: `SELECT * FROM memory where region='uswest' GROUP BY *`,
			exp:     `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "verify selecting from a tag 'host' and 'region' still works",
			command: `SELECT * FROM memory where region='uswest' and host='serverB' GROUP BY *`,
			exp:     `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "Drop non-existent measurement",
			command: `DROP MEASUREMENT doesntexist`,
			exp:     `{"results":[{"error":"measurement not found: doesntexist"}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
	}...)

	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}

	// Test that re-inserting the measurement works fine.
	test = NewTest("db0", "rp0")
	test.writes = Writes{
		&Write{data: writes},
	}

	test.addQueries([]*Query{
		&Query{
			name:    "verify measurements after recreation",
			command: `SHOW MEASUREMENTS`,
			exp:     `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["memory"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "verify cpu measurement has been re-inserted",
			command: `SELECT * FROM cpu GROUP BY *`,
			exp:     `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
	}...)
	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}

func TestServer_Query_ShowQueries_Future(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig())
	defer s.Close()

	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
		t.Fatal(err)
	}
	if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
		t.Fatal(err)
	}

	writes := []string{
		fmt.Sprintf(`cpu,host=server01 value=100 %d`, models.MaxNanoTime),
	}

	test := NewTest("db0", "rp0")
	test.writes = Writes{
		&Write{data: strings.Join(writes, "\n")},
	}

	test.addQueries([]*Query{
		&Query{
			name:    `show measurements`,
			command: "SHOW MEASUREMENTS",
			exp:     `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    `show series`,
			command: "SHOW SERIES",
			exp:     `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=server01"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    `show tag keys`,
			command: "SHOW TAG KEYS FROM cpu",
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    `show tag values`,
			command: "SHOW TAG VALUES WITH KEY = \"host\"",
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    `show field keys`,
			command: "SHOW FIELD KEYS",
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["fieldKey","fieldType"],"values":[["value","float"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
	}...)
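
	// The point above is written at models.MaxNanoTime, the largest timestamp
	// the storage engine accepts, so the SHOW meta queries must still report
	// its measurement, series, tags, and fields even though a plain SELECT,
	// which by default only covers time < now(), would not return the point
	// itself.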
	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}

func TestServer_Query_ShowSeries(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig())
	defer s.Close()

	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
		t.Fatal(err)
	}
	if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
		t.Fatal(err)
	}

	writes := []string{
		fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:01Z").UnixNano()),
		fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()),
		fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()),
		fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()),
		fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()),
		fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()),
		fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:07Z").UnixNano()),
	}

	test := NewTest("db0", "rp0")
	test.writes = Writes{
		&Write{data: strings.Join(writes, "\n")},
	}

	test.addQueries([]*Query{
		&Query{
			name:    `show series`,
			command: "SHOW SERIES",
			exp:     `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=server01"],["cpu,host=server01,region=useast"],["cpu,host=server01,region=uswest"],["cpu,host=server02,region=useast"],["disk,host=server03,region=caeast"],["gpu,host=server02,region=useast"],["gpu,host=server03,region=caeast"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    `show series from measurement`,
			command: "SHOW SERIES FROM cpu",
			exp:     `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=server01"],["cpu,host=server01,region=useast"],["cpu,host=server01,region=uswest"],["cpu,host=server02,region=useast"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    `show series from regular expression`,
			command: "SHOW SERIES FROM /[cg]pu/",
			exp:     `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=server01"],["cpu,host=server01,region=useast"],["cpu,host=server01,region=uswest"],["cpu,host=server02,region=useast"],["gpu,host=server02,region=useast"],["gpu,host=server03,region=caeast"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    `show series with where tag`,
			command: "SHOW SERIES WHERE region = 'uswest'",
			exp:     `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=server01,region=uswest"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    `show series where tag matches regular expression`,
			command: "SHOW SERIES WHERE region =~ /ca.*/",
			exp:     `{"results":[{"series":[{"columns":["key"],"values":[["disk,host=server03,region=caeast"],["gpu,host=server03,region=caeast"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    `show series`,
			command: "SHOW SERIES WHERE host !~ /server0[12]/",
			exp:
`{"results":[{"series":[{"columns":["key"],"values":[["disk,host=server03,region=caeast"],["gpu,host=server03,region=caeast"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show series with from and where`, command: "SHOW SERIES FROM cpu WHERE region = 'useast'", exp: `{"results":[{"series":[{"columns":["key"],"values":[["cpu,host=server01,region=useast"],["cpu,host=server02,region=useast"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show series with WHERE time should fail`, command: "SHOW SERIES WHERE time > now() - 1h", exp: `{"results":[{"error":"SHOW SERIES doesn't support time in WHERE clause"}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show series with WHERE field should fail`, command: "SHOW SERIES WHERE value > 10.0", exp: `{"results":[{"error":"invalid tag comparison operator"}]}`, params: url.Values{"db": []string{"db0"}}, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_ShowMeasurements(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`gpu,host=server02,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`other,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: `show measurements with limit 2`, command: "SHOW MEASUREMENTS LIMIT 2", exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["gpu"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show measurements using WITH`, command: "SHOW MEASUREMENTS WITH MEASUREMENT = cpu", exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show measurements using WITH and regex`, command: "SHOW MEASUREMENTS WITH MEASUREMENT =~ /[cg]pu/", exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["gpu"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show measurements using WITH and regex - no matches`, command: "SHOW MEASUREMENTS WITH MEASUREMENT =~ /.*zzzzz.*/", exp: `{"results":[{}]}`, params: url.Values{"db": []string{"db0"}}, }, 
&Query{ name: `show measurements where tag matches regular expression`, command: "SHOW MEASUREMENTS WHERE region =~ /ca.*/", exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["gpu"],["other"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show measurements where tag does not match a regular expression`, command: "SHOW MEASUREMENTS WHERE region !~ /ca.*/", exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show measurements with time in WHERE clauses errors`, command: `SHOW MEASUREMENTS WHERE time > now() - 1h`, exp: `{"results":[{"error":"SHOW MEASUREMENTS doesn't support time in WHERE clause"}]}`, params: url.Values{"db": []string{"db0"}}, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_ShowTagKeys(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: `show tag keys`, command: "SHOW TAG KEYS", exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"disk","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: "show tag keys from", command: "SHOW TAG KEYS FROM cpu", exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: "show tag keys from regex", command: "SHOW TAG KEYS FROM /[cg]pu/", exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: "show tag keys measurement not found", command: "SHOW TAG KEYS FROM doesntexist", exp: `{"results":[{}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: "show tag keys with time 
in WHERE clause errors", command: "SHOW TAG KEYS FROM cpu WHERE time > now() - 1h", exp: `{"results":[{"error":"SHOW TAG KEYS doesn't support time in WHERE clause"}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: "show tag values with key", command: "SHOW TAG VALUES WITH KEY = host", exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"],["host","server03"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show tag values with key and where`, command: `SHOW TAG VALUES FROM cpu WITH KEY = host WHERE region = 'uswest'`, exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show tag values with key and where matches the regular expression`, command: `SHOW TAG VALUES WITH KEY = host WHERE region =~ /ca.*/`, exp: `{"results":[{"series":[{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server03"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show tag values with key and where does not match the regular expression`, command: `SHOW TAG VALUES WITH KEY = region WHERE host !~ /server0[12]/`, exp: `{"results":[{"series":[{"name":"disk","columns":["key","value"],"values":[["region","caeast"]]},{"name":"gpu","columns":["key","value"],"values":[["region","caeast"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show tag values with key and where partially matches the regular expression`, command: `SHOW TAG VALUES WITH KEY = host WHERE region =~ /us/`, exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show tag values with key and where partially does not match the regular expression`, command: `SHOW TAG VALUES WITH KEY = host WHERE region !~ /us/`, exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"]]},{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server03"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show tag values with key in and where does not match the regular expression`, command: `SHOW TAG VALUES FROM cpu WITH KEY IN (host, region) WHERE region = 'uswest'`, exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["region","uswest"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show tag values with key and measurement matches regular expression`, command: `SHOW TAG VALUES FROM /[cg]pu/ WITH KEY = host`, exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"],["host","server03"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show tag values with key and time in WHERE clause should error`, command: `SHOW TAG VALUES WITH KEY = host WHERE time > now() - 1h`, exp: `{"results":[{"error":"SHOW TAG VALUES doesn't support time in WHERE clause"}]}`, 
params: url.Values{"db": []string{"db0"}}, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_ShowFieldKeys(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`cpu,host=server01 field1=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`cpu,host=server01,region=uswest field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`cpu,host=server01,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`cpu,host=server02,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`gpu,host=server01,region=useast field4=200,field5=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`gpu,host=server03,region=caeast field6=200,field7=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), fmt.Sprintf(`disk,host=server03,region=caeast field8=200,field9=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: `show field keys`, command: `SHOW FIELD KEYS`, exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey","fieldType"],"values":[["field1","float"],["field2","float"],["field3","float"]]},{"name":"disk","columns":["fieldKey","fieldType"],"values":[["field8","float"],["field9","float"]]},{"name":"gpu","columns":["fieldKey","fieldType"],"values":[["field4","float"],["field5","float"],["field6","float"],["field7","float"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show field keys from measurement`, command: `SHOW FIELD KEYS FROM cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey","fieldType"],"values":[["field1","float"],["field2","float"],["field3","float"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, &Query{ name: `show field keys measurement with regex`, command: `SHOW FIELD KEYS FROM /[cg]pu/`, exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey","fieldType"],"values":[["field1","float"],["field2","float"],["field3","float"]]},{"name":"gpu","columns":["fieldKey","fieldType"],"values":[["field4","float"],["field5","float"],["field6","float"],["field7","float"]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, }...) 
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_ContinuousQuery(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } runTest := func(test *Test, t *testing.T) { for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // Start times of CQ intervals. interval0 := time.Now().Add(-time.Second).Round(time.Second * 5) interval1 := interval0.Add(-time.Second * 5) interval2 := interval0.Add(-time.Second * 10) interval3 := interval0.Add(-time.Second * 15) writes := []string{ // Point too far in the past for CQ to pick up. fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, interval3.Add(time.Second).UnixNano()), // Points two intervals ago. fmt.Sprintf(`cpu,host=server01 value=100 %d`, interval2.Add(time.Second).UnixNano()), fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, interval2.Add(time.Second*2).UnixNano()), fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, interval2.Add(time.Second*3).UnixNano()), // Points one interval ago. fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, interval1.Add(time.Second).UnixNano()), fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, interval1.Add(time.Second*2).UnixNano()), // Points in the current interval. 
fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, interval0.Add(time.Second).UnixNano()), fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, interval0.Add(time.Second*2).UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: `create another retention policy for CQ to write into`, command: `CREATE RETENTION POLICY rp1 ON db0 DURATION 1h REPLICATION 1`, exp: `{"results":[{}]}`, }, &Query{ name: "create continuous query with backreference", command: `CREATE CONTINUOUS QUERY "cq1" ON db0 BEGIN SELECT count(value) INTO "rp1".:MEASUREMENT FROM /[cg]pu/ GROUP BY time(5s) END`, exp: `{"results":[{}]}`, }, &Query{ name: `create another retention policy for CQ to write into`, command: `CREATE RETENTION POLICY rp2 ON db0 DURATION 1h REPLICATION 1`, exp: `{"results":[{}]}`, }, &Query{ name: "create continuous query with backreference and group by time", command: `CREATE CONTINUOUS QUERY "cq2" ON db0 BEGIN SELECT count(value) INTO "rp2".:MEASUREMENT FROM /[cg]pu/ GROUP BY time(5s), * END`, exp: `{"results":[{}]}`, }, &Query{ name: `show continuous queries`, command: `SHOW CONTINUOUS QUERIES`, exp: `{"results":[{"series":[{"name":"db0","columns":["name","query"],"values":[["cq1","CREATE CONTINUOUS QUERY cq1 ON db0 BEGIN SELECT count(value) INTO db0.rp1.:MEASUREMENT FROM db0.rp0./[cg]pu/ GROUP BY time(5s) END"],["cq2","CREATE CONTINUOUS QUERY cq2 ON db0 BEGIN SELECT count(value) INTO db0.rp2.:MEASUREMENT FROM db0.rp0./[cg]pu/ GROUP BY time(5s), * END"]]}]}]}`, }, }...) // Run first test to create CQs. runTest(&test, t) // Trigger CQs to run. u := fmt.Sprintf("%s/data/process_continuous_queries?time=%d", s.URL(), interval0.UnixNano()) if _, err := s.HTTPPost(u, nil); err != nil { t.Fatal(err) } // Wait for CQs to run. TODO: fix this ugly hack //time.Sleep(time.Second * 5) // Setup tests to check the CQ results. test2 := NewTest("db0", "rp1") test2.addQueries([]*Query{ &Query{ skip: true, name: "check results of cq1", command: `SELECT * FROM "rp1"./[cg]pu/`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count","host","region","value"],"values":[["` + interval2.UTC().Format(time.RFC3339Nano) + `",3,null,null,null]]},{"name":"gpu","columns":["time","count","host","region","value"],"values":[["` + interval1.UTC().Format(time.RFC3339Nano) + `",2,null,null,null],["` + interval0.UTC().Format(time.RFC3339Nano) + `",1,null,null,null]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, // TODO: restore this test once this is fixed: https://github.com/influxdata/influxdb/issues/3968 &Query{ skip: true, name: "check results of cq2", command: `SELECT * FROM "rp2"./[cg]pu/`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count","host","region","value"],"values":[["` + interval2.UTC().Format(time.RFC3339Nano) + `",1,"server01","uswest",null],["` + interval2.UTC().Format(time.RFC3339Nano) + `",1,"server01","",null],["` + interval2.UTC().Format(time.RFC3339Nano) + `",1,"server01","useast",null]]},{"name":"gpu","columns":["time","count","host","region","value"],"values":[["` + interval1.UTC().Format(time.RFC3339Nano) + `",1,"server02","useast",null],["` + interval1.UTC().Format(time.RFC3339Nano) + `",1,"server03","caeast",null],["` + interval0.UTC().Format(time.RFC3339Nano) + `",1,"server03","caeast",null]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, }...) // Run second test to check CQ results. 
runTest(&test2, t) } // Tests that a known CQ query with concurrent writes does not deadlock the server func TestServer_ContinuousQuery_Deadlock(t *testing.T) { // Skip until #3517 & #3522 are merged t.Skip("Skipping CQ deadlock test") if testing.Short() { t.Skip("skipping CQ deadlock test") } t.Parallel() s := OpenServer(NewConfig()) defer func() { s.Close() // Nil the server so our deadlock detector goroutine can determine if we completed writes // without timing out s.Server = nil }() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } test := NewTest("db0", "rp0") test.addQueries([]*Query{ &Query{ name: "create continuous query", command: `CREATE CONTINUOUS QUERY "my.query" ON db0 BEGIN SELECT sum(visits) as visits INTO test_1m FROM myseries GROUP BY time(1m), host END`, exp: `{"results":[{}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } // Deadlock detector. If the deadlock is fixed, this test should complete all the writes in ~2.5 seconds (with artificial delays // added). After 10 seconds, if the server has not been closed then we hit the deadlock bug. iterations := 0 go func(s *Server) { <-time.After(10 * time.Second) // If the server is not nil then the test is still running and stuck. We panic to avoid // having the whole test suite hang indefinitely. if s.Server != nil { panic("possible deadlock. writes did not complete in time") } }(s) for { // After the second write, if the deadlock exists, we'll get a write timeout and // all subsequent writes will time out if iterations > 5 { break } writes := []string{} for i := 0; i < 1000; i++ { writes = append(writes, fmt.Sprintf(`myseries,host=host-%d visits=1i`, i)) } write := strings.Join(writes, "\n") if _, err := s.Write(test.db, test.rp, write, test.params); err != nil { t.Fatal(err) } iterations++ time.Sleep(500 * time.Millisecond) } } func TestServer_Query_EvilIdentifiers(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf("cpu select=1,in-bytes=2 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())}, } test.addQueries([]*Query{ &Query{ name: `query evil identifiers`, command: `SELECT "select", "in-bytes" FROM cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","select","in-bytes"],"values":[["2000-01-01T00:00:00Z",1,2]]}]}]}`, params: url.Values{"db": []string{"db0"}}, }, }...)
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_OrderByTime(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`cpu,host=server1 value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), fmt.Sprintf(`cpu,host=server1 value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), fmt.Sprintf(`cpu,host=server1 value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), fmt.Sprintf(`power,presence=true value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), fmt.Sprintf(`power,presence=true value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), fmt.Sprintf(`power,presence=true value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), fmt.Sprintf(`power,presence=false value=4 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:04Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "order on points", params: url.Values{"db": []string{"db0"}}, command: `select value from "cpu" ORDER BY time DESC`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:03Z",3],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:01Z",1]]}]}]}`, }, &Query{ name: "order desc with tags", params: url.Values{"db": []string{"db0"}}, command: `select value from "power" ORDER BY time DESC`, exp: `{"results":[{"series":[{"name":"power","columns":["time","value"],"values":[["2000-01-01T00:00:04Z",4],["2000-01-01T00:00:03Z",3],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:01Z",1]]}]}]}`, }, }...) 
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_FieldWithMultiplePeriods(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`cpu foo.bar.baz=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "baseline", params: url.Values{"db": []string{"db0"}}, command: `select * from cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, }, &Query{ name: "select field with periods", params: url.Values{"db": []string{"db0"}}, command: `select "foo.bar.baz" from cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, }, }...) for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_FieldWithMultiplePeriodsMeasurementPrefixMatch(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`foo foo.bar.baz=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "baseline", params: url.Values{"db": []string{"db0"}}, command: `select * from foo`, exp: `{"results":[{"series":[{"name":"foo","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, }, &Query{ name: "select field with periods", params: url.Values{"db": []string{"db0"}}, command: `select "foo.bar.baz" from foo`, exp: `{"results":[{"series":[{"name":"foo","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, }, }...) 
for i, query := range test.queries { if i == 0 { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } } if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_IntoTarget(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`foo value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), fmt.Sprintf(`foo value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), fmt.Sprintf(`foo value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), fmt.Sprintf(`foo value=4 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), fmt.Sprintf(`foo value=4,foobar=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "into", params: url.Values{"db": []string{"db0"}}, command: `SELECT * INTO baz FROM foo`, exp: `{"results":[{"series":[{"name":"result","columns":["time","written"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, }, &Query{ name: "confirm results", params: url.Values{"db": []string{"db0"}}, command: `SELECT * FROM baz`, exp: `{"results":[{"series":[{"name":"baz","columns":["time","foobar","value"],"values":[["2000-01-01T00:00:00Z",null,1],["2000-01-01T00:00:10Z",null,2],["2000-01-01T00:00:20Z",null,3],["2000-01-01T00:00:30Z",null,4],["2000-01-01T00:00:40Z",3,4]]}]}]}`, }, }...) if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // This test ensures that data is not duplicated with measurements // of the same name. func TestServer_Query_DuplicateMeasurements(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() // Create a second database. if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db1", "rp0"); err != nil { t.Fatal(err) } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf(`cpu value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())}, } if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } test = NewTest("db1", "rp0") test.writes = Writes{ &Write{data: fmt.Sprintf(`cpu value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano())}, } if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } test.addQueries([]*Query{ &Query{ name: "select from both databases", params: url.Values{"db": []string{"db0"}}, command: `SELECT value FROM db0.rp0.cpu, db1.rp0.cpu`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1],["2000-01-01T00:00:10Z",2]]}]}]}`, }, }...) 
for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } func TestServer_Query_LargeTimestamp(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() writes := []string{ fmt.Sprintf(`cpu value=100 %d`, models.MaxNanoTime), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: `select value at max nano time`, params: url.Values{"db": []string{"db0"}}, command: fmt.Sprintf(`SELECT value FROM cpu WHERE time <= %d`, models.MaxNanoTime), exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["` + time.Unix(0, models.MaxNanoTime).Format(time.RFC3339Nano) + `",100]]}]}]}`, }, }...) if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } // Open a new server with the same configuration file. // This is to ensure the meta data was marshaled correctly. s2 := OpenServer(s.Config) defer s2.Close() for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } } // This test reproduced a data race with closing the // Subscriber points channel while writes were in-flight in the PointsWriter. func TestServer_ConcurrentPointsWriter_Subscriber(t *testing.T) { t.Parallel() s := OpenDefaultServer(NewConfig()) defer s.Close() // goroutine to write points done := make(chan struct{}) go func() { for { select { case <-done: return default: wpr := &coordinator.WritePointsRequest{ Database: "db0", RetentionPolicy: "rp0", } s.PointsWriter.WritePoints(wpr.Database, wpr.RetentionPolicy, models.ConsistencyLevelAny, wpr.Points) } } }() time.Sleep(10 * time.Millisecond) close(done) // Race occurs on s.Close() } // Ensure time in where clause is inclusive func TestServer_WhereTimeInclusive(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) defer s.Close() if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { t.Fatal(err) } if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } writes := []string{ fmt.Sprintf(`cpu value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), fmt.Sprintf(`cpu value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), fmt.Sprintf(`cpu value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), } test := NewTest("db0", "rp0") test.writes = Writes{ &Write{data: strings.Join(writes, "\n")}, } test.addQueries([]*Query{ &Query{ name: "all GTE/LTE", params: url.Values{"db": []string{"db0"}}, command: `SELECT * from cpu where time >= '2000-01-01T00:00:01Z' and time <= '2000-01-01T00:00:03Z'`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, }, &Query{ name: "all GTE", params: url.Values{"db": []string{"db0"}}, command: `SELECT * from cpu where time >= '2000-01-01T00:00:01Z'`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, }, &Query{ name: "all LTE", params: url.Values{"db": []string{"db0"}}, command: `SELECT * from cpu 
where time <= '2000-01-01T00:00:03Z'`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, }, &Query{ name: "first GTE/LTE", params: url.Values{"db": []string{"db0"}}, command: `SELECT * from cpu where time >= '2000-01-01T00:00:01Z' and time <= '2000-01-01T00:00:01Z'`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1]]}]}]}`, }, &Query{ name: "last GTE/LTE", params: url.Values{"db": []string{"db0"}}, command: `SELECT * from cpu where time >= '2000-01-01T00:00:03Z' and time <= '2000-01-01T00:00:03Z'`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:03Z",3]]}]}]}`, }, &Query{ name: "before GTE/LTE", params: url.Values{"db": []string{"db0"}}, command: `SELECT * from cpu where time <= '2000-01-01T00:00:00Z'`, exp: `{"results":[{}]}`, }, &Query{ name: "all GT/LT", params: url.Values{"db": []string{"db0"}}, command: `SELECT * from cpu where time > '2000-01-01T00:00:00Z' and time < '2000-01-01T00:00:04Z'`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, }, &Query{ name: "first GT/LT", params: url.Values{"db": []string{"db0"}}, command: `SELECT * from cpu where time > '2000-01-01T00:00:00Z' and time < '2000-01-01T00:00:02Z'`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1]]}]}]}`, }, &Query{ name: "last GT/LT", params: url.Values{"db": []string{"db0"}}, command: `SELECT * from cpu where time > '2000-01-01T00:00:02Z' and time < '2000-01-01T00:00:04Z'`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:03Z",3]]}]}]}`, }, &Query{ name: "all GT", params: url.Values{"db": []string{"db0"}}, command: `SELECT * from cpu where time > '2000-01-01T00:00:00Z'`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, }, &Query{ name: "all LT", params: url.Values{"db": []string{"db0"}}, command: `SELECT * from cpu where time < '2000-01-01T00:00:04Z'`, exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, }, }...) if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } for _, query := range test.queries { if query.skip { t.Logf("SKIP:: %s", query.name) continue } if err := query.Execute(s); err != nil { t.Error(query.Error(err)) } else if !query.success() { t.Error(query.failureMessage()) } } }
mit
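The server tests above share one table-driven shape: declare the writes, declare a list of query cases (name, command, expected JSON body, optional params and skip flag), then loop over the cases, executing each one and comparing the response verbatim against the expectation. Below is a minimal TypeScript sketch of that loop; executeQuery is a hypothetical stand-in for the HTTP call to the server, not part of any real client API.

interface QueryCase {
  name: string;
  command: string;
  exp: string; // expected response body, compared verbatim
  params?: Record<string, string>;
  skip?: boolean;
}

async function runCases(
  cases: QueryCase[],
  executeQuery: (command: string, params?: Record<string, string>) => Promise<string>,
): Promise<void> {
  for (const c of cases) {
    if (c.skip) {
      console.log(`SKIP:: ${c.name}`); // mirrors the t.Logf skip path above
      continue;
    }
    const got = await executeQuery(c.command, c.params);
    if (got !== c.exp) {
      console.error(`${c.name}: expected ${c.exp}, got ${got}`);
    }
  }
}

Keeping the expectation as a verbatim JSON string, as these tests do, makes a failing case easy to reproduce and paste back into the table.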
o0rebelious0o/Beginners-Guide-to-Windows-10
Adeptly Adaptive Challenge/UserControls/ViewModels/INewsItemViewModel.cs
279
namespace Adeptly_Adaptive_Challenge.UserControls.ViewModels { interface INewsItemViewModel { string Headline { get; set; } string SubHeadline { get; set; } string DateLine { get; set; } string Image { get; set; } } }
mit
swp-unikat/Einschreibesystem
src/Core/EntityBundle/Repository/ParticipantsRepository.php
998
<?php /** * Created by IntelliJ IDEA. * Authors: Martin Griebel, Marco Hanisch * Date: 23.05.2016 * Time: 13:54 */ namespace Core\EntityBundle\Repository; use Core\EntityBundle\Entity\Workshop; use Doctrine\ORM\EntityRepository; use Doctrine\ORM\Query\Expr\Join; /** * This class provides the method to get all blacklisted participants */ class ParticipantsRepository extends EntityRepository { /** * Returns all blacklisted participants ordered by email, or false when none exist */ public function getAllBlacklistedParticipants() { $em = $this->getEntityManager(); $qb = $em->createQueryBuilder(); $q = $qb->select(array('p')) ->from('CoreEntityBundle:Participants', 'p') ->where( $qb->expr()->eq('p.blacklisted', '1') ) ->orderBy('p.email', 'ASC'); $result = $q->getQuery()->getResult(); if (!$result) { return false; } else { return $result; } } }
mit
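One detail worth noting in ParticipantsRepository above: getAllBlacklistedParticipants returns false when the query matches nothing, so every caller has to distinguish false from a result array. Here is a sketch of the same lookup in TypeScript that sidesteps this by returning an empty array; Participant and db.query are placeholders, not part of any real data-access API.

interface Participant {
  email: string;
  blacklisted: boolean;
}

// Returning an empty array instead of `false` keeps the return type
// uniform, so callers can iterate the result unconditionally.
async function getAllBlacklistedParticipants(
  db: { query: (sql: string) => Promise<Participant[]> },
): Promise<Participant[]> {
  // ORDER BY email ASC mirrors the Doctrine query builder above.
  return db.query(
    "SELECT * FROM participants WHERE blacklisted = 1 ORDER BY email ASC",
  );
}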
ylatuya/oxyplot
Source/OxyPlot.Pdf/PdfPlotWriter.cs
2100
// -------------------------------------------------------------------------------------------------------------------- // <copyright file="PdfPlotWriter.cs" company="OxyPlot"> // The MIT License (MIT) // // Copyright (c) 2012 Oystein Bjorke // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // </copyright> // -------------------------------------------------------------------------------------------------------------------- using System.IO; namespace OxyPlot.Pdf { public static class PdfPlotWriter { public static void Save(PlotModel model, string path, double width, double height) { using (var s = File.OpenWrite(path)) { Save(model, s, width, height); } } public static void Save(PlotModel model, Stream s, double width, double height) { var svgrc = new PdfRenderContext(width, height); model.Render(svgrc); svgrc.Save(s); } } }
mit
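PdfPlotWriter above uses a common overload pattern: the path-based Save opens a stream, delegates to the stream-based Save, and disposes the stream when done (note the local is named svgrc even though it holds a PdfRenderContext, apparently carried over from a sibling SVG writer). The same delegate-to-the-stream-overload shape as a TypeScript sketch, where render stands in for the model/render-context pipeline:

import { createWriteStream } from "fs";
import type { Writable } from "stream";

function saveToStream(render: (out: Writable) => void, out: Writable): void {
  render(out);
}

function saveToPath(render: (out: Writable) => void, path: string): void {
  const out = createWriteStream(path);
  try {
    saveToStream(render, out);
  } finally {
    out.end(); // plays the role of the C# using block's Dispose
  }
}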
yogeshsaroya/new-cdnjs
ajax/libs/jquery.lazyloadxt/0.8.11/jquery.lazyloadxt.extra.js
129
version https://git-lfs.github.com/spec/v1 oid sha256:b3254e22c7a2f68c85682004f831509e8d1b0e6164e6f1c68cf691d7a18aaf5c size 8680
mit
densebrain/typestore
packages/typestore-mocks/src/MockRepoPlugin.ts
1859
import { DefaultModel, IKeyValue, Errors, IStorePlugin, ICoordinatorOptions, ICoordinator, Repo, IModel, IRepoPlugin, PluginType, PluginEventType, TKeyValue } from 'typestore' import {MockKeyValue} from "./MockStore" export class MockRepoPlugin<M extends IModel> implements IRepoPlugin<M> { type = PluginType.Repo supportedModels:any[] private coordinator private recordCount = 0 constructor(private store:IStorePlugin,private repo:Repo<M>,...supportedModels) { this.supportedModels = supportedModels repo.attach(this) } handle(eventType:PluginEventType, ...args):boolean|any { return false; } init(coordinator:ICoordinator, opts:ICoordinatorOptions):Promise<ICoordinator> { this.coordinator = coordinator return Promise.resolve(coordinator); } start():Promise<ICoordinator> { return Promise.resolve(this.coordinator) } stop():Promise<ICoordinator> { return Promise.resolve(this.coordinator) } key(...args):MockKeyValue { return new MockKeyValue(args) } get(key:TKeyValue):Promise<M> { if (!(key instanceof MockKeyValue)) { return null } return Promise.resolve(new this.repo.modelClazz()) as Promise<M> } save(o:M):Promise<M> { this.recordCount++ return Promise.resolve(o) } remove(key:TKeyValue):Promise<any> { this.recordCount-- return Promise.resolve({}) } count():Promise<number> { return Promise.resolve(this.recordCount) } async bulkGet(...keys:TKeyValue[]):Promise<M[]> { const promises = keys.map(key => this.get(key)) return await Promise.all(promises) } async bulkSave(...models:M[]):Promise<M[]> { const promises = models.map(model => this.save(model)) return await Promise.all(promises) } async bulkRemove(...keys:TKeyValue[]):Promise<any[]> { const promises = keys.map(key => this.remove(key)) return await Promise.all(promises) } }
mit
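The bulk methods in MockRepoPlugin above all reduce to the same move: map each input through the single-record operation and gather the promises with Promise.all. That move in isolation, with placeholder generics:

// Fan a single-record async operation out over many inputs. Every call
// starts immediately; the combined promise rejects on the first failure,
// matching the bulkGet/bulkSave/bulkRemove behaviour above.
async function bulkApply<I, O>(
  inputs: I[],
  op: (input: I) => Promise<O>,
): Promise<O[]> {
  return Promise.all(inputs.map((input) => op(input)));
}

If partial failures should not abort the whole batch, Promise.allSettled is the drop-in alternative.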
duongtruc/LVTN
modules/articles/tests/server/article.server.model.tests.js
1254
'use strict'; /** * Module dependencies. */ var should = require('should'), mongoose = require('mongoose'), User = mongoose.model('User'), Article = mongoose.model('Article'); /** * Globals */ var user, article; /** * Unit tests */ describe('Article Model Unit Tests:', function () { beforeEach(function (done) { user = new User({ firstName: 'Full', displayName: 'Full Name', email: '[email protected]', username: 'username', password: 'password' }); user.save(function () { article = new Article({ title: 'Article Title', content: 'Article Content', user: user }); done(); }); }); describe('Method Save', function () { it('should be able to save without problems', function (done) { return article.save(function (err) { should.not.exist(err); done(); }); }); it('should be able to show an error when trying to save without a title', function (done) { article.title = ''; return article.save(function (err) { should.exist(err); done(); }); }); }); afterEach(function (done) { Article.remove().exec(function () { User.remove().exec(done); }); }); });
mit
LatinWarrior/luis.angular.paging
Luis.Angular.Paging/Properties/AssemblyInfo.cs
1374
using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; // General Information about an assembly is controlled through the following // set of attributes. Change these attribute values to modify the information // associated with an assembly. [assembly: AssemblyTitle("Luis.Angular.Paging")] [assembly: AssemblyDescription("")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyCompany("")] [assembly: AssemblyProduct("Luis.Angular.Paging")] [assembly: AssemblyCopyright("Copyright © 2015")] [assembly: AssemblyTrademark("")] [assembly: AssemblyCulture("")] // Setting ComVisible to false makes the types in this assembly not visible // to COM components. If you need to access a type in this assembly from // COM, set the ComVisible attribute to true on that type. [assembly: ComVisible(false)] // The following GUID is for the ID of the typelib if this project is exposed to COM [assembly: Guid("086f7f6f-b255-4732-8ecc-2cda649ef91b")] // Version information for an assembly consists of the following four values: // // Major Version // Minor Version // Build Number // Revision // // You can specify all the values or you can default the Revision and Build Numbers // by using the '*' as shown below: [assembly: AssemblyVersion("1.0.0.0")] [assembly: AssemblyFileVersion("1.0.0.0")]
mit
maldrasen/archive
Mephidross/old/slaver/pages/dregus/smith/BuildAnvil.1.js
689
Lexicon.add('dregus/smith/BuildAnvil.1', { layout: 'event', links: [ { name:'Pay the Dwarf', action:'event:buildAnvil_2', requirements:['|C|Currency>249','|C|Inventory.ironBar>19'] }, { name:'Back', action:'event:complete' }, ], body: [ { id:'image', style:"float: left; margin-right: 12px;" }, { text:"An anvil eh? Well you came to the right place. I can make ye one, but it's not going to come cheap. I'll "+ "need one score bars of iron and 250 pearls o'sin for me labor." }, { style:"clear:both; height:0" }, ], onAfter: function() { $('#image').append(new ScaleCropper({ image:'gen', width:400, height:300 }).buildFrame()); }, });
mit
orangeeli/orangetree
app/js/year.js
204
module.exports= (()=>{ 'use strict'; return { update (doc){ const year = doc.querySelector(".year"); year.innerText = `${new Date().getFullYear()}`; } }; })();
mit
FacticiusVir/Warm
Keeper.Warm.Core/AddressType.cs
128
namespace Keeper.Warm { public enum AddressType { None, Retained, Heap, Stack } }
mit
Pieter-hogent/webapps
democode/models/User.js
988
let mongoose = require('mongoose'); let crypto = require('crypto'); let jwt = require('jsonwebtoken'); let UserSchema = new mongoose.Schema({ username: { type: String, lowercase: true, unique: true }, hash: String, salt: String }); UserSchema.methods.setPassword = function (password) { this.salt = crypto.randomBytes(32).toString('hex'); this.hash = crypto.pbkdf2Sync(password, this.salt, 10000, 64, 'sha512').toString('hex'); }; UserSchema.methods.validPassword = function (password) { var hash = crypto.pbkdf2Sync(password, this.salt, 10000, 64, 'sha512').toString('hex'); return this.hash === hash; }; UserSchema.methods.generateJWT = function () { var today = new Date(); var exp = new Date(today); exp.setDate(today.getDate() + 60); return jwt.sign({ _id: this._id, username: this.username, exp: parseInt(exp.getTime() / 1000), }, process.env.RECIPE_BACKEND_SECRET); }; mongoose.model('User', UserSchema);
mit
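The User schema above implements a standard salted-PBKDF2 password scheme plus a 60-day JWT. The same scheme as a standalone TypeScript sketch using Node's built-in crypto; the jsonwebtoken package is assumed to be installed, and the fallback secret is a placeholder:

import * as crypto from "crypto";
import * as jwt from "jsonwebtoken";

const SECRET = process.env.RECIPE_BACKEND_SECRET ?? "dev-only-secret"; // placeholder fallback

// Same parameters as the schema methods above: 32-byte salt,
// 10000 PBKDF2 iterations, 64-byte key, SHA-512 digest.
function hashPassword(password: string): { salt: string; hash: string } {
  const salt = crypto.randomBytes(32).toString("hex");
  const hash = crypto.pbkdf2Sync(password, salt, 10000, 64, "sha512").toString("hex");
  return { salt, hash };
}

function validPassword(password: string, salt: string, hash: string): boolean {
  const candidate = crypto.pbkdf2Sync(password, salt, 10000, 64, "sha512").toString("hex");
  return candidate === hash;
}

function generateJWT(id: string, username: string): string {
  const exp = Math.floor(Date.now() / 1000) + 60 * 24 * 60 * 60; // 60 days, as above
  return jwt.sign({ _id: id, username, exp }, SECRET);
}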
esteban67/sparql
src/sintax/GraphNode.java
1828
package sintax; import java.io.IOException; import java.util.ArrayList; import com.hp.hpl.jena.graph.Node; import lexic.Token; /* * GraphNode ::= VarOrTerm | TriplesNode * FIRST(GraphNode) = { * VAR1, VAR2, IRI_REF,PNAME_LN,PNAME_NS, STRING_LITERAL1, STRING_LITERAL2,STRING_LITERAL_LONG1,STRING_LITERAL_LONG2, * INTEGER,DECIMAL, DOUBLE, INTEGER_POSITIVE, DECIMAL_POSITIVE, DOUBLE_POSITIVE, INTEGER_NEGATIVE , * DECIMAL_NEGATIVE, DOUBLE_NEGATIVE, true, false, NIL, '(' * } **/ public class GraphNode extends Production{ public Node node = null; public boolean process() throws IOException{ switch($.current.token){ case VAR1: case VAR2: case IRI_REF: case PNAME_NS: case PNAME_LN: case STRING_LITERAL1: case STRING_LITERAL2: case STRING_LITERAL_LONG1: case STRING_LITERAL_LONG2: case INTEGER: case DECIMAL: case DOUBLE: case INTEGER_POSITIVE: case DECIMAL_POSITIVE: case DOUBLE_POSITIVE: case INTEGER_NEGATIVE: case DECIMAL_NEGATIVE: case DOUBLE_NEGATIVE: case TRUE: case FALSE: case NIL: VarOrTerm varOrTerm = (VarOrTerm) $.get("VarOrTerm"); if(!varOrTerm.analize()) return false; node = varOrTerm.node; break; case LEFT_PARENTH: if(!$.analize("TriplesNode")) return false; break; default: return false; } return true; } @Override public ArrayList<Token> FIRSTS() throws IOException { ArrayList<Token> ans = new ArrayList<Token>(); for ( Token t : get("VarOrTerm").FIRSTS() ) ans.add(t); for ( Token t : get("TriplesNode").FIRSTS() ) ans.add(t); return ans; } @Override public ArrayList<Token> FOLLOWS() throws IOException { ArrayList<Token> ans = new ArrayList<Token>(); ans.add( Token.LEFT_PARENTH ); for ( Token t : get("Object").FOLLOWS() ) ans.add(t); return ans; } }
mit
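The GraphNode production above is a textbook recursive-descent dispatch: the parser looks at the current token and routes to VarOrTerm when the token is in FIRST(VarOrTerm), or to TriplesNode when it sees '(' , the only token in FIRST(TriplesNode). The dispatch idea reduced to a TypeScript sketch with a deliberately simplified token alphabet:

type Token = "VAR" | "IRI" | "LITERAL" | "NIL" | "LEFT_PARENTH";

const FIRST_VAR_OR_TERM = new Set<Token>(["VAR", "IRI", "LITERAL", "NIL"]);

function parseGraphNode(current: Token): "VarOrTerm" | "TriplesNode" {
  if (FIRST_VAR_OR_TERM.has(current)) return "VarOrTerm";
  if (current === "LEFT_PARENTH") return "TriplesNode"; // TriplesNode always starts with '('
  throw new Error(`unexpected token in GraphNode: ${current}`);
}

Because the two FIRST sets are disjoint, a single token of lookahead is enough to choose the branch, which is exactly what the switch in process() exploits.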
hikmahtiar6/drafterbit
src/Drafterbit/Bundle/SystemBundle/Form/DataTransformer/EntityToIdTransformer.php
1114
<?php namespace Drafterbit\Bundle\SystemBundle\Form\DataTransformer; use Symfony\Component\Form\DataTransformerInterface; use Symfony\Component\Form\Exception\TransformationFailedException; use Doctrine\Common\Persistence\ObjectManager; /** * @link https://gist.github.com/bjo3rnf/4061232 */ class EntityToIdTransformer implements DataTransformerInterface { /** * @var ObjectManager */ protected $objectManager; /** * @var string */ protected $class; public function __construct(ObjectManager $objectManager, $class) { $this->objectManager = $objectManager; $this->class = $class; } public function transform($entity) { if (null === $entity) { return; } return $entity->getId(); } public function reverseTransform($id) { if (!$id) { return null; } $entity = $this->objectManager->getRepository($this->class)->find($id); if (null === $entity) { throw new TransformationFailedException(); } return $entity; } }
mit
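EntityToIdTransformer above is a bidirectional mapping: entity to id on the way out to the form, id back to entity on submission, failing loudly when the id no longer resolves. The same round trip as a TypeScript sketch; Entity and repo.find are placeholders for whatever lookup the real ObjectManager provides:

interface Entity {
  id: number;
}

function transform(entity: Entity | null): number | undefined {
  return entity?.id;
}

async function reverseTransform(
  id: number | null,
  repo: { find: (id: number) => Promise<Entity | null> },
): Promise<Entity | null> {
  if (!id) return null;
  const entity = await repo.find(id);
  if (entity === null) {
    // Mirrors the TransformationFailedException thrown above.
    throw new Error(`no entity with id ${id}`);
  }
  return entity;
}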
glynnc/liblcf
src/generated/ldb_item.cpp
2969
/* !!!! GENERATED FILE - DO NOT EDIT !!!! */ /* * Copyright (c) 2014 liblcf authors * This file is released under the MIT License * http://opensource.org/licenses/MIT */ // Headers #include "ldb_reader.h" #include "ldb_chunks.h" #include "reader_struct.h" // Read Item. #define LCF_CHUNK_SUFFIX LDB_Reader #define LCF_CURRENT_STRUCT Item LCF_STRUCT_FIELDS_BEGIN() LCF_STRUCT_TYPED_FIELD(std::string, name), LCF_STRUCT_TYPED_FIELD(std::string, description), LCF_STRUCT_TYPED_FIELD(int, type), LCF_STRUCT_TYPED_FIELD(int, price), LCF_STRUCT_TYPED_FIELD(int, uses), LCF_STRUCT_TYPED_FIELD(int, atk_points1), LCF_STRUCT_TYPED_FIELD(int, def_points1), LCF_STRUCT_TYPED_FIELD(int, spi_points1), LCF_STRUCT_TYPED_FIELD(int, agi_points1), LCF_STRUCT_TYPED_FIELD(bool, two_handed), LCF_STRUCT_TYPED_FIELD(int, sp_cost), LCF_STRUCT_TYPED_FIELD(int, hit), LCF_STRUCT_TYPED_FIELD(int, critical_hit), LCF_STRUCT_TYPED_FIELD(int, animation_id), LCF_STRUCT_TYPED_FIELD(bool, preemptive), LCF_STRUCT_TYPED_FIELD(bool, dual_attack), LCF_STRUCT_TYPED_FIELD(bool, attack_all), LCF_STRUCT_TYPED_FIELD(bool, ignore_evasion), LCF_STRUCT_TYPED_FIELD(bool, prevent_critical), LCF_STRUCT_TYPED_FIELD(bool, raise_evasion), LCF_STRUCT_TYPED_FIELD(bool, half_sp_cost), LCF_STRUCT_TYPED_FIELD(bool, no_terrain_damage), LCF_STRUCT_TYPED_FIELD(bool, cursed), LCF_STRUCT_TYPED_FIELD(bool, entire_party), LCF_STRUCT_TYPED_FIELD(int, recover_hp_rate), LCF_STRUCT_TYPED_FIELD(int, recover_hp), LCF_STRUCT_TYPED_FIELD(int, recover_sp_rate), LCF_STRUCT_TYPED_FIELD(int, recover_sp), LCF_STRUCT_TYPED_FIELD(bool, occasion_field1), LCF_STRUCT_TYPED_FIELD(bool, ko_only), LCF_STRUCT_TYPED_FIELD(int, max_hp_points), LCF_STRUCT_TYPED_FIELD(int, max_sp_points), LCF_STRUCT_TYPED_FIELD(int, atk_points2), LCF_STRUCT_TYPED_FIELD(int, def_points2), LCF_STRUCT_TYPED_FIELD(int, spi_points2), LCF_STRUCT_TYPED_FIELD(int, agi_points2), LCF_STRUCT_TYPED_FIELD(int, using_message), LCF_STRUCT_TYPED_FIELD(int, skill_id), LCF_STRUCT_TYPED_FIELD(int, switch_id), LCF_STRUCT_TYPED_FIELD(bool, occasion_field2), LCF_STRUCT_TYPED_FIELD(bool, occasion_battle), LCF_STRUCT_SIZE_FIELD(bool, actor_set), LCF_STRUCT_TYPED_FIELD(std::vector<bool>, actor_set), LCF_STRUCT_SIZE_FIELD(bool, state_set), LCF_STRUCT_TYPED_FIELD(std::vector<bool>, state_set), LCF_STRUCT_SIZE_FIELD(bool, attribute_set), LCF_STRUCT_TYPED_FIELD(std::vector<bool>, attribute_set), LCF_STRUCT_TYPED_FIELD(int, state_chance), LCF_STRUCT_TYPED_FIELD(bool, state_effect), LCF_STRUCT_TYPED_FIELD(int, weapon_animation), LCF_STRUCT_TYPED_FIELD(std::vector<RPG::ItemAnimation>, animation_data), LCF_STRUCT_TYPED_FIELD(bool, use_skill), LCF_STRUCT_SIZE_FIELD(bool, class_set), LCF_STRUCT_TYPED_FIELD(std::vector<bool>, class_set), LCF_STRUCT_TYPED_FIELD(int, ranged_trajectory), LCF_STRUCT_TYPED_FIELD(int, ranged_target), LCF_STRUCT_FIELDS_END() #undef LCF_CURRENT_STRUCT #undef LCF_CHUNK_SUFFIX
mit
borilla/object-pool
test/index.spec.js
8637
'use strict'; var Pool = require('../src/index'); var chai = require('chai'); var sinon = require('sinon'); var sinonChai = require('sinon-chai'); var expect = chai.expect; chai.use(sinonChai); describe('object-pool', function () { var arg0, arg1, arg2, arg3, arg4; var poolIndexProp, sandbox, Type, onError, pool; before(function () { arg0 = 'arg0'; arg1 = 'arg1'; arg2 = 'arg2'; arg3 = 'arg3'; arg4 = 'arg4'; poolIndexProp = '_customPoolIndexProp'; }); beforeEach(function () { sandbox = sinon.sandbox.create(); Type = sandbox.spy(function Type() { this.wasAllocatedWithNew = this.wasAllocatedWithNew === undefined; }); onError = sandbox.stub(); pool = new Pool(Type, onError, poolIndexProp); }); afterEach(function () { sandbox.restore(); }); it('should initially reflect that no items have been allocated or released', function () { expect(pool.info()).to.deep.equal({ allocated: 0, released: 0 }); }); describe('allocate', function () { var item; describe('when there are no released items', function () { var ITEMS_TO_ALLOCATE = 2; var ITEMS_TO_RELEASE = 0; beforeEach(function () { initialisePool(ITEMS_TO_ALLOCATE, ITEMS_TO_RELEASE); Type.reset(); item = pool.allocate(arg1, arg2, arg3, arg4); }); it('should call Type constructor with new', function () { expect(Type).to.be.calledOnce; expect(item.wasAllocatedWithNew).to.be.true; }); it('should pass args to constructor', function () { expect(Type).to.be.calledWithExactly(arg1, arg2, arg3, arg4); }); it('should return an object of type Type', function () { expect(item).to.be.an.instanceof(Type); }); it('should set "poolIndexProp" property of the created item', function () { expect(item[poolIndexProp]).to.be.a('number'); }); it('should reflect that item has been allocated', function () { expect(pool.info()).to.deep.equal({ allocated: ITEMS_TO_ALLOCATE + 1, released: 0 }); }); }); describe('when there is a released item', function () { var ITEMS_TO_ALLOCATE = 2; var ITEMS_TO_RELEASE = 1; beforeEach(function () { initialisePool(ITEMS_TO_ALLOCATE, ITEMS_TO_RELEASE); Type.reset(); item = pool.allocate(arg1, arg2); }); it('should call Type constructor without using new', function () { expect(Type).to.be.calledOnce; expect(item.wasAllocatedWithNew).to.be.false; }); it('should pass args to constructor', function () { expect(Type).to.be.calledWithExactly(arg1, arg2); }); it('should return an object of type Type', function () { expect(item).to.be.an.instanceof(Type); }); it('should set "poolIndexProp" property of the created item', function () { expect(item[poolIndexProp]).to.be.a('number'); }); it('should reflect that item has been allocated', function () { expect(pool.info()).to.deep.equal({ allocated: ITEMS_TO_ALLOCATE, released: ITEMS_TO_RELEASE - 1 }); }); }); describe('when creating new items', function () { beforeEach(function () { pool.allocate(arg0); pool.allocate(arg0, arg1); pool.allocate(arg0, arg1, arg2); pool.allocate(arg0, arg1, arg2, arg3); pool.allocate(arg0, arg1, arg2, arg3, arg4); }); it('should always pass correct arguments', function () { expect(Type).to.be.calledWithExactly(arg0); expect(Type).to.be.calledWithExactly(arg0, arg1); expect(Type).to.be.calledWithExactly(arg0, arg1, arg2); expect(Type).to.be.calledWithExactly(arg0, arg1, arg2, arg3); expect(Type).to.be.calledWithExactly(arg0, arg1, arg2, arg3, arg4); }); }); describe('when reallocating released items', function () { var ITEMS_TO_ALLOCATE = 5; var ITEMS_TO_RELEASE = 5; beforeEach(function () { initialisePool(ITEMS_TO_ALLOCATE, ITEMS_TO_RELEASE); Type.reset(); 
pool.allocate(arg0); pool.allocate(arg0, arg1); pool.allocate(arg0, arg1, arg2); pool.allocate(arg0, arg1, arg2, arg3); pool.allocate(arg0, arg1, arg2, arg3, arg4); }); it('should always pass correct arguments', function () { expect(Type).to.be.calledWithExactly(arg0); expect(Type).to.be.calledWithExactly(arg0, arg1); expect(Type).to.be.calledWithExactly(arg0, arg1, arg2); expect(Type).to.be.calledWithExactly(arg0, arg1, arg2, arg3); expect(Type).to.be.calledWithExactly(arg0, arg1, arg2, arg3, arg4); }); }); describe('when no "poolIndexProp" parameter is provided to Pool constructor', function () { var prevPoolIndexProp; before(function () { prevPoolIndexProp = poolIndexProp; poolIndexProp = undefined; }); beforeEach(function () { item = pool.allocate(); }); it('should use the default value', function () { expect(item._poolIndex).to.be.a('number'); }); after(function () { poolIndexProp = prevPoolIndexProp; }); }); }); describe('release', function () { var item; describe('when an item has previously been allocated', function () { beforeEach(function () { item = pool.allocate(); pool.release(item); }); it('should reflect that item has been released', function () { expect(pool.info()).to.deep.equal({ allocated: 0, released: 1 }); }); }); describe('when item has already been released', function () { beforeEach(function () { item = pool.allocate(); pool.release(item); pool.release(item); }); it('should call onError callback (if provided)', function () { expect(onError).to.be.calledOnce; }); it('should provide an appropriate error message', function () { expect(onError).to.be.calledWithMatch('not currently allocated'); }); }); describe('when item was created outside the object-pool', function () { beforeEach(function () { item = new Type(); pool.release(item); }); it('should call onError callback (if provided)', function () { expect(onError).to.be.calledOnce; }); it('should provide an appropriate error message', function () { expect(onError).to.be.calledWithMatch('not currently allocated'); }); }); }); describe('forEach', function () { var item0, item1, item2, item3, item4; var fn; beforeEach(function () { item0 = pool.allocate(); item1 = pool.allocate(); item2 = pool.allocate(); item3 = pool.allocate(); item4 = pool.allocate(); pool.release(item1); pool.release(item3); fn = sandbox.stub(); pool.forEach(fn); }); it('should trigger callback function for each allocated item', function () { expect(fn).to.be.calledThrice; expect(fn).calledWithExactly(item0); expect(fn).calledWithExactly(item2); expect(fn).calledWithExactly(item4); }); it('should not trigger callback function for released items', function () { expect(fn).to.not.be.calledWith(item1); expect(fn).to.not.be.calledWith(item3); }); [ 'allocate', 'release', 'forEach', 'clean' ].forEach(function (operation) { describe('when we try to perform "' + operation + '" during forEach loop', function () { var doOperation; beforeEach(function () { doOperation = sandbox.spy(function (item) { // NOTE: sending "item" to each method is okay for now, but may cause errors in future return pool[operation](item); }); pool.forEach(doOperation); }); it('should call onError callback', function () { // onError will be called for each iteration of our loop, ie for each allocated item expect(onError).to.be.calledThrice; }); it('should provide an appropriate error message', function () { expect(onError).to.be.calledWithMatch('inside "forEach" loop'); }); it('"' + operation + '" should return undefined', function () { 
expect(doOperation.alwaysReturned(undefined)).to.equal(true); }); }); }); }); describe('clean', function () { var ITEMS_TO_ALLOCATE = 10; var ITEMS_TO_RELEASE = 5; beforeEach(function () { initialisePool(ITEMS_TO_ALLOCATE, ITEMS_TO_RELEASE); pool.clean(); }); it('should remove any released items from store', function () { expect(pool.info().released).to.equal(0); }); }); // helper to setup pool for test by allocating and releasing some items function initialisePool(itemsToAllocate, itemsToRelease) { var items = []; var i; for (i = 0; i < itemsToAllocate; ++i) { items.push(pool.allocate()); } for (i = 0; i < itemsToRelease; ++i) { pool.release(items[i]); } expect(pool.info()).to.deep.equal({ allocated: itemsToAllocate - itemsToRelease, released: itemsToRelease }); } });
mit
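The spec above pins down the pool's observable contract: allocate reuses released slots (re-running the constructor without new), release rejects items that are not currently allocated, info reports allocated/released counts, and forEach visits only live items while forbidding mutation mid-loop. Below is a minimal TypeScript pool satisfying just the allocate/release/info part of that contract; it is a sketch, not the library's implementation, and it omits constructor re-invocation and the forEach guards.

class Pool<T> {
  private items: T[] = [];
  private free: number[] = [];

  constructor(private create: () => T) {}

  allocate(): T {
    const index = this.free.pop();
    if (index !== undefined) return this.items[index]; // reuse a released slot
    const item = this.create();
    this.items.push(item);
    return item;
  }

  release(item: T): void {
    const index = this.items.indexOf(item);
    if (index < 0 || this.free.includes(index)) {
      // Same error surface the spec asserts on double release or foreign items.
      throw new Error("item is not currently allocated");
    }
    this.free.push(index);
  }

  info(): { allocated: number; released: number } {
    return {
      allocated: this.items.length - this.free.length,
      released: this.free.length,
    };
  }
}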
andrew-zhou/tourguide
tests/query_parser_test.py
1086
from query_parser import QueryParser from unittest import TestCase class QueryParserTest(TestCase): def test_parse_empty_query(self): subject = QueryParser('') self.assertEqual(subject.query, '') self.assertEqual(subject.params, {}) self.assertEqual(subject.alias, '') def test_parse_query_strips_whitespace(self): subject = QueryParser(' \t\t\n ') self.assertEqual(subject.query, '') self.assertEqual(subject.params, {}) self.assertEqual(subject.alias, '') def test_parse_query_without_params(self): subject = QueryParser('abcde foo bar') self.assertEqual(subject.query, 'abcde foo bar') self.assertEqual(subject.params, {}) self.assertEqual(subject.alias, 'abcde foo bar') def test_parse_query_with_params(self): subject = QueryParser('abc de\tfoo:bar\nfop:baz ') self.assertEqual(subject.query, 'abc de\tfoo:bar\nfop:baz') self.assertEqual(subject.params, {'foo': 'bar', 'fop': 'baz'}) self.assertEqual(subject.alias, 'abc de')
mit
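The Python tests above fully specify QueryParser's behaviour even without its source: the raw query is whitespace-trimmed, whitespace-separated tokens containing a colon become key/value params, and the remaining tokens joined back together form the alias. That inferred contract, re-implemented as a TypeScript sketch (not a port of the real query_parser module):

function parseQuery(raw: string): {
  query: string;
  params: Record<string, string>;
  alias: string;
} {
  const query = raw.trim();
  const params: Record<string, string> = {};
  const aliasParts: string[] = [];
  for (const token of query.split(/\s+/).filter(Boolean)) {
    const colon = token.indexOf(":");
    if (colon > 0) {
      params[token.slice(0, colon)] = token.slice(colon + 1);
    } else {
      aliasParts.push(token);
    }
  }
  return { query, params, alias: aliasParts.join(" ") };
}

Against the four test cases above this yields the asserted query, params, and alias values, including the empty-string cases.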
nodule/react-material-ui
ImageBrush.js
369
module.exports = { description: "", ns: "react-material-ui", type: "ReactNode", dependencies: { npm: { "material-ui/svg-icons/image/brush": require('material-ui/svg-icons/image/brush') } }, name: "ImageBrush", ports: { input: {}, output: { component: { title: "ImageBrush", type: "Component" } } } }
mit
richtermark/SMEAGOnline
src/Biz/Mail/Template/EmailSystemSelfTestTemplate.php
418
<?php namespace Biz\Mail\Template; class EmailSystemSelfTestTemplate extends BaseTemplate implements EmailTemplateInterface { /** * {@inheritdoc} */ public function parse($options) { return array( 'title' => sprintf('【%s】系统自检邮件', $this->getSiteName()), /* "[site name] system self-test email" */ 'body' => '系统邮件发送检测测试,请不要回复此邮件!', /* "System mail delivery test; please do not reply to this email!" */ ); } }
mit
benpolinsky/bp_custom_fields
spec/models/bp_custom_fields/appearance_spec.rb
10113
require 'rails_helper' RSpec.describe BpCustomFields::Appearance, type: :model do before do @appearance = BpCustomFields::Appearance.new end it "belongs_to a group_template" do expect(@appearance.create_group_template(name: "Badge")).to be_a BpCustomFields::GroupTemplate end context "returning conditions" do context "resource based" do before do class ::Post < ActiveRecord::Base include BpCustomFields::Fieldable end end it "one appearance can return all of a resource as a collection" do three_posts = Post.where(nil) appearance = BpCustomFields::Appearance.create(resource: "Post") 3.times { |i| three_posts << Post.create(title: "A Post ##{i}")} expect(appearance.appears_on).to eq three_posts end it "a collection of appearances can return all of a resource" do three_posts = Post.where(nil) 3.times { |i| three_posts << Post.create(title: "A Post ##{i}")} appearance = BpCustomFields::Appearance.create(resource: "Post") expect(BpCustomFields::Appearance.all.appears_on).to match three_posts end it "one appearance can return a single instance of a resource as a record" do only_post = Post.create(title: "only post") appearance = BpCustomFields::Appearance.create(resource: "Post") expect(appearance.appears_on).to eq only_post end it "a collection of appearances can return a single instance of a resource as a record" do only_post = Post.create(title: "only post") appearance = BpCustomFields::Appearance.create(resource: "Post") second_appearance = BpCustomFields::Appearance.create(resource: "Post") expect(BpCustomFields::Appearance.all.appears_on).to eq only_post end it "a collection of appearances can return all of a resource minus one record" do three_posts = Post.where(nil) 3.times { |i| three_posts << Post.create(title: "A Post ##{i}")} exception_post = three_posts.first BpCustomFields::Appearance.create(resource: "Post") BpCustomFields::Appearance.create(resource: "Post", resource_id: exception_post.id, appears: false) expect(BpCustomFields::Appearance.all.appears_on).to match three_posts.where.not(id: exception_post.id) end it "can return all of a resource minus three records" do ten_posts = Post.where(nil) three_posts = Post.where(nil) 3.times {|i| three_posts << Post.create(title: "Nope #{i}")} 10.times {|i| ten_posts << Post.create(title: "Yup #{i}")} BpCustomFields::Appearance.create(resource: "Post") three_posts.each do |post| BpCustomFields::Appearance.create(resource: "Post", resource_id: post.id, appears: false) end expect(BpCustomFields::Appearance.all.appears_on).to match ten_posts.where.not(id: three_posts.map(&:id)) end it "can return a mixture of two different resources" do class ::Person < ActiveRecord::Base include BpCustomFields::Fieldable end Person.delete_all three_people = Person.where(nil) five_posts = Post.where(nil) # this seems to be necessary b/c of the dynamic creation of the table/model? # really not sure...
I've used the where(nil) other places without these results three_people.reload five_posts.reload 3.times {|i| three_people << Person.create(first_name: "Person #{i}")} 5.times {|i| five_posts << Post.create(title: "Post #{i}")} BpCustomFields::Appearance.create(resource: "Person") BpCustomFields::Appearance.create(resource: "Post") expect(BpCustomFields::Appearance.all.appears_on).to match [three_people, five_posts].flatten Person.delete_all Post.delete_all end it "can return a mixture of two different resources, with only one resource subtracting from its collection", focus: true do class ::Person < ActiveRecord::Base include BpCustomFields::Fieldable end Person.delete_all three_people = Person.where(nil) five_posts = Post.where(nil) # this seems to be necessary b/c of the dynamic creation of the table/model? # really not sure... I've used the where(nil) other places without these results three_people.reload five_posts.reload 3.times {|i| three_people << Person.create(first_name: "Person #{i}")} 5.times {|i| five_posts << Post.create(title: "Post #{i}")} four_posts = five_posts.where.not(id: five_posts.last.id) BpCustomFields::Appearance.create(resource: "Post") BpCustomFields::Appearance.create(resource: "Post", resource_id: five_posts.last.id, appears: false) BpCustomFields::Appearance.create(resource: "Person") expect(BpCustomFields::Appearance.all.appears_on).to match [four_posts, three_people].flatten Person.delete_all Post.delete_all end it "can return a mixture of two different resources, each subtracting a respective record" do class ::Person < ActiveRecord::Base include BpCustomFields::Fieldable end Person.delete_all three_people = Person.where(nil) five_posts = Post.where(nil) # this seems to be necessary b/c of the dynamic creation of the table/model? # really not sure... I've used the where(nil) other places without these results three_people.reload five_posts.reload 3.times {|i| three_people << Person.create(first_name: "Person #{i}")} 5.times {|i| five_posts << Post.create(title: "Post #{i}")} four_posts = five_posts.where.not(id: five_posts.last.id) two_people = three_people.where.not(id: three_people.last.id) BpCustomFields::Appearance.create(resource: "Post") BpCustomFields::Appearance.create(resource: "Post", resource_id: five_posts.last.id, appears: false) BpCustomFields::Appearance.create(resource: "Person") BpCustomFields::Appearance.create(resource: "Person", resource_id: three_people.last.id, appears: false) expect(BpCustomFields::Appearance.all.appears_on).to match [four_posts, two_people].flatten Person.delete_all Post.delete_all end it "can return a mixture of two different resources, each subtracting several records" do class ::Person < ActiveRecord::Base include BpCustomFields::Fieldable end Person.delete_all five_people = Person.where(nil) five_posts = Post.where(nil) # this seems to be necessary b/c of the dynamic creation of the table/model? # really not sure...
I've used the where(nil) other places without these results five_people.reload five_posts.reload 5.times {|i| five_people << Person.create(first_name: "Person #{i}")} 5.times {|i| five_posts << Post.create(title: "Post #{i}")} one_post = five_posts.first one_person = five_people.first BpCustomFields::Appearance.create(resource: "Post") BpCustomFields::Appearance.create(resource: "Post", resource_id: five_posts[1].id, appears: false) BpCustomFields::Appearance.create(resource: "Post", resource_id: five_posts[2].id, appears: false) BpCustomFields::Appearance.create(resource: "Post", resource_id: five_posts[3].id, appears: false) BpCustomFields::Appearance.create(resource: "Post", resource_id: five_posts[4].id, appears: false) BpCustomFields::Appearance.create(resource: "Person") BpCustomFields::Appearance.create(resource: "Person", resource_id: five_people[1].id, appears: false) BpCustomFields::Appearance.create(resource: "Person", resource_id: five_people[2].id, appears: false) BpCustomFields::Appearance.create(resource: "Person", resource_id: five_people[3].id, appears: false) BpCustomFields::Appearance.create(resource: "Person", resource_id: five_people[4].id, appears: false) expect(BpCustomFields::Appearance.all.appears_on).to match [one_post, one_person] Person.delete_all Post.delete_all end # pending "can return all of a resource minus all but one record" # pending "can return all of a resource minus n using a range of ids rather than several appearances" # pending "can return scopes of a resource - possible?" # pending "has a #readable method to describe each query in English" do # appearance = BpCustomFields::Appearance.create(resource: "Post") # expect(appearance.readable).to eq "All Posts" # end end context "abstract based" do it "(for now) an abstract appearance requires a resource_id" do appearance = BpCustomFields::Appearance.new(resource: "BpCustomFields::AbstractResource", resource_id: nil) expect(appearance).to_not be_valid end it "#abstract?" do appearance = BpCustomFields::Appearance.create(resource: "BpCustomFields::AbstractResource", resource_id: "About") expect(appearance.abstract?).to eq true post_appearance = BpCustomFields::Appearance.create(resource: "Post") expect(post_appearance.abstract?).to eq false end it "returns which page it appears on", focus: true do appearance = BpCustomFields::Appearance.create(resource: "BpCustomFields::AbstractResource", resource_id: "About") expect(appearance.appears_on).to eq "BpCustomFields::AbstractResource: About" end end end end
mit
Sotanna/Quartz-Server
src/main/java/org/quartzpowered/protocol/data/Dimension.java
1877
/*
 * This file is a component of Quartz Powered, this license makes sure any work
 * associated with Quartz Powered, must follow the conditions of the license included.
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2015 Quartz Powered
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
package org.quartzpowered.protocol.data;

import lombok.Getter;

import java.util.HashMap;
import java.util.Map;

public enum Dimension {
    NETHER(-1),
    OVERWORLD(0),
    END(1);

    @Getter
    private final int id;

    Dimension(int id) {
        this.id = id;
    }

    private static final Map<Integer, Dimension> idMap = new HashMap<>();

    static {
        for (Dimension dimension : values()) {
            idMap.put(dimension.id, dimension);
        }
    }

    public static Dimension fromId(int id) {
        return idMap.get(id);
    }
}
mit
Rehab4/WhatToDo
wantToDo/src/app/app.js
94
(function () {
    'use strict';

    var app = angular.module('app', [
    ]);
})();
mit
longde123/MultiversePlatform
server/src/multiverse/simpleclient/DefaultHandler.java
2043
/********************************************************************
The Multiverse Platform is made available under the MIT License.

Copyright (c) 2012 The Multiverse Foundation

Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

*********************************************************************/

package multiverse.simpleclient;

import multiverse.server.engine.*;
import multiverse.server.util.*;

public class DefaultHandler implements EventHandler {
    public DefaultHandler() {
    }

    public String getName() {
        return "simpleclient.DefaultHandler";
    }

    public boolean handleEvent(Event event) {
        Long objOid = event.getObjectOid();
        log.info("event=" + event.getName() + ", objOid=" + objOid);
//         if ((objOid != null) &&
//             (NewObjectHandler.ObjectMap.getObject(objOid) == null)) {
//             throw new RuntimeException("DefaultHandler: did not get a newobject event for objoid " + objOid);
//         }
        return true;
    }

    static final Logger log = new Logger("DefaultHandler");
}
mit
Hokua/overture
source/foundation/Event.js
1152
import { Class } from '../core/Core';

/**
    Class: O.Event

    Represents a synthetic event.
*/
export default Class({
    /**
        Constructor: O.Event

        Parameters:
            type   - {String} The event type.
            target - {Object} The target on which the event is to fire.
            mixin  - {Object} (optional) Any further properties to add to the
                     event.
    */
    init ( type, target, mixin ) {
        this.type = type;
        this.target = target;
        this.defaultPrevented = false;
        this.propagationStopped = false;
        Object.assign( this, mixin );
    },

    /**
        Method: O.Event#preventDefault

        Prevent the default action for this event (if any).

        Returns:
            {O.Event} Returns self.
    */
    preventDefault () {
        this.defaultPrevented = true;
        return this;
    },

    /**
        Method: O.Event#stopPropagation

        Stop bubbling the event up to the next target.

        Returns:
            {O.Event} Returns self.
    */
    stopPropagation () {
        this.propagationStopped = true;
        return this;
    },
});
mit
jmhdez/ef-integration-tests
Core/DBC.cs
756
using System;
using System.Runtime.Serialization;

namespace Koalite.EFSample
{
    public static class Check
    {
        public static void Require(bool condition, string message = null)
        {
            if (!condition)
                throw new DBCException(message ?? "Precondition failed");
        }
    }

    public class DBCException : Exception
    {
        public DBCException()
        {
        }

        public DBCException(string message) : base(message)
        {
        }

        public DBCException(string message, Exception innerException) : base(message, innerException)
        {
        }

        protected DBCException(SerializationInfo info, StreamingContext context) : base(info, context)
        {
        }
    }
}
mit
ProtonMail/WebClient
applications/calendar/src/app/components/calendar/DayGrid.tsx
9719
import { useMemo, useState, useRef, useLayoutEffect, useEffect, Ref } from 'react';
import { chunk } from '@proton/shared/lib/helpers/array';
import { eachDayOfInterval, isSameMonth } from '@proton/shared/lib/date-fns-utc';
import { getISOWeek } from 'date-fns';
import useDayGridEventLayout from './useDayGridEventLayout';
import createDayGridMouseHandler from './interactions/dayGridMouseHandler';
import { useRect } from '../../hooks/useRect';
import RowEvents from './DayGrid/RowEvents';
import DayButtons from './DayGrid/DayButtons';
import { DAY_EVENT_HEIGHT } from './constants';
import { CalendarViewEvent, TargetEventData, TargetMoreData } from '../../containers/calendar/interface';

interface Props {
    tzid: string;
    now: Date;
    date: Date;
    dateRange: [Date, Date];
    displayWeekNumbers?: boolean;
    isInteractionEnabled?: boolean;
    events: CalendarViewEvent[];
    targetEventRef: Ref<HTMLDivElement>;
    targetMoreData?: TargetMoreData;
    targetMoreRef: Ref<HTMLDivElement>;
    targetEventData?: TargetEventData;
    onMouseDown: (a: any) => any /** todo */;
    formatTime: (date: Date) => string;
    formatDate: (date: Date) => string;
    onClickDate: (date: Date) => void;
    weekdaysLong: string[];
}

const DayGrid = ({
    tzid,
    now,
    date,
    dateRange: [start, end],
    dateRange,
    displayWeekNumbers = false,
    isInteractionEnabled = false,
    events,
    targetEventRef,
    targetMoreRef,
    targetEventData,
    targetMoreData,
    onMouseDown,
    formatTime,
    formatDate,
    onClickDate,
    weekdaysLong = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'],
}: Props) => {
    const rowsWrapperRef = useRef<HTMLDivElement>(null);
    const firstRowRef = useRef<HTMLDivElement>(null);
    const firstRowRect = useRect(firstRowRef.current);
    const [numberOfRows, setNumberOfRows] = useState(0);

    const daysInWeek = 7;
    const dayEventHeight = DAY_EVENT_HEIGHT;

    const rows = useMemo(() => {
        return chunk(eachDayOfInterval(start, end), daysInWeek);
    }, [+start, +end]);

    const eventsPerRows = useDayGridEventLayout(rows, events, numberOfRows, dayEventHeight);

    useLayoutEffect(() => {
        const { height: firstRowHeight = 100 } = firstRowRect || {};
        const newNumberOfRows = Math.max(Math.floor(firstRowHeight / dayEventHeight), 1);
        setNumberOfRows(newNumberOfRows - 1);
    }, [firstRowRect, dateRange]);

    const handleMouseDownRef = useRef<(e: MouseEvent) => void>();
    handleMouseDownRef.current = (e: MouseEvent) => {
        if (!rowsWrapperRef.current) {
            return;
        }
        createDayGridMouseHandler({
            e,
            events,
            eventsPerRows,
            rows,
            dayGridEl: rowsWrapperRef.current,
            onMouseDown,
        });
    };

    useEffect(() => {
        if (!isInteractionEnabled) {
            return;
        }
        const listener = (e: MouseEvent) => {
            if (e.button !== 0) {
                return;
            }
            handleMouseDownRef.current?.(e);
        };
        document.addEventListener('mousedown', listener, true);
        return () => {
            document.removeEventListener('mousedown', listener, true);
        };
    }, [isInteractionEnabled]);

    const formattedDates = useMemo(() => {
        return rows.map((days) => {
            return days.map(formatDate);
        });
    }, [rows, formatDate]);

    const mainRef = useRef<HTMLDivElement>(null);

    return (
        <div className="flex-item-fluid scroll-if-needed h100 is-month-view">
            <div className="calendar-daygrid flex flex-column relative h100" ref={mainRef}>
                <div data-test-id="calendar-month-view:week-header" className="flex calendar-daygrid-days">
                    {displayWeekNumbers ? <div className="calendar-daygrid-weeknumber-width" /> : null}
                    {rows[0].map((day) => {
                        return (
                            <div
                                className="flex-item-fluid text-center calendar-daygrid-day text-lg m0 p0-75 text-ellipsis"
                                key={day.getUTCDate()}
                                aria-current={
                                    day.getUTCDay() === now.getUTCDay() && isSameMonth(date, now) ? 'true' : undefined
                                }
                            >
                                <span className="calendar-grid-heading-day-fullname text-semibold">
                                    {weekdaysLong[day.getUTCDay()]}
                                </span>
                                <span
                                    className="calendar-grid-heading-day-shortname no-desktop no-tablet"
                                    aria-hidden="true"
                                >
                                    {weekdaysLong[day.getUTCDay()][0]}
                                </span>
                            </div>
                        );
                    })}
                </div>
                <div className="flex flex-item-fluid">
                    {displayWeekNumbers ? (
                        <div className="flex flex-column calendar-daygrid-weeknumber-width">
                            {rows.map((days) => {
                                const monday = days.find((date) => date.getDay() === 1);
                                if (!monday) {
                                    return null;
                                }
                                const week = getISOWeek(monday);
                                return (
                                    <div
                                        key={+monday}
                                        className="flex-item-fluid flex flex-column flex relative calendar-daygrid-weeknumber"
                                    >
                                        <span className="mauto opacity-40 text-sm">{week}</span>
                                    </div>
                                );
                            })}
                        </div>
                    ) : null}
                    <div className="flex flex-item-fluid flex-column calendar-daygrid-rows" ref={rowsWrapperRef}>
                        {rows.map((days, rowIndex) => {
                            const { eventsInRow, eventsInRowStyles, eventsInRowSummary } = eventsPerRows[rowIndex];
                            return (
                                // eslint-disable-next-line react/no-array-index-key
                                <div key={rowIndex} className="flex-item-fluid flex flex-column h100 w100 relative">
                                    <div
                                        data-test-id="calendar-month-view:week-row"
                                        className="flex calendar-daygrid-columns no-pointer-events"
                                    >
                                        {days.map((day) => {
                                            return (
                                                <div
                                                    data-test-id="calendar-month-view:day-cell"
                                                    className="flex-item-fluid calendar-daygrid-column"
                                                    key={day.getUTCDate()}
                                                />
                                            );
                                        })}
                                    </div>
                                    <div className="flex">
                                        <DayButtons
                                            days={days}
                                            now={now}
                                            date={date}
                                            formattedDates={formattedDates[rowIndex]}
                                            onClickDate={onClickDate}
                                        />
                                    </div>
                                    <div
                                        className="relative flex-item-fluid calendar-daygrid-row"
                                        data-row={rowIndex}
                                        {...(rowIndex === 0 ? { ref: firstRowRef } : undefined)}
                                    >
                                        <RowEvents
                                            tzid={tzid}
                                            eventsInRowStyles={eventsInRowStyles}
                                            eventsInRowSummary={eventsInRowSummary}
                                            eventsInRow={eventsInRow}
                                            events={events}
                                            formatTime={formatTime}
                                            days={days}
                                            now={now}
                                            row={rowIndex}
                                            targetMoreData={targetMoreData}
                                            targetMoreRef={targetMoreRef}
                                            targetEventRef={targetEventRef}
                                            targetEventData={targetEventData}
                                        />
                                    </div>
                                </div>
                            );
                        })}
                    </div>
                </div>
            </div>
        </div>
    );
};

export default DayGrid;
mit
NikolaiMishev/Telerik-Academy
Module-1/01.CSharp Part 1/Console Input-Output/Sum of n Numbers/Program.cs
573
// Write a program that enters a number n, then enters n more numbers and calculates and prints their sum.
// Note: You may need to use a for-loop.
using System;

class Program
{
    static void Main()
    {
        Console.Write("Enter how many numbers you want to sum:");
        int number = int.Parse(Console.ReadLine());
        double counter = 0;

        for (int i = 0; i < number; i++)
        {
            Console.Write("Enter number:");
            counter += double.Parse(Console.ReadLine());
        }

        Console.WriteLine(counter);
    }
}
mit
matthiasnoback/simple-bus
src/Matthias/SimpleBus/Command/Command.php
134
<?php

namespace Matthias\SimpleBus\Command;

interface Command
{
    /**
     * @return string
     */
    public function name();
}
mit
john-guerra/NodeNavigator
src/index.js
211
import navio from "./navio.js";
// import NavioComponent from "./NavioComponent.jsx";

export default navio;
// export { getAttribsFromObjectRecursive } from "./utils.js";
// export {NavioComponent, navio};
mit
sveetch/Optimus
tests/data_fixtures/basic2_template/pages.py
525
# -*- coding: utf-8 -*-
"""
The project pages map for basic
"""
from optimus.pages.views.base import PageViewBase


class SamplePage(PageViewBase):
    """
    Sample page defaults as index
    """
    title = "My project index"
    template_name = "index.html"
    destination = "index.html"


# Enabled pages to build
PAGES = [
    SamplePage(),
    SamplePage(title="Foo", template_name="sub/foo.html", destination="sub/foo.html"),
    SamplePage(title="Bar", template_name="sub/bar.html", destination="sub/bar.html"),
]
mit
molekilla/rutha-grunt-tasks-ui
grunt/ngtemplates.js
1506
module.exports = {
    dev: {
        cwd: '<%= cwd %>/<%= devEnvironment.distFolder || "dist" %>/html/src',
        src: 'app/**/*.html',
        dest: '<%= cwd %>/<%= devEnvironment.distFolder || "dist" %>/js/templates.js',
        options: {
            module: '<%= ngTemplates.moduleNamespace %>'
        }
    },
    ionic: {
        cwd: '<%= cwd %>/www/html/src',
        src: 'app/**/*.html',
        dest: '<%= cwd %>/www/js/templates.js',
        options: {
            module: '<%= ngTemplates.moduleNamespace %>',
            collapseBooleanAttributes: true,
            collapseWhitespace: true,
            removeAttributeQuotes: true,
            removeComments: false,
            removeEmptyAttributes: true,
            removeRedundantAttributes: true,
            removeScriptTypeAttributes: true,
            removeStyleLinkTypeAttributes: true
        }
    },
    specs: {
        cwd: '<%= cwd %>/dist/html/src',
        src: 'app/**/*.html',
        dest: '<%= cwd %>/src/test/templates.js',
        options: {
            module: '<%= ngTemplates.moduleNamespace %>'
        }
    },
    build: {
        cwd: '<%= cwd %>/dist/html/src',
        src: 'app/**/*.html',
        dest: '<%= cwd %>/releases/v<%= pkg.version %>/ui/dist/js/templates.js',
        options: {
            module: '<%= ngTemplates.moduleNamespace %>',
            collapseBooleanAttributes: true,
            collapseWhitespace: true,
            removeAttributeQuotes: true,
            removeComments: false,
            removeEmptyAttributes: true,
            removeRedundantAttributes: true,
            removeScriptTypeAttributes: true,
            removeStyleLinkTypeAttributes: true
        }
    }
};
mit
lakchote/projet3_symfony
src/AppBundle/EventListener/RedirectLocaleListener.php
1137
<?php
/**
 * Created by PhpStorm.
 * User: BRANDON HEAT
 * Date: 03/11/2016
 * Time: 23:32
 */

namespace AppBundle\EventListener;

use Symfony\Component\HttpFoundation\RedirectResponse;
use Symfony\Component\HttpKernel\Event\GetResponseEvent;
use Symfony\Bundle\FrameworkBundle\Routing\Router;

class RedirectLocaleListener
{
    private $defaultLocale;
    private $supportedLocales;
    private $router;

    public function __construct($defaultLocale, array $supportedLocales, Router $router)
    {
        $this->defaultLocale = $defaultLocale;
        $this->supportedLocales = $supportedLocales;
        $this->router = $router;
    }

    public function onKernelRequest(GetResponseEvent $event)
    {
        $request = $event->getRequest();
        if ($request->getPathInfo() == '/') {
            $locale = substr($request->getPreferredLanguage(), 0, 2);
            if (!in_array($locale, $this->supportedLocales)) {
                $locale = $this->defaultLocale;
            }
            return $event->setResponse(new RedirectResponse($this->router->generate('app_homepage', ['_locale' => $locale])));
        }
    }
}
mit
ijklim/deploy_vue_heroku
server.js
624
var express = require("express");
var serveStatic = require("serve-static");
var history = require("connect-history-api-fallback");

var app = express();

var https_redirect = function(req, res, next) {
    if (process.env.NODE_ENV === "production") {
        if (req.headers["x-forwarded-proto"] != "https") {
            return res.redirect("https://" + req.headers.host + req.url);
        } else {
            return next();
        }
    } else {
        return next();
    }
};

app.use(https_redirect);
app.use(history());
app.use(serveStatic(__dirname));

var port = process.env.PORT || 5000;
app.listen(port);
console.log("server started " + port);
mit
sri-jay/some-kind-of-shell
process/message/message.js
537
var Stream;
(function (Stream) {
    Stream[Stream["STDOUT"] = 0] = "STDOUT";
    Stream[Stream["STDERR"] = 1] = "STDERR";
    Stream[Stream["STDIN"] = 2] = "STDIN";
})(Stream || (Stream = {}));

var OutMessage = (function () {
    function OutMessage(message) {
        this.stream = Stream.STDOUT;
        this.message = message;
    }
    return OutMessage;
})();

var ErrMessage = (function () {
    function ErrMessage(message) {
        this.stream = Stream.STDERR;
        this.message = message;
    }
    return ErrMessage;
})();
mit
amwenger/igv
src/main/java/org/broad/igv/tools/converters/MageTabToIGVConverter.java
7115
/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2007-2015 Broad Institute
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

package org.broad.igv.tools.converters;

import htsjdk.samtools.util.CloseableIterator;
import org.broad.igv.util.collections.SortingCollection;
import org.apache.log4j.Logger;
import org.broad.igv.data.expression.GeneToLocusHelper;
import org.broad.igv.feature.Locus;
import org.broad.igv.feature.genome.Genome;
import org.broad.igv.tools.sort.SortableRecord;
import org.broad.igv.tools.sort.SortableRecordCodec;
import org.broad.igv.track.TrackType;

import java.io.*;
import java.util.Comparator;
import java.util.List;

/**
 * @author jrobinso
 * @date Oct 9, 2010
 * @deprecated Doesn't appear to be used anywhere, as of 23 May 2013
 */
@Deprecated
public class MageTabToIGVConverter {

    private static Logger log = Logger.getLogger(MageTabToIGVConverter.class);

    /**
     * Parse the file and output in ".igv" format.
     */
    public static void convert(File inputFile, File outputFile, String probeResource,
                               int maxRecords, File tmpDir, Genome genome) throws IOException {

        GeneToLocusHelper locusHelper = new GeneToLocusHelper(probeResource);

        BufferedReader reader = null;
        PrintWriter writer = null;

        SortingCollection cltn = getSortingCollection(maxRecords, tmpDir);
        try {
            reader = new BufferedReader(new FileReader(inputFile));
            writer = new PrintWriter(new BufferedWriter(new FileWriter(outputFile)));

            // Parse the header line
            String headerLine = reader.readLine();
            String[] tokens = headerLine.split("\t");

            // The sample names in a GCT file start at column 2
            int sampleStart = 2;

            String nextLine = null;
            TrackType dataType = TrackType.GENE_EXPRESSION;
            while ((nextLine = reader.readLine()) != null) {

                // A gct row can map to multiple loci, normally this indicates a problem with the probe
                DataRow row = new DataRow(nextLine);
                String probe = row.getProbe();
                if (probe.startsWith("cg")) {
                    dataType = TrackType.DNA_METHYLATION;
                }

                List<Locus> loci = locusHelper.getLoci(probe, row.getDescription(), genome.getId());
                if (loci == null || loci.isEmpty()) {
                    System.out.println("No locus found for: " + probe + " " + row.getDescription());
                } else {
                    for (Locus locus : loci) {
                        String igvLine = locus.getChr() + "\t" + locus.getStart() + "\t" + locus.getEnd() + "\t" +
                                probe + row.getData();
                        cltn.add(new SortableRecord(locus.getChr(), locus.getStart(), igvLine));
                    }
                }
            }

            writer.println("#type=" + dataType.toString());
            writer.print("Chr\tStart\tEnd\tProbe");
            for (int i = sampleStart; i < tokens.length; i++) {
                writer.print("\t" + tokens[i]);
            }
            writer.println();

            // Output the sorted file
            CloseableIterator<SortableRecord> iter = cltn.iterator();
            while (iter.hasNext()) {
                SortableRecord al = iter.next();
                writer.println(al.getText());
            }
        } finally {
            if (reader != null) {
                reader.close();
            }
            if (writer != null) {
                writer.close();
            }
        }
    }

    static SortingCollection getSortingCollection(int maxRecords, File tmpDir) {
        SortableRecordCodec codec = new SortableRecordCodec();

        Comparator<SortableRecord> comp = new Comparator<SortableRecord>() {

            public int compare(SortableRecord o1, SortableRecord o2) {
                String chr1 = o1.getChromosome().replaceFirst("chr", "");
                String chr2 = o2.getChromosome().replaceFirst("chr", "");
                int s1 = Integer.MAX_VALUE;
                try {
                    s1 = Integer.parseInt(chr1);
                } catch (Exception e) {
                    // ignore
                }
                int s2 = Integer.MAX_VALUE;
                try {
                    s2 = Integer.parseInt(chr2);
                } catch (Exception e) {
                    // ignore
                }

                int t1 = s1 - s2;
                if (t1 == 0) {
                    chr1 = chr1.replace("M", "Z");
                    chr2 = chr2.replace("M", "Z");
                    t1 = chr1.compareTo(chr2);
                }
                if (t1 == 0) {
                    return (int) (o1.getStart() - o2.getStart());
                } else {
                    return t1;
                }
            }
        };

        return SortingCollection.newInstance(SortableRecord.class, codec, comp, maxRecords, tmpDir);
    }

    /**
     * Represents a row of data from a GCT or mage-tab file. Using this class is more efficient than tokenizing
     * the entire line. Some GCT files have over a thousand columns and we're only interested in the first 2.
     */
    static class DataRow {
        private String probe;
        private String description;
        private String data;

        DataRow(String string) {
            int firstTab = string.indexOf('\t');
            int secondTab = string.indexOf('\t', firstTab + 1);
            // TODO -- if either of the indices firstTab or secondTab is < 0, throw an exception
            probe = string.substring(0, firstTab);
            description = string.substring(firstTab, secondTab);
            data = string.substring(secondTab);
        }

        private String getProbe() {
            return probe;
        }

        public String getDescription() {
            return description;
        }

        public String getData() {
            return data;
        }
    }
}
mit
AlexEaton1105/computerScience
jamesQuiz.py
1244
import random
import time
import sys

# variables
name = input("What's your name? ")
counter = 0
answer = ""
userscore = 0

begin = input("Are you ready? ")
time.sleep(1)

if begin == "yes":
    time.sleep(1)
    print("Welcome to the Maths Quiz", name, "!")
    time.sleep(1)
    while counter < 10:
        number1 = random.randint(0, 20)
        number2 = random.randint(0, 15)
        operators = random.randint(1, 3)
        if operators == 1:
            print("What is", number1, "+", number2)
            ans = number1 + number2
            counter = counter + 1
        elif operators == 2:
            print("What is", number1, "*", number2)
            ans = number1 * number2
            counter = counter + 1
        else:
            print("What is", number1, "-", number2)
            ans = number1 - number2
            counter = counter + 1
        useranswer = int(input())
        if useranswer == ans:
            print("Correct!")
            userscore = userscore + 1
        else:
            print("Wrong!")
    if userscore < 5:
        time.sleep(1)
        print("Better luck next time", name, "you scored", userscore, "/ 10!")
    else:
        time.sleep(1)
        print("Congratulations", name, "you scored", userscore, "/ 10!")
else:
    print("James isn't worth oxygen!")
    sys.exit()

time.sleep(4)
mit