| prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k) |
|---|---|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'débarquer'."""
from math import sqrt
from primaires.interpreteur.commande.commande import Commande
from secondaires.navigation.constantes import *
class CmdDebarquer(Commande):
"""Commande 'debarquer'"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "debarquer", "debark")
self.nom_categorie = "navire"
self.aide_courte = "débarque du navire"
self.aide_longue = \
"Cette commande permet de débarquer du navire sur lequel " \
"on se trouve. On doit se trouver assez prêt d'une côte " \
"pour débarquer dessus."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
salle = personnage.salle
if not hasattr(salle, "navire") or salle.navire is None:
personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
return
navire = salle.navire
if navire.etendue is None:
personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
return
personnage.agir("bouger")
# Look for the nearest room
etendue = navire.etendue
# Search for the nearest coastal room
d_salle = None # the destination room
distance = 2
x, y, z = salle.coords.tuple()
for t_salle in etendue.cotes.values():
if t_salle.coords.z == z:
t_x, t_y, t_z = t_salle.coords.tuple()
t_distance = sqrt((x - t_x) ** 2 + (y - t_y) ** 2)
if t_distance < distance and t_salle.nom_terrain in \
TERRAINS_ACCOSTABLES:
d_salle = t_salle
distance = t_distance
if d_salle is None:
personnage << "|err|Aucun quai n'a pu être trouvé à " \
"proximité.|ff|"
return
personnage.salle = d_salle
personnage << "Vous sautez sur {}.".format(
d_salle.titre.lower())
personnage << d_salle.regarder(personnage)
d_salle.envoyer("{{}} arrive en sautant depuis {}.".format(
navire.nom), personnage)
salle.envoyer("{{}} saute sur {}.".format(
d_salle.titre.lower()), personnage)<|fim▁hole|> personnage.envoyer_tip("N'oubliez pas d'amarrer votre navire " \
"avec %amarre% %amarre:attacher%.")<|fim▁end|>
|
importeur.hook["personnage:deplacer"].executer(
personnage, d_salle, None, 0)
if not hasattr(d_salle, "navire") or d_salle.navire is None:
|
<|file_name|>message.js<|end_file_name|><|fim▁begin|>var models = require('../models');
var Message = models.Message;
var User = require('../proxy').User;
var messageProxy = require('../proxy/message');
var mail = require('./mail');
exports.sendReplyMessage = function (master_id, author_id, topic_id, reply_id) {
var message = new Message();
message.type = 'reply';
message.master_id = master_id;
message.author_id = author_id;
message.topic_id = topic_id;
message.reply_id = reply_id;
message.save(function (err) {
// TODO: error handling
User.getUserById(master_id, function (err, master) {
// TODO: error handling
if (master && master.receive_reply_mail) {
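// Mark the message as read up front; the user is notified by email instead.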
message.has_read = true;
message.save();
messageProxy.getMessageById(message._id, function (err, msg) {
msg.reply_id = reply_id;
// TODO: error handling
mail.sendReplyMail(master.email, msg);
});
}
});
});
};
exports.sendReply2Message = function (master_id, author_id, topic_id, reply_id) {
var message = new Message();
message.type = 'reply2';
message.master_id = master_id;
message.author_id = author_id;
message.topic_id = topic_id;
message.reply_id = reply_id;
message.save(function (err) {
// TODO: error handling
User.getUserById(master_id, function (err, master) {
// TODO: error handling
if (master && master.receive_reply_mail) {
message.has_read = true;
message.save();
messageProxy.getMessageById(message._id, function (err, msg) {
msg.reply_id = reply_id;
// TODO: error handling
mail.sendReplyMail(master.email, msg);
});
}
});
});
};
exports.sendAtMessage = function (master_id, author_id, topic_id, reply_id, callback) {
var message = new Message();
message.type = 'at';
message.master_id = master_id;
message.author_id = author_id;
message.topic_id = topic_id;
message.reply_id = reply_id;
message.save(function (err) {
// TODO: error handling
User.getUserById(master_id, function (err, master) {
// TODO: error handling
if (master && master.receive_at_mail) {
message.has_read = true;
message.save();
messageProxy.getMessageById(message._id, function (err, msg) {<|fim▁hole|> });
callback(err);
});
};
// author_id is the follower, so the master receiving the message is follow_id
exports.sendFollowMessage = function (follow_id, author_id) {
var message = new Message();
message.type = 'follow';
message.master_id = follow_id;
message.author_id = author_id;
message.save();
};
//author_id is the attendee
exports.sendAttendMessage = function (master_id, topic_id, author_id) {
var message = new Message();
message.type = 'attend';
message.master_id = master_id;
message.author_id = author_id;
message.topic_id= topic_id;
message.save();
};<|fim▁end|>
|
// TODO: error handling
mail.sendAtMail(master.email, msg);
});
}
|
<|file_name|>app.route.js<|end_file_name|><|fim▁begin|>(function() {
'use strict';
angular
.module('lcRegistration')
.config(["$routeProvider", function($routeProvider) {
$routeProvider
.when("/", {
templateUrl: "/client/app/regForm/regForm.html",
controller: "registrationController"<|fim▁hole|> }).otherwise({
redirectTo: "/"
});
} ]);
})();<|fim▁end|>
|
}).when("/hello", {
templateUrl: "/client/app/hello/hello.html",
controller: "helloController"
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>import os<|fim▁hole|>def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="blake-archive",
version="0.1",
description="Blake archive web app",
license="Closed source",
packages=['blake', 'test'],
long_description=read('README'),
classifiers=["Development Status :: 3 - Alpha"],
install_requires=["flask", "sqlalchemy", "flask-sqlalchemy", 'lxml', 'xmltodict', "nose", 'tablib']
)<|fim▁end|>
|
from setuptools import setup
|
<|file_name|>PopupMenu(1).rs<|end_file_name|><|fim▁begin|>com.hepia.logisim.chronogui.PopupMenu
<|fim▁hole|><|fim▁end|>
|
com.hepia.logisim.chronogui.PopupContents
|
<|file_name|>regions-infer-at-fn-not-param.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct parameterized1<'self> {
g: &'self fn()
}
struct not_parameterized1 {
g: @fn()
}
struct not_parameterized2 {
g: @fn()
}
fn take1(p: parameterized1) -> parameterized1 { p } //~ ERROR mismatched types
fn take3(p: not_parameterized1) -> not_parameterized1 { p }
fn take4(p: not_parameterized2) -> not_parameterized2 { p }
<|fim▁hole|><|fim▁end|>
|
fn main() {}
|
<|file_name|>scrape.js<|end_file_name|><|fim▁begin|>var request = require('request');
var cheerio = require('cheerio');
var admin = require("firebase-admin");
//var serviceAccount = require("tehillim-17559-firebase-adminsdk-gx90v-b712a63ab5.json");
admin.initializeApp({
credential: admin.credential.cert("tehillim-17559-firebase-adminsdk-gx90v-b712a63ab5.json"),
databaseURL: "https://tehillim-17559.firebaseio.com"
});
// As an admin, the app has access to read and write all data, regardless of Security Rules
var db = admin.database();
// Attach an asynchronous callback to read the data at our posts reference
//ref.on("value", function (snapshot) {
// console.log(snapshot.val());
//}, function (errorObject) {
// console.log("The read failed: " + errorObject.code);
//});
var psalmId = 1;
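// Remove empty <i></i> tag pairs left in the fetched text.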
var stripeHtml = function (text) {
return text.replace(/<i><\/i>/g, '');
};
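// Note: the regex below removes every 1-2 digit run, not only leading verse numbers.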
var removeLeadingVerseIndex = function (text) {
return text.replace(/\d{1,2}/g, '');
};
var runEnHeRequest = function (id) {
request('http://www.sefaria.org/api/texts/Psalms.' + id + '?commentary=0&context=1&pad=0', function (error, response, html) {
if (!error && response.statusCode == 200) {
console.log("Psalm: " + id + " OK");
var objHtml = JSON.parse(html);
var enArray = [];
objHtml.text.forEach(function (item) {
enArray.push(stripeHtml(item));
});
var newObj = {
id: id,
name: "Psalm " + id,
en: enArray,
he: objHtml.he
}
var path = "psalms/" + (id-1).toString();
var ref = db.ref(path);
ref.update(newObj, function (error) {
if (error) {
console.log("Data could not be saved." + error);
} else {
console.log("Psalm " + id + " saved successfully.");
}
});
}
if (error) {
console.log(response);
}
});
}
var runPhonetiqueFrRequest = function (id) {
request('http://tehilim-online.com/les-psaumes-de-David/Tehilim-' + id, function (error, response, html) {
if (!error && response.statusCode == 200) {
console.log("Psalm: " + id + " OK");
var $ = cheerio.load(html);
var ph = $('#phonetiqueBlock span').text().split("\n");
var frNotFormatted = $('#traductionBlock p').text().split("\n");
var fr = [];
frNotFormatted.forEach(function (item) {
fr.push(removeLeadingVerseIndex(item));
});
var newObj = {<|fim▁hole|> ph: ph,
fr: fr
}
// console.log(newObj);
var path = "psalms/" + (id - 1).toString();
var ref = db.ref(path);
ref.update(newObj, function (error) {
if (error) {
console.log("Data could not be saved." + error);
} else {
console.log("Psalm " + id + " saved successfully.");
}
});
}
if (error) {
console.log(response);
}
});
}
while(psalmId <= 150) {
var psalmTextArray = [];
//runEnHeRequest(psalmId);
runPhonetiqueFrRequest(psalmId);
psalmId++;
}<|fim▁end|>
| |
<|file_name|>0008_auto_20170908_1914.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-09-08 19:14
from __future__ import unicode_literals
from django.db import migrations, models
<|fim▁hole|>
dependencies = [
('invoice', '0007_profile_invoice_logo'),
]
operations = [
migrations.AddField(
model_name='invoiceitem',
name='quantity',
field=models.DecimalField(decimal_places=1, default=1, max_digits=5),
preserve_default=False,
),
migrations.AddField(
model_name='invoiceitem',
name='total',
field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=10),
preserve_default=False,
),
]<|fim▁end|>
|
class Migration(migrations.Migration):
|
<|file_name|>admin.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import
import csv
from django.contrib import admin
from django.http import HttpResponse
from .models import Donor, Donation
class DonationInline(admin.TabularInline):
model = Donation
extra = 0
@admin.register(Donor)
class DonorAdmin(admin.ModelAdmin):
inlines = [
DonationInline
]
# date_hierarchy = 'last_donation'
actions_on_bottom = True
list_display = 'name', 'business', 'last_donation', 'last_amount'
search_fields = 'name', 'business', 'email', 'address'
@staticmethod
def last_donation(obj):
return obj.donation_set.latest().when
<|fim▁hole|> actions = []
def make_list(self, request, queryset):
response = HttpResponse(content_type="text/plain")
response['Content-Disposition'] = 'attachment; filename=donors.txt'
for donor in queryset:
if donor.email:
response.write("{} <{}>\n".format(donor.name, donor.email))
return response
make_list.short_description = "Create email list (plain text)"
actions.append(make_list)
def make_csv(self, request, queryset):
fields = ('name', 'business', 'email', 'phone', 'address', 'last_donation', 'notes')
response = HttpResponse(content_type="text/csv")
response['Content-Disposition'] = 'attachment; filename=donors.csv'
writer = csv.DictWriter(response, fields, extrasaction='ignore')
writer.writeheader()
for donor in queryset:
row = {"last_donation": self.last_donation(donor)}
row.update(vars(donor))
writer.writerow(row)
return response
make_csv.short_description = "Create CSV"
actions.append(make_csv)
@admin.register(Donation)
class DonationAdmin(admin.ModelAdmin):
date_hierarchy = 'when'
actions_on_bottom = True
list_display = 'donor', 'when', 'amount', 'memo'
search_fields = 'donor', 'memo'
actions = []
def make_csv(self, request, queryset):
fields = ('name', 'business', 'when', 'amount', 'memo')
response = HttpResponse(content_type="text/csv")
response['Content-Disposition'] = 'attachment; filename=donations.csv'
writer = csv.DictWriter(response, fields, extrasaction='ignore')
writer.writeheader()
for donation in queryset:
row = {
"name": donation.donor.name,
"business": donation.donor.business,
}
row.update(vars(donation))
writer.writerow(row)
return response
make_csv.short_description = "Create CSV"
actions.append(make_csv)<|fim▁end|>
|
@staticmethod
def last_amount(obj):
return obj.donation_set.latest().amount
|
<|file_name|>admin.py<|end_file_name|><|fim▁begin|>#
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0<|fim▁hole|>#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.contrib import admin
# Register your models here.<|fim▁end|>
| |
<|file_name|>grapheme.go<|end_file_name|><|fim▁begin|>package boundaryh
//replacer:ignore
// TODO replace windows path separator
//go:generate go run $GOPATH\src\github.com\apaxa-go\generator\replacer\main.go -- $GOFILE
// gDecision reports whether there is a grapheme cluster break between l0 and r0.
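// l1Diff is the class of the nearest preceding rune whose class differs from l0 (see gLastNotEqualToInRunes);
// lOddRI reports whether l0 is a Regional Indicator that opens an RI sequence (see gIsOpenRIInRunes).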
func gDecision(l1Diff, l0 gClass, lOddRI bool, r0 gClass) bool {
// TODO translate to single boolean expression?
switch {
case l0 == gClassControl || r0 == gClassControl: // GB4 & GB5
return true
case l0 == gClassHangulL && (r0 == gClassHangulL || r0 == gClassHangulV || r0 == gClassHangulLV || r0 == gClassHangulLVT): // GB6
case (l0 == gClassHangulLV || l0 == gClassHangulV) && (r0 == gClassHangulV || r0 == gClassHangulT): // GB7
case (l0 == gClassHangulLVT || l0 == gClassHangulT) && r0 == gClassHangulT: // GB8
case r0 == gClassExtend || r0 == gClassZWJ: // GB9
case r0 == gClassSpacingMark: //GB9a
case l0 == gClassPrepend: //GB9b
case ((l0 == gClassEBase || l0 == gClassEBG) || (l0 == gClassExtend && (l1Diff == gClassEBase || l1Diff == gClassEBG))) && r0 == gClassEModifier: // GB10
case l0 == gClassZWJ && (r0 == gClassGlueAfterZWJ || r0 == gClassEBG): // GB11
case lOddRI && r0 == gClassRI: // GB12 & GB13
default:
return true
}
return false
}
//replacer:replace
//replacer:old InRunes []rune runes
//replacer:new InString string s
//replacer:new "" []byte bytes
// gLastNotEqualToInRunes returns the class of the last rune in runes whose class is not equal to l0.
func gLastNotEqualToInRunes(runes []rune, l0 gClass) gClass {
for len(runes) > 0 {
c, pos := gLastClassInRunes(runes)
if c != l0 {
return c
}
runes = runes[:pos]
}
return gClassOther
}
// gIsOpenRIInRunes reports whether l0 is an RI that opens an RI sequence in the string <runes..., l0, ...> (and so may be joined with the next RI).
func gIsOpenRIInRunes(runes []rune, l0 gClass) (res bool) {
if l0 != gClassRI {
return
}
res = true
for len(runes) > 0 {
c, pos := gLastClassInRunes(runes)
if c != gClassRI {
return
}
res = !res
runes = runes[:pos]
}
return
}
// runes must be valid (len>1).
// l0Pos must be valid (in runes).
func graphemeClusterEndInRunes(runes []rune, l0Pos int) int {
l := len(runes)
if l0Pos+1 < l && runes[l0Pos] == crRune && runes[l0Pos+1] == lfRune { // GB3
return l0Pos + 2
}
l0Pos = toRuneBeginInRunes(runes, l0Pos) // TODO do it only on external call
l0, r0Delta := gFirstClassInRunes(runes[l0Pos:])
l1Diff := gLastNotEqualToInRunes(runes[:l0Pos], l0)
lOddRI := gIsOpenRIInRunes(runes[:l0Pos], l0)
for l0Pos+r0Delta < l {
r0, r1Delta := gFirstClassInRunes(runes[l0Pos+r0Delta:])
if gDecision(l1Diff, l0, lOddRI, r0) {
return l0Pos + r0Delta
}
if l0 != r0 {
l1Diff = l0
}
l0 = r0
lOddRI = l0 == gClassRI && !lOddRI
l0Pos += r0Delta
r0Delta = r1Delta
}
return l
}
// GraphemeClusterEndInRunes computes the grapheme cluster which contains the pos-th rune.
// Returns (index of the grapheme cluster's last rune)+1.
// In other words, it returns the first grapheme cluster boundary on the right of the pos-th rune.
func GraphemeClusterEndInRunes(runes []rune, pos int) int {
l := len(runes)
if pos < 0 || pos >= l {
return InvalidPos
}
if pos == l-1 {
return l
}
return graphemeClusterEndInRunes(runes, pos)
}
// runes must be valid (len>1).
// r0Pos must be valid (in runes).
func graphemeClusterBeginInRunes(runes []rune, r0Pos int) int {
if r0Pos >= 1 && runes[r0Pos-1] == crRune && runes[r0Pos] == lfRune { // GB3
return r0Pos - 1
}
r0Pos = toRuneBeginInRunes(runes, r0Pos) // TODO do it only on external call
r0, _ := gFirstClassInRunes(runes[r0Pos:])
for r0Pos > 0 {
l0, l0Pos := gLastClassInRunes(runes[:r0Pos])
l1Diff := gLastNotEqualToInRunes(runes[:l0Pos], l0)
lOddRI := gIsOpenRIInRunes(runes[:l0Pos], l0)
if gDecision(l1Diff, l0, lOddRI, r0) {
return r0Pos
}
r0 = l0
r0Pos = l0Pos
}
return 0
}
// GraphemeClusterBeginInRunes computes the grapheme cluster which contains the pos-th rune.
// Returns the grapheme cluster's first rune index.
// In other words, it returns the first grapheme cluster boundary on the left of the pos-th rune.
func GraphemeClusterBeginInRunes(runes []rune, pos int) int {
l := len(runes)
if pos < 0 || pos >= l {
return InvalidPos
}
if pos == 0 {
return 0
}
return graphemeClusterBeginInRunes(runes, pos)
}
// GraphemeClusterAtInRunes computes the grapheme cluster which contains the pos-th rune and returns its boundary.
// The grapheme cluster may be retrieved by "runes[r.From:r.To]".
func GraphemeClusterAtInRunes(runes []rune, pos int) Boundary {
return Boundary{GraphemeClusterBeginInRunes(runes, pos), GraphemeClusterEndInRunes(runes, pos)}
}
// FirstGraphemeClusterInRunes computes the first grapheme cluster.
// Returns (index of the cluster's last rune)+1.
// The result may also be treated as the length of the first grapheme cluster.
// The first grapheme cluster may be retrieved by "runes[:r]".
func FirstGraphemeClusterInRunes(runes []rune) (r int) {
return GraphemeClusterEndInRunes(runes, 0)
}
// LastGraphemeClusterInRunes computes the last grapheme cluster.
// Returns the index of the cluster's first rune.
// The last grapheme cluster may be retrieved by "runes[r:]".
func LastGraphemeClusterInRunes(runes []rune) (r int) {
return GraphemeClusterBeginInRunes(runes, len(runes)-1)
}
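// Illustrative usage (a sketch; Boundary's From/To fields follow the doc comment on GraphemeClusterAtInRunes):
//   runes := []rune("héllo")
//   b := GraphemeClusterAtInRunes(runes, 1)
//   cluster := runes[b.From:b.To] // the grapheme cluster containing rune 1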
// GraphemeClustersInRunes computes all grapheme clusters and returns their boundaries.
func GraphemeClustersInRunes(runes []rune) (boundaries []Boundary) {
boundaries = make([]Boundary, 0, len(runes)) // TODO memory efficient
for i := 0; i < len(runes); {
length := FirstGraphemeClusterInRunes(runes[i:])
boundaries = append(boundaries, Boundary{i, i + length})
i += length
}
return
}
// GraphemeClusterBreaksInRunes computes all grapheme clusters and returns all breaks.
func GraphemeClusterBreaksInRunes(runes []rune) (breaks []int) {
l := len(runes)
if l == 0 {
return // []int{0}<|fim▁hole|> }
breaks = make([]int, 1, len(runes)) // TODO memory efficient
breaks[0] = 0
for pos := 0; pos < l; {
length := FirstGraphemeClusterInRunes(runes[pos:])
pos += length
breaks = append(breaks, pos)
}
return
}<|fim▁end|>
| |
<|file_name|>docstring_cop.py<|end_file_name|><|fim▁begin|>import sys, re
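# Flags identifiers wrapped in single backticks or stars inside docstrings; the vim
# substitution at the bottom upgrades them to double-backtick markup.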
for fn in sys.argv[1:]:
with open(fn, 'r') as f:
s = f.read()
xx = re.findall(r'([^\n]+)\s+\'\'\'(.*?)\'\'\'', s, re.M|re.S)
for (obj, doc) in xx:
s = re.findall('[^:`]\B(([`*])[a-zA-Z_][a-zA-Z0-9_]*\\2)\B', doc)
if s:
print '-'*50
print fn, obj
print '.'*50
print doc
print '.'*50<|fim▁hole|> print [ss[0] for ss in s]
# for vim:
# :s/\([^`:]\)\([`*]\)\([a-zA-Z0-9_]\+\)\2/\1``\3``/<|fim▁end|>
| |
<|file_name|>helpers.py<|end_file_name|><|fim▁begin|>"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from streamalert.shared.artifact_extractor import Artifact
from streamalert.shared.firehose import FirehoseClient
from streamalert.shared.logger import get_logger
from streamalert.shared.alert import Alert
from streamalert_cli.helpers import record_to_schema
LOGGER = get_logger(__name__)
PARTITION_PARTS = re.compile(
r'dt=(?P<year>\d{4})\-(?P<month>\d{2})\-(?P<day>\d{2})\-(?P<hour>\d{2})')
# The returned partition from the SHOW PARTITIONS command is dt=YYYY-MM-DD-HH,
# But when re-creating new partitions this value must be quoted
PARTITION_STMT = ("PARTITION (dt = '{year}-{month}-{day}-{hour}') "
"LOCATION 's3://{bucket}/{table_name}/{year}/{month}/{day}/{hour}'")
# How to map log schema types to Athena/Hive types
SCHEMA_TYPE_MAPPING = {
'string': 'string',
'integer': 'bigint',
'boolean': 'boolean',
'float': 'decimal(10,3)',
dict: 'map<string,string>',
list: 'array<string>'
}
# Athena query statement length limit
MAX_QUERY_LENGTH = 262144
def add_partition_statements(partitions, bucket, table_name):
"""Generate ALTER TABLE commands from existing partitions. It wil yield Athena
statement string(s), the length of each string should be less than Athena query
statement length limit, 262144 bytes.
https://docs.aws.amazon.com/athena/latest/ug/service-limits.html
Args:
partitions (set): The unique set of partitions gathered from Athena
bucket (str): The bucket name
table_name (str): The name of the Athena table
Yields:
string: The ALTER TABLE statements to add the new partitions
"""
# Each add-partition statement starts with "ALTER TABLE"
initial_statement = 'ALTER TABLE {} ADD IF NOT EXISTS'.format(table_name)
initial_statement_len = len(initial_statement)
# The statement will be stored in a list of string format before join into a string<|fim▁hole|> statement = [initial_statement]
statement_len = initial_statement_len
fmt_values = {
'bucket': bucket,
'table_name': table_name
}
for partition in sorted(partitions):
parts = PARTITION_PARTS.match(partition)
if not parts:
continue
fmt_values.update(parts.groupdict())
partition_stmt = PARTITION_STMT.format(**fmt_values)
partition_stmt_len = len(partition_stmt)
# A space is added between substrings when the whole statement is joined
space_count = len(statement)
# Monitor the length of the whole statement and make sure it won't exceed the limit
if statement_len + partition_stmt_len + space_count >= MAX_QUERY_LENGTH:
# If the length of whole statement about to exceed the limit, yield
# the statement and reset it for rest of partitions
yield ' '.join(statement)
statement = [initial_statement]
statement_len = initial_statement_len
statement_len += partition_stmt_len
statement.append(partition_stmt)
yield ' '.join(statement)
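# Illustrative usage (a sketch with hypothetical partition, bucket, and table names):
#   for stmt in add_partition_statements({'dt=2021-01-01-00'}, 'my-data-bucket', 'my_table'):
#       ...  # run each ALTER TABLE statement against Athena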
def logs_schema_to_athena_schema(log_schema, ddl_statement=True):
"""Convert streamalert log schema to athena schema
Args:
log_schema (dict): StreamAlert log schema object.
ddl_statement (bool): Indicate if the Athena table created by Athena
DDL query or terraform aws_glue_catalog_table resource
Returns:
athena_schema (dict): Equivalent Athena schema used for generating create table statement
"""
athena_schema = {}
for key_name, key_type in log_schema.items():
if ddl_statement:
# Backticks are needed for backward compatibility when creating Athena
# table via Athena DDL query.
key_name = '`{}`'.format(key_name)
if key_type == {}:
# For empty dicts
athena_schema[key_name] = SCHEMA_TYPE_MAPPING[dict]
elif key_type == []:
# For empty array
athena_schema[key_name] = SCHEMA_TYPE_MAPPING[list]
elif isinstance(key_type, dict):
# For recursion
athena_schema[key_name] = logs_schema_to_athena_schema(key_type, ddl_statement)
else:
athena_schema[key_name] = SCHEMA_TYPE_MAPPING[key_type]
return athena_schema
def unique_values_from_query(query_result):
"""Simplify Athena query results into a set of values.
Useful for listing tables, partitions, databases, enable_metrics
Args:
query_result (dict): The result of run_athena_query
Returns:
set: Unique values from the query result
"""
return {
value
for row in query_result['ResultSet']['Rows'] for result in row['Data']
for value in list(result.values())
}
def format_schema_tf(schema):
"""Format schema for an Athena table for terraform.
Args:
schema (dict): Equivalent Athena schema used for generating create table statement
Returns:
formatted_schema (list(tuple))
"""
# Construct the main Athena Schema
formatted_schema = []
for key_name in sorted(schema.keys()):
key_type = schema[key_name]
if isinstance(key_type, str):
formatted_schema.append((key_name.lower(), key_type))
# Account for nested structs
elif isinstance(key_type, dict):
struct_schema = ','.join(
'{0}:{1}'.format(sub_key.lower(), key_type[sub_key])
for sub_key in sorted(key_type.keys())
)
formatted_schema.append((key_name.lower(), 'struct<{}>'.format(struct_schema)))
return formatted_schema
def generate_alerts_table_schema():
"""Generate the schema for alerts table in terraform by using a fake alert
Returns:
athena_schema (dict): Equivalent Athena schema used for generating create table statement
"""
alert = Alert('temp_rule_name', {}, {})
output = alert.output_dict()
schema = record_to_schema(output)
athena_schema = logs_schema_to_athena_schema(schema, False)
return format_schema_tf(athena_schema)
def generate_data_table_schema(config, table, schema_override=None):
"""Generate the schema for data table in terraform
Args:
config (CLIConfig): Loaded StreamAlert config
table (string): The name of data table
Returns:
athena_schema (dict): Equivalent Athena schema used for generating create table statement
"""
enabled_logs = FirehoseClient.load_enabled_log_sources(
config['global']['infrastructure']['firehose'],
config['logs']
)
# Convert special characters in schema name to underscores
sanitized_table_name = FirehoseClient.sanitized_value(table)
# Check that the log type is enabled via Firehose
if sanitized_table_name not in enabled_logs:
LOGGER.error('Table name %s missing from configuration or '
'is not enabled.', sanitized_table_name)
return None
log_info = config['logs'][enabled_logs.get(sanitized_table_name)]
schema = dict(log_info['schema'])
sanitized_schema = FirehoseClient.sanitize_keys(schema)
athena_schema = logs_schema_to_athena_schema(sanitized_schema, False)
# Add envelope keys to Athena Schema
configuration_options = log_info.get('configuration')
if configuration_options:
envelope_keys = configuration_options.get('envelope_keys')
if envelope_keys:
sanitized_envelope_key_schema = FirehoseClient.sanitize_keys(envelope_keys)
# Note: this key is wrapped in backticks to be Hive compliant
athena_schema['streamalert:envelope_keys'] = logs_schema_to_athena_schema(
sanitized_envelope_key_schema, False
)
# Handle Schema overrides
# This is useful when an Athena schema needs to differ from the normal log schema
if schema_override:
for override in schema_override:
column_name, column_type = override.split('=')
# Columns are escaped to avoid Hive issues with special characters
column_name = '{}'.format(column_name)
if column_name in athena_schema:
athena_schema[column_name] = column_type
LOGGER.info('Applied schema override: %s:%s', column_name, column_type)
else:
LOGGER.error(
'Schema override column %s not found in Athena Schema, skipping',
column_name
)
return format_schema_tf(athena_schema)
def generate_artifacts_table_schema():
"""Generate the schema for artifacts table in terraform by using a test artifact instance
Returns:
athena_schema (dict): Equivalent Athena schema used for generating create table statement
"""
artifact = Artifact(
normalized_type='test_normalized_type',
value='test_value',
source_type='test_source_type',
record_id='test_record_id',
function=None
)
schema = record_to_schema(artifact.artifact)
athena_schema = logs_schema_to_athena_schema(schema, False)
return format_schema_tf(athena_schema)<|fim▁end|>
| |
<|file_name|>recognition.py<|end_file_name|><|fim▁begin|># PyKinect
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
import os
import ctypes
from os import path
_audio_path = path.join(path.dirname(__file__), '..', 'pykinect', 'audio', 'PyKinectAudio.dll')
if not os.path.exists(_audio_path):
_audio_path = path.join(path.dirname(__file__), '..', '..', '..', '..', '..', '..', 'Binaries', 'Debug', 'PyKinectAudio.dll')
if not path.exists(_audio_path):
raise Exception('Cannot find PyKinectAudio.dll')
_PYAUDIODLL = ctypes.CDLL(_audio_path)
_CreateRecognizer = _PYAUDIODLL.CreateRecognizer
_CreateRecognizer.argtypes = [ctypes.c_voidp, ctypes.POINTER(ctypes.c_voidp)]
_CreateRecognizer.restype = ctypes.HRESULT
_SetInputFile = _PYAUDIODLL.SetInputFile
_SetInputFile.argtypes = [ctypes.c_voidp, ctypes.c_voidp]
_SetInputFile.restype = ctypes.HRESULT
_SetInputStream = _PYAUDIODLL.SetInputStream
_SetInputStream.argtypes = [ctypes.c_voidp, ctypes.c_voidp]
_SetInputStream.restype = ctypes.HRESULT
_IUnknownRelease = _PYAUDIODLL.IUnknownRelease
_IUnknownRelease.argtypes = [ctypes.c_voidp]
_IUnknownRelease.restype = None
_LoadGrammar = _PYAUDIODLL.LoadGrammar
_LoadGrammar.argtypes = [ctypes.c_wchar_p, ctypes.c_voidp, ctypes.POINTER(ctypes.c_voidp)]
_LoadGrammar.restype = ctypes.HRESULT
_EnumRecognizers = _PYAUDIODLL.EnumRecognizers
_ReadCallback = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_uint32, ctypes.c_voidp, ctypes.POINTER(ctypes.c_uint32))
_Recognize_Callback = ctypes.WINFUNCTYPE(None, ctypes.c_wchar_p)
_RecognizeOne = _PYAUDIODLL.RecognizeOne
_RecognizeOne.argtypes = [ctypes.c_voidp, ctypes.c_uint32, _Recognize_Callback, _Recognize_Callback]
_RecognizeOne.restype = ctypes.HRESULT
_RecognizeAsync = _PYAUDIODLL.RecognizeAsync
_RecognizeAsync.argtypes = [ctypes.c_voidp, ctypes.c_uint, _Recognize_Callback, _Recognize_Callback, ctypes.POINTER(ctypes.c_voidp)]
_RecognizeAsync.restype = ctypes.HRESULT
_StopRecognizeAsync = _PYAUDIODLL.StopRecognizeAsync<|fim▁hole|>
class Grammar(object):
"""Represents a speech grammar constructed from an XML file"""
def __init__(self, filename):
self.filename = filename
def __del__(self):
#_IUnknownRelease(self._reco_ctx)
_IUnknownRelease(self._grammar)
class RecognizerInfo(object):
def __init__(self, id, description, token):
self.id = id
self.description = description
self._token = token
def __del__(self):
_IUnknownRelease(self._token)
def __repr__(self):
return 'RecognizerInfo(%r, %r, ...)' % (self.id, self.description)
class RecognitionResult(object):
def __init__(self, text, alternates = None):
self.text = text
if alternates:
self.alternates = tuple(RecognitionResult(alt) for alt in alternates)
else:
self.alternates = ()
class _event(object):
"""class used for adding/removing/invoking a set of listener functions"""
__slots__ = ['handlers']
def __init__(self):
self.handlers = []
def __iadd__(self, other):
self.handlers.append(other)
return self
def __isub__(self, other):
self.handlers.remove(other)
return self
def fire(self, *args):
for handler in self.handlers:
handler(*args)
class RecognitionEventArgs(object):
"""Provides information about speech recognition events."""
def __init__(self, result):
self.result = result
class SpeechRecognitionEngine(object):
"""Provides the means to access and manage an in-process speech recognition engine."""
def __init__(self, recognizer = None):
self.speech_recognized = _event()
self._async_handle = None
if isinstance(recognizer, str):
# TODO: Lookup by ID
pass
elif isinstance(recognizer, RecognizerInfo):
rec = ctypes.c_voidp()
_CreateRecognizer(recognizer._token, ctypes.byref(rec))
self._rec = rec
elif recognizer is None:
rec = ctypes.c_voidp()
_CreateRecognizer(None, ctypes.byref(rec))
self._rec = rec
else:
raise TypeError('Bad type for recognizer: ' + repr(recognizer))
def __del__(self):
# TODO: Need to shut down any listening threads
self.recognize_async_stop()
_IUnknownRelease(self._rec)
def load_grammar(self, grammar):
if isinstance(grammar, str):
grammar_obj = Grammar(grammar)
else:
grammar_obj = grammar
comGrammar = ctypes.c_voidp()
_LoadGrammar(grammar_obj.filename, self._rec, ctypes.byref(comGrammar))
grammar_obj._grammar = comGrammar
return grammar_obj
def set_input_to_audio_file(self, stream):
"""sets the input to a Python file-like object which implements read"""
stream_obj = getattr(stream, '__ISpStreamFormat__', None)
if stream_obj is not None:
# optimization: we can avoid going through Python to do the reading by passing
# the original ISpStreamFormat object through
_SetInputStream(self._rec, stream_obj)
else:
def reader(byteCount, buffer, bytesRead):
bytes = stream.read(byteCount)
ctypes.memmove(buffer, bytes, len(bytes))
bytesRead.contents.value = len(bytes)
return 0
self._reader = _ReadCallback(reader)
_SetInputFile(self._rec, self._reader)
def recognize_sync(self, timeout = 30000):
"""attempts to recognize speech and returns the recognized text.
By default times out after 30 seconds"""
res = []
alts = []
def callback(text):
res.append(text)
def alt_callback(text):
if text is not None:
alts.append(text)
_RecognizeOne(self._rec, timeout, _Recognize_Callback(callback), _Recognize_Callback(alt_callback))
if res:
return RecognitionResult(res[0], alts)
return None
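# Illustrative usage (a sketch; assumes a speech grammar XML file at the given path):
#   engine = SpeechRecognitionEngine()
#   engine.load_grammar('grammar.xml')
#   result = engine.recognize_sync(timeout=10000)
#   if result is not None:
#       print(result.text)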
def recognize_async(self, multiple = False):
cur_result = []
def callback(text):
cur_result.append(text)
def alt_callback(text):
if text is None:
# send the event
result = RecognitionResult(cur_result[0], cur_result[1:])
event_args = RecognitionEventArgs(result)
self.speech_recognized.fire(event_args)
del cur_result[:]
else:
cur_result.append(text)
stop_listening_handle = ctypes.c_voidp()
# keep alive our function pointers on ourselves...
self._async_callback = async_callback = _Recognize_Callback(callback)
self._async_alt_callback = async_alt_callback = _Recognize_Callback(alt_callback)
_RecognizeAsync(self._rec, multiple, async_callback, async_alt_callback, ctypes.byref(stop_listening_handle))
self._async_handle = stop_listening_handle
def recognize_async_stop(self):
if self._async_handle is not None:
_StopRecognizeAsync(self._async_handle)
self._async_handle = None
@staticmethod
def installed_recognizers():
ids = []
def callback(id, description, token):
ids.append(RecognizerInfo(id, description, token))
_EnumRecognizers(_EnumRecognizersCallback(callback))
return ids<|fim▁end|>
|
_StopRecognizeAsync.argtypes = [ctypes.c_voidp]
_StopRecognizeAsync.restype = ctypes.HRESULT
_EnumRecognizersCallback = ctypes.WINFUNCTYPE(None, ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_voidp)
|
<|file_name|>make_clim_file_bio_addons.py<|end_file_name|><|fim▁begin|>import subprocess
import os
import sys
import commands
import numpy as np
import pyroms
import pyroms_toolbox
from remap_bio_woa import remap_bio_woa<|fim▁hole|>dst_dir='./'
src_grd = pyroms_toolbox.BGrid_GFDL.get_nc_BGrid_GFDL('/archive/u1/uaf/kate/COBALT/GFDL_CM2.1_grid.nc', name='ESM2M_NWGOA3')
dst_grd = pyroms.grid.get_ROMS_grid('NWGOA3')
# Define all tracer names, long names, and units
list_tracer = ['alk', 'cadet_arag', 'cadet_calc', 'dic', 'fed', 'fedet', 'fedi', 'felg', 'fesm', 'ldon', 'ldop', 'lith', 'lithdet', 'nbact', 'ndet', 'ndi', 'nlg', 'nsm', 'nh4', 'no3', 'o2', 'pdet', 'po4', 'srdon', 'srdop', 'sldon', 'sldop', 'sidet', 'silg', 'sio4', 'nsmz', 'nmdz', 'nlgz']
tracer_longname = ['Alkalinity', 'Detrital CaCO3', 'Detrital CaCO3', 'Dissolved Inorganic Carbon', 'Dissolved Iron', 'Detrital Iron', 'Diazotroph Iron', 'Large Phytoplankton Iron', 'Small Phytoplankton Iron', 'labile DON', 'labile DOP', 'Lithogenic Aluminosilicate', 'lithdet', 'bacterial', 'ndet', 'Diazotroph Nitrogen', 'Large Phytoplankton Nitrogen', 'Small Phytoplankton Nitrogen', 'Ammonia', 'Nitrate', 'Oxygen', 'Detrital Phosphorus', 'Phosphate', 'Semi-Refractory DON', 'Semi-Refractory DOP', 'Semilabile DON', 'Semilabile DOP', 'Detrital Silicon', 'Large Phytoplankton Silicon', 'Silicate', 'Small Zooplankton Nitrogen', 'Medium-sized zooplankton Nitrogen', 'large Zooplankton Nitrogen']
tracer_units = ['mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'g/kg', 'g/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg']
#------- WOA13 ---------------------------------
id_tracer_update_woa = [19,20,22,29]
list_tracer_update_woa = []
tracer_longname_update_woa = []
tracer_units_update_woa = []
for idtra in id_tracer_update_woa:
print list_tracer[idtra]
for idtra in id_tracer_update_woa:
# add to tracer update
list_tracer_update_woa.append(list_tracer[idtra])
tracer_longname_update_woa.append(tracer_longname[idtra])
tracer_units_update_woa.append(tracer_units[idtra])
for mm in np.arange(12):
clim_file = dst_dir + dst_grd.name + '_clim_bio_GFDL+WOA+GLODAP_m' + str(mm+1).zfill(2) + '.nc'
print '\nBuild CLIM file for month', mm
for ktr in np.arange(len(list_tracer_update_woa)):
ctra = list_tracer_update_woa[ktr]
if ctra == 'sio4':
ctra = 'si'
mydict = {'tracer':list_tracer_update_woa[ktr],'longname':tracer_longname_update_woa[ktr],'units':tracer_units_update_woa[ktr],'file':data_dir_woa + ctra + '_WOA13-CM2.1_monthly.nc', \
'frame':mm}
remap_bio_woa(mydict, src_grd, dst_grd, dst_dir=dst_dir)
out_file = dst_dir + dst_grd.name + '_clim_bio_' + list_tracer_update_woa[ktr] + '.nc'
command = ('ncks', '-a', '-A', out_file, clim_file)
subprocess.check_call(command)
os.remove(out_file)
#--------- GLODAP -------------------------------
id_tracer_update_glodap = [0,3]
list_tracer_update_glodap = []
tracer_longname_update_glodap = []
tracer_units_update_glodap = []
for idtra in id_tracer_update_glodap:
print list_tracer[idtra]
for idtra in id_tracer_update_glodap:
# add to tracer update
list_tracer_update_glodap.append(list_tracer[idtra])
tracer_longname_update_glodap.append(tracer_longname[idtra])
tracer_units_update_glodap.append(tracer_units[idtra])
for mm in np.arange(12):
clim_file = dst_dir + dst_grd.name + '_clim_bio_GFDL+WOA+GLODAP_m' + str(mm+1).zfill(2) + '.nc'
print '\nBuild CLIM file for month', mm
for ktr in np.arange(len(list_tracer_update_glodap)):
ctra = list_tracer_update_glodap[ktr]
mydict = {'tracer':list_tracer_update_glodap[ktr],'longname':tracer_longname_update_glodap[ktr],'units':tracer_units_update_glodap[ktr],'file':data_dir_glodap + ctra + '_GLODAP-ESM2M_annual.nc', \
'frame':mm}
remap_bio_glodap(mydict, src_grd, dst_grd, dst_dir=dst_dir)
out_file = dst_dir + dst_grd.name + '_clim_bio_' + list_tracer_update_glodap[ktr] + '.nc'
command = ('ncks', '-a', '-A', out_file, clim_file)
subprocess.check_call(command)
os.remove(out_file)<|fim▁end|>
|
from remap_bio_glodap import remap_bio_glodap
data_dir_woa = '/archive/u1/uaf/kate/COBALT/'
data_dir_glodap = '/archive/u1/uaf/kate/COBALT/'
|
<|file_name|>sitecustomize.py<|end_file_name|><|fim▁begin|>import os
import socket<|fim▁hole|>
batch_enabled = int(os.environ.get('_BACKEND_BATCH_MODE', '0'))
if batch_enabled:
# Since latest Python 2 has `builtins` and `input`,
# we cannot detect Python 2 by their existence alone.
if sys.version_info.major > 2:
import builtins
def _input(prompt=''):
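# Echo the prompt to stdout, then read the user's reply from the local input socket.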
sys.stdout.write(prompt)
sys.stdout.flush()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
try:
sock.connect((input_host, input_port))
userdata = sock.recv(1024)
except ConnectionRefusedError:
userdata = b'<user-input-unavailable>'
return userdata.decode()
builtins._input = input # type: ignore
builtins.input = _input
else:
# __builtins__ is an alias dict for __builtin__ in modules other than __main__.
# Thus, we have to explicitly import __builtin__ module in Python 2.
import __builtin__
builtins = __builtin__
def _raw_input(prompt=''):
sys.stdout.write(prompt)
sys.stdout.flush()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((input_host, input_port))
userdata = sock.recv(1024)
except socket.error:
userdata = b'<user-input-unavailable>'
finally:
sock.close()
return userdata.decode()
builtins._raw_input = builtins.raw_input # type: ignore
builtins.raw_input = _raw_input # type: ignore<|fim▁end|>
|
import sys
input_host = '127.0.0.1'
input_port = 65000
|
<|file_name|>Spell.ts<|end_file_name|><|fim▁begin|>export default interface Spell {
id: number;
name: string;
icon: string;
//Death Knights
runesCost?: number;
runicPowerCost?: number;
//Demon Hunter
furyCost?: number;
painCost?: number;
//Feral Druid & Rogue
energyCost?: number;
comboPointsCost?: number;
//Hunter
focusCost?: number;
//Mage, Healers & Warlock
manaCost?: number;
//Monk
chiCost?: number;
//Paladin
holyPowerCost?: number;
//Priest
insanityCost?: number;
//Warlock
soulShardsCost?: number;
//Warrior
rageCost?: number;
};
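// Illustrative usage (hypothetical spell data):
// const FROSTBOLT: Spell = { id: 116, name: 'Frostbolt', icon: 'spell_frost_frostbolt02', manaCost: 2 };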
export interface LegendarySpell extends Spell {
bonusID?: number;
}
export interface Enchant extends Spell {
effectId: number;<|fim▁hole|> [key: string]: T
}<|fim▁end|>
|
}
export interface SpellList<T extends Spell = Spell> {
|
<|file_name|>bdo-poke.js<|end_file_name|><|fim▁begin|>/**
 * Module initialization.
*/
/*global define*/
define(['./poke-control', './poke-history', 'jquery'], function (pokeControl, pokeHistory, $) {
'use strict';
var pokeSettings = {
pokeControl: {<|fim▁hole|> pokeHistory: {
selector: '.poke-history-container',
template: '<table id="eventsHistory" class="table table-condensed table-hover table-striped"><thead><tr><th data-column-id="startDate">Начало</th><th data-column-id="channel" data-type="numeric">Канал</th><th data-column-id="event">Событие</th></tr></thead></table>',
tableSelector: '#eventsHistory'
}
},
create;
create = function (options) {
pokeSettings = $.extend(pokeSettings, options);
pokeControl.initialize(pokeSettings.pokeControl);
pokeHistory.initialize(pokeSettings.pokeHistory);
};
return {
create: create
};
});<|fim▁end|>
|
selector: '.poke-control-container',
template: '<form class="form"><div class="form-group"><label for="bdoChannel">Канал:</label><select class="form-control" name="bdoChannel" id="bdoChannel"></select></div><div class="form-group"><label for="bdoQuest">Квест:</label><select class="form-control" name="bdoQuest" id="bdoQuest"></select></div><button class="btn btn-success">Отправить</button></form>'
},
|
<|file_name|>configtx_test.go<|end_file_name|><|fim▁begin|>/*
Copyright IBM Corp. 2017 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package configtx
import (
"testing"
"github.com/hyperledger/fabric/common/configtx"<|fim▁hole|>}
func TestConfigtxManagerInterface(t *testing.T) {
_ = configtx.Manager(&Manager{})
}<|fim▁end|>
|
)
func TestConfigtxInitializerInterface(t *testing.T) {
_ = configtx.Initializer(&Initializer{})
|
<|file_name|>mainService.js<|end_file_name|><|fim▁begin|>'use strict';
describe('Service: mainService', function () {
// load the service's module
beforeEach(module('catsGoApp'));
// instantiate service
var mainService;
beforeEach(inject(function (_mainService_) {
mainService = _mainService_;
}));
<|fim▁hole|> var a=mainService.randomArray(4,5,1);
expect(a).not.toBe(null);
expect(!!mainService).toBe(true);
});
});<|fim▁end|>
|
it('randomArray testing', function () {
|
<|file_name|>test_exact.py<|end_file_name|><|fim▁begin|>import unittest
from llvm.core import (Module, Type, Builder)
from .support import TestCase, tests
class TestExact(TestCase):
def make_module(self):
mod = Module.new('asdfa')
fnty = Type.function(Type.void(), [Type.int()] * 2)
func = mod.add_function(fnty, 'foo')
bldr = Builder.new(func.append_basic_block(''))
return mod, func, bldr
def has_exact(self, inst, op):
self.assertTrue(('%s exact' % op) in str(inst), "exact flag does not work")
def _test_template(self, opf, opname):
mod, func, bldr = self.make_module()
a, b = func.args<|fim▁hole|>
def test_udiv_exact(self):
self._test_template(Builder.udiv, 'udiv')
def test_sdiv_exact(self):
self._test_template(Builder.sdiv, 'sdiv')
def test_lshr_exact(self):
self._test_template(Builder.lshr, 'lshr')
def test_ashr_exact(self):
self._test_template(Builder.ashr, 'ashr')
tests.append(TestExact)
if __name__ == '__main__':
unittest.main()<|fim▁end|>
|
self.has_exact(opf(bldr, a, b, exact=True), opname)
|
<|file_name|>update.go<|end_file_name|><|fim▁begin|>// Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"container/list"
"fmt"
"os/exec"
"strings"
log "gopkg.in/clog.v1"
git "github.com/gogits/git-module"
)
<|fim▁hole|> return &PushCommit{
Sha1: commit.ID.String(),
Message: commit.Message(),
AuthorEmail: commit.Author.Email,
AuthorName: commit.Author.Name,
CommitterEmail: commit.Committer.Email,
CommitterName: commit.Committer.Name,
Timestamp: commit.Committer.When,
}
}
func ListToPushCommits(l *list.List) *PushCommits {
commits := make([]*PushCommit, 0)
var actEmail string
for e := l.Front(); e != nil; e = e.Next() {
commit := e.Value.(*git.Commit)
if actEmail == "" {
actEmail = commit.Committer.Email
}
commits = append(commits, CommitToPushCommit(commit))
}
return &PushCommits{l.Len(), commits, "", nil}
}
type PushUpdateOptions struct {
OldCommitID string
NewCommitID string
RefFullName string
PusherID int64
PusherName string
RepoUserName string
RepoName string
}
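// Illustrative call (a sketch with hypothetical values):
//   err := PushUpdate(PushUpdateOptions{
//       OldCommitID:  oldID,
//       NewCommitID:  newID,
//       RefFullName:  "refs/heads/master",
//       PusherID:     1,
//       PusherName:   "alice",
//       RepoUserName: "alice",
//       RepoName:     "demo",
//   })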
// PushUpdate must be called for any push actions in order to
// generate necessary push action history feeds.
func PushUpdate(opts PushUpdateOptions) (err error) {
isNewRef := opts.OldCommitID == git.EMPTY_SHA
isDelRef := opts.NewCommitID == git.EMPTY_SHA
if isNewRef && isDelRef {
return fmt.Errorf("Old and new revisions are both %s", git.EMPTY_SHA)
}
repoPath := RepoPath(opts.RepoUserName, opts.RepoName)
gitUpdate := exec.Command("git", "update-server-info")
gitUpdate.Dir = repoPath
if err = gitUpdate.Run(); err != nil {
return fmt.Errorf("Fail to call 'git update-server-info': %v", err)
}
if isDelRef {
log.Trace("Reference '%s' has been deleted from '%s/%s' by %s",
opts.RefFullName, opts.RepoUserName, opts.RepoName, opts.PusherName)
return nil
}
gitRepo, err := git.OpenRepository(repoPath)
if err != nil {
return fmt.Errorf("OpenRepository: %v", err)
}
owner, err := GetUserByName(opts.RepoUserName)
if err != nil {
return fmt.Errorf("GetUserByName: %v", err)
}
repo, err := GetRepositoryByName(owner.ID, opts.RepoName)
if err != nil {
return fmt.Errorf("GetRepositoryByName: %v", err)
}
// Push tags.
if strings.HasPrefix(opts.RefFullName, git.TAG_PREFIX) {
if err := CommitRepoAction(CommitRepoActionOptions{
PusherName: opts.PusherName,
RepoOwnerID: owner.ID,
RepoName: repo.Name,
RefFullName: opts.RefFullName,
OldCommitID: opts.OldCommitID,
NewCommitID: opts.NewCommitID,
Commits: &PushCommits{},
}); err != nil {
return fmt.Errorf("CommitRepoAction (tag): %v", err)
}
return nil
}
newCommit, err := gitRepo.GetCommit(opts.NewCommitID)
if err != nil {
return fmt.Errorf("gitRepo.GetCommit: %v", err)
}
// Push new branch.
var l *list.List
if isNewRef {
l, err = newCommit.CommitsBeforeLimit(10)
if err != nil {
return fmt.Errorf("newCommit.CommitsBeforeLimit: %v", err)
}
} else {
l, err = newCommit.CommitsBeforeUntil(opts.OldCommitID)
if err != nil {
return fmt.Errorf("newCommit.CommitsBeforeUntil: %v", err)
}
}
if err := CommitRepoAction(CommitRepoActionOptions{
PusherName: opts.PusherName,
RepoOwnerID: owner.ID,
RepoName: repo.Name,
RefFullName: opts.RefFullName,
OldCommitID: opts.OldCommitID,
NewCommitID: opts.NewCommitID,
Commits: ListToPushCommits(l),
}); err != nil {
return fmt.Errorf("CommitRepoAction (branch): %v", err)
}
return nil
}<|fim▁end|>
|
// CommitToPushCommit transforms a git.Commit to PushCommit type.
func CommitToPushCommit(commit *git.Commit) *PushCommit {
|
<|file_name|>DraftFeatureFlags-core.js<|end_file_name|><|fim▁begin|>/**
* Copyright 2013-present, Facebook, Inc.<|fim▁hole|> * of patent rights can be found in the PATENTS file in the same directory.
*
* @providesModule DraftFeatureFlags-core
*
*/
'use strict';
var DraftFeatureFlags = {
draft_accept_selection_after_refocus: false,
draft_killswitch_allow_nontextnodes: false,
draft_segmented_entities_behavior: false
};
module.exports = DraftFeatureFlags;<|fim▁end|>
|
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
|
<|file_name|>api-helper.js<|end_file_name|><|fim▁begin|>/*******************************************************************************
* Copyright [2017] [Quirino Brizi ([email protected])]
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
'use strict'
module.exports = {
handleError: function(eventEmitter, err, req, res) {
console.error("received error %j", err);
var statusCode = (!err.statusCode || err.statusCode == 0) ? 500 : err.statusCode,
message = !err.error ? !err.body ? { message: "Internal Server Error" } : err.body : err.error;
eventEmitter.emit('event', {message: message, payload: err});
res.status(statusCode).send(message);
},
handleSuccess: function(eventEmitter, res, message, payload) {
eventEmitter.emit('event', {message: message, payload: payload});
res.status(200).json(payload);
},
handleApiError: function(err, req, res) {
console.error("received error: ", err.stack);
var statusCode = (!err.statusCode || err.statusCode == 0) ? 500 : err.statusCode,
message = !err.error ? !err.body ? { message: "Internal Server Error" } : err.body : err.error;<|fim▁hole|>};<|fim▁end|>
|
res.status(statusCode).send(message);
},
|
<|file_name|>test_forked_green_actors.py<|end_file_name|><|fim▁begin|>import sys
if '' not in sys.path:
sys.path.append('')
import time
import unittest
from pyactors.logs import file_logger
from pyactors.exceptions import EmptyInboxException
from tests import ForkedGreActor as TestActor
from multiprocessing import Manager
class ForkedGreenletActorTest(unittest.TestCase):
def test_run(self):
''' test_forked_green_actors.test_run
'''
test_name = 'test_forked_green_actors.test_run'
logger = file_logger(test_name, filename='logs/%s.log' % test_name)
actor = TestActor()
actor.start()
while actor.processing:
time.sleep(0.1)
actor.stop()
result = []
while True:
try:<|fim▁hole|>
self.assertEqual(actor.processing, False)
self.assertEqual(actor.waiting, False)
if __name__ == '__main__':
unittest.main()<|fim▁end|>
|
result.append(actor.inbox.get())
except EmptyInboxException:
break
self.assertEqual(len(result), 10)
|
<|file_name|>LabelField.java<|end_file_name|><|fim▁begin|>package autotest.tko;
import autotest.common.Utils;
public abstract class LabelField extends ParameterizedField {
@Override
public String getSqlCondition(String value) {
String condition = " IS NOT NULL";
if (value.equals(Utils.JSON_NULL)) {
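// A JSON null filter value selects rows where the label is NULL.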
condition = " IS NULL";<|fim▁hole|> return getFilteringName() + condition;
}
@Override
public String getFilteringName() {
return getQuotedSqlName() + ".id";
}
}<|fim▁end|>
|
}
|
<|file_name|>position.py<|end_file_name|><|fim▁begin|>from math import fabs
"""Kept these functions outside the class, since they are static
for the search and movement functions for board. The downside is it creates
an object for search purposes, which seems relatively heavy. I'll
optimize later if necessary
"""
def shift_up(pos):
"""returns new position that has shifted up"""
return Position(pos.x, pos.y + 1)
def shift_down(pos):
"""returns new position that has shifted down"""
return Position(pos.x, pos.y - 1)
def shift_right(pos):
"""returns new position that has shifted right"""
return Position(pos.x + 1, pos.y)
def shift_left(pos):
"""returns new position that has shifted left"""
return Position(pos.x - 1, pos.y)
def shift_up_left(pos):
"""returns new position that has shifted up and to the left"""
return Position(pos.x - 1, pos.y + 1)
def shift_down_left(pos):
"""returns new position that has shifted down and to the left"""
return Position(pos.x - 1, pos.y - 1)
def shift_up_right(pos):
"""returns new position that has shifted up and to the right"""
return Position(pos.x + 1, pos.y + 1)
def shift_down_right(pos):
"""returns new position that has shifted down and to the right"""
return Position(pos.x + 1, pos.y - 1)
class Position(object):
def __init__(self, x, y):
self._x, self._y = x, y
# TODO: test the speed of this implementation
# def __cmp__(self, other):
# if (self.width != other.width):
# return cmp(self.width, other.width)
# return cmp(self.height, other.height)
def __eq__(self, pos):
return self._x == pos.x and self._y == pos.y
def __ne__(self, pos):
return self._x != pos.x or self._y != pos.y
def __hash__(self):
return hash(('x', self._x, 'y', self._y))
def __repr__(self):
return '({0},{1})'.format(self._x, self._y)
def __str__(self):
return '({0},{1})'.format(self._x, self._y)
# ##################### Accessors/Modifiers ###############################
@property
def x(self):
return self._x
@property
def y(self):<|fim▁hole|> # ############################### Discovery ###############################
def is_diagonal(self, pos):
"""Verify if points are diagonal"""
return fabs(self.x - pos.x) == fabs(self.y - pos.y)
def is_parallel(self, pos):
"""Verify if points are parallel"""
return self.x == pos.x or self.y == pos.y
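# Illustrative usage:
#   a, b = Position(1, 1), Position(2, 2)
#   a.is_diagonal(b)  # True
#   a.is_parallel(b)  # False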
def is_adj(self, pos):
"""Verify if points are adjacent.
checks parallel on x plane if y +/- 1 is adj
checks parallel on y plane if x +/- 1 is adj
check if diagonal and if only 1 square away on the x plane
check if diagonal and if only 1 square away on the y plane
"""
return (self.x == pos.x and fabs(self.y - pos.y) == 1) \
or (self.y == pos.y and fabs(self.x - pos.x) == 1) \
or (self.is_diagonal(pos) and fabs(self.y - pos.y) == 1) \
or (self.is_diagonal(pos) and fabs(self.x - pos.x) == 1)
def to_json(self):
return {'x': self.x, 'y': self.y}<|fim▁end|>
|
return self._y
|
<|file_name|>sprk-footer.stories.ts<|end_file_name|><|fim▁begin|>// prettier-ignore
// @ts-ignore
import { moduleMetadata, Meta } from '@storybook/angular';
import { SprkFooterModule } from './sprk-footer.module';
import { BrowserAnimationsModule } from '@angular/platform-browser/animations';
import { SprkFooterComponent } from './sprk-footer.component';
import { markdownDocumentationLinkBuilder } from '../../../../../../../storybook-utilities/markdownDocumentationLinkBuilder';
import { RouterModule } from '@angular/router';
import { APP_BASE_HREF } from '@angular/common';
export default {
title: 'Components/Footer',
component: SprkFooterComponent,
decorators: [
moduleMetadata({
imports: [
SprkFooterModule,
BrowserAnimationsModule,
RouterModule.forRoot([
{
path: 'iframe.html',
component: SprkFooterComponent,
},
]),
],
providers: [{ provide: APP_BASE_HREF, useValue: '/' }],
}),
],
parameters: {
layout: 'fullscreen',
docs: {
source: {
type: 'code',
},
description: {
component: `
${markdownDocumentationLinkBuilder('footer')}
- The Footer is a navigation landmark for
        accessibility tools. The attribute \`role="contentinfo"\`
        must be present.
`,
},
iframeHeight: 800,
},
},
} as Meta;
export const defaultStory = () => ({
template: `
<sprk-footer
idString="footer-1"
[localLinks]="[
{
heading: 'Site Links',
links: [
{
text: 'About This',
href: '#nogo',
analyticsString: 'Link to Sub Item 1'
},
{
text: 'About This Other Thing',
href: '#nogo'
},
{
text: 'About That',
href: '#nogo'
},
{
text: 'Link Item',
href: '#nogo',
analyticsString: 'Link to Sub Item 1'
},
{
text: 'This Link Item',
href: '#nogo',
analyticsString: 'Link to Sub Item 1'
}
]
},
{
heading: 'Learn More',
links: [
{
text: 'About This Other Thing',
href: '#nogo'
},
{
text: 'About This',
href: '#nogo'
},
{
text: 'About That',
href: '#nogo'
},
{
text: 'Link Item',
href: '#nogo',
analyticsString: 'Link to Sub Item 1'
}
]
},
{
heading: 'Support',
links: [
{
text: 'Share Your Screen',
href: '#nogo',
analyticsString: 'Link to Sub Item 1'
},
{
text: 'Opt Out',
href: '#nogo'
},
{
text: 'Disclosures and Other Things',
href: '#nogo',
analyticsString: 'Link to Sub Item 1'
},
{
text: 'We Want Your Feedback',
href: '#nogo',
analyticsString: 'Link to Sub Item 1'
}
]
}
]"
globalHeading="Our Sister Companies"
[globalLinks]="[
{
text: 'Buy a home, refinance, or manage your mortgage online with America’s largest mortgage lender',
href: '#nogo',
imgSrc:
'https://spark-assets.netlify.app/rocket.svg',
analyticsString: 'link-1',
imgAlt: 'Rocket Mortgage Logo'
},
{
text: 'Get a personal loan to consolidate debt, renovate your home and more',
href: '#nogo',
imgSrc:
'https://spark-assets.netlify.app/rocket-loans-white.svg',
imgAlt: 'Rocket Loans Logo',
analyticsString: 'link-2'
},
{
text: 'Get a real estate agent handpicked for you and search the latest home listings',
href: '#nogo',
imgSrc:
'https://spark-assets.netlify.app/rocket-homes-white.svg',
imgAlt: 'Rocket Homes Logo',
analyticsString: 'link-3'
},
{
text: 'Find and buy the perfect car or truck from thousands of vehicles, all in one marketplace',
href: 'https://rocketauto.com',
imgSrc:
'https://spark-assets.netlify.app/rocket_auto.svg',
imgAlt: 'Rocket Auto Logo',
analyticsString: 'link-4'
}
]"
[socialLinks]="[
{
href: '#nogo',
icon: 'facebook',
iconCSS: 'sprk-c-Icon--xl sprk-c-Icon--filled-current-color',
analyticsString: 'social-link-1',
iconScreenReaderText: 'facebook'
},
{
href: '#nogo',
icon: 'instagram',
iconCSS: 'sprk-c-Icon--xl sprk-c-Icon--filled-current-color',
analyticsString: 'social-link-2',
iconScreenReaderText: 'instagram'
},
{
href: '#nogo',
icon: 'twitter',
iconCSS: 'sprk-c-Icon--xl sprk-c-Icon--filled-current-color',
analyticsString: 'social-link-3',
iconScreenReaderText: 'twitter'
},
{
href: '#nogo',
icon: 'youtube',
iconCSS: 'sprk-c-Icon--xl sprk-c-Icon--filled-current-color',
analyticsString: 'social-link-4',
iconScreenReaderText: 'youtube'
}
]"
[badgeLinks]="[
{
href: '#nogo',
icon: 'townhouse',
iconCSS: 'sprk-c-Icon--xl sprk-c-Icon--filled-current-color',
analyticsString: 'link-1',
iconScreenReaderText: 'townhouse'
},
{
href: '#nogo',
icon: 'townhouse',
iconCSS: 'sprk-c-Icon--xl sprk-c-Icon--filled-current-color',
analyticsString: 'link-2',
iconScreenReaderText: 'townhouse'
},
{
href: '#nogo',
icon: 'townhouse',
iconCSS: 'sprk-c-Icon--xl sprk-c-Icon--filled-current-color',
analyticsString: 'link-3',
iconScreenReaderText: 'townhouse'
}
]"
awardsHeading="Awards Heading Title"
[awards]="[
{
href: '#nogo',
imgSrc:<|fim▁hole|> analyticsString: 'awards-link-1'
},
{
href: '#nogo',
imgSrc:
'https://spark-assets.netlify.app/spark-logo-mark.svg',
imgAlt: 'placeholder',
analyticsString: 'awards-link-2'
}
]"
connectHeading="Connect With Us"
[disclaimerText]="[
{
text: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.'
},
{
text:
'Incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam.'
},
{
text: 'Lorem ipsum dolor sit amet, consectetur.'
},
{
text: 'Lorem ipsum dolor sit amet, consectetur.'
}
]"
[disclaimerToggle]="[
{
title: 'My Award Disclaimer',
body:
'Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Aliquam in laoreet ante.',
analyticsString: 'disclaimer'
}
]"
>
<sprk-stack
additional-disclaimer-slot
itemSpacing="large"
sprkStackItem
>
<p
sprkStackItem
sprkText
variant="bodyFour"
class="sprk-c-Footer__text"
>
Sed ut perspiciatis unde omnis iste natus error sit <a href="#nogo" sprkLink class="sprk-b-Link--inline-light"> inline link</a> accusantium doloremque laudantium
</p>
</sprk-stack>
<sprk-stack
itemSpacing="medium"
splitAt="small"
app-slot
>
<div sprkStackItem>
<a href="#nogo" sprkLink>
<img src="https://spark-assets.netlify.app/apple-store.svg" alt="Go to Apple Store"/>
</a>
</div>
<div sprkStackItem>
<a href="#nogo" sprkLink>
<img src="https://spark-assets.netlify.app/google-play.svg" alt="Go to Google Play Store"/>
</a>
</div>
</sprk-stack>
<div
additional-award-slot
sprkStackItem
>
<p
sprkText
variant="bodyFour"
class="sprk-c-Footer__text"
>
Sed ut perspiciatis unde omnis iste natus error sit <a href="#nogo" sprkLink class="sprk-b-Link--inline-light"> inline link</a> accusantium doloremque laudantium
</p>
</div>
</sprk-footer>
`,
});
defaultStory.storyName = 'Default';
defaultStory.parameters = {
jest: ['sprk-footer.component'],
};<|fim▁end|>
|
'https://spark-assets.netlify.app/spark-logo-mark.svg',
imgAlt: 'placeholder',
|
<|file_name|>opencga-sample-browser.config.js<|end_file_name|><|fim▁begin|><|fim▁hole|>};<|fim▁end|>
|
const OpencgaSampleBrowserConfig = {
|
<|file_name|>module_horo.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Fetches horoscopes from iltalehti.fi
Created on Oct 17, 2012
@author: huqa / [email protected]
"""
import re
horo_url = "http://www.iltalehti.fi/horoskooppi"
def command_horo(bot, user, channel, args):
"""Hakee päivittäisen horoskoopin. Käyttö !horo <horoskooppimerkki>"""
nick = getNick(user)
if not args:
return bot.say(channel, "lähe ny pelle menee %s" % nick)
haku = args.decode('iso-8859-1')<|fim▁hole|> haku = haku.title()
soup = getUrl(horo_url).getBS()
merkki = None
for m in soup.findAll("div", "valiotsikko"):
if m.find(text=re.compile(haku+"*")):
merkki = m.find(text=re.compile(haku+"*"))
break
if not merkki:
return bot.say(channel, "opettele ny kirjottaan kevyt pelle %s" % nick)
tekstit = merkki.next.contents[0]
bot.say(channel, "%s %s" % (str(merkki), str(tekstit)))<|fim▁end|>
| |
<|file_name|>generateImageWebPage.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
"""
##############################################################################
##
##
## @Name : generateImageWebPage.py
##
## @license : MetPX Copyright (C) 2004-2006 Environment Canada
## MetPX comes with ABSOLUTELY NO WARRANTY; For details type see the file
## named COPYING in the root of the source directory tree.
##
##
## @author: Nicholas Lemay
##
## @since: 22-11-2006, last updated on : 2008-04-30
##
##
##  Description :    Generates a web page that gives users access
##                   to the daily graphics of the last 7 days for all
##                   rx sources and tx clients.
##
##
##############################################################################
"""
import os, sys
import cgi
import cgitb; cgitb.enable()
"""
Small function that adds pxStats to the sys path.
"""
sys.path.insert(1, sys.path[0] + '/../../..')<|fim▁hole|>
"""
Small method required to add pxLib to syspath.
"""
PATHS = StatsPaths()
PATHS.setBasicPaths()
sys.path.append( PATHS.PXLIB )
def returnReplyToQuerier( error ="" ):
"""
@summary : Prints an empty reply so that the receiving web page will
                 not modify its display.
@param error : Error to return to querier.
@return : None
@note: Method does not actually "return" anything.
It just prints out it's reply that is to be
intercepted by the querier.
"""
if error == "":
reply = "images='';error='';action=showImageWindow"
else:
reply = "images='';error=%s" %error
print """
HTTP/1.0 200 OK
Server: NCSA/1.0a6
Content-type: text/plain
"""
print """%s""" %( reply )
def generateWebPage( images, lang ):
"""
@summary : Generates a web page that simply displays a
series of images one on top of the other.
@param images : List of images to display.
@param lang : language with whom this generator was called.
"""
smallImageWidth = 900
smallImageHeight = 320
statsPaths = StatsPaths()
statsPaths.setPaths( lang )
file = statsPaths.STATSWEBPAGESHTML + "combinedImageWebPage.html"
fileHandle = open( file, "w")
fileHandle.write( """
<html>
<head>
<style type="text/css">
a.photosLink{
display: block;
width: 1200px;
height: 310px;
background: url("") 0 0 no-repeat;
text-decoration: none;
}
</style>
<script type="text/javascript" src="../scripts/js/windowfiles/dhtmlwindow.js">
This is left here to give credit to the original
creators of the dhtml script used for the group pop ups:
/***********************************************
* DHTML Window Widget- Dynamic Drive (www.dynamicdrive.com)
* This notice must stay intact for legal use.
* Visit http://www.dynamicdrive.com/ for full source code
***********************************************/
</script>
<script>
counter =0;
function wopen(url, name, w, h){
// This function was taken on www.boutell.com
w += 32;
h += 96;
counter +=1;
var win = window.open(url,
counter,
'width=' + w + ', height=' + h + ', ' +
'location=no, menubar=no, ' +
'status=no, toolbar=no, scrollbars=no, resizable=no');
win.resizeTo(w, h);
win.focus();
}
function transport( image ){
wopen( image, 'popup', %s, %s);
}
</script>
</head>
<body>
""" %( smallImageWidth, smallImageHeight ) )
relativePathTowardsPxStats = "../../../pxStats/"
for i in range(len(images) ):
pathTowardsImage = str(images[i]).split( "pxStats" )[-1:][0]
fileHandle.write("""
<a href="#" class="photosLink" name="photo%s" onclick="javascript:transport('%s')" id="photo%s" border=0>
</a>
<script>
document.getElementById('photo%s').style.background="url(" + "%s" + ") no-repeat";
</script>
"""%( i, relativePathTowardsPxStats + pathTowardsImage, i, i, relativePathTowardsPxStats + pathTowardsImage ) )
fileHandle.write( """
</body>
</html>
""")
fileHandle.close()
try:
os.chmod(file,0777)
except:
pass
def getImagesLangFromForm():
"""
@summary : Parses form with whom this program was called.
@return: Returns the images and language found within the form.
"""
lang = LanguageTools.getMainApplicationLanguage()
images = []
newForm = {}
form = cgi.FieldStorage()
for key in form.keys():
value = form.getvalue(key, "")
if isinstance(value, list):
newvalue = ",".join(value)
else:
newvalue = value
newForm[key.replace("?","")]= newvalue
try:
images = newForm["images"]
images = images.split(';')
except:
pass
try:
lang = newForm["lang"]
except:
pass
return images, lang
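# Editor's sketch (hypothetical query string, not part of the original
# script): a request such as
#
#     ?images=graphs/a.png;graphs/b.png&lang=fr
#
# produces newForm = {'images': 'graphs/a.png;graphs/b.png', 'lang': 'fr'},
# so getImagesLangFromForm() returns (['graphs/a.png', 'graphs/b.png'], 'fr').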
def main():
"""
@summary : Generate an html page displaying all the image received in parameter.
Replies to the querier after generating web page so that querier
is informed the page was generated.
"""
images, lang = getImagesLangFromForm()
#print images
generateWebPage( images, lang )
returnReplyToQuerier()
if __name__ == '__main__':
main()<|fim▁end|>
|
from pxStats.lib.StatsPaths import StatsPaths
from pxStats.lib.LanguageTools import LanguageTools
|
<|file_name|>io.go<|end_file_name|><|fim▁begin|>// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package io provides basic interfaces to I/O primitives.
// Its primary job is to wrap existing implementations of such primitives,
// such as those in package os, into shared public interfaces that
// abstract the functionality, plus some other related primitives.
//
// Because these interfaces and primitives wrap lower-level operations with
// various implementations, unless otherwise informed clients should not
// assume they are safe for parallel execution.
package io
import (
"errors"
)
// ErrShortWrite means that a write accepted fewer bytes than requested
// but failed to return an explicit error.
var ErrShortWrite = errors.New("short write")
// ErrShortBuffer means that a read required a longer buffer than was provided.
var ErrShortBuffer = errors.New("short buffer")
// EOF is the error returned by Read when no more input is available.
// Functions should return EOF only to signal a graceful end of input.
// If the EOF occurs unexpectedly in a structured data stream,
// the appropriate error is either ErrUnexpectedEOF or some other error
// giving more detail.
var EOF = errors.New("EOF")
// ErrUnexpectedEOF means that EOF was encountered in the
// middle of reading a fixed-size block or data structure.
var ErrUnexpectedEOF = errors.New("unexpected EOF")
// ErrNoProgress is returned by some clients of an io.Reader when
// many calls to Read have failed to return any data or error,
// usually the sign of a broken io.Reader implementation.
var ErrNoProgress = errors.New("multiple Read calls return no data or error")
// Reader is the interface that wraps the basic Read method.
//
// Read reads up to len(p) bytes into p. It returns the number of bytes
// read (0 <= n <= len(p)) and any error encountered. Even if Read
// returns n < len(p), it may use all of p as scratch space during the call.
// If some data is available but not len(p) bytes, Read conventionally
// returns what is available instead of waiting for more.
//
// When Read encounters an error or end-of-file condition after
// successfully reading n > 0 bytes, it returns the number of
// bytes read. It may return the (non-nil) error from the same call
// or return the error (and n == 0) from a subsequent call.
// An instance of this general case is that a Reader returning
// a non-zero number of bytes at the end of the input stream may
// return either err == EOF or err == nil. The next Read should
// return 0, EOF regardless.
//
// Callers should always process the n > 0 bytes returned before
// considering the error err. Doing so correctly handles I/O errors
// that happen after reading some bytes and also both of the
// allowed EOF behaviors.
//
// Implementations of Read are discouraged from returning a
// zero byte count with a nil error, and callers should treat
// that situation as a no-op.
type Reader interface {
Read(p []byte) (n int, err error)
}
// Writer is the interface that wraps the basic Write method.
//
// Write writes len(p) bytes from p to the underlying data stream.
// It returns the number of bytes written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
// Write must return a non-nil error if it returns n < len(p).
type Writer interface {
Write(p []byte) (n int, err error)
}
// Closer is the interface that wraps the basic Close method.
//
// The behavior of Close after the first call is undefined.
// Specific implementations may document their own behavior.
type Closer interface {
Close() error
}
// Seeker is the interface that wraps the basic Seek method.
//
// Seek sets the offset for the next Read or Write to offset,
// interpreted according to whence: 0 means relative to the origin of
// the file, 1 means relative to the current offset, and 2 means
// relative to the end. Seek returns the new offset and an Error, if
// any.
type Seeker interface {
Seek(offset int64, whence int) (ret int64, err error)
}
// ReadWriter is the interface that groups the basic Read and Write methods.
type ReadWriter interface {
Reader
Writer
}
// ReadCloser is the interface that groups the basic Read and Close methods.
type ReadCloser interface {
Reader
Closer
}
// WriteCloser is the interface that groups the basic Write and Close methods.
type WriteCloser interface {
Writer
Closer
}
// ReadWriteCloser is the interface that groups the basic Read, Write and Close methods.
type ReadWriteCloser interface {
Reader
Writer
Closer
}
// ReadSeeker is the interface that groups the basic Read and Seek methods.
type ReadSeeker interface {
Reader
Seeker
}
// WriteSeeker is the interface that groups the basic Write and Seek methods.
type WriteSeeker interface {
Writer
Seeker
}
// ReadWriteSeeker is the interface that groups the basic Read, Write and Seek methods.
type ReadWriteSeeker interface {
Reader
Writer
Seeker
}
// ReaderFrom is the interface that wraps the ReadFrom method.
//
// ReadFrom reads data from r until EOF or error.
// The return value n is the number of bytes read.
// Any error except io.EOF encountered during the read is also returned.
//
// The Copy function uses ReaderFrom if available.
type ReaderFrom interface {
ReadFrom(r Reader) (n int64, err error)
}
// WriterTo is the interface that wraps the WriteTo method.
//
// WriteTo writes data to w until there's no more data to write or
// when an error occurs. The return value n is the number of bytes
// written. Any error encountered during the write is also returned.
//
// The Copy function uses WriterTo if available.
type WriterTo interface {
WriteTo(w Writer) (n int64, err error)
}
// ReaderAt is the interface that wraps the basic ReadAt method.
//
// ReadAt reads len(p) bytes into p starting at offset off in the
// underlying input source. It returns the number of bytes
// read (0 <= n <= len(p)) and any error encountered.
//
// When ReadAt returns n < len(p), it returns a non-nil error
// explaining why more bytes were not returned. In this respect,
// ReadAt is stricter than Read.
//
// Even if ReadAt returns n < len(p), it may use all of p as scratch
// space during the call. If some data is available but not len(p) bytes,
// ReadAt blocks until either all the data is available or an error occurs.
// In this respect ReadAt is different from Read.
//
// If the n = len(p) bytes returned by ReadAt are at the end of the
// input source, ReadAt may return either err == EOF or err == nil.
//
// If ReadAt is reading from an input source with a seek offset,
// ReadAt should not affect nor be affected by the underlying
// seek offset.
//
// Clients of ReadAt can execute parallel ReadAt calls on the
// same input source.
type ReaderAt interface {
ReadAt(p []byte, off int64) (n int, err error)
}
// WriterAt is the interface that wraps the basic WriteAt method.
//
// WriteAt writes len(p) bytes from p to the underlying data stream
// at offset off. It returns the number of bytes written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
// WriteAt must return a non-nil error if it returns n < len(p).
//
// If WriteAt is writing to a destination with a seek offset,
// WriteAt should not affect nor be affected by the underlying
// seek offset.
//
// Clients of WriteAt can execute parallel WriteAt calls on the same
// destination if the ranges do not overlap.
type WriterAt interface {
WriteAt(p []byte, off int64) (n int, err error)
}
// ByteReader is the interface that wraps the ReadByte method.
//
// ReadByte reads and returns the next byte from the input.
// If no byte is available, err will be set.
type ByteReader interface {
ReadByte() (c byte, err error)
}
// ByteScanner is the interface that adds the UnreadByte method to the
// basic ReadByte method.
//
// UnreadByte causes the next call to ReadByte to return the same byte
// as the previous call to ReadByte.
// It may be an error to call UnreadByte twice without an intervening
// call to ReadByte.
type ByteScanner interface {
ByteReader
UnreadByte() error
}
// ByteWriter is the interface that wraps the WriteByte method.
type ByteWriter interface {
WriteByte(c byte) error
}
// RuneReader is the interface that wraps the ReadRune method.
//
// ReadRune reads a single UTF-8 encoded Unicode character
// and returns the rune and its size in bytes. If no character is
// available, err will be set.
type RuneReader interface {
ReadRune() (r rune, size int, err error)
}
// RuneScanner is the interface that adds the UnreadRune method to the
// basic ReadRune method.
//
// UnreadRune causes the next call to ReadRune to return the same rune
// as the previous call to ReadRune.
// It may be an error to call UnreadRune twice without an intervening
// call to ReadRune.
type RuneScanner interface {
RuneReader
UnreadRune() error
}
// stringWriter is the interface that wraps the WriteString method.
type stringWriter interface {
WriteString(s string) (n int, err error)
}
// WriteString writes the contents of the string s to w, which accepts an array of bytes.
// If w already implements a WriteString method, it is invoked directly.
func WriteString(w Writer, s string) (n int, err error) {
if sw, ok := w.(stringWriter); ok {
return sw.WriteString(s)
}
return w.Write([]byte(s))
}
// ReadAtLeast reads from r into buf until it has read at least min bytes.
// It returns the number of bytes copied and an error if fewer bytes were read.
// The error is EOF only if no bytes were read.
// If an EOF happens after reading fewer than min bytes,
// ReadAtLeast returns ErrUnexpectedEOF.
// If min is greater than the length of buf, ReadAtLeast returns ErrShortBuffer.
// On return, n >= min if and only if err == nil.
func ReadAtLeast(r Reader, buf []byte, min int) (n int, err error) {
if len(buf) < min {
return 0, ErrShortBuffer
}
for n < min && err == nil {
var nn int
nn, err = r.Read(buf[n:])
n += nn
}
if n >= min {
err = nil
} else if n > 0 && err == EOF {
err = ErrUnexpectedEOF
}
return
}
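// Editor's sketch (not part of the original source): a worked illustration
// of the ReadAtLeast contract above, assuming some Reader r:
//
//	buf := make([]byte, 4)
//	n, err := ReadAtLeast(r, buf, 2) // success: n >= 2, err == nil
//	n, err = ReadAtLeast(r, buf, 8)  // min > len(buf): n == 0, ErrShortBuffer
//	// If r reaches EOF after yielding 1 byte while min is 2, the call
//	// returns n == 1 with ErrUnexpectedEOF.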
// ReadFull reads exactly len(buf) bytes from r into buf.
// It returns the number of bytes copied and an error if fewer bytes were read.
// The error is EOF only if no bytes were read.
// If an EOF happens after reading some but not all the bytes,
// ReadFull returns ErrUnexpectedEOF.
// On return, n == len(buf) if and only if err == nil.
func ReadFull(r Reader, buf []byte) (n int, err error) {
return ReadAtLeast(r, buf, len(buf))
}
// CopyN copies n bytes (or until an error) from src to dst.
// It returns the number of bytes copied and the earliest
// error encountered while copying.
// On return, written == n if and only if err == nil.
//
// If dst implements the ReaderFrom interface,
// the copy is implemented using it.
func CopyN(dst Writer, src Reader, n int64) (written int64, err error) {
written, err = Copy(dst, LimitReader(src, n))
if written == n {
return n, nil
}
if written < n && err == nil {
// src stopped early; must have been EOF.
err = EOF
}
return
}
// Copy copies from src to dst until either EOF is reached
// on src or an error occurs. It returns the number of bytes
// copied and the first error encountered while copying, if any.
//
// A successful Copy returns err == nil, not err == EOF.
// Because Copy is defined to read from src until EOF, it does
// not treat an EOF from Read as an error to be reported.
//
// If dst implements the ReaderFrom interface,
// the copy is implemented by calling dst.ReadFrom(src).
// Otherwise, if src implements the WriterTo interface,
// the copy is implemented by calling src.WriteTo(dst).
func Copy(dst Writer, src Reader) (written int64, err error) {
// If the writer has a ReadFrom method, use it to do the copy.
// Avoids an allocation and a copy.
if rt, ok := dst.(ReaderFrom); ok {
return rt.ReadFrom(src)
}
// Similarly, if the reader has a WriteTo method, use it to do the copy.
if wt, ok := src.(WriterTo); ok {
return wt.WriteTo(dst)
}
buf := make([]byte, 32*1024)
for {
nr, er := src.Read(buf)
if nr > 0 {
nw, ew := dst.Write(buf[0:nr])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
err = ew
break
}
if nr != nw {
err = ErrShortWrite
break
}
}
if er == EOF {
break
}
if er != nil {
err = er<|fim▁hole|> break
}
}
return written, err
}
// LimitReader returns a Reader that reads from r
// but stops with EOF after n bytes.
// The underlying implementation is a *LimitedReader.
func LimitReader(r Reader, n int64) Reader { return &LimitedReader{r, n} }
// A LimitedReader reads from R but limits the amount of
// data returned to just N bytes. Each call to Read
// updates N to reflect the new amount remaining.
type LimitedReader struct {
R Reader // underlying reader
N int64 // max bytes remaining
}
func (l *LimitedReader) Read(p []byte) (n int, err error) {
if l.N <= 0 {
return 0, EOF
}
if int64(len(p)) > l.N {
p = p[0:l.N]
}
n, err = l.R.Read(p)
l.N -= int64(n)
return
}
// NewSectionReader returns a SectionReader that reads from r
// starting at offset off and stops with EOF after n bytes.
func NewSectionReader(r ReaderAt, off int64, n int64) *SectionReader {
return &SectionReader{r, off, off, off + n}
}
// SectionReader implements Read, Seek, and ReadAt on a section
// of an underlying ReaderAt.
type SectionReader struct {
r ReaderAt
base int64
off int64
limit int64
}
func (s *SectionReader) Read(p []byte) (n int, err error) {
if s.off >= s.limit {
return 0, EOF
}
if max := s.limit - s.off; int64(len(p)) > max {
p = p[0:max]
}
n, err = s.r.ReadAt(p, s.off)
s.off += int64(n)
return
}
var errWhence = errors.New("Seek: invalid whence")
var errOffset = errors.New("Seek: invalid offset")
func (s *SectionReader) Seek(offset int64, whence int) (ret int64, err error) {
switch whence {
default:
return 0, errWhence
case 0:
offset += s.base
case 1:
offset += s.off
case 2:
offset += s.limit
}
if offset < s.base || offset > s.limit {
return 0, errOffset
}
s.off = offset
return offset - s.base, nil
}
func (s *SectionReader) ReadAt(p []byte, off int64) (n int, err error) {
if off < 0 || off >= s.limit-s.base {
return 0, EOF
}
off += s.base
if max := s.limit - off; int64(len(p)) > max {
p = p[0:max]
n, err = s.r.ReadAt(p, off)
if err == nil {
err = EOF
}
return n, err
}
return s.r.ReadAt(p, off)
}
// Size returns the size of the section in bytes.
func (s *SectionReader) Size() int64 { return s.limit - s.base }
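// Editor's sketch (hypothetical values, not part of the original source):
// given a ReaderAt f over the ten bytes "0123456789",
//
//	s := NewSectionReader(f, 2, 4) // a window over the bytes "2345"
//	buf := make([]byte, 8)
//	n, err := s.Read(buf) // n == 4, err == nil; the next Read returns 0, EOF
//	size := s.Size()      // 4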
// TeeReader returns a Reader that writes to w what it reads from r.
// All reads from r performed through it are matched with
// corresponding writes to w. There is no internal buffering -
// the write must complete before the read completes.
// Any error encountered while writing is reported as a read error.
func TeeReader(r Reader, w Writer) Reader {
return &teeReader{r, w}
}
type teeReader struct {
r Reader
w Writer
}
func (t *teeReader) Read(p []byte) (n int, err error) {
n, err = t.r.Read(p)
if n > 0 {
if n, err := t.w.Write(p[:n]); err != nil {
return n, err
}
}
return
}<|fim▁end|>
| |
<|file_name|>test_bibgloss.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
test_sphinx
~~~~~~~~~~~
General Sphinx test and check output.
"""
import sys
import pytest
import sphinx
from ipypublish.sphinx.tests import get_test_source_dir
from ipypublish.tests.utils import HTML2JSONParser
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_basic"))
def test_basic(app, status, warning, get_sphinx_app_output, data_regression):
app.build()
assert "build succeeded" in status.getvalue() # Build succeeded
warnings = warning.getvalue().strip()
assert warnings == ""
output = get_sphinx_app_output(app, buildername="html")
parser = HTML2JSONParser()
parser.feed(output)
if sphinx.version_info >= (2,):
data_regression.check(parser.parsed, basename="test_basic_v2")
else:
data_regression.check(parser.parsed, basename="test_basic_v1")
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_sortkeys"))
def test_sortkeys(app, status, warning, get_sphinx_app_output, data_regression):
app.build()
assert "build succeeded" in status.getvalue() # Build succeeded
warnings = warning.getvalue().strip()
assert warnings == ""
<|fim▁hole|>
parser = HTML2JSONParser()
parser.feed(output)
if sphinx.version_info >= (2,):
data_regression.check(parser.parsed, basename="test_sortkeys_v2")
else:
data_regression.check(parser.parsed, basename="test_sortkeys_v1")
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_unsorted"))
def test_unsorted(app, status, warning, get_sphinx_app_output, data_regression):
app.build()
assert "build succeeded" in status.getvalue() # Build succeeded
warnings = warning.getvalue().strip()
assert warnings == ""
output = get_sphinx_app_output(app, buildername="html")
parser = HTML2JSONParser()
parser.feed(output)
if sphinx.version_info >= (2,):
data_regression.check(parser.parsed, basename="test_unsorted_v2")
else:
data_regression.check(parser.parsed, basename="test_unsorted_v1")
@pytest.mark.sphinx(
buildername="html", srcdir=get_test_source_dir("bibgloss_missingref")
)
def test_missingref(app, status, warning, get_sphinx_app_output):
app.build()
assert "build succeeded" in status.getvalue() # Build succeeded
warnings = warning.getvalue().strip()
if (
"could not relabel bibglossary reference [missingkey]" not in warnings
and "WARNING: citation not found: missingkey" not in warnings # sphinx < 2
): # sphinx >= 2
raise AssertionError(
"should raise warning for missing citation `missingkey`: {}".format(
warnings
)
)
@pytest.mark.sphinx(
buildername="html", srcdir=get_test_source_dir("bibgloss_duplicatekey")
)
def test_duplicatekey(app, status, warning, get_sphinx_app_output):
with pytest.raises(KeyError):
app.build()
@pytest.mark.skipif(
sys.version_info < (3, 0),
reason="SyntaxError on import of texsoup/data.py line 135",
)
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_tex"))
def test_load_tex(app, status, warning, get_sphinx_app_output):
app.build()
assert "build succeeded" in status.getvalue() # Build succeeded
warnings = warning.getvalue().strip()
assert warnings == ""<|fim▁end|>
|
output = get_sphinx_app_output(app, buildername="html")
|
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>import uuid
import os
import shutil
import urlparse
import re
import hashlib
from lxml import html
from PIL import Image, ImageFile
from django.conf import settings
import views
ImageFile.MAXBLOCKS = 10000000
def match_or_none(string, rx):
"""
Tries to match a regular expression and returns an integer if it can.
Otherwise, returns None.
@param string: String to match against
@type string: basestring
@param rx: compiled regular expression
@return: number or None
@rtype: int/long or None
"""
if string is None:
return None
match = rx.search(string)
if match:
return int(match.groups()[0])
return None
width_rx = re.compile(r'width\s*:\s*(\d+)(px)?')
height_rx = re.compile(r'height\s*:\s*(\d+)(px)?')
def get_dimensions(img):
"""
Attempts to get the dimensions of an image from the img tag.
It first tries to grab it from the css styles and then falls back
to looking at the attributes.
@param img: Image tag.
@type img: etree._Element
@return: width and height of the image
@rtype: (int or None, int or None)
"""
styles = img.attrib.get('style')
width = match_or_none(styles, width_rx) or img.attrib.get('width')
if isinstance(width, basestring):
width = int(width)
height = match_or_none(styles, height_rx) or img.attrib.get('height')
if isinstance(height, basestring):
        height = int(height)
return width, height
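# Editor's sketch (not part of the original module): inline styles win over
# tag attributes, so for
#
#     <img src="x.png" style="width: 300px" height="200">
#
# get_dimensions returns (300, 200): width comes from width_rx matching the
# inline style, while height falls back to the 'height' attribute.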
def get_local_path(url):
"""
Converts a url to a local path
@param url: Url to convert
@type url: basestring
@return: Local path of the url
@rtype: basestring
"""
url = urlparse.unquote(url)
local_path = settings.STATIC_ROOT + os.path.normpath(url[len(settings.STATIC_URL):])
return local_path
# `buffer` is needed since hashlib apparently isn't unicode safe
hexhash = lambda s: hashlib.md5(buffer(s)).hexdigest()
def new_rendered_path(orig_path, width, height, ext=None):
"""
Builds a new rendered path based on the original path, width, and height.
    It takes a hash of the original path to prevent users from accidentally
    (or purposely) overwriting others' rendered thumbnails.

    This isn't perfect: we are assuming that the original file's contents
    never change, which is the django default. We could make this function
    more robust by hashing the file every time we save, but that has the
    obvious disadvantage of having to hash the file every time. YMMV.
@param orig_path: Path to the original image.
@type orig_path: "/path/to/file"
@param width: Desired width of the rendered image.
@type width: int or None
@param height: Desired height of the rendered image.
@type height: int or None
@param ext: Desired extension of the new image. If None, uses
the original extension.
@type ext: basestring or None
@return: Absolute path to where the rendered image should live.
@rtype: "/path/to/rendered/image"
"""
dirname = os.path.dirname(orig_path)<|fim▁hole|> os.mkdir(rendered_path)
hash_path = hexhash(orig_path)
if ext is None:
ext = os.path.splitext(os.path.basename(orig_path))[1]
if ext and ext[0] != u'.':
ext = u'.' + ext
name = '%s_%sx%s' % (hash_path, width, height)
return os.path.join(rendered_path, name) + ext
def is_rendered(path, width, height):
"""
Checks whether or not an image has been rendered to the given path
with the given dimensions
@param path: path to check
@type path: u"/path/to/image"
@param width: Desired width
@type width: int
@param height: Desired height
@type height: int
@return: Whether or not the image is correct
@rtype: bool
"""
if os.path.exists(path):
old_width, old_height = Image.open(path).size
return old_width == width and old_height == height
return False
def transcode_to_jpeg(image, path, width, height):
"""
Transcodes an image to JPEG.
@param image: Opened image to transcode to jpeg.
@type image: PIL.Image
@param path: Path to the opened image.
@type path: u"/path/to/image"
@param width: Desired width of the transcoded image.
@type width: int
@param height: Desired height of the transcoded image.
@type height: int
@return: Path to the new transcoded image.
@rtype: "/path/to/image"
"""
i_width, i_height = image.size
new_width = i_width if width is None else width
new_height = i_height if height is None else height
new_path = new_rendered_path(path, width, height, ext='jpg')
if is_rendered(new_path, new_width, new_height):
return new_path
new_image = image.resize((new_width, new_height), Image.ANTIALIAS)
new_image.save(new_path, quality=80, optimize=1)
return new_path
def re_render(path, width, height):
"""
Given an original image, width, and height, creates a thumbnailed image
of the exact dimensions given. We skip animated gifs because PIL can't
resize those automatically whereas browsers can contort them easily. We
also don't stretch images at all and return the original in that case.
@param path: Path to the original image
@type path: "/path/to/image"
@param width: Desired width
@type width: int or None
@param height: Desired height
@type height: int or None
@return: Path to the 'rendered' image.
@rtype: "/path/to/image"
"""
try:
image = Image.open(path)
except IOError:
# Probably doesn't exist or isn't an image
return path
# We have to call image.load first due to a PIL 1.1.7 bug
image.load()
if image.format == 'PNG' and getattr(settings, 'CKEDITOR_PNG_TO_JPEG', False):
pixels = reduce(lambda a,b: a*b, image.size)
# check that our entire alpha channel is set to full opaque
if image.mode == 'RGB' or image.split()[-1].histogram()[-1] == pixels:
return transcode_to_jpeg(image, path, width, height)
if image.size <= (width, height):
return path
if width is None and height is None:
return path
# We can't resize animated gifs
if image.format == 'GIF':
try:
image.seek(1)
return path
except EOFError:
# Static GIFs should throw an EOF on seek
pass
new_path = new_rendered_path(path, width, height)
if is_rendered(new_path, width, height):
return new_path
# Re-render the image, optimizing for filesize
new_image = image.resize((width, height), Image.ANTIALIAS)
new_image.save(new_path, quality=80, optimize=1)
return new_path
def get_html_tree(content):
return html.fragment_fromstring(content, create_parent='div')
def render_html_tree(tree):
return html.tostring(tree)[5:-6]
def resize_images(post_content):
"""
Goes through all images, resizing those that we know to be local to the
correct image size.
@param post_content: Raw html of the content to search for images with.
@type post_content: basestring containg HTML fragments
@return: Modified contents.
@rtype: basestring
"""
# Get tree
tree = get_html_tree(post_content)
# Get images
imgs = tree.xpath('//img[starts-with(@src, "%s")]' % settings.STATIC_URL)
for img in imgs:
orig_url = img.attrib['src']
orig_path = get_local_path(orig_url)
width, height = get_dimensions(img)
rendered_path = re_render(orig_path, width, height)
# If we haven't changed the image, move along.
if rendered_path == orig_path:
continue
# Flip to the rendered
img.attrib['data-original'] = orig_url
img.attrib['src'] = views.get_media_url(rendered_path)
# Strip of wrapping div tag
return render_html_tree(tree)
def swap_in_originals(content):
if 'data-original' not in content:
return content
tree = get_html_tree(content)
for img in tree.xpath('//img[@data-original]'):
img.attrib['src'] = img.attrib['data-original']
del img.attrib['data-original']
return render_html_tree(tree)<|fim▁end|>
|
rendered_path = os.path.join(dirname, 'rendered')
if not os.path.exists(rendered_path):
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|>var fs = require('fs');
var join = require('path').join;<|fim▁hole|>var debug = require('debug')('ip');
var util = require('util');
var EventEmitter = require('events').EventEmitter;
var thunkify = require('thunkify-wrap');
function IpUtil(ipFile, encoding, isLoad) {
if (typeof encoding === 'function') {
isLoad = encoding;
encoding = null;
}
this.ipFile = joinDirectory(process.cwd(), ipFile);
this.ipList = [];
if (encoding && encoding.toLowerCase().indexOf('utf') > -1) {
this.filter = function(buf) {
return buf.toString();
};
} else {
this.filter = function(buf) {
return iconv.decode(new Buffer(buf), 'gbk');
};
}
this.isLoad = isLoad || function(){
return true;
};
this.init();
}
util.inherits(IpUtil, EventEmitter);
IpUtil.prototype.init = function() {
var that = this;
var isLoad = this.isLoad;
debug('begin parse ipfile %s', this.ipFile);
if (!fs.existsSync(this.ipFile)) {
    debug('ip file not found!');
that.emit('error', 'ipfile_not_found');
return;
}
var ipMap = this.ipMap = {};
var ipList = this.ipList;
var getLine = readLine(this.ipFile, this.filter);
var result = getLine.next();
var line;
var lineNum = 0;
var counter = 1;
var _readLine = function () {
if (result.done) {
that.emit('loaded');
return;
}
    // Yield regularly so parsing the IP file does not monopolize the CPU.
if (counter % 100000 === 0) {
counter = 1;
setImmediate(_readLine);
return;
}
counter++;
lineNum++;
line = result.value;
if (!line || !line.trim()) {
result = getLine.next();
_readLine();
return;
}
var tokens = line.split(',', 6);
if (tokens.length !== 6) {
      debug('line %d is malformed: %s', lineNum, line);
result = getLine.next();
_readLine();
return;
}
var startIp = ip2Long(tokens[0]);
var endIp = ip2Long(tokens[1]);
if (!startIp || !endIp) {
      debug('line %d is malformed: %s', lineNum, line);
result = getLine.next();
_readLine();
return;
}
var country = getValue(tokens[2]);
var province = getValue(tokens[3]);
var city = getValue(tokens[4]);
var address = getValue(tokens[5]);
    // Normalize empty country/province/city values uniformly.
    // First handle the special sentinel values.
if ('IANA' === country) {
country = 'IANA';
province = 'IANA';
city = 'IANA';
}
if ('局域网' === country) {
country = '局域网';
province = '局域网';
city = '局域网';
}
if('国外' === country) {
country = '国外';
province = '国外';
city = '国外';
}
if('中国' === country && ('中国' === province || '中国' === city)) {
country = '中国';
province = '中国';
city = '中国';
}
if (!isLoad(country, province, city)) {
result = getLine.next();
setImmediate(_readLine);
return;
}
ipMap[startIp] = {
startIp: startIp,
endIp: endIp,
country: country,
province: province,
city: city,
address: address
};
ipList.push(startIp);
result = getLine.next();
setImmediate(_readLine);
};
_readLine();
var sortIp = function () {
//debug(this.ipMap)
    debug('finished loading the IP database: %d IP records loaded.', ipList.length);
ipList.sort(function(a, b) {
return a - b;
});
    debug('IP index sorting complete.');
that.emit('done');
};
this.on('loaded', sortIp);
};
function getValue(val) {
if (!val) {
return null;
}
val = val.trim();
if (val === 'null') {
return null;
}
return val;
}
IpUtil.prototype.getIpInfo = function(ip) {
if (!isIp(ip)) {
return null;
}
if (typeof ip === 'string') {
ip = ip2Long(ip);
}
var ipStart = this.locatStartIP(ip);
  debug('fetching IP info for start IP: %d', ipStart);
  var ipInfo = this.ipMap[ipStart];
  debug('lookup of IP %s finished.', long2IP(ip));
  if (ipInfo.endIp < ip) {
    debug('IP [%s] not found in the IP database', long2IP(ip));
return null;
}
return ipInfo;
};
IpUtil.prototype.refreshData = function() {
};
/**
 * Finds the start IP of the record that should contain the given ip. If the
 * IP database holds a record starting exactly at this ip, that ip is returned;
 * otherwise the largest start IP smaller than this ip is returned.
* @param ip
* @return
*/
IpUtil.prototype.locatStartIP = function(ip) {
  debug('searching for IP: %d', ip);
var centerIP = 0;
  var centerIndex = 0; // current pointer position
  var startIndex = 0; // start of the search range
  var endIndex = this.ipList.length - 1; // end of the search range
  var count = 0; // iteration counter
while (true) {
debug('%d. start = %d, end = %d', count++, startIndex, endIndex);
    // middle of the current range
centerIndex = Math.floor((startIndex + endIndex) / 2);
centerIP = this.ipList[centerIndex];
if (centerIP < ip) {
      // The middle IP is smaller than the target, so search the upper half next
startIndex = centerIndex;
} else if (centerIP > ip) {
      // The middle IP is larger than the target, so search the lower half next
endIndex = centerIndex;
} else {
      // Equal: the target IP has been found
break;
}
if (startIndex + 1 === endIndex) {
      // If the start and end pointers differ by only 1, the database has no
      // record starting exactly at this ip, so return the largest start IP
      // that is smaller than it.
if (centerIP > ip) {
centerIP = this.ipList[centerIndex - 1];
}
break;
}
}
  debug('corresponding start IP: %d (index %d)', centerIP, centerIndex);
return centerIP;
};
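/**
 * Editor's sketch (hypothetical values, not part of the original module):
 * with ipList = [0, 100, 200], the binary search above resolves
 *
 *   iputil.locatStartIP(100); // => 100, exact hit on a start IP
 *   iputil.locatStartIP(150); // => 100, largest start IP below the query
 */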
/**
* a,b,c ==> a/b/c
* a,b,/tmp ==> /tmp
* /a/b, c ==> /a/b/c
*/
function joinDirectory() {
var dirs = [].slice.call(arguments, 1);
var dir;
for (var i = 0, len = dirs.length; i < len; i++) {
dir = dirs[i];
if (/^\//.test(dir)) {
      // Found an absolute path, return it directly.
return dir;
}
}
return join.apply(null, [].slice.call(arguments));
}
function ip2Long(ip) {
if (!isIp(ip)) {
return 0;
}
var segs = ip.split('.');
var iplong =(parseInt(segs[0]) << 24
| parseInt(segs[1]) << 16
| parseInt(segs[2]) << 8
| parseInt(segs[3])) >>> 0;
return iplong;
}
var IP_REGEXP = /^(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])$/;
function isIp(str) {
if (!str) {
return false;
}
str = str.trim();
return IP_REGEXP.test(str);
/**
var tokens = str.split('.');
if (tokens.length !== 4) {
return false;
}
for (var i = 0, len = tokens.length; i < len; i++) {
if (parseInt(tokens[i]) > 255 || parseInt(tokens[i]) < 0) {
return false;
}
}
return true;
**/
}
function long2IP(ipLong) {
var ip = [ipLong >> 24];
ip.push((ipLong & 16711680) >> 16);
ip.push((ipLong & 65280) >> 8);
ip.push(ipLong & 255);
return ip.join('.');
}
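// Editor's note: a worked round trip of the two helpers above.
//
//   ip2Long('1.2.3.4');   // => 16909060, i.e. 1<<24 | 2<<16 | 3<<8 | 4
//   long2IP(16909060);    // => '1.2.3.4'
//   ip2Long('999.0.0.1'); // => 0, rejected by isIp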
function *readLine(file, filter) {
var buffer = fs.readFileSync(file);
  var i = 0, len = buffer.length;
  debug('file loaded, length %d', len);
  // newline character
var nl = require('os').EOL.charCodeAt(0);
var buf = [];
while(i < len) {
if (buffer[i] !== nl) {
buf.push(buffer[i]);
} else {
yield filter(new Buffer(buf));
buf = [];
}
i++;
}
}
module.exports = IpUtil;
module.exports.isIP = isIp;
module.exports.ip2Long = ip2Long;
module.exports.long2Ip = long2IP;
module.exports.getIpUtil = function *(ipFile, encoding, ipFilter) {
var iputil = new IpUtil(ipFile, encoding, ipFilter);
var end = thunkify.event(iputil, ['done', 'error']);
yield end();
return iputil;
};<|fim▁end|>
|
var iconv = require('iconv-lite');
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>import os
from setuptools import find_packages, setup
root = os.path.dirname(os.path.realpath(__file__))
long_description = open(os.path.join(root, 'README.rst')).read()
setup(
name='range-regex',
version='1.0.3',
description='Python numeric range regular expression generator',
long_description=long_description,
url='http://github.com/dimka665/range-regex',
author='Dmitry Voronin',
author_email='[email protected]',
license='BSD',
# packages=['range_regex'],
packages=find_packages(),<|fim▁hole|><|fim▁end|>
|
include_package_data=True,
keywords='numeric range regex regular expression generator',
)
|
<|file_name|>playerstate.ts<|end_file_name|><|fim▁begin|>/**<|fim▁hole|> */
export enum PlayerState {
LoadingAssets,
Ready,
Disconnected,
Idle
}<|fim▁end|>
|
* Handles the player state
|
<|file_name|>Util.py<|end_file_name|><|fim▁begin|>"""
Facilities for pyFTS Benchmark module
"""
import matplotlib
import matplotlib.cm as cmx
import matplotlib.colors as pltcolors
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sqlite3
#from mpl_toolkits.mplot3d import Axes3D
from copy import deepcopy
from pyFTS.common import Util
def open_benchmark_db(name):
"""
Open a connection with a Sqlite database designed to store benchmark results.
    :param name: database filename
:return: a sqlite3 database connection
"""
conn = sqlite3.connect(name)
#performance optimizations
conn.execute("PRAGMA journal_mode = WAL")
conn.execute("PRAGMA synchronous = NORMAL")
create_benchmark_tables(conn)
return conn
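# Editor's sketch (hypothetical file name, not part of the original module):
#
#     conn = open_benchmark_db('benchmarks.db')
#     # ... insert_benchmark(...) / queries ...
#     conn.close()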
def create_benchmark_tables(conn):
"""
Create a sqlite3 table designed to store benchmark results.
:param conn: a sqlite3 database connection
"""
c = conn.cursor()
c.execute('''CREATE TABLE if not exists benchmarks(
ID integer primary key, Date int, Dataset text, Tag text,
Type text, Model text, Transformation text, 'Order' int,
Scheme text, Partitions int,
Size int, Steps int, Method text, Measure text, Value real)''')
conn.commit()
def insert_benchmark(data, conn):
"""
Insert benchmark data on database
:param data: a tuple with the benchmark data with format:
ID: integer incremental primary key
Date: Date/hour of benchmark execution
    Dataset: identifies the dataset on which the benchmark was performed
    Tag: a user defined word that identifies a benchmark set
Type: forecasting type (point, interval, distribution)
Model: FTS model
Transformation: The name of data transformation, if one was used
Order: the order of the FTS method
Scheme: UoD partitioning scheme
Partitions: Number of partitions
Size: Number of rules of the FTS model
Steps: prediction horizon, i. e., the number of steps ahead
Measure: accuracy measure
Value: the measure value
:param conn: a sqlite3 database connection
:return:
"""
c = conn.cursor()
c.execute("INSERT INTO benchmarks(Date, Dataset, Tag, Type, Model, "
+ "Transformation, 'Order', Scheme, Partitions, "
+ "Size, Steps, Method, Measure, Value) "
+ "VALUES(datetime('now'),?,?,?,?,?,?,?,?,?,?,?,?,?)", data)
conn.commit()
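# Editor's sketch (illustrative values only, not from the original source):
# the data tuple supplies the thirteen columns after the auto-filled Date,
# in the order documented above:
#
#     data = ('TAIEX', 'experiment1', 'point', 'HOFTS', None, 2,
#             'GridPartitioner', 35, 120, 1, 'conditional', 'RMSE', 42.0)
#     insert_benchmark(data, conn)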
def process_common_data(dataset, tag, type, job):
"""
Wraps benchmark information on a tuple for sqlite database
:param dataset: benchmark dataset
:param tag: benchmark set alias
:param type: forecasting type
:param job: a dictionary with benchmark data
:return: tuple for sqlite database
"""
model = job["obj"]
if model.benchmark_only:
data = [dataset, tag, type, model.shortname,
str(model.transformations[0]) if len(model.transformations) > 0 else None,
model.order, None, None,
None]
else:
data = [dataset, tag, type, model.shortname,
str(model.partitioner.transformation) if model.partitioner.transformation is not None else None,
model.order, model.partitioner.name, str(model.partitioner.partitions),
len(model)]
return data
def process_common_data2(dataset, tag, type, job):
"""
Wraps benchmark information on a tuple for sqlite database
:param dataset: benchmark dataset
:param tag: benchmark set alias
:param type: forecasting type
:param job: a dictionary with benchmark data
:return: tuple for sqlite database
"""
data = [dataset, tag, type,
job['model'],
job['transformation'],
job['order'],
job['partitioner'],
job['partitions'],
job['size']
]
return data
def get_dataframe_from_bd(file, filter):
"""
Query the sqlite benchmark database and return a pandas dataframe with the results
:param file: the url of the benchmark database
:param filter: sql conditions to filter
:return: pandas dataframe with the query results
"""
con = sqlite3.connect(file)
sql = "SELECT * from benchmarks"
if filter is not None:
sql += " WHERE " + filter
return pd.read_sql_query(sql, con)
def extract_measure(dataframe, measure, data_columns):
if not dataframe.empty:
df = dataframe[(dataframe.Measure == measure)][data_columns]
tmp = df.to_dict(orient="records")[0]
ret = [k for k in tmp.values() if not np.isnan(k)]
return ret
else:
return None
def find_best(dataframe, criteria, ascending):
models = dataframe.Model.unique()
orders = dataframe.Order.unique()
ret = {}
for m in models:
for o in orders:
mod = {}
df = dataframe[(dataframe.Model == m) & (dataframe.Order == o)].sort_values(by=criteria, ascending=ascending)
if not df.empty:
_key = str(m) + str(o)
best = df.loc[df.index[0]]
mod['Model'] = m
mod['Order'] = o
mod['Scheme'] = best["Scheme"]
mod['Partitions'] = best["Partitions"]
ret[_key] = mod
return ret
def simple_synthetic_dataframe(file, tag, measure, sql=None):
'''
    Read experiment results from the sqlite3 database in 'file' and summarize
    the results of the metric 'measure' sharing the same 'tag', returning a
    Pandas DataFrame with the mean results.

    :param file: sqlite3 database file name
    :param tag: common tag of the experiments
    :param measure: metric to summarize
:return: Pandas DataFrame with the mean results
'''
df = get_dataframe_from_bd(file,"tag = '{}' and measure = '{}' {}"
.format(tag, measure,
'' if sql is None else 'and {}'.format(sql)))
data = []
models = df.Model.unique()
datasets = df.Dataset.unique()
for dataset in datasets:
for model in models:
_filter = (df.Dataset == dataset) & (df.Model == model)
avg = np.nanmean(df[_filter].Value)
std = np.nanstd(df[_filter].Value)
data.append([dataset, model, avg, std])
dat = pd.DataFrame(data, columns=['Dataset', 'Model', 'AVG', 'STD'])
dat = dat.sort_values(['AVG', 'STD'])
best = []
for dataset in datasets:
for model in models:
ix = dat[(dat.Dataset == dataset) & (dat.Model == model)].index[0]
best.append(ix)
ret = dat.loc[best].sort_values(['AVG', 'STD'])
ret.groupby('Dataset')
return ret
def analytic_tabular_dataframe(dataframe):
experiments = len(dataframe.columns) - len(base_dataframe_columns()) - 1
models = dataframe.Model.unique()
orders = dataframe.Order.unique()
schemes = dataframe.Scheme.unique()
partitions = dataframe.Partitions.unique()
steps = dataframe.Steps.unique()
measures = dataframe.Measure.unique()
data_columns = analytical_data_columns(experiments)
ret = []
for m in models:
for o in orders:
for s in schemes:
for p in partitions:
for st in steps:
for ms in measures:
df = dataframe[(dataframe.Model == m) & (dataframe.Order == o)
& (dataframe.Scheme == s) & (dataframe.Partitions == p)
& (dataframe.Steps == st) & (dataframe.Measure == ms) ]
if not df.empty:
for col in data_columns:
mod = [m, o, s, p, st, ms, df[col].values[0]]
ret.append(mod)
dat = pd.DataFrame(ret, columns=tabular_dataframe_columns())
return dat
def tabular_dataframe_columns():
return ["Model", "Order", "Scheme", "Partitions", "Steps", "Measure", "Value"]
def base_dataframe_columns():
return ["Model", "Order", "Scheme", "Partitions", "Size", "Steps", "Method"]
def point_dataframe_synthetic_columns():
return base_dataframe_columns().extend(["RMSEAVG", "RMSESTD",
"SMAPEAVG", "SMAPESTD", "UAVG","USTD", "TIMEAVG", "TIMESTD"])
def point_dataframe_analytic_columns(experiments):
columns = [str(k) for k in np.arange(0, experiments)]
columns.insert(0, "Model")
columns.insert(1, "Order")
columns.insert(2, "Scheme")
columns.insert(3, "Partitions")
columns.insert(4, "Size")
columns.insert(5, "Steps")
columns.insert(6, "Method")
columns.insert(7, "Measure")
return columns
def save_dataframe_point(experiments, file, objs, rmse, save, synthetic, smape, times, u, steps, method):
"""
Create a dataframe to store the benchmark results
:param experiments: dictionary with the execution results
:param file:
:param objs:
:param rmse:
:param save:
:param synthetic:
:param smape:
:param times:
:param u:
:return:
"""
ret = []
if synthetic:
for k in sorted(objs.keys()):
try:
mod = []
mfts = objs[k]
mod.append(mfts.shortname)
mod.append(mfts.order)
if not mfts.benchmark_only:
mod.append(mfts.partitioner.name)
mod.append(mfts.partitioner.partitions)
mod.append(len(mfts))
else:
mod.append('-')
mod.append('-')
mod.append('-')
mod.append(steps[k])
mod.append(method[k])
mod.append(np.round(np.nanmean(rmse[k]), 2))
mod.append(np.round(np.nanstd(rmse[k]), 2))
mod.append(np.round(np.nanmean(smape[k]), 2))
mod.append(np.round(np.nanstd(smape[k]), 2))
mod.append(np.round(np.nanmean(u[k]), 2))
mod.append(np.round(np.nanstd(u[k]), 2))
mod.append(np.round(np.nanmean(times[k]), 4))
mod.append(np.round(np.nanstd(times[k]), 4))
ret.append(mod)
except Exception as ex:
print("Erro ao salvar ", k)
print("Exceção ", ex)
columns = point_dataframe_synthetic_columns()
else:
for k in sorted(objs.keys()):
try:
mfts = objs[k]
n = mfts.shortname
o = mfts.order
if not mfts.benchmark_only:
s = mfts.partitioner.name
p = mfts.partitioner.partitions
l = len(mfts)
else:
s = '-'
p = '-'
l = '-'
st = steps[k]
mt = method[k]
tmp = [n, o, s, p, l, st, mt, 'RMSE']
tmp.extend(rmse[k])
ret.append(deepcopy(tmp))
tmp = [n, o, s, p, l, st, mt, 'SMAPE']
tmp.extend(smape[k])
ret.append(deepcopy(tmp))
tmp = [n, o, s, p, l, st, mt, 'U']
tmp.extend(u[k])
ret.append(deepcopy(tmp))
tmp = [n, o, s, p, l, st, mt, 'TIME']
tmp.extend(times[k])
ret.append(deepcopy(tmp))
except Exception as ex:
print("Erro ao salvar ", k)
print("Exceção ", ex)
columns = point_dataframe_analytic_columns(experiments)
try:
dat = pd.DataFrame(ret, columns=columns)
if save: dat.to_csv(Util.uniquefilename(file), sep=";", index=False)
return dat
except Exception as ex:
print(ex)
print(experiments)
print(columns)
print(ret)
def cast_dataframe_to_synthetic(infile, outfile, experiments, type):
if type == 'point':
analytic_columns = point_dataframe_analytic_columns
synthetic_columns = point_dataframe_synthetic_columns
synthetize_measures = cast_dataframe_to_synthetic_point
elif type == 'interval':
analytic_columns = interval_dataframe_analytic_columns
synthetic_columns = interval_dataframe_synthetic_columns
synthetize_measures = cast_dataframe_to_synthetic_interval
elif type == 'distribution':
analytic_columns = probabilistic_dataframe_analytic_columns
synthetic_columns = probabilistic_dataframe_synthetic_columns
synthetize_measures = cast_dataframe_to_synthetic_probabilistic
else:
raise ValueError("Type parameter has an unknown value!")
columns = analytic_columns(experiments)
dat = pd.read_csv(infile, sep=";", usecols=columns)
models = dat.Model.unique()
orders = dat.Order.unique()
schemes = dat.Scheme.unique()
partitions = dat.Partitions.unique()
steps = dat.Steps.unique()
methods = dat.Method.unique()
data_columns = analytical_data_columns(experiments)
ret = []
for m in models:
for o in orders:
for s in schemes:
for p in partitions:
for st in steps:
for mt in methods:
df = dat[(dat.Model == m) & (dat.Order == o) & (dat.Scheme == s) &
(dat.Partitions == p) & (dat.Steps == st) & (dat.Method == mt)]
if not df.empty:
mod = synthetize_measures(df, data_columns)
mod.insert(0, m)
mod.insert(1, o)
mod.insert(2, s)
mod.insert(3, p)
mod.insert(4, df.iat[0,5])
mod.insert(5, st)
mod.insert(6, mt)
ret.append(mod)
dat = pd.DataFrame(ret, columns=synthetic_columns())
dat.to_csv(outfile, sep=";", index=False)
def cast_dataframe_to_synthetic_point(df, data_columns):
ret = []
rmse = extract_measure(df, 'RMSE', data_columns)
smape = extract_measure(df, 'SMAPE', data_columns)
u = extract_measure(df, 'U', data_columns)
times = extract_measure(df, 'TIME', data_columns)
ret.append(np.round(np.nanmean(rmse), 2))
ret.append(np.round(np.nanstd(rmse), 2))
ret.append(np.round(np.nanmean(smape), 2))
ret.append(np.round(np.nanstd(smape), 2))
ret.append(np.round(np.nanmean(u), 2))
ret.append(np.round(np.nanstd(u), 2))
ret.append(np.round(np.nanmean(times), 4))
ret.append(np.round(np.nanstd(times), 4))
return ret
def analytical_data_columns(experiments):
data_columns = [str(k) for k in np.arange(0, experiments)]
return data_columns
def scale_params(data):
vmin = np.nanmin(data)
vlen = np.nanmax(data) - vmin
return (vmin, vlen)
def scale(data, params):
ndata = [(k-params[0])/params[1] for k in data]
return ndata
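# Editor's note: a worked example of the min-max rescaling above, assuming
# inputs without NaNs:
#
#     >>> scale_params([2, 4, 6])
#     (2, 4)
#     >>> scale([2, 4, 6], (2, 4))
#     [0.0, 0.5, 1.0]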
def stats(measure, data):
print(measure, np.nanmean(data), np.nanstd(data))
def unified_scaled_point(experiments, tam, save=False, file=None,
sort_columns=['UAVG', 'RMSEAVG', 'USTD', 'RMSESTD'],
sort_ascend=[1, 1, 1, 1],save_best=False,
ignore=None, replace=None):
fig, axes = plt.subplots(nrows=3, ncols=1, figsize=tam)
axes[0].set_title('RMSE')
axes[1].set_title('SMAPE')
axes[2].set_title('U Statistic')
models = {}
for experiment in experiments:
mdl = {}
dat_syn = pd.read_csv(experiment[0], sep=";", usecols=point_dataframe_synthetic_columns())
bests = find_best(dat_syn, sort_columns, sort_ascend)
dat_ana = pd.read_csv(experiment[1], sep=";", usecols=point_dataframe_analytic_columns(experiment[2]))
rmse = []
smape = []
u = []
times = []
data_columns = analytical_data_columns(experiment[2])
for b in sorted(bests.keys()):
if check_ignore_list(b, ignore):
continue
if b not in models:
models[b] = {}
models[b]['rmse'] = []
models[b]['smape'] = []
models[b]['u'] = []
models[b]['times'] = []
if b not in mdl:
mdl[b] = {}
mdl[b]['rmse'] = []
mdl[b]['smape'] = []
mdl[b]['u'] = []
mdl[b]['times'] = []
best = bests[b]
tmp = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"])
& (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])]
tmpl = extract_measure(tmp,'RMSE',data_columns)
mdl[b]['rmse'].extend( tmpl )
rmse.extend( tmpl )
tmpl = extract_measure(tmp, 'SMAPE', data_columns)
mdl[b]['smape'].extend(tmpl)
smape.extend(tmpl)
tmpl = extract_measure(tmp, 'U', data_columns)
mdl[b]['u'].extend(tmpl)
u.extend(tmpl)
tmpl = extract_measure(tmp, 'TIME', data_columns)
mdl[b]['times'].extend(tmpl)
times.extend(tmpl)
models[b]['label'] = check_replace_list(best["Model"] + " " + str(best["Order"]), replace)
print("GLOBAL")
rmse_param = scale_params(rmse)
stats("rmse", rmse)
smape_param = scale_params(smape)
stats("smape", smape)
u_param = scale_params(u)
stats("u", u)
times_param = scale_params(times)
        # scale only the models measured in this experiment; 'models' may also
        # hold keys from earlier experiments that are absent from 'mdl'
        for key in sorted(mdl.keys()):
models[key]['rmse'].extend( scale(mdl[key]['rmse'], rmse_param) )
models[key]['smape'].extend( scale(mdl[key]['smape'], smape_param) )
models[key]['u'].extend( scale(mdl[key]['u'], u_param) )
models[key]['times'].extend( scale(mdl[key]['times'], times_param) )
rmse = []
smape = []
u = []
times = []
labels = []
for key in sorted(models.keys()):
print(key)
rmse.append(models[key]['rmse'])
stats("rmse", models[key]['rmse'])
smape.append(models[key]['smape'])
stats("smape", models[key]['smape'])
u.append(models[key]['u'])
stats("u", models[key]['u'])
times.append(models[key]['times'])
labels.append(models[key]['label'])
axes[0].boxplot(rmse, labels=labels, autorange=True, showmeans=True)
axes[0].set_title("RMSE")
axes[1].boxplot(smape, labels=labels, autorange=True, showmeans=True)
axes[1].set_title("SMAPE")
axes[2].boxplot(u, labels=labels, autorange=True, showmeans=True)
axes[2].set_title("U Statistic")
plt.tight_layout()
Util.show_and_save_image(fig, file, save)
def plot_dataframe_point(file_synthetic, file_analytic, experiments, tam, save=False, file=None,
sort_columns=['UAVG', 'RMSEAVG', 'USTD', 'RMSESTD'],
sort_ascend=[1, 1, 1, 1],save_best=False,
ignore=None,replace=None):
fig, axes = plt.subplots(nrows=3, ncols=1, figsize=tam)
axes[0].set_title('RMSE')
axes[1].set_title('SMAPE')
axes[2].set_title('U Statistic')
dat_syn = pd.read_csv(file_synthetic, sep=";", usecols=point_dataframe_synthetic_columns())
bests = find_best(dat_syn, sort_columns, sort_ascend)
dat_ana = pd.read_csv(file_analytic, sep=";", usecols=point_dataframe_analytic_columns(experiments))
data_columns = analytical_data_columns(experiments)
if save_best:
dat = pd.DataFrame.from_dict(bests, orient='index')
dat.to_csv(Util.uniquefilename(file_synthetic.replace("synthetic","best")), sep=";", index=False)
rmse = []
smape = []
u = []
times = []
labels = []
for b in sorted(bests.keys()):
if check_ignore_list(b, ignore):
continue
best = bests[b]
tmp = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"])
& (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])]
rmse.append( extract_measure(tmp,'RMSE',data_columns) )
smape.append(extract_measure(tmp, 'SMAPE', data_columns))
u.append(extract_measure(tmp, 'U', data_columns))
times.append(extract_measure(tmp, 'TIME', data_columns))
labels.append(check_replace_list(best["Model"] + " " + str(best["Order"]),replace))
axes[0].boxplot(rmse, labels=labels, autorange=True, showmeans=True)
axes[0].set_title("RMSE")
axes[1].boxplot(smape, labels=labels, autorange=True, showmeans=True)
axes[1].set_title("SMAPE")
axes[2].boxplot(u, labels=labels, autorange=True, showmeans=True)
axes[2].set_title("U Statistic")<|fim▁hole|> plt.tight_layout()
Util.show_and_save_image(fig, file, save)
def check_replace_list(m, replace):
if replace is not None:
for r in replace:
if r[0] in m:
return r[1]
return m
def check_ignore_list(b, ignore):
    if ignore is not None:
        return any(i in b for i in ignore)
    return False
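# Examples of the two filter helpers above (model names are illustrative
# assumptions):
#
#   >>> check_replace_list("PWFTS 2", [("PWFTS", "Probabilistic WFTS")])
#   'Probabilistic WFTS'
#   >>> check_ignore_list("hofts 3", ["hofts"])
#   True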
def save_dataframe_interval(coverage, experiments, file, objs, resolution, save, sharpness, synthetic, times,
q05, q25, q75, q95, steps, method):
ret = []
if synthetic:
for k in sorted(objs.keys()):
mod = []
mfts = objs[k]
mod.append(mfts.shortname)
mod.append(mfts.order)
l = len(mfts)
if not mfts.benchmark_only:
mod.append(mfts.partitioner.name)
mod.append(mfts.partitioner.partitions)
mod.append(l)
else:
mod.append('-')
mod.append('-')
mod.append('-')
mod.append(steps[k])
mod.append(method[k])
mod.append(round(np.nanmean(sharpness[k]), 2))
mod.append(round(np.nanstd(sharpness[k]), 2))
mod.append(round(np.nanmean(resolution[k]), 2))
mod.append(round(np.nanstd(resolution[k]), 2))
mod.append(round(np.nanmean(coverage[k]), 2))
mod.append(round(np.nanstd(coverage[k]), 2))
mod.append(round(np.nanmean(times[k]), 2))
mod.append(round(np.nanstd(times[k]), 2))
mod.append(round(np.nanmean(q05[k]), 2))
mod.append(round(np.nanstd(q05[k]), 2))
mod.append(round(np.nanmean(q25[k]), 2))
mod.append(round(np.nanstd(q25[k]), 2))
mod.append(round(np.nanmean(q75[k]), 2))
mod.append(round(np.nanstd(q75[k]), 2))
mod.append(round(np.nanmean(q95[k]), 2))
mod.append(round(np.nanstd(q95[k]), 2))
ret.append(mod)
columns = interval_dataframe_synthetic_columns()
else:
for k in sorted(objs.keys()):
try:
mfts = objs[k]
n = mfts.shortname
o = mfts.order
if not mfts.benchmark_only:
s = mfts.partitioner.name
p = mfts.partitioner.partitions
l = len(mfts)
else:
s = '-'
p = '-'
l = '-'
st = steps[k]
mt = method[k]
tmp = [n, o, s, p, l, st, mt, 'Sharpness']
tmp.extend(sharpness[k])
ret.append(deepcopy(tmp))
tmp = [n, o, s, p, l, st, mt, 'Resolution']
tmp.extend(resolution[k])
ret.append(deepcopy(tmp))
tmp = [n, o, s, p, l, st, mt, 'Coverage']
tmp.extend(coverage[k])
ret.append(deepcopy(tmp))
tmp = [n, o, s, p, l, st, mt, 'TIME']
tmp.extend(times[k])
ret.append(deepcopy(tmp))
tmp = [n, o, s, p, l, st, mt, 'Q05']
tmp.extend(q05[k])
ret.append(deepcopy(tmp))
tmp = [n, o, s, p, l, st, mt, 'Q25']
tmp.extend(q25[k])
ret.append(deepcopy(tmp))
tmp = [n, o, s, p, l, st, mt, 'Q75']
tmp.extend(q75[k])
ret.append(deepcopy(tmp))
tmp = [n, o, s, p, l, st, mt, 'Q95']
tmp.extend(q95[k])
ret.append(deepcopy(tmp))
except Exception as ex:
print("Erro ao salvar ", k)
print("Exceção ", ex)
columns = interval_dataframe_analytic_columns(experiments)
dat = pd.DataFrame(ret, columns=columns)
if save: dat.to_csv(Util.uniquefilename(file), sep=";")
return dat
def interval_dataframe_analytic_columns(experiments):
columns = [str(k) for k in np.arange(0, experiments)]
columns.insert(0, "Model")
columns.insert(1, "Order")
columns.insert(2, "Scheme")
columns.insert(3, "Partitions")
columns.insert(4, "Size")
columns.insert(5, "Steps")
columns.insert(6, "Method")
columns.insert(7, "Measure")
return columns
def interval_dataframe_synthetic_columns():
columns = ["Model", "Order", "Scheme", "Partitions","SIZE", "Steps","Method" "SHARPAVG", "SHARPSTD", "RESAVG", "RESSTD", "COVAVG",
"COVSTD", "TIMEAVG", "TIMESTD", "Q05AVG", "Q05STD", "Q25AVG", "Q25STD", "Q75AVG", "Q75STD", "Q95AVG", "Q95STD"]
return columns
def cast_dataframe_to_synthetic_interval(df, data_columns):
sharpness = extract_measure(df, 'Sharpness', data_columns)
resolution = extract_measure(df, 'Resolution', data_columns)
coverage = extract_measure(df, 'Coverage', data_columns)
times = extract_measure(df, 'TIME', data_columns)
q05 = extract_measure(df, 'Q05', data_columns)
q25 = extract_measure(df, 'Q25', data_columns)
q75 = extract_measure(df, 'Q75', data_columns)
q95 = extract_measure(df, 'Q95', data_columns)
ret = []
ret.append(np.round(np.nanmean(sharpness), 2))
ret.append(np.round(np.nanstd(sharpness), 2))
ret.append(np.round(np.nanmean(resolution), 2))
ret.append(np.round(np.nanstd(resolution), 2))
ret.append(np.round(np.nanmean(coverage), 2))
ret.append(np.round(np.nanstd(coverage), 2))
ret.append(np.round(np.nanmean(times), 4))
ret.append(np.round(np.nanstd(times), 4))
ret.append(np.round(np.nanmean(q05), 4))
ret.append(np.round(np.nanstd(q05), 4))
ret.append(np.round(np.nanmean(q25), 4))
ret.append(np.round(np.nanstd(q25), 4))
ret.append(np.round(np.nanmean(q75), 4))
ret.append(np.round(np.nanstd(q75), 4))
ret.append(np.round(np.nanmean(q95), 4))
ret.append(np.round(np.nanstd(q95), 4))
return ret
def unified_scaled_interval(experiments, tam, save=False, file=None,
sort_columns=['COVAVG', 'SHARPAVG', 'COVSTD', 'SHARPSTD'],
sort_ascend=[True, False, True, True],save_best=False,
ignore=None, replace=None):
fig, axes = plt.subplots(nrows=3, ncols=1, figsize=tam)
axes[0].set_title('Sharpness')
axes[1].set_title('Resolution')
axes[2].set_title('Coverage')
models = {}
for experiment in experiments:
mdl = {}
dat_syn = pd.read_csv(experiment[0], sep=";", usecols=interval_dataframe_synthetic_columns())
bests = find_best(dat_syn, sort_columns, sort_ascend)
dat_ana = pd.read_csv(experiment[1], sep=";", usecols=interval_dataframe_analytic_columns(experiment[2]))
sharpness = []
resolution = []
coverage = []
times = []
data_columns = analytical_data_columns(experiment[2])
for b in sorted(bests.keys()):
if check_ignore_list(b, ignore):
continue
if b not in models:
models[b] = {}
models[b]['sharpness'] = []
models[b]['resolution'] = []
models[b]['coverage'] = []
models[b]['times'] = []
if b not in mdl:
mdl[b] = {}
mdl[b]['sharpness'] = []
mdl[b]['resolution'] = []
mdl[b]['coverage'] = []
mdl[b]['times'] = []
best = bests[b]
print(best)
tmp = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"])
& (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])]
tmpl = extract_measure(tmp, 'Sharpness', data_columns)
mdl[b]['sharpness'].extend(tmpl)
sharpness.extend(tmpl)
tmpl = extract_measure(tmp, 'Resolution', data_columns)
mdl[b]['resolution'].extend(tmpl)
resolution.extend(tmpl)
tmpl = extract_measure(tmp, 'Coverage', data_columns)
mdl[b]['coverage'].extend(tmpl)
coverage.extend(tmpl)
tmpl = extract_measure(tmp, 'TIME', data_columns)
mdl[b]['times'].extend(tmpl)
times.extend(tmpl)
models[b]['label'] = check_replace_list(best["Model"] + " " + str(best["Order"]), replace)
sharpness_param = scale_params(sharpness)
resolution_param = scale_params(resolution)
coverage_param = scale_params(coverage)
times_param = scale_params(times)
        for key in sorted(mdl.keys()):
models[key]['sharpness'].extend(scale(mdl[key]['sharpness'], sharpness_param))
models[key]['resolution'].extend(scale(mdl[key]['resolution'], resolution_param))
models[key]['coverage'].extend(scale(mdl[key]['coverage'], coverage_param))
models[key]['times'].extend(scale(mdl[key]['times'], times_param))
sharpness = []
resolution = []
coverage = []
times = []
labels = []
for key in sorted(models.keys()):
sharpness.append(models[key]['sharpness'])
resolution.append(models[key]['resolution'])
coverage.append(models[key]['coverage'])
times.append(models[key]['times'])
labels.append(models[key]['label'])
axes[0].boxplot(sharpness, labels=labels, autorange=True, showmeans=True)
axes[1].boxplot(resolution, labels=labels, autorange=True, showmeans=True)
axes[2].boxplot(coverage, labels=labels, autorange=True, showmeans=True)
plt.tight_layout()
Util.show_and_save_image(fig, file, save)
def plot_dataframe_interval(file_synthetic, file_analytic, experiments, tam, save=False, file=None,
sort_columns=['COVAVG', 'SHARPAVG', 'COVSTD', 'SHARPSTD'],
sort_ascend=[True, False, True, True],save_best=False,
ignore=None, replace=None):
fig, axes = plt.subplots(nrows=3, ncols=1, figsize=tam)
axes[0].set_title('Sharpness')
axes[1].set_title('Resolution')
axes[2].set_title('Coverage')
dat_syn = pd.read_csv(file_synthetic, sep=";", usecols=interval_dataframe_synthetic_columns())
bests = find_best(dat_syn, sort_columns, sort_ascend)
dat_ana = pd.read_csv(file_analytic, sep=";", usecols=interval_dataframe_analytic_columns(experiments))
data_columns = analytical_data_columns(experiments)
if save_best:
dat = pd.DataFrame.from_dict(bests, orient='index')
dat.to_csv(Util.uniquefilename(file_synthetic.replace("synthetic","best")), sep=";", index=False)
sharpness = []
resolution = []
coverage = []
times = []
labels = []
for b in sorted(bests.keys()):
if check_ignore_list(b, ignore):
continue
best = bests[b]
df = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"])
& (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])]
sharpness.append( extract_measure(df,'Sharpness',data_columns) )
resolution.append(extract_measure(df, 'Resolution', data_columns))
coverage.append(extract_measure(df, 'Coverage', data_columns))
times.append(extract_measure(df, 'TIME', data_columns))
labels.append(check_replace_list(best["Model"] + " " + str(best["Order"]), replace))
axes[0].boxplot(sharpness, labels=labels, autorange=True, showmeans=True)
axes[0].set_title("Sharpness")
axes[1].boxplot(resolution, labels=labels, autorange=True, showmeans=True)
axes[1].set_title("Resolution")
axes[2].boxplot(coverage, labels=labels, autorange=True, showmeans=True)
axes[2].set_title("Coverage")
axes[2].set_ylim([0, 1.1])
plt.tight_layout()
Util.show_and_save_image(fig, file, save)
def unified_scaled_interval_pinball(experiments, tam, save=False, file=None,
sort_columns=['COVAVG','SHARPAVG','COVSTD','SHARPSTD'],
sort_ascend=[True, False, True, True], save_best=False,
ignore=None, replace=None):
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=tam)
axes[0].set_title(r'$\tau=0.05$')
axes[1].set_title(r'$\tau=0.25$')
axes[2].set_title(r'$\tau=0.75$')
axes[3].set_title(r'$\tau=0.95$')
models = {}
for experiment in experiments:
mdl = {}
dat_syn = pd.read_csv(experiment[0], sep=";", usecols=interval_dataframe_synthetic_columns())
bests = find_best(dat_syn, sort_columns, sort_ascend)
dat_ana = pd.read_csv(experiment[1], sep=";", usecols=interval_dataframe_analytic_columns(experiment[2]))
q05 = []
q25 = []
q75 = []
q95 = []
data_columns = analytical_data_columns(experiment[2])
for b in sorted(bests.keys()):
if check_ignore_list(b, ignore):
continue
if b not in models:
models[b] = {}
models[b]['q05'] = []
models[b]['q25'] = []
models[b]['q75'] = []
models[b]['q95'] = []
if b not in mdl:
mdl[b] = {}
mdl[b]['q05'] = []
mdl[b]['q25'] = []
mdl[b]['q75'] = []
mdl[b]['q95'] = []
best = bests[b]
print(best)
tmp = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"])
& (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])]
tmpl = extract_measure(tmp, 'Q05', data_columns)
mdl[b]['q05'].extend(tmpl)
q05.extend(tmpl)
tmpl = extract_measure(tmp, 'Q25', data_columns)
mdl[b]['q25'].extend(tmpl)
q25.extend(tmpl)
tmpl = extract_measure(tmp, 'Q75', data_columns)
mdl[b]['q75'].extend(tmpl)
q75.extend(tmpl)
tmpl = extract_measure(tmp, 'Q95', data_columns)
mdl[b]['q95'].extend(tmpl)
q95.extend(tmpl)
models[b]['label'] = check_replace_list(best["Model"] + " " + str(best["Order"]), replace)
q05_param = scale_params(q05)
q25_param = scale_params(q25)
q75_param = scale_params(q75)
q95_param = scale_params(q95)
        for key in sorted(mdl.keys()):
models[key]['q05'].extend(scale(mdl[key]['q05'], q05_param))
models[key]['q25'].extend(scale(mdl[key]['q25'], q25_param))
models[key]['q75'].extend(scale(mdl[key]['q75'], q75_param))
models[key]['q95'].extend(scale(mdl[key]['q95'], q95_param))
q05 = []
q25 = []
q75 = []
q95 = []
labels = []
for key in sorted(models.keys()):
q05.append(models[key]['q05'])
q25.append(models[key]['q25'])
q75.append(models[key]['q75'])
q95.append(models[key]['q95'])
labels.append(models[key]['label'])
axes[0].boxplot(q05, labels=labels, vert=False, autorange=True, showmeans=True)
axes[1].boxplot(q25, labels=labels, vert=False, autorange=True, showmeans=True)
axes[2].boxplot(q75, labels=labels, vert=False, autorange=True, showmeans=True)
axes[3].boxplot(q95, labels=labels, vert=False, autorange=True, showmeans=True)
plt.tight_layout()
Util.show_and_save_image(fig, file, save)
def plot_dataframe_interval_pinball(file_synthetic, file_analytic, experiments, tam, save=False, file=None,
sort_columns=['COVAVG','SHARPAVG','COVSTD','SHARPSTD'],
sort_ascend=[True, False, True, True], save_best=False,
ignore=None, replace=None):
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=tam)
axes[0].set_title(r'$\tau=0.05$')
axes[1].set_title(r'$\tau=0.25$')
axes[2].set_title(r'$\tau=0.75$')
axes[3].set_title(r'$\tau=0.95$')
dat_syn = pd.read_csv(file_synthetic, sep=";", usecols=interval_dataframe_synthetic_columns())
bests = find_best(dat_syn, sort_columns, sort_ascend)
dat_ana = pd.read_csv(file_analytic, sep=";", usecols=interval_dataframe_analytic_columns(experiments))
data_columns = analytical_data_columns(experiments)
if save_best:
dat = pd.DataFrame.from_dict(bests, orient='index')
dat.to_csv(Util.uniquefilename(file_synthetic.replace("synthetic","best")), sep=";", index=False)
q05 = []
q25 = []
q75 = []
q95 = []
labels = []
for b in sorted(bests.keys()):
if check_ignore_list(b, ignore):
continue
best = bests[b]
df = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"])
& (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])]
q05.append(extract_measure(df, 'Q05', data_columns))
q25.append(extract_measure(df, 'Q25', data_columns))
q75.append(extract_measure(df, 'Q75', data_columns))
q95.append(extract_measure(df, 'Q95', data_columns))
labels.append(check_replace_list(best["Model"] + " " + str(best["Order"]), replace))
axes[0].boxplot(q05, labels=labels, vert=False, autorange=True, showmeans=True)
axes[1].boxplot(q25, labels=labels, vert=False, autorange=True, showmeans=True)
axes[2].boxplot(q75, labels=labels, vert=False, autorange=True, showmeans=True)
axes[3].boxplot(q95, labels=labels, vert=False, autorange=True, showmeans=True)
plt.tight_layout()
Util.show_and_save_image(fig, file, save)
def save_dataframe_probabilistic(experiments, file, objs, crps, times, save, synthetic, steps, method):
"""
Save benchmark results for m-step ahead probabilistic forecasters
:param experiments:
:param file:
:param objs:
:param crps_interval:
:param crps_distr:
:param times:
:param times2:
:param save:
:param synthetic:
:return:
"""
ret = []
if synthetic:
        for k in sorted(objs.keys()):
            try:
                mod = []
                mfts = objs[k]
                mod.append(mfts.shortname)
                mod.append(mfts.order)
                if not mfts.benchmark_only:
                    mod.append(mfts.partitioner.name)
                    mod.append(mfts.partitioner.partitions)
                    mod.append(len(mfts))
                else:
                    mod.append('-')
                    mod.append('-')
                    mod.append('-')
                mod.append(steps[k])
                mod.append(method[k])
                mod.append(np.round(np.nanmean(crps[k]), 2))
                mod.append(np.round(np.nanstd(crps[k]), 2))
                mod.append(np.round(np.nanmean(times[k]), 4))
                mod.append(np.round(np.nanstd(times[k]), 4))
                ret.append(mod)
            except Exception as ex:
                print("Error while saving ", k)
                print("Exception ", ex)
columns = probabilistic_dataframe_synthetic_columns()
else:
for k in sorted(objs.keys()):
try:
mfts = objs[k]
n = mfts.shortname
o = mfts.order
if not mfts.benchmark_only:
s = mfts.partitioner.name
p = mfts.partitioner.partitions
l = len(mfts)
else:
s = '-'
p = '-'
l = '-'
st = steps[k]
mt = method[k]
tmp = [n, o, s, p, l, st, mt, 'CRPS']
tmp.extend(crps[k])
ret.append(deepcopy(tmp))
tmp = [n, o, s, p, l, st, mt, 'TIME']
tmp.extend(times[k])
ret.append(deepcopy(tmp))
except Exception as ex:
print("Erro ao salvar ", k)
print("Exceção ", ex)
columns = probabilistic_dataframe_analytic_columns(experiments)
dat = pd.DataFrame(ret, columns=columns)
if save: dat.to_csv(Util.uniquefilename(file), sep=";")
return dat
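# Hedged usage sketch (all names and values below are assumptions for
# illustration, not from a real benchmark run):
#
#   dat = save_dataframe_probabilistic(
#       experiments=3, file="benchmarks.csv", objs={0: model},
#       crps={0: [1.2, 1.1, 1.3]}, times={0: [0.5, 0.6, 0.4]},
#       save=True, synthetic=False, steps={0: 1}, method={0: None})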
def probabilistic_dataframe_analytic_columns(experiments):
columns = [str(k) for k in np.arange(0, experiments)]
columns.insert(0, "Model")
columns.insert(1, "Order")
columns.insert(2, "Scheme")
columns.insert(3, "Partitions")
columns.insert(4, "Size")
columns.insert(5, "Steps")
columns.insert(6, "Method")
columns.insert(7, "Measure")
return columns
def probabilistic_dataframe_synthetic_columns():
columns = ["Model", "Order", "Scheme", "Partitions","Size", "Steps", "Method", "CRPSAVG", "CRPSSTD",
"TIMEAVG", "TIMESTD"]
return columns
def cast_dataframe_to_synthetic_probabilistic(df, data_columns):
crps1 = extract_measure(df, 'CRPS', data_columns)
times1 = extract_measure(df, 'TIME', data_columns)
ret = []
ret.append(np.round(np.nanmean(crps1), 2))
ret.append(np.round(np.nanstd(crps1), 2))
ret.append(np.round(np.nanmean(times1), 2))
ret.append(np.round(np.nanstd(times1), 2))
return ret
def unified_scaled_probabilistic(experiments, tam, save=False, file=None,
sort_columns=['CRPSAVG', 'CRPSSTD'],
sort_ascend=[True, True], save_best=False,
ignore=None, replace=None):
    fig, axes = plt.subplots(nrows=2, ncols=1, figsize=tam)
    axes[0].set_title('CRPS Interval Ahead')
    axes[1].set_title('CRPS Distribution Ahead')
models = {}
for experiment in experiments:
print(experiment)
mdl = {}
dat_syn = pd.read_csv(experiment[0], sep=";", usecols=probabilistic_dataframe_synthetic_columns())
bests = find_best(dat_syn, sort_columns, sort_ascend)
dat_ana = pd.read_csv(experiment[1], sep=";", usecols=probabilistic_dataframe_analytic_columns(experiment[2]))
crps1 = []
crps2 = []
data_columns = analytical_data_columns(experiment[2])
for b in sorted(bests.keys()):
if check_ignore_list(b, ignore):
continue
if b not in models:
models[b] = {}
models[b]['crps1'] = []
models[b]['crps2'] = []
if b not in mdl:
mdl[b] = {}
mdl[b]['crps1'] = []
mdl[b]['crps2'] = []
best = bests[b]
print(best)
tmp = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"])
& (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])]
tmpl = extract_measure(tmp, 'CRPS_Interval', data_columns)
mdl[b]['crps1'].extend(tmpl)
crps1.extend(tmpl)
tmpl = extract_measure(tmp, 'CRPS_Distribution', data_columns)
mdl[b]['crps2'].extend(tmpl)
crps2.extend(tmpl)
models[b]['label'] = check_replace_list(best["Model"] + " " + str(best["Order"]), replace)
crps1_param = scale_params(crps1)
crps2_param = scale_params(crps2)
for key in sorted(mdl.keys()):
print(key)
models[key]['crps1'].extend(scale(mdl[key]['crps1'], crps1_param))
models[key]['crps2'].extend(scale(mdl[key]['crps2'], crps2_param))
crps1 = []
crps2 = []
labels = []
for key in sorted(models.keys()):
crps1.append(models[key]['crps1'])
crps2.append(models[key]['crps2'])
labels.append(models[key]['label'])
axes[0].boxplot(crps1, labels=labels, autorange=True, showmeans=True)
axes[1].boxplot(crps2, labels=labels, autorange=True, showmeans=True)
plt.tight_layout()
Util.show_and_save_image(fig, file, save)
def plot_dataframe_probabilistic(file_synthetic, file_analytic, experiments, tam, save=False, file=None,
sort_columns=['CRPS1AVG', 'CRPS2AVG', 'CRPS1STD', 'CRPS2STD'],
sort_ascend=[True, True, True, True], save_best=False,
ignore=None, replace=None):
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=tam)
    axes[0].set_title('CRPS Interval Ahead')
    axes[1].set_title('CRPS Distribution Ahead')
dat_syn = pd.read_csv(file_synthetic, sep=";", usecols=probabilistic_dataframe_synthetic_columns())
bests = find_best(dat_syn, sort_columns, sort_ascend)
dat_ana = pd.read_csv(file_analytic, sep=";", usecols=probabilistic_dataframe_analytic_columns(experiments))
data_columns = analytical_data_columns(experiments)
if save_best:
dat = pd.DataFrame.from_dict(bests, orient='index')
dat.to_csv(Util.uniquefilename(file_synthetic.replace("synthetic","best")), sep=";", index=False)
crps1 = []
crps2 = []
labels = []
for b in sorted(bests.keys()):
if check_ignore_list(b, ignore):
continue
best = bests[b]
df = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"])
& (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])]
crps1.append( extract_measure(df,'CRPS_Interval',data_columns) )
crps2.append(extract_measure(df, 'CRPS_Distribution', data_columns))
labels.append(check_replace_list(best["Model"] + " " + str(best["Order"]), replace))
axes[0].boxplot(crps1, labels=labels, autorange=True, showmeans=True)
axes[1].boxplot(crps2, labels=labels, autorange=True, showmeans=True)
plt.tight_layout()
Util.show_and_save_image(fig, file, save)<|fim▁end|>
| |
<|file_name|>conf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# rtstock documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this docs directory.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import rtstock
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.<|fim▁hole|># General information about the project.
project = u'Realtime Stock'
copyright = u"2016, Rafael Lopes Conde dos Reis"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = rtstock.__version__
# The full version, including alpha/beta/rc tags.
release = rtstock.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'rtstockdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'rtstock.tex',
u'Realtime Stock Documentation',
u'Rafael Lopes Conde dos Reis', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rtstock',
u'Realtime Stock Documentation',
[u'Rafael Lopes Conde dos Reis'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'rtstock',
u'Realtime Stock Documentation',
u'Rafael Lopes Conde dos Reis',
'rtstock',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False<|fim▁end|>
|
master_doc = 'index'
|
<|file_name|>en-ca.js<|end_file_name|><|fim▁begin|>/*<|fim▁hole|> preview: 'Preview'
} );<|fim▁end|>
|
Copyright (c) 2003-2017, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang( 'preview', 'en-ca', {
|
<|file_name|>app.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
frest - flask restful api frame
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This project is the frame of a RESTful API server built with Flask.
<|fim▁hole|>"""
import os
from flask_script import Server, Manager
from flask_migrate import Migrate, MigrateCommand
from app import app, db, routes, handler
from app.config import APP_DEFAULT_PORT, APP_SECRET_KEY, ENVIRONMENT
from app.modules.auth import login, token
if __name__ == '__main__':
port = int(os.environ.get('PORT', APP_DEFAULT_PORT))
app.secret_key = APP_SECRET_KEY
db.create_all()
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if ENVIRONMENT == 'production' or ENVIRONMENT == 'testing':
manager.add_command('runserver', Server(host='0.0.0.0', port=port, use_debugger=False))
else:
manager.add_command('runserver', Server(host='0.0.0.0', port=port, use_debugger=True))
manager.run()<|fim▁end|>
|
:copyright: (C) 2017 [email protected]
:license: MIT, see LICENSE for more details.
|
<|file_name|>repeat.py<|end_file_name|><|fim▁begin|>from django import template
register = template.Library()
class RepeatNode(template.Node):
def __init__(self, nodelist, count):
self.nodelist = nodelist
self.count = template.Variable(count)
def render(self, context):
output = self.nodelist.render(context)
        return output * int(self.count.resolve(context))
def repeat(parser, token):
"""
Repeats the containing text a certain number of times.
Requires a single argument, an integer, to indicate the number of times to
repeat the enclosing content.
Example::
{% repeat 3 %}foo{% endrepeat %}
Yields::
foofoofoo
"""
bits = token.split_contents()
if len(bits) != 2:
raise template.TemplateSyntaxError('%r tag requires 1 argument.' % bits[0])
count = bits[1]
nodelist = parser.parse(('endrepeat',))
parser.delete_first_token()
return RepeatNode(nodelist, count)<|fim▁hole|>
repeat = register.tag(repeat)<|fim▁end|>
| |
<|file_name|>authentication.rs<|end_file_name|><|fim▁begin|>use std::io::{self, Read};
use aes::Aes192;
use byteorder::{BigEndian, ByteOrder};
use hmac::Hmac;
use pbkdf2::pbkdf2;
use protobuf::ProtobufEnum;
use serde::{Deserialize, Serialize};
use sha1::{Digest, Sha1};
use crate::protocol::authentication::AuthenticationType;
/// The credentials are used to log into the Spotify API.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Credentials {
pub username: String,
#[serde(serialize_with = "serialize_protobuf_enum")]
#[serde(deserialize_with = "deserialize_protobuf_enum")]
pub auth_type: AuthenticationType,
#[serde(alias = "encoded_auth_blob")]
#[serde(serialize_with = "serialize_base64")]
#[serde(deserialize_with = "deserialize_base64")]
pub auth_data: Vec<u8>,
}
impl Credentials {
/// Intialize these credentials from a username and a password.
///
/// ### Example
/// ```rust
/// use librespot_core::authentication::Credentials;
///
/// let creds = Credentials::with_password("my account", "my password");
/// ```
pub fn with_password(username: impl Into<String>, password: impl Into<String>) -> Credentials {
Credentials {
username: username.into(),
auth_type: AuthenticationType::AUTHENTICATION_USER_PASS,
auth_data: password.into().into_bytes(),
}
}
pub fn with_blob(
username: impl Into<String>,
encrypted_blob: impl AsRef<[u8]>,
device_id: impl AsRef<[u8]>,
) -> Credentials {<|fim▁hole|> stream.read_exact(&mut data)?;
Ok(data[0])
}
fn read_int<R: Read>(stream: &mut R) -> io::Result<u32> {
let lo = read_u8(stream)? as u32;
if lo & 0x80 == 0 {
return Ok(lo);
}
let hi = read_u8(stream)? as u32;
Ok(lo & 0x7f | hi << 7)
}
fn read_bytes<R: Read>(stream: &mut R) -> io::Result<Vec<u8>> {
let length = read_int(stream)?;
let mut data = vec![0u8; length as usize];
stream.read_exact(&mut data)?;
Ok(data)
}
let username = username.into();
let secret = Sha1::digest(device_id.as_ref());
let key = {
let mut key = [0u8; 24];
pbkdf2::<Hmac<Sha1>>(&secret, username.as_bytes(), 0x100, &mut key[0..20]);
let hash = &Sha1::digest(&key[..20]);
key[..20].copy_from_slice(hash);
BigEndian::write_u32(&mut key[20..], 20);
key
};
// decrypt data using ECB mode without padding
let blob = {
use aes::cipher::generic_array::typenum::Unsigned;
use aes::cipher::generic_array::GenericArray;
use aes::cipher::{BlockCipher, NewBlockCipher};
let mut data = base64::decode(encrypted_blob).unwrap();
let cipher = Aes192::new(GenericArray::from_slice(&key));
let block_size = <Aes192 as BlockCipher>::BlockSize::to_usize();
assert_eq!(data.len() % block_size, 0);
for chunk in data.chunks_exact_mut(block_size) {
cipher.decrypt_block(GenericArray::from_mut_slice(chunk));
}
let l = data.len();
for i in 0..l - 0x10 {
data[l - i - 1] ^= data[l - i - 0x11];
}
data
};
let mut cursor = io::Cursor::new(blob.as_slice());
read_u8(&mut cursor).unwrap();
read_bytes(&mut cursor).unwrap();
read_u8(&mut cursor).unwrap();
let auth_type = read_int(&mut cursor).unwrap();
let auth_type = AuthenticationType::from_i32(auth_type as i32).unwrap();
read_u8(&mut cursor).unwrap();
let auth_data = read_bytes(&mut cursor).unwrap();
Credentials {
username,
auth_type,
auth_data,
}
}
}
fn serialize_protobuf_enum<T, S>(v: &T, ser: S) -> Result<S::Ok, S::Error>
where
T: ProtobufEnum,
S: serde::Serializer,
{
serde::Serialize::serialize(&v.value(), ser)
}
fn deserialize_protobuf_enum<'de, T, D>(de: D) -> Result<T, D::Error>
where
T: ProtobufEnum,
D: serde::Deserializer<'de>,
{
let v: i32 = serde::Deserialize::deserialize(de)?;
T::from_i32(v).ok_or_else(|| serde::de::Error::custom("Invalid enum value"))
}
fn serialize_base64<T, S>(v: &T, ser: S) -> Result<S::Ok, S::Error>
where
T: AsRef<[u8]>,
S: serde::Serializer,
{
serde::Serialize::serialize(&base64::encode(v.as_ref()), ser)
}
fn deserialize_base64<'de, D>(de: D) -> Result<Vec<u8>, D::Error>
where
D: serde::Deserializer<'de>,
{
let v: String = serde::Deserialize::deserialize(de)?;
base64::decode(&v).map_err(|e| serde::de::Error::custom(e.to_string()))
}<|fim▁end|>
|
fn read_u8<R: Read>(stream: &mut R) -> io::Result<u8> {
let mut data = [0u8];
|
<|file_name|>wifi-olsr-flowmon.py<|end_file_name|><|fim▁begin|># -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the<|fim▁hole|># You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <[email protected]>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
try:
import ns.visualizer
except ImportError:
pass
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = ns.wifi.NqosWifiMacHelper.Default()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
urv = ns.core.UniformRandomVariable()
app.Start(ns.core.Seconds(urv.GetValue(20, 30)))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))<|fim▁end|>
|
# GNU General Public License for more details.
#
|
<|file_name|>app.py<|end_file_name|><|fim▁begin|>from django.conf.urls import url
from cobra.core.application import Application
from cobra.core.loading import get_class
class AutoCheckDashboardApplication(Application):
name = None
index_view = get_class('dashboard.autocheck.views', 'IndexView')<|fim▁hole|>
]
return self.post_process_urls(urls)
application = AutoCheckDashboardApplication()<|fim▁end|>
|
def get_urls(self):
urls = [
url(r'^$', self.index_view.as_view(), name='autocheck-index'),
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|>const Hapi = require('hapi');
const Request = require('request');
const port = process.env.PORT || 8080;
const server = new Hapi.Server();
const cephalopods = 'http://api.gbif.org/v1/species/136';
server.connection({
port: port,
host: '0.0.0.0'
});
server.route({
method: 'GET',
path: '/',
handler: function (request, reply) {
Request(cephalopods, function (err, response, body) {
return reply(body);<|fim▁hole|> });
}
});
server.start(function () {
console.log('Server started on ' + port);
});<|fim▁end|>
| |
<|file_name|>test_ovs_firewall.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_config import cfg
from neutron.agent.common import config
from neutron.agent.common import ovs_lib
from neutron.common import constants
from networking_vsphere.drivers import ovs_firewall as ovs_fw
from networking_vsphere.tests import base
fake_port = {'security_group_source_groups': 'abc',
'mac_address': '00:11:22:33:44:55',
'network_id': "netid",
'id': "123",
'security_groups': "abc",
'lvid': "100",
'sg_provider_rules': [],
'security_group_rules': [
{"direction": "ingress",
"protocol": "tcp",
"port_range_min": 2001,
"port_range_max": 2009,
"source_port_range_min": 67,
"source_port_range_max": 68,
"ethertype": "IPv4",
"source_ip_prefix": "150.1.1.0/22",
"dest_ip_prefix": "170.1.1.0/22"}]}
fake_res_port = {'security_group_source_groups': 'abc',
'mac_address': '00:11:22:33:44:55',
'network_id': "netid",
'id': "123",
'security_groups': "abc",
'lvid': "100",
'device': "123"}
cookie = ("0x%x" % (hash("123") & 0xffffffffffffffff))
class TestOVSFirewallDriver(base.TestCase):
@mock.patch('networking_vsphere.drivers.ovs_firewall.OVSFirewallDriver.'
'check_ovs_firewall_restart')
@mock.patch('networking_vsphere.drivers.ovs_firewall.OVSFirewallDriver.'
'setup_base_flows')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.create')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.set_secure_mode')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.get_port_ofport')
@mock.patch('neutron.agent.ovsdb.api.'
'API.get')
def setUp(self, mock_ovsdb_api, mock_get_port_ofport, mock_set_secure_mode,
mock_create_ovs_bridge, mock_setup_base_flows,
mock_check_ovs_firewall_restart,):
super(TestOVSFirewallDriver, self).setUp()
config.register_root_helper(cfg.CONF)
cfg.CONF.set_override('security_bridge_mapping',
"fake_sec_br:fake_if", 'SECURITYGROUP')
mock_get_port_ofport.return_value = 5
self.ovs_firewall = ovs_fw.OVSFirewallDriver()
self.ovs_firewall.sg_br = mock.Mock()
self.mock_br = ovs_lib.DeferredOVSBridge(self.ovs_firewall.sg_br)
self.LOG = ovs_fw.LOG
def test_get_compact_port(self):
compact_port = {'security_group_source_groups': 'abc',
'mac_address': '00:11:22:33:44:55',
'network_id': "netid",
'id': "123",
'device': "123",
'security_groups': "abc",
'lvid': "100"}
res = self.ovs_firewall._get_compact_port(fake_port)
self.assertEqual(compact_port, res)
def test_remove_ports_from_provider_cache(self):
self.ovs_firewall.provider_port_cache = set(['123', '124', '125'])
self.ovs_firewall.remove_ports_from_provider_cache(['123', '125'])
self.assertEqual(set(['124']), self.ovs_firewall.provider_port_cache)
self.ovs_firewall.provider_port_cache = set(['123', '124', '125'])
self.ovs_firewall.remove_ports_from_provider_cache(['121', '125'])
self.assertEqual(set(['123', '124']),
self.ovs_firewall.provider_port_cache)
def test_add_ovs_flow(self):
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal")
mock_add_flow.assert_called_with(priority=0, actions='normal',
table=1)
def test_add_ovs_flow_with_protocol(self):
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
# rule with protocol
self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
protocol="arp")
mock_add_flow.assert_called_with(table=1, priority=0,
proto="arp", actions="normal")
def test_add_ovs_flow_with_dest_mac(self):
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
# rule with dl_dest
dest_mac = "01:00:00:00:00:00"
self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
dl_dest=dest_mac)
mock_add_flow.assert_called_with(table=1, priority=0,
dl_dst=dest_mac,
actions="normal")
def test_add_ovs_flow_with_tcpflag(self):
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
# rule with tcp_flags
t_flag = "+rst"
self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
tcp_flag=t_flag)
mock_add_flow.assert_called_with(table=1, priority=0,
proto=constants.PROTO_NAME_TCP,
tcp_flags=t_flag,
actions="normal")
def test_add_ovs_flow_with_icmptype(self):
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
# rule with icmp_req_type
self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
icmp_req_type=11)
mock_add_flow.assert_called_with(table=1, priority=0,
proto=constants.PROTO_NAME_ICMP,
icmp_type=11, actions="normal")
def test_add_ports_to_filter(self):
self.ovs_firewall.filtered_ports = {}
self.ovs_firewall.add_ports_to_filter([fake_port])
self.assertIsNotNone(self.ovs_firewall.filtered_ports)
ret_port = self.ovs_firewall.filtered_ports["123"]
self.assertEqual(fake_res_port, ret_port)
def test_setup_aap_flows(self):
port_with_app = copy.deepcopy(fake_port)
key = "allowed_address_pairs"
port_with_app[key] = [{'ip_address': '10.0.0.2',
'mac_address': 'aa:bb:cc:dd:ee:aa'},
{'ip_address': '10.0.0.3',
'mac_address': 'aa:bb:cc:dd:ee:ab'}]
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100), \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._setup_aap_flows(self.mock_br, port_with_app)
self.assertEqual(2, mock_add_flow.call_count)
def test_setup_aap_flows_invalid_call(self):
port_with_app = copy.deepcopy(fake_port)
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100), \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._setup_aap_flows(self.mock_br, port_with_app)
self.assertFalse(mock_add_flow.called)
def test_get_net_prefix_len(self):
ip_addr = "150.1.1.0/22"
length = self.ovs_firewall._get_net_prefix_len(ip_addr)
self.assertNotEqual(0, length)
ip_addr = None
length = self.ovs_firewall._get_net_prefix_len(ip_addr)
self.assertEqual(0, length)
def test_get_protocol(self):
proto = self.ovs_firewall._get_protocol("IPv4", None)
self.assertEqual(['ip'], proto)
proto = self.ovs_firewall._get_protocol("IPv6", None)
self.assertEqual(['ipv6'], proto)
proto = self.ovs_firewall._get_protocol("IPv6", 'icmp')
self.assertEqual(['icmp6'], proto)
proto = self.ovs_firewall._get_protocol("IPv4", 'icmp')
self.assertEqual(['icmp'], proto)
proto = self.ovs_firewall._get_protocol("IPv4", 'udp')
self.assertEqual(['udp'], proto)
proto = self.ovs_firewall._get_protocol("IPv6", 'tcp')
self.assertEqual(['tcp'], proto)
proto = self.ovs_firewall._get_protocol("IPv6", 'unknown')
self.assertEqual(['ipv6', 'unknown'], proto)
def test_add_flow_with_range(self):
flow = {"priority": 1}
res_flow = {"priority": 1,
"tp_dst": 1,
"tp_src": 1}
port = fake_port
direction = "fake_direction"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_add_flows_to_sec_br'
) as mock_add_flows_sec_br:
self.ovs_firewall._add_flow_with_range(self.mock_br, port, flow,
direction, 1, 2, 1, 2)
mock_add_flows_sec_br.called_with(res_flow)
self.assertEqual(4, mock_add_flows_sec_br.call_count)
def test_add_flow_with_multiple_range(self):
flow = {"priority": 1}
port = fake_port
direction = "fake_direction"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_add_flows_to_sec_br'
) as mock_add_flows_sec_br:
self.ovs_firewall._add_flow_with_range(self.mock_br, port, flow,
direction, 1, 3, 1, 2)
self.assertEqual(6, mock_add_flows_sec_br.call_count)
def test_add_flow_with_range_all_ports(self):
flow = {"priority": 1}
port = fake_port
direction = "fake_direction"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_add_flows_to_sec_br'
) as mock_add_flows_sec_br:
self.ovs_firewall._add_flow_with_range(self.mock_br, port,
flow, direction, 1, 65535)
self.assertEqual(1, mock_add_flows_sec_br.call_count)
def test_add_flow_with_range_some_ports(self):
flow = {"priority": 1}
port = fake_port
direction = "fake_direction"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_add_flows_to_sec_br'
) as mock_add_flows_sec_br:
self.ovs_firewall._add_flow_with_range(self.mock_br, port,
flow, direction, 1, 100)
self.assertEqual(100, mock_add_flows_sec_br.call_count)
def test_add_flows_to_sec_br_ingress_direction(self):
flows = {}
port = fake_port
direction = "ingress"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._add_flows_to_sec_br(self.mock_br, port,
flows, direction)
self.assertTrue(mock_add_flow.called)
self.assertEqual(1, mock_add_flow.call_count)
def test_add_flows_to_sec_br_egress_direction(self):
flows = {}
port = fake_port
flows['dl_src'] = '01:02:03:04:05:06'
flows['proto'] = 'ip'
flows['dl_vlan'] = 25
port['fixed_ips'] = [u'70.0.0.5']
direction = "egress"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._add_flows_to_sec_br(self.mock_br, port,
flows, direction)
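            # the egress direction installs two flows for the single fixed IP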
self.assertTrue(mock_add_flow.called)
self.assertEqual(2, mock_add_flow.call_count)
def test_add_flows_to_sec_br_egress_direction_multiple_fixed_ips(self):
flows = {}
port = fake_port
flows['dl_src'] = '01:02:03:04:05:06'
flows['proto'] = 'ip'
flows['dl_vlan'] = 25
port['fixed_ips'] = [u'70.0.0.5', u'80.0.0.6']
direction = "egress"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._add_flows_to_sec_br(self.mock_br, port,
flows, direction)
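            # two flows per fixed IP, and this port has two fixed IPs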
self.assertTrue(mock_add_flow.called)
self.assertEqual(4, mock_add_flow.call_count)
def test_add_flows_call_no_vlan(self):
port_with_app = copy.deepcopy(fake_port)
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=None), \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow,\
mock.patch.object(self.LOG, 'error') as mock_error_log:
self.ovs_firewall._add_flows(self.mock_br, port_with_app, cookie)
self.assertFalse(mock_add_flow.called)
self.assertTrue(mock_error_log.called)
def test_add_flows_call_tcp(self):
port = copy.deepcopy(fake_port)
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100) as mock_get_vlan, \
mock.patch.object(self.ovs_firewall, '_get_protocol',
return_value=['tcp']) as mock_get_proto, \
mock.patch.object(self.ovs_firewall, '_add_flow_with_range'
) as mock_add_range_flows, \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow'):
self.ovs_firewall._add_flows(self.mock_br, port, cookie)
self.assertTrue(mock_get_vlan.called)
self.assertTrue(mock_get_proto.called)
self.assertTrue(mock_add_range_flows.called)
def test_add_flows_call_normal(self):
port = copy.deepcopy(fake_port)
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100) as mock_get_vlan, \
mock.patch.object(self.ovs_firewall, '_get_protocol',
return_value=['ip']) as mock_get_proto, \
mock.patch.object(self.ovs_firewall, '_add_flow_with_range'
) as mock_add_range_flows, \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._add_flows(self.mock_br, port, cookie)
self.assertTrue(mock_get_vlan.called)
self.assertTrue(mock_get_proto.called)
self.assertFalse(mock_add_range_flows.called)
self.assertTrue(mock_add_flow.called)
def test_prepare_port_filter(self):
self.ovs_firewall.provider_port_cache = set()
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_setup_aap_flows'
) as mock_aap_flow_fn, \
mock.patch.object(self.ovs_firewall, '_add_flows'
) as mock_add_flow_fn, \
mock.patch.object(self.mock_br, 'add_flow'):
self.ovs_firewall.prepare_port_filter(fake_port)
mock_aap_flow_fn.assert_called_with(self.mock_br, fake_port)
mock_add_flow_fn.assert_called_with(self.mock_br, fake_port,
cookie)
self.assertEqual(2, mock_add_flow_fn.call_count)
ret_port = self.ovs_firewall.filtered_ports['123']
self.assertEqual(fake_res_port, ret_port)
self.assertEqual(set(['123']),
self.ovs_firewall.provider_port_cache)
def test_prepare_port_filter_exception(self):
self.ovs_firewall.provider_port_cache = set()
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_setup_aap_flows',
side_effect=Exception()
) as mock_aap_flow_fn, \
mock.patch.object(self.ovs_firewall, '_add_flows'
) as mock_add_flow_fn, \
mock.patch.object(self.LOG, 'exception'
) as mock_exception_log:
self.ovs_firewall.prepare_port_filter(fake_port)
mock_aap_flow_fn.assert_called_with(self.mock_br, fake_port)
self.assertFalse(mock_add_flow_fn.called)
self.assertTrue(mock_exception_log.called)
self.assertEqual(set(), self.ovs_firewall.provider_port_cache)
def test_remove_only_tenant_flows(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100) as mock_get_vlan, \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'delete_flows'
) as mock_del_flows:
self.ovs_firewall._remove_flows(self.mock_br, "123")
self.assertTrue(mock_get_vlan.called)
self.assertEqual(4, mock_del_flows.call_count)
def test_remove_all_flows(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100) as mock_get_vlan, \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'delete_flows'
) as mock_del_flows:
self.ovs_firewall._remove_flows(self.mock_br, "123", True)
self.assertTrue(mock_get_vlan.called)
self.assertEqual(7, mock_del_flows.call_count)
def test_remove_flows_invalid_port(self):
res_port = copy.deepcopy(fake_res_port)
res_port.pop('mac_address')
self.ovs_firewall.filtered_ports["123"] = res_port
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100) as mock_get_vlan, \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'delete_flows'
) as mock_del_flows, \
mock.patch.object(self.LOG, 'debug') as mock_debug_log:
self.ovs_firewall._remove_flows(self.mock_br, "123")
self.assertTrue(mock_get_vlan.called)
self.assertEqual(1, mock_del_flows.call_count)
self.assertEqual(2, mock_debug_log.call_count)
def test_clean_port_filters(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_flows'
) as mock_rem_flow:
self.ovs_firewall.clean_port_filters(["123"])
mock_rem_flow.assert_called_with(self.mock_br, "123")
self.assertIn("123", self.ovs_firewall.filtered_ports)
def test_clean_port_filters_remove_port(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
self.ovs_firewall.provider_port_cache = set(['123'])
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_flows'
) as mock_rem_flow:
self.ovs_firewall.clean_port_filters(["123"], True)
mock_rem_flow.assert_called_with(self.mock_br, "123", True)
self.assertNotIn("123", self.ovs_firewall.filtered_ports)
self.assertNotIn("123", self.ovs_firewall.provider_port_cache)
def test_clean_port_filters_exception(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
self.ovs_firewall.provider_port_cache = set(['123'])
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_flows',
side_effect=Exception()
) as mock_rem_flow, \
mock.patch.object(self.LOG, 'exception'
) as mock_exception_log:
self.ovs_firewall.clean_port_filters(["123"], True)
mock_rem_flow.assert_called_with(self.mock_br, "123", True)
self.assertTrue(mock_exception_log.called)
self.assertIn("123", self.ovs_firewall.provider_port_cache)
self.assertIn("123", self.ovs_firewall.filtered_ports)
def test_normal_update_port_filters(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
self.ovs_firewall.provider_port_cache = set(['123'])
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_flows'
) as mock_rem_flow, \
mock.patch.object(self.ovs_firewall, '_setup_aap_flows'
) as mock_aap_flow_fn, \
mock.patch.object(self.ovs_firewall, '_add_flows'
) as mock_add_flow_fn:
self.ovs_firewall.update_port_filter(fake_port)
mock_rem_flow.assert_called_with(self.mock_br, "123")
mock_aap_flow_fn.assert_called_with(self.mock_br, fake_port)
mock_add_flow_fn.assert_called_with(self.mock_br, fake_port,
cookie)
self.assertEqual(1, mock_add_flow_fn.call_count)
self.assertIn("123", self.ovs_firewall.filtered_ports)
def test_update_port_filters_for_provider_update(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
self.ovs_firewall.provider_port_cache = set()
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_flows'
) as mock_rem_flow, \
mock.patch.object(self.ovs_firewall, '_setup_aap_flows'
) as mock_aap_flow_fn, \
mock.patch.object(self.ovs_firewall, '_add_flows'
) as mock_add_flow_fn:
self.ovs_firewall.update_port_filter(fake_port)
mock_rem_flow.assert_called_with(self.mock_br, "123", True)
mock_aap_flow_fn.assert_called_with(self.mock_br, fake_port)
mock_add_flow_fn.assert_called_with(self.mock_br, fake_port,
cookie)
self.assertEqual(2, mock_add_flow_fn.call_count)
self.assertIn("123", self.ovs_firewall.filtered_ports)
self.assertIn("123", self.ovs_firewall.provider_port_cache)
def test_update_port_filters_exception(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
self.ovs_firewall.provider_port_cache = set(['123'])
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_flows',
side_effect=Exception()) as mock_rem_flow, \
mock.patch.object(self.ovs_firewall, '_add_flows'
) as mock_add_flow_fn, \
mock.patch.object(self.LOG, 'exception'
) as mock_exception_log:
self.ovs_firewall.update_port_filter(fake_port)
mock_rem_flow.assert_called_with(self.mock_br, "123")
self.assertFalse(mock_add_flow_fn.called)<|fim▁hole|>
def test_ovs_firewall_restart_with_canary_flow(self):
flow = "cookie=0x0, duration=4633.482s, table=23, n_packets=0" + \
"n_bytes=0, idle_age=4633, priority=0 actions=drop"
with mock.patch.object(self.ovs_firewall.sg_br,
"dump_flows_for_table",
return_value=flow) as mock_dump_flow:
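            # a non-empty dump of the canary table means the
            # canary flow is still installed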
canary_flow = self.ovs_firewall.check_ovs_firewall_restart()
self.assertTrue(mock_dump_flow.called)
self.assertTrue(canary_flow)
def test_ovs_firewall_restart_without_canary_flow(self):
flow = ""
with mock.patch.object(self.ovs_firewall.sg_br,
"dump_flows_for_table",
return_value=flow) as mock_dump_flow:
canary_flow = self.ovs_firewall.check_ovs_firewall_restart()
self.assertTrue(mock_dump_flow.called)
self.assertFalse(canary_flow)<|fim▁end|>
|
self.assertIn("123", self.ovs_firewall.filtered_ports)
self.assertTrue(mock_exception_log.called)
|
<|file_name|>ViewLoader.java<|end_file_name|><|fim▁begin|>/* Copyright (C) 2022, Specify Collections Consortium
*
* Specify Collections Consortium, Biodiversity Institute, University of Kansas,
* 1345 Jayhawk Boulevard, Lawrence, Kansas, 66045, USA, [email protected]
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package edu.ku.brc.af.ui.forms.persist;
import static edu.ku.brc.helpers.XMLHelper.getAttr;
import static edu.ku.brc.ui.UIHelper.createDuplicateJGoodiesDef;
import static edu.ku.brc.ui.UIRegistry.getResourceString;
import static org.apache.commons.lang.StringUtils.isEmpty;
import static org.apache.commons.lang.StringUtils.isNotEmpty;
import java.io.File;
import java.io.FileWriter;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.Vector;
import javax.swing.JLabel;
import javax.swing.JScrollPane;
import javax.swing.JTable;
import javax.swing.ScrollPaneConstants;
import javax.swing.table.DefaultTableModel;
import org.apache.commons.betwixt.XMLIntrospector;
import org.apache.commons.betwixt.io.BeanWriter;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.dom4j.Element;
import org.dom4j.Node;
import com.jgoodies.forms.builder.PanelBuilder;
import com.jgoodies.forms.layout.CellConstraints;
import com.jgoodies.forms.layout.FormLayout;
import edu.ku.brc.af.core.db.DBFieldInfo;
import edu.ku.brc.af.core.db.DBRelationshipInfo;
import edu.ku.brc.af.core.db.DBTableChildIFace;
import edu.ku.brc.af.core.db.DBTableIdMgr;
import edu.ku.brc.af.core.db.DBTableInfo;
import edu.ku.brc.af.prefs.AppPreferences;
import edu.ku.brc.af.ui.forms.FormDataObjIFace;
import edu.ku.brc.af.ui.forms.FormHelper;
import edu.ku.brc.af.ui.forms.formatters.UIFieldFormatterIFace;
import edu.ku.brc.af.ui.forms.formatters.UIFieldFormatterMgr;
import edu.ku.brc.af.ui.forms.validation.TypeSearchForQueryFactory;
import edu.ku.brc.ui.CustomFrame;
import edu.ku.brc.ui.UIHelper;
import edu.ku.brc.ui.UIRegistry;
import edu.ku.brc.helpers.XMLHelper;
/**
* Factory that creates Views from ViewSet files. This class uses the singleton ViewSetMgr to verify the View Set Name is unique.
 * If it is not unique then it throws an exception.<br> In this case a "form" is really the definition of a form. The form's object hierarchy
 * is used to create the forms using Swing UI objects. The classes will also be used by the forms editor.
* @code_status Beta
 *
* @author rods
*/
public class ViewLoader
{
public static final int DEFAULT_ROWS = 4;
public static final int DEFAULT_COLS = 10;
public static final int DEFAULT_SUBVIEW_ROWS = 5;
// Statics
private static final Logger log = Logger.getLogger(ViewLoader.class);
private static final ViewLoader instance = new ViewLoader();
private static final String ID = "id";
private static final String NAME = "name";
private static final String TYPE = "type";
private static final String LABEL = "label";
private static final String DESC = "desc";
private static final String TITLE = "title";
private static final String CLASSNAME = "class";
private static final String GETTABLE = "gettable";
private static final String SETTABLE = "settable";
private static final String INITIALIZE = "initialize";
private static final String DSPUITYPE = "dspuitype";
private static final String VALIDATION = "validation";
private static final String ISREQUIRED = "isrequired";
private static final String RESOURCELABELS = "useresourcelabels";
// Data Members
protected boolean doingResourceLabels = false;
protected String viewSetName = null;
// Members needed for verification
protected static boolean doFieldVerification = true;
protected static boolean isTreeClass = false;
protected static DBTableInfo fldVerTableInfo = null;
protected static FormViewDef fldVerFormViewDef = null;
protected static String colDefType = null;
protected static CustomFrame verifyDlg = null;
protected FieldVerifyTableModel fldVerTableModel = null;
// Debug
//protected static ViewDef gViewDef = null;
static
{
doFieldVerification = AppPreferences.getLocalPrefs().getBoolean("verify_field_names", false);
}
/**
* Default Constructor
*
*/
protected ViewLoader()
{
// do nothing
}
/**
* Creates the view.
* @param element the element to build the View from
* @param altViewsViewDefName the hashtable to track the AltView's ViewDefName
* @return the View
* @throws Exception
*/
protected static ViewIFace createView(final Element element,
final Hashtable<AltViewIFace, String> altViewsViewDefName) throws Exception
{
String name = element.attributeValue(NAME);
String objTitle = getAttr(element, "objtitle", null);
String className = element.attributeValue(CLASSNAME);
String desc = getDesc(element);
String businessRules = getAttr(element, "busrules", null);
boolean isInternal = getAttr(element, "isinternal", true);
DBTableInfo ti = DBTableIdMgr.getInstance().getByClassName(className);
if (ti != null && StringUtils.isEmpty(objTitle))
{
objTitle = ti.getTitle();
}
View view = new View(instance.viewSetName,
name,
objTitle,
className,
businessRules != null ? businessRules.trim() : null,
getAttr(element, "usedefbusrule", true),
isInternal,
desc);
// Later we should get this from a properties file.
if (ti != null)
{
view.setTitle(ti.getTitle());
}
/*if (!isInternal)
{
System.err.println(StringUtils.replace(name, " ", "_")+"="+UIHelper.makeNamePretty(name));
}*/
Element altviews = (Element)element.selectSingleNode("altviews");
if (altviews != null)
{
AltViewIFace defaultAltView = null;
AltView.CreationMode defaultMode = AltView.parseMode(getAttr(altviews, "mode", ""), AltViewIFace.CreationMode.VIEW);
String selectorName = altviews.attributeValue("selector");
view.setDefaultMode(defaultMode);
view.setSelectorName(selectorName);
Hashtable<String, Boolean> nameCheckHash = new Hashtable<String, Boolean>();
// iterate through child elements
for ( Iterator<?> i = altviews.elementIterator( "altview" ); i.hasNext(); )
{
Element altElement = (Element) i.next();
AltView.CreationMode mode = AltView.parseMode(getAttr(altElement, "mode", ""), AltViewIFace.CreationMode.VIEW);
String altName = altElement.attributeValue(NAME);
String viewDefName = altElement.attributeValue("viewdef");
String title = altElement.attributeValue(TITLE);
boolean isValidated = getAttr(altElement, "validated", mode == AltViewIFace.CreationMode.EDIT);
boolean isDefault = getAttr(altElement, "default", false);
// Make sure we only have one default view
if (defaultAltView != null && isDefault)
{
isDefault = false;
}
                // Check to make sure all the AltViews have different names.
Boolean nameExists = nameCheckHash.get(altName);
if (nameExists == null) // no need to check the boolean
{
AltView altView = new AltView(view, altName, title, mode, isValidated, isDefault, null); // setting a null viewdef
altViewsViewDefName.put(altView, viewDefName);
if (StringUtils.isNotEmpty(selectorName))
{
altView.setSelectorName(selectorName);
String selectorValue = altElement.attributeValue("selector_value");
if (StringUtils.isNotEmpty(selectorValue))
{
altView.setSelectorValue(selectorValue);
} else
{
FormDevHelper.appendFormDevError("Selector Value is missing for viewDefName["+viewDefName+"] altName["+altName+"]");
}
}
if (defaultAltView == null && isDefault)
{
defaultAltView = altView;
}
view.addAltView(altView);
nameCheckHash.put(altName, true);
} else
{
log.error("The altView name["+altName+"] already exists!");
}
}
// No default Alt View was indicated, so choose the first one (if there is one)
if (defaultAltView == null && view.getAltViews() != null && view.getAltViews().size() > 0)
{
view.getAltViews().get(0).setDefault(true);
}
}
return view;
}
/**
* Creates a ViewDef
* @param element the element to build the ViewDef from
* @return a viewdef
* @throws Exception
*/
private static ViewDef createViewDef(final Element element) throws Exception
{
String name = element.attributeValue(NAME);
String className = element.attributeValue(CLASSNAME);
String gettableClassName = element.attributeValue(GETTABLE);
String settableClassName = element.attributeValue(SETTABLE);
String desc = getDesc(element);
String resLabels = getAttr(element, RESOURCELABELS, "false");
boolean useResourceLabels = resLabels.equals("true");
if (isEmpty(name))
{
FormDevHelper.appendFormDevError("Name is null for element["+element.asXML()+"]");
return null;
}
if (isEmpty(className))
{
FormDevHelper.appendFormDevError("className is null. name["+name+"] for element["+element.asXML()+"]");
return null;
}
if (isEmpty(gettableClassName))
{
FormDevHelper.appendFormDevError("gettableClassName Name is null.name["+name+"] classname["+className+"]");
return null;
}
DBTableInfo tableinfo = DBTableIdMgr.getInstance().getByClassName(className);
ViewDef.ViewType type = null;
try
{
type = ViewDefIFace.ViewType.valueOf(element.attributeValue(TYPE));
} catch (Exception ex)
{
String msg = "view["+name+"] has illegal type["+element.attributeValue(TYPE)+"]";
log.error(msg, ex);
FormDevHelper.appendFormDevError(msg, ex);
return null;
}
ViewDef viewDef = null;//new ViewDef(type, name, className, gettableClassName, settableClassName, desc);
switch (type)
{
case rstable:
case formtable :
case form :
viewDef = createFormViewDef(element, type, name, className, gettableClassName, settableClassName, desc, useResourceLabels, tableinfo);
break;
case table :
//view = createTableView(element, id, name, className, gettableClassName, settableClassName,
// desc, instance.doingResourceLabels, isValidated);
break;
<|fim▁hole|>
case iconview:
viewDef = createIconViewDef(type, name, className, gettableClassName, settableClassName, desc, useResourceLabels);
break;
}
return viewDef;
}
/**
* Gets the optional description text
* @param element the parent element of the desc node
* @return the string of the text or null
*/
protected static String getDesc(final Element element)
{
String desc = null;
Element descElement = (Element)element.selectSingleNode(DESC);
if (descElement != null)
{
desc = descElement.getTextTrim();
}
return desc;
}
/**
* Fill the Vector with all the views from the DOM document
* @param doc the DOM document conforming to form.xsd
* @param views the list to be filled
* @throws Exception for duplicate view set names or if a Form ID is not unique
*/
public static String getViews(final Element doc,
final Hashtable<String, ViewIFace> views,
final Hashtable<AltViewIFace, String> altViewsViewDefName) throws Exception
{
instance.viewSetName = doc.attributeValue(NAME);
/*
System.err.println("#################################################");
System.err.println("# "+instance.viewSetName);
System.err.println("#################################################");
*/
Element viewsElement = (Element)doc.selectSingleNode("views");
if (viewsElement != null)
{
for ( Iterator<?> i = viewsElement.elementIterator( "view" ); i.hasNext(); )
{
Element element = (Element) i.next(); // assume element is NOT null, if it is null it will cause an exception
ViewIFace view = createView(element, altViewsViewDefName);
if (view != null)
{
if (views.get(view.getName()) == null)
{
views.put(view.getName(), view);
} else
{
String msg = "View Set ["+instance.viewSetName+"] ["+view.getName()+"] is not unique.";
log.error(msg);
FormDevHelper.appendFormDevError(msg);
}
}
}
}
return instance.viewSetName;
}
/**
* Fill the Vector with all the views from the DOM document
* @param doc the DOM document conforming to form.xsd
* @param viewDefs the list to be filled
* @param doMapDefinitions tells it to map and clone the definitions for formtables (use false for the FormEditor)
* @return the viewset name
* @throws Exception for duplicate view set names or if a ViewDef name is not unique
*/
public static String getViewDefs(final Element doc,
final Hashtable<String, ViewDefIFace> viewDefs,
@SuppressWarnings("unused") final Hashtable<String, ViewIFace> views,
final boolean doMapDefinitions) throws Exception
{
colDefType = AppPreferences.getLocalPrefs().get("ui.formatting.formtype", UIHelper.getOSTypeAsStr());
instance.viewSetName = doc.attributeValue(NAME);
Element viewDefsElement = (Element)doc.selectSingleNode("viewdefs");
if (viewDefsElement != null)
{
for ( Iterator<?> i = viewDefsElement.elementIterator( "viewdef" ); i.hasNext(); )
{
Element element = (Element) i.next(); // assume element is NOT null, if it is null it will cause an exception
ViewDef viewDef = createViewDef(element);
if (viewDef != null)
{
if (viewDefs.get(viewDef.getName()) == null)
{
viewDefs.put(viewDef.getName(), viewDef);
} else
{
String msg = "View Set ["+instance.viewSetName+"] the View Def Name ["+viewDef.getName()+"] is not unique.";
log.error(msg);
FormDevHelper.appendFormDevError(msg);
}
}
}
if (doMapDefinitions)
{
mapDefinitionViewDefs(viewDefs);
}
}
return instance.viewSetName;
}
/**
* Re-maps and clones the definitions.
* @param viewDefs the hash table to be mapped
* @throws Exception
*/
public static void mapDefinitionViewDefs(final Hashtable<String, ViewDefIFace> viewDefs) throws Exception
{
// Now that all the definitions have been read in
// cycle thru and have all the tableform objects clone there definitions
for (ViewDefIFace viewDef : new Vector<ViewDefIFace>(viewDefs.values()))
{
if (viewDef.getType() == ViewDefIFace.ViewType.formtable)
{
String viewDefName = ((FormViewDefIFace)viewDef).getDefinitionName();
if (viewDefName != null)
{
//log.debug(viewDefName);
ViewDefIFace actualDef = viewDefs.get(viewDefName);
if (actualDef != null)
{
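                    // swap in a clone of the referenced definition,
                    // re-typed and renamed for this formtable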
viewDefs.remove(viewDef.getName());
actualDef = (ViewDef)actualDef.clone();
actualDef.setType(ViewDefIFace.ViewType.formtable);
actualDef.setName(viewDef.getName());
viewDefs.put(actualDef.getName(), actualDef);
} else
{
String msg = "Couldn't find the ViewDef for formtable definition name["+((FormViewDefIFace)viewDef).getDefinitionName()+"]";
log.error(msg);
FormDevHelper.appendFormDevError(msg);
}
}
}
}
}
/**
     * Processes the enable rules of an element.
     * @param element the element to process
     * @return a Hashtable mapping rule ids to rule strings
*/
public static Hashtable<String, String> getEnableRules(final Element element)
{
Hashtable<String, String> rulesList = new Hashtable<String, String>();
if (element != null)
{
Element enableRules = (Element)element.selectSingleNode("enableRules");
if (enableRules != null)
{
// iterate through child elements of root with element name "foo"
for ( Iterator<?> i = enableRules.elementIterator( "rule" ); i.hasNext(); )
{
Element ruleElement = (Element) i.next();
String id = getAttr(ruleElement, ID, "");
if (isNotEmpty(id))
{
rulesList.put(id, ruleElement.getTextTrim());
} else
{
String msg = "The name is missing for rule["+ruleElement.getTextTrim()+"] is missing.";
log.error(msg);
FormDevHelper.appendFormDevError(msg);
}
}
}
} else
{
log.error("View Set ["+instance.viewSetName+"] element ["+element+"] is null.");
}
return rulesList;
}
/**
* Gets the string (or creates one) from a columnDef
* @param element the DOM element to process
* @param attrName the name of the element to go get all the elements (strings) from
* @param numRows the number of rows
     * @param item the JGoodies definition item to be filled in
* @return the String representing the column definition for JGoodies
*/
protected static String createDef(final Element element,
final String attrName,
final int numRows,
final FormViewDef.JGDefItem item)
{
Element cellDef = null;
if (attrName.equals("columnDef"))
{
// For columnDef(s) we can mark one or more as being platform specific
// but if we can't find a default one (no 'os' defined)
// then we ultimately pick the first one.
List<?> list = element.selectNodes(attrName);
if (list.size() == 1)
{
cellDef = (Element)list.get(0); // pick the first one if there is only one.
} else
{
String osTypeStr = UIHelper.getOSTypeAsStr();
Element defCD = null;
Element defOSCD = null;
Element ovrOSCD = null;
for (Object obj : list)
{
Element ce = (Element)obj;
String osType = getAttr(ce, "os", null);
if (osType == null)
{
defCD = ce; // ok we found the default one
} else
{
if (osType.equals(osTypeStr))
{
defOSCD = ce; // we found the matching our OS
}
if (colDefType != null && osType.equals(colDefType))
{
ovrOSCD = ce; // we found the one matching prefs
}
}
}
if (ovrOSCD != null)
{
cellDef = ovrOSCD;
} else if (defOSCD != null)
{
cellDef = defOSCD;
} else if (defCD != null)
{
cellDef = defCD;
} else
{
// ok, we couldn't find one for our platform, so use the default
// or pick the first one.
cellDef = (Element)list.get(0);
}
}
} else
{
// this is for rowDef
cellDef = (Element)element.selectSingleNode(attrName);
}
if (cellDef != null)
{
String cellText = cellDef.getText();
String cellStr = getAttr(cellDef, "cell", null);
String sepStr = getAttr(cellDef, "sep", null);
item.setDefStr(cellText);
item.setCellDefStr(cellStr);
item.setSepDefStr(sepStr);
if (StringUtils.isNotEmpty(cellStr) && StringUtils.isNotEmpty(sepStr))
{
boolean auto = getAttr(cellDef, "auto", false);
item.setAuto(auto);
if (auto)
{
String autoStr = createDuplicateJGoodiesDef(cellStr, sepStr, numRows) +
(StringUtils.isNotEmpty(cellText) ? ("," + cellText) : "");
item.setDefStr(autoStr);
return autoStr;
}
// else
FormDevHelper.appendFormDevError("Element ["+element.getName()+"] Cell or Sep is null for 'dup' or 'auto 'on column def.");
return "";
}
// else
item.setAuto(false);
return cellText;
}
// else
String msg = "Element ["+element.getName()+"] must have a columnDef";
log.error(msg);
FormDevHelper.appendFormDevError(msg);
return "";
}
/**
     * Returns a resource string if it is supposed to
     * @param label the label or the label key
     * @return Returns a resource string if it is supposed to
*/
protected static String getResourceLabel(final String label)
{
if (isNotEmpty(label) && StringUtils.deleteWhitespace(label).length() > 0)
{
return instance.doingResourceLabels ? getResourceString(label) : label;
}
// else
return "";
}
/**
* Returns a Label from the cell and gets the resource string for it if necessary
* @param cellElement the cell
* @return the localized string (if necessary)
*/
protected static String getLabel(final Element cellElement)
{
String lbl = getAttr(cellElement, LABEL, null);
if (lbl == null || lbl.equals("##"))
{
return "##";
}
return getResourceLabel(lbl);
}
/**
* Processes all the rows
* @param element the parent DOM element of the rows
* @param cellRows the list the rows are to be added to
*/
protected static void processRows(final Element element,
final List<FormRowIFace> cellRows,
final DBTableInfo tableinfo)
{
Element rowsElement = (Element)element.selectSingleNode("rows");
if (rowsElement != null)
{
byte rowNumber = 0;
for ( Iterator<?> i = rowsElement.elementIterator( "row" ); i.hasNext(); )
{
Element rowElement = (Element) i.next();
FormRow formRow = new FormRow();
formRow.setRowNumber(rowNumber);
for ( Iterator<?> cellIter = rowElement.elementIterator( "cell" ); cellIter.hasNext(); )
{
Element cellElement = (Element)cellIter.next();
String cellId = getAttr(cellElement, ID, "");
String cellName = getAttr(cellElement, NAME, cellId); // let the name default to the id if it doesn't have a name
int colspan = getAttr(cellElement, "colspan", 1);
int rowspan = getAttr(cellElement, "rowspan", 1);
/*boolean isReq = getAttr(cellElement, ISREQUIRED, false);
if (isReq)
{
System.err.println(String.format("%s\t%s\t%s\t%s", gViewDef.getName(), cellId, cellName, tableinfo != null ? tableinfo.getTitle() : "N/A"));
}*/
FormCell.CellType cellType = null;
FormCellIFace cell = null;
try
{
cellType = FormCellIFace.CellType.valueOf(cellElement.attributeValue(TYPE));
} catch (java.lang.IllegalArgumentException ex)
{
FormDevHelper.appendFormDevError(ex.toString());
FormDevHelper.appendFormDevError(String.format("Cell Name[%s] Id[%s] Type[%s]", cellName, cellId, cellElement.attributeValue(TYPE)));
return;
}
if (doFieldVerification &&
fldVerTableInfo != null &&
cellType == FormCellIFace.CellType.field &&
StringUtils.isNotEmpty(cellId) &&
!cellName.equals("this"))
{
processFieldVerify(cellName, cellId, rowNumber);
}
switch (cellType)
{
case label:
{
cell = formRow.addCell(new FormCellLabel(cellId,
cellName,
getLabel(cellElement),
getAttr(cellElement, "labelfor", ""),
getAttr(cellElement, "icon", null),
getAttr(cellElement, "recordobj", false),
colspan));
String initialize = getAttr(cellElement, INITIALIZE, null);
if (StringUtils.isNotEmpty(initialize))
{
cell.setProperties(UIHelper.parseProperties(initialize));
}
break;
}
case separator:
{
cell = formRow.addCell(new FormCellSeparator(cellId,
cellName,
getLabel(cellElement),
getAttr(cellElement, "collapse", ""),
colspan));
String initialize = getAttr(cellElement, INITIALIZE, null);
if (StringUtils.isNotEmpty(initialize))
{
cell.setProperties(UIHelper.parseProperties(initialize));
}
break;
}
case field:
{
String uitypeStr = getAttr(cellElement, "uitype", "");
String format = getAttr(cellElement, "format", "");
String formatName = getAttr(cellElement, "formatname", "");
String uiFieldFormatterName = getAttr(cellElement, "uifieldformatter", "");
int cols = getAttr(cellElement, "cols", DEFAULT_COLS); // XXX PREF for default width of text field
int rows = getAttr(cellElement, "rows", DEFAULT_ROWS); // XXX PREF for default heightof text area
String validationType = getAttr(cellElement, "valtype", "Changed");
String validationRule = getAttr(cellElement, VALIDATION, "");
String initialize = getAttr(cellElement, INITIALIZE, "");
boolean isRequired = getAttr(cellElement, ISREQUIRED, false);
String pickListName = getAttr(cellElement, "picklist", "");
if (isNotEmpty(format) && isNotEmpty(formatName))
{
String msg = "Both format and formatname cannot both be set! ["+cellName+"] ignoring format";
log.error(msg);
FormDevHelper.appendFormDevError(msg);
format = "";
}
Properties properties = UIHelper.parseProperties(initialize);
if (isEmpty(uitypeStr))
{
// XXX DEBUG ONLY PLease REMOVE LATER
//log.debug("***************************************************************************");
//log.debug("***** Cell Id["+cellId+"] Name["+cellName+"] uitype is empty and should be 'text'. (Please Fix!)");
//log.debug("***************************************************************************");
uitypeStr = "text";
}
// THis switch is used to get the "display type" and
// set up other vars needed for creating the controls
FormCellFieldIFace.FieldType uitype = null;
try
{
uitype = FormCellFieldIFace.FieldType.valueOf(uitypeStr);
} catch (java.lang.IllegalArgumentException ex)
{
FormDevHelper.appendFormDevError(ex.toString());
FormDevHelper.appendFormDevError(String.format("Cell Name[%s] Id[%s] uitype[%s] is in error", cellName, cellId, uitypeStr));
uitype = FormCellFieldIFace.FieldType.text; // default to text
}
String dspUITypeStr = null;
switch (uitype)
{
case textarea:
dspUITypeStr = getAttr(cellElement, DSPUITYPE, "dsptextarea");
break;
case textareabrief:
dspUITypeStr = getAttr(cellElement, DSPUITYPE, "textareabrief");
break;
case querycbx:
{
dspUITypeStr = getAttr(cellElement, DSPUITYPE, "textfieldinfo");
String fmtName = TypeSearchForQueryFactory.getInstance().getDataObjFormatterName(properties.getProperty("name"));
if (isEmpty(formatName) && isNotEmpty(fmtName))
{
formatName = fmtName;
}
break;
}
case formattedtext:
{
validationRule = getAttr(cellElement, VALIDATION, "formatted"); // XXX Is this OK?
dspUITypeStr = getAttr(cellElement, DSPUITYPE, "formattedtext");
//-------------------------------------------------------
// This part should be moved to the ViewFactory
// because it is the only part that need the Schema Information
//-------------------------------------------------------
if (isNotEmpty(uiFieldFormatterName))
{
UIFieldFormatterIFace uiFormatter = UIFieldFormatterMgr.getInstance().getFormatter(uiFieldFormatterName);
if (uiFormatter == null)
{
String msg = "Couldn't find formatter["+uiFieldFormatterName+"]";
log.error(msg);
FormDevHelper.appendFormDevError(msg);
uiFieldFormatterName = "";
uitype = FormCellFieldIFace.FieldType.text;
}
} else // ok now check the schema for the UI formatter
{
if (tableinfo != null)
{
DBFieldInfo fieldInfo = tableinfo.getFieldByName(cellName);
if (fieldInfo != null)
{
if (fieldInfo.getFormatter() != null)
{
uiFieldFormatterName = fieldInfo.getFormatter().getName();
} else if (fieldInfo.getDataClass().isAssignableFrom(Date.class) ||
fieldInfo.getDataClass().isAssignableFrom(Calendar.class))
{
String msg = "Missing Date Formatter for ["+cellName+"]";
log.error(msg);
FormDevHelper.appendFormDevError(msg);
uiFieldFormatterName = "Date";
UIFieldFormatterIFace uiFormatter = UIFieldFormatterMgr.getInstance().getFormatter(uiFieldFormatterName);
if (uiFormatter == null)
{
uiFieldFormatterName = "";
uitype = FormCellFieldIFace.FieldType.text;
}
} else
{
uiFieldFormatterName = "";
uitype = FormCellFieldIFace.FieldType.text;
}
}
}
}
break;
}
case url:
dspUITypeStr = getAttr(cellElement, DSPUITYPE, uitypeStr);
properties = UIHelper.parseProperties(initialize);
break;
case list:
case image:
case tristate:
case checkbox:
case password:
dspUITypeStr = getAttr(cellElement, DSPUITYPE, uitypeStr);
break;
case plugin:
case button:
dspUITypeStr = getAttr(cellElement, DSPUITYPE, uitypeStr);
properties = UIHelper.parseProperties(initialize);
String ttl = properties.getProperty(TITLE);
if (ttl != null)
{
properties.put(TITLE, getResourceLabel(ttl));
}
break;
case spinner:
dspUITypeStr = getAttr(cellElement, DSPUITYPE, "dsptextfield");
properties = UIHelper.parseProperties(initialize);
break;
case combobox:
dspUITypeStr = getAttr(cellElement, DSPUITYPE, "textpl");
if (tableinfo != null)
{
DBFieldInfo fieldInfo = tableinfo.getFieldByName(cellName);
if (fieldInfo != null)
{
if (StringUtils.isNotEmpty(pickListName))
{
fieldInfo.setPickListName(pickListName);
} else
{
pickListName = fieldInfo.getPickListName();
}
}
}
break;
default:
dspUITypeStr = getAttr(cellElement, DSPUITYPE, "dsptextfield");
break;
} //switch
                                FormCellFieldIFace.FieldType dspUIType = null;
                                try
                                {
                                    dspUIType = FormCellFieldIFace.FieldType.valueOf(dspUITypeStr);
                                } catch (java.lang.IllegalArgumentException ex)
                                {
                                    FormDevHelper.appendFormDevError(ex.toString());
                                    FormDevHelper.appendFormDevError(String.format("Cell Name[%s] Id[%s] dspuitype[%s] is in error", cellName, cellId, dspUITypeStr));
                                    dspUIType = FormCellFieldIFace.FieldType.dsptextfield; // fall back to a display text field
                                }
// check to see see if the validation is a node in the cell
if (isEmpty(validationRule))
{
Element valNode = (Element)cellElement.selectSingleNode(VALIDATION);
if (valNode != null)
{
String str = valNode.getTextTrim();
if (isNotEmpty(str))
{
validationRule = str;
}
}
}
boolean isEncrypted = getAttr(cellElement, "isencrypted", false);
boolean isReadOnly = uitype == FormCellFieldIFace.FieldType.dsptextfield ||
uitype == FormCellFieldIFace.FieldType.dsptextarea ||
uitype == FormCellFieldIFace.FieldType.label;
FormCellField field = new FormCellField(FormCellIFace.CellType.field, cellId,
cellName, uitype, dspUIType, format, formatName, uiFieldFormatterName, isRequired,
cols, rows, colspan, rowspan, validationType, validationRule, isEncrypted);
String labelStr = uitype == FormCellFieldIFace.FieldType.checkbox ? getLabel(cellElement) : getAttr(cellElement, "label", "");
field.setLabel(labelStr);
field.setReadOnly(getAttr(cellElement, "readonly", isReadOnly));
field.setDefaultValue(getAttr(cellElement, "default", ""));
field.setPickListName(pickListName);
field.setChangeListenerOnly(getAttr(cellElement, "changesonly", true) && !isRequired);
field.setProperties(properties);
cell = formRow.addCell(field);
break;
}
case command:
{
cell = formRow.addCell(new FormCellCommand(cellId, cellName,
getLabel(cellElement),
getAttr(cellElement, "commandtype", ""),
getAttr(cellElement, "action", "")));
String initialize = getAttr(cellElement, INITIALIZE, null);
if (StringUtils.isNotEmpty(initialize))
{
cell.setProperties(UIHelper.parseProperties(initialize));
}
break;
}
case panel:
{
FormCellPanel cellPanel = new FormCellPanel(cellId, cellName,
getAttr(cellElement, "paneltype", ""),
getAttr(cellElement, "coldef", "p"),
getAttr(cellElement, "rowdef", "p"),
colspan, rowspan);
String initialize = getAttr(cellElement, INITIALIZE, null);
if (StringUtils.isNotEmpty(initialize))
{
cellPanel.setProperties(UIHelper.parseProperties(initialize));
}
processRows(cellElement, cellPanel.getRows(), tableinfo);
fixLabels(cellPanel.getName(), cellPanel.getRows(), tableinfo);
cell = formRow.addCell(cellPanel);
break;
}
case subview:
{
Properties properties = UIHelper.parseProperties(getAttr(cellElement, INITIALIZE, null));
String svViewSetName = cellElement.attributeValue("viewsetname");
if (isEmpty(svViewSetName))
{
svViewSetName = null;
}
if (instance.doingResourceLabels && properties != null)
{
String title = properties.getProperty(TITLE);
if (title != null)
{
properties.setProperty(TITLE, UIRegistry.getResourceString(title));
}
}
String viewName = getAttr(cellElement, "viewname", null);
cell = formRow.addCell(new FormCellSubView(cellId,
cellName,
svViewSetName,
viewName,
cellElement.attributeValue("class"),
getAttr(cellElement, "desc", ""),
getAttr(cellElement, "defaulttype", null),
getAttr(cellElement, "rows", DEFAULT_SUBVIEW_ROWS),
colspan,
rowspan,
getAttr(cellElement, "single", false)));
cell.setProperties(properties);
break;
}
case iconview:
{
String vsName = cellElement.attributeValue("viewsetname");
if (isEmpty(vsName))
{
vsName = instance.viewSetName;
}
String viewName = getAttr(cellElement, "viewname", null);
cell = formRow.addCell(new FormCellSubView(cellId, cellName,
vsName,
viewName,
cellElement.attributeValue("class"),
getAttr(cellElement, "desc", ""),
colspan,
rowspan));
break;
}
case statusbar:
{
cell = formRow.addCell(new FormCell(FormCellIFace.CellType.statusbar, cellId, cellName, colspan, rowspan));
break;
}
default:
{
// what is this?
log.error("Encountered unknown cell type");
continue;
}
} // switch
cell.setIgnoreSetGet(getAttr(cellElement, "ignore", false));
}
cellRows.add(formRow);
rowNumber++;
}
}
}
/**
* @param cellName
* @param cellId
* @param rowNumber
*/
private static void processFieldVerify(final String cellName, final String cellId, final int rowNumber)
{
try
{
boolean isOK = false;
if (StringUtils.contains(cellName, '.'))
{
DBTableInfo tblInfo = fldVerTableInfo;
String[] fieldNames = StringUtils.split(cellName, ".");
for (int i=0;i<fieldNames.length-1;i++)
{
String type = null;
DBTableChildIFace child = tblInfo.getItemByName(fieldNames[i]);
if (child instanceof DBFieldInfo)
{
DBFieldInfo fldInfo = (DBFieldInfo)child;
type = fldInfo.getType();
if (type != null)
{
DBTableInfo tInfo = DBTableIdMgr.getInstance().getByClassName(type);
tblInfo = tInfo != null ? tInfo : tblInfo;
}
isOK = tblInfo.getItemByName(fieldNames[fieldNames.length-1]) != null;
} else if (child instanceof DBRelationshipInfo)
{
DBRelationshipInfo relInfo = (DBRelationshipInfo)child;
type = relInfo.getDataClass().getName();
if (type != null)
{
tblInfo = DBTableIdMgr.getInstance().getByClassName(type);
}
}
//System.out.println(type);
}
if (tblInfo != null)
{
isOK = tblInfo.getItemByName(fieldNames[fieldNames.length-1]) != null;
}
} else
{
isOK = fldVerTableInfo.getItemByName(cellName) != null;
}
if (!isOK)
{
String msg = " ViewSet["+instance.viewSetName+"]\n ViewDef["+fldVerFormViewDef.getName()+"]\n The cell name ["+cellName+"] for cell with Id ["+cellId+"] is not a field\n in Data Object["+fldVerTableInfo.getName()+"]\n on Row ["+rowNumber+"]";
if (!isTreeClass)
{
instance.fldVerTableModel.addRow(instance.viewSetName, fldVerFormViewDef.getName(), cellId, cellName, Integer.toString(rowNumber));
}
log.error(msg);
}
} catch (Exception ex)
{
log.error(ex);
}
}
/**
* @param element the DOM element for building the form
* @param type the type of form to be built
* @param name the name of the form
* @param className the class name of the data object
* @param gettableClassName the class name of the getter
* @param settableClassName the class name of the setter
* @param desc the description
* @param useResourceLabels whether to use resource labels
* @param tableinfo table info
* @return a form view of type "form"
*/
protected static FormViewDef createFormViewDef(final Element element,
final ViewDef.ViewType type,
final String name,
final String className,
final String gettableClassName,
final String settableClassName,
final String desc,
final boolean useResourceLabels,
final DBTableInfo tableinfo)
{
FormViewDef formViewDef = new FormViewDef(type, name, className, gettableClassName, settableClassName, desc,
useResourceLabels, XMLHelper.getAttr(element, "editableDlg", true));
fldVerTableInfo = null;
if (type != ViewDefIFace.ViewType.formtable)
{
if (doFieldVerification)
{
if (instance.fldVerTableModel == null)
{
instance.createFieldVerTableModel();
}
try
{
//log.debug(className);
Class<?> classObj = Class.forName(className);
if (FormDataObjIFace.class.isAssignableFrom(classObj))
{
fldVerTableInfo = DBTableIdMgr.getInstance().getByClassName(className);
isTreeClass = fldVerTableInfo != null && fldVerTableInfo.getFieldByName("highestChildNodeNumber") != null;
fldVerFormViewDef = formViewDef;
}
} catch (ClassNotFoundException ex)
{
String msg = "ClassNotFoundException["+className+"] Name["+name+"]";
log.error(msg);
FormDevHelper.appendFormDevError(msg);
//edu.ku.brc.af.core.UsageTracker.incrHandledUsageCount();
//edu.ku.brc.exceptions.ExceptionTracker.getInstance().capture(ViewLoader.class, comments, ex);
} catch (Exception ex)
{
edu.ku.brc.af.core.UsageTracker.incrHandledUsageCount();
edu.ku.brc.exceptions.ExceptionTracker.getInstance().capture(ViewLoader.class, ex);
}
}
List<FormRowIFace> rows = formViewDef.getRows();
instance.doingResourceLabels = useResourceLabels;
//gViewDef = formViewDef;
processRows(element, rows, tableinfo);
instance.doingResourceLabels = false;
createDef(element, "columnDef", rows.size(), formViewDef.getColumnDefItem());
createDef(element, "rowDef", rows.size(), formViewDef.getRowDefItem());
formViewDef.setEnableRules(getEnableRules(element));
fixLabels(formViewDef.getName(), rows, tableinfo);
} else
{
Node defNode = element.selectSingleNode("definition");
if (defNode != null) {
String defName = defNode.getText();
if (StringUtils.isNotEmpty(defName)) {
formViewDef.setDefinitionName(defName);
return formViewDef;
}
}
String msg = "formtable is missing or has empty <defintion> node";
log.error(msg);
FormDevHelper.appendFormDevError(msg);
return null;
}
return formViewDef;
}
/**
     * @param fieldName the field name or dot-separated field path
     * @param tableInfo the table info in which to look up the field
     * @return the title of the field, or an empty string if it cannot be found
*/
protected static String getTitleFromFieldName(final String fieldName,
final DBTableInfo tableInfo)
{
DBTableChildIFace derivedCI = null;
if (fieldName.indexOf(".") > -1)
{
derivedCI = FormHelper.getChildInfoFromPath(fieldName, tableInfo);
if (derivedCI == null)
{
String msg = "The name 'path' ["+fieldName+"] was not valid in ViewSet ["+instance.viewSetName+"]";
FormDevHelper.appendFormDevError(msg);
log.error(msg);
return "";
}
}
DBTableChildIFace tblChild = derivedCI != null ? derivedCI : tableInfo.getItemByName(fieldName);
if (tblChild == null)
{
String msg = "The Field Name ["+fieldName+"] was not in the Table ["+tableInfo.getTitle()+"] in ViewSet ["+instance.viewSetName+"]";
log.error(msg);
FormDevHelper.appendFormDevError(msg);
return "";
}
return tblChild.getTitle();
}
/**
     * @param name the name of the view def or panel (used in error messages)
     * @param rows the form rows whose labels are to be resolved
     * @param tableInfo the table info used to look up field titles
*/
protected static void fixLabels(final String name,
final List<FormRowIFace> rows,
final DBTableInfo tableInfo)
{
if (tableInfo == null)
{
return;
}
Hashtable<String, String> fldIdMap = new Hashtable<String, String>();
for (FormRowIFace row : rows)
{
for (FormCellIFace cell : row.getCells())
{
if (cell.getType() == FormCellIFace.CellType.field ||
cell.getType() == FormCellIFace.CellType.subview)
{
fldIdMap.put(cell.getIdent(), cell.getName());
}/* else
{
System.err.println("Skipping ["+cell.getIdent()+"] " + cell.getType());
}*/
}
}
for (FormRowIFace row : rows)
{
for (FormCellIFace cell : row.getCells())
{
if (cell.getType() == FormCellIFace.CellType.label)
{
FormCellLabelIFace lblCell = (FormCellLabelIFace)cell;
String label = lblCell.getLabel();
if (label.length() == 0 || label.equals("##"))
{
String idFor = lblCell.getLabelFor();
if (StringUtils.isNotEmpty(idFor))
{
String fieldName = fldIdMap.get(idFor);
if (StringUtils.isNotEmpty(fieldName))
{
if (!fieldName.equals("this"))
{
//FormCellFieldIFace fcf = get
lblCell.setLabel(getTitleFromFieldName(fieldName, tableInfo));
}
} else
{
String msg = "Setting Label - Form control with id["+idFor+"] is not in ViewDef or Panel ["+name+"] in ViewSet ["+instance.viewSetName+"]";
log.error(msg);
FormDevHelper.appendFormDevError(msg);
}
}
}
} else if (cell.getType() == FormCellIFace.CellType.field && cell instanceof FormCellFieldIFace &&
((((FormCellFieldIFace)cell).getUiType() == FormCellFieldIFace.FieldType.checkbox) ||
(((FormCellFieldIFace)cell).getUiType() == FormCellFieldIFace.FieldType.tristate)))
{
FormCellFieldIFace fcf = (FormCellFieldIFace)cell;
if (fcf.getLabel().equals("##"))
{
fcf.setLabel(getTitleFromFieldName(cell.getName(), tableInfo));
}
}
}
}
}
/**
* @param type the type of form to be built
* @param name the name of the form
* @param className the class name of the data object
* @param gettableClassName the class name of the getter
* @param settableClassName the class name of the setter
* @param desc the description
* @param useResourceLabels whether to use resource labels
* @return a form view of type "form"
*/
protected static ViewDef createIconViewDef(final ViewDef.ViewType type,
final String name,
final String className,
final String gettableClassName,
final String settableClassName,
final String desc,
final boolean useResourceLabels)
{
ViewDef formView = new ViewDef(type, name, className, gettableClassName, settableClassName, desc, useResourceLabels);
//formView.setEnableRules(getEnableRules(element));
return formView;
}
/**
* Creates a Table Form View
* @param typeName the type of form to be built
* @param element the DOM element for building the form
* @param name the name of the form
* @param className the class name of the data object
* @param gettableClassName the class name of the getter
* @param settableClassName the class name of the setter
* @param desc the description
* @param useResourceLabels whether to use resource labels
* @return a form view of type "table"
*/
protected static TableViewDefIFace createTableView(final Element element,
final String name,
final String className,
final String gettableClassName,
final String settableClassName,
final String desc,
final boolean useResourceLabels)
{
TableViewDefIFace tableView = new TableViewDef( name, className, gettableClassName, settableClassName, desc, useResourceLabels);
//tableView.setResourceLabels(resLabels);
Element columns = (Element)element.selectSingleNode("columns");
if (columns != null)
{
for ( Iterator<?> i = columns.elementIterator( "column" ); i.hasNext(); ) {
Element colElement = (Element) i.next();
FormColumn column = new FormColumn(colElement.attributeValue(NAME),
colElement.attributeValue(LABEL),
getAttr(colElement, "dataobjformatter", null),
getAttr(colElement, "format", null)
);
tableView.addColumn(column);
}
}
return tableView;
}
/**
* Save out a viewSet to a file
* @param viewSet the viewSet to save
* @param filename the filename (full path) as to where to save it
*/
public static void save(final ViewSet viewSet, final String filename)
{
try
{
Vector<ViewSet> viewsets = new Vector<ViewSet>();
viewsets.add(viewSet);
File file = new File(filename);
FileWriter fw = new FileWriter(file);
fw.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
BeanWriter beanWriter = new BeanWriter(fw);
XMLIntrospector introspector = beanWriter.getXMLIntrospector();
introspector.getConfiguration().setWrapCollectionsInElement(false);
beanWriter.getBindingConfiguration().setMapIDs(false);
beanWriter.setWriteEmptyElements(false);
beanWriter.enablePrettyPrint();
beanWriter.write(viewSet);
fw.close();
} catch(Exception ex)
{
log.error("error writing views", ex);
}
}
//--------------------------------------------------------------------------------------------
//-- Field Verify Methods, Classes, Helpers
//--------------------------------------------------------------------------------------------
public void createFieldVerTableModel()
{
fldVerTableModel = new FieldVerifyTableModel();
}
/**
* @return the doFieldVerification
*/
public static boolean isDoFieldVerification()
{
return doFieldVerification;
}
/**
* @param doFieldVerification the doFieldVerification to set
*/
public static void setDoFieldVerification(boolean doFieldVerification)
{
ViewLoader.doFieldVerification = doFieldVerification;
}
public static void clearFieldVerInfo()
{
if (instance.fldVerTableModel != null)
{
instance.fldVerTableModel.clear();
}
}
/**
     * Displays the dialog listing field names that appear on a form but not in the database.
*/
public static void displayFieldVerInfo()
{
if (verifyDlg != null)
{
verifyDlg.setVisible(false);
verifyDlg.dispose();
verifyDlg = null;
}
System.err.println("------------- "+(instance.fldVerTableModel != null ? instance.fldVerTableModel.getRowCount() : "null"));
if (instance.fldVerTableModel != null && instance.fldVerTableModel.getRowCount() > 0)
{
            JLabel lbl = UIHelper.createLabel("<html><i>(Some of the fields are special buttons or label names. Review them to make sure you have not <br>mis-named any of the fields you are working with.)");
final JTable table = new JTable(instance.fldVerTableModel);
UIHelper.calcColumnWidths(table);
CellConstraints cc = new CellConstraints();
JScrollPane sp = new JScrollPane(table, ScrollPaneConstants.VERTICAL_SCROLLBAR_AS_NEEDED, ScrollPaneConstants.HORIZONTAL_SCROLLBAR_AS_NEEDED);
PanelBuilder pb = new PanelBuilder(new FormLayout("f:p:g", "f:p:g,4px,p"));
pb.add(sp, cc.xy(1, 1));
pb.add(lbl, cc.xy(1, 3));
pb.setDefaultDialogBorder();
verifyDlg = new CustomFrame("Field Names on Form, but not in Database : "+instance.fldVerTableModel.getRowCount(), CustomFrame.OK_BTN, pb.getPanel())
{
@Override
protected void okButtonPressed()
{
super.okButtonPressed();
table.setModel(new DefaultTableModel());
dispose();
verifyDlg = null;
}
};
verifyDlg.setOkLabel(getResourceString("CLOSE"));
verifyDlg.createUI();
verifyDlg.setVisible(true);
}
}
class FieldVerifyTableModel extends DefaultTableModel
{
protected Vector<List<String>> rowData = new Vector<List<String>>();
protected String[] colNames = {"ViewSet", "View Def", "Cell Id", "Cell Name", "Row"};
protected Hashtable<String, Boolean> nameHash = new Hashtable<String, Boolean>();
public FieldVerifyTableModel()
{
super();
}
public void clear()
{
for (List<String> list : rowData)
{
list.clear();
}
rowData.clear();
nameHash.clear();
}
public void addRow(final String viewSet,
final String viewDef,
final String cellId,
final String cellName,
final String rowInx)
{
String key = viewSet + viewDef + cellId;
if (nameHash.get(key) == null)
{
List<String> row = new ArrayList<String>(5);
row.add(viewSet);
row.add(viewDef);
row.add(cellId);
row.add(cellName);
row.add(rowInx);
rowData.add(row);
nameHash.put(key, true);
}
}
/* (non-Javadoc)
* @see javax.swing.table.DefaultTableModel#getColumnCount()
*/
@Override
public int getColumnCount()
{
return colNames.length;
}
/* (non-Javadoc)
* @see javax.swing.table.DefaultTableModel#getColumnName(int)
*/
@Override
public String getColumnName(int column)
{
return colNames[column];
}
/* (non-Javadoc)
* @see javax.swing.table.DefaultTableModel#getRowCount()
*/
@Override
public int getRowCount()
{
return rowData == null ? 0 : rowData.size();
}
/* (non-Javadoc)
* @see javax.swing.table.DefaultTableModel#getValueAt(int, int)
*/
@Override
public Object getValueAt(int row, int column)
{
List<String> rowList = rowData.get(row);
return rowList.get(column);
}
}
}<|fim▁end|>
|
case field :
//view = createFormView(FormView.ViewType.field, element, id, name, gettableClassName, settableClassName,
// className, desc, instance.doingResourceLabels, isValidated);
break;
|
<|file_name|>log.py<|end_file_name|><|fim▁begin|># Copyright 2014 Xinyu, He <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import logging.handlers<|fim▁hole|>
file_name = 'log/home_debug.log'
debug_logger = logging.getLogger('DebugLog')
handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=50*1024*1024)
formatter = logging.Formatter("%(asctime)s - [%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
debug_logger.setLevel(logging.DEBUG)
debug_logger.addHandler(handler)
debug_logger.propagate = False # now if you use logger it will not log to console.
comm_name = 'log/home.log'
comm_logger = logging.getLogger('CommonLog')
handler = logging.handlers.RotatingFileHandler(comm_name, maxBytes=20*1024*1024)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s [%(filename)s - %(funcName)s] ')
handler.setFormatter(formatter)
comm_logger.setLevel(logging.INFO)
comm_logger.addHandler(handler)
# comm_logger.propagate = False # now if you use logger it will not log to console.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')
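# The aliases below expose both loggers as plain functions: DEBUG/F* write to
# the rotating debug file only (propagate=False), while INFO..CRITICAL go to
# the common log and still echo to the console via the root logger.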
# def stack_info_debug(info):
# stack_info = inspect.currentframe().f_back.f_code.co_name
# debug_logger.debug("%s: %s" % (stack_info, info))
DEBUG = debug_logger.debug
# DEBUG = stack_info_debug # only output to file
INFO = comm_logger.info
WARN = comm_logger.warning
ERROR = comm_logger.error
CRITICAL = comm_logger.critical
FDEBUG = debug_logger.debug
FINFO = debug_logger.info
FWARN = debug_logger.warning
FERROR = debug_logger.error
FCRITICAL = debug_logger.critical
EXCEPTION = comm_logger.exception<|fim▁end|>
| |
<|file_name|>webpack.config.renderer.js<|end_file_name|><|fim▁begin|>const merge = require('webpack-merge')
const HtmlWebpackPlugin = require('html-webpack-plugin')
const ExtractTextPlugin = require('extract-text-webpack-plugin')
const base = require('./webpack.config.base')
const pkg = require('../app/package.json')
module.exports = merge(base, {
entry: {
renderer: ['./app/renderer.js']
},
module: {
loaders: [
{ test: /\.vue$/, loader: 'vue' },
{ test: /\.css$/, loader: ExtractTextPlugin.extract('style', 'css', { publicPath: '../' }) },
{ test: /\.less$/, loader: ExtractTextPlugin.extract('style', 'css!less', { publicPath: '../' }) },
// { test: /\.html$/, loader: 'vue-html' },
{ test: /\.(png|jpe?g|gif|svg)(\?.*)?$/, loader: 'url', query: { limit: 10000, name: 'img/[name].[hash:7].[ext]' } },
{ test: /\.(woff2?|eot|ttf|otf)(\?.*)?$/, loader: 'url', query: { limit: 10000, name: 'font/[name].[hash:7].[ext]' } }
]
},
resolve: {
extensions: ['', '.js', '.json', '.css', '.less', '.sass', '.scss', '.vue']
},
target: 'electron-renderer',
// devServer: {
// // contentBase: './build',<|fim▁hole|> // quiet: false,
// noInfo: false,
// // lazy: true,
// hot: true,
// port: 2080
// },
plugins: [
new ExtractTextPlugin('css/[name].css')
],
vue: {
loaders: {
// html: 'raw',
// js: 'babel',
css: ExtractTextPlugin.extract('css', { publicPath: '../' }),
less: ExtractTextPlugin.extract('css!less', { publicPath: '../' })
},
autoprefixer: false
}
})
if (process.env.NODE_ENV === 'production') {
module.exports.plugins = (module.exports.plugins || []).concat([
// generate dist index.html with correct asset hash for caching.
// you can customize output by editing /index.html
// see https://github.com/ampedandwired/html-webpack-plugin
new HtmlWebpackPlugin({
template: './app/index.ejs',
filename: 'index.html',
title: pkg.productName,
chunks: ['renderer'],
excludeChunks: ['main'],
inject: true,
minify: {
removeComments: true,
collapseWhitespace: true,
removeAttributeQuotes: true
// more options: https://github.com/kangax/html-minifier#options-quick-reference
}
})
])
} else {
module.exports.module.preLoaders.push(
{ test: /\.vue$/, loader: 'eslint', exclude: /node_modules/ }
)
module.exports.plugins = (module.exports.plugins || []).concat([
new HtmlWebpackPlugin({
template: './app/index.ejs',
filename: 'index.html',
title: pkg.productName,
chunks: ['renderer'],
excludeChunks: ['main'],
inject: true
})
])
}<|fim▁end|>
|
// historyApiFallback: true,
// progress: true,
// inline: true,
// colors: true,
|
<|file_name|>historybuffer.py<|end_file_name|><|fim▁begin|>from __future__ import annotations<|fim▁hole|>import gc
import threading
from typing import Optional
from utils.utilfuncs import safeprint
def DummyAsyncFileWrite(fn, writestr, access='a'):
safeprint('Called HB file write before init {} {} {}'.format(fn, writestr, access))
AsyncFileWrite = DummyAsyncFileWrite # set from log support to avoid circular imports
DevPrint = None
# import topper
WatchGC = False # set True to see garbage collection info
Buffers = {}
HBdir = ''
GCBuf: Optional[HistoryBuffer] = None
bufdumpseq = 0
HBNet = None
def SetupHistoryBuffers(dirnm, maxlogs):
global HBdir, GCBuf
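# Rotate previous capture directories logrotate-style: drop the oldest
# (.HistoryBuffer.<maxlogs>), shift each .HistoryBuffer.<i> to <i+1>, then
# start a fresh .HistoryBuffer directory.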
r = [k for k in os.listdir('.') if '.HistoryBuffer' in k]
if ".HistoryBuffer." + str(maxlogs) in r:
shutil.rmtree(".HistoryBuffer." + str(maxlogs))
for i in range(maxlogs - 1, 0, -1):
if ".HistoryBuffer." + str(i) in r:
os.rename('.HistoryBuffer.' + str(i), ".HistoryBuffer." + str(i + 1))
# noinspection PyBroadException
try:
os.rename('.HistoryBuffer', '.HistoryBuffer.1')
except:
pass
os.mkdir('.HistoryBuffer')
HBdir = dirnm + '/.HistoryBuffer/'
if WatchGC:
gc.callbacks.append(NoteGCs)
GCBuf = HistoryBuffer(50, 'GC')
def NoteGCs(phase, info):
if GCBuf is not None:
GCBuf.Entry('GC Call' + phase + repr(info))
def DumpAll(idline, entrytime):
global bufdumpseq
if HBdir == '': # logs not yet set up
safeprint(time.strftime('%m-%d-%y %H:%M:%S') + ' Suppressing History Buffer Dump for {}'.format(idline))
return
fn = HBdir + str(bufdumpseq) + '-' + entrytime
try:
#topper.mvtops(str(bufdumpseq) + '-' + entrytime)
bufdumpseq += 1
t = {}
curfirst = {}
curtime = {}
initial = {}
now = time.time()
more = True
for nm, HB in Buffers.items():
t[nm] = HB.content()
try:
curfirst[nm] = next(t[nm])
curtime[nm] = curfirst[nm][1]
except StopIteration:
if nm in curfirst: del curfirst[nm]
if nm in curtime: del curtime[nm]
initial[nm] = '*'
if curfirst == {} or curtime == {}:
more = False
prevtime = 0
AsyncFileWrite(fn, '{} ({}): '.format(entrytime, now) + idline + '\n', 'w')
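# k-way merge across all buffers: repeatedly emit the entry with the smallest
# timestamp among the buffers' current heads, then advance that buffer.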
while more:
nextup = min(curtime, key=curtime.get)
if curtime[nextup] > prevtime:
prevtime = curtime[nextup]
else:
AsyncFileWrite(fn, 'seq error:' + str(prevtime) + ' ' + str(curtime[nextup]) + '\n')
prevtime = 0
if now - curfirst[nextup][1] < 300: # limit history dump to 5 minutes worth
AsyncFileWrite(fn,
'{:1s}{:10s}:({:3d}) {:.5f}: [{}] {}\n'.format(initial[nextup], nextup,
curfirst[nextup][0],
now - curfirst[nextup][1],
curfirst[nextup][3],
curfirst[nextup][2]))
initial[nextup] = ' '
try:
curfirst[nextup] = next(t[nextup])
curtime[nextup] = curfirst[nextup][1]
except StopIteration:
del curfirst[nextup]
del curtime[nextup]
if curfirst == {} or curtime == {}: more = False
except Exception as E:
AsyncFileWrite(fn, 'Error dumping buffer for: ' + entrytime + ': ' + idline + '\n')
AsyncFileWrite(fn, 'Exception was: ' + repr(E) + '\n')
class EntryItem(object):
def __init__(self):
self.timeofentry = 0
self.entry = ""
self.thread = ""
class HistoryBuffer(object):
def __init__(self, size, name):
self.buf = []
for i in range(size):
self.buf.append(EntryItem())
self.current = 0
self.size = size
self.name = name
Buffers[name] = self
def Entry(self, entry):
self.buf[self.current].entry = entry
self.buf[self.current].timeofentry = time.time()
self.buf[self.current].thread = threading.current_thread().name
self.current = (self.current + 1) % self.size
def content(self):
# freeze for dump and reset empty
# this is subject to races from other threads doing entry reports
# sequence: build the new buf offline, then swap it in for the current buf so one list or the other is always valid
# then change current back to 0
# at worst this loses the few events recorded between grabbing the current buf and swapping in the new one
tempbuf = []
for i in range(self.size):
tempbuf.append(EntryItem())
cur = self.buf
curind = self.current
self.buf = tempbuf
self.current = 0
#DevPrint('Enter HB content for: {} index {}'.format(self.name, curind))
for i in range(self.size):
j = (i + curind) % self.size
if cur[j].timeofentry != 0:
# DevPrint('Item from {}: {}/{}/{}/{}'.format(self.name, i, j, cur[j].timeofentry, cur[j].entry))
yield j, cur[j].timeofentry, cur[j].entry, cur[j].thread
#DevPrint('Content exit: {}/{}'.format(self.name, j))<|fim▁end|>
|
import os
import shutil
import time
|
<|file_name|>config.py<|end_file_name|><|fim▁begin|># coding=utf-8<|fim▁hole|> 'db_user': '', # MongoDB username
'db_pass': '', # MongoDB password
'db_host': 'localhost',
'db_port': 27017,
'db_name': 'novelRS',
'cpu_num': 4 # number of processes to use for the computation
}<|fim▁end|>
|
config = {
'timeout': 10,
|
<|file_name|>Product.java<|end_file_name|><|fim▁begin|>/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package entities;
import java.io.Serializable;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.JoinColumn;
import javax.persistence.Lob;
import javax.persistence.ManyToOne;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
import javax.persistence.Table;
import javax.validation.constraints.NotNull;
import javax.validation.constraints.Size;
import javax.xml.bind.annotation.XmlRootElement;
/**
*
* @author Maarten De Weerdt
*/
@Entity
@Table(name = "Product")
@XmlRootElement
@NamedQueries({
@NamedQuery(name = "Product.findAll", query = "SELECT p FROM Product p")
, @NamedQuery(name = "Product.findById", query = "SELECT p FROM Product p WHERE p.id = :id")
, @NamedQuery(name = "Product.findByPrijs", query = "SELECT p FROM Product p WHERE p.prijs = :prijs")
, @NamedQuery(name = "Product.findByCategorie", query = "SELECT p FROM Product p WHERE p.categorieNaam = :categorieNaam")
})
public class Product implements Serializable {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Basic(optional = false)
@Column(name = "ID")
private Integer id;
@Basic(optional = false)
@NotNull
@Lob
@Size(min = 1, max = 65535)
@Column(name = "Naam")
private String naam;
@Basic(optional = false)
@NotNull
@Lob
@Size(min = 1, max = 65535)
@Column(name = "Omschrijving")
private String omschrijving;
@Basic(optional = false)
@NotNull
@Column(name = "Prijs")
private double prijs;
@Basic(optional = false)
@NotNull
@Lob
@Size(min = 1, max = 65535)
@Column(name = "Afbeelding")
private String afbeelding;
@Lob
@Size(max = 65535)
@Column(name = "Informatie")
private String informatie;
@JoinColumn(name = "CategorieNaam", referencedColumnName = "CategorieNaam")
@ManyToOne(optional = false)
private Categorie categorieNaam;
public Product() {
}
public Product(Integer id) {
this.id = id;
}
public Product(Integer id, String naam, String omschrijving, double prijs, String afbeelding) {
this.id = id;
this.naam = naam;
this.omschrijving = omschrijving;
this.prijs = prijs;
<|fim▁hole|>
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getNaam() {
return naam;
}
public void setNaam(String naam) {
this.naam = naam;
}
public String getOmschrijving() {
return omschrijving;
}
public void setOmschrijving(String omschrijving) {
this.omschrijving = omschrijving;
}
public double getPrijs() {
return prijs;
}
public void setPrijs(double prijs) {
this.prijs = prijs;
}
public String getAfbeelding() {
return afbeelding;
}
public void setAfbeelding(String afbeelding) {
this.afbeelding = afbeelding;
}
public String getInformatie() {
return informatie;
}
public void setInformatie(String informatie) {
this.informatie = informatie;
}
public Categorie getCategorieNaam() {
return categorieNaam;
}
public void setCategorieNaam(Categorie categorieNaam) {
this.categorieNaam = categorieNaam;
}
@Override
public int hashCode() {
int hash = 0;
hash += (id != null ? id.hashCode() : 0);
return hash;
}
@Override
public boolean equals(Object object) {
// TODO: Warning - this method won't work in the case the id fields are not set
if (!(object instanceof Product)) {
return false;
}
Product other = (Product) object;
if ((this.id == null && other.id != null) || (this.id != null && !this.id.equals(other.id))) {
return false;
}
return true;
}
@Override
public String toString() {
return "entities.Product[ id=" + id + " ]";
}
}<|fim▁end|>
|
this.afbeelding = afbeelding;
}
|
<|file_name|>pelicanconf.py<|end_file_name|><|fim▁begin|># Theme-specific settings
SITENAME = "Marc Sleegers"
DOMAIN = "marcsleegers.com"
BIO_TEXT = "Infrequent ramblings."
FOOTER_TEXT = '© 2022 Marc Sleegers. Licensed <a href="https://creativecommons.org/licenses/by/4.0/">CC BY 4.0</a>.'
SITE_AUTHOR = "Marc Sleegers"
TWITTER_USERNAME = "@marcardioid"
GOOGLE_PLUS_URL = ""
INDEX_DESCRIPTION = "Lead Data Engineer at Nike, especially interested in driving growth through insights – not just metrics. These are my infrequent ramblings."
INDEX_KEYWORDS = [
"Marc",
"Sleegers",
"About",
"Blog",
"Resume",
"CV",
"Portfolio",
"Marcardioid",
"Pumpkinsoup",
"AWildPumpkin",
"Computer",
"Science",
"Developer",
"Programmer",
"Software",
"Data",
"Engineer",
"Technology",
]
NAVIGATION_ITEMS = []
# NAVIGATION_ITEMS = [
# ('/blog/', 'blog', 'Blog'),
# ('/blog/archive/', 'archive', 'Archive'),
# ]
ICONS_PATH = "images/icons"
GOOGLE_FONTS = [
"Inter",
"Source Code Pro",
]
SOCIAL_ICONS = [
# (
# "mailto:[email protected]",
# "Contact ([email protected])",
# "fa-envelope-square",
# ),
# ('https://facebook.com/marc.sleegers', 'Facebook', 'fa-facebook-square'),
# ('https://twitter.com/marcardioid', 'Twitter', 'fa-twitter-square'),
# ("https://github.com/marcardioid", "GitHub", "fa-github-square"),
# ('/files/CV_Marc-Sleegers_2015_EN_WEB.pdf', 'Resume', 'fa-check-square'),
# ("/atom.xml", "RSS (Atom Feed)", "fa-rss-square"),
]
THEME_COLOR = "#052"
ASSET_URL = "/theme/style.min.css"
# Pelican settings
RELATIVE_URLS = False
SITEURL = "http://localhost:8000"
TIMEZONE = "Europe/Amsterdam"
DEFAULT_DATE = "fs"
DEFAULT_DATE_FORMAT = "%B %d, %Y"
DEFAULT_PAGINATION = False
SUMMARY_MAX_LENGTH = 50
THEME = "themes/pneumatic"
# Relocate blog directory
BLOG_URL = "blog/"
BLOG_DESCRIPTION = "These are my infrequent ramblings."
ARTICLE_URL = BLOG_URL + "{date:%Y}/{date:%m}/{slug}/"
ARTICLE_SAVE_AS = ARTICLE_URL + "index.html"
DRAFT_URL = BLOG_URL + "drafts/{date:%Y}/{date:%m}/{slug}/"
DRAFT_SAVE_AS = DRAFT_URL + "index.html"
PAGE_URL = "{slug}/"
PAGE_SAVE_AS = PAGE_URL + "index.html"
ARCHIVES_SAVE_AS = BLOG_URL + "archive/index.html"
ARCHIVES_DESCRIPTION = "These are the archives of my infrequent ramblings."
YEAR_ARCHIVE_SAVE_AS = BLOG_URL + "{date:%Y}/index.html"
MONTH_ARCHIVE_SAVE_AS = BLOG_URL + "{date:%Y}/{date:%m}/index.html"
# Disable authors, categories, tags, and category pages
DIRECT_TEMPLATES = ["index", "archives"]
INDEX_SAVE_AS = BLOG_URL + "index.html"
CATEGORY_SAVE_AS = ""
# Disable Atom feed generation
FEED_ATOM = "atom.xml"
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
TYPOGRIFY = True
MARKDOWN = {
"extension_configs": {
"markdown.extensions.codehilite": {"linenums": "True"},
"markdown.extensions.admonition": {},
"markdown.extensions.extra": {},
"markdown.extensions.toc": {"anchorlink": "True"},
"markdown.extensions.footnotes": {},
"markdown.extensions.meta": {},
},
"output_format": "html5",
}
JINJA_ENVIRONMENT = {"trim_blocks": True, "lstrip_blocks": True}
CACHE_CONTENT = False
DELETE_OUTPUT_DIRECTORY = False
OUTPUT_PATH = "output/develop/"
PATH = "content"
templates = ["404.html"]
TEMPLATE_PAGES = {page: page for page in templates}
STATIC_PATHS = ["images", "extra"]
IGNORE_FILES = ["style.css"]
extras = ["favicon.ico", "robots.txt", "humans.txt"]
EXTRA_PATH_METADATA = {"extra/%s" % file: {"path": file} for file in extras}
PLUGINS = ["neighbors", "sitemap", "webassets", "share_post", "series"]
GOOGLE_ANALYTICS = "UA-72969416-1"
SITEMAP = {
"format": "xml",
"priorities": {"articles": 0.5, "indexes": 0.5, "pages": 0.5},
"changefreqs": {"articles": "monthly", "indexes": "weekly", "pages": "monthly"},
}
WEBASSETS_CONFIG = [
("cache", False),
("manifest", False),<|fim▁hole|><|fim▁end|>
|
("url_expire", False),
("versions", False),
]
|
<|file_name|>injection.js<|end_file_name|><|fim▁begin|>import { defineComponent, mount, Html } from 'js-surface';
import { Component } from 'js-surface/common';
const { br, div } = Html;
const Parent = defineComponent({
displayName: 'Parent',
properties: {
masterValue: {
type: String,
defaultValue: 'default-value'
}
},
childContext: ['value'],
main: class extends Component {
getChildContext() {
return {
value: this.props.masterValue
};
}
render() {
return (
div(null,
div(null,
`Provided value: ${this.props.masterValue}`),
br(),<|fim▁hole|> div(null,
ChildFunctionBased(),
ChildClassBased(),
ChildFunctionBased({ value: 'with explicit value' }),
ChildClassBased({ value: 'with another explicit value' })))
);
}
}
});
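// Both children declare `inject: true` on `value`, so it is resolved from the
// Parent's child context unless a prop is passed explicitly, as in the last
// two calls above.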
const ChildFunctionBased = defineComponent({
displayName: 'ChildFunctionBased',
properties: {
value: {
type: String,
inject: true,
defaultValue: 'default value'
}
},
render(props) {
return (
div(null,
`ChildFunctionBased(${props.value})`)
);
}
});
const ChildClassBased = defineComponent({
displayName: 'ChildClassBased',
properties: {
value: {
type: String,
inject: true,
defaultValue: 'default value'
}
},
main: class extends Component {
render () {
return (
div(null,
`ChildClassBased(${this.props.value})`)
);
}
}
});
mount(Parent({ masterValue: 'the injected value' }), 'main-content');<|fim▁end|>
| |
<|file_name|>InWechatTextMsgExecutor.java<|end_file_name|><|fim▁begin|>package com.byhealth.wechat.mysdk.process.in.executor;
import com.byhealth.wechat.base.admin.entity.RespMsgActionEntity;
import com.byhealth.wechat.config.MsgTemplateConstants;
import com.byhealth.wechat.mysdk.constants.WechatReqMsgtypeConstants;
import com.byhealth.wechat.mysdk.context.WechatContext;
import com.byhealth.wechat.mysdk.process.ext.TextExtService;
import com.byhealth.wechat.mysdk.tools.NameTool;
import com.byhealth.wechat.mysdk.beans.req.ReqTextMessage;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
/**
* Text message processor
* @author fengjx [email protected]
* @date 2014-09-11
*/
public class InWechatTextMsgExecutor extends InServiceExecutor {
@Autowired
private TextExtService textExtService;
@Override
public String execute() throws Exception {<|fim▁hole|> RespMsgActionEntity actionEntity = msgActionService.loadMsgAction(null,WechatReqMsgtypeConstants.REQ_MSG_TYPE_TEXT, null,textMessage.getContent(), WechatContext.getPublicAccount().getSysUser());
// no matching rule was found
if(null == actionEntity){
String res = textExtService.execute();
if(StringUtils.isNotBlank(res)){ // if the extension service returned data, use it directly
return res;
}
// fall back to the default reply message
actionEntity = msgActionService.loadMsgAction(MsgTemplateConstants.WECHAT_DEFAULT_MSG, null, null, null, WechatContext.getPublicAccount().getSysUser());
}
return doAction(actionEntity);
}
@Override
public String getExecutorName() {
return NameTool.buildInServiceName(WechatReqMsgtypeConstants.REQ_MSG_TYPE_TEXT, null);
}
}<|fim▁end|>
|
ReqTextMessage textMessage = new ReqTextMessage(WechatContext.getWechatPostMap());
logger.info("进入文本消息处理器fromUserName="+textMessage.getFromUserName());
|
<|file_name|>JsonImagePanorama.java<|end_file_name|><|fim▁begin|>package alexiil.mods.load.json.subtypes;
import java.util.Collections;<|fim▁hole|>import java.util.List;
import java.util.Map;
import net.minecraft.util.ResourceLocation;
import alexiil.mods.load.baked.func.BakedFunction;
import alexiil.mods.load.baked.func.FunctionBaker;
import alexiil.mods.load.baked.insn.BakedInstruction;
import alexiil.mods.load.baked.render.BakedPanoramaRender;
import alexiil.mods.load.json.JsonImage;
public class JsonImagePanorama extends JsonImage {
public JsonImagePanorama(ResourceLocation resourceLocation, String image) {
super("", image, null, null, null, null, null, null, null);
this.resourceLocation = resourceLocation;
}
@Override
protected JsonImagePanorama actuallyConsolidate() {
return this;
}
@Override
protected BakedPanoramaRender actuallyBake(Map<String, BakedFunction<?>> functions) {
BakedFunction<Double> angle = FunctionBaker.bakeFunctionDouble("seconds * 40", functions);
return new BakedPanoramaRender(angle, image);
}
@Override
public List<BakedInstruction> bakeInstructions(Map<String, BakedFunction<?>> functions) {
return Collections.emptyList();
}
}<|fim▁end|>
| |
<|file_name|>index.ts<|end_file_name|><|fim▁begin|>import { Component } from '@angular/core';
import { OrderService } from '../order.component/order.service';
import { ActivatedRoute, Params } from '@angular/router';
@Component({
selector: 'order-detail',
styleUrls: [
'./style.scss'
],
templateUrl: './template.html',
providers: [
OrderService
]
})
export class OrderDetailComponent {
order: any;
ngOnInit(): void {
this.route.params.forEach((params: Params) => {
this.getOrder(params.id);
})
}
getOrder(id) {
this.orderService.getById(id)
.subscribe(info => {
this.order = info
})
}<|fim▁hole|> private orderService: OrderService,
private route: ActivatedRoute
) {}
}<|fim▁end|>
|
constructor(
|
<|file_name|>MovieGet.java<|end_file_name|><|fim▁begin|>package uk.co.ourfriendirony.medianotifier.clients.tmdb.movie.get;
import com.fasterxml.jackson.annotation.JsonAnyGetter;
import com.fasterxml.jackson.annotation.JsonAnySetter;
import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonPropertyOrder({
"adult",
"backdrop_path",
"belongs_to_collection",
"budget",
"movieGetGenres",
"homepage",
"id",
"imdb_id",
"original_language",
"original_title",
"overview",
"popularity",
"poster_path",
"production_companies",
"production_countries",
"release_date",
"revenue",
"runtime",
"spoken_languages",
"status",
"tagline",
"title",
"video",
"vote_average",
"vote_count",
"external_ids"
})
public class MovieGet {
@JsonProperty("adult")
private Boolean adult;
@JsonProperty("backdrop_path")
private String backdropPath;
@JsonProperty("belongs_to_collection")
private MovieGetBelongsToCollection belongsToCollection;
@JsonProperty("budget")
private Integer budget;
@JsonProperty("movieGetGenres")
private List<MovieGetGenre> movieGetGenres = null;
@JsonProperty("homepage")
private String homepage;
@JsonProperty("id")
private Integer id;
@JsonProperty("imdb_id")
private String imdbId;
@JsonProperty("original_language")
private String originalLanguage;
@JsonProperty("original_title")
private String originalTitle;
@JsonProperty("overview")
private String overview;
@JsonProperty("popularity")
private Double popularity;
@JsonProperty("poster_path")
private String posterPath;
@JsonProperty("production_companies")
private List<MovieGetProductionCompany> productionCompanies = null;
@JsonProperty("production_countries")
private List<MovieGetProductionCountry> productionCountries = null;
@JsonFormat(shape = JsonFormat.Shape.STRING, pattern = "yyyy-MM-dd")
@JsonProperty("release_date")
private Date releaseDate;
@JsonProperty("revenue")
private Integer revenue;
@JsonProperty("runtime")
private Integer runtime;
@JsonProperty("spoken_languages")
private List<MovieGetSpokenLanguage> movieGetSpokenLanguages = null;
@JsonProperty("status")
private String status;
@JsonProperty("tagline")
private String tagline;
@JsonProperty("title")
private String title;
@JsonProperty("video")
private Boolean video;
@JsonProperty("vote_average")
private Double voteAverage;
@JsonProperty("vote_count")
private Integer voteCount;
@JsonProperty("external_ids")
private MovieGetExternalIds movieGetExternalIds;
@JsonIgnore
private Map<String, Object> additionalProperties = new HashMap<String, Object>();
@JsonProperty("adult")
public Boolean getAdult() {
return adult;
}
@JsonProperty("adult")
public void setAdult(Boolean adult) {
this.adult = adult;
}
@JsonProperty("backdrop_path")
public String getBackdropPath() {
return backdropPath;
}
@JsonProperty("backdrop_path")
public void setBackdropPath(String backdropPath) {
this.backdropPath = backdropPath;
}
@JsonProperty("belongs_to_collection")
public MovieGetBelongsToCollection getBelongsToCollection() {
return belongsToCollection;
}
@JsonProperty("belongs_to_collection")
public void setBelongsToCollection(MovieGetBelongsToCollection belongsToCollection) {
this.belongsToCollection = belongsToCollection;
}
@JsonProperty("budget")
public Integer getBudget() {
return budget;
}
@JsonProperty("budget")
public void setBudget(Integer budget) {
this.budget = budget;
}
@JsonProperty("movieGetGenres")
public List<MovieGetGenre> getMovieGetGenres() {
return movieGetGenres;
}
@JsonProperty("movieGetGenres")
public void setMovieGetGenres(List<MovieGetGenre> movieGetGenres) {
this.movieGetGenres = movieGetGenres;
}
@JsonProperty("homepage")
public String getHomepage() {
return homepage;
}
@JsonProperty("homepage")
public void setHomepage(String homepage) {
this.homepage = homepage;
}
@JsonProperty("id")
public Integer getId() {
return id;
}
@JsonProperty("id")
public void setId(Integer id) {
this.id = id;
}
@JsonProperty("imdb_id")
public String getImdbId() {
return imdbId;
}
@JsonProperty("imdb_id")
public void setImdbId(String imdbId) {
this.imdbId = imdbId;
}
@JsonProperty("original_language")
public String getOriginalLanguage() {
return originalLanguage;
}
@JsonProperty("original_language")
public void setOriginalLanguage(String originalLanguage) {
this.originalLanguage = originalLanguage;
}
@JsonProperty("original_title")
public String getOriginalTitle() {
return originalTitle;
}
@JsonProperty("original_title")
public void setOriginalTitle(String originalTitle) {
this.originalTitle = originalTitle;
}
@JsonProperty("overview")
public String getOverview() {
return overview;
}
@JsonProperty("overview")
public void setOverview(String overview) {
this.overview = overview;
}
@JsonProperty("popularity")
public Double getPopularity() {
return popularity;
}
@JsonProperty("popularity")
public void setPopularity(Double popularity) {
this.popularity = popularity;
}
@JsonProperty("poster_path")
public String getPosterPath() {
return posterPath;
}
@JsonProperty("poster_path")
public void setPosterPath(String posterPath) {
this.posterPath = posterPath;
}
@JsonProperty("production_companies")
public List<MovieGetProductionCompany> getProductionCompanies() {
return productionCompanies;
}
@JsonProperty("production_companies")
public void setProductionCompanies(List<MovieGetProductionCompany> productionCompanies) {
this.productionCompanies = productionCompanies;
}
@JsonProperty("production_countries")
public List<MovieGetProductionCountry> getProductionCountries() {
return productionCountries;
}
@JsonProperty("production_countries")
public void setProductionCountries(List<MovieGetProductionCountry> productionCountries) {
this.productionCountries = productionCountries;
}
@JsonProperty("release_date")
public Date getReleaseDate() {
return releaseDate;
}
@JsonProperty("release_date")
public void setReleaseDate(Date releaseDate) {
this.releaseDate = releaseDate;
}
@JsonProperty("revenue")
public Integer getRevenue() {
return revenue;
}
@JsonProperty("revenue")
public void setRevenue(Integer revenue) {
this.revenue = revenue;
}
@JsonProperty("runtime")
public Integer getRuntime() {
return runtime;
}
@JsonProperty("runtime")
public void setRuntime(Integer runtime) {
this.runtime = runtime;
}
@JsonProperty("spoken_languages")
public List<MovieGetSpokenLanguage> getMovieGetSpokenLanguages() {
return movieGetSpokenLanguages;
}
@JsonProperty("spoken_languages")
public void setMovieGetSpokenLanguages(List<MovieGetSpokenLanguage> movieGetSpokenLanguages) {
this.movieGetSpokenLanguages = movieGetSpokenLanguages;
}
@JsonProperty("status")
public String getStatus() {
return status;
}
@JsonProperty("status")
public void setStatus(String status) {
this.status = status;
}
@JsonProperty("tagline")
public String getTagline() {
return tagline;
}
@JsonProperty("tagline")
public void setTagline(String tagline) {
this.tagline = tagline;
}
@JsonProperty("title")
public String getTitle() {
return title;
}
@JsonProperty("title")
public void setTitle(String title) {
this.title = title;
}
@JsonProperty("video")
public Boolean getVideo() {
return video;
}
@JsonProperty("video")
public void setVideo(Boolean video) {
this.video = video;
}
@JsonProperty("vote_average")
public Double getVoteAverage() {
return voteAverage;
}
@JsonProperty("vote_average")
public void setVoteAverage(Double voteAverage) {
this.voteAverage = voteAverage;
}
@JsonProperty("vote_count")
public Integer getVoteCount() {
return voteCount;
}
@JsonProperty("vote_count")
public void setVoteCount(Integer voteCount) {
this.voteCount = voteCount;
}
@JsonProperty("external_ids")
public MovieGetExternalIds getMovieGetExternalIds() {
return movieGetExternalIds;
}
@JsonProperty("external_ids")
public void setMovieGetExternalIds(MovieGetExternalIds movieGetExternalIds) {
this.movieGetExternalIds = movieGetExternalIds;
}
@JsonAnyGetter
public Map<String, Object> getAdditionalProperties() {
return this.additionalProperties;
}
@JsonAnySetter
public void setAdditionalProperty(String name, Object value) {
this.additionalProperties.put(name, value);
}
}
//@JsonInclude(JsonInclude.Include.NON_NULL)
//@JsonPropertyOrder({
// "id",
// "name",
// "poster_path",
// "backdrop_path"
//})
//class MovieGetBelongsToCollection {
//
// @JsonProperty("id")
// private Integer id;<|fim▁hole|>// @JsonProperty("poster_path")
// private String posterPath;
// @JsonProperty("backdrop_path")
// private String backdropPath;
// @JsonIgnore
// private Map<String, Object> additionalProperties = new HashMap<String, Object>();
//
// @JsonProperty("id")
// public Integer getId() {
// return id;
// }
//
// @JsonProperty("id")
// public void setId(Integer id) {
// this.id = id;
// }
//
// @JsonProperty("name")
// public String getName() {
// return name;
// }
//
// @JsonProperty("name")
// public void setName(String name) {
// this.name = name;
// }
//
// @JsonProperty("poster_path")
// public String getPosterPath() {
// return posterPath;
// }
//
// @JsonProperty("poster_path")
// public void setPosterPath(String posterPath) {
// this.posterPath = posterPath;
// }
//
// @JsonProperty("backdrop_path")
// public String getBackdropPath() {
// return backdropPath;
// }
//
// @JsonProperty("backdrop_path")
// public void setBackdropPath(String backdropPath) {
// this.backdropPath = backdropPath;
// }
//
// @JsonAnyGetter
// public Map<String, Object> getAdditionalProperties() {
// return this.additionalProperties;
// }
//
// @JsonAnySetter
// public void setAdditionalProperty(String name, Object value) {
// this.additionalProperties.put(name, value);
// }
//
//}<|fim▁end|>
|
// @JsonProperty("name")
// private String name;
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>/*!
Various BWT-DC compression models
*/
use compress::bwt::dc;
use compress::entropy::ari;
use std::io;
/// A copy of `bbb` model
pub mod bbb;
/// Old Dark-0.51 model
pub mod dark;
/// Original BWT-DC compression model
pub mod exp;
/// Raw output for debugging
pub mod raw;
/// A simplest model to compare with
pub mod simple;
/// A attempt to reproduce YBS model
pub mod ybs;
/// Distance type
pub type Distance = u32;
/// Symbol type
pub type Symbol = u8;
/// Symbol encoding context //TODO
pub type SymContext = ();
/// An abstract BWT output encoding model (BWT-???-Ari)
pub trait Model<T, C> {
/// Reset current estimations
fn reset(&mut self);
/// Encode an element
fn encode<W: io::Write>(&mut self, T, &C, &mut ari::Encoder<W>) -> io::Result<()>;
/// Decode an element
fn decode<R: io::Read>(&mut self, &C, &mut ari::Decoder<R>) -> io::Result<T>;
}
/// A generic BWT-DC output coding model
pub trait DistanceModel: Model<Distance, dc::Context> {}
impl<M: Model<Distance, dc::Context>> DistanceModel for M {}
/// A generic BWT raw output coding model
pub trait RawModel: Model<Symbol, SymContext> {}
impl<M: Model<Symbol, SymContext>> RawModel for M {}
#[cfg(test)]
pub mod test {
use std::io;
use rand::{Rng, StdRng};
use compress::bwt::dc;
use compress::entropy::ari;
use super::{Distance, DistanceModel};
use super::{RawModel, Symbol, SymContext};
fn roundtrip_dc<M: DistanceModel>(m: &mut M, input: &[(Distance, dc::Context)]) {
let mut eh = ari::Encoder::new(Vec::new());
m.reset();
for &(dist, ref ctx) in input.iter() {
debug!("Encode: {}", dist);
m.encode(dist, ctx, &mut eh).unwrap();
}
let (mem, err) = eh.finish();
err.unwrap();
m.reset();
let mut dh = ari::Decoder::new(io::BufReader::new(io::Cursor::new(&mem[..])));
for &(dist, ref ctx) in input.iter() {
let d2 = m.decode(ctx, &mut dh).unwrap();
debug!("Actual: {}, Decoded: {}", dist, d2);
assert_eq!(d2, dist);
}
}
fn roundtrip_raw<M: RawModel>(mut m: M, input: &[(Symbol, SymContext)]) {
let mut eh = ari::Encoder::new(Vec::new());
m.reset();
for &(sym, ref ctx) in input.iter() {
debug!("Encode: {}", sym);
m.encode(sym, ctx, &mut eh).unwrap();
}
let (mem, err) = eh.finish();
err.unwrap();
m.reset();
let mut dh = ari::Decoder::new(io::BufReader::new(io::Cursor::new(&mem[..])));
for &(sym, ref ctx) in input.iter() {
let sym2 = m.decode(ctx, &mut dh).unwrap();
debug!("Actual: {}, Decoded: {}", sym, sym2);
assert_eq!(sym, sym2);
}
}
fn gen_data_dc(size: usize, max_dist: Distance) -> Vec<(Distance, dc::Context)> {
let mut rng = StdRng::new().unwrap();
(0..size).map(|_| {
let sym: Symbol = rng.gen();
let ctx = dc::Context::new(sym, 0, max_dist as usize);
(rng.gen_range(0, max_dist), ctx)
}).collect()
}
fn gen_data_raw(size: usize) -> Vec<(Symbol, SymContext)> {
let mut rng = StdRng::new().unwrap();
(0..size).map(|_| {
let sym: Symbol = rng.gen();
(sym, ())
}).collect()
}
fn roundtrips_dc<M: DistanceModel>(mut m: M) {
roundtrip_dc(&mut m, &[
(1, dc::Context::new(1,1,5)),
(2, dc::Context::new(2,2,5)),
(3, dc::Context::new(3,3,5)),
(4, dc::Context::new(4,4,5))
]);
roundtrip_dc(&mut m, &gen_data_dc(1000,200));
}
#[test]
fn roundtrip_bbb() {
let input = gen_data_raw(1000);
roundtrip_raw(super::bbb::Model::new(), &input);
}
#[test]
fn roundtrips_dark() {
roundtrips_dc(super::dark::Model::new());
}
#[test]
fn roundtrips_exp() {
roundtrips_dc(super::exp::Model::new());
}
#[test]<|fim▁hole|> #[test]
fn roundtrips_ybs() {
roundtrips_dc(super::ybs::Model::new());
}
}<|fim▁end|>
|
fn roundtrips_simple() {
roundtrips_dc(super::simple::Model::new());
}
|
<|file_name|>GET.java<|end_file_name|><|fim▁begin|>/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright (c) 2010-2013 Oracle and/or its affiliates. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can
* obtain a copy of the License at
* http://glassfish.java.net/public/CDDL+GPL_1_1.html
* or packager/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at packager/legal/LICENSE.txt.
*
* GPL Classpath Exception:
* Oracle designates this particular file as subject to the "Classpath"
* exception as provided by Oracle in the GPL Version 2 section of the License
* file that accompanied this code.
*
* Modifications:
* If applicable, add the following below the License Header, with the fields
* enclosed by brackets [] replaced by your own identifying information:
* "Portions Copyright [year] [name of copyright owner]"
*
* Contributor(s):
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package co.mewf.minirs.rs;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Indicates that the annotated method responds to HTTP GET requests.<|fim▁hole|> * @since 1.0
*/
@Target({ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
@HttpMethod(HttpMethod.GET)
@Documented
public @interface GET {
}<|fim▁end|>
|
*
* @author Paul Sandoz
* @author Marc Hadley
* @see HttpMethod
|
<|file_name|>database.py<|end_file_name|><|fim▁begin|>import logging
import rethinkdb as r
log = logging.getLogger(__name__)
class Database():
def __init__(self, bot):
self.bot = bot
self.db_name = self.bot.config.rname
self.db = None
r.set_loop_type("asyncio")
self.ready = False
def get_db(self):
"""
Returns the RethinkDB module/instance
"""
return r
async def insert(self, table, data):
"""
Insert a document into a table
"""
log.debug(
"Saving document to table {} with data: {}".format(table, data))
return await r.table(table).insert(data, conflict="update").run(self.db)
async def delete(self, table, primary_key=None):
"""
Deletes a document(s) from a table
"""
log.debug(
"Deleting document from table {} with primary key {}".format(table, primary_key))
if primary_key is not None:
# Delete one document with the key name
return await r.table(table).get(primary_key).delete().run(self.db)
else:
# Delete all documents in the table
return await r.table(table).delete().run(self.db)
async def connect(self, host, port, user, password):
"""
Establish a database connection
"""
log.info("Connecting to database: {}".format(self.db_name))
try:
self.db = await r.connect(db=self.db_name, host=host, port=port, user=user, password=password)
except r.errors.ReqlDriverError as e:
log.error(e)
return False
<|fim▁hole|>
# Create the database if it does not exist
try:
await r.db_create(self.db_name).run(self.db)
log.info("Created database: {}".format(self.db_name))
except r.errors.ReqlOpFailedError:
log.debug(
"Database {} already exists, skipping creation".format(self.db_name))
return True
async def create_table(self, name, primary='id'):
"""
Creates a new table in the database
"""
try:
await r.table_create(name, primary_key=primary).run(self.db)
log.info("Created table: {}".format(name))
except r.errors.ReqlOpFailedError:
log.debug(
"Table {} already exists, skipping creation".format(name))<|fim▁end|>
|
info = await self.db.server()
|
<|file_name|>adminaddmemberwv.cpp<|end_file_name|><|fim▁begin|>#include "adminaddmemberwv.h"
void AdminAddMemberWV::processItems(){
const QString & type = intro->cgetType();
const QString & nick = bio->cgetField("nick");
const QString & name = bio->cgetField("name");
const QString & surname = bio->cgetField("surname");
const QString & birthDay = bio->cgetField("birthDay");
const QString & phone = bio->cgetField("phone");
const QString & eMail = bio->cgetField("eMail");
const QVector<QString> & hobbyList = hobby->cgetHobby();
hobby->clear();
const QVector<QString> & interestsList = interests->cgetInterests();
interests->clear();
const QVector<Event> & experiencesList = experiences->cgetExperiences();
experiences->clear();
emit endAdd(type,
nick,
name,
surname,
birthDay,
phone,
eMail,
hobbyList,
interestsList,
experiencesList);
}<|fim▁hole|>AdminAddMemberWV::AdminAddMemberWV(QWidget * parent)
: QWizard(parent),
intro (new AdminAMWIntro),
bio (new AdminAMWBio),
hobby (new AdminAMWHobby),
interests (new AdminAMWInterests),
experiences(new AdminAMWExperiences),
end (new AdminAMWEnd)
{
addPage(intro);
addPage(bio);
addPage(hobby);
addPage(interests);
addPage(experiences);
addPage(end);
setWindowTitle( tr("Wizard Aggiunta Iscritto") );
setFixedSize( sizeHint() );
/*
* I need to pass the collected arguments on to whoever
* will handle them next, via the signal below
*/
connect (this,
SIGNAL (accepted()),
this,
SLOT (processItems()));
connect (this,
SIGNAL (rejected()),
this,
SIGNAL (endAdd()));
}
AdminAddMemberWV::~AdminAddMemberWV(){
delete intro;
delete bio;
delete hobby;
delete interests;
delete experiences;
delete end;
}<|fim▁end|>
| |
<|file_name|>FeaturesVectorFulltext.java<|end_file_name|><|fim▁begin|>package org.grobid.core.features;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.utilities.TextUtilities;
/**
* Class for features used for fulltext parsing.
*
*/
public class FeaturesVectorFulltext {
public LayoutToken token = null; // not a feature, reference value
public String string = null; // lexical feature
public String label = null; // label if known
public String blockStatus = null; // one of BLOCKSTART, BLOCKIN, BLOCKEND
public String lineStatus = null; // one of LINESTART, LINEIN, LINEEND
public String fontStatus = null; // one of NEWFONT, SAMEFONT
public String fontSize = null; // one of HIGHERFONT, SAMEFONTSIZE, LOWERFONT
public String alignmentStatus = null; // one of ALIGNEDLEFT, INDENTED, CENTERED - applied to the whole line
public boolean bold = false;
public boolean italic = false;
public String capitalisation = null; // one of INITCAP, ALLCAPS, NOCAPS
public String digit; // one of ALLDIGIT, CONTAINDIGIT, NODIGIT
public boolean singleChar = false;
public String punctType = null;
// one of NOPUNCT, OPENBRACKET, ENDBRACKET, DOT, COMMA, HYPHEN, QUOTE, PUNCT (default)
public int relativeDocumentPosition = -1;
public int relativePagePositionChar = -1;
public int relativePagePosition = -1;
// graphic in closed proximity of the current block<|fim▁hole|> public int closestGraphicHeight = -1;
public int closestGraphicWidth = -1;
public int closestGraphicSurface = -1;
public int spacingWithPreviousBlock = 0; // discretized
public int characterDensity = 0; // discretized
// how the reference callouts are expressed, if known
public String calloutType = null; // one of UNKNOWN, NUMBER, AUTHOR
public boolean calloutKnown = false; // true if the token match a known reference label
public boolean superscript = false;
public String printVector() {
if (string == null) return null;
if (string.length() == 0) return null;
StringBuffer res = new StringBuffer();
// token string (1)
res.append(string);
// lowercase string
res.append(" " + string.toLowerCase());
// prefix (4)
res.append(" " + TextUtilities.prefix(string, 1));
res.append(" " + TextUtilities.prefix(string, 2));
res.append(" " + TextUtilities.prefix(string, 3));
res.append(" " + TextUtilities.prefix(string, 4));
// suffix (4)
res.append(" " + TextUtilities.suffix(string, 1));
res.append(" " + TextUtilities.suffix(string, 2));
res.append(" " + TextUtilities.suffix(string, 3));
res.append(" " + TextUtilities.suffix(string, 4));
// at this stage, we have written 10 features
// block information (1)
res.append(" " + blockStatus);
// line information (1)
res.append(" " + lineStatus);
// line position/identation (1)
res.append(" " + alignmentStatus);
// font information (1)
res.append(" " + fontStatus);
// font size information (1)
res.append(" " + fontSize);
// string type information (3)
if (bold)
res.append(" 1");
else
res.append(" 0");
if (italic)
res.append(" 1");
else
res.append(" 0");
// capitalisation (1)
if (digit.equals("ALLDIGIT"))
res.append(" NOCAPS");
else
res.append(" " + capitalisation);
// digit information (1)
res.append(" " + digit);
// character information (1)
if (singleChar)
res.append(" 1");
else
res.append(" 0");
// at this stage, we have written 20 features
// punctuation information (1)
res.append(" " + punctType); // in case the token is a punctuation (NO otherwise)
// relative document position (1)
res.append(" " + relativeDocumentPosition);
// relative page position (1)
res.append(" " + relativePagePosition);
// proximity of a graphic to the current block (2)
if (bitmapAround)
res.append(" 1");
else
res.append(" 0");
/*if (vectorAround)
res.append(" 1");
else
res.append(" 0");*/
// space with previous block, discretised (1)
//res.append(" " + spacingWithPreviousBlock);
//res.append(" " + 0);
// character density of the previous block, discretised (1)
//res.append(" " + characterDensity);
//res.append(" " + 0);
// label - for training data (1)
/*if (label != null)
res.append(" " + label + "\n");
else
res.append(" 0\n");
*/
if (calloutType != null)
res.append(" " + calloutType);
else
res.append(" UNKNOWN");
if (calloutKnown)
res.append(" 1");
else
res.append(" 0");
if (superscript)
res.append(" 1");
else
res.append(" 0");
res.append("\n");
return res.toString();
}
}<|fim▁end|>
|
public boolean bitmapAround = false;
public boolean vectorAround = false;
// if a graphic is in close proximity of the current block, characteristics of this graphic
|
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>use std::ops::{Add, Sub, Mul, Div};
fn do_operation<F, V>(f: F, v1: V, v2: V) -> V
where
F: Fn(V, V) -> V,
V: Add + Sub + Mul + Div {
f(v1, v2)
}
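// Classic stack-based RPN evaluation: numbers are pushed; each operator pops
// the right operand first, then the left, and pushes the result back.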
pub fn rpn(raw: &str) -> f64 {
let mut stack = Vec::new();
for c in raw.split(' ') {
if let Some(i) = c.parse::<f64>().ok() {
stack.push(i);
continue;
}
let r = stack.pop().expect("Invalid equation. No numbers left in stack for the right side of the operation");
let l = stack.pop().expect("Invalid equation. No numbers left in stack for the left side of the operation");
let result = match c {
"+" => do_operation(|l, r| l + r, l, r),
"-" => do_operation(|l, r| l - r, l, r),
"*" => do_operation(|l, r| l * r, l, r),
"/" =>
{
if r == 0.0
{
panic!("Division by zero not allowed");
}
do_operation(|l, r| l / r, l, r)
},
_ => panic!("Unknown character {:?}", c),
};
stack.push(result);
}
if stack.len() != 1
{
panic!("Invalid equation. Wrong number of elements left in stack. Expected left: 1, actual: {:?}", stack.len());
}
stack.pop().unwrap()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn basic_integer() {
assert_eq!(0.25, rpn("14 4 6 8 + * /"));
assert_eq!(14.0, rpn("5 1 2 + 4 * + 3 -"));
assert_eq!(0.5, rpn("5 4 6 + /"));
assert_eq!(2.0, rpn("2 5 * 4 + 3 2 * 1 + /"));
}
#[test]
fn basic_floating_point() {
assert_eq!(20.04, rpn("5.5 1.3 2.3 + 4.9 * + 3.1 -"));
assert_eq!(11.25, rpn("1.5 3.0 4.5 + *"));<|fim▁hole|> #[test]
fn negative() {
assert_eq!(-2503.0, rpn("-4 -9 -33 -76 * + -"));
assert_eq!(2653660.0, rpn("-56 -34 + -54 * 43 23 54 + * -800 * -"));
}
#[test]
#[should_panic]
fn divide_by_zero() {
assert_eq!(-2503.0, rpn("2 0 /"));
}
#[test]
#[should_panic]
fn invalid_input_1() {
rpn("");
}
#[test]
#[should_panic]
fn invalid_input_2() {
rpn("14 4 6 8 + . /");
}
#[test]
#[should_panic]
fn invalid_input_3() {
rpn("POTATO");
}
#[test]
#[should_panic]
fn invalid_input_4() {
rpn("54 4 6 O + \\ /");
}
#[test]
fn long_equation() {
let mut eq = "2 ".to_string();
for _ in 0..2000000 {
eq.push_str("2 + ");
}
eq.push_str("1 +");
assert_eq!(4000003.0, rpn(&eq));
}
}<|fim▁end|>
|
}
|
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import sys, Tkinter, tkFont, ttk
sys.path.insert(0, "./src/")
import button, database
from config import *
# Note: need to set the size for bg_canvas here; otherwise it will grow, disregarding the size set when it was created!
def AuxscrollFunction(event):
bg_canvas.configure(scrollregion=bg_canvas.bbox("all"), height=THUMB_HEIGHT)
# create root
root = Tkinter.Tk()
root.geometry(str(WINDOW_WIDTH)+"x"+str(WINDOW_HEIGHT)+"+100+100")
root.minsize(width=WINDOW_WIDTH, height=WINDOW_HEIGHT)
root.title("Find Duplicated Photos")
Tkinter.Grid.columnconfigure(root, 0, weight=0)
Tkinter.Grid.columnconfigure(root, 1, weight=0)
Tkinter.Grid.columnconfigure(root, 2, weight=int(DISPLAY_WIDTH/INFO_WIDTH))
Tkinter.Grid.columnconfigure(root, 3, weight=0)
Tkinter.Grid.rowconfigure(root, 0, weight=int(DISPLAY_HEIGHT/THUMB_HEIGHT))
Tkinter.Grid.rowconfigure(root, 1, weight=0)
Tkinter.Grid.rowconfigure(root, 2, weight=0)
# create frame for displaying selected photo
display_photo_frame = Tkinter.Frame(root, height=DISPLAY_HEIGHT, width=DISPLAY_WIDTH)
display_photo_frame.grid(row=0, column=0, columnspan=3)
# create frame for displaying file info
display_photo_info_frame = Tkinter.Frame(root, height=DISPLAY_HEIGHT, width=INFO_WIDTH, background="white")
display_photo_info_frame.grid(row=0, column=3, sticky=Tkinter.E+Tkinter.W+Tkinter.N+Tkinter.S)
display_photo_info_frame.pack_propagate(False) # by default the frame will shrink to whatever is inside of it
# create background for scroll bar
bg_frame = Tkinter.Frame(root, height=THUMB_HEIGHT)
bg_frame.grid(row=1, column=0, columnspan=4, sticky=Tkinter.E+Tkinter.W+Tkinter.N+Tkinter.S)
bg_canvas = Tkinter.Canvas(bg_frame, background='white')
xscrollbar = Tkinter.Scrollbar(bg_frame, orient="horizontal", command=bg_canvas.xview)
xscrollbar.pack(side=Tkinter.BOTTOM, fill="x")
xscrollbar.grid_forget()
bg_canvas.configure(xscrollcommand=xscrollbar.set)
bg_canvas.pack(fill=Tkinter.BOTH, expand=True, pady=5)
# create frame for duplicated photo batch display
batch_photo_frame = Tkinter.Frame(bg_canvas, height=THUMB_HEIGHT, background='white')
bg_canvas.create_window((0,0),window=batch_photo_frame,anchor='nw')<|fim▁hole|># create photo database and loading progress bar
progress_bar = ttk.Progressbar(root, orient=Tkinter.HORIZONTAL, length=PROGRESS_BAR_LENGTH, mode='determinate')
progress_bar.grid(row=2, column=2, columnspan=2, sticky=Tkinter.E+Tkinter.W, padx=10)
db = database.Database(progress_bar)
# create buttons
#button_cfg = button.ConfigButton(root, db, 2, 3)
button_next = button.NextBatchButton(root, batch_photo_frame, display_photo_frame, display_photo_info_frame, db, 2, 1)
button_open = button.OpenFolderButton(root, batch_photo_frame, db, button_next, 2, 0)
root.mainloop()<|fim▁end|>
|
batch_photo_frame.bind("<Configure>", AuxscrollFunction)
# Note: don't pack batch_photo_frame here, otherwise scroll bar won't show!!!
|
<|file_name|>gauge.rs<|end_file_name|><|fim▁begin|>use std::f64;
use std::mem;
use std::sync::atomic::{AtomicU64, Ordering};
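// The gauge keeps the f64 bit pattern in an AtomicU64, with NaN doubling as
// the "unset" marker; the unsafe transmutes below predate the safe
// f64::to_bits / f64::from_bits equivalents.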
#[derive(Debug)]
pub struct Gauge {
value: AtomicU64,
}
impl Gauge {
pub fn new() -> Gauge {
let bits = unsafe { mem::transmute(f64::NAN) };
Gauge { value: AtomicU64::new(bits) }
}
pub fn clear(&mut self) {
let bits = unsafe { mem::transmute(f64::NAN) };
self.value.store(bits, Ordering::Relaxed);
}
pub fn set(&mut self, value: f64) {
let bits = unsafe { mem::transmute(value) };
self.value.store(bits, Ordering::Relaxed);
}
pub fn snapshot(&self) -> f64 {
let bits = self.value.load(Ordering::Relaxed);
unsafe { mem::transmute(bits) }
}
}
#[cfg(test)]
mod test {
use super::*;
use std::f64;
#[test]
fn snapshot() {
let mut c: Gauge = Gauge::new();
let s1 = c.snapshot();
c.set(1f64);
let s2 = c.snapshot();
assert!(f64::is_nan(s1));
assert!(s2 == 1f64);<|fim▁hole|> }
}<|fim▁end|>
| |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015 Ultimaker B.V.<|fim▁hole|>from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": catalog.i18nc("@label", "Cura Profile Reader"),
"author": "Ultimaker",
"version": "1.0",
"description": catalog.i18nc("@info:whatsthis", "Provides support for importing Cura profiles."),
"api": 3
},
"profile_reader": [
{
"extension": "curaprofile",
"description": catalog.i18nc("@item:inlistbox", "Cura Profile")
}
]
}
def register(app):
return { "profile_reader": CuraProfileReader.CuraProfileReader() }<|fim▁end|>
|
# Cura is released under the terms of the AGPLv3 or higher.
from . import CuraProfileReader
|
<|file_name|>extern-fail.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//<|fim▁hole|>// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:explicit failure
// Testing that runtime failure doesn't cause callbacks to abort abnormally.
// Instead the failure will be delivered after the callbacks return.
use core::old_iter;
mod rustrt {
pub extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
count(data - 1u) + count(data - 1u)
}
}
fn count(n: uint) -> uint {
unsafe {
task::yield();
rustrt::rust_dbg_call(cb, n)
}
}
fn main() {
for old_iter::repeat(10u) {
do task::spawn {
let result = count(5u);
debug!("result = %?", result);
fail!();
};
}
}<|fim▁end|>
| |
<|file_name|>Owner_0001.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
'''
Printmodel django module for condominium
@author: Laurent GAY
@organization: sd-libre.fr
@contact: [email protected]
@copyright: 2016 sd-libre.fr
@license: This file is part of Lucterios.
Lucterios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Lucterios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Lucterios. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from diacamma.condominium.models import Owner
name = _("owner")
kind = 2
modelname = Owner.get_long_name()
value = """
<model hmargin="10.0" vmargin="10.0" page_width="210.0" page_height="297.0">
<header extent="25.0">
<text height="10.0" width="120.0" top="0.0" left="70.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="20" font_family="sans-serif" font_weight="" font_size="20">
{[b]}#OUR_DETAIL.name{[/b]}
</text>
<text height="10.0" width="120.0" top="10.0" left="70.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="8" font_family="sans-serif" font_weight="" font_size="8">
{[italic]}
#OUR_DETAIL.address - #OUR_DETAIL.postal_code #OUR_DETAIL.city - #OUR_DETAIL.tel1 #OUR_DETAIL.tel2 #OUR_DETAIL.email{[br/]}#OUR_DETAIL.identify_number
{[/italic]}
</text>
<image height="25.0" width="30.0" top="0.0" left="10.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2">
#OUR_DETAIL.image
</image>
</header>
<bottom extent="10.0">
</bottom>
<body>
<text height="8.0" width="190.0" top="0.0" left="0.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="15" font_family="sans-serif" font_weight="" font_size="15">
{[b]}%(title)s{[/b]}
</text>
<text height="8.0" width="190.0" top="8.0" left="0.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="13" font_family="sans-serif" font_weight="" font_size="13">
#date_begin - #date_end
</text>
<text height="20.0" width="100.0" top="25.0" left="80.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}#third.contact.str{[/b]}{[br/]}#third.contact.address{[br/]}#third.contact.postal_code #third.contact.city
</text>
<text height="20.0" width="100.0" top="25.0" left="0.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(info)s{[/b]}: #information
</text>
<text height="10.0" width="75.0" top="45.0" left="00.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(current)s{[/b]}
</text>
<table height="40.0" width="75.0" top="55.0" left="00.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2">
<columns width="50.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="right" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="right" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[i]}%(value)s{[/i]}
</columns>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_initial)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_initial
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_call)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_call{[br/]}
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_payoff)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_payoff
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_owner)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_owner
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_regularization)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_regularization
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_ventilated)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_ventilated
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_recoverable_load)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_recoverable_load
</cell>
</rows>
</table>
<text height="10.0" width="75.0" top="45.0" left="100.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(exceptional)s{[/b]}
</text>
<table height="40.0" width="75.0" top="55.0" left="100.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2">
<columns width="50.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="right" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="right" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[i]}%(value)s{[/i]}
</columns>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_exceptional_initial)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_exceptional_initial
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_exceptional_call)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_exceptional_call{[br/]}
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_exceptional_payoff)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_exceptional_payoff
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_exceptional_owner)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_exceptional_owner
</cell>
</rows>
</table>
<table height="40.0" width="76.0" top="100.0" left="100.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2">
<columns width="30.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(set)s{[/b]}
</columns>
<columns width="12.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(ratio)s{[/b]}
</columns>
<columns width="17.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(total_callfunds)s{[/b]}
</columns>
<columns width="17.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(ventilated)s{[/b]}
</columns>
<rows data="exceptionnal_set">
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#set
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#ratio
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#total_callfunds
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#ventilated_txt
</cell>
</rows>
</table>
<text height="10.0" width="70.0" top="120.0" left="120.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(property)s{[/b]}
</text>
<table height="30.0" width="70.0" top="130.0" left="120.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2">
<columns width="10.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(num)s{[/b]}
</columns>
<columns width="15.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(value)s{[/b]}
</columns>
<columns width="12.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(ratio)s{[/b]}
</columns>
<columns width="33.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(description)s{[/b]}
</columns>
<rows data="propertylot_set">
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#num
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#value
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#ratio
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#description
</cell>
</rows>
</table>
<text height="10.0" width="110.0" top="120.0" left="00.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(partition)s{[/b]}
</text>
<table height="50.0" width="110.0" top="130.0" left="00.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2">
<columns width="30.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(set)s{[/b]}
</columns>
<columns width="15.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="9" font_family="sans-serif" font_weight="" font_size="8">
{[b]}%(budget)s{[/b]}
</columns>
<columns width="15.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="9" font_family="sans-serif" font_weight="" font_size="8">
{[b]}%(expense)s{[/b]}
</columns>
<columns width="15.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="9" font_family="sans-serif" font_weight="" font_size="8">
{[b]}%(value)s{[/b]}
</columns>
<columns width="12.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="9" font_family="sans-serif" font_weight="" font_size="8">
{[b]}%(ratio)s{[/b]}
</columns>
<columns width="15.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="9" font_family="sans-serif" font_weight="" font_size="8">
{[b]}%(ventilated)s{[/b]}
</columns>
<columns width="15.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="9" font_family="sans-serif" font_weight="" font_size="8">
{[b]}%(recover_load)s{[/b]}
</columns>
<rows data="partition_set">
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#set
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#set.budget_txt
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#set.sumexpense_txt
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#value
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#ratio
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#ventilated_txt
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#recovery_load_txt
</cell>
</rows>
</table>
<text height="10.0" width="175.0" top="150.0" left="10.0" padding="0.5" spacing="1.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(call of funds)s{[/b]}
</text>
<table height="20.0" width="175.0" top="160.0" left="10.0" padding="1.0" spacing="0.1" border_color="black" border_style="" border_width="0.2">
<columns width="10.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(num)s{[/b]}
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(date)s{[/b]}
</columns>
<columns width="90.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(comment)s{[/b]}
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(total)s{[/b]}
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(rest_to_pay)s{[/b]}
</columns>
<rows data="callfunds_set">
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#num
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#date
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#comment
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#total
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#supporting.total_rest_topay
</cell>
</rows>
</table>
<text height="10.0" width="150.0" top="180.0" left="20.0" padding="1.0" spacing="1.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(payments)s{[/b]}
</text>
<table height="20.0" width="150.0" top="190.0" left="20.0" padding="1.0" spacing="0.1" border_color="black" border_style="" border_width="0.2">
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(date)s{[/b]}
</columns>
<columns width="20.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(amount)s{[/b]}
</columns>
<columns width="20.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(mode)s{[/b]}
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(bank_account)s{[/b]}
</columns>
<columns width="30.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(reference)s{[/b]}
</columns>
<columns width="30.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(assignment)s{[/b]}
</columns>
<rows data="payments_set">
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#date
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#amount
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#mode
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#bank_account
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#reference
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#assignment
</cell>
</rows>
</table>
<text height="10.0" width="150.0" top="210.0" left="20.0" padding="1.0" spacing="1.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(payoffs)s{[/b]}
</text>
<table height="20.0" width="150.0" top="220.0" left="20.0" padding="1.0" spacing="0.1" border_color="black" border_style="" border_width="0.2">
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(date)s{[/b]}
</columns>
<columns width="20.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(amount)s{[/b]}
</columns>
<columns width="30.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(payer)s{[/b]}
</columns>
<columns width="20.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(mode)s{[/b]}
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(bank_account)s{[/b]}
</columns>
<columns width="30.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(reference)s{[/b]}
</columns>
<rows data="payoff_set">
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#date
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#value
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#payer
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#mode
</cell><|fim▁hole|> <cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#reference
</cell>
</rows>
</table>
</body>
</model>""" % {'title': _('Owner situation'), 'info': _('information'), 'call of funds': _('call of funds'), 'num': _('numeros'), 'date': _('date'), 'comment': _('comment'), 'total': _('total'),
'exceptional': _('exceptional'), 'current': _('current'),
'partition': _('partition'), 'set': _('set'), 'budget': _('budget'), 'expense': _('expense'), 'value': _('tantime'), 'ratio': _('ratio'),
'ventilated': _('ventilated'), 'recover_load': _('recover. load'), 'total_callfunds': _('total call for funds'), 'rest_to_pay': _('rest to pay'),
'property': _('property lot'), 'description': _('description'),
'payments': _('payments'), 'assignment': _('assignment'),
'payoffs': _('additional payoffs'), 'amount': _('amount'), 'payer': _('payer'), 'mode': _('mode'), 'bank_account': _('bank account'), 'reference': _('reference'),
'total_current_initial': _('current initial state'),
'total_current_call': _('current total call for funds'),
'total_current_payoff': _('current total payoff'),
'total_current_regularization': _('estimated regularization'),
'total_current_ventilated': _('current total ventilated'),
'total_recoverable_load': _('total recoverable load'),
'total_current_owner': _('current total owner'),
'total_exceptional_initial': _('exceptional initial state'),
'total_exceptional_call': _('exceptional total call for funds'),
'total_exceptional_payoff': _('exceptional total payoff'),
'total_exceptional_owner': _('exceptional total owner'),
}<|fim▁end|>
|
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#bank_account
</cell>
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|>/* jslint node: true */
'use strict';
var express = require('express'),
sections = require('./sections'),
http = require('http'),
expressLess = require('express-less'),
path = require('path');
/**
* Create server
*/
var app = express();
/**
* Configuration
*/
// all environments
app.set('port', process.env.PORT || 4000);
app.set('views', __dirname + '/sections');
app.set('view engine', 'jade');<|fim▁hole|>app.use(express.bodyParser());
app.use('/css', expressLess(__dirname + '/sections/_default/less'));
app.use(express.static(path.join(__dirname, 'public')));
app.use('/vendor', express.static(__dirname + '/bower_components'));
app.use(app.router);
/**
* Routes
*/
/*
* Start Server
*/
var server = http.createServer( app ),
io = require('socket.io')( server ),
params = {
server: app,
io: io
};
// Add the routes from the sections
sections( params );
// serve index and view partials
app.get('/', function (req, res) {
res.render('_default/index');
});
app.get(/\/html\/([\w\/]+)\.html/, function (req, res) {
var name = req.params[0];
res.render(name);
});
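// For example (assuming a matching view exists under sections/), a request
// for GET /html/foo/bar.html captures "foo/bar" and renders that template
// with the jade view engine configured above.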
server.listen(app.get('port'), function () {
console.log('Express app listening on port ' + app.get('port'));
});<|fim▁end|>
|
app.use(express.compress());
app.use(express.methodOverride());
|
<|file_name|>list.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import
import json
import logging
from pip._vendor import six
from pip._vendor.six.moves import zip_longest
from pip._internal.cli import cmdoptions
from pip._internal.cli.base_command import Command
from pip._internal.exceptions import CommandError
from pip._internal.index import PackageFinder
from pip._internal.utils.misc import (
dist_is_editable, get_installed_distributions,
)
from pip._internal.utils.packaging import get_installer
logger = logging.getLogger(__name__)
class ListCommand(Command):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
name = 'list'
usage = """
%prog [options]"""
summary = 'List installed packages.'
def __init__(self, *args, **kw):
super(ListCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-o', '--outdated',
action='store_true',
default=False,
help='List outdated packages')
cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
help='List uptodate packages')
cmd_opts.add_option(
'-e', '--editable',
action='store_true',
default=False,
help='List editable projects.')
cmd_opts.add_option(
'-l', '--local',
action='store_true',
default=False,
help=('If in a virtualenv that has global access, do not list '
'globally-installed packages.'),
)
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
cmd_opts.add_option(
'--format',
action='store',
dest='list_format',
default="columns",
choices=('columns', 'freeze', 'json'),
help="Select the output format among: columns (default), freeze, "
"or json",
)
cmd_opts.add_option(
'--not-required',
action='store_true',
dest='not_required',
help="List packages that are not dependencies of "
"installed packages.",
)
cmd_opts.add_option(
'--exclude-editable',
action='store_false',
dest='include_editable',
help='Exclude editable package from output.',
)
cmd_opts.add_option(
'--include-editable',
action='store_true',
dest='include_editable',
help='Include editable package from output.',<|fim▁hole|> cmdoptions.index_group, self.parser
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
session=session,
)
def run(self, options, args):
if options.outdated and options.uptodate:
raise CommandError(
"Options --outdated and --uptodate cannot be combined.")
packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=options.editable,
include_editables=options.include_editable,
)
# get_not_required must be called first in order to find and
# filter out all dependencies correctly. Otherwise a package
# can't be identified as a requirement, because some parent
# packages could already have been filtered out.
if options.not_required:
packages = self.get_not_required(packages, options)
if options.outdated:
packages = self.get_outdated(packages, options)
elif options.uptodate:
packages = self.get_uptodate(packages, options)
self.output_package_listing(packages, options)
def get_outdated(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version > dist.parsed_version
]
def get_uptodate(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version == dist.parsed_version
]
def get_not_required(self, packages, options):
dep_keys = set()
for dist in packages:
dep_keys.update(requirement.key for requirement in dist.requires())
return {pkg for pkg in packages if pkg.key not in dep_keys}
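# Sketch of the idea: if A requires B and both are installed, dep_keys
# ends up holding {"b"}, so only A survives the set comprehension above.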
def iter_packages_latest_infos(self, packages, options):
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.debug('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
for dist in packages:
typ = 'unknown'
all_candidates = finder.find_all_candidates(dist.key)
if not options.pre:
# Remove prereleases
all_candidates = [candidate for candidate in all_candidates
if not candidate.version.is_prerelease]
if not all_candidates:
continue
best_candidate = max(all_candidates,
key=finder._candidate_sort_key)
remote_version = best_candidate.version
if best_candidate.location.is_wheel:
typ = 'wheel'
else:
typ = 'sdist'
# This is dirty but makes the rest of the code much cleaner
dist.latest_version = remote_version
dist.latest_filetype = typ
yield dist
def output_package_listing(self, packages, options):
packages = sorted(
packages,
key=lambda dist: dist.project_name.lower(),
)
if options.list_format == 'columns' and packages:
data, header = format_for_columns(packages, options)
self.output_package_listing_columns(data, header)
elif options.list_format == 'freeze':
for dist in packages:
if options.verbose >= 1:
logger.info("%s==%s (%s)", dist.project_name,
dist.version, dist.location)
else:
logger.info("%s==%s", dist.project_name, dist.version)
elif options.list_format == 'json':
logger.info(format_for_json(packages, options))
def output_package_listing_columns(self, data, header):
# insert the header first: we need to know the size of column names
if len(data) > 0:
data.insert(0, header)
pkg_strings, sizes = tabulate(data)
# Create and add a separator.
if len(data) > 0:
pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes)))
for val in pkg_strings:
logger.info(val)
def tabulate(vals):
# From pfmoore on GitHub:
# https://github.com/pypa/pip/issues/3651#issuecomment-216932564
assert len(vals) > 0
sizes = [0] * max(len(x) for x in vals)
for row in vals:
sizes = [max(s, len(str(c))) for s, c in zip_longest(sizes, row)]
result = []
for row in vals:
display = " ".join([str(c).ljust(s) if c is not None else ''
for s, c in zip_longest(sizes, row)])
result.append(display)
return result, sizes
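# Rough illustration (hypothetical input) of what tabulate produces:
# tabulate([["Package", "Version"], ["pip", "18.0"]])
# -> (["Package Version", "pip     18.0   "], [7, 7])
# i.e. each cell is str()-ed and left-justified to its column's width.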
def format_for_columns(pkgs, options):
"""
Convert the package data into something usable
by output_package_listing_columns.
"""
running_outdated = options.outdated
# Adjust the header for the `pip list --outdated` case.
if running_outdated:
header = ["Package", "Version", "Latest", "Type"]
else:
header = ["Package", "Version"]
data = []
if options.verbose >= 1 or any(dist_is_editable(x) for x in pkgs):
header.append("Location")
if options.verbose >= 1:
header.append("Installer")
for proj in pkgs:
# if we're working on the 'outdated' list, separate out the
# latest_version and type
row = [proj.project_name, proj.version]
if running_outdated:
row.append(proj.latest_version)
row.append(proj.latest_filetype)
if options.verbose >= 1 or dist_is_editable(proj):
row.append(proj.location)
if options.verbose >= 1:
row.append(get_installer(proj))
data.append(row)
return data, header
def format_for_json(packages, options):
data = []
for dist in packages:
info = {
'name': dist.project_name,
'version': six.text_type(dist.version),
}
if options.verbose >= 1:
info['location'] = dist.location
info['installer'] = get_installer(dist)
if options.outdated:
info['latest_version'] = six.text_type(dist.latest_version)
info['latest_filetype'] = dist.latest_filetype
data.append(info)
return json.dumps(data)<|fim▁end|>
|
default=True,
)
index_opts = cmdoptions.make_option_group(
|
<|file_name|>FigureDetector.java<|end_file_name|><|fim▁begin|>package yuka.detectors;
import yuka.containers.News;
import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.net.URLConnection;
import java.util.HashMap;
import java.util.Map;
/**
* Created by imyuka on 18/08/2016.
*/
public class FigureDetector {
private Map<String, Integer> cmap;
private Map<String, Integer> pmap;
private Map<String, Integer> dim;
//private int width = 0;
//private int height = 0;
public FigureDetector(Map<String, String> figure) {
String url = figure.get(News.IMAGE_URL);
dim = getDimension(url);
String caption = figure.get(News.IMAGE_CAPTION);
cmap = ClubDetector.count(caption);
PlayerDetector pd = new PlayerDetector(cmap.keySet());
pmap = pd.count(caption);
}
public int getHeight() {
return dim.get("height");
}
public int getWidth() {
return dim.get("width");
}
public double getPercentage (double totalHeight) {
return (double) getWidth() / HeightDetector.COLUMN_WIDTH
* (getHeight() / totalHeight);
}
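// Illustrative only: with a hypothetical COLUMN_WIDTH of 600, a 300x200
// image in a 1000-unit-tall article covers (300/600) * (200/1000) = 0.1
// of one column.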
public Map<String, Integer> getClubMap() {
return cmap;
}
public Map<String, Integer> getPlayerMap() {
return pmap;
}
public static Map<String, Integer> getDimension (String source) {
//int[] dim = new int[]{0,0};
Map<String, Integer> dim = new HashMap<>();
dim.put("width", 0);
dim.put("height", 0);
try {
URL url = new URL(source);
URLConnection conn = url.openConnection();
// now you get the content length
/////int contentLength = conn.getContentLength();
// you can check size here using contentLength
InputStream in = conn.getInputStream();
BufferedImage image = ImageIO.read(in);
// you can get the size dimensions
//int width = image.getWidth();<|fim▁hole|> dim.put("width", image.getWidth());
dim.put("height", image.getHeight());
} catch (IOException e) {
System.out.println(e.getMessage());
}
return dim;
}
}<|fim▁end|>
|
//int height = image.getHeight();
|
<|file_name|>place.search.unit.ts<|end_file_name|><|fim▁begin|>import { assert } from "chai";
import * as helper from "./helper/index";
import { getConfig } from "../src/config/config";
const { defaults } = getConfig();
const searchDefaults = defaults.placesSearch;
const { Place } = helper;
describe("Place Model", () => {
before(async function () {
this.timeout(0);
await helper.clearPostcodeDb();
await helper.seedPostcodeDb();
});
after(async () => helper.clearPostcodeDb());
const searchMethods = [
{
name: "Prefix Search",
fn: Place.prefixSearch,
},
{
name: "Terms Search",
fn: Place.termsSearch,
},
];
const testQueries = ["taobh a chaolais", "llwyn y groes", "corston"];
searchMethods.forEach((method) => {
const fn = method.fn;
describe(`${method.name}`, () => {
testQueries.forEach((testQuery) => {
it(`finds exact matches on query: ${testQuery}`, async () => {
const results = await fn({ name: testQuery });
assert.equal(results.length, 1);
results.forEach(helper.isRawPlaceObject);
assert.equal(results[0].name_1_search, testQuery);
});
});
});
});
describe("_prefixSearch", () => {
const testQueries = ["be", "s", "br"];<|fim▁hole|> assert(results.length > 0);
results.forEach(helper.isRawPlaceObject);
results.forEach((result) => {
assert(result.name_1_search.startsWith(query));
});
});
});
});
describe("_termsSearch", () => {
it("matches prepositions like 'of'", async () => {
const results = await Place.termsSearch({ name: "of" });
assert(results.length > 0);
results.forEach(helper.isRawPlaceObject);
results.forEach((result) => {
assert(result.name_1_search.includes("of"));
});
});
});
describe("#search", () => {
it("returns a list of places for given search term", async () => {
const results = await Place.search({ name: "b" });
results.forEach(helper.isRawPlaceObject);
});
it("returns null if no query", async () => {
const results = await Place.search({});
assert.isNull(results);
});
it("is sensitive to limit", async () => {
const results = await Place.search({
name: "b",
limit: 1,
});
assert.equal(results.length, 1);
results.forEach(helper.isRawPlaceObject);
});
it("returns up to 10 results by default", async () => {
const results = await Place.search({ name: "b" });
assert.equal(results.length, 10);
results.forEach(helper.isRawPlaceObject);
});
it("sets limit to default maximum if it's greater than it", async () => {
const searchDefaultMax = searchDefaults.limit.MAX;
searchDefaults.limit.MAX = 5;
const results = await Place.search({
name: "b",
limit: 1000,
});
assert.equal(results.length, 5);
results.forEach(helper.isRawPlaceObject);
searchDefaults.limit.MAX = searchDefaultMax;
});
it("uses default limit if invalid limit supplied", async () => {
const results = await Place.search({
name: "b",
limit: -1,
});
assert.equal(results.length, 10);
results.forEach(helper.isRawPlaceObject);
});
it("searches with name_2", async () => {
const name = "East Kilbride";
const results = await Place.search({ name: name });
assert.equal(results.length, 1);
results.forEach(helper.isRawPlaceObject);
assert.equal(results[0].name_2, name);
});
describe("result specs", () => {
it("returns names with apostrophes", async () => {
const name = "Taobh a' Chaolais";
const results = await Place.search({
name: name.replace(/'/g, ""),
});
assert.equal(results.length, 1);
results.forEach(helper.isRawPlaceObject);
assert.equal(results[0].name_1, name);
});
it("returns names with non-ascii characters", async () => {
const name = "Mynydd-llêch";
const results = await Place.search({
name: name.replace("ê", "e"),
});
assert.equal(results.length, 1);
results.forEach(helper.isRawPlaceObject);
assert.equal(results[0].name_1, name);
});
it("returns names with hyphens", async () => {
const name = "Llwyn-y-groes";
const results = await Place.search({
name: name.replace(/-/g, " "),
});
assert.equal(results.length, 1);
results.forEach(helper.isRawPlaceObject);
assert.equal(results[0].name_1, name);
});
it("successfully matches where query is middle word", async () => {
const query = "of";
const results = await Place.search({
name: query,
});
assert(results.length > 0);
results.forEach(helper.isRawPlaceObject);
results.forEach((result) => {
assert(result.name_1_search.includes(query));
});
});
it("returns null if both prefix and terms search fail", async () => {
const query = "this is never gonna get matched";
const results = await Place.search({
name: query,
});
assert.isNull(results);
});
});
describe("query specs", () => {
it("is case insensitive", async () => {
const name = "Corston";
const results = await Place.search({
name: name.toUpperCase(),
});
assert.equal(results.length, 1);
results.forEach(helper.isRawPlaceObject);
assert.equal(results[0].name_1, name);
});
it("handles apostrophes", async () => {
const name = "Taobh a' Chaolais";
const results = await Place.search({
name: name,
});
assert.equal(results.length, 1);
results.forEach(helper.isRawPlaceObject);
assert.equal(results[0].name_1, name);
});
it("handles non-ascii characters", async () => {
const name = "Mynydd-llêch";
const results = await Place.search({ name: name });
assert.equal(results.length, 1);
results.forEach(helper.isRawPlaceObject);
assert.equal(results[0].name_1, name);
});
it("handles non-ascii character prefix searches", async () => {
const prefix = "Mynydd-llêc";
const name = "Mynydd-llêch";
const results = await Place.search({ name: prefix });
assert.equal(results.length, 1);
results.forEach(helper.isRawPlaceObject);
assert.equal(results[0].name_1, name);
});
it("handles hyphens as spaces", async () => {
const name = "Llwyn-y-groes";
const results = await Place.search({ name: name });
assert.equal(results.length, 1);
results.forEach(helper.isRawPlaceObject);
assert.equal(results[0].name_1, name);
});
});
});
});<|fim▁end|>
|
testQueries.forEach((query) => {
it(`finds incomplete words, like '${query}'`, async () => {
const results = await Place.prefixSearch({ name: query });
|
<|file_name|>dm.py<|end_file_name|><|fim▁begin|># ============================================================================
'''
This file is part of the lenstractor project.
Copyright 2012 David W. Hogg (NYU) and Phil Marshall (Oxford).
Description
-----------
General-purpose data management classes and functions:
* Order a pile of FITS files into scifiles and matching varfiles
* Read in a deck of postcard images in FITS files and return an
array of tractor image data structures.
'''
import numpy as np
import os,glob,string,pyfits,subprocess
from astrometry.util import util
import tractor
import lenstractor
# ============================================================================
# Parse filenames for sci and wht images:
def Riffle(filenames,vb=False):
if vb: print "Looking at",len(filenames),"files: ",filenames
# Break down file names. Naming convention: fruit_flavor.fits
fruits = []
flavors = []
for filename in set(filenames):
pieces = string.split(filename,'_')
fruits.append(string.join(pieces[0:-1],'_'))
flavors.append(string.split(pieces[-1],'.')[0])
if len(set(flavors)) > 2:
raise ValueError("ERROR: expecting 1 or 2 flavors of datafile, got more")
elif len(set(flavors)) == 0:
raise ValueError("ERROR: expecting 1 or 2 flavors of datafile, got none")
if 'sci' not in set(flavors):
raise ValueError("ERROR: expecting at least some files to be xxx_sci.fits")
if len(set(flavors)) == 1:
whttype = 'No-one_will_ever_choose_this_flavor'
else:
for x in (set(flavors) - set(['sci'])):
whttype = x
number = len(set(fruits))
scifiles = []
whtfiles = []
for fruit in set(fruits):
x = fruit+'_sci.fits'
if os.path.exists(x):
scifiles.append(x)
else:
scifiles.append(None)
x = fruit+'_'+whttype+'.fits'
if os.path.exists(x):
whtfiles.append(x)
else:
whtfiles.append(None)
if vb:
print "Riffled files into",number,"pair(s)"
if len(set(flavors)) == 1:
print "Only 1 flavor of file found, sci"
else:
print "2 flavors of file found: sci and",whttype
for i in range(number):
print " ",i+1,"th pair:",[scifiles[i],whtfiles[i]]
return scifiles,whtfiles
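# Quick sketch of the expected pairing (hypothetical filenames; the order of
# the pairs is not guaranteed, since the code iterates over a set):
# Riffle(['lens_sci.fits','lens_var.fits','field_sci.fits'])
# pairs lens_sci.fits with lens_var.fits and leaves field's var slot as None,
# because the 'fruit' is everything before the final underscore.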
# ============================================================================
# Read in data and organise into Tractor Image objects.
# Some of this is survey specific: subroutines to be stored in $survey.py.
def Deal(scifiles,varfiles,SURVEY='PS1',vb=False):
images = []
bands = []
epochs = []
centroids = []
total_mags = []
for scifile,varfile in zip(scifiles,varfiles):
name = scifile.replace('_sci.fits','')
if vb:
print " "
print "Making Tractor image from "+name+"_*.fits:"
# Read in sci and wht images. Note assumptions about file format:
sci,invvar,hdr,total_flux = Read_in_data(scifile,varfile,SURVEY=SURVEY,vb=vb)
if total_flux == 0.0:
print "No flux found in image from "+scifile
print "Skipping to next image!"
continue
# Initialize a PSF object (single Gaussian by default), first
# getting FWHM from somewhere. Start with FWHM a little small,
# then refine it:
if SURVEY=='PS1':
try:
FWHM = lenstractor.PS1_IQ(hdr)
except:
FWHM = 1.4
elif SURVEY=='KIDS':
FWHM = lenstractor.KIDS_IQ(hdr)
elif SURVEY=='SDSS':
try:
FWHM = lenstractor.SDSS_IQ(hdr)
except:
FWHM = 'NaN'
if FWHM == 'NaN':
print "Problem with initialising PSF for SDSS, using (1.4,0.4) default"
FWHM = 1.4/0.4
else:
raise ValueError('Unrecognised survey name '+SURVEY)
if vb: print " PSF FWHM =",FWHM,"pixels"
# MAGIC shrinkage factor:
shrink = 0.8
psf = Initial_PSF(shrink*FWHM)
if vb: print psf
# Now get the photometric calibration from the image header.
if SURVEY=='PS1':
try:
band,photocal = lenstractor.PS1_photocal(hdr)
except:
band,photocal = lenstractor.SDSS_photocal(hdr)
elif SURVEY=='KIDS':
band,photocal = lenstractor.KIDS_photocal(hdr)
elif SURVEY=='SDSS':
band,photocal = lenstractor.SDSS_photocal(hdr)
else:
print "Unrecognised survey name "+SURVEY+", assuming SDSS"
band,photocal = lenstractor.SDSS_photocal(hdr)
if vb: print photocal
bands.append(band)
if SURVEY=='PS1':
try:
epochs.append(lenstractor.PS1_epoch(hdr))
except:
epochs.append(lenstractor.SDSS_epoch(hdr))
elif SURVEY=='KIDS':
epochs.append(lenstractor.KIDS_epoch(hdr))
elif SURVEY=='SDSS':
epochs.append(lenstractor.SDSS_epoch(hdr))
# Use photocal to return a total magnitude:
total_mag = photocal.countsToMag(total_flux)
if vb: print "Total brightness of image (mag):",total_mag
total_mags.append(total_mag)
# Set up sky to be varied:
median = np.median(sci[invvar > 0])
sky = tractor.ConstantSky(median)
delta = 0.1*np.sqrt(1.0/np.sum(invvar))
assert delta > 0
sky.stepsize = delta
if vb: print sky
# Get WCS from FITS header:
if SURVEY=='PS1':
try:
wcs = lenstractor.PS1WCS(hdr)
except:
wcs = lenstractor.SDSSWCS(hdr)
elif SURVEY=='KIDS':
wcs = lenstractor.KIDSWCS(hdr)
else:
try:
wcs = lenstractor.SDSSWCS(hdr)
except:
wcs = lenstractor.SDSSWCS(hdr)
# if vb:
# print wcs
# Compute flux-weighted centroid, in world coordinates:
NX,NY = sci.shape
x = np.outer(np.ones(NX),np.linspace(0,NY-1,NY))
y = np.outer(np.linspace(0,NX-1,NX),np.ones(NY))
x0 = np.sum(sci*x)/np.sum(sci)
y0 = np.sum(sci*y)/np.sum(sci)
# BUG: this returns pretty much the image center, not
# the object center... Need a better object finder!
radec = wcs.pixelToPosition(x0,y0)
centroids.append(radec)
print "Flux centroid: ",radec
# Make a tractor Image object out of all this stuff, and add it to the array:
images.append(tractor.Image(data=sci, invvar=invvar, name=name,
psf=psf, wcs=wcs, sky=sky, photocal=photocal))
# Figure out the unique band names and epochs:
uniqbands = np.unique(np.array(bands))
if vb:
print " "
print "Read in",len(images),"image datasets"
print " in",len(uniqbands),"bands:",uniqbands
print " at",len(epochs),"epochs"
print " "
return images,centroids,np.array(total_mags),np.array(bands)
# ============================================================================
# Read in sci and wht images. Note assumptions about file format:
def Read_in_data(scifile,varfile,SURVEY='PS1',vb=False):
hdulist = pyfits.open(scifile)
sci = hdulist[0].data
hdr = hdulist[0].header
hdulist.close()
NX,NY = sci.shape
if (varfile is not None):
hdulist = pyfits.open(varfile)
var = hdulist[0].data
hdulist.close()<|fim▁hole|> # Get the flux-to-count conversion factor from header, at least in SDSS:
try:
tmpsurvey = hdr['ORIGIN']
if (tmpsurvey == 'SDSS'):
tempmtoc = hdr['NMGY']
else:
tempmtoc = 1.
except:
tempmtoc = 1.
background, diffimage = background/tempmtoc, diffimage/tempmtoc # units: counts
variance = np.median(diffimage*diffimage) # sky count variance
var = diffimage + variance # variance in the whole image number-counts
# Go again in fluxes
var = (tempmtoc**2)*var
# Ensure positivity:
var[var <= 0] = variance*(tempmtoc**2)
# Check image sizes...
assert sci.shape == var.shape
if SURVEY == 'KIDS':
# Var image is actually already an inverse variance image!
invvar = var.copy()
var = 1.0/invvar
else:
# Convert var to wht, and find median uncertainty as well:
# Regardless of maggy-count conversion, start again here:
invvar = 1.0/var
# Assign zero weight to var=nan, var<=0:
invvar[var != var] = 0.0
invvar[var <= 0] = 0.0
bad = np.where(invvar == 0)
# Zero out sci image where wht is 0.0:
sci[bad] = 0.0
assert(all(np.isfinite(sci.ravel())))
assert(all(np.isfinite(invvar.ravel())))
# Measure total flux in sci image:
# total_flux = np.sum(sci)
# background-subtracted
background = np.median(sci)
diffimage = sci - background
total_flux = np.sum(diffimage)
# Report on progress so far:
if vb:
print 'Science image:', sci.shape #, sci
print 'Total flux:', total_flux
print 'Variance image:', var.shape #, var
if total_flux != 0.0:
# Very rough estimates of background level and rms, never used:
good = np.where(invvar > 0)
sciback = np.median(sci[good])
scirms = np.sqrt(np.median(var[good]))
if vb:
print 'Useful variance range:', var[good].min(), var[good].max()
print 'Useful image median level:', sciback
print 'Useful image median pixel uncertainty:', scirms
return sci,invvar,hdr,total_flux
# ============================================================================
# Initialize a PSF object - by default, a single circularly symmetric Gaussian
# defined on same grid as sci image:
def Initial_PSF(FWHM,double=False):
# NB. FWHM of PSF is given in pixels.
if not double:
# Single Gaussian default:
w = np.array([1.0]) # amplitude at peak
mu = np.array([[0.0,0.0]]) # centroid position in pixels
var = (FWHM/2.35)**2.0
cov = np.array([[[var,0.0],[0.0,var]]]) # pixels^2, covariance matrix
else:
# Double Gaussian alternative:
w = np.array([1.0,1.0])
mu = np.array([[0.0,0.0],[0.0,0.0]])
var = (FWHM/2.35)**2.0
cov = np.array([[[1.0,0.0],[0.0,1.0]],[[var,0.0],[0.0,var]]])
return tractor.GaussianMixturePSF(w,mu,cov)
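# Worked example of the FWHM-to-covariance conversion used above: a Gaussian
# with FWHM = 2.35 pixels has sigma = FWHM/2.35 = 1.0 pixel, hence var = 1.0
# and cov = [[1.0, 0.0], [0.0, 1.0]] (circularly symmetric).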
# ============================================================================
# Compute suitable mean centroid and magnitudes in each band:
def Turnover(allbands,allmagnitudes,allcentroids,vb=False):
# Models need good initial fluxes to avoid wasting time getting these
# right. Take a quick look at the data to do this:
# 1) Get rough idea of object position from wcs of first image - works
# OK if all images are the same size and well registered, and the
# target is in the center of the field...
ra, dec = 0.0, 0.0
for radec in allcentroids:
ra += radec.ra
dec += radec.dec
ra, dec = ra/len(allcentroids), dec/len(allcentroids)
centroid = tractor.RaDecPos(ra,dec)
print "Mean flux-weighted centroid: ",centroid
# 2) Get rough idea of total object magnitudes from median of images
# in each filter. (Models have non-variable flux, by assumption!)
bandnames = np.unique(allbands)
magnitudes = np.zeros(len(bandnames))
for i,bandname in enumerate(bandnames):
index = np.where(allbands == bandname)
magnitudes[i] = np.median(allmagnitudes[index])
SED = tractor.Mags(order=bandnames, **dict(zip(bandnames,magnitudes)))
if vb: print "Mean SED: ",SED
return centroid,SED
# ============================================================================
if __name__ == '__main__':
if True:
# Basic test on lenstractor examples dir:
folder = os.environ['LENSTRACTOR_DIR']+'/examples'
inputfiles = glob.glob(os.path.join(folder,'*.fits'))
scifiles,varfiles = Riffle(inputfiles)<|fim▁end|>
|
else:
# Make a var image from the sci image...
background = np.median(sci)
diffimage = sci - background
|
<|file_name|>assignmentClientPlayer.js<|end_file_name|><|fim▁begin|>"use strict";
//
// Bot Player
// assignmentClientPlayer.js
// Created by Milad Nazeri on 2019-06-06
// Copyright 2019 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
(function() {
// *************************************
// START UTILITY FUNCTIONS
// *************************************
// #region UTILITY FUNCTIONS
// Keep trying to see if the manager is available before registering
var MANAGER_CHECK_RETRY_MS = 2000;
function searchForManager() {
if (manager) {
Messages.sendMessage(ASSIGNMENT_MANAGER_CHANNEL, JSON.stringify({
action: "REGISTER_ME",
uuid: scriptUUID
}));
return;
} else {
Messages.sendMessage(ASSIGNMENT_MANAGER_CHANNEL, JSON.stringify({
action: "ARE_YOU_THERE_MANAGER_ITS_ME_BOT"
}));
}
Script.setTimeout(function() {
searchForManager();
}, MANAGER_CHECK_RETRY_MS);
}
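// Rough sketch of the intended handshake, using the message names above:
// bot -> manager: ARE_YOU_THERE_MANAGER_ITS_ME_BOT (retried every 2s)
// manager -> bot: REGISTER_MANAGER (sets `manager` below)
// bot -> manager: REGISTER_ME { uuid } (sent on the next retry tick)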
// #endregion
// *************************************
// END UTILITY FUNCTIONS
// *************************************
// *************************************
// START CONSTS_AND_VARS
// *************************************
// #region CONSTS_AND_VARS
var ASSIGNMENT_MANAGER_CHANNEL = "ASSIGNMENT_MANAGER_CHANNEL";
var scriptUUID;
var player;
var manager;
// #endregion
// *************************************
// END CONSTS_AND_VARS
// *************************************
// *************************************
// START PLAYER
// *************************************
// #region PLAYER
// Player Class for the hfr recordings
function Player() {
this.isPlayingRecording = false;
this.recordingFilename = "";
}
// Play the bot
function play(fileToPlay) {
console.log("play playing " + JSON.stringify(fileToPlay));
this.recordingFilename = fileToPlay;
var _this = this;
Recording.loadRecording(fileToPlay, function(success, url) {
if (success) {
_this.isPlayingRecording = true;
Users.disableIgnoreRadius();
Agent.isAvatar = true;
Recording.setPlayFromCurrentLocation(false);
Recording.setPlayerUseDisplayName(true);
Recording.setPlayerUseHeadModel(false);
Recording.setPlayerUseAttachments(true);
Recording.setPlayerLoop(true);
Recording.setPlayerUseSkeletonModel(true);
Recording.setPlayerTime(0.0);
Recording.setPlayerVolume(0.5);
Recording.startPlaying();
} else {
console.log("Could not load recording " + fileToPlay);
_this.isPlayingRecording = false;
_this.recordingFilename = "";
// This should remove the avatars however they are coming back in as white spheres at the origin
// Agent.isAvatar = false;
}
});
}
// Stop the bot and remove
// #NOTE: Adding and removing the agent is currently causing issues. Using a work around for the meantime
function stop() {
console.log("Stop playing " + this.recordingFilename);
if (Recording.isPlaying()) {
Recording.stopPlaying();
// This looks like it's a platform bug that this can't be removed
// Agent.isAvatar = false;
}
this.isPlayingRecording = false;
}
// Check if the bot is playing
function isPlaying() {
return this.isPlayingRecording;
}
Player.prototype = {
play: play,
stop: stop,
isPlaying: isPlaying
};
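// Minimal usage sketch (hypothetical recording URL):
// var bot = new Player();
// bot.play("atp:/recordings/dance.hfr"); // loads, then loops the clip
// bot.isPlaying(); // true once loading succeeds
// bot.stop();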
// #endregion
// *************************************
// END PLAYER
// *************************************
// *************************************
// START MESSAGES
// *************************************
// #region MESSAGES
// Handle messages from the manager
var IGNORE_PLAYER_MESSAGES = ["REGISTER_ME", "ARE_YOU_THERE_MANAGER"];
var playInterval = null;
function onMessageReceived(channel, message, sender) {
if (channel !== ASSIGNMENT_MANAGER_CHANNEL ||
sender === scriptUUID) {
return;
}
try {
message = JSON.parse(message);
} catch (e) {
console.log("Cannot parse message object");
console.log(e);
return;
}
// Check the ignore list only after parsing: before JSON.parse, message
// is still a string, so message.action would always be undefined here.
if (IGNORE_PLAYER_MESSAGES.indexOf(message.action) > -1) {
return;
}
if (message.uuid !== scriptUUID) {
return;
}
switch (message.action){
case "PLAY":
if (!player.isPlaying()) {
player.play(message.fileToPlay);
} else {
console.log("Didn't start playing " + message.fileToPlay + " because already playing ");
}
break;
case "STOP":
if (player.isPlaying()) {
player.stop();
}
break;
case "REGISTER_MANAGER":
manager = true;
break;
default:
console.log("unrecongized action in assignmentClientPlayer.js");
break;
}
}
// #endregion
// *************************************
// END MESSAGES
// *************************************
// *************************************
// START MAIN
// *************************************
// #region MAIN
// Main function to run when player comes online
function startUp() {
scriptUUID = Agent.sessionUUID;
player = new Player();
Messages.messageReceived.connect(onMessageReceived);
Messages.subscribe(ASSIGNMENT_MANAGER_CHANNEL);
searchForManager();
Script.scriptEnding.connect(onEnding);
}
startUp();
// #endregion
// *************************************
// END MAIN
// *************************************
<|fim▁hole|> // START CLEANUP
// *************************************
// #region CLEANUP
function onEnding() {
player.stop();
Messages.messageReceived.disconnect(onMessageReceived);
Messages.unsubscribe(ASSIGNMENT_MANAGER_CHANNEL);
if (playInterval) {
Script.clearInterval(playInterval);
playInterval = null;
}
}
// #endregion
// *************************************
// END CLEANUP
// *************************************
})();<|fim▁end|>
|
// *************************************
|
<|file_name|>BindParameterMapperManagerTest.java<|end_file_name|><|fim▁begin|>package jp.co.future.uroborosql.parameter.mapper;
import static org.hamcrest.MatcherAssert.*;
import static org.hamcrest.Matchers.*;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.math.BigDecimal;
import java.sql.Connection;
import java.sql.Timestamp;
import java.text.ParseException;
import java.time.Clock;
import java.time.LocalDate;
import java.time.Month;
import java.time.ZoneId;
import java.util.Date;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import jp.co.future.uroborosql.parameter.mapper.legacy.DateToStringParameterMapper;
public class BindParameterMapperManagerTest {
private Clock clock;
@BeforeEach
public void setUp() {
this.clock = Clock.systemDefaultZone();
}
@Test
public void test() throws ParseException {
var parameterMapperManager = new BindParameterMapperManager(this.clock);
assertThat(parameterMapperManager.toJdbc(null, null), is(nullValue()));
assertThat(parameterMapperManager.toJdbc(true, null), is(true));
assertThat(parameterMapperManager.toJdbc((byte) 1, null), is((byte) 1));
assertThat(parameterMapperManager.toJdbc((short) 1, null), is((short) 1));
assertThat(parameterMapperManager.toJdbc(1, null), is(1));
assertThat(parameterMapperManager.toJdbc(1L, null), is(1L));
assertThat(parameterMapperManager.toJdbc(1F, null), is(1F));
assertThat(parameterMapperManager.toJdbc(1D, null), is(1D));
assertThat(parameterMapperManager.toJdbc(BigDecimal.TEN, null), is(BigDecimal.TEN));
assertThat(parameterMapperManager.toJdbc("A", null), is("A"));
assertThat(parameterMapperManager.toJdbc(new byte[] { 1, 2 }, null), is(new byte[] { 1, 2 }));
assertThat(parameterMapperManager.toJdbc(new java.sql.Date(1), null), is(new java.sql.Date(1)));
assertThat(parameterMapperManager.toJdbc(new java.sql.Time(1), null), is(new java.sql.Time(1)));
assertThat(parameterMapperManager.toJdbc(new java.sql.Timestamp(1), null), is(new java.sql.Timestamp(1)));
var array = newProxy(java.sql.Array.class);
assertThat(parameterMapperManager.toJdbc(array, null), is(array));
var ref = newProxy(java.sql.Ref.class);
assertThat(parameterMapperManager.toJdbc(ref, null), is(ref));
var blob = newProxy(java.sql.Blob.class);
assertThat(parameterMapperManager.toJdbc(blob, null), is(blob));
var clob = newProxy(java.sql.Clob.class);
assertThat(parameterMapperManager.toJdbc(clob, null), is(clob));
var sqlxml = newProxy(java.sql.SQLXML.class);
assertThat(parameterMapperManager.toJdbc(sqlxml, null), is(sqlxml));
var struct = newProxy(java.sql.Struct.class);
assertThat(parameterMapperManager.toJdbc(struct, null), is(struct));
var object = new Object();
assertThat(parameterMapperManager.toJdbc(object, null), is(object));
var date = Date.from(LocalDate.parse("2000-01-01").atStartOfDay(ZoneId.systemDefault()).toInstant());
assertThat(parameterMapperManager.toJdbc(date, null), is(new java.sql.Timestamp(date.getTime())));
assertThat(parameterMapperManager.toJdbc(Month.APRIL, null), is(4));<|fim▁hole|> @Test
public void testWithCustom() throws ParseException {
var original = new BindParameterMapperManager(this.clock);
original.addMapper(new EmptyStringToNullParameterMapper());
var mapper = new DateToStringParameterMapper();
original.addMapper(mapper);
var parameterMapperManager = new BindParameterMapperManager(original, this.clock);
var date = Date.from(LocalDate.parse("2000-01-01").atStartOfDay(this.clock.getZone()).toInstant());
assertThat(parameterMapperManager.toJdbc(date, null), is("20000101"));
assertThat(parameterMapperManager.canAcceptByStandard(date), is(true));
parameterMapperManager.removeMapper(mapper);
assertThat(parameterMapperManager.toJdbc(date, null), is(instanceOf(Timestamp.class)));
}
@Test
public void testCustom() {
var parameterMapperManager = new BindParameterMapperManager(this.clock);
parameterMapperManager.addMapper(new BindParameterMapper<String>() {
@Override
public Class<String> targetType() {
return String.class;
}
@Override
public Object toJdbc(final String original, final Connection connection,
final BindParameterMapperManager parameterMapperManager) {
return original.toLowerCase();
}
});
assertThat(parameterMapperManager.toJdbc("S", null), is("s"));
assertThat(parameterMapperManager.toJdbc(true, null), is(true));
}
@Test
public void testCustomWithClock() {
var parameterMapperManager = new BindParameterMapperManager(this.clock);
parameterMapperManager.addMapper(new BindParameterMapperWithClock<String>() {
private Clock clock;
@Override
public Class<String> targetType() {
return String.class;
}
@Override
public Object toJdbc(final String original, final Connection connection,
final BindParameterMapperManager parameterMapperManager) {
return original.toLowerCase();
}
@Override
public Clock getClock() {
return this.clock;
}
@Override
public void setClock(final Clock clock) {
this.clock = clock;
}
});
assertThat(parameterMapperManager.toJdbc("S", null), is("s"));
assertThat(parameterMapperManager.toJdbc(true, null), is(true));
}
interface ProxyContainer {
Object getOriginal();
}
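    // newProxy (below) is a small test double: it builds an instance that implements
    // both the requested JDBC interface and ProxyContainer, forwarding every call to
    // a plain Object so toJdbc() pass-through can be asserted without a real driver.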
@SuppressWarnings("unchecked")
private static <I> I newProxy(final Class<I> interfaceType) {
var o = new Object();
Method getOriginal;
try {
getOriginal = ProxyContainer.class.getMethod("getOriginal");
} catch (NoSuchMethodException | SecurityException e) {
throw new AssertionError(e);
}
var proxyInstance = (I) Proxy.newProxyInstance(Thread.currentThread().getContextClassLoader(), new Class<?>[] {
interfaceType, ProxyContainer.class }, (proxy, method, args) -> {
if (getOriginal.equals(method)) {
return o;
}
                    // args is null when a zero-argument method is invoked through the proxy
                    if (args != null) {
                        for (int i = 0; i < args.length; i++) {
                            if (args[i] instanceof ProxyContainer) {
                                args[i] = ((ProxyContainer) args[i]).getOriginal();
                            }
                        }
                    }
return method.invoke(o, args);
});
return proxyInstance;
}
}<|fim▁end|>
|
}
|
<|file_name|>run.py<|end_file_name|><|fim▁begin|>from mainapp import create_app<|fim▁hole|> app.run(host='0.0.0.0')<|fim▁end|>
|
app = create_app()
if __name__ == '__main__':
|
<|file_name|>0006_chg_field_feed_title.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Feed.title'
db.alter_column('feedmanager_feed', 'title', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Changing field 'Feed.title'
db.alter_column('feedmanager_feed', 'title', self.gf('django.db.models.fields.CharField')(max_length=70))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'feedmanager.feed': {
'Meta': {'object_name': 'Feed'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.TextField', [], {}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'})
},
'feedmanager.item': {
'Meta': {'object_name': 'Item'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feedmanager.Feed']"}),
'guid': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.TextField', [], {}),
'pubdate': ('django.db.models.fields.DateTimeField', [], {}),<|fim▁hole|>
complete_apps = ['feedmanager']<|fim▁end|>
|
'title': ('django.db.models.fields.CharField', [], {'max_length': '70'})
}
}
|
<|file_name|>frosted_checker.py<|end_file_name|><|fim▁begin|># # # # # # # # # # # # # #
# CAPTAINHOOK IDENTIFIER #
# # # # # # # # # # # # # #
from .utils import bash, filter_python_files
DEFAULT = 'off'
CHECK_NAME = 'frosted'
NO_FROSTED_MSG = (
"frosted is required for the frosted plugin.\n"
"`pip install frosted` or turn it off in your tox.ini file.")
REQUIRED_FILES = ['tox.ini']
<|fim▁hole|> try:
import frosted # NOQA
except ImportError:
return NO_FROSTED_MSG
py_files = filter_python_files(files)
cmd = 'frosted {0}'.format(' '.join(py_files))
return bash(cmd).value()<|fim▁end|>
|
def run(files, temp_folder):
"Check frosted errors in the code base."
|
<|file_name|>d095.cpp<|end_file_name|><|fim▁begin|>#include <iostream>
#include <string>
#include <iomanip>
using namespace std;
int main()
{
string a;
double h,m,x;
while(cin>>a)
{
if(a=="0:00")break;
if(a[1]==':')
{
h=a[0]-'0';
m=(a[2]-'0')*10+(a[3]-'0');
}
else if(a[2]==':')
{
h=(a[0]-'0')*10+(a[1]-'0');
<|fim▁hole|> m=(a[3]-'0')*10+(a[4]-'0');
}
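        // The hour hand moves 30 degrees per hour plus 0.5 per minute, the
        // minute hand 6 degrees per minute, so the separation below is
        // 30*h + m/2 - 6*m, folded into [0, 180] by the two checks that follow.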
x=30*h+m/2-6*m;
if(x<0)
{
x=-x;
}
if(x>180)
{
x=360-x;
}
cout<<fixed<<setprecision(3)<<x<<endl;
}
}<|fim▁end|>
| |
<|file_name|>gather-2.6.x.py<|end_file_name|><|fim▁begin|>from twilio.twiml.voice_response import Gather, VoiceResponse
response = VoiceResponse()
response.gather()
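# Rendered TwiML should look like (sketch):
#   <?xml version="1.0" encoding="UTF-8"?><Response><Gather /></Response>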
<|fim▁hole|><|fim▁end|>
|
print(response)
|
<|file_name|>must-authorize.js<|end_file_name|><|fim▁begin|><|fim▁hole|>const appErrorsFactory = require('../../app-errors/app-errors-factory');
/**
 * Express-style middleware: rejects unauthenticated requests with a 401
 * application error, otherwise passes control to the next handler.
 */
function setup(req, res, next) {
if (!req.currentUser) {
next(appErrorsFactory.createAppError({ statusCode: 401 }));
}
else {
next();
}
}
module.exports = setup;<|fim▁end|>
|
'use strict';
|
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>from datetime import date, timedelta, datetime
import time
def get_first_day(dt, d_years=0, d_months=0):<|fim▁hole|>
def get_last_day(dt):
return get_first_day(dt, 0, 1) + timedelta(-1)
def str_to_date(value):
"""
	Convert string to datetime object
"""
if not value:
return value
if value.__class__.__name__ in ['date']:
return value
return datetime.strptime(value, "%Y-%m-%d").date()<|fim▁end|>
|
# d_years, d_months are "deltas" to apply to dt
y, m = dt.year + d_years, dt.month + d_months
a, m = divmod(m-1, 12)
return date(y+a, m+1, 1)
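    # Sketch: get_first_day(date(2015, 3, 15)) -> date(2015, 3, 1), and with
    # d_months=1 the divmod rolls over year ends, e.g. Dec 2015 -> date(2016, 1, 1).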
|
<|file_name|>vec.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test
// compile-flags:-Z extra-debug-info
// debugger:set print pretty off
// debugger:break 29<|fim▁hole|>// debugger:print b.vec[0]
// check:$2 = 4
// debugger:print c->boxed.data[1]
// check:$3 = 8
// debugger:print d->boxed.data[2]
// check:$4 = 12
fn main() {
let a = [1, 2, 3];
let b = &[4, 5, 6];
let c = @[7, 8, 9];
let d = ~[10, 11, 12];
let _z = 0;
}<|fim▁end|>
|
// debugger:run
// debugger:print a
// check:$1 = {1, 2, 3}
|
<|file_name|>gen_prime.py<|end_file_name|><|fim▁begin|>import argparse
import sys
# Sieve of Eratosthenes
# Code by David Eppstein, UC Irvine, 28 Feb 2002
# http://code.activestate.com/recipes/117119/
def gen_primes():
""" Generate an infinite sequence of prime numbers.
"""
# Maps composites to primes witnessing their compositeness.
# This is memory efficient, as the sieve is not "run forward"
# indefinitely, but only as long as required by the current
# number being tested.
#
D = {}
# The running integer that's checked for primeness
q = 2
while True:
if q not in D:
# q is a new prime.
# Yield it and mark its first multiple that isn't
# already marked in previous iterations
#
yield q
D[q * q] = [q]
else:
# q is composite. D[q] is the list of primes that
# divide it. Since we've reached q, we no longer
# need it in the map, but we'll mark the next
# multiples of its witnesses to prepare for larger
# numbers
#
            for p in D[q]:
                D.setdefault(p + q, []).append(p)
            # Delete only after the loop; deleting D[q] inside it would raise
            # KeyError on the second witness.
            del D[q]
q += 1
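# Usage sketch: primes are produced lazily, e.g.
#   from itertools import islice
#   list(islice(gen_primes(), 5))  # -> [2, 3, 5, 7, 11]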
<|fim▁hole|> parser.add_argument('n', metavar='N', nargs=1, type=int, help="Limit value")
group = parser.add_mutually_exclusive_group()
group.add_argument('--count', action='store_const', const=True,
default=False, help='limit number of generated prime number (default)')
group.add_argument('--value', action='store_const', const=True,
default=False, help='limit max value of generated prime number')
args = parser.parse_args()
if args.value:
limit = args.n[0]
else:
limit = args.n[0]-2
prime = iter(gen_primes())
sys.stdout.write("{"+str(prime.next()))
for idx, val in enumerate(prime):
if args.value and limit < val:
break
elif limit < idx:
break
sys.stdout.write(", "+str(val))
print("}")<|fim▁end|>
|
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate prime number array")
|
<|file_name|>test_init.py<|end_file_name|><|fim▁begin|>import pytest<|fim▁hole|>
@pytest.fixture
def abstract_serial_server():
return AbstractSerialServer()
def test_abstract_serial_server_get_meta_data(abstract_serial_server):
""" Test if meta data is correctly extracted from request. """
assert abstract_serial_server.get_meta_data(b'\x01x\02\x03') ==\
{'unit_id': 1}
def test_abstract_serial_server_shutdown(abstract_serial_server):
assert abstract_serial_server._shutdown_request is False
abstract_serial_server.shutdown()
assert abstract_serial_server._shutdown_request is True<|fim▁end|>
|
from umodbus.server.serial import AbstractSerialServer
|
<|file_name|>xyz.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
'''XYZ file format'''
import numpy as np
from horton.units import angstrom
from horton.periodic import periodic
__all__ = ['load_xyz', 'dump_xyz']
def load_xyz(filename):
'''Load a molecular geometry from a .xyz file.
**Argument:**
filename
The file to load the geometry from
    **Returns:** dictionary with ``title``, ``coordinates`` and ``numbers``.
'''
f = file(filename)
size = int(f.next())
title = f.next().strip()
coordinates = np.empty((size, 3), float)
numbers = np.empty(size, int)
for i in xrange(size):
words = f.next().split()
numbers[i] = periodic[words[0]].number
coordinates[i,0] = float(words[1])*angstrom
coordinates[i,1] = float(words[2])*angstrom
coordinates[i,2] = float(words[3])*angstrom
f.close()
return {
'title': title,
'coordinates': coordinates,
'numbers': numbers
}
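# The .xyz layout assumed above: line 1 is the atom count, line 2 a free-form
# title, then one "symbol x y z" line per atom with coordinates in angstroms.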
<|fim▁hole|> '''Write an ``.xyz`` file.
**Arguments:**
filename
            The name of the file to be written. This usually has the extension
".xyz".
data
An IOData instance. Must contain ``coordinates`` and ``numbers``.
May contain ``title``.
'''
with open(filename, 'w') as f:
print >> f, data.natom
print >> f, getattr(data, 'title', 'Created with HORTON')
for i in xrange(data.natom):
n = periodic[data.numbers[i]].symbol
x, y, z = data.coordinates[i]/angstrom
print >> f, '%2s %15.10f %15.10f %15.10f' % (n, x, y, z)<|fim▁end|>
|
def dump_xyz(filename, data):
|
<|file_name|>sphero_multiple.go<|end_file_name|><|fim▁begin|>// +build example
//
// Do not build by default.
package main
import (
"fmt"
"time"
"gobot.io/x/gobot"
"gobot.io/x/gobot/api"
"gobot.io/x/gobot/platforms/sphero"
)
func NewSwarmBot(port string) *gobot.Robot {
spheroAdaptor := sphero.NewAdaptor(port)
spheroDriver := sphero.NewSpheroDriver(spheroAdaptor)
spheroDriver.SetName("Sphero" + port)
work := func() {
spheroDriver.Stop()<|fim▁hole|>
gobot.Every(1*time.Second, func() {
spheroDriver.Roll(100, uint16(gobot.Rand(360)))
})
gobot.Every(3*time.Second, func() {
spheroDriver.SetRGB(uint8(gobot.Rand(255)),
uint8(gobot.Rand(255)),
uint8(gobot.Rand(255)),
)
})
}
robot := gobot.NewRobot("sphero",
[]gobot.Connection{spheroAdaptor},
[]gobot.Device{spheroDriver},
work,
)
return robot
}
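// Sketch of the swarm wiring: each serial port gets its own Robot, and the
// shared Master (below) runs them side by side while the api package serves
// gobot's HTTP API (on its default port, assumed to be 3000).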
func main() {
master := gobot.NewMaster()
api.NewAPI(master).Start()
spheros := []string{
"/dev/rfcomm0",
"/dev/rfcomm1",
"/dev/rfcomm2",
"/dev/rfcomm3",
}
for _, port := range spheros {
master.AddRobot(NewSwarmBot(port))
}
master.Start()
}<|fim▁end|>
|
spheroDriver.On(sphero.Collision, func(data interface{}) {
fmt.Println("Collision Detected!")
})
|
<|file_name|>magma_csolverinfo.cpp<|end_file_name|><|fim▁begin|>/*
-- MAGMA (version 1.6.2) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date May 2015
@generated from magma_zsolverinfo.cpp normal z -> c, Sun May 3 11:23:01 2015
@author Hartwig Anzt
*/
#include "common_magmasparse.h"
#define RTOLERANCE lapackf77_slamch( "E" )
#define ATOLERANCE lapackf77_slamch( "E" )
/**
Purpose
-------
Prints information about a previously called solver.
Arguments
---------
@param[in]
solver_par magma_c_solver_par*
structure containing all solver information
@param[in,out]
precond_par magma_c_preconditioner*
structure containing all preconditioner information
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_csolverinfo(
magma_c_solver_par *solver_par,
magma_c_preconditioner *precond_par,
magma_queue_t queue )
{
if( solver_par->verbose > 0 ){
magma_int_t k = solver_par->verbose;
printf("%%======================================================="
"======%%\n");
switch( solver_par->solver ) {
case Magma_CG:
printf("%% CG performance analysis every %d iteration\n",
(int) k); break;
case Magma_PCG:
printf("%% CG performance analysis every %d iteration\n",
(int) k); break;
case Magma_CGMERGE:<|fim▁hole|> (int) k); break;
case Magma_BICGSTAB:
printf("%% BiCGSTAB performance analysis every %d iteration\n",
(int) k); break;
case Magma_PBICGSTAB:
printf("%% BiCGSTAB performance analysis every %d iteration\n",
(int) k); break;
case Magma_BICGSTABMERGE:
printf("%% BiCGSTAB (merged) performance analysis every %d iteration\n",
(int) k); break;
case Magma_BICGSTABMERGE2:
printf("%% BiCGSTAB (merged) performance analysis every %d iteration\n",
(int) k); break;
case Magma_GMRES:
printf("%% GMRES(%d) performance analysis every %d iteration\n",
(int) solver_par->restart, (int) k); break;
case Magma_PGMRES:
printf("%% GMRES(%d) performance analysis every %d iteration\n",
(int) solver_par->restart, (int) k); break;
case Magma_ITERREF:
printf("%% Iterative refinement performance analysis every %d iteration\n",
(int) k); break;
default:
printf("%% Detailed performance analysis not supported.\n"); break;
}
switch( precond_par->solver ) {
case Magma_CG:
printf("%% Preconditioner used: CG.\n"); break;
case Magma_BICGSTAB:
printf("%% Preconditioner used: BiCGSTAB.\n"); break;
case Magma_GMRES:
printf("%% Preconditioner used: GMRES.\n"); break;
case Magma_JACOBI:
printf("%% Preconditioner used: Jacobi.\n"); break;
case Magma_BAITER:
printf("%% Preconditioner used: Block-asynchronous iteration.\n"); break;
case Magma_ILU:
printf("%% Preconditioner used: ILU(%d).\n", precond_par->levels); break;
case Magma_AILU:
printf("%% Preconditioner used: iterative ILU(%d).\n", precond_par->levels); break;
case Magma_ICC:
printf("%% Preconditioner used: IC(%d).\n", precond_par->levels); break;
case Magma_AICC:
printf("%% Preconditioner used: iterative IC(%d).\n", precond_par->levels); break;
default:
break;
}
printf("%%======================================================="
"======%%\n");
switch( solver_par->solver ) {
case Magma_CG:
printf("%% iter || residual-nrm2 || runtime \n");
printf("%%======================================================="
"======%%\n");
for( int j=0; j<(solver_par->numiter)/k+1; j++ ) {
printf(" %4d || %e || %f\n",
(int) (j*k), solver_par->res_vec[j], solver_par->timing[j]);
}
printf("%%======================================================="
"======%%\n"); break;
case Magma_PCG:
printf("%% iter || residual-nrm2 || runtime \n");
printf("%%======================================================="
"======%%\n");
for( int j=0; j<(solver_par->numiter)/k+1; j++ ) {
printf(" %4d || %e || %f\n",
(int) (j*k), solver_par->res_vec[j], solver_par->timing[j]);
}
printf("%%======================================================="
"======%%\n"); break;
case Magma_CGMERGE:
printf("%% iter || residual-nrm2 || runtime \n");
printf("%%======================================================="
"======%%\n");
for( int j=0; j<(solver_par->numiter)/k+1; j++ ) {
printf(" %4d || %e || %f\n",
(int) (j*k), solver_par->res_vec[j], solver_par->timing[j]);
}
printf("%%======================================================="
"======%%\n"); break;
case Magma_BICGSTAB:
printf("%% iter || residual-nrm2 || runtime \n");
printf("%%======================================================="
"======%%\n");
for( int j=0; j<(solver_par->numiter)/k+1; j++ ) {
printf(" %4d || %e || %f\n",
(int) (j*k), solver_par->res_vec[j], solver_par->timing[j]);
}
printf("%%======================================================="
"======%%\n"); break;
case Magma_PBICGSTAB:
printf("%% iter || residual-nrm2 || runtime \n");
printf("%%======================================================="
"======%%\n");
for( int j=0; j<(solver_par->numiter)/k+1; j++ ) {
printf(" %4d || %e || %f\n",
(int) (j*k), solver_par->res_vec[j], solver_par->timing[j]);
}
printf("%%======================================================="
"======%%\n"); break;
case Magma_BICGSTABMERGE:
printf("%% iter || residual-nrm2 || runtime \n");
printf("%%======================================================="
"======%%\n");
for( int j=0; j<(solver_par->numiter)/k+1; j++ ) {
printf(" %4d || %e || %f\n",
(int) (j*k), solver_par->res_vec[j], solver_par->timing[j]);
}
printf("%%======================================================="
"======%%\n"); break;
case Magma_BICGSTABMERGE2:
printf("%% iter || residual-nrm2 || runtime \n");
printf("%%======================================================="
"======%%\n");
for( int j=0; j<(solver_par->numiter)/k+1; j++ ) {
printf(" %4d || %e || %f\n",
(int) (j*k), solver_par->res_vec[j], solver_par->timing[j]);
}
printf("%%======================================================="
"======%%\n"); break;
case Magma_GMRES:
printf("%% iter || residual-nrm2 || runtime \n");
printf("%%======================================================="
"======%%\n");
for( int j=0; j<(solver_par->numiter)/k+1; j++ ) {
printf(" %4d || %e || %f\n",
(int) (j*k), solver_par->res_vec[j], solver_par->timing[j]);
}
printf("%%======================================================="
"======%%\n"); break;
case Magma_PGMRES:
printf("%% iter || residual-nrm2 || runtime \n");
printf("%%======================================================="
"======%%\n");
for( int j=0; j<(solver_par->numiter)/k+1; j++ ) {
printf(" %4d || %e || %f\n",
(int) (j*k), solver_par->res_vec[j], solver_par->timing[j]);
}
printf("%%======================================================="
"======%%\n"); break;
case Magma_ITERREF:
printf("%% iter || residual-nrm2 || runtime \n");
printf("%%======================================================="
"======%%\n");
for( int j=0; j<(solver_par->numiter)/k+1; j++ ) {
printf(" %4d || %e || %f\n",
(int) (j*k), solver_par->res_vec[j], solver_par->timing[j]);
}
printf("%%======================================================="
"======%%\n"); break;
default:
printf("%%======================================================="
"======%%\n"); break;
}
}
printf("\n%%======================================================="
"======%%\n");
switch( solver_par->solver ) {
case Magma_CG:
printf("%% CG solver summary:\n"); break;
case Magma_PCG:
printf("%% PCG solver summary:\n"); break;
case Magma_CGMERGE:
printf("%% CG solver summary:\n"); break;
case Magma_BICGSTAB:
printf("%% BiCGSTAB solver summary:\n"); break;
case Magma_PBICGSTAB:
printf("%% PBiCGSTAB solver summary:\n"); break;
case Magma_BICGSTABMERGE:
printf("%% BiCGSTAB solver summary:\n"); break;
case Magma_BICGSTABMERGE2:
printf("%% BiCGSTAB solver summary:\n"); break;
case Magma_GMRES:
printf("%% GMRES(%d) solver summary:\n", solver_par->restart); break;
case Magma_PGMRES:
printf("%% PGMRES(%d) solver summary:\n", solver_par->restart); break;
case Magma_ITERREF:
printf("%% Iterative refinement solver summary:\n"); break;
case Magma_JACOBI:
printf("%% CG solver summary:\n"); break;
case Magma_BAITER:
printf("%% Block-asynchronous iteration solver summary:\n"); break;
case Magma_LOBPCG:
printf("%% LOBPCG iteration solver summary:\n"); break;
default:
printf("%% Solver info not supported.\n"); goto cleanup;
}
printf("%% initial residual: %e\n", solver_par->init_res );
printf("%% iterations: %4d\n", (int) (solver_par->numiter) );
printf("%% exact final residual: %e\n%% runtime: %.4f sec\n",
solver_par->final_res, solver_par->runtime);
cleanup:
printf("%%======================================================="
"======%%\n");
return MAGMA_SUCCESS;
}
/**
Purpose
-------
    Frees any memory associated with the verbose mode of solver_par. The
other values are set to default.
Arguments
---------
@param[in,out]
solver_par magma_c_solver_par*
structure containing all solver information
@param[in,out]
precond_par magma_c_preconditioner*
structure containing all preconditioner information
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
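// Typical lifecycle (sketch): magma_csolverinfo_init() before the solve,
// magma_csolverinfo() to report results, then magma_csolverinfo_free() to
// release the res_vec/timing buffers and any preconditioner factors.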
extern "C" magma_int_t
magma_csolverinfo_free(
magma_c_solver_par *solver_par,
magma_c_preconditioner *precond_par,
magma_queue_t queue )
{
solver_par->init_res = 0.0;
solver_par->iter_res = 0.0;
solver_par->final_res = 0.0;
if ( solver_par->res_vec != NULL ) {
magma_free_cpu( solver_par->res_vec );
solver_par->res_vec = NULL;
}
if ( solver_par->timing != NULL ) {
magma_free_cpu( solver_par->timing );
solver_par->timing = NULL;
}
if ( solver_par->eigenvectors != NULL ) {
magma_free( solver_par->eigenvectors );
solver_par->eigenvectors = NULL;
}
if ( solver_par->eigenvalues != NULL ) {
magma_free_cpu( solver_par->eigenvalues );
solver_par->eigenvalues = NULL;
}
if ( precond_par->d.val != NULL ) {
magma_free( precond_par->d.val );
precond_par->d.val = NULL;
}
if ( precond_par->d2.val != NULL ) {
magma_free( precond_par->d2.val );
precond_par->d2.val = NULL;
}
if ( precond_par->work1.val != NULL ) {
magma_free( precond_par->work1.val );
precond_par->work1.val = NULL;
}
if ( precond_par->work2.val != NULL ) {
magma_free( precond_par->work2.val );
precond_par->work2.val = NULL;
}
if ( precond_par->M.val != NULL ) {
if ( precond_par->M.memory_location == Magma_DEV )
magma_free( precond_par->M.dval );
else
magma_free_cpu( precond_par->M.val );
precond_par->M.val = NULL;
}
if ( precond_par->M.col != NULL ) {
if ( precond_par->M.memory_location == Magma_DEV )
magma_free( precond_par->M.dcol );
else
magma_free_cpu( precond_par->M.col );
precond_par->M.col = NULL;
}
if ( precond_par->M.row != NULL ) {
if ( precond_par->M.memory_location == Magma_DEV )
magma_free( precond_par->M.drow );
else
magma_free_cpu( precond_par->M.row );
precond_par->M.row = NULL;
}
if ( precond_par->M.blockinfo != NULL ) {
magma_free_cpu( precond_par->M.blockinfo );
precond_par->M.blockinfo = NULL;
}
if ( precond_par->L.val != NULL ) {
if ( precond_par->L.memory_location == Magma_DEV )
magma_free( precond_par->L.dval );
else
magma_free_cpu( precond_par->L.val );
precond_par->L.val = NULL;
}
if ( precond_par->L.col != NULL ) {
if ( precond_par->L.memory_location == Magma_DEV )
            magma_free( precond_par->L.dcol );
        else
            magma_free_cpu( precond_par->L.col );
precond_par->L.col = NULL;
}
if ( precond_par->L.row != NULL ) {
if ( precond_par->L.memory_location == Magma_DEV )
magma_free( precond_par->L.drow );
else
magma_free_cpu( precond_par->L.row );
precond_par->L.row = NULL;
}
if ( precond_par->L.blockinfo != NULL ) {
magma_free_cpu( precond_par->L.blockinfo );
precond_par->L.blockinfo = NULL;
}
if ( precond_par->U.val != NULL ) {
if ( precond_par->U.memory_location == Magma_DEV )
magma_free( precond_par->U.dval );
else
magma_free_cpu( precond_par->U.val );
precond_par->U.val = NULL;
}
if ( precond_par->U.col != NULL ) {
if ( precond_par->U.memory_location == Magma_DEV )
magma_free( precond_par->U.dcol );
else
magma_free_cpu( precond_par->U.col );
precond_par->U.col = NULL;
}
if ( precond_par->U.row != NULL ) {
if ( precond_par->U.memory_location == Magma_DEV )
magma_free( precond_par->U.drow );
else
magma_free_cpu( precond_par->U.row );
precond_par->U.row = NULL;
}
if ( precond_par->U.blockinfo != NULL ) {
magma_free_cpu( precond_par->U.blockinfo );
precond_par->U.blockinfo = NULL;
}
if ( precond_par->solver == Magma_ILU ||
precond_par->solver == Magma_AILU ||
precond_par->solver == Magma_ICC||
precond_par->solver == Magma_AICC ) {
cusparseDestroySolveAnalysisInfo( precond_par->cuinfoL );
cusparseDestroySolveAnalysisInfo( precond_par->cuinfoU );
precond_par->cuinfoL = NULL;
precond_par->cuinfoU = NULL;
}
if ( precond_par->LD.val != NULL ) {
if ( precond_par->LD.memory_location == Magma_DEV )
magma_free( precond_par->LD.dval );
else
magma_free_cpu( precond_par->LD.val );
precond_par->LD.val = NULL;
}
if ( precond_par->LD.col != NULL ) {
if ( precond_par->LD.memory_location == Magma_DEV )
magma_free( precond_par->LD.dcol );
else
magma_free_cpu( precond_par->LD.col );
precond_par->LD.col = NULL;
}
if ( precond_par->LD.row != NULL ) {
if ( precond_par->LD.memory_location == Magma_DEV )
magma_free( precond_par->LD.drow );
else
magma_free_cpu( precond_par->LD.row );
precond_par->LD.row = NULL;
}
if ( precond_par->LD.blockinfo != NULL ) {
magma_free_cpu( precond_par->LD.blockinfo );
precond_par->LD.blockinfo = NULL;
}
if ( precond_par->UD.val != NULL ) {
if ( precond_par->UD.memory_location == Magma_DEV )
magma_free( precond_par->UD.dval );
else
magma_free_cpu( precond_par->UD.val );
precond_par->UD.val = NULL;
}
if ( precond_par->UD.col != NULL ) {
if ( precond_par->UD.memory_location == Magma_DEV )
magma_free( precond_par->UD.dcol );
else
magma_free_cpu( precond_par->UD.col );
precond_par->UD.col = NULL;
}
if ( precond_par->UD.row != NULL ) {
if ( precond_par->UD.memory_location == Magma_DEV )
magma_free( precond_par->UD.drow );
else
magma_free_cpu( precond_par->UD.row );
precond_par->UD.row = NULL;
}
if ( precond_par->UD.blockinfo != NULL ) {
magma_free_cpu( precond_par->UD.blockinfo );
precond_par->UD.blockinfo = NULL;
}
precond_par->solver = Magma_NONE;
return MAGMA_SUCCESS;
}
/**
Purpose
-------
Initializes all solver and preconditioner parameters.
Arguments
---------
@param[in,out]
solver_par magma_c_solver_par*
structure containing all solver information
@param[in,out]
precond_par magma_c_preconditioner*
structure containing all preconditioner information
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_csolverinfo_init(
magma_c_solver_par *solver_par,
magma_c_preconditioner *precond_par,
magma_queue_t queue )
{
magma_int_t info = 0;
solver_par->res_vec = NULL;
solver_par->timing = NULL;
solver_par->eigenvectors = NULL;
solver_par->eigenvalues = NULL;
if( solver_par->maxiter == 0 )
solver_par->maxiter = 1000;
if( solver_par->version == 0 )
solver_par->version = 0;
if( solver_par->restart == 0 )
solver_par->restart = 30;
if( solver_par->solver == 0 )
solver_par->solver = Magma_CG;
if ( solver_par->verbose > 0 ) {
CHECK( magma_malloc_cpu( (void **)&solver_par->res_vec, sizeof(real_Double_t)
* ( (solver_par->maxiter)/(solver_par->verbose)+1) ));
CHECK( magma_malloc_cpu( (void **)&solver_par->timing, sizeof(real_Double_t)
*( (solver_par->maxiter)/(solver_par->verbose)+1) ));
} else {
solver_par->res_vec = NULL;
solver_par->timing = NULL;
}
precond_par->d.val = NULL;
precond_par->d2.val = NULL;
precond_par->work1.val = NULL;
precond_par->work2.val = NULL;
precond_par->M.val = NULL;
precond_par->M.col = NULL;
precond_par->M.row = NULL;
precond_par->M.blockinfo = NULL;
precond_par->L.val = NULL;
precond_par->L.col = NULL;
precond_par->L.row = NULL;
precond_par->L.blockinfo = NULL;
precond_par->U.val = NULL;
precond_par->U.col = NULL;
precond_par->U.row = NULL;
precond_par->U.blockinfo = NULL;
precond_par->LD.val = NULL;
precond_par->LD.col = NULL;
precond_par->LD.row = NULL;
precond_par->LD.blockinfo = NULL;
precond_par->UD.val = NULL;
precond_par->UD.col = NULL;
precond_par->UD.row = NULL;
precond_par->UD.blockinfo = NULL;
precond_par->cuinfoL = NULL;
precond_par->cuinfoU = NULL;
cleanup:
if( info != 0 ){
magma_free( solver_par->timing );
magma_free( solver_par->res_vec );
}
return info;
}
/**
Purpose
-------
Initializes space for eigensolvers.
Arguments
---------
@param[in,out]
solver_par magma_c_solver_par*
structure containing all solver information
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_ceigensolverinfo_init(
magma_c_solver_par *solver_par,
magma_queue_t queue )
{
magma_int_t info = 0;
magmaFloatComplex *initial_guess=NULL;
solver_par->eigenvectors = NULL;
solver_par->eigenvalues = NULL;
if ( solver_par->solver == Magma_LOBPCG ) {
CHECK( magma_smalloc_cpu( &solver_par->eigenvalues ,
3*solver_par->num_eigenvalues ));
// setup initial guess EV using lapack
// then copy to GPU
magma_int_t ev = solver_par->num_eigenvalues * solver_par->ev_length;
CHECK( magma_cmalloc_cpu( &initial_guess, ev ));
CHECK( magma_cmalloc( &solver_par->eigenvectors, ev ));
magma_int_t ISEED[4] = {0,0,0,1}, ione = 1;
lapackf77_clarnv( &ione, ISEED, &ev, initial_guess );
magma_csetmatrix( solver_par->ev_length, solver_par->num_eigenvalues,
initial_guess, solver_par->ev_length, solver_par->eigenvectors,
solver_par->ev_length );
} else {
solver_par->eigenvectors = NULL;
solver_par->eigenvalues = NULL;
}
cleanup:
if( info != 0 ){
magma_free( solver_par->eigenvectors );
magma_free( solver_par->eigenvalues );
}
magma_free_cpu( initial_guess );
return info;
}<|fim▁end|>
|
printf("%% CG (merged) performance analysis every %d iteration\n",
|
<|file_name|>issue-19358.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Trait {}
#[derive(Show)]
struct Foo<T: Trait> {
foo: T,
}
#[derive(Show)]
struct Bar<T> where T: Trait {
bar: T,<|fim▁hole|>fn main() {
let a = Foo { foo: 12i };
let b = Bar { bar: 12i };
println!("{:?} {:?}", a, b);
}<|fim▁end|>
|
}
impl Trait for int {}
|
<|file_name|>sh.py<|end_file_name|><|fim▁begin|>'''
Created on 22/ago/2011
@author: norby
'''
from core.moduleexception import ModuleException, ProbeException, ExecutionException, ProbeSucceed
from core.moduleguess import ModuleGuess
from core.argparse import ArgumentParser, StoredNamespace
from core.argparse import SUPPRESS
from ast import literal_eval
import random
MSG_SH_INTERPRETER_SUCCEED = 'Shell interpreter load succeeded'
WARN_SH_INTERPRETER_FAIL = 'Shell interpreters load failed'
class Sh(ModuleGuess):
'''Execute system shell command'''
def _set_vectors(self):
self.vectors.add_vector("system", 'shell.php', "@system('$cmd $no_stderr');")
self.vectors.add_vector("passthru" , 'shell.php', "@passthru('$cmd $no_stderr');")
self.vectors.add_vector("shell_exec", 'shell.php', "echo @shell_exec('$cmd $no_stderr');")
self.vectors.add_vector("exec", 'shell.php', "@exec('$cmd $no_stderr', $r);echo(join(\"\\n\",$r));")
#self.vectors.add_vector("pcntl", 'shell.php', ' $p = pcntl_fork(); if(!$p) {{ pcntl_exec( "/bin/sh", Array("-c", "$cmd")); }} else {{ pcntl_waitpid($p,$status); }}'),
self.vectors.add_vector("popen", 'shell.php', "$h = popen('$cmd','r'); while(!feof($h)) echo(fread($h,4096)); pclose($h);")
self.vectors.add_vector("python_eval", 'shell.php', "python_eval('import os; os.system('$cmd$no_stderr');")
self.vectors.add_vector("perl_system", 'shell.php', "$perl = new perl(); $r = @perl->system('$cmd$no_stderr'); echo $r;")
self.vectors.add_vector("proc_open", 'shell.php', """$p = array(array('pipe', 'r'), array('pipe', 'w'), array('pipe', 'w'));
$h = proc_open('$cmd', $p, $pipes); while(!feof($pipes[1])) echo(fread($pipes[1],4096));
while(!feof($pipes[2])) echo(fread($pipes[2],4096)); fclose($pipes[0]); fclose($pipes[1]);
fclose($pipes[2]); proc_close($h);""")
def _set_args(self):
self.argparser.add_argument('cmd', help='Shell command', nargs='+')
self.argparser.add_argument('-no-stderr', help='Suppress error output', action='store_false')
self.argparser.add_argument('-vector', choices = self.vectors.keys())
self.argparser.add_argument('-just-probe', help=SUPPRESS, action='store_true')
def _init_stored_args(self):
self.stored_args_namespace = StoredNamespace()
setattr(self.stored_args_namespace, 'vector', None )
def _execute_vector(self):
if not getattr(self.stored_args_namespace, 'vector') or self.args['just_probe']:
self.__slacky_probe()
        # Execute if the current vector is saved or chosen
if self.current_vector.name in (getattr(self.stored_args_namespace, 'vector'), self.args['vector']):
self._result = self.current_vector.execute( self.formatted_args)
def _prepare_vector(self):
# Format cmd
self.formatted_args['cmd'] = ' '.join(self.args['cmd']).replace( "'", "\\'" )
# Format stderr
if any('$no_stderr' in p for p in self.current_vector.payloads):
if self.args['no_stderr']:
self.formatted_args['no_stderr'] = '2>&1'
else:
self.formatted_args['no_stderr'] = ''
def __slacky_probe(self):
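        # Probe sketch: push `echo <random token>` through the candidate PHP
        # vector; if the token round-trips, the vector works and its name is
        # stored as the default for later calls.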
rand = str(random.randint( 11111, 99999 ))
<|fim▁hole|> slacky_formats = self.formatted_args.copy()
slacky_formats['cmd'] = 'echo %s' % (rand)
if self.current_vector.execute( slacky_formats) == rand:
setattr(self.stored_args_namespace, 'vector', self.current_vector.name)
# Set as best interpreter
#self.modhandler.interpreter = self.name
if self.args['just_probe']:
self._result = True
raise ProbeSucceed(self.name, MSG_SH_INTERPRETER_SUCCEED)
return
raise ModuleException(self.name, WARN_SH_INTERPRETER_FAIL)<|fim▁end|>
| |
<|file_name|>hu.js<|end_file_name|><|fim▁begin|>/*!
* froala_editor v4.0.1 (https://www.froala.com/wysiwyg-editor)
* License https://froala.com/wysiwyg-editor/terms/
* Copyright 2014-2021 Froala Labs
*/
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(require('froala-editor')) :
typeof define === 'function' && define.amd ? define(['froala-editor'], factory) :
(factory(global.FroalaEditor));
}(this, (function (FE) { 'use strict';
FE = FE && FE.hasOwnProperty('default') ? FE['default'] : FE;
/**
* Hungarian
*/
FE.LANGUAGE['hu'] = {
translation: {
// Place holder
'Type something': 'Szöveg...',
// Basic formatting
'Bold': 'Félkövér',
'Italic': 'Dőlt',
'Underline': 'Aláhúzott',
'Strikethrough': 'Áthúzott',
// Main buttons
'Insert': 'Beillesztés',
'Delete': 'Törlés',
'Cancel': 'Mégse',
'OK': 'Rendben',
'Back': 'Vissza',
'Remove': 'Eltávolítás',
'More': 'Több',
'Update': 'Frissítés',
'Style': 'Stílus',
// Font
'Font Family': 'Betűtípus',
'Font Size': 'Betűméret',
// Colors
'Colors': 'Színek',
'Background': 'Háttér',
'Text': 'Szöveg',
'HEX Color': 'HEX színkód',
// Paragraphs
'Paragraph Format': 'Formátumok',
'Normal': 'Normál',
'Code': 'Kód',
'Heading 1': 'Címsor 1',
'Heading 2': 'Címsor 2',
'Heading 3': 'Címsor 3',
'Heading 4': 'Címsor 4',
// Style
'Paragraph Style': 'Bekezdés stílusa',
    'Inline Style': 'Helyi stílus',
// Alignment
'Align': 'Igazítás',
'Align Left': 'Balra igazít',
'Align Center': 'Középre zár',
'Align Right': 'Jobbra igazít',
'Align Justify': 'Sorkizárás',
'None': 'Egyik sem',
// Lists
'Ordered List': 'Számozás',
'Default': 'Alapértelmezett',
'Lower Alpha': 'Alacsonyabb alfa',
'Lower Greek': 'Alsó görög',
'Lower Roman': 'Alacsonyabb római',
'Upper Alpha': 'Felső alfa',
'Upper Roman': 'Felső római',
'Unordered List': 'Felsorolás',
'Circle': 'Kör',
'Disc': 'Lemez',
'Square': 'Négyzet',
// Line height
'Line Height': 'Vonal magassága',
'Single': 'Egyetlen',
'Double': 'Kettős',
// Indent
'Decrease Indent': 'Behúzás csökkentése',
'Increase Indent': 'Behúzás növelése',
// Links
'Insert Link': 'Hivatkozás beillesztése',
'Open in new tab': 'Megnyitás új lapon',
'Open Link': 'Hivatkozás megnyitása',
'Edit Link': 'Hivatkozás szerkesztése',
'Unlink': 'Hivatkozás törlése',
'Choose Link': 'Keresés a lapok között',
// Images
'Insert Image': 'Kép beillesztése',
'Upload Image': 'Kép feltöltése',
'By URL': 'Webcím megadása',
'Browse': 'Böngészés',
'Drop image': 'Húzza ide a képet',
'or click': 'vagy kattintson ide',
'Manage Images': 'Képek kezelése',
'Loading': 'Betöltés...',
'Deleting': 'Törlés...',
'Tags': 'Címkék',
'Are you sure? Image will be deleted.': 'Biztos benne? A kép törlésre kerül.',
'Replace': 'Csere',
'Uploading': 'Feltöltés',
'Loading image': 'Kép betöltése',
'Display': 'Kijelző',
'Inline': 'Sorban',
'Break Text': 'Szöveg törése',
'Alternative Text': 'Alternatív szöveg',
'Change Size': 'Méret módosítása',
'Width': 'Szélesség',
'Height': 'Magasság',
'Something went wrong. Please try again.': 'Valami elromlott. Kérjük próbálja újra.',
'Image Caption': 'Képaláírás',
'Advanced Edit': 'Fejlett szerkesztés',
// Video
'Insert Video': 'Videó beillesztése',
'Embedded Code': 'Kód bemásolása',
'Paste in a video URL': 'Illessze be a videó webcímét',
'Drop video': 'Húzza ide a videót',
'Your browser does not support HTML5 video.': 'A böngészője nem támogatja a HTML5 videót.',
'Upload Video': 'Videó feltöltése',
// Tables
'Insert Table': 'Táblázat beillesztése',
'Table Header': 'Táblázat fejléce',
'Remove Table': 'Tábla eltávolítása',
'Table Style': 'Táblázat stílusa',
'Horizontal Align': 'Vízszintes igazítás',
'Row': 'Sor',
'Insert row above': 'Sor beszúrása elé',
'Insert row below': 'Sor beszúrása mögé',
'Delete row': 'Sor törlése',
'Column': 'Oszlop',
'Insert column before': 'Oszlop beszúrása elé',
'Insert column after': 'Oszlop beszúrása mögé',
'Delete column': 'Oszlop törlése',
'Cell': 'Cella',
'Merge cells': 'Cellák egyesítése',
'Horizontal split': 'Vízszintes osztott',
'Vertical split': 'Függőleges osztott',
'Cell Background': 'Cella háttere',
'Vertical Align': 'Függőleges igazítás',
'Top': 'Felső',
'Middle': 'Középső',
'Bottom': 'Alsó',
'Align Top': 'Igazítsa felülre',
'Align Middle': 'Igazítsa középre',
'Align Bottom': 'Igazítsa alúlra',
'Cell Style': 'Cella stílusa',
// Files
'Upload File': 'Fájl feltöltése',
'Drop file': 'Húzza ide a fájlt',
// Emoticons
'Emoticons': 'Hangulatjelek',
'Grinning face': 'Vigyorgó arc',
'Grinning face with smiling eyes': 'Vigyorgó arc mosolygó szemekkel',
'Face with tears of joy': 'Arcon az öröm könnyei',
'Smiling face with open mouth': 'Mosolygó arc tátott szájjal',
'Smiling face with open mouth and smiling eyes': 'Mosolygó arc tátott szájjal és mosolygó szemek',
'Smiling face with open mouth and cold sweat': 'Mosolygó arc tátott szájjal és hideg veríték',
'Smiling face with open mouth and tightly-closed eyes': 'Mosolygó arc tátott szájjal és lehunyt szemmel',
'Smiling face with halo': 'Mosolygó arc dicsfényben',
'Smiling face with horns': 'Mosolygó arc szarvakkal',
'Winking face': 'Kacsintós arc',
'Smiling face with smiling eyes': 'Mosolygó arc mosolygó szemekkel',
'Face savoring delicious food': 'Ízletes ételek kóstolása',
'Relieved face': 'Megkönnyebbült arc',
'Smiling face with heart-shaped eyes': 'Mosolygó arc szív alakú szemekkel',
    'Smiling face with sunglasses': 'Mosolygó arc napszemüvegben',
'Smirking face': 'Vigyorgó arc',
'Neutral face': 'Semleges arc',
'Expressionless face': 'Kifejezéstelen arc',
'Unamused face': 'Unott arc',
'Face with cold sweat': 'Arcán hideg verejtékkel',
'Pensive face': 'Töprengő arc',
'Confused face': 'Zavaros arc',
'Confounded face': 'Rácáfolt arc',
'Kissing face': 'Csókos arc',
'Face throwing a kiss': 'Arcra dobott egy csókot',
'Kissing face with smiling eyes': 'Csókos arcán mosolygó szemek',
'Kissing face with closed eyes': 'Csókos arcán csukott szemmel',
'Face with stuck out tongue': 'Kinyújototta a nyelvét',
'Face with stuck out tongue and winking eye': 'Kinyújtotta a nyelvét és kacsintó szem',
'Face with stuck out tongue and tightly-closed eyes': 'Kinyújtotta a nyelvét és szorosan lehunyt szemmel',
'Disappointed face': 'Csalódott arc',
'Worried face': 'Aggódó arc',
'Angry face': 'Dühös arc',
'Pouting face': 'Duzzogó arc',
'Crying face': 'Síró arc',
'Persevering face': 'Kitartó arc',
'Face with look of triumph': 'Arcát diadalmas pillantást',
'Disappointed but relieved face': 'Csalódott, de megkönnyebbült arc',
'Frowning face with open mouth': 'Komor arc tátott szájjal',
'Anguished face': 'Gyötrődő arc',
'Fearful face': 'Félelmetes arc',
'Weary face': 'Fáradt arc',
'Sleepy face': 'Álmos arc',
'Tired face': 'Fáradt arc',
'Grimacing face': 'Elfintorodott arc',
'Loudly crying face': 'Hangosan síró arc',
'Face with open mouth': 'Arc nyitott szájjal',
'Hushed face': 'Csitított arc',
'Face with open mouth and cold sweat': 'Arc tátott szájjal és hideg veríték',
'Face screaming in fear': 'Sikoltozó arc a félelemtől',
'Astonished face': 'Meglepett arc',
'Flushed face': 'Kipirult arc',
'Sleeping face': 'Alvó arc',
    'Dizzy face': 'Szédülő arc',
'Face without mouth': 'Arc nélküli száj',
'Face with medical mask': 'Arcán orvosi maszk',
// Line breaker
'Break': 'Törés',
// Math
'Subscript': 'Alsó index',
'Superscript': 'Felső index',
// Full screen
'Fullscreen': 'Teljes képernyő',
// Horizontal line
'Insert Horizontal Line': 'Vízszintes vonal',
// Clear formatting
'Clear Formatting': 'Formázás eltávolítása',
// Save
'Save': 'Mentés',
// Undo, redo
'Undo': 'Visszavonás',
'Redo': 'Ismét',
// Select all
'Select All': 'Minden kijelölése',
// Code view
'Code View': 'Forráskód',
// Quote
'Quote': 'Idézet',
'Increase': 'Növelés',
'Decrease': 'Csökkentés',
// Quick Insert
'Quick Insert': 'Beillesztés',
// Spcial Characters
'Special Characters': 'Speciális karakterek',
'Latin': 'Latin',
'Greek': 'Görög',
'Cyrillic': 'Cirill',
'Punctuation': 'Központozás',
'Currency': 'Valuta',
'Arrows': 'Nyilak',
'Math': 'Matematikai',
'Misc': 'Egyéb',
// Print
'Print': 'Nyomtatás',
// Spell Checker
'Spell Checker': 'Helyesírás-ellenőrző',
// Help
'Help': 'Segítség',<|fim▁hole|> 'Shortcuts': 'Hivatkozások',
'Inline Editor': 'Inline szerkesztő',
'Show the editor': 'Mutassa a szerkesztőt',
'Common actions': 'Közös cselekvések',
'Copy': 'Másolás',
'Cut': 'Kivágás',
'Paste': 'Beillesztés',
'Basic Formatting': 'Alap formázás',
'Increase quote level': 'Növeli az idézet behúzását',
'Decrease quote level': 'Csökkenti az idézet behúzását',
'Image / Video': 'Kép / videó',
'Resize larger': 'Méretezés nagyobbra',
'Resize smaller': 'Méretezés kisebbre',
'Table': 'Asztal',
'Select table cell': 'Válasszon táblázat cellát',
'Extend selection one cell': 'Növelje meg egy sorral',
'Extend selection one row': 'Csökkentse egy sorral',
'Navigation': 'Navigáció',
'Focus popup / toolbar': 'Felugró ablak / eszköztár',
'Return focus to previous position': 'Visszaáll az előző pozícióra',
// Embed.ly
'Embed URL': 'Beágyazott webcím',
'Paste in a URL to embed': 'Beilleszteni egy webcímet a beágyazáshoz',
// Word Paste
'The pasted content is coming from a Microsoft Word document. Do you want to keep the format or clean it up?': 'A beillesztett tartalom egy Microsoft Word dokumentumból származik. Szeretné megtartani a formázását vagy sem?',
'Keep': 'Megtartás',
'Clean': 'Tisztítás',
'Word Paste Detected': 'Word beillesztés észlelhető'
},
direction: 'ltr'
};
})));
//# sourceMappingURL=hu.js.map<|fim▁end|>
| |
<|file_name|>tree.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright (c) 2015 Zhassulan Zhussupov
# Author [email protected]
# Tree class
import random
class Tree:
def __init__(self, world, screen, x, y):
self.world, self.pygame = world, world.pygame
self.screen = screen
model = random.choice(['tree.png', 'tree_1.png'])
self.image = self.pygame.image.load("./images/houses/" + model)
self.x, self.y = x, y
def draw(self):
self.screen.blit(self.image, [self.x, self.y])
def move(self):
if self.y >= 480:
self.change()
self.y = 0<|fim▁hole|> key = self.pygame.key.get_pressed()
if key[self.pygame.K_UP]:
if self.world.taxi.gear == 1:
self.y += 0.2
elif self.world.taxi.gear == 2:
self.y += 0.4
elif self.world.taxi.gear == 3:
self.y += 0.5
elif self.world.taxi.gear == 4:
self.y += 0.75
def change(self):
model = random.choice(['tree.png', 'tree_1.png'])
self.image = self.pygame.image.load("./images/houses/" + model)<|fim▁end|>
| |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from ilogue.fexpect.api import expect, controlchar, expecting, run, sudo, local<|fim▁end|>
| |
<|file_name|>sort.py<|end_file_name|><|fim▁begin|><|fim▁hole|>import sublime
import sublime_plugin
from isort.isort import SortImports
class PysortCommand(sublime_plugin.TextCommand):
def run(self, edit):
old_content = self.view.substr(sublime.Region(0, self.view.size()))
new_content = SortImports(file_contents=old_content).output
self.view.replace(edit, sublime.Region(0, self.view.size()), new_content)
sublime.status_message("Python sort import complete.")
sublime.run_command('sub_notify', {'title': 'ISort', 'msg': 'Python sort import complete.', 'sound': False})<|fim▁end|>
| |
<|file_name|>max_cost_assignment.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
#
# This simple example shows how to call dlib's optimal linear assignment problem solver.
# It is an implementation of the famous Hungarian algorithm and is quite fast, operating in
# O(N^3) time.
#
# COMPILING THE DLIB PYTHON INTERFACE
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# you are using another python version or operating system then you need to
# compile the dlib python interface before you can use this file. To do this,
# run compile_dlib_python_module.bat. This should work on any operating system
# so long as you have CMake and boost-python installed. On Ubuntu, this can be
# done easily by running the command: sudo apt-get install libboost-python-dev cmake
import dlib
# Lets imagine you need to assign N people to N jobs. Additionally, each person will make
# your company a certain amount of money at each job, but each person has different skills
# so they are better at some jobs and worse at others. You would like to find the best way
# to assign people to these jobs. In particular, you would like to maximize the amount of
# money the group makes as a whole. This is an example of an assignment problem and is
# what is solved by the dlib.max_cost_assignment() routine.
# So in this example, lets imagine we have 3 people and 3 jobs. We represent the amount of
# money each person will produce at each job with a cost matrix. Each row corresponds to a
# person and each column corresponds to a job. So for example, below we are saying that
# person 0 will make $1 at job 0, $2 at job 1, and $6 at job 2.
cost = dlib.matrix([[1, 2, 6],
[5, 3, 6],
[4, 5, 0]])
# To find out the best assignment of people to jobs we just need to call this function.
assignment = dlib.max_cost_assignment(cost)
# This prints optimal assignments: [2, 0, 1]
# which indicates that we should assign the person from the first row of the cost matrix to
# job 2, the middle row person to job 0, and the bottom row person to job 1.<|fim▁hole|>
# This prints optimal cost: 16.0
# which is correct since our optimal assignment is 6+5+5.
print "optimal cost: ", dlib.assignment_cost(cost, assignment)<|fim▁end|>
|
print "optimal assignments: ", assignment
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from . import font
from .indicator import Indicator, IndicatorOptions
from .airspeed import AirspeedIndicator<|fim▁hole|>from .altitude import AltitudeIndicator
from .attitude import AttitudeIndicator
from .compass import CompassIndicator
from .pfd import PFD
from .joystick import Joystick
from . import base_test<|fim▁end|>
| |
<|file_name|>DomainVerificationTwoTone.js<|end_file_name|><|fim▁begin|>"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {<|fim▁hole|>});
exports.default = void 0;
var _createSvgIcon = _interopRequireDefault(require("./utils/createSvgIcon"));
var _jsxRuntime = require("react/jsx-runtime");
var _default = (0, _createSvgIcon.default)([/*#__PURE__*/(0, _jsxRuntime.jsx)("path", {
d: "M5 18h14V8H5v10zm3.82-6.42 2.12 2.12 4.24-4.24 1.41 1.41-5.66 5.66L7.4 13l1.42-1.42z",
opacity: ".3"
}, "0"), /*#__PURE__*/(0, _jsxRuntime.jsx)("path", {
d: "m16.6 10.88-1.42-1.42-4.24 4.25-2.12-2.13L7.4 13l3.54 3.54z"
}, "1"), /*#__PURE__*/(0, _jsxRuntime.jsx)("path", {
d: "M19 4H5c-1.11 0-2 .9-2 2v12c0 1.1.89 2 2 2h14c1.1 0 2-.9 2-2V6c0-1.1-.89-2-2-2zm0 14H5V8h14v10z"
}, "2")], 'DomainVerificationTwoTone');
exports.default = _default;<|fim▁end|>
|
value: true
|
<|file_name|>expanded_landing_page_view.pb.go<|end_file_name|><|fim▁begin|>// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/ads/googleads/v2/resources/expanded_landing_page_view.proto
package resources
import (
fmt "fmt"
math "math"
proto "github.com/golang/protobuf/proto"
wrappers "github.com/golang/protobuf/ptypes/wrappers"
_ "google.golang.org/genproto/googleapis/api/annotations"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file<|fim▁hole|>// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// A landing page view with metrics aggregated at the expanded final URL
// level.
type ExpandedLandingPageView struct {
// The resource name of the expanded landing page view.
// Expanded landing page view resource names have the form:
//
// `customers/{customer_id}/expandedLandingPageViews/{expanded_final_url_fingerprint}`
ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
// The final URL that clicks are directed to.
ExpandedFinalUrl *wrappers.StringValue `protobuf:"bytes,2,opt,name=expanded_final_url,json=expandedFinalUrl,proto3" json:"expanded_final_url,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ExpandedLandingPageView) Reset() { *m = ExpandedLandingPageView{} }
func (m *ExpandedLandingPageView) String() string { return proto.CompactTextString(m) }
func (*ExpandedLandingPageView) ProtoMessage() {}
func (*ExpandedLandingPageView) Descriptor() ([]byte, []int) {
return fileDescriptor_f0d9f18d76cfc25b, []int{0}
}
func (m *ExpandedLandingPageView) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExpandedLandingPageView.Unmarshal(m, b)
}
func (m *ExpandedLandingPageView) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ExpandedLandingPageView.Marshal(b, m, deterministic)
}
func (m *ExpandedLandingPageView) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExpandedLandingPageView.Merge(m, src)
}
func (m *ExpandedLandingPageView) XXX_Size() int {
return xxx_messageInfo_ExpandedLandingPageView.Size(m)
}
func (m *ExpandedLandingPageView) XXX_DiscardUnknown() {
xxx_messageInfo_ExpandedLandingPageView.DiscardUnknown(m)
}
var xxx_messageInfo_ExpandedLandingPageView proto.InternalMessageInfo
func (m *ExpandedLandingPageView) GetResourceName() string {
if m != nil {
return m.ResourceName
}
return ""
}
func (m *ExpandedLandingPageView) GetExpandedFinalUrl() *wrappers.StringValue {
if m != nil {
return m.ExpandedFinalUrl
}
return nil
}
func init() {
proto.RegisterType((*ExpandedLandingPageView)(nil), "google.ads.googleads.v2.resources.ExpandedLandingPageView")
}
func init() {
proto.RegisterFile("google/ads/googleads/v2/resources/expanded_landing_page_view.proto", fileDescriptor_f0d9f18d76cfc25b)
}
var fileDescriptor_f0d9f18d76cfc25b = []byte{
// 342 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0xc1, 0x4a, 0xf3, 0x40,
0x14, 0x85, 0x49, 0x7e, 0xf8, 0xc1, 0xa8, 0x20, 0xd9, 0x58, 0x4a, 0x91, 0x56, 0x29, 0x74, 0x35,
0x81, 0xb8, 0x1b, 0x57, 0x29, 0x68, 0xa1, 0x88, 0x94, 0x8a, 0x59, 0x48, 0x20, 0xdc, 0x76, 0x6e,
0x87, 0x81, 0x74, 0x26, 0xcc, 0x24, 0xad, 0xaf, 0xa0, 0x8f, 0xe1, 0xd2, 0x47, 0xf1, 0x51, 0x7c,
0x0a, 0x49, 0x93, 0x99, 0x9d, 0xba, 0x3b, 0xcc, 0x9c, 0x73, 0xee, 0x77, 0xb9, 0xc1, 0x94, 0x2b,
0xc5, 0x0b, 0x8c, 0x80, 0x99, 0xa8, 0x95, 0x8d, 0xda, 0xc5, 0x91, 0x46, 0xa3, 0x6a, 0xbd, 0x46,
0x13, 0xe1, 0x4b, 0x09, 0x92, 0x21, 0xcb, 0x0b, 0x90, 0x4c, 0x48, 0x9e, 0x97, 0xc0, 0x31, 0xdf,
0x09, 0xdc, 0x93, 0x52, 0xab, 0x4a, 0x85, 0xa3, 0x36, 0x48, 0x80, 0x19, 0xe2, 0x3a, 0xc8, 0x2e,
0x26, 0xae, 0xa3, 0x7f, 0xd1, 0x8d, 0x39, 0x04, 0x56, 0xf5, 0x26, 0xda, 0x6b, 0x28, 0x4b, 0xd4,
0xa6, 0xad, 0xe8, 0x0f, 0x2c, 0x46, 0x29, 0x22, 0x90, 0x52, 0x55, 0x50, 0x09, 0x25, 0xbb, 0xdf,
0xcb, 0x37, 0x2f, 0x38, 0xbf, 0xed, 0x28, 0xee, 0x5b, 0x88, 0x05, 0x70, 0x4c, 0x05, 0xee, 0xc3,
0xab, 0xe0, 0xd4, 0x8e, 0xc9, 0x25, 0x6c, 0xb1, 0xe7, 0x0d, 0xbd, 0xc9, 0xd1, 0xf2, 0xc4, 0x3e,
0x3e, 0xc0, 0x16, 0xc3, 0x79, 0x10, 0xba, 0x2d, 0x36, 0x42, 0x42, 0x91, 0xd7, 0xba, 0xe8, 0xf9,
0x43, 0x6f, 0x72, 0x1c, 0x0f, 0x3a, 0x66, 0x62, 0xd9, 0xc8, 0x63, 0xa5, 0x85, 0xe4, 0x29, 0x14,
0x35, 0x2e, 0xcf, 0x6c, 0xee, 0xae, 0x89, 0x3d, 0xe9, 0x62, 0xfa, 0xea, 0x07, 0xe3, 0xb5, 0xda,
0x92, 0x3f, 0x97, 0x9e, 0x0e, 0x7e, 0x60, 0x5e, 0x34, 0x83, 0x16, 0xde, 0xf3, 0xbc, 0xab, 0xe0,
0xaa, 0x00, 0xc9, 0x89, 0xd2, 0x3c, 0xe2, 0x28, 0x0f, 0x18, 0xf6, 0x16, 0xa5, 0x30, 0xbf, 0x9c,
0xe6, 0xc6, 0xa9, 0x77, 0xff, 0xdf, 0x2c, 0x49, 0x3e, 0xfc, 0xd1, 0xac, 0xad, 0x4c, 0x98, 0x21,
0xad, 0x6c, 0x54, 0x1a, 0x93, 0xa5, 0x75, 0x7e, 0x5a, 0x4f, 0x96, 0x30, 0x93, 0x39, 0x4f, 0x96,
0xc6, 0x99, 0xf3, 0x7c, 0xf9, 0xe3, 0xf6, 0x83, 0xd2, 0x84, 0x19, 0x4a, 0x9d, 0x8b, 0xd2, 0x34,
0xa6, 0xd4, 0xf9, 0x56, 0xff, 0x0f, 0xb0, 0xd7, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x09, 0x2a,
0xe3, 0x32, 0x46, 0x02, 0x00, 0x00,
}<|fim▁end|>
|