| prompt (large_string, lengths 70-991k) | completion (large_string, lengths 0-1.02k) |
|---|---|
<|file_name|>logger.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
import string
import json
import config
import helper
import busses


def log_message(msg):
    # Log format: time type message
    time_str = str(time.time())
    line = time_str[:time_str.find(".")]
    line = line.rjust(10, str(" "))
    line += " "
    busses.status_bus["latest_messages"][msg.chat_id] = msg
    msg_type = helper.get_message_type(msg)
    if msg_type == "text" and msg.text.startswith("/"):
        msg_type = "command"
    appendix = "ERROR"
    if msg_type == "text":
        appendix = msg.text
    elif msg_type == "command":
        appendix = msg.text[1:]
    elif msg_type == "location":
        location_data = msg.location.to_dict()
        appendix = str(location_data["latitude"]) + "°, " + str(location_data["longitude"]) + "°"
    elif msg_type == "contact":
        appendix = str(msg.contact.user_id) + " " + msg.contact.first_name + " " + msg.contact.last_name
    elif msg_type == "new_user":
        appendix = str(msg.new_chat_member.id) + " " + str(msg.new_chat_member.first_name) + " " + str(msg.new_chat_member.last_name)
    elif msg_type in ["audio", "document", "game", "photo", "sticker", "video", "voice", "video_note", "unknown"]:
        appendix = ""
    msg_type = msg_type.rjust(10, str(" "))<|fim▁hole|>
    line += msg_type + " " + appendix + " "
    line += str(msg.chat_id) + "," + str(msg.message_id)
    line += "\n"
    with open(config.msg_log_file_path, "a") as log_file:
        log_file.write(line.encode("utf-8"))


def complete_log(update):
    with open(config.complete_log_file_path, "a") as log_file:
        data = update.to_dict()
        data.update({"time": time.time()})
        json_data = json.dumps(data)
        # Encode the whole line, not just the trailing "\n" (the original
        # parenthesization applied .encode() only to the newline literal).
        log_file.write((str(json_data).replace("\n", "\\n") + "\n").encode("utf-8"))<|fim▁end|> | appendix = appendix.replace("\n", "\\n").rjust(40, str(" ")) |
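For reference, a minimal Python 3 sketch (with hypothetical field values) of the fixed-width record that `log_message` assembles once the 40-column `appendix` padding from the completion cell is applied:

```python
# Hypothetical values; the real fields come from the Telegram message object.
time_str = "1577836800".rjust(10, " ")                   # epoch seconds, width 10
msg_type = "command".rjust(10, " ")                      # message type, width 10
appendix = "start".replace("\n", "\\n").rjust(40, " ")   # payload, width 40
line = time_str + " " + msg_type + " " + appendix + " " + "123456789,42\n"
print(repr(line))
```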
<|file_name|>cdgdec.rs<|end_file_name|><|fim▁begin|>// Copyright (C) 2019 Guillaume Desmottes <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use gst::prelude::*;
use std::path::PathBuf;
fn init() {
use std::sync::Once;
static INIT: Once = Once::new();
INIT.call_once(|| {
gst::init().unwrap();
gstcdg::plugin_register_static().expect("cdgdec tests");
});
}
#[test]
fn test_cdgdec() {
init();
let pipeline = gst::Pipeline::new(Some("cdgdec-test"));
let input_path = {
let mut r = PathBuf::new();
r.push(env!("CARGO_MANIFEST_DIR"));
r.push("tests");
r.push("BrotherJohn");
r.set_extension("cdg");
r
};
// Ensure we are in push mode so 'blocksize' prop is used
let filesrc = gst::ElementFactory::make("pushfilesrc", None).unwrap();
filesrc
.set_property("location", &input_path.to_str().unwrap())
.expect("failed to set 'location' property");
{
let child_proxy = filesrc.dynamic_cast_ref::<gst::ChildProxy>().unwrap();
child_proxy
.set_child_property("real-filesrc::num-buffers", &1)
.expect("failed to set 'num-buffers' property");
let blocksize: u32 = 24; // One CDG instruction
child_proxy
.set_child_property("real-filesrc::blocksize", &blocksize)
.expect("failed to set 'blocksize' property");
}
let parse = gst::ElementFactory::make("cdgparse", None).unwrap();
let dec = gst::ElementFactory::make("cdgdec", None).unwrap();
let sink = gst::ElementFactory::make("appsink", None).unwrap();
pipeline
.add_many(&[&filesrc, &parse, &dec, &sink])
.expect("failed to add elements to the pipeline");
gst::Element::link_many(&[&filesrc, &parse, &dec, &sink]).expect("failed to link the elements");
let sink = sink.downcast::<gst_app::AppSink>().unwrap();
sink.set_callbacks(
gst_app::AppSinkCallbacks::builder()
// Add a handler to the "new-sample" signal.
.new_sample(move |appsink| {<|fim▁hole|>
let map = buffer.map_readable().map_err(|_| gst::FlowError::Error)?;
// First frame fully blue
map.as_slice()
.chunks_exact(4)
.for_each(|p| assert_eq!(p, [0, 0, 136, 255]));
Ok(gst::FlowSuccess::Ok)
})
.build(),
);
pipeline
.set_state(gst::State::Playing)
.expect("Unable to set the pipeline to the `Playing` state");
let bus = pipeline.get_bus().unwrap();
for msg in bus.iter_timed(gst::CLOCK_TIME_NONE) {
use gst::MessageView;
match msg.view() {
MessageView::Error(err) => {
eprintln!(
"Error received from element {:?}: {}",
err.get_src().map(|s| s.get_path_string()),
err.get_error()
);
eprintln!("Debugging information: {:?}", err.get_debug());
unreachable!();
}
MessageView::Eos(..) => break,
_ => (),
}
}
pipeline
.set_state(gst::State::Null)
.expect("Unable to set the pipeline to the `Null` state");
}<|fim▁end|> | // Pull the sample in question out of the appsink's buffer.
let sample = appsink.pull_sample().map_err(|_| gst::FlowError::Eos)?;
let buffer = sample.get_buffer().ok_or(gst::FlowError::Error)?; |
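The heart of the callback above is a solid-color check over 4-byte RGBA pixels; here is the same check restated in Python (the pixel value is the one asserted in the test, while the buffer is only a stand-in for the mapped frame):

```python
expected = bytes([0, 0, 136, 255])  # pixel value from the assertion above
frame = expected * 16               # stand-in for the decoded frame's bytes

def is_solid_color(buf, pixel):
    # Walk the buffer one 4-byte pixel at a time, as chunks_exact(4) does.
    return all(buf[i:i + 4] == pixel for i in range(0, len(buf), 4))

assert is_solid_color(frame, expected)
```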
<|file_name|>AnnotateStatsProcCtx.java<|end_file_name|><|fim▁begin|>/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.optimizer.stats.annotation;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.plan.Statistics;
public class AnnotateStatsProcCtx implements NodeProcessorCtx {
private ParseContext pctx;
private HiveConf conf;
private Statistics andExprStats = null;
public AnnotateStatsProcCtx(ParseContext pctx) {
this.setParseContext(pctx);
if(pctx != null) {
this.setConf(pctx.getConf());
} else {
this.setConf(null);
}
}
public HiveConf getConf() {<|fim▁hole|>
return conf;
}
public void setConf(HiveConf conf) {
this.conf = conf;
}
public ParseContext getParseContext() {
return pctx;
}
public void setParseContext(ParseContext pctx) {
this.pctx = pctx;
}
public Statistics getAndExprStats() {
return andExprStats;
}
public void setAndExprStats(Statistics andExprStats) {
this.andExprStats = andExprStats;
}
}<|fim▁end|> | |
<|file_name|>CheckedCastBrJumpThreading.cpp<|end_file_name|><|fim▁begin|>#define DEBUG_TYPE "sil-simplify-cfg"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "swift/SIL/SILInstruction.h"
#include "swift/SILOptimizer/Analysis/DominanceAnalysis.h"
#include "swift/SILOptimizer/Utils/CFG.h"
#include "swift/SILOptimizer/Utils/Local.h"
#include "swift/SILOptimizer/Utils/SILInliner.h"
using namespace swift;
namespace {
/// This is a class implementing a dominator-based jump-threading
/// for checked_cast_br [exact].
class CheckedCastBrJumpThreading {
// The checked_cast_br instruction, which
// we try to jump-thread
CheckedCastBranchInst *CCBI;
// Basic block of the current checked_cast_br instruction.
SILBasicBlock *BB;
// Condition used by the current checked_cast_br instruction.
SILValue Condition;
// Success branch of the current checked_cast_br instruction.
SILBasicBlock *SuccessBB;
// Failure branch of the current checked_cast_br instruction.
SILBasicBlock *FailureBB;
// Current dominating checked_cast_br instruction.
CheckedCastBranchInst *DomCCBI;
// Basic block of the dominating checked_cast_br instruction.
SILBasicBlock *DomBB;
// Condition used by the dominating checked_cast_br instruction.
SILValue DomCondition;
// Success branch of the dominating checked_cast_br instruction.
SILBasicBlock *DomSuccessBB;
// Failure branch of the dominating checked_cast_br instruction.
SILBasicBlock *DomFailureBB;
// Current dominator tree node where we look for a dominating
// checked_cast_br instruction.
llvm::DomTreeNodeBase<SILBasicBlock> *Node;
SILBasicBlock *ArgBB;
// Dominator information to be used.
DominanceInfo *DT;
// Basic block created as a landing BB for all failure predecessors.
SILBasicBlock *TargetFailureBB;
// Basic block created as a landing BB for all success predecessors.
SILBasicBlock *TargetSuccessBB;
// Cloner used to clone the BB to FailureSuccessBB.
Optional<BasicBlockCloner> FailureBBCloner;
// Cloner used to clone the BB to TargetSuccessBB.
Optional<BasicBlockCloner> SuccessBBCloner;
// Predecessors reached only via a path along the
// success branch of the dominating checked_cast_br.
SmallVector<SILBasicBlock *, 8> SuccessPreds;
// Predecessors reached only via a path along the
// failure branch of the dominating checked_cast_br.
SmallVector<SILBasicBlock *, 8> FailurePreds;
// All other predecessors, where the outcome of the
// checked_cast_br along the path is not known.
SmallVector<SILBasicBlock *, 8> UnknownPreds;
// Basic blocks to be added to for reprocessing
// after jump-threading is done.
SmallVectorImpl<SILBasicBlock *> &BlocksForWorklist;
bool areEquivalentConditionsAlongPaths();
bool areEquivalentConditionsAlongSomePaths();
bool handleArgBBIsEntryBlock(SILBasicBlock *ArgBB);
bool checkCloningConstraints();
void modifyCFGForUnknownPreds();
void modifyCFGForFailurePreds();
void modifyCFGForSuccessPreds();<|fim▁hole|>
void updateDominatorTree();
void updateSSA();
void addBlockToSimplifyCFGWorklist(SILBasicBlock *BB);
void addBlocksToWorklist();
void classifyPredecessor(
SILBasicBlock *Pred,
SmallVectorImpl<SILBasicBlock *> &SuccessPreds,
SmallVectorImpl<SILBasicBlock *> &FailurePreds,
SmallVectorImpl<SILBasicBlock *> &UnknownPreds,
bool SuccessDominates,
bool FailureDominates);
SILValue isArgValueEquivalentToCondition(SILValue Value,
SILBasicBlock *DomBB,
SILValue DomValue,
DominanceInfo *DT);
public:
CheckedCastBrJumpThreading(DominanceInfo *DT,
SmallVectorImpl<SILBasicBlock *> &BBs)
: DT(DT), BlocksForWorklist(BBs) { }
bool trySimplify(TermInst *Term);
ArrayRef<SILBasicBlock*> getBlocksForWorklist() {
return BlocksForWorklist;
}
};
} // end anonymous namespace
/// Find a nearest common dominator for a given set of basic blocks.
static DominanceInfoNode *findCommonDominator(ArrayRef<SILBasicBlock *> BBs,
DominanceInfo *DT) {
DominanceInfoNode *CommonDom = nullptr;
for (auto *BB : BBs) {
if (!CommonDom) {
CommonDom = DT->getNode(BB);
} else {
CommonDom = DT->getNode(
DT->findNearestCommonDominator(CommonDom->getBlock(), BB));
}
}
return CommonDom;
}
/// Find a nearest common dominator for all predecessors of
/// a given basic block.
static DominanceInfoNode *findCommonDominator(SILBasicBlock *BB,
DominanceInfo *DT) {
SmallVector<SILBasicBlock *, 8> Preds;
for (auto *Pred: BB->getPreds())
Preds.push_back(Pred);
return findCommonDominator(Preds, DT);
}
/// Estimate the cost of inlining a given basic block.
static unsigned basicBlockInlineCost(SILBasicBlock *BB, unsigned Cutoff) {
unsigned Cost = 0;
for (auto &I : *BB) {
auto ICost = instructionInlineCost(I);
Cost += unsigned(ICost);
if (Cost > Cutoff)
return Cost;
}
return Cost;
}
/// We cannot duplicate blocks with AllocStack instructions (they need to be
/// FIFO). Other instructions can be duplicated.
static bool canDuplicateBlock(SILBasicBlock *BB) {
for (auto &I : *BB) {
if (!I.isTriviallyDuplicatable())
return false;
}
return true;
}
void CheckedCastBrJumpThreading::addBlockToSimplifyCFGWorklist(SILBasicBlock *BB) {
BlocksForWorklist.push_back(BB);
}
/// Add affected blocks for re-processing by simplifyCFG
void CheckedCastBrJumpThreading::addBlocksToWorklist() {
if (TargetFailureBB) {
if (!TargetFailureBB->pred_empty())
addBlockToSimplifyCFGWorklist(TargetFailureBB);
}
if (TargetSuccessBB) {
if (!TargetSuccessBB->pred_empty())
addBlockToSimplifyCFGWorklist(TargetSuccessBB);
}
if (!BB->pred_empty())
addBlockToSimplifyCFGWorklist(BB);
}
/// Classify a predecessor of a BB containing checked_cast_br as being
/// reachable via success or failure branches of a dominating checked_cast_br
/// or as unknown if it can be reached via success or failure branches
/// at the same time.
void CheckedCastBrJumpThreading::classifyPredecessor(
SILBasicBlock *Pred,
SmallVectorImpl<SILBasicBlock *> &SuccessPreds,
SmallVectorImpl<SILBasicBlock *> &FailurePreds,
SmallVectorImpl<SILBasicBlock *> &UnknownPreds,
bool SuccessDominates,
bool FailureDominates) {
if (SuccessDominates && FailureDominates) {
UnknownPreds.push_back(Pred);
return;
}
if (SuccessDominates) {
SuccessPreds.push_back(Pred);
return;
}
if (FailureDominates) {
FailurePreds.push_back(Pred);
return;
}
UnknownPreds.push_back(Pred);
}
/// Check if the root value for Value that comes
/// along the path from DomBB is equivalent to the
/// DomCondition.
SILValue CheckedCastBrJumpThreading::isArgValueEquivalentToCondition(
SILValue Value, SILBasicBlock *DomBB, SILValue DomValue,
DominanceInfo *DT) {
SmallPtrSet<ValueBase *, 16> SeenValues;
DomValue = DomValue.stripClassCasts();
while (true) {
Value = Value.stripClassCasts();
if (Value == DomValue)
return Value;
// We know how to propagate through BBArgs only.
auto *V = dyn_cast<SILArgument>(Value);
if (!V)
return SILValue();
// Have we visited this BB already?
if (!SeenValues.insert(Value.getDef()).second)
return SILValue();
if (SeenValues.size() > 10)
return SILValue();
SmallVector<SILValue, 4> IncomingValues;
if (!V->getIncomingValues(IncomingValues) || IncomingValues.empty())
return SILValue();
ValueBase *Def = nullptr;
for (auto IncomingValue : IncomingValues) {
// Each incoming value should be either from a block
// dominated by DomBB or it should be the value used in
// condition in DomBB
Value = IncomingValue.stripClassCasts();
if (Value == DomValue)
continue;
// Values should be the same
if (!Def)
Def = Value.getDef();
if (Def != Value.getDef())
return SILValue();
if (!DT->dominates(DomBB, Value.getDef()->getParentBB()))
return SILValue();
// OK, this value is a potential candidate
}
Value = IncomingValues[0];
}
}
/// Update the SSA form after all changes.
void CheckedCastBrJumpThreading::updateSSA() {
assert(!(SuccessBBCloner.hasValue() && FailureBBCloner.hasValue()) &&
"Both cloners cannot be used at the same time yet");
// Now update the SSA form.
if (!FailurePreds.empty() && FailureBBCloner.hasValue() &&
!SuccessBBCloner.hasValue())
updateSSAAfterCloning(*FailureBBCloner.getPointer(), TargetFailureBB, BB);
if (SuccessBBCloner.hasValue() && !FailureBBCloner.hasValue()) {
updateSSAAfterCloning(*SuccessBBCloner.getPointer(), TargetSuccessBB, BB);
}
}
/// Update the dominator tree after all changes.
void CheckedCastBrJumpThreading::updateDominatorTree() {
// Update the dominator tree.
// If BB was IDom of something, then PredCBBI becomes the IDOM
// of this after jump-threading.
auto *BBDomNode = DT->getNode(BB);
auto &Children = BBDomNode->getChildren();
if (Children.size() > 1) {
SmallVector<DominanceInfoNode *, 16> ChildrenCopy;
std::copy(Children.begin(), Children.end(),
std::back_inserter(ChildrenCopy));
for (auto *Child : ChildrenCopy) {
DT->changeImmediateDominator(Child, Node);
}
}
DominanceInfoNode *CommonDom;
// Find a common dominator for all unknown preds.
if (!UnknownPreds.empty()) {
// Find a new IDom for FailureBB
CommonDom = findCommonDominator(FailureBB, DT);
if (CommonDom)
DT->changeImmediateDominator(FailureBB, CommonDom->getBlock());
CommonDom = findCommonDominator(UnknownPreds, DT);
// This common dominator dominates the BB now.
if (CommonDom) {
DT->changeImmediateDominator(BB, CommonDom->getBlock());
}
}
// Find a common dominator for all failure preds.
CommonDom = findCommonDominator(FailurePreds, DT);
// This common dominator dominates the TargetFailureBB now.
if (CommonDom) {
DT->addNewBlock(TargetFailureBB, CommonDom->getBlock());
// Find a new IDom for FailureBB
CommonDom = findCommonDominator(FailureBB, DT);
if (CommonDom)
DT->changeImmediateDominator(FailureBB, CommonDom->getBlock());
}
// Find a common dominator for all success preds.
CommonDom = findCommonDominator(SuccessPreds, DT);
// This common dominator of all success preds dominates the BB now.
if (CommonDom) {
if (TargetSuccessBB) {
DT->addNewBlock(TargetSuccessBB, CommonDom->getBlock());
} else {
DT->changeImmediateDominator(BB, CommonDom->getBlock());
}
CommonDom = findCommonDominator(SuccessBB, DT);
if (CommonDom)
DT->changeImmediateDominator(SuccessBB, CommonDom->getBlock());
}
// End of dominator tree update.
}
void CheckedCastBrJumpThreading::modifyCFGForUnknownPreds() {
if (UnknownPreds.empty())
return;
// Check the FailureBB if it is a BB that contains a class_method
// referring to the same value as a condition. This pattern is typical
// for method chaining code like obj.method1().method2().etc()
SILInstruction *Inst = &*FailureBB->begin();
if (ClassMethodInst *CMI = dyn_cast<ClassMethodInst>(Inst)) {
if (CMI->getOperand() == Condition) {
// Replace checked_cast_br by branch to FailureBB.
SILBuilder(BB).createBranch(CCBI->getLoc(), FailureBB);
CCBI->eraseFromParent();
}
}
}
/// Create a copy of the BB as a landing BB
/// for all FailurePreds.
void CheckedCastBrJumpThreading::modifyCFGForFailurePreds() {
if (FailurePreds.empty())
return;
FailureBBCloner.emplace(BasicBlockCloner(BB));
FailureBBCloner->clone();
TargetFailureBB = FailureBBCloner->getDestBB();
auto *TI = TargetFailureBB->getTerminator();
SILBuilderWithScope Builder(TI);
// This BB copy branches to a FailureBB.
Builder.createBranch(TI->getLoc(), FailureBB);
TI->eraseFromParent();
// Redirect all FailurePreds to the copy of BB.
for (auto *Pred : FailurePreds) {
TermInst *TI = Pred->getTerminator();
// Replace branch to BB by branch to TargetFailureBB.
replaceBranchTarget(TI, BB, TargetFailureBB, /*PreserveArgs=*/true);
Pred = nullptr;
}
}
/// Create a copy of the BB or reuse BB as
/// a landing basic block for all SuccessPreds.
void CheckedCastBrJumpThreading::modifyCFGForSuccessPreds() {
if (!UnknownPreds.empty()) {
if (!SuccessPreds.empty()) {
// Create a copy of the BB as a landing BB.
// for all SuccessPreds.
SuccessBBCloner.emplace(BasicBlockCloner(BB));
SuccessBBCloner->clone();
TargetSuccessBB = SuccessBBCloner->getDestBB();
auto *TI = TargetSuccessBB->getTerminator();
SILBuilderWithScope Builder(TI);
SmallVector<SILValue, 8> SuccessBBArgs;
// Take argument value from the dominating BB.
SuccessBBArgs.push_back(DomSuccessBB->getBBArg(0));
// This BB copy branches to SuccessBB.
Builder.createBranch(TI->getLoc(), SuccessBB, SuccessBBArgs);
TI->eraseFromParent();
// Redirect all SuccessPreds to the copy of BB.
for (auto *Pred : SuccessPreds) {
TermInst *TI = Pred->getTerminator();
// Replace branch to BB by branch to TargetSuccessBB.
replaceBranchTarget(TI, BB, TargetSuccessBB, /*PreserveArgs=*/true);
SuccessBBArgs.push_back(DomSuccessBB->getBBArg(0));
Pred = nullptr;
}
}
} else {
// There are no predecessors where it is not clear
// if they are dominated by a success or failure branch
// of DomBB. Therefore, there is no need to clone
// the BB for SuccessPreds. Current BB can be re-used
// instead as their target.
// Add an unconditional jump at the end of the block.
SmallVector<SILValue, 1> SuccessBBArgs;
// Take argument value from the dominating BB
SuccessBBArgs.push_back(DomSuccessBB->getBBArg(0));
SILBuilder(BB).createBranch(CCBI->getLoc(), SuccessBB, SuccessBBArgs);
CCBI->eraseFromParent();
}
}
/// Handle a special case, where ArgBB is the entry block.
bool CheckedCastBrJumpThreading::handleArgBBIsEntryBlock(SILBasicBlock *ArgBB) {
if (ArgBB->getPreds().begin() == ArgBB->getPreds().end()) {
// It must be the entry block
// See if it is reached over Success or Failure path.
bool SuccessDominates = DomSuccessBB == BB;
bool FailureDominates = DomFailureBB == BB;
classifyPredecessor(ArgBB, SuccessPreds, FailurePreds, UnknownPreds,
SuccessDominates, FailureDominates);
return true;
}
return false;
}
// Returns false if cloning required by jump threading cannot
// be performed, because some of the constraints are violated.
bool CheckedCastBrJumpThreading::checkCloningConstraints() {
// Check some cloning related constraints.
// If this argument is from a different BB, then jump-threading
// may require too much code duplication.
if (ArgBB && ArgBB != BB)
return false;
// Bail out if current BB cannot be duplicated.
if (!canDuplicateBlock(BB))
return false;
// Check if code-bloat would be too big when this BB
// is jump-threaded.
// TODO: Make InlineCostCutoff parameter configurable?
// Dec 1, 2014:
// We looked at the inline costs of BBs from our benchmark suite
// and found that currently the highest inline cost for the
// whole benchmark suite is 12. In 95% of all cases it is <=3.
const unsigned InlineCostCutoff = 20;
if (basicBlockInlineCost(BB, InlineCostCutoff) >= InlineCostCutoff)
return false;
return true;
}
/// If conditions are not equivalent along all paths, try harder
/// to check if they are actually equivalent along a subset of paths.
/// To do it, try to back-propagate the Condition
/// backwards and see if it is actually equivalent to DomCondition
/// along some of the paths.
bool CheckedCastBrJumpThreading::areEquivalentConditionsAlongSomePaths() {
auto *Arg = dyn_cast<SILArgument>(Condition);
if (!Arg)
return false;
ArgBB = Arg->getParent();
if (!DT->dominates(DomBB, ArgBB))
return false;
// Incoming values for the BBArg.
SmallVector<SILValue, 4> IncomingValues;
if (ArgBB != ArgBB->getParent()->begin() &&
(!Arg->getIncomingValues(IncomingValues) || IncomingValues.empty()))
return false;
// Check for each predecessor, if the incoming value coming from it
// is equivalent to the DomCondition. If this is the case, it is
// possible to try jump-threading along this path.
if (!handleArgBBIsEntryBlock(ArgBB)) {
// ArgBB is not the entry block and has predecessors.
unsigned idx = 0;
for (auto *PredBB : ArgBB->getPreds()) {
auto IncomingValue = IncomingValues[idx];
SILValue ReachingValue = isArgValueEquivalentToCondition(
IncomingValue, DomBB, DomCondition, DT);
if (ReachingValue == SILValue()) {
UnknownPreds.push_back(PredBB);
idx++;
continue;
}
// Condition is the same if BB is reached over a pass through Pred.
DEBUG(llvm::dbgs() << "Condition is the same if reached over ");
DEBUG(PredBB->print(llvm::dbgs()));
// See if it is reached over Success or Failure path.
bool SuccessDominates = DT->dominates(DomSuccessBB, PredBB) ||
DT->dominates(DomSuccessBB, BB) ||
DomSuccessBB == BB;
bool FailureDominates = DT->dominates(DomFailureBB, PredBB) ||
DT->dominates(DomFailureBB, BB) ||
DomFailureBB == BB;
classifyPredecessor(
PredBB, SuccessPreds, FailurePreds, UnknownPreds,
SuccessDominates, FailureDominates);
idx++;
}
} else {
// ArgBB is the entry block. Check that conditions are equivalent in this
// case as well.
if (!isArgValueEquivalentToCondition(Condition, DomBB, DomCondition, DT))
return false;
}
// At this point we know for each predecessor of ArgBB if it is reached
// over the success, failure or unknown path from DomBB.
// Now we can generate a new BB for preds reaching BB over the success
// path and a new BB for preds reaching BB over the failure path.
// Then we redirect those preds to those new basic blocks.
return true;
}
/// Check if conditions of CCBI and DomCCBI are equivalent along
/// all or at least some paths.
bool CheckedCastBrJumpThreading::areEquivalentConditionsAlongPaths() {
// Are conditions equivalent along all paths?
if (DomCondition == Condition) {
// Conditions are exactly the same, without any restrictions.
// They are equivalent along all paths.
// Figure out for each predecessor which branch of
// the dominating checked_cast_br is used to reach it.
for (auto *PredBB : BB->getPreds()) {
// All predecessors should either unconditionally branch
// to the current BB or be another checked_cast_br instruction.
if (!dyn_cast<CheckedCastBranchInst>(PredBB->getTerminator()) &&
!dyn_cast<BranchInst>(PredBB->getTerminator()))
return false;
bool SuccessDominates =
DT->dominates(DomSuccessBB, PredBB) || DomSuccessBB == BB;
bool FailureDominates =
DT->dominates(DomFailureBB, PredBB) || DomFailureBB == BB;
classifyPredecessor(PredBB, SuccessPreds, FailurePreds, UnknownPreds,
SuccessDominates, FailureDominates);
}
return true;
}
// Check if conditions are equivalent along a subset of reaching paths.
return areEquivalentConditionsAlongSomePaths();
}
/// Try performing a dominator-based jump-threading for
/// checked_cast_br instructions.
bool CheckedCastBrJumpThreading::trySimplify(TermInst *Term) {
// Use dyn_cast rather than cast here: the null check below is only
// meaningful if the cast is allowed to fail.
CCBI = dyn_cast<CheckedCastBranchInst>(Term);
if (!CCBI)
return false;
// Init information about the checked_cast_br we try to
// jump-thread.
BB = Term->getParent();
Condition = Term->getOperand(0).stripClassCasts();
SuccessBB = CCBI->getSuccessBB();
FailureBB = CCBI->getFailureBB();
// Find a dominating checked_cast_br, which performs the same check.
for (Node = DT->getNode(BB)->getIDom(); Node; Node = Node->getIDom()) {
// Get current dominating block.
DomBB = Node->getBlock();
auto *DomTerm = DomBB->getTerminator();
if (!DomTerm->getNumOperands())
continue;
// Check that it is a dominating checked_cast_br.
DomCCBI = dyn_cast<CheckedCastBranchInst>(DomTerm);
if (!DomCCBI)
continue;
// We need to verify that the result type is the same in the
// dominating checked_cast_br, but only for non-exact casts.
// For exact casts, we are interested only in the
// fact that the source operand is the same for
// both instructions.
if (!CCBI->isExact() && !DomCCBI->isExact()) {
if (DomCCBI->getCastType() != CCBI->getCastType())
continue;
}
// Conservatively check that both checked_cast_br instructions
// are either exact or non-exact. This is very conservative,
// but safe.
//
// TODO:
// If the dominating checked_cast_br is non-exact, then
// it is in general not safe to assume that current exact cast
// would have the same outcome. But if the dominating non-exact
// checked_cast_br fails, then the current exact cast would
// always fail as well.
//
// If the dominating checked_cast_br is exact, then
// it is in general not safe to assume that the current non-exact
// cast would have the same outcome. But if the dominating exact
// checked_cast_br succeeds, then the current non-exact cast
// would always succeed as well.
//
// TODO: In some specific cases, it is possible to prove that
// success or failure of the dominating cast is equivalent to
// the success or failure of the current cast, even if one
// of them is exact and the other not. This is the case
// e.g. if the class has no subclasses.
if (DomCCBI->isExact() != CCBI->isExact())
continue;
// Initialize state variables for the current round of checks
// based on the found dominating checked_cast_br.
DomSuccessBB = DomCCBI->getSuccessBB();
DomFailureBB = DomCCBI->getFailureBB();
DomCondition = DomTerm->getOperand(0).stripClassCasts();
// Init state variables for paths analysis
SuccessPreds.clear();
FailurePreds.clear();
UnknownPreds.clear();
ArgBB = nullptr;
// Init state variables for jump-threading transformation.
TargetFailureBB = nullptr;
TargetSuccessBB = nullptr;
// Are conditions of CCBI and DomCCBI equivalent along (some) paths?
// If this is the case, classify all incoming paths into SuccessPreds,
// FailurePreds or UnknownPreds depending on how they reach CCBI.
if (!areEquivalentConditionsAlongPaths())
continue;
// Check if any jump-threading is required and possible.
if (SuccessPreds.empty() && FailurePreds.empty())
return false;
// If this check is reachable via success, failure and unknown
// at the same time, then we don't know the outcome of the
// dominating check. No jump-threading is possible in this case.
if (!SuccessPreds.empty() && !FailurePreds.empty() &&
!UnknownPreds.empty()) {
return false;
}
unsigned TotalPreds =
SuccessPreds.size() + FailurePreds.size() + UnknownPreds.size();
// We only need to clone the BB if not all of its
// predecessors are in the same group.
if (TotalPreds != SuccessPreds.size() &&
TotalPreds != UnknownPreds.size()) {
// Check some cloning related constraints.
if (!checkCloningConstraints())
return false;
}
bool InvertSuccess = false;
if (DomCCBI->isExact() && CCBI->isExact() &&
DomCCBI->getCastType() != CCBI->getCastType()) {
if (TotalPreds == SuccessPreds.size()) {
// The dominating exact cast was successful, but it casted to a
// different type. Therefore, the current cast fails for sure.
// Since we are going to change the BB,
// add its successors and predecessors
// for re-processing.
InvertSuccess = true;
} else {
// Otherwise, we don't know if the current cast will succeed or
// fail.
return false;
}
}
// If we have predecessors, where it is not known if they are reached over
// success or failure path, we cannot eliminate a checked_cast_br.
// We have to generate new dedicated BBs as landing BBs for all
// FailurePreds and all SuccessPreds.
// Since we are going to change the BB,
// add its successors and predecessors
// for re-processing.
for (auto *B : BB->getPreds()) {
addBlockToSimplifyCFGWorklist(B);
}
for (auto *B : BB->getSuccessorBlocks()) {
addBlockToSimplifyCFGWorklist(B);
}
// Create a copy of the BB as a landing BB
// for all FailurePreds.
modifyCFGForFailurePreds();
if (InvertSuccess) {
SILBuilder(BB).createBranch(CCBI->getLoc(), FailureBB);
CCBI->eraseFromParent();
SuccessPreds.clear();
} else {
// Create a copy of the BB or reuse BB as
// a landing basic block for all SuccessPreds.
modifyCFGForSuccessPreds();
}
// Handle unknown preds.
modifyCFGForUnknownPreds();
// Update the dominator tree after all changes.
updateDominatorTree();
// Update the SSA form after all changes.
updateSSA();
// Since a few BBs were changed now, add them for re-processing.
addBlocksToWorklist();
return true;
}
// Jump-threading was not possible.
return false;
}
namespace swift {
bool tryCheckedCastBrJumpThreading(TermInst *Term, DominanceInfo *DT,
SmallVectorImpl<SILBasicBlock *> &BBs) {
CheckedCastBrJumpThreading CCBJumpThreading(DT, BBs);
return CCBJumpThreading.trySimplify(Term);
}
} // end namespace swift<|fim▁end|> | |
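The decision at the core of this pass is the three-way predecessor split done by `classifyPredecessor`; a minimal Python restatement, where the two boolean flags stand in for the dominator-tree queries the C++ callers compute:

```python
def classify_predecessor(pred, success_dominates, failure_dominates,
                         success_preds, failure_preds, unknown_preds):
    if success_dominates and failure_dominates:
        unknown_preds.append(pred)   # reachable along both outcomes
    elif success_dominates:
        success_preds.append(pred)   # dominating cast known to have succeeded
    elif failure_dominates:
        failure_preds.append(pred)   # dominating cast known to have failed
    else:
        unknown_preds.append(pred)   # outcome unknown along this path
```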
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# romeo documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 19 15:51:40 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx_rtd_theme
import alabaster
import subprocess
read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
print os.getcwd()
if read_the_docs_build:
    subprocess.call('doxygen doxygen.cfg', shell=True)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
breathe_projects = {
"romeo":"xml/",
}
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["breathe", "alabaster"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'romeo'
copyright = u'2015, erik'
author = u'erik'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {'collapsiblesidebar': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'romeodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').<|fim▁hole|>
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'romeo.tex', u'romeo Documentation',
u'erik', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'romeo', u'romeo Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'romeo', u'romeo Documentation',
author, 'romeo', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False<|fim▁end|> | #'pointsize': '10pt',
|
<|file_name|>coord.js<|end_file_name|><|fim▁begin|>PUMP_SELECTOR.CoordSlicer = function( parameters ) {
var image = parameters.image;
var coord = parameters.coord;
var name = parameters.name;
var obj = {};
if( image === undefined) {
//PUMPER.debug("PUMPER::CoordSlicer() - Cannot slice image, image missing.");
return undefined;
}else if (coord === undefined) {
//PUMPER.debug("PUMPER::CoordSlicer() - Warn: No coordinate data given. Returning image.");
// Store the unsliced image under the given name and return the wrapper,
// as the debug message describes (the original assigned obj to itself).
obj[name] = image;
return obj;
}else{
var coords = coord.split("\n");
// Array.prototype.clean is not standard JavaScript; the codebase presumably
// defines a helper that removes empty entries left over from the split.
coords.clean("");<|fim▁hole|>
for(var i=0;i<coords.length;i++) {
var coordinate = coords[i].split(" ");
obj[coordinate[0]] = PUMP_SELECTOR.CropImage(image,coordinate[1],coordinate[2],coordinate[3],coordinate[4]);
}
return obj;
}
};<|fim▁end|> | |
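The coordinate string parsed above holds one `name x y width height` record per line; a small Python sketch of the same parse, with the project's `CropImage` helper reduced to returning the crop rectangle (the sample records are hypothetical):

```python
def slice_coords(coord_text):
    # Blank lines are skipped, which is what the nonstandard
    # coords.clean("") call does in the JavaScript above.
    out = {}
    for record in filter(None, coord_text.split("\n")):
        name, x, y, w, h = record.split(" ")
        out[name] = (int(x), int(y), int(w), int(h))  # crop rect per sprite name
    return out

print(slice_coords("pump 0 0 32 32\n\nvalve 32 0 16 16"))
```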
<|file_name|>bitcoin_et.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="et" version="2.1">
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Mercury</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+39"/>
<source><b>Mercury</b> version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+41"/>
<source>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2014 The Mercury developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>⏎
See on eksperimentaalne tarkvara.⏎
⏎
Levitatud MIT/X11 tarkvara litsentsi all, vaata kaasasolevat faili COPYING või http://www.opensource.org/licenses/mit-license.php⏎
⏎
Toode sisaldab OpenSSL Projekti all toodetud tarkvara, mis on kasutamiseks OpenSSL Toolkitis (http://www.openssl.org/) ja Eric Young'i poolt loodud krüptograafilist tarkvara ([email protected]) ning Thomas Bernard'i loodud UPnP tarkvara.</translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Double-click to edit address or label</source>
<translation>Topeltklõps aadressi või märgise muutmiseks</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Loo uus aadress</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Kopeeri märgistatud aadress vahemällu</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-46"/>
<source>These are your Mercury addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<source>&Copy Address</source>
<translation>&Aadressi kopeerimine</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Mercury address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>Kustuta märgistatud aadress loetelust</translation>
</message>
<message>
<location line="-14"/>
<source>Verify a message to ensure it was signed with a specified Mercury address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Kustuta</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+65"/>
<source>Copy &Label</source>
<translation>&Märgise kopeerimine</translation>
</message>
<message>
<location line="+2"/>
<source>&Edit</source>
<translation>&Muuda</translation>
</message>
<message>
<location line="+250"/>
<source>Export Address Book Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Komaeraldatud fail (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Silt</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Aadress</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(silti pole)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>Salafraasi dialoog</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Sisesta salafraas</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Uus salafraas</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Korda salafraasi</translation>
</message>
<message>
<location line="+33"/>
<source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>For staking only</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+35"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Sisesta rahakotile uus salafraas.<br/>Palun kasuta salafraasina <b>vähemalt 10 tähte/numbrit/sümbolit</b>, või <b>vähemalt 8 sõna</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Krüpteeri rahakott</translation>
</message>
<message>
<location line="+7"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>See toiming nõuab sinu rahakoti salafraasi.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Tee rahakott lukust lahti.</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>See toiming nõuab sinu rahakoti salafraasi.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Dekrüpteeri rahakott.</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Muuda salafraasi</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Sisesta rahakoti vana ning uus salafraas.</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Kinnita rahakoti krüpteering</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR COINS</b>!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>Kas soovid oma rahakoti krüpteerida?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>TÄHTIS: Kõik varasemad rahakoti varundfailid tuleks üle kirjutada äsja loodud krüpteeritud rahakoti failiga. Turvakaalutlustel tühistatakse krüpteerimata rahakoti failid alates uue, krüpteeritud rahakoti, kasutusele võtust.</translation>
</message>
<message>
<location line="+103"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Hoiatus: Caps Lock on sisse lülitatud!</translation>
</message>
<message>
<location line="-133"/>
<location line="+60"/>
<source>Wallet encrypted</source>
<translation>Rahakott krüpteeritud</translation>
</message>
<message>
<location line="-58"/>
<source>Mercury will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+44"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Tõrge rahakoti krüpteerimisel</translation>
</message>
<message>
<location line="-56"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Rahakoti krüpteering ebaõnnestus tõrke tõttu. Sinu rahakotti ei krüpteeritud.</translation>
</message>
<message>
<location line="+7"/>
<location line="+50"/>
<source>The supplied passphrases do not match.</source>
<translation>Salafraasid ei kattu.</translation>
</message>
<message>
<location line="-38"/>
<source>Wallet unlock failed</source>
<translation>Rahakoti avamine ebaõnnestus</translation>
</message>
<message>
<location line="+1"/>
<location line="+12"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>Rahakoti salafraas ei ole õige.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Rahakoti dekrüpteerimine ei õnnestunud</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>Rahakoti salafraasi muutmine õnnestus.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+282"/>
<source>Sign &message...</source>
<translation>Signeeri &sõnum</translation>
</message>
<message>
<location line="+251"/>
<source>Synchronizing with network...</source>
<translation>Võrgusünkimine...</translation>
</message>
<message>
<location line="-319"/>
<source>&Overview</source>
<translation>&Ülevaade</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Kuva rahakoti üld-ülevaade</translation>
</message>
<message>
<location line="+17"/>
<source>&Transactions</source>
<translation>&Tehingud</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Sirvi tehingute ajalugu</translation>
</message>
<message>
<location line="+5"/>
<source>&Address Book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit the list of stored addresses and labels</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-13"/>
<source>&Receive coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show the list of addresses for receiving payments</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-7"/>
<source>&Send coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>E&xit</source>
<translation>V&älju</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Väljumine</translation>
</message>
<message>
<location line="+6"/>
<source>Show information about Mercury</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>Teave &Qt kohta</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Kuva Qt kohta käiv info</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Valikud...</translation>
</message>
<message>
<location line="+4"/>
<source>&Encrypt Wallet...</source>
<translation>&Krüpteeri Rahakott</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>&Varunda Rahakott</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&Salafraasi muutmine</translation>
</message>
<message numerus="yes">
<location line="+259"/>
<source>~%n block(s) remaining</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-256"/>
<source>&Export...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-64"/>
<source>Send coins to a Mercury address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+47"/>
<source>Modify configuration options for Mercury</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-14"/>
<source>Encrypt or decrypt wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup wallet to another location</source>
<translation>Varunda rahakott teise asukohta</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Rahakoti krüpteerimise salafraasi muutmine</translation>
</message>
<message>
<location line="+10"/>
<source>&Debug window</source>
<translation>&Debugimise aken</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>Ava debugimise ja diagnostika konsool</translation>
</message>
<message>
<location line="-5"/>
<source>&Verify message...</source>
<translation>&Kontrolli sõnumit...</translation>
</message>
<message>
<location line="-202"/>
<source>Mercury</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet</source>
<translation>Rahakott</translation>
</message>
<message>
<location line="+180"/>
<source>&About Mercury</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&Näita / Peida</translation>
</message>
<message>
<location line="+9"/>
<source>Unlock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>&Lock Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Lock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>&File</source>
<translation>&Fail</translation>
</message>
<message>
<location line="+8"/>
<source>&Settings</source>
<translation>&Seaded</translation>
</message>
<message>
<location line="+8"/>
<source>&Help</source>
<translation>&Abi</translation>
</message>
<message>
<location line="+12"/>
<source>Tabs toolbar</source>
<translation>Vahelehe tööriistariba</translation>
</message>
<message>
<location line="+8"/>
<source>Actions toolbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+9"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+0"/>
<location line="+60"/>
<source>Mercury client</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+75"/>
<source>%n active connection(s) to Mercury network</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+40"/>
<source>Downloaded %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+413"/>
<source>Staking.<br>Your weight is %1<br>Network weight is %2<br>Expected time to earn reward is %3</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Not staking because wallet is locked</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is syncing</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because you don't have mature coins</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-403"/>
<source>%n second(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="-312"/>
<source>About Mercury card</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show information about Mercury card</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>&Unlock Wallet...</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+297"/>
<source>%n minute(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Up to date</source>
<translation>Ajakohane</translation>
</message>
<message>
<location line="+7"/>
<source>Catching up...</source>
<translation>Jõuan järele...</translation>
</message>
<message>
<location line="+10"/>
<source>Last received block was generated %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Sent transaction</source>
<translation>Saadetud tehing</translation>
</message>
<message>
<location line="+1"/>
<source>Incoming transaction</source>
<translation>Sisenev tehing</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Kuupäev: %1
Summa: %2
Tüüp: %3
Aadress: %4
</translation>
</message>
<message>
<location line="+100"/>
<location line="+15"/>
<source>URI handling</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-15"/>
<location line="+15"/>
<source>URI can not be parsed! This can be caused by an invalid Mercury address or malformed URI parameters.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Rahakott on <b>krüpteeritud</b> ning hetkel <b>avatud</b></translation>
</message>
<message>
<location line="+10"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Rahakott on <b>krüpteeritud</b> ning hetkel <b>suletud</b></translation>
</message>
<message>
<location line="+25"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+76"/>
<source>%n second(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n minute(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s)</source>
<translation><numerusform>%n tund</numerusform><numerusform>%n tundi</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation><numerusform>%n päev</numerusform><numerusform>%n päeva</numerusform></translation>
</message>
<message>
<location line="+18"/>
<source>Not staking</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="+109"/>
<source>A fatal error occurred. Mercury can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+90"/>
<source>Network Alert</source>
<translation>Võrgu Häire</translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<location filename="../forms/coincontroldialog.ui" line="+14"/>
<source>Coin Control</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Quantity:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Bytes:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+48"/>
<source>Amount:</source>
<translation>Summa:</translation>
</message>
<message>
<location line="+32"/>
<source>Priority:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+48"/>
<source>Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="+551"/>
<source>no</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/coincontroldialog.ui" line="+51"/>
<source>After Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Change:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+69"/>
<source>(un)select all</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Tree mode</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>List mode</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+45"/>
<source>Amount</source>
<translation>Kogus</translation>
</message>
<message>
<location line="+5"/>
<source>Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Address</source>
<translation>Aadress</translation>
</message>
<message>
<location line="+5"/>
<source>Date</source>
<translation>Kuupäev</translation>
</message>
<message>
<location line="+5"/>
<source>Confirmations</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirmed</source>
<translation>Kinnitatud</translation>
</message>
<message>
<location line="+5"/>
<source>Priority</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="-515"/>
<source>Copy address</source>
<translation>Aadressi kopeerimine</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Märgise kopeerimine</translation>
</message>
<message>
<location line="+1"/>
<location line="+26"/>
<source>Copy amount</source>
<translation>Kopeeri summa</translation>
</message>
<message>
<location line="-25"/>
<source>Copy transaction ID</source>
<translation>Kopeeri tehingu ID</translation>
</message>
<message>
<location line="+24"/>
<source>Copy quantity</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Copy fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+317"/>
<source>highest</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>high</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>medium-high</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>low-medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>low</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>lowest</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+155"/>
<source>DUST</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>yes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>This label turns red, if the transaction size is bigger than 10000 bytes.
This means a fee of at least %1 per kb is required.
Can vary +/- 1 Byte per input.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transactions with higher priority get more likely into a block.
This label turns red, if the priority is smaller than "medium".
This means a fee of at least %1 per kb is required.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if any recipient receives an amount smaller than %1.
This means a fee of at least %2 is required.
Amounts below 0.546 times the minimum relay fee are shown as DUST.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if the change is smaller than %1.
This means a fee of at least %2 is required.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<location line="+66"/>
<source>(no label)</source>
<translation>(silti pole)</translation>
</message>
<message>
<location line="-9"/>
<source>change from %1 (%2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>(change)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Muuda aadressi</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Märgis</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Aadress</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+20"/>
<source>New receiving address</source>
<translation>Uus sissetulev aadress</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Uus väljaminev aadress</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Sissetulevate aadresside muutmine</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Väljaminevate aadresside muutmine</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>Sisestatud aadress "%1" on juba aadressiraamatus.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Mercury address.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Rahakotti ei õnnestunud avada.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Tõrge uue võtme loomisel.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+420"/>
<location line="+12"/>
<source>Mercury-Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Valikud</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>&Peamine</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Tehingu &tasu</translation>
</message>
<message>
<location line="+31"/>
<source>Reserved amount does not participate in staking and is therefore spendable at any time.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Reserve</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Automatically start Mercury after logging in to the system.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Start Mercury on system login</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. The wallet is always detached.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Detach databases at shutdown</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Network</source>
<translation>&Võrk</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Mercury client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Suuna port &UPnP kaudu</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the Mercury network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>Proxy &IP:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&Port:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Proxy port (nt 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>SOCKS &versioon:</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>Proxy SOCKS versioon (nt 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>&Aken</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Pärast akna minimeerimist kuva ainult süsteemisalve ikoon.</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&Minimeeri systray alale</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Sulgemise asemel minimeeri aken. Selle valiku puhul suletakse programm ainult menüü käsuga "Välju".</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>M&inimeeri sulgemisel</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&Kuva</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>Kasutajaliidese &keel:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Mercury.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>Summade kuvamise &ühik:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Vali liideses ning müntide saatmisel kuvatav vaikimisi alajaotus.</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show Mercury addresses in the transaction list or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>&Kuva aadressid tehingute loetelus</translation>
</message>
<message>
<location line="+7"/>
<source>Whether to show coin control features or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Display coin &control features (experts only!)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&OK</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&Katkesta</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+55"/>
<source>default</source>
<translation>vaikeväärtus</translation>
</message>
<message>
<location line="+149"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Mercury.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>Sisestatud proxy aadress on kehtetu.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Vorm</translation>
</message>
<message>
<location line="+33"/>
<location line="+231"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Mercury network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-160"/>
<source>Stake:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-107"/>
<source>Wallet</source>
<translation>Rahakott</translation>
</message>
<message>
<location line="+49"/>
<source>Spendable:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Your current spendable balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>Immature:</source>
<translation>Ebaküps:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>Mine'itud jääk, mis pole veel küpsenud</translation>
</message>
<message>
<location line="+20"/>
<source>Total:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Your current total balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Hiljutised tehingud</b></translation>
</message>
<message>
<location line="-108"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-29"/>
<source>Total of coins that was staked, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../overviewpage.cpp" line="+113"/>
<location line="+1"/>
<source>out of sync</source>
<translation>sünkimata</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>Kliendi nimi</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+348"/>
<source>N/A</source>
<translation>N/A</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>Kliendi versioon</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&Informatsioon</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Kasutan OpenSSL versiooni</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>Käivitamise hetk</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Võrgustik</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Ühenduste arv</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>Ploki jada</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Plokkide hetkearv</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>Ligikaudne plokkide kogus</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>Viimase ploki aeg</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&Ava</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Show the Mercury-Qt help message to get a list with possible Mercury command-line options.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&Konsool</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>Valmistusaeg</translation>
</message>
<message>
<location line="-104"/>
<source>Mercury - Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Mercury Core</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>Debugimise logifail</translation>
</message>
<message>
<location line="+7"/>
<source>Open the Mercury debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Puhasta konsool</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-33"/>
<source>Welcome to the Mercury RPC console.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Ajaloo sirvimiseks kasuta üles ja alla nooli, ekraani puhastamiseks <b>Ctrl-L</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Ülevaateks võimalikest käsklustest trüki <b>help</b>.</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+182"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Müntide saatmine</translation>
</message>
<message>
<location line="+76"/>
<source>Coin Control Features</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Inputs...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>automatically selected</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Insufficient funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Quantity:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<location line="+35"/>
<source>0</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-19"/>
<source>Bytes:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+51"/>
<source>Amount:</source>
<translation>Summa:</translation>
</message>
<message>
<location line="+22"/>
<location line="+86"/>
<location line="+86"/>
<location line="+32"/>
<source>0.00 CLAM</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-191"/>
<source>Priority:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>no</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>After Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<source>custom change address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+106"/>
<source>Send to multiple recipients at once</source>
<translation>Saatmine mitmele korraga</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>Lisa &Saaja</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>Puhasta &Kõik</translation>
</message>
<message>
<location line="+28"/>
<source>Balance:</source>
<translation>Jääk:</translation>
</message>
<message>
<location line="+16"/>
<source>123.456 CLAM</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>Saatmise kinnitamine</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>S&aada</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-173"/>
<source>Enter a Mercury address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Copy quantity</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Kopeeri summa</translation>
</message>
<message>
<location line="+1"/>
<source>Copy fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+86"/>
<source><b>%1</b> to %2 (%3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Müntide saatmise kinnitamine</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>Saaja aadress ei ole kehtiv, palun kontrolli.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>Makstav summa peab olema suurem kui 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>Summa ületab jäägi.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>Summa koos tehingu tasuga %1 ületab sinu jääki.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Ühe saatmisega topelt-adressaati olla ei tohi.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+251"/>
<source>WARNING: Invalid Mercury address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>(no label)</source>
<translation>(silti pole)</translation>
</message>
<message>
<location line="+4"/>
<source>WARNING: unknown change address</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>S&umma:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>Maksa &saajale:</translation>
</message>
<message>
<location line="+24"/>
<location filename="../sendcoinsentry.cpp" line="+25"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Sisesta sellele aadressile märgis, et lisada see aadressiraamatusse</translation>
</message>
<message>
<location line="+9"/>
<source>&Label:</source>
<translation>&Märgis:</translation>
</message>
<message>
<location line="+18"/>
<source>The address to send the payment to (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Choose address from address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Kleebi aadress vahemälust</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Mercury address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>Signatuurid - Allkirjasta / Kinnita Sõnum</translation>
</message>
<message>
<location line="+13"/>
<location line="+124"/>
<source>&Sign Message</source>
<translation>&Allkirjasta sõnum</translation>
</message>
<message>
<location line="-118"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>Omandiõiguse tõestamiseks saad sõnumeid allkirjastada oma aadressiga. Ettevaatust petturitega, kes üritavad sinu allkirja endale saada. Allkirjasta ainult korralikult täidetud avaldusi, millega nõustud.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+203"/>
<source>Choose an address from the address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-193"/>
<location line="+203"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-193"/>
<source>Paste address from clipboard</source>
<translation>Kleebi aadress vahemälust</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Sisesta siia sõnum, mida soovid allkirjastada</translation>
</message>
<message>
<location line="+24"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Kopeeri praegune signatuur vahemällu</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Mercury address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all sign message fields</source>
<translation>Tühjenda kõik sõnumi allkirjastamise väljad</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>Puhasta &Kõik</translation>
</message>
<message>
<location line="-87"/>
<location line="+70"/>
<source>&Verify Message</source>
<translation>&Kinnita Sõnum</translation>
</message>
<message>
<location line="-64"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>Kinnitamiseks sisesta allkirjastamise aadress, sõnum (kindlasti kopeeri täpselt ka reavahetused, tühikud, tabulaatorid jms) ning allolev signatuur. Vahendajaründe vältimiseks ära omista allkirjale rohkem tähendust, kui allkirjastatud sõnumis endas kirjas on.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Mercury address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all verify message fields</source>
<translation>Tühjenda kõik sõnumi kinnitamise väljad</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Mercury address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>Signatuuri genereerimiseks vajuta "Allkirjasta Sõnum"</translation>
</message>
<message>
<location line="+3"/>
<source>Enter Mercury signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>Sisestatud aadress ei kehti.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>Palun kontrolli aadressi ning proovi uuesti.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>Sisestatud aadress ei viita võtmele.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Rahakoti avamine katkestati.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>Sisestatud aadressi privaatvõti ei ole saadaval.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>Sõnumi signeerimine ebaõnnestus.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Sõnum signeeritud.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>Signatuuri ei õnnestunud dekodeerida.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>Palun kontrolli signatuuri ning proovi uuesti.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>Signatuur ei kattunud sõnumi kokkuvõttega.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>Sõnumi kontroll ebaõnnestus.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>Sõnum kontrollitud.</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+19"/>
<source>Open until %1</source>
<translation>Avatud kuni %1</translation>
</message>
<message numerus="yes">
<location line="-2"/>
<source>Open for %n block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+8"/>
<source>conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/offline</source>
<translation>%1/offline'is</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/kinnitamata</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 kinnitust</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>Staatus</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, levitatud läbi %n node'i</numerusform><numerusform>, levitatud läbi %n node'i</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Kuupäev</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Allikas</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Genereeritud</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>Saatja</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>Saaja</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>oma aadress</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>märgis</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Krediit</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>küpseb %n bloki pärast</numerusform><numerusform>küpseb %n bloki pärast</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>mitte aktsepteeritud</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Deebet</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Tehingu tasu</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>Neto summa</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Sõnum</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Kommentaar</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>Tehingu ID</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>Debug'imise info</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Tehing</translation>
</message>
<message>
<location line="+5"/>
<source>Inputs</source>
<translation>Sisendid</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Kogus</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>õige</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>vale</translation>
</message>
<message>
<location line="-211"/>
<source>, has not been successfully broadcast yet</source>
<translation>, pole veel edukalt levitatud</translation>
</message>
<message>
<location line="+35"/>
<source>unknown</source>
<translation>tundmatu</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Tehingu üksikasjad</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Paan kuvab tehingu detailid</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+226"/>
<source>Date</source>
<translation>Kuupäev</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Tüüp</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Aadress</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Kogus</translation>
</message>
<message>
<location line="+60"/>
<source>Open until %1</source>
<translation>Avatud kuni %1</translation>
</message>
<message>
<location line="+12"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Kinnitatud (%1 kinnitust)</translation>
</message>
<message numerus="yes">
<location line="-15"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Avaneb %n bloki pärast</numerusform><numerusform>Avaneb %n bloki pärast</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Seda plokki pole ükski teine node vastu võtnud ning tõenäoliselt seda ei aktsepteerita!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Loodud, kuid aktsepteerimata</translation>
</message>
<message>
<location line="+42"/>
<source>Received with</source>
<translation>Saadud koos</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Kellelt saadud</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Saadetud</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Makse iseendale</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Mine'itud</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(n/a)</translation>
</message>
<message>
<location line="+190"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Tehingu staatus. Kinnituste arvu kuvamiseks liigu hiire noolega selle peale.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Tehingu saamise kuupäev ning kellaaeg.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Tehingu tüüp.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>Tehingu saaja aadress.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Jäägile lisatud või eemaldatud summa.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+55"/>
<location line="+16"/>
<source>All</source>
<translation>Kõik</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>Täna</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Jooksev nädal</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Jooksev kuu</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Eelmine kuu</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Jooksev aasta</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Ulatus...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Saadud koos</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Saadetud</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>Iseendale</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Mine'itud</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Muu</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Otsimiseks sisesta märgis või aadress</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Vähim summa</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Aadressi kopeerimine</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Märgise kopeerimine</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Kopeeri summa</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>Kopeeri tehingu ID</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Märgise muutmine</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>Kuva tehingu detailid</translation>
</message>
<message>
<location line="+144"/>
<source>Export Transaction Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Komaeraldatud fail (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Kinnitatud</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Kuupäev</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Tüüp</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Silt</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Aadress</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Kogus</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Ulatus:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>kuni</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+206"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+33"/>
<source>Mercury version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Usage:</source>
<translation>Kasutus:</translation>
</message>
<message>
<location line="+1"/>
<source>Send command to -server or mercuryd</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>List commands</source>
<translation>Käskluste loetelu</translation>
</message>
<message>
<location line="+1"/>
<source>Get help for a command</source>
<translation>Käskluste abiinfo</translation>
</message>
<message>
<location line="+2"/>
<source>Options:</source>
<translation>Valikud:</translation>
</message>
<message>
<location line="+2"/>
<source>Specify configuration file (default: mercury.conf)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Specify pid file (default: mercuryd.pid)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify wallet file (within data directory)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Täpsusta andmekataloog</translation>
</message>
<message>
<location line="+2"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Sea andmebaasi vahemälu suurus MB (vaikeväärtus: 25)</translation>
</message>
<message>
<location line="+1"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Listen for connections on <port> (default: 15714 or testnet: 25714)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Säilita kuni <n> ühendust peeridega (vaikeväärtus: 125)</translation>
</message>
<message>
<location line="+3"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Peeri aadressi saamiseks ühendu korraks node'iga</translation>
</message>
<message>
<location line="+1"/>
<source>Specify your own public address</source>
<translation>Täpsusta enda avalik aadress</translation>
</message>
<message>
<location line="+5"/>
<source>Bind to given address. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Stake your coins to support network and gain reward (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Ulakate peeride valulävi (vaikeväärtus: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Mitme sekundi pärast ulakad peerid tagasi võivad tulla (vaikeväärtus: 86400)</translation>
</message>
<message>
<location line="-44"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>RPC pordi %u kuulamiseks seadistamisel ilmnes viga IPv4'l: %s</translation>
</message>
<message>
<location line="+51"/>
<source>Detach block and address databases. Increases shutdown time (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+109"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-5"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source>
<translation type="unfinished"/>
</message>
<message>
<location line="-87"/>
<source>Listen for JSON-RPC connections on <port> (default: 15715 or testnet: 56413)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-11"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Luba käsurea ning JSON-RPC käsklusi</translation>
</message>
<message>
<location line="+101"/>
<source>Error: Transaction creation failed </source>
<translation type="unfinished"/>
</message>
<message>
<location line="-5"/>
<source>Error: Wallet locked, unable to create transaction </source>
<translation type="unfinished"/>
</message>
<message>
<location line="-8"/>
<source>Importing blockchain data file.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Importing bootstrap blockchain data file.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-88"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Tööta taustal ning aktsepteeri käsklusi</translation>
</message>
<message>
<location line="+1"/>
<source>Use the test network</source>
<translation>Testvõrgu kasutamine</translation>
</message>
<message>
<location line="-24"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Luba välisühendusi (vaikeväärtus: 1 kui puudub -proxy või -connect)</translation>
</message>
<message>
<location line="-38"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>RPC pordi %u kuulamiseks seadistamisel ilmnes viga IPv6'l, lülitumine tagasi IPv4'le : %s</translation>
</message>
<message>
<location line="+117"/>
<source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-20"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Hoiatus: -paytxfee on seatud väga kõrgeks! See on sinu poolt makstav tehingu lisatasu.</translation>
</message>
<message>
<location line="+61"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Mercury will not work properly.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-31"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>Hoiatus: ilmnes tõrge wallet.dat faili lugemisel! Võtmed on terved, kuid tehingu andmed või aadressiraamatu kirjed võivad olla kadunud või vigased.</translation>
</message>
<message>
<location line="-18"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>Hoiatus: toimus wallet.dat faili andmete päästmine! Originaal wallet.dat nimetati kaustas %s ümber wallet.{ajatempel}.bak'iks, jäägi või tehingute ebakõlade puhul tuleks teha backup'ist taastamine.</translation>
</message>
<message>
<location line="-30"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>Püüa vigasest wallet.dat failist taastada turvavõtmed</translation>
</message>
<message>
<location line="+4"/>
<source>Block creation options:</source>
<translation>Blokeeri loomise valikud:</translation>
</message>
<message>
<location line="-62"/>
<source>Connect only to the specified node(s)</source>
<translation>Ühendu ainult määratud node'i(de)ga</translation>
</message>
<message>
<location line="+4"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Leia oma IP aadress (vaikeväärtus: 1, kui kuulatakse ning puudub -externalip)</translation>
</message>
<message>
<location line="+94"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Pordi kuulamine nurjus. Soovikorral kasuta -listen=0.</translation>
</message>
<message>
<location line="-90"/>
<source>Find peers using DNS lookup (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Sync checkpoints policy (default: strict)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+83"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Invalid amount for -reservebalance=<amount></source>
<translation type="unfinished"/>
</message>
<message>
<location line="-82"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>Maksimaalne saamise puhver -connection kohta , <n>*1000 baiti (vaikeväärtus: 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>Maksimaalne saatmise puhver -connection kohta , <n>*1000 baiti (vaikeväärtus: 1000)</translation>
</message>
<message>
<location line="-16"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>Ühenda ainult node'idega <net> võrgus (IPv4, IPv6 või Tor)</translation>
</message>
<message>
<location line="+28"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation>SSL valikud: (vaata Bitcoini Wikist või SSL sätete juhendist)</translation>
</message>
<message>
<location line="-74"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+41"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Saada jälitus/debug, debug.log faili asemel, konsooli</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>Sea minimaalne bloki suurus baitides (vaikeväärtus: 0)</translation>
</message>
<message>
<location line="-29"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Kahanda programmi käivitamisel debug.log faili (vaikeväärtus: 1, kui ei ole -debug)</translation>
</message>
<message>
<location line="-42"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Sea ühenduse timeout millisekundites (vaikeväärtus: 5000)</translation>
</message>
<message>
<location line="+109"/>
<source>Unable to sign checkpoint, wrong checkpointkey?
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-80"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Kasuta kuulatava pordi määramiseks UPnP ühendust (vaikeväärtus: 0)</translation>
</message>
<message>
<location line="-1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Kasuta kuulatava pordi määramiseks UPnP ühendust (vaikeväärtus: 1, kui kuulatakse)</translation>
</message>
<message>
<location line="-25"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<source>Username for JSON-RPC connections</source>
<translation>JSON-RPC ühenduste kasutajatunnus</translation>
</message>
<message>
<location line="+47"/>
<source>Verifying database integrity...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+57"/>
<source>WARNING: syncronized checkpoint violation detected, but skipped!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>Hoiatus: versioon on aegunud, uuendus on nõutav!</translation>
</message>
<message>
<location line="-48"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>wallet.dat fail on katki, päästmine ebaõnnestus</translation>
</message>
<message>
<location line="-54"/>
<source>Password for JSON-RPC connections</source>
<translation>JSON-RPC ühenduste salasõna</translation>
</message>
<message>
<location line="-84"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=mercuryrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Mercury Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+51"/>
<source>Find peers using internet relay chat (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>JSON-RPC ühenduste lubamine kindla IP pealt</translation>
</message>
<message>
<location line="+1"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Saada käsklusi node'ile IP'ga <ip> (vaikeväärtus: 127.0.0.1)</translation>
</message>
<message>
<location line="+1"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Käivita käsklus, kui parim plokk muutub (käskluse %s asendatakse ploki hash'iga)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Käivita käsklus, kui rahakoti tehing muutub (%s cmd's muudetakse TxID'ks)</translation>
</message>
<message>
<location line="+3"/>
<source>Require a confirmations for change (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Enforce transaction scripts to use canonical PUSH operators (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Upgrade wallet to latest format</source>
<translation>Uuenda rahakott uusimasse vormingusse</translation>
</message>
<message>
<location line="+1"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Sea võtmete hulgaks <n> (vaikeväärtus: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Otsi ploki jadast rahakoti kadunud tehinguid</translation>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 2500, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Imports blocks from external blk000?.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Kasuta JSON-RPC ühenduste jaoks OpenSSL'i (https)</translation>
</message>
<message>
<location line="+1"/>
<source>Server certificate file (default: server.cert)</source>
<translation>Serveri sertifikaadifail (vaikeväärtus: server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Serveri privaatvõti (vaikeväärtus: server.pem)</translation>
</message>
<message>
<location line="+1"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+53"/>
<source>Error: Wallet unlocked for staking only, unable to create transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-158"/>
<source>This help message</source>
<translation>Käesolev abitekst</translation>
</message>
<message>
<location line="+95"/>
<source>Wallet %s resides outside data directory %s.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot obtain a lock on data directory %s. Mercury is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-98"/>
<source>Mercury</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+140"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>Selle arvutiga ei ole võimalik siduda %s külge (katse nurjus %d, %s tõttu)</translation>
</message>
<message>
<location line="-130"/>
<source>Connect through socks proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>-addnode, -seednode ja -connect tohivad kasutada DNS lookup'i</translation>
</message>
<message>
<location line="+122"/>
<source>Loading addresses...</source>
<translation>Aadresside laadimine...</translation>
</message>
<message>
<location line="-15"/>
<source>Error loading blkindex.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
        <translation>Viga wallet.dat laadimisel: vigane rahakott</translation>
</message>
<message>
<location line="+4"/>
<source>Error loading wallet.dat: Wallet requires newer version of Mercury</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Wallet needed to be rewritten: restart Mercury to complete</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat</source>
        <translation>Viga wallet.dat laadimisel</translation>
</message>
<message>
<location line="-16"/>
<source>Invalid -proxy address: '%s'</source>
        <translation>Vigane -proxy aadress: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>Kirjeldatud tundmatu võrgustik -onlynet'is: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>Küsitud tundmatu -socks proxi versioon: %i</translation>
</message>
<message>
<location line="+4"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>Tundmatu -bind aadress: '%s'</translation>
</message>
<message>
<location line="+2"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>Tundmatu -externalip aadress: '%s'</translation>
</message>
<message>
<location line="-24"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>-paytxfee=<amount> jaoks vigane kogus: '%s'</translation>
</message>
<message>
<location line="+44"/>
<source>Error: could not start node</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Invalid amount</source>
<translation>Kehtetu summa</translation>
</message>
<message>
<location line="+1"/>
<source>Insufficient funds</source>
<translation>Liiga suur summa</translation>
</message>
<message>
<location line="-34"/>
<source>Loading block index...</source>
        <translation>Plokkide indeksi laadimine...</translation>
</message>
<message>
<location line="-103"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Lisa node ning hoia ühendus avatud</translation>
</message>
<message>
<location line="+122"/>
<source>Unable to bind to %s on this computer. Mercury is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-97"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+55"/>
<source>Invalid amount for -mininput=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Loading wallet...</source>
<translation>Rahakoti laadimine...</translation>
</message>
<message>
<location line="+8"/>
<source>Cannot downgrade wallet</source>
<translation>Rahakoti vanandamine ebaõnnestus</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot initialize keypool</source>
<translation type="unfinished"/>
    </message>
    <message>
        <location line="+1"/>
        <source>Cannot write default address</source>
        <translation>Tõrge vaikimisi aadressi kirjutamisel</translation>
    </message>
<message>
<location line="+1"/>
<source>Rescanning...</source>
<translation>Üleskaneerimine...</translation>
</message>
<message>
<location line="+5"/>
<source>Done loading</source>
<translation>Laetud</translation>
</message>
<message>
<location line="-167"/>
<source>To use the %s option</source>
<translation>%s valiku kasutamine</translation>
</message>
<message>
<location line="+14"/>
<source>Error</source>
<translation>Tõrge</translation>
</message>
<message>
<location line="+6"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
        <translation>rpcpassword=&lt;password&gt; peab sätete failis olema seadistatud:
%s
Kui seda faili ei ole, loo see ainult-omanikule-lugemiseks faili õigustes.</translation>
</message>
</context>
</TS>

<|file_name|>encode.go<|end_file_name|>
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"io"
)
// We limit how far copy back-references can go, the same as the C++ code.
const maxOffset = 1 << 15
// emitLiteral writes a literal chunk and returns the number of bytes written.
func emitLiteral(dst, lit []byte) int {
i, n := 0, uint(len(lit)-1)
switch {
case n < 60:
dst[0] = uint8(n)<<2 | tagLiteral
i = 1
case n < 1<<8:
dst[0] = 60<<2 | tagLiteral
dst[1] = uint8(n)
i = 2
case n < 1<<16:
dst[0] = 61<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
i = 3
case n < 1<<24:
dst[0] = 62<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
dst[3] = uint8(n >> 16)
i = 4
case int64(n) < 1<<32:
dst[0] = 63<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
dst[3] = uint8(n >> 16)
dst[4] = uint8(n >> 24)
i = 5
default:
panic("snappy: source buffer is too long")
}
if copy(dst[i:], lit) != len(lit) {
panic("snappy: destination buffer is too short")
}
return i + len(lit)
}
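// Worked illustration (added note, not from the original source): a 10-byte
// literal has n = 9, so it gets the single tag byte 9<<2|tagLiteral = 0x24
// followed by the ten payload bytes; a 100-byte literal (n = 99) instead
// needs the two-byte header 0xf0 (60<<2|tagLiteral), 0x63 before its payload.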
// emitCopy writes a copy chunk and returns the number of bytes written.
func emitCopy(dst []byte, offset, length int) int {
i := 0
for length > 0 {
x := length - 4
if 0 <= x && x < 1<<3 && offset < 1<<11 {
dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
dst[i+1] = uint8(offset)
i += 2
break
}
x = length
if x > 1<<6 {
x = 1 << 6
}
dst[i+0] = uint8(x-1)<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= x
}
return i
}
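// Worked illustration (added note): emitCopy(dst, 10, 5) takes the tagCopy1
// branch (length-4 = 1 < 8 and offset 10 < 2048) and emits just the two bytes
// 0x05, 0x0a; longer or more distant copies fall back to three-byte tagCopy2
// chunks covering at most 64 bytes each.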
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Encode(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
d := binary.PutUvarint(dst, uint64(len(src)))
// Return early if src is short.
if len(src) <= 4 {
if len(src) != 0 {
d += emitLiteral(dst[d:], src)
}
return dst[:d]
}
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
const maxTableSize = 1 << 14
shift, tableSize := uint(32-8), 1<<8
for tableSize < maxTableSize && tableSize < len(src) {
shift--
tableSize *= 2
}
var table [maxTableSize]int
// Iterate over the source bytes.
var (
s int // The iterator position.
t int // The last position with the same hash as s.
lit int // The start position of any pending literal bytes.
)
for s+3 < len(src) {
// Update the hash table.
b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
p := &table[(h*0x1e35a7bd)>>shift]
	// We need to store values in [-1, inf) in table. To save
// some initialization time, (re)use the table's zero value
// and shift the values against this zero: add 1 on writes,
// subtract 1 on reads.
t, *p = *p-1, s+1
// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
s++
continue
}
// Otherwise, we have a match. First, emit any pending literal bytes.
if lit != s {
d += emitLiteral(dst[d:], src[lit:s])
}
// Extend the match to be as long as possible.
s0 := s
s, t = s+4, t+4
for s < len(src) && src[s] == src[t] {
s++
t++
}
// Emit the copied bytes.
d += emitCopy(dst[d:], s-t, s-s0)
lit = s
}
// Emit any final pending literal bytes and return.
if lit != len(src) {
d += emitLiteral(dst[d:], src[lit:])
}
return dst[:d]
}
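// Minimal usage sketch (illustrative, assuming this package's matching
// Decode counterpart defined elsewhere):
//
//	compressed := Encode(nil, input)
//	roundTripped, err := Decode(nil, compressed)
//
// Passing a nil dst just forces a fresh MaxEncodedLen-sized allocation.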
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
func MaxEncodedLen(srcLen int) int {
// Compressed data can be defined as:
// compressed := item* literal*
// item := literal* copy
//
// The trailing literal sequence has a space blowup of at most 62/60
// since a literal of length 60 needs one tag byte + one extra byte
// for length information.
//
// Item blowup is trickier to measure. Suppose the "copy" op copies
// 4 bytes of data. Because of a special check in the encoding code,
// we produce a 4-byte copy only if the offset is < 65536. Therefore
// the copy op takes 3 bytes to encode, and this type of item leads
// to at most the 62/60 blowup for representing literals.
//
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
// enough, it will take 5 bytes to encode the copy op. Therefore the
// worst case here is a one-byte literal followed by a five-byte copy.
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
//
// This last factor dominates the blowup, so the final estimate is:
return 32 + srcLen + srcLen/6
}
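// Worked example (added note): for a 65536-byte input the bound evaluates to
// 32 + 65536 + 65536/6 = 32 + 65536 + 10922 = 76490 bytes.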
// NewWriter returns a new Writer that compresses to w, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
}
}
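// Usage sketch (illustrative): this version of Writer frames and flushes a
// chunk on every Write call, so no explicit Flush/Close step is required.
//
//	var buf bytes.Buffer
//	w := NewWriter(&buf)
//	if _, err := w.Write(payload); err != nil {
//		// handle the write/compression error
//	}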
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
w io.Writer
err error
enc []byte
	buf [checksumSize + chunkHeaderSize]byte

	wroteHeader bool
}

// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
w.w = writer
w.err = nil
w.wroteHeader = false
}
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (n int, errRet error) {
if w.err != nil {
return 0, w.err
}
if !w.wroteHeader {
copy(w.enc, magicChunk)
if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
w.err = err
return n, err
}
w.wroteHeader = true
}
for len(p) > 0 {
var uncompressed []byte
if len(p) > maxUncompressedChunkLen {
uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
} else {
uncompressed, p = p, nil
}
checksum := crc(uncompressed)
// Compress the buffer, discarding the result if the improvement
// isn't at least 12.5%.
chunkType := uint8(chunkTypeCompressedData)
chunkBody := Encode(w.enc, uncompressed)
if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
}
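		// Added note: e.g. a 1000-byte chunk is stored compressed only if it
		// encodes to fewer than 1000-1000/8 = 875 bytes; otherwise the raw
		// bytes are framed as an uncompressed chunk.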
chunkLen := 4 + len(chunkBody)
w.buf[0] = chunkType
w.buf[1] = uint8(chunkLen >> 0)
w.buf[2] = uint8(chunkLen >> 8)
w.buf[3] = uint8(chunkLen >> 16)
w.buf[4] = uint8(checksum >> 0)
w.buf[5] = uint8(checksum >> 8)
w.buf[6] = uint8(checksum >> 16)
w.buf[7] = uint8(checksum >> 24)
if _, err := w.w.Write(w.buf[:]); err != nil {
w.err = err
return n, err
}
if _, err := w.w.Write(chunkBody); err != nil {
w.err = err
return n, err
}
n += len(uncompressed)
}
return n, nil
}

<|file_name|>volk_register.py<|end_file_name|>
#! /usr/bin/env python
import sys
import os
import re
import string
from xml.dom import minidom
from volk_regexp import *
from make_cpuid_c import make_cpuid_c
from make_cpuid_h import make_cpuid_h
from make_set_simd import make_set_simd
from make_config_fixed import make_config_fixed
from make_typedefs import make_typedefs
from make_environment_init_c import make_environment_init_c
from make_environment_init_h import make_environment_init_h
from make_makefile_am import make_makefile_am
from make_machines_h import make_machines_h
from make_machines_c import make_machines_c
from make_each_machine_c import make_each_machine_c
from make_c import make_c
from make_h import make_h
import copy
#set srcdir and gendir
srcdir = os.path.dirname(os.path.dirname(__file__))
try: gendir = sys.argv[1]
except: gendir = os.path.dirname(__file__)
#ensure directories exist
for dir in (
(os.path.join(gendir, 'include', 'volk')),
(os.path.join(gendir, 'lib')),
(os.path.join(gendir, 'config'))
):
if not os.path.exists(dir): os.makedirs(dir)
outfile_set_simd = open(os.path.join(gendir, "config/lv_set_simd_flags.m4"), "w")
outfile_h = open(os.path.join(gendir, "include/volk/volk.h"), "w")
outfile_c = open(os.path.join(gendir, "lib/volk.c"), "w")
outfile_typedefs = open(os.path.join(gendir, "include/volk/volk_typedefs.h"), "w")
outfile_init_h = open(os.path.join(gendir, "lib/volk_init.h"), "w")
outfile_cpu_h = open(os.path.join(gendir, "include/volk/volk_cpu.h"), "w")
outfile_cpu_c = open(os.path.join(gendir, "lib/volk_cpu.c"), "w")
#outfile_config_in = open(os.path.join(gendir, "include/volk/volk_config.h.in"), "w")
outfile_config_fixed = open(os.path.join(gendir, "include/volk/volk_config_fixed.h"), "w")
outfile_environment_c = open(os.path.join(gendir, "lib/volk_environment_init.c"), "w")
outfile_environment_h = open(os.path.join(gendir, "lib/volk_environment_init.h"), "w")
outfile_makefile_am = open(os.path.join(gendir, "lib/Makefile.am"), "w")
outfile_machines_h = open(os.path.join(gendir, "lib/volk_machines.h"), "w")
outfile_machines_c = open(os.path.join(gendir, "lib/volk_machines.c"), "w")
infile = open(os.path.join(srcdir, "include/volk/Makefile.am"), "r")
mfile = infile.readlines();
datatypes = [];
functions = [];
for line in mfile:
subline = re.search(".*_(a|u)\.h.*", line);
if subline:
subsubline = re.search("(?<=volk_).*", subline.group(0));
if subsubline:
dtype = remove_after_underscore.sub("", subsubline.group(0));
subdtype = re.search("[0-9]+[A-z]+", dtype);
if subdtype:
datatypes.append(subdtype.group(0));
datatypes = set(datatypes);
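# Illustrative note (added): for headers named like volk_32fc_x2_dot_prod_32fc_a.h
# the regexes above keep only the leading type token, so `datatypes` ends up as a
# set of strings such as {'32fc', '16i', '64f'} (example values).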
for line in mfile:
for dt in datatypes:
if dt in line:
subline = re.search("(volk_" + dt +"_.*(a|u).*\.h)", line);
if subline:
subsubline = re.search(".+(?=\.h)", subline.group(0));
functions.append(subsubline.group(0));
archs = [];
afile = minidom.parse(os.path.join(srcdir, "gen/archs.xml"))
filearchs = afile.getElementsByTagName("arch");
for filearch in filearchs:
archs.append(str(filearch.attributes["name"].value));
for arch in archs:
a_var = re.search("^\$", arch);
if a_var:
archs.remove(arch);
archflags_dict = {}
for filearch in filearchs:
archflags_dict[str(filearch.attributes["name"].value)] = str(filearch.getElementsByTagName("flag")[0].firstChild.data)
archalign_dict = {}
for filearch in filearchs:
alignelem = filearch.getElementsByTagName("alignment")
if(alignelem):
archalign_dict[str(filearch.attributes["name"].value)] = int(alignelem[0].firstChild.data)
archs_or = "("
for arch in archs:
archs_or = archs_or + string.upper(arch) + "|";
archs_or = archs_or[0:len(archs_or)-1];
archs_or = archs_or + ")";
#get machine list and parse to a list of machines, each with a list of archs (none of this DOM crap)
machine_str_dict = {}
mfile = minidom.parse(os.path.join(srcdir, "gen/machines.xml"))
filemachines = mfile.getElementsByTagName("machine")
for filemachine in filemachines:
machine_str_dict[str(filemachine.attributes["name"].value)] = str(filemachine.getElementsByTagName("archs")[0].firstChild.data).split()
#all right now you have a dict of arch lists
#next we expand it
#this is an expanded list accounting for the OR syntax
#TODO: make this work for multiple "|" machines
machines = {}
already_done = False
for machine_name in machine_str_dict:
already_done = False
marchlist = machine_str_dict[machine_name]
for march in marchlist:
or_marchs = march.split("|")
if len(or_marchs) > 1:
marchlist.remove(march)
for or_march in or_marchs:
tempmarchlist = copy.deepcopy(marchlist)
tempmarchlist.append(or_march)
machines[machine_name + "_" + or_march] = tempmarchlist
already_done = True
if not already_done:
machines[machine_name] = marchlist
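# Illustrative note (added): a machine declared with archs "generic sse|sse2"
# (hypothetical) expands into two entries here, e.g.
# machines["foo_sse"] == ["generic", "sse"] and
# machines["foo_sse2"] == ["generic", "sse2"].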
#get the maximum alignment for all archs in a machine
machine_alignment_dict = {}
for machine in machines:
machine_alignment_dict[machine] = max((archalign_dict.get(k, 1)) for k in machines[machine])
#for machine in machine_alignment_dict:
# print machine + ": %d" % machine_alignment_dict[machine]
taglist = [];
fcountlist = [];
arched_arglist = [];
retlist = [];
my_arglist = [];
my_argtypelist = [];
for func in functions:
tags = [];
fcount = [];
infile_source = open(os.path.join(srcdir, 'include', 'volk', func + ".h"))
begun_name = 0;
begun_paren = 0;
sourcefile = infile_source.readlines();
infile_source.close();
for line in sourcefile:
#FIXME: make it work for multiple #if define()s
archline = re.search("^\#if.*?LV_HAVE_" + archs_or + ".*", line);
if archline:
arch = archline.group(0);
archline = re.findall(archs_or + "(?=( |\n|&))", line);
if archline:
archsublist = [];
for tup in archline:
archsublist.append(tup[0]);
fcount.append(archsublist);
testline = re.search("static inline.*?" + func, line);
if (not testline):
continue
tagline = re.search(func + "_.+", line);
if tagline:
tag = re.search("(?<=" + func + "_)\w+(?= *\()",line);
if tag:
tag = re.search("\w+", tag.group(0));
if tag:
tags.append(tag.group(0));
if begun_name == 0:
retline = re.search(".+(?=" + func + ")", line);
if retline:
ret = retline.group(0);
subline = re.search(func + ".*", line);
if subline:
subsubline = re.search("\(.*?\)", subline.group(0));
if subsubline:
args = subsubline.group(0);
else:
begun_name = 1;
subsubline = re.search("\(.*", subline.group(0));
if subsubline:
args = subsubline.group(0);
begun_paren = 1;
else:
if begun_paren == 1:
subline = re.search(".*?\)", line);
if subline:
args = args + subline.group(0);
begun_name = 0;
begun_paren = 0;
else:
subline = re.search(".*", line);
args = args + subline.group(0);
else:
subline = re.search("\(.*?\)", line);
if subline:
args = subline.group(0);
begun_name = 0;
else:
subline = re.search("\(.*", line);
if subline:
args = subline.group(0);
begun_paren = 1;
replace = re.compile("static ");
ret = replace.sub("", ret);
replace = re.compile("inline ");
ret = replace.sub("", ret);
replace = re.compile("\)");
arched_args = replace.sub(", const char* arch) {", args);
remove = re.compile('\)|\(|{');
rargs = remove.sub("", args);
sargs = rargs.split(',');
margs = [];
atypes = [];
for arg in sargs:
temp = arg.split(" ");
margs.append(temp[-1]);
replace = re.compile(" " + temp[-1]);
atypes.append(replace.sub("", arg));
my_args = ""
arg_types = ""
for arg in range(0, len(margs) - 1):
this_arg = leading_space_remove.sub("", margs[arg]);
my_args = my_args + this_arg + ", ";
this_type = leading_space_remove.sub("", atypes[arg]);
arg_types = arg_types + this_type + ", ";
this_arg = leading_space_remove.sub("", margs[-1]);
my_args = my_args + this_arg;
this_type = leading_space_remove.sub("", atypes[-1]);
arg_types = arg_types + this_type;
my_argtypelist.append(arg_types);
if(ret[-1] != ' '):
ret = ret + ' ';
arched_arglist.append(arched_args); #!!!!!!!!!!!
my_arglist.append(my_args) #!!!!!!!!!!!!!!!!!
retlist.append(ret);
fcountlist.append(fcount);
taglist.append(tags);
outfile_cpu_h.write(make_cpuid_h(filearchs));
outfile_cpu_h.close();
outfile_cpu_c.write(make_cpuid_c(filearchs));
outfile_cpu_c.close();
outfile_set_simd.write(make_set_simd(filearchs, machines));
outfile_set_simd.close();

outfile_config_fixed.write(make_config_fixed(filearchs));
outfile_config_fixed.close();
outfile_typedefs.write(make_typedefs(functions, retlist, my_argtypelist));
outfile_typedefs.close();
outfile_makefile_am.write(make_makefile_am(filearchs, machines, archflags_dict))
outfile_makefile_am.close()
outfile_machines_h.write(make_machines_h(functions, machines, archs))
outfile_machines_h.close()
outfile_machines_c.write(make_machines_c(machines))
outfile_machines_c.close()
outfile_c.write(make_c(machines, archs, functions, arched_arglist, my_arglist))
outfile_c.close()
outfile_h.write(make_h(functions, arched_arglist))
outfile_h.close()
for machine in machines:
machine_c_filename = os.path.join(gendir, "lib/volk_machine_" + machine + ".c")
outfile_machine_c = open(machine_c_filename, "w")
outfile_machine_c.write(make_each_machine_c(machine, machines[machine], functions, fcountlist, taglist, machine_alignment_dict[machine]))
    outfile_machine_c.close()

<|file_name|>pcspeaker_pl.ts<|end_file_name|>
<?xml version="1.0" ?><!DOCTYPE TS><TS language="pl" version="2.1">
<context>
<name>@default</name>
<message>
<source>PC Speaker</source>
<translation>PC Speaker</translation>
</message>
</context>
<context>
    <name>PCSpeakerConfigurationWidget</name>
    <message>
        <source>Put the played sounds separate by space, _ for pause, eg. D2 C1# G0</source>
        <translation>Wpisz dźwięki oddzielone spacją, dla pauzy użyj znaku _ (twarda spacja), np. D2/2 C1#/1 _/4 H0#/8. Po ukośniku wstawiasz odwrotność długości dźwięku (1, 2, 4, 8, dla 16-F)</translation>
    </message>
</context>
</TS>

<|file_name|>FileLogger.java<|end_file_name|>
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;

public class FileLogger extends MyLoggerImpl {
private String fileName;
public FileLogger(String fileName) {
super();
this.fileName = fileName;
}
@Override
public void log(int level, String message) throws ErrorIntegerLevelException {
super.log(level, message);
try {
File file = new File(fileName);
FileWriter fw = new FileWriter(file.getAbsoluteFile(), true);
BufferedWriter bw = new BufferedWriter(fw);
if(file.length() > 0) {
bw.write("\n"+ getLogMessage());
} else {
bw.write(getLogMessage());
}
bw.close();
} catch (IOException e) {
System.err.println("This file doesn`t exist!");
}
}
public static void main(String[] argv) {
        MyLogger fileLogger = new FileLogger("test.txt");
        try {
            fileLogger.log(1, "Hello world!");
            fileLogger.log(2, "The application's digital signature has an error!");
            fileLogger.log(3, "Checking file system on C!");
            fileLogger.log(4, "Error level!");
        } catch (ErrorIntegerLevelException e) {
System.err.println("Error level! It must be 1, 2 or 3");
}
}
}

<|file_name|>base.py<|end_file_name|>
# -*- coding:utf-8 -*-
from models.base import SqlBaseModel


class Model(SqlBaseModel):
    def __init__(self):
        super(Model, self).__init__(db_name='blog')


Base = Model().Base

<|file_name|>__init__.py<|end_file_name|>
"""Tests for the canary component."""
from unittest.mock import MagicMock, PropertyMock
from canary.api import SensorType
def mock_device(device_id, name, is_online=True, device_type_name=None):
"""Mock Canary Device class."""
device = MagicMock()
type(device).device_id = PropertyMock(return_value=device_id)
type(device).name = PropertyMock(return_value=name)
type(device).is_online = PropertyMock(return_value=is_online)
type(device).device_type = PropertyMock(
return_value={"id": 1, "name": device_type_name}
)
return device
def mock_location(
location_id, name, is_celsius=True, devices=None, mode=None, is_private=False
):
"""Mock Canary Location class."""
location = MagicMock()
type(location).location_id = PropertyMock(return_value=location_id)
type(location).name = PropertyMock(return_value=name)
type(location).is_celsius = PropertyMock(return_value=is_celsius)
type(location).is_private = PropertyMock(return_value=is_private)
type(location).devices = PropertyMock(return_value=devices or [])
type(location).mode = PropertyMock(return_value=mode)
return location
def mock_mode(mode_id, name):
"""Mock Canary Mode class."""
mode = MagicMock()
type(mode).mode_id = PropertyMock(return_value=mode_id)
type(mode).name = PropertyMock(return_value=name)
type(mode).resource_url = PropertyMock(return_value=f"/v1/modes/{mode_id}")
return mode
def mock_reading(sensor_type, sensor_value):
"""Mock Canary Reading class."""
reading = MagicMock()
type(reading).sensor_type = SensorType(sensor_type)
type(reading).value = PropertyMock(return_value=sensor_value)
    return reading
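# Usage sketch (added illustration, not part of the original helpers):
#
#   location = mock_location(
#       1, "Home", devices=[mock_device(20, "Porch", device_type_name="Canary Pro")]
#   )
#   assert location.devices[0].name == "Porch"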

<|file_name|>auth.js<|end_file_name|>
'use strict';
var os = require('os');
// expose our config directly to our application using module.exports
module.exports = {
'twitterAuth' : {
'consumerKey' : process.env.TWITTER_CONSUMER_KEY || 'unknown',
'consumerSecret' : process.env.TWITTER_CONSUMER_SECRET || 'unknown' ,
'callbackURL' : 'http://' + os.hostname() + '/auth/twitter/callback'
    }

};

<|file_name|>dummy.cpp<|end_file_name|>
// Copyright (c) 2020 WNProject Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Intentionally left empty so that we can build a lib target for this library

#include "hashing/inc/hash.h"

<|file_name|>minibuf.rs<|end_file_name|>
//! Minibuffer input and completion.
use remacs_macros::lisp_fn;
use crate::{
buffers::{current_buffer, LispBufferOrName},
editfns::field_end,
eval::unbind_to,
keymap::get_keymap,
lisp::LispObject,
lists::{car_safe, cdr_safe, memq},
multibyte::LispStringRef,
obarray::{intern, lisp_intern},
remacs_sys::{
globals, Qcommandp, Qcustom_variable_p, Qfield, Qminibuffer_completion_table,
Qminibuffer_history, Qnil, Qt, Vminibuffer_list,
},
remacs_sys::{
make_buffer_string, minibuf_level, minibuf_prompt, minibuf_window, read_minibuf, specbind,
EmacsInt, Fcopy_sequence,
},
symbols::symbol_value,
textprop::get_char_property,
threads::{c_specpdl_index, ThreadState},
};
/// Return t if BUFFER is a minibuffer.
/// No argument or nil as argument means use current buffer as BUFFER.
/// BUFFER can be a buffer or a buffer name.
#[lisp_fn(min = "0")]
pub fn minibufferp(buffer_or_name: Option<LispBufferOrName>) -> bool {
let buffer = buffer_or_name.map_or_else(current_buffer, LispObject::from);
memq(buffer, unsafe { Vminibuffer_list }).is_not_nil()
}
/// Return the currently active minibuffer window, or nil if none.
#[lisp_fn]
pub fn active_minibuffer_window() -> LispObject {
unsafe {
if minibuf_level == 0 {
Qnil
} else {
minibuf_window
}
}
}
/// Specify which minibuffer window to use for the minibuffer.
/// This affects where the minibuffer is displayed if you put text in it
/// without invoking the usual minibuffer commands.
#[lisp_fn]
pub fn set_minibuffer_window(window: LispObject) -> LispObject {
window.as_minibuffer_or_error(); // just for the checks
unsafe {
minibuf_window = window;
}
window
}
/// Return current depth of activations of minibuffer,
/// a nonnegative integer.
#[lisp_fn]
pub fn minibuffer_depth() -> EmacsInt {
unsafe { minibuf_level }
}<|fim▁hole|>pub fn minibuffer_prompt() -> LispObject {
unsafe { Fcopy_sequence(minibuf_prompt) }
}
/// Return the buffer position of the end of the minibuffer prompt.
/// Return (point-min) if current buffer is not a minibuffer.
#[lisp_fn]
pub fn minibuffer_prompt_end() -> EmacsInt {
let buffer = ThreadState::current_buffer_unchecked();
let beg = buffer.beg() as EmacsInt;
if memq(buffer.into(), unsafe { Vminibuffer_list }).is_nil() {
return beg;
}
let end = field_end(Some(beg.into()), false, None);
let buffer_end = buffer.zv as EmacsInt;
if end == buffer_end && get_char_property(beg, Qfield, Qnil).is_nil() {
beg
} else {
end
}
}
/// Return the user input in a minibuffer as a string.
/// If the current buffer is not a minibuffer, return its entire contents.
#[lisp_fn]
pub fn minibuffer_contents() -> LispObject {
let prompt_end = minibuffer_prompt_end() as isize;
unsafe { make_buffer_string(prompt_end, ThreadState::current_buffer_unchecked().zv, true) }
}
/// Return the user input in a minibuffer as a string, without text-properties.
/// If the current buffer is not a minibuffer, return its entire contents.
#[lisp_fn]
pub fn minibuffer_contents_no_properties() -> LispObject {
let prompt_end = minibuffer_prompt_end() as isize;
unsafe {
make_buffer_string(
prompt_end,
ThreadState::current_buffer_unchecked().zv,
false,
)
}
}
/// Read a string from the minibuffer, prompting with string PROMPT.
/// The optional second arg INITIAL-CONTENTS is an obsolete alternative to
/// DEFAULT-VALUE. It normally should be nil in new code, except when
/// HIST is a cons. It is discussed in more detail below.
///
/// Third arg KEYMAP is a keymap to use whilst reading;
/// if omitted or nil, the default is `minibuffer-local-map'.
///
/// If fourth arg READ is non-nil, interpret the result as a Lisp object
/// and return that object:
/// in other words, do `(car (read-from-string INPUT-STRING))'
///
/// Fifth arg HIST, if non-nil, specifies a history list and optionally
/// the initial position in the list. It can be a symbol, which is the
/// history list variable to use, or a cons cell (HISTVAR . HISTPOS).
/// In that case, HISTVAR is the history list variable to use, and
/// HISTPOS is the initial position for use by the minibuffer history
/// commands. For consistency, you should also specify that element of
/// the history as the value of INITIAL-CONTENTS. Positions are counted
/// starting from 1 at the beginning of the list.
///
/// Sixth arg DEFAULT-VALUE, if non-nil, should be a string, which is used
/// as the default to `read' if READ is non-nil and the user enters
/// empty input. But if READ is nil, this function does _not_ return
/// DEFAULT-VALUE for empty input! Instead, it returns the empty string.
///
/// Whatever the value of READ, DEFAULT-VALUE is made available via the
/// minibuffer history commands. DEFAULT-VALUE can also be a list of
/// strings, in which case all the strings are available in the history,
/// and the first string is the default to `read' if READ is non-nil.
///
/// Seventh arg INHERIT-INPUT-METHOD, if non-nil, means the minibuffer inherits
/// the current input method and the setting of `enable-multibyte-characters'.
///
/// If the variable `minibuffer-allow-text-properties' is non-nil,
/// then the string which is returned includes whatever text properties
/// were present in the minibuffer. Otherwise the value has no text properties.
///
/// The remainder of this documentation string describes the
/// INITIAL-CONTENTS argument in more detail. It is only relevant when
/// studying existing code, or when HIST is a cons. If non-nil,
/// INITIAL-CONTENTS is a string to be inserted into the minibuffer before
/// reading input. Normally, point is put at the end of that string.
/// However, if INITIAL-CONTENTS is (STRING . POSITION), the initial
/// input is STRING, but point is placed at _one-indexed_ position
/// POSITION in the minibuffer. Any integer value less than or equal to
/// one puts point at the beginning of the string. *Note* that this
/// behavior differs from the way such arguments are used in `completing-read'
/// and some related functions, which use zero-indexing for POSITION.
#[lisp_fn(min = "1")]
pub fn read_from_minibuffer(
prompt: LispStringRef,
initial_contents: LispObject,
mut keymap: LispObject,
read: bool,
hist: LispObject,
default_value: LispObject,
inherit_input_method: bool,
) -> LispObject {
keymap = if keymap.is_nil() {
unsafe { globals.Vminibuffer_local_map }
} else {
get_keymap(keymap, true, false)
};
let (mut histvar, mut histpos) = if hist.is_symbol() {
(hist, Qnil)
} else {
(car_safe(hist), cdr_safe(hist))
};
if histvar.is_nil() {
histvar = Qminibuffer_history
};
if histpos.is_nil() {
histpos = LispObject::from_natnum(0)
};
unsafe {
read_minibuf(
keymap,
initial_contents,
prompt.into(),
read,
histvar,
histpos,
default_value,
globals.minibuffer_allow_text_properties,
inherit_input_method,
)
}
}
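// Informal Lisp-side sketch (added note): a call such as
//   (read-from-minibuffer "Eval: " nil read-expression-map t 'read-expression-history)
// reads with READ non-nil and hands back the parsed Lisp object.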
// Functions that use the minibuffer to read various things.
/// Read a string in the minibuffer, with completion.
/// PROMPT is a string to prompt with; normally it ends in a colon and a space.
/// COLLECTION can be a list of strings, an alist, an obarray or a hash table.
/// COLLECTION can also be a function to do the completion itself.
/// PREDICATE limits completion to a subset of COLLECTION.
/// See `try-completion', `all-completions', `test-completion',
/// and `completion-boundaries', for more details on completion,
/// COLLECTION, and PREDICATE. See also Info nodes `(elisp)Basic Completion'
/// for the details about completion, and `(elisp)Programmed Completion' for
/// expectations from COLLECTION when it's a function.
///
/// REQUIRE-MATCH can take the following values:
/// - t means that the user is not allowed to exit unless
/// the input is (or completes to) an element of COLLECTION or is null.
/// - nil means that the user can exit with any input.
/// - `confirm' means that the user can exit with any input, but she needs
/// to confirm her choice if the input is not an element of COLLECTION.
/// - `confirm-after-completion' means that the user can exit with any
/// input, but she needs to confirm her choice if she called
/// `minibuffer-complete' right before `minibuffer-complete-and-exit'
/// and the input is not an element of COLLECTION.
/// - anything else behaves like t except that typing RET does not exit if it
/// does non-null completion.
///
/// If the input is null, `completing-read' returns DEF, or the first element
/// of the list of default values, or an empty string if DEF is nil,
/// regardless of the value of REQUIRE-MATCH.
///
/// If INITIAL-INPUT is non-nil, insert it in the minibuffer initially,
/// with point positioned at the end.
/// If it is (STRING . POSITION), the initial input is STRING, but point
/// is placed at _zero-indexed_ position POSITION in STRING. (*Note*
/// that this is different from `read-from-minibuffer' and related
/// functions, which use one-indexing for POSITION.) This feature is
/// deprecated--it is best to pass nil for INITIAL-INPUT and supply the
/// default value DEF instead. The user can yank the default value into
/// the minibuffer easily using \\<minibuffer-local-map>\\[next-history-element].
///
/// HIST, if non-nil, specifies a history list and optionally the initial
/// position in the list. It can be a symbol, which is the history list
/// variable to use, or it can be a cons cell (HISTVAR . HISTPOS). In
/// that case, HISTVAR is the history list variable to use, and HISTPOS
/// is the initial position (the position in the list used by the
/// minibuffer history commands). For consistency, you should also
/// specify that element of the history as the value of
/// INITIAL-INPUT. (This is the only case in which you should use
/// INITIAL-INPUT instead of DEF.) Positions are counted starting from
/// 1 at the beginning of the list. The variable `history-length'
/// controls the maximum length of a history list.
///
/// DEF, if non-nil, is the default value or the list of default values.
///
/// If INHERIT-INPUT-METHOD is non-nil, the minibuffer inherits
/// the current input method and the setting of `enable-multibyte-characters'.
///
/// Completion ignores case if the ambient value of
/// `completion-ignore-case' is non-nil.
///
/// See also `completing-read-function'.
#[lisp_fn(min = "2")]
pub fn completing_read(
prompt: LispObject,
collection: LispObject,
predicate: LispObject,
require_match: LispObject,
initial_input: LispObject,
hist: LispObject,
def: LispObject,
inherit_input_method: LispObject,
) -> LispObject {
call!(
symbol_value(intern("completing-read-function")),
prompt,
collection,
predicate,
require_match,
initial_input,
hist,
def,
inherit_input_method
)
}
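// Informal Lisp-side sketch (added note): e.g.
//   (completing-read "Color: " '("red" "green" "blue") nil t)
// completes over the three candidates and, with REQUIRE-MATCH t, only
// accepts one of them (or empty input).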
/// Read a string from the minibuffer, prompting with string PROMPT.
/// If non-nil, second arg INITIAL-INPUT is a string to insert before reading.
/// This argument has been superseded by DEFAULT-VALUE and should normally be nil
/// in new code. It behaves as INITIAL-CONTENTS in `read-from-minibuffer' (which
/// see).
/// The third arg HISTORY, if non-nil, specifies a history list
/// and optionally the initial position in the list.
/// See `read-from-minibuffer' for details of HISTORY argument.
/// Fourth arg DEFAULT-VALUE is the default value or the list of default values.
/// If non-nil, it is used for history commands, and as the value (or the first
/// element of the list of default values) to return if the user enters the
/// empty string.
/// Fifth arg INHERIT-INPUT-METHOD, if non-nil, means the minibuffer inherits
/// the current input method and the setting of `enable-multibyte-characters'.
#[lisp_fn(min = "1")]
pub fn read_string(
prompt: LispStringRef,
initial_input: LispObject,
history: LispObject,
default_value: LispObject,
inherit_input_method: bool,
) -> LispObject {
let count = c_specpdl_index();
// Just in case we're in a recursive minibuffer, make it clear that the
// previous minibuffer's completion table does not apply to the new
// minibuffer.
// FIXME: `minibuffer-completion-table' should be buffer-local instead.
unsafe { specbind(Qminibuffer_completion_table, Qnil) };
let mut val: LispObject;
val = read_from_minibuffer(
prompt,
initial_input,
Qnil,
false,
history,
default_value,
inherit_input_method,
);
if let Some(s) = val.as_string() {
if s.is_empty() && default_value.is_not_nil() {
val = match default_value.into() {
None => default_value,
Some((a, _)) => a,
}
}
}
unbind_to(count, val)
}
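// Informal Lisp-side sketch (added note):
//   (read-string "Tag: " nil nil "v1.0")
// yields "v1.0" when the user submits empty input, per the DEFAULT-VALUE
// handling above.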
pub fn read_command_or_variable(
prompt: LispObject,
default_value: LispObject,
symbol: LispObject,
) -> LispObject {
let default_string = if default_value.is_nil() {
Qnil
} else if let Some(s) = default_value.as_symbol() {
s.symbol_name()
} else {
default_value
};
let name = completing_read(
prompt,
unsafe { globals.Vobarray },
symbol,
Qt,
Qnil,
Qnil,
default_string,
Qnil,
);
if name.is_nil() {
name
} else {
lisp_intern(name.into(), None)
}
}
/// Read the name of a command and return as a symbol. */
/// Prompt with PROMPT. By default, return DEFAULT-VALUE or its first element */
/// if it is a list.
#[lisp_fn(min = "1")]
pub fn read_command(prompt: LispObject, default_value: LispObject) -> LispObject {
read_command_or_variable(prompt, default_value, Qcommandp)
}
/// Read the name of a user option and return it as a symbol.
/// Prompt with PROMPT. By default, return DEFAULT-VALUE or its first element
/// if it is a list.
/// A user option, or customizable variable, is one for which
/// `custom-variable-p' returns non-nil.
#[lisp_fn(min = "1")]
pub fn read_variable(prompt: LispObject, default_value: LispObject) -> LispObject {
read_command_or_variable(prompt, default_value, Qcustom_variable_p)
}
/// Read a string from the terminal, not allowing blanks.
/// Prompt with PROMPT. Whitespace terminates the input. If INITIAL is
/// non-nil, it should be a string, which is used as initial input, with
/// point positioned at the end, so that SPACE will accept the input.
/// (Actually, INITIAL can also be a cons of a string and an integer.
/// Such values are treated as in `read-from-minibuffer', but are normally
/// not useful in this function.)
/// Third arg INHERIT-INPUT-METHOD, if non-nil, means the minibuffer inherits
/// the current input method and the setting of`enable-multibyte-characters'.
#[lisp_fn(min = "1")]
pub fn read_no_blanks_input(
prompt: LispStringRef,
initial: LispObject,
inherit_input_method: LispObject,
) -> LispObject {
unsafe {
read_minibuf(
globals.Vminibuffer_local_ns_map,
initial,
prompt.into(),
false,
Qminibuffer_history,
LispObject::from_fixnum(0),
Qnil,
false,
inherit_input_method.is_not_nil(),
)
}
}
include!(concat!(env!("OUT_DIR"), "/minibuf_exports.rs"));

<|file_name|>dismantling.module.ts<|end_file_name|>
import { NgModule } from '@angular/core';
import { SharedModule } from '../../shared/shared.module';
import { DismantlingRoutingModule } from './dismantling-routing.module';
import { DismantlingHomeComponent } from './dismantling-home/dismantling-home.component';
// import { DismantlingIdleComponent } from './dismantling-idle/dismantling-idle.component';
import { DismantlingProgressingComponent } from './dismantling-progressing/dismantling-progressing.component';
import { DismantlingCompletedComponent } from './dismantling-completed/dismantling-completed.component';
import { DismantlingIdle2Component } from './dismantling-idle2/dismantling-idle2.component';<|fim▁hole|>import { DismantlingPreDismantlingComponent } from './dismantling-pre-dismantling/dismantling-pre-dismantling.component';
@NgModule({
imports: [
SharedModule,
DismantlingRoutingModule
],
declarations: [DismantlingHomeComponent, /* DismantlingIdleComponent, */ DismantlingProgressingComponent, DismantlingCompletedComponent, DismantlingIdle2Component, DismantlingPreDismantlingComponent]
})
export class DismantlingModule { }<|fim▁end|> | |
<|file_name|>IssueHistory.java<|end_file_name|><|fim▁begin|>/*
* Property of RECAPT http://recapt.com.ec/
* Chief Developer Ing. Eduardo Alfonso Sanchez [email protected]
*/
package com.recapt.domain;
import java.time.LocalDateTime;
/**
 * A single entry in an issue's change history: what changed, when it was
 * recorded, and which user recorded it.
 *
 * @author Eduardo
 */
public class IssueHistory {
private String name;
private String description;
private String reference;
private LocalDateTime created;
private Usuario createBy;
private Issue issue;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDescription() {
return description;
}<|fim▁hole|> this.description = description;
}
public String getReference() {
return reference;
}
public void setReference(String reference) {
this.reference = reference;
}
public LocalDateTime getCreated() {
return created;
}
public void setCreated(LocalDateTime created) {
this.created = created;
}
public Usuario getCreateBy() {
return createBy;
}
public void setCreateBy(Usuario createBy) {
this.createBy = createBy;
}
public Issue getIssue() {
return issue;
}
public void setIssue(Issue issue) {
this.issue = issue;
}
}<|fim▁end|> |
public void setDescription(String description) { |
<|file_name|>types.generated.go<|end_file_name|><|fim▁begin|>/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED BY codecgen.
// ************************************************************
package v1
import (
"errors"
"fmt"
codec1978 "github.com/ugorji/go/codec"
pkg1_unversioned "k8s.io/client-go/1.4/pkg/api/unversioned"
pkg2_v1 "k8s.io/client-go/1.4/pkg/api/v1"
pkg3_types "k8s.io/client-go/1.4/pkg/types"
"reflect"
"runtime"
time "time"
)
const (
// ----- content types ----
codecSelferC_UTF81234 = 1
codecSelferC_RAW1234 = 0
// ----- value types used ----
codecSelferValueTypeArray1234 = 10
codecSelferValueTypeMap1234 = 9
// ----- containerStateValues ----
codecSelfer_containerMapKey1234 = 2
codecSelfer_containerMapValue1234 = 3
codecSelfer_containerMapEnd1234 = 4
codecSelfer_containerArrayElem1234 = 6
codecSelfer_containerArrayEnd1234 = 7
)
var (
codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
)
type codecSelfer1234 struct{}
func init() {
if codec1978.GenVersion != 5 {
_, file, _, _ := runtime.Caller(0)
err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
5, codec1978.GenVersion, file)
panic(err)
}
if false { // reference the types, but skip this branch at build/run time
var v0 pkg1_unversioned.Time
var v1 pkg2_v1.ObjectMeta
var v2 pkg3_types.UID
var v3 time.Time
_, _, _, _ = v0, v1, v2, v3
}
}
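// Each generated type below gets CodecEncodeSelf/CodecDecodeSelf entry points.
// Encoding switches between array and map container layouts depending on the
// handle's StructToArray setting; decoding dispatches to the matching
// codecDecodeSelfFromMap/codecDecodeSelfFromArray helper.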
func (x *CrossVersionObjectReference) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
if x == nil {
r.EncodeNil()
} else {
yym1 := z.EncBinary()
_ = yym1
if false {
} else if z.HasExtensions() && z.EncExt(x) {
} else {
yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [3]bool
_, _, _ = yysep2, yyq2, yy2arr2
const yyr2 bool = false
yyq2[2] = x.APIVersion != ""
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(3)
} else {
yynn2 = 2
for _, b := range yyq2 {
if b {
yynn2++
}
}
r.EncodeMapStart(yynn2)
yynn2 = 0
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yym4 := z.EncBinary()
_ = yym4
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("kind"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym5 := z.EncBinary()
_ = yym5
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yym7 := z.EncBinary()
_ = yym7
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Name))
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("name"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym8 := z.EncBinary()
_ = yym8
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Name))
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[2] {
yym10 := z.EncBinary()
_ = yym10
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
}
} else {
r.EncodeString(codecSelferC_UTF81234, "")
}
} else {
if yyq2[2] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym11 := z.EncBinary()
_ = yym11
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
z.EncSendContainerState(codecSelfer_containerMapEnd1234)
}
}
}
}
func (x *CrossVersionObjectReference) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yym12 := z.DecBinary()
_ = yym12
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
yyct13 := r.ContainerType()
if yyct13 == codecSelferValueTypeMap1234 {
yyl13 := r.ReadMapStart()
if yyl13 == 0 {
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
} else {
x.codecDecodeSelfFromMap(yyl13, d)
}
} else if yyct13 == codecSelferValueTypeArray1234 {
yyl13 := r.ReadArrayStart()
if yyl13 == 0 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
x.codecDecodeSelfFromArray(yyl13, d)
}
} else {
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
}
}
}
func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yys14Slc = z.DecScratchBuffer() // default slice to decode into
_ = yys14Slc
var yyhl14 bool = l >= 0
for yyj14 := 0; ; yyj14++ {
if yyhl14 {
if yyj14 >= l {
break
}
} else {
if r.CheckBreak() {
break
}
}
z.DecSendContainerState(codecSelfer_containerMapKey1234)
yys14Slc = r.DecodeBytes(yys14Slc, true, true)
yys14 := string(yys14Slc)
z.DecSendContainerState(codecSelfer_containerMapValue1234)
switch yys14 {
case "kind":
if r.TryDecodeAsNil() {
x.Kind = ""
} else {
x.Kind = string(r.DecodeString())
}
case "name":
if r.TryDecodeAsNil() {
x.Name = ""
} else {
x.Name = string(r.DecodeString())
}
case "apiVersion":
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
x.APIVersion = string(r.DecodeString())
}
default:
z.DecStructFieldNotFound(-1, yys14)
} // end switch yys14
} // end for yyj14
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
}
func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj18 int
var yyb18 bool
var yyhl18 bool = l >= 0
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Kind = ""
} else {
x.Kind = string(r.DecodeString())
}
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Name = ""
} else {
x.Name = string(r.DecodeString())
}
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
x.APIVersion = string(r.DecodeString())
}
for {
yyj18++
if yyhl18 {
yyb18 = yyj18 > l
} else {
yyb18 = r.CheckBreak()
}
if yyb18 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj18-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
if x == nil {
r.EncodeNil()
} else {
yym22 := z.EncBinary()
_ = yym22
if false {
} else if z.HasExtensions() && z.EncExt(x) {
} else {
yysep23 := !z.EncBinary()
yy2arr23 := z.EncBasicHandle().StructToArray
var yyq23 [4]bool
_, _, _ = yysep23, yyq23, yy2arr23
const yyr23 bool = false
yyq23[1] = x.MinReplicas != nil
yyq23[3] = x.TargetCPUUtilizationPercentage != nil
var yynn23 int
if yyr23 || yy2arr23 {
r.EncodeArrayStart(4)
} else {
yynn23 = 2
for _, b := range yyq23 {
if b {
yynn23++
}
}
r.EncodeMapStart(yynn23)
yynn23 = 0
}
if yyr23 || yy2arr23 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yy25 := &x.ScaleTargetRef
yy25.CodecEncodeSelf(e)
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("scaleTargetRef"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yy26 := &x.ScaleTargetRef
yy26.CodecEncodeSelf(e)
}
if yyr23 || yy2arr23 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq23[1] {
if x.MinReplicas == nil {
r.EncodeNil()
} else {
yy28 := *x.MinReplicas
yym29 := z.EncBinary()
_ = yym29
if false {
} else {
r.EncodeInt(int64(yy28))
}
}
} else {
r.EncodeNil()
}
} else {
if yyq23[1] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("minReplicas"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.MinReplicas == nil {
r.EncodeNil()
} else {
yy30 := *x.MinReplicas
yym31 := z.EncBinary()
_ = yym31
if false {
} else {
r.EncodeInt(int64(yy30))
}
}
}
}
if yyr23 || yy2arr23 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yym33 := z.EncBinary()
_ = yym33
if false {
} else {
r.EncodeInt(int64(x.MaxReplicas))
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("maxReplicas"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym34 := z.EncBinary()
_ = yym34
if false {
} else {
r.EncodeInt(int64(x.MaxReplicas))
}
}
if yyr23 || yy2arr23 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq23[3] {
if x.TargetCPUUtilizationPercentage == nil {
r.EncodeNil()
} else {
yy36 := *x.TargetCPUUtilizationPercentage
yym37 := z.EncBinary()
_ = yym37
if false {
} else {
r.EncodeInt(int64(yy36))
}
}
} else {
r.EncodeNil()
}
} else {
if yyq23[3] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("targetCPUUtilizationPercentage"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.TargetCPUUtilizationPercentage == nil {
r.EncodeNil()
} else {
yy38 := *x.TargetCPUUtilizationPercentage
yym39 := z.EncBinary()
_ = yym39
if false {
} else {
r.EncodeInt(int64(yy38))
}
}
}
}
if yyr23 || yy2arr23 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
z.EncSendContainerState(codecSelfer_containerMapEnd1234)
}
}
}
}
func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yym40 := z.DecBinary()
_ = yym40
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
yyct41 := r.ContainerType()
if yyct41 == codecSelferValueTypeMap1234 {
yyl41 := r.ReadMapStart()
if yyl41 == 0 {
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
} else {
x.codecDecodeSelfFromMap(yyl41, d)
}
} else if yyct41 == codecSelferValueTypeArray1234 {
yyl41 := r.ReadArrayStart()
if yyl41 == 0 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
x.codecDecodeSelfFromArray(yyl41, d)
}
} else {
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
}
}
}
func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yys42Slc = z.DecScratchBuffer() // default slice to decode into
_ = yys42Slc
var yyhl42 bool = l >= 0
for yyj42 := 0; ; yyj42++ {
if yyhl42 {
if yyj42 >= l {
break
}
} else {
if r.CheckBreak() {
break
}
}
z.DecSendContainerState(codecSelfer_containerMapKey1234)
yys42Slc = r.DecodeBytes(yys42Slc, true, true)
yys42 := string(yys42Slc)
z.DecSendContainerState(codecSelfer_containerMapValue1234)
switch yys42 {
case "scaleTargetRef":
if r.TryDecodeAsNil() {
x.ScaleTargetRef = CrossVersionObjectReference{}
} else {
yyv43 := &x.ScaleTargetRef
yyv43.CodecDecodeSelf(d)
}
case "minReplicas":
if r.TryDecodeAsNil() {
if x.MinReplicas != nil {
x.MinReplicas = nil
}
} else {
if x.MinReplicas == nil {
x.MinReplicas = new(int32)
}
yym45 := z.DecBinary()
_ = yym45
if false {
} else {
*((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32))
}
}
case "maxReplicas":
if r.TryDecodeAsNil() {
x.MaxReplicas = 0
} else {
x.MaxReplicas = int32(r.DecodeInt(32))
}
case "targetCPUUtilizationPercentage":
if r.TryDecodeAsNil() {
if x.TargetCPUUtilizationPercentage != nil {
x.TargetCPUUtilizationPercentage = nil
}
} else {
if x.TargetCPUUtilizationPercentage == nil {
x.TargetCPUUtilizationPercentage = new(int32)
}
yym48 := z.DecBinary()
_ = yym48
if false {
} else {
*((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32))
}
}
default:
z.DecStructFieldNotFound(-1, yys42)
} // end switch yys42
} // end for yyj42
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
}
func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj49 int
var yyb49 bool
var yyhl49 bool = l >= 0
yyj49++
if yyhl49 {
yyb49 = yyj49 > l
} else {
yyb49 = r.CheckBreak()
}
if yyb49 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.ScaleTargetRef = CrossVersionObjectReference{}
} else {
yyv50 := &x.ScaleTargetRef
yyv50.CodecDecodeSelf(d)
}
yyj49++
if yyhl49 {
yyb49 = yyj49 > l
} else {
yyb49 = r.CheckBreak()
}
if yyb49 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
if x.MinReplicas != nil {
x.MinReplicas = nil
}
} else {
if x.MinReplicas == nil {
x.MinReplicas = new(int32)
}
yym52 := z.DecBinary()
_ = yym52
if false {
} else {
*((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32))
}
}
yyj49++
if yyhl49 {
yyb49 = yyj49 > l
} else {
yyb49 = r.CheckBreak()
}
if yyb49 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.MaxReplicas = 0
} else {
x.MaxReplicas = int32(r.DecodeInt(32))
}
yyj49++
if yyhl49 {
yyb49 = yyj49 > l
} else {
yyb49 = r.CheckBreak()
}
if yyb49 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
if x.TargetCPUUtilizationPercentage != nil {
x.TargetCPUUtilizationPercentage = nil
}
} else {
if x.TargetCPUUtilizationPercentage == nil {
x.TargetCPUUtilizationPercentage = new(int32)
}
yym55 := z.DecBinary()
_ = yym55
if false {
} else {
*((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32))
}
}
for {
yyj49++
if yyhl49 {
yyb49 = yyj49 > l
} else {
yyb49 = r.CheckBreak()
}
if yyb49 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj49-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
if x == nil {
r.EncodeNil()
} else {
yym56 := z.EncBinary()
_ = yym56
if false {
} else if z.HasExtensions() && z.EncExt(x) {
} else {
yysep57 := !z.EncBinary()
yy2arr57 := z.EncBasicHandle().StructToArray
var yyq57 [5]bool
_, _, _ = yysep57, yyq57, yy2arr57
const yyr57 bool = false
yyq57[0] = x.ObservedGeneration != nil
yyq57[1] = x.LastScaleTime != nil
yyq57[4] = x.CurrentCPUUtilizationPercentage != nil
var yynn57 int
if yyr57 || yy2arr57 {
r.EncodeArrayStart(5)
} else {
yynn57 = 2
for _, b := range yyq57 {
if b {
yynn57++
}
}
r.EncodeMapStart(yynn57)
yynn57 = 0
}
if yyr57 || yy2arr57 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq57[0] {
if x.ObservedGeneration == nil {
r.EncodeNil()
} else {
yy59 := *x.ObservedGeneration
yym60 := z.EncBinary()
_ = yym60
if false {
} else {
r.EncodeInt(int64(yy59))
}
}
} else {
r.EncodeNil()
}
} else {
if yyq57[0] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.ObservedGeneration == nil {
r.EncodeNil()
} else {
yy61 := *x.ObservedGeneration
yym62 := z.EncBinary()
_ = yym62
if false {
} else {
r.EncodeInt(int64(yy61))
}
}
}
}
if yyr57 || yy2arr57 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq57[1] {
if x.LastScaleTime == nil {
r.EncodeNil()
} else {
yym64 := z.EncBinary()
_ = yym64
if false {
} else if z.HasExtensions() && z.EncExt(x.LastScaleTime) {
} else if yym64 {
z.EncBinaryMarshal(x.LastScaleTime)
} else if !yym64 && z.IsJSONHandle() {
z.EncJSONMarshal(x.LastScaleTime)
} else {
z.EncFallback(x.LastScaleTime)
}
}
} else {
r.EncodeNil()
}
} else {
if yyq57[1] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.LastScaleTime == nil {
r.EncodeNil()
} else {
yym65 := z.EncBinary()
_ = yym65
if false {
} else if z.HasExtensions() && z.EncExt(x.LastScaleTime) {
} else if yym65 {
z.EncBinaryMarshal(x.LastScaleTime)
} else if !yym65 && z.IsJSONHandle() {
z.EncJSONMarshal(x.LastScaleTime)
} else {
z.EncFallback(x.LastScaleTime)
}
}
}
}
if yyr57 || yy2arr57 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yym67 := z.EncBinary()
_ = yym67
if false {
} else {
r.EncodeInt(int64(x.CurrentReplicas))
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("currentReplicas"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym68 := z.EncBinary()
_ = yym68
if false {
} else {
r.EncodeInt(int64(x.CurrentReplicas))
}
}
if yyr57 || yy2arr57 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yym70 := z.EncBinary()
_ = yym70
if false {
} else {
r.EncodeInt(int64(x.DesiredReplicas))
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym71 := z.EncBinary()
_ = yym71
if false {
} else {
r.EncodeInt(int64(x.DesiredReplicas))
}
}
if yyr57 || yy2arr57 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq57[4] {
if x.CurrentCPUUtilizationPercentage == nil {
r.EncodeNil()
} else {
yy73 := *x.CurrentCPUUtilizationPercentage
yym74 := z.EncBinary()
_ = yym74
if false {
} else {
r.EncodeInt(int64(yy73))
}
}
} else {
r.EncodeNil()
}
} else {
if yyq57[4] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("currentCPUUtilizationPercentage"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.CurrentCPUUtilizationPercentage == nil {
r.EncodeNil()
} else {
yy75 := *x.CurrentCPUUtilizationPercentage
yym76 := z.EncBinary()
_ = yym76
if false {
} else {
r.EncodeInt(int64(yy75))
}
}
}
}
if yyr57 || yy2arr57 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
z.EncSendContainerState(codecSelfer_containerMapEnd1234)
}
}
}
}
func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yym77 := z.DecBinary()
_ = yym77
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
yyct78 := r.ContainerType()
if yyct78 == codecSelferValueTypeMap1234 {
yyl78 := r.ReadMapStart()
if yyl78 == 0 {
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
} else {
x.codecDecodeSelfFromMap(yyl78, d)
}
} else if yyct78 == codecSelferValueTypeArray1234 {
yyl78 := r.ReadArrayStart()
if yyl78 == 0 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
x.codecDecodeSelfFromArray(yyl78, d)
}
} else {
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
}
}
}
func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yys79Slc = z.DecScratchBuffer() // default slice to decode into
_ = yys79Slc
var yyhl79 bool = l >= 0
for yyj79 := 0; ; yyj79++ {
if yyhl79 {
if yyj79 >= l {
break
}
} else {
if r.CheckBreak() {
break
}
}
z.DecSendContainerState(codecSelfer_containerMapKey1234)
yys79Slc = r.DecodeBytes(yys79Slc, true, true)
yys79 := string(yys79Slc)
z.DecSendContainerState(codecSelfer_containerMapValue1234)
switch yys79 {
case "observedGeneration":
if r.TryDecodeAsNil() {
if x.ObservedGeneration != nil {
x.ObservedGeneration = nil
}
} else {
if x.ObservedGeneration == nil {
x.ObservedGeneration = new(int64)
}
yym81 := z.DecBinary()
_ = yym81
if false {
} else {
*((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64))
}
}
case "lastScaleTime":
if r.TryDecodeAsNil() {
if x.LastScaleTime != nil {
x.LastScaleTime = nil
}
} else {
if x.LastScaleTime == nil {
x.LastScaleTime = new(pkg1_unversioned.Time)
}
yym83 := z.DecBinary()
_ = yym83
if false {
} else if z.HasExtensions() && z.DecExt(x.LastScaleTime) {
} else if yym83 {
z.DecBinaryUnmarshal(x.LastScaleTime)
} else if !yym83 && z.IsJSONHandle() {
z.DecJSONUnmarshal(x.LastScaleTime)
} else {
z.DecFallback(x.LastScaleTime, false)
}
}
case "currentReplicas":
if r.TryDecodeAsNil() {
x.CurrentReplicas = 0
} else {
x.CurrentReplicas = int32(r.DecodeInt(32))
}
case "desiredReplicas":
if r.TryDecodeAsNil() {
x.DesiredReplicas = 0
} else {
x.DesiredReplicas = int32(r.DecodeInt(32))
}
case "currentCPUUtilizationPercentage":
if r.TryDecodeAsNil() {
if x.CurrentCPUUtilizationPercentage != nil {
x.CurrentCPUUtilizationPercentage = nil
}
} else {
if x.CurrentCPUUtilizationPercentage == nil {
x.CurrentCPUUtilizationPercentage = new(int32)
}
yym87 := z.DecBinary()
_ = yym87
if false {
} else {
*((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32))
}
}
default:
z.DecStructFieldNotFound(-1, yys79)
} // end switch yys79
} // end for yyj79
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
}
<|fim▁hole|> var yyj88 int
var yyb88 bool
var yyhl88 bool = l >= 0
yyj88++
if yyhl88 {
yyb88 = yyj88 > l
} else {
yyb88 = r.CheckBreak()
}
if yyb88 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
if x.ObservedGeneration != nil {
x.ObservedGeneration = nil
}
} else {
if x.ObservedGeneration == nil {
x.ObservedGeneration = new(int64)
}
yym90 := z.DecBinary()
_ = yym90
if false {
} else {
*((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64))
}
}
yyj88++
if yyhl88 {
yyb88 = yyj88 > l
} else {
yyb88 = r.CheckBreak()
}
if yyb88 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
if x.LastScaleTime != nil {
x.LastScaleTime = nil
}
} else {
if x.LastScaleTime == nil {
x.LastScaleTime = new(pkg1_unversioned.Time)
}
yym92 := z.DecBinary()
_ = yym92
if false {
} else if z.HasExtensions() && z.DecExt(x.LastScaleTime) {
} else if yym92 {
z.DecBinaryUnmarshal(x.LastScaleTime)
} else if !yym92 && z.IsJSONHandle() {
z.DecJSONUnmarshal(x.LastScaleTime)
} else {
z.DecFallback(x.LastScaleTime, false)
}
}
yyj88++
if yyhl88 {
yyb88 = yyj88 > l
} else {
yyb88 = r.CheckBreak()
}
if yyb88 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.CurrentReplicas = 0
} else {
x.CurrentReplicas = int32(r.DecodeInt(32))
}
yyj88++
if yyhl88 {
yyb88 = yyj88 > l
} else {
yyb88 = r.CheckBreak()
}
if yyb88 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.DesiredReplicas = 0
} else {
x.DesiredReplicas = int32(r.DecodeInt(32))
}
yyj88++
if yyhl88 {
yyb88 = yyj88 > l
} else {
yyb88 = r.CheckBreak()
}
if yyb88 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
if x.CurrentCPUUtilizationPercentage != nil {
x.CurrentCPUUtilizationPercentage = nil
}
} else {
if x.CurrentCPUUtilizationPercentage == nil {
x.CurrentCPUUtilizationPercentage = new(int32)
}
yym96 := z.DecBinary()
_ = yym96
if false {
} else {
*((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32))
}
}
for {
yyj88++
if yyhl88 {
yyb88 = yyj88 > l
} else {
yyb88 = r.CheckBreak()
}
if yyb88 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj88-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
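// Top-level API objects (HorizontalPodAutoscaler and its List wrapper) follow
// the same pattern, delegating metadata, spec and status to their own codecs.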
func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
if x == nil {
r.EncodeNil()
} else {
yym97 := z.EncBinary()
_ = yym97
if false {
} else if z.HasExtensions() && z.EncExt(x) {
} else {
yysep98 := !z.EncBinary()
yy2arr98 := z.EncBasicHandle().StructToArray
var yyq98 [5]bool
_, _, _ = yysep98, yyq98, yy2arr98
const yyr98 bool = false
yyq98[0] = x.Kind != ""
yyq98[1] = x.APIVersion != ""
yyq98[2] = true
yyq98[3] = true
yyq98[4] = true
var yynn98 int
if yyr98 || yy2arr98 {
r.EncodeArrayStart(5)
} else {
yynn98 = 0
for _, b := range yyq98 {
if b {
yynn98++
}
}
r.EncodeMapStart(yynn98)
yynn98 = 0
}
if yyr98 || yy2arr98 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq98[0] {
yym100 := z.EncBinary()
_ = yym100
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
}
} else {
r.EncodeString(codecSelferC_UTF81234, "")
}
} else {
if yyq98[0] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("kind"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym101 := z.EncBinary()
_ = yym101
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
}
}
}
if yyr98 || yy2arr98 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq98[1] {
yym103 := z.EncBinary()
_ = yym103
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
}
} else {
r.EncodeString(codecSelferC_UTF81234, "")
}
} else {
if yyq98[1] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym104 := z.EncBinary()
_ = yym104
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
}
}
}
if yyr98 || yy2arr98 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq98[2] {
yy106 := &x.ObjectMeta
yy106.CodecEncodeSelf(e)
} else {
r.EncodeNil()
}
} else {
if yyq98[2] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("metadata"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yy107 := &x.ObjectMeta
yy107.CodecEncodeSelf(e)
}
}
if yyr98 || yy2arr98 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq98[3] {
yy109 := &x.Spec
yy109.CodecEncodeSelf(e)
} else {
r.EncodeNil()
}
} else {
if yyq98[3] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("spec"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yy110 := &x.Spec
yy110.CodecEncodeSelf(e)
}
}
if yyr98 || yy2arr98 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq98[4] {
yy112 := &x.Status
yy112.CodecEncodeSelf(e)
} else {
r.EncodeNil()
}
} else {
if yyq98[4] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("status"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yy113 := &x.Status
yy113.CodecEncodeSelf(e)
}
}
if yyr98 || yy2arr98 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
z.EncSendContainerState(codecSelfer_containerMapEnd1234)
}
}
}
}
func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yym114 := z.DecBinary()
_ = yym114
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
yyct115 := r.ContainerType()
if yyct115 == codecSelferValueTypeMap1234 {
yyl115 := r.ReadMapStart()
if yyl115 == 0 {
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
} else {
x.codecDecodeSelfFromMap(yyl115, d)
}
} else if yyct115 == codecSelferValueTypeArray1234 {
yyl115 := r.ReadArrayStart()
if yyl115 == 0 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
x.codecDecodeSelfFromArray(yyl115, d)
}
} else {
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
}
}
}
func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yys116Slc = z.DecScratchBuffer() // default slice to decode into
_ = yys116Slc
var yyhl116 bool = l >= 0
for yyj116 := 0; ; yyj116++ {
if yyhl116 {
if yyj116 >= l {
break
}
} else {
if r.CheckBreak() {
break
}
}
z.DecSendContainerState(codecSelfer_containerMapKey1234)
yys116Slc = r.DecodeBytes(yys116Slc, true, true)
yys116 := string(yys116Slc)
z.DecSendContainerState(codecSelfer_containerMapValue1234)
switch yys116 {
case "kind":
if r.TryDecodeAsNil() {
x.Kind = ""
} else {
x.Kind = string(r.DecodeString())
}
case "apiVersion":
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
x.APIVersion = string(r.DecodeString())
}
case "metadata":
if r.TryDecodeAsNil() {
x.ObjectMeta = pkg2_v1.ObjectMeta{}
} else {
yyv119 := &x.ObjectMeta
yyv119.CodecDecodeSelf(d)
}
case "spec":
if r.TryDecodeAsNil() {
x.Spec = HorizontalPodAutoscalerSpec{}
} else {
yyv120 := &x.Spec
yyv120.CodecDecodeSelf(d)
}
case "status":
if r.TryDecodeAsNil() {
x.Status = HorizontalPodAutoscalerStatus{}
} else {
yyv121 := &x.Status
yyv121.CodecDecodeSelf(d)
}
default:
z.DecStructFieldNotFound(-1, yys116)
} // end switch yys116
} // end for yyj116
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
}
func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj122 int
var yyb122 bool
var yyhl122 bool = l >= 0
yyj122++
if yyhl122 {
yyb122 = yyj122 > l
} else {
yyb122 = r.CheckBreak()
}
if yyb122 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Kind = ""
} else {
x.Kind = string(r.DecodeString())
}
yyj122++
if yyhl122 {
yyb122 = yyj122 > l
} else {
yyb122 = r.CheckBreak()
}
if yyb122 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
x.APIVersion = string(r.DecodeString())
}
yyj122++
if yyhl122 {
yyb122 = yyj122 > l
} else {
yyb122 = r.CheckBreak()
}
if yyb122 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.ObjectMeta = pkg2_v1.ObjectMeta{}
} else {
yyv125 := &x.ObjectMeta
yyv125.CodecDecodeSelf(d)
}
yyj122++
if yyhl122 {
yyb122 = yyj122 > l
} else {
yyb122 = r.CheckBreak()
}
if yyb122 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Spec = HorizontalPodAutoscalerSpec{}
} else {
yyv126 := &x.Spec
yyv126.CodecDecodeSelf(d)
}
yyj122++
if yyhl122 {
yyb122 = yyj122 > l
} else {
yyb122 = r.CheckBreak()
}
if yyb122 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Status = HorizontalPodAutoscalerStatus{}
} else {
yyv127 := &x.Status
yyv127.CodecDecodeSelf(d)
}
for {
yyj122++
if yyhl122 {
yyb122 = yyj122 > l
} else {
yyb122 = r.CheckBreak()
}
if yyb122 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj122-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
if x == nil {
r.EncodeNil()
} else {
yym128 := z.EncBinary()
_ = yym128
if false {
} else if z.HasExtensions() && z.EncExt(x) {
} else {
yysep129 := !z.EncBinary()
yy2arr129 := z.EncBasicHandle().StructToArray
var yyq129 [4]bool
_, _, _ = yysep129, yyq129, yy2arr129
const yyr129 bool = false
yyq129[0] = x.Kind != ""
yyq129[1] = x.APIVersion != ""
yyq129[2] = true
var yynn129 int
if yyr129 || yy2arr129 {
r.EncodeArrayStart(4)
} else {
yynn129 = 1
for _, b := range yyq129 {
if b {
yynn129++
}
}
r.EncodeMapStart(yynn129)
yynn129 = 0
}
if yyr129 || yy2arr129 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq129[0] {
yym131 := z.EncBinary()
_ = yym131
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
}
} else {
r.EncodeString(codecSelferC_UTF81234, "")
}
} else {
if yyq129[0] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("kind"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym132 := z.EncBinary()
_ = yym132
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
}
}
}
if yyr129 || yy2arr129 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq129[1] {
yym134 := z.EncBinary()
_ = yym134
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
}
} else {
r.EncodeString(codecSelferC_UTF81234, "")
}
} else {
if yyq129[1] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym135 := z.EncBinary()
_ = yym135
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
}
}
}
if yyr129 || yy2arr129 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq129[2] {
yy137 := &x.ListMeta
yym138 := z.EncBinary()
_ = yym138
if false {
} else if z.HasExtensions() && z.EncExt(yy137) {
} else {
z.EncFallback(yy137)
}
} else {
r.EncodeNil()
}
} else {
if yyq129[2] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("metadata"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yy139 := &x.ListMeta
yym140 := z.EncBinary()
_ = yym140
if false {
} else if z.HasExtensions() && z.EncExt(yy139) {
} else {
z.EncFallback(yy139)
}
}
}
if yyr129 || yy2arr129 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if x.Items == nil {
r.EncodeNil()
} else {
yym142 := z.EncBinary()
_ = yym142
if false {
} else {
h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e)
}
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("items"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.Items == nil {
r.EncodeNil()
} else {
yym143 := z.EncBinary()
_ = yym143
if false {
} else {
h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e)
}
}
}
if yyr129 || yy2arr129 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
z.EncSendContainerState(codecSelfer_containerMapEnd1234)
}
}
}
}
func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yym144 := z.DecBinary()
_ = yym144
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
yyct145 := r.ContainerType()
if yyct145 == codecSelferValueTypeMap1234 {
yyl145 := r.ReadMapStart()
if yyl145 == 0 {
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
} else {
x.codecDecodeSelfFromMap(yyl145, d)
}
} else if yyct145 == codecSelferValueTypeArray1234 {
yyl145 := r.ReadArrayStart()
if yyl145 == 0 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
x.codecDecodeSelfFromArray(yyl145, d)
}
} else {
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
}
}
}
func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yys146Slc = z.DecScratchBuffer() // default slice to decode into
_ = yys146Slc
var yyhl146 bool = l >= 0
for yyj146 := 0; ; yyj146++ {
if yyhl146 {
if yyj146 >= l {
break
}
} else {
if r.CheckBreak() {
break
}
}
z.DecSendContainerState(codecSelfer_containerMapKey1234)
yys146Slc = r.DecodeBytes(yys146Slc, true, true)
yys146 := string(yys146Slc)
z.DecSendContainerState(codecSelfer_containerMapValue1234)
switch yys146 {
case "kind":
if r.TryDecodeAsNil() {
x.Kind = ""
} else {
x.Kind = string(r.DecodeString())
}
case "apiVersion":
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
x.APIVersion = string(r.DecodeString())
}
case "metadata":
if r.TryDecodeAsNil() {
x.ListMeta = pkg1_unversioned.ListMeta{}
} else {
yyv149 := &x.ListMeta
yym150 := z.DecBinary()
_ = yym150
if false {
} else if z.HasExtensions() && z.DecExt(yyv149) {
} else {
z.DecFallback(yyv149, false)
}
}
case "items":
if r.TryDecodeAsNil() {
x.Items = nil
} else {
yyv151 := &x.Items
yym152 := z.DecBinary()
_ = yym152
if false {
} else {
h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv151), d)
}
}
default:
z.DecStructFieldNotFound(-1, yys146)
} // end switch yys146
} // end for yyj146
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
}
func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj153 int
var yyb153 bool
var yyhl153 bool = l >= 0
yyj153++
if yyhl153 {
yyb153 = yyj153 > l
} else {
yyb153 = r.CheckBreak()
}
if yyb153 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Kind = ""
} else {
x.Kind = string(r.DecodeString())
}
yyj153++
if yyhl153 {
yyb153 = yyj153 > l
} else {
yyb153 = r.CheckBreak()
}
if yyb153 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
x.APIVersion = string(r.DecodeString())
}
yyj153++
if yyhl153 {
yyb153 = yyj153 > l
} else {
yyb153 = r.CheckBreak()
}
if yyb153 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.ListMeta = pkg1_unversioned.ListMeta{}
} else {
yyv156 := &x.ListMeta
yym157 := z.DecBinary()
_ = yym157
if false {
} else if z.HasExtensions() && z.DecExt(yyv156) {
} else {
z.DecFallback(yyv156, false)
}
}
yyj153++
if yyhl153 {
yyb153 = yyj153 > l
} else {
yyb153 = r.CheckBreak()
}
if yyb153 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Items = nil
} else {
yyv158 := &x.Items
yym159 := z.DecBinary()
_ = yym159
if false {
} else {
h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv158), d)
}
}
for {
yyj153++
if yyhl153 {
yyb153 = yyj153 > l
} else {
yyb153 = r.CheckBreak()
}
if yyb153 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj153-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
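// Scale and its Spec/Status reuse the same encode/decode scaffolding; only
// the field sets (replicas, selector) differ.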
func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
if x == nil {
r.EncodeNil()
} else {
yym160 := z.EncBinary()
_ = yym160
if false {
} else if z.HasExtensions() && z.EncExt(x) {
} else {
yysep161 := !z.EncBinary()
yy2arr161 := z.EncBasicHandle().StructToArray
var yyq161 [5]bool
_, _, _ = yysep161, yyq161, yy2arr161
const yyr161 bool = false
yyq161[0] = x.Kind != ""
yyq161[1] = x.APIVersion != ""
yyq161[2] = true
yyq161[3] = true
yyq161[4] = true
var yynn161 int
if yyr161 || yy2arr161 {
r.EncodeArrayStart(5)
} else {
yynn161 = 0
for _, b := range yyq161 {
if b {
yynn161++
}
}
r.EncodeMapStart(yynn161)
yynn161 = 0
}
if yyr161 || yy2arr161 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq161[0] {
yym163 := z.EncBinary()
_ = yym163
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
}
} else {
r.EncodeString(codecSelferC_UTF81234, "")
}
} else {
if yyq161[0] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("kind"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym164 := z.EncBinary()
_ = yym164
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
}
}
}
if yyr161 || yy2arr161 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq161[1] {
yym166 := z.EncBinary()
_ = yym166
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
}
} else {
r.EncodeString(codecSelferC_UTF81234, "")
}
} else {
if yyq161[1] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym167 := z.EncBinary()
_ = yym167
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
}
}
}
if yyr161 || yy2arr161 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq161[2] {
yy169 := &x.ObjectMeta
yy169.CodecEncodeSelf(e)
} else {
r.EncodeNil()
}
} else {
if yyq161[2] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("metadata"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yy170 := &x.ObjectMeta
yy170.CodecEncodeSelf(e)
}
}
if yyr161 || yy2arr161 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq161[3] {
yy172 := &x.Spec
yy172.CodecEncodeSelf(e)
} else {
r.EncodeNil()
}
} else {
if yyq161[3] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("spec"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yy173 := &x.Spec
yy173.CodecEncodeSelf(e)
}
}
if yyr161 || yy2arr161 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq161[4] {
yy175 := &x.Status
yy175.CodecEncodeSelf(e)
} else {
r.EncodeNil()
}
} else {
if yyq161[4] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("status"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yy176 := &x.Status
yy176.CodecEncodeSelf(e)
}
}
if yyr161 || yy2arr161 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
z.EncSendContainerState(codecSelfer_containerMapEnd1234)
}
}
}
}
func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yym177 := z.DecBinary()
_ = yym177
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
yyct178 := r.ContainerType()
if yyct178 == codecSelferValueTypeMap1234 {
yyl178 := r.ReadMapStart()
if yyl178 == 0 {
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
} else {
x.codecDecodeSelfFromMap(yyl178, d)
}
} else if yyct178 == codecSelferValueTypeArray1234 {
yyl178 := r.ReadArrayStart()
if yyl178 == 0 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
x.codecDecodeSelfFromArray(yyl178, d)
}
} else {
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
}
}
}
func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yys179Slc = z.DecScratchBuffer() // default slice to decode into
_ = yys179Slc
var yyhl179 bool = l >= 0
for yyj179 := 0; ; yyj179++ {
if yyhl179 {
if yyj179 >= l {
break
}
} else {
if r.CheckBreak() {
break
}
}
z.DecSendContainerState(codecSelfer_containerMapKey1234)
yys179Slc = r.DecodeBytes(yys179Slc, true, true)
yys179 := string(yys179Slc)
z.DecSendContainerState(codecSelfer_containerMapValue1234)
switch yys179 {
case "kind":
if r.TryDecodeAsNil() {
x.Kind = ""
} else {
x.Kind = string(r.DecodeString())
}
case "apiVersion":
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
x.APIVersion = string(r.DecodeString())
}
case "metadata":
if r.TryDecodeAsNil() {
x.ObjectMeta = pkg2_v1.ObjectMeta{}
} else {
yyv182 := &x.ObjectMeta
yyv182.CodecDecodeSelf(d)
}
case "spec":
if r.TryDecodeAsNil() {
x.Spec = ScaleSpec{}
} else {
yyv183 := &x.Spec
yyv183.CodecDecodeSelf(d)
}
case "status":
if r.TryDecodeAsNil() {
x.Status = ScaleStatus{}
} else {
yyv184 := &x.Status
yyv184.CodecDecodeSelf(d)
}
default:
z.DecStructFieldNotFound(-1, yys179)
} // end switch yys179
} // end for yyj179
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
}
func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj185 int
var yyb185 bool
var yyhl185 bool = l >= 0
yyj185++
if yyhl185 {
yyb185 = yyj185 > l
} else {
yyb185 = r.CheckBreak()
}
if yyb185 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Kind = ""
} else {
x.Kind = string(r.DecodeString())
}
yyj185++
if yyhl185 {
yyb185 = yyj185 > l
} else {
yyb185 = r.CheckBreak()
}
if yyb185 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
x.APIVersion = string(r.DecodeString())
}
yyj185++
if yyhl185 {
yyb185 = yyj185 > l
} else {
yyb185 = r.CheckBreak()
}
if yyb185 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.ObjectMeta = pkg2_v1.ObjectMeta{}
} else {
yyv188 := &x.ObjectMeta
yyv188.CodecDecodeSelf(d)
}
yyj185++
if yyhl185 {
yyb185 = yyj185 > l
} else {
yyb185 = r.CheckBreak()
}
if yyb185 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Spec = ScaleSpec{}
} else {
yyv189 := &x.Spec
yyv189.CodecDecodeSelf(d)
}
yyj185++
if yyhl185 {
yyb185 = yyj185 > l
} else {
yyb185 = r.CheckBreak()
}
if yyb185 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Status = ScaleStatus{}
} else {
yyv190 := &x.Status
yyv190.CodecDecodeSelf(d)
}
for {
yyj185++
if yyhl185 {
yyb185 = yyj185 > l
} else {
yyb185 = r.CheckBreak()
}
if yyb185 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj185-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
if x == nil {
r.EncodeNil()
} else {
yym191 := z.EncBinary()
_ = yym191
if false {
} else if z.HasExtensions() && z.EncExt(x) {
} else {
yysep192 := !z.EncBinary()
yy2arr192 := z.EncBasicHandle().StructToArray
var yyq192 [1]bool
_, _, _ = yysep192, yyq192, yy2arr192
const yyr192 bool = false
yyq192[0] = x.Replicas != 0
var yynn192 int
if yyr192 || yy2arr192 {
r.EncodeArrayStart(1)
} else {
yynn192 = 0
for _, b := range yyq192 {
if b {
yynn192++
}
}
r.EncodeMapStart(yynn192)
yynn192 = 0
}
if yyr192 || yy2arr192 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq192[0] {
yym194 := z.EncBinary()
_ = yym194
if false {
} else {
r.EncodeInt(int64(x.Replicas))
}
} else {
r.EncodeInt(0)
}
} else {
if yyq192[0] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("replicas"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym195 := z.EncBinary()
_ = yym195
if false {
} else {
r.EncodeInt(int64(x.Replicas))
}
}
}
if yyr192 || yy2arr192 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
z.EncSendContainerState(codecSelfer_containerMapEnd1234)
}
}
}
}
func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yym196 := z.DecBinary()
_ = yym196
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
yyct197 := r.ContainerType()
if yyct197 == codecSelferValueTypeMap1234 {
yyl197 := r.ReadMapStart()
if yyl197 == 0 {
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
} else {
x.codecDecodeSelfFromMap(yyl197, d)
}
} else if yyct197 == codecSelferValueTypeArray1234 {
yyl197 := r.ReadArrayStart()
if yyl197 == 0 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
x.codecDecodeSelfFromArray(yyl197, d)
}
} else {
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
}
}
}
func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yys198Slc = z.DecScratchBuffer() // default slice to decode into
_ = yys198Slc
var yyhl198 bool = l >= 0
for yyj198 := 0; ; yyj198++ {
if yyhl198 {
if yyj198 >= l {
break
}
} else {
if r.CheckBreak() {
break
}
}
z.DecSendContainerState(codecSelfer_containerMapKey1234)
yys198Slc = r.DecodeBytes(yys198Slc, true, true)
yys198 := string(yys198Slc)
z.DecSendContainerState(codecSelfer_containerMapValue1234)
switch yys198 {
case "replicas":
if r.TryDecodeAsNil() {
x.Replicas = 0
} else {
x.Replicas = int32(r.DecodeInt(32))
}
default:
z.DecStructFieldNotFound(-1, yys198)
} // end switch yys198
} // end for yyj198
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
}
func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj200 int
var yyb200 bool
var yyhl200 bool = l >= 0
yyj200++
if yyhl200 {
yyb200 = yyj200 > l
} else {
yyb200 = r.CheckBreak()
}
if yyb200 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Replicas = 0
} else {
x.Replicas = int32(r.DecodeInt(32))
}
for {
yyj200++
if yyhl200 {
yyb200 = yyj200 > l
} else {
yyb200 = r.CheckBreak()
}
if yyb200 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj200-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
if x == nil {
r.EncodeNil()
} else {
yym202 := z.EncBinary()
_ = yym202
if false {
} else if z.HasExtensions() && z.EncExt(x) {
} else {
yysep203 := !z.EncBinary()
yy2arr203 := z.EncBasicHandle().StructToArray
var yyq203 [2]bool
_, _, _ = yysep203, yyq203, yy2arr203
const yyr203 bool = false
yyq203[1] = x.Selector != ""
var yynn203 int
if yyr203 || yy2arr203 {
r.EncodeArrayStart(2)
} else {
yynn203 = 1
for _, b := range yyq203 {
if b {
yynn203++
}
}
r.EncodeMapStart(yynn203)
yynn203 = 0
}
if yyr203 || yy2arr203 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yym205 := z.EncBinary()
_ = yym205
if false {
} else {
r.EncodeInt(int64(x.Replicas))
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("replicas"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym206 := z.EncBinary()
_ = yym206
if false {
} else {
r.EncodeInt(int64(x.Replicas))
}
}
if yyr203 || yy2arr203 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq203[1] {
yym208 := z.EncBinary()
_ = yym208
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Selector))
}
} else {
r.EncodeString(codecSelferC_UTF81234, "")
}
} else {
if yyq203[1] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("selector"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym209 := z.EncBinary()
_ = yym209
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Selector))
}
}
}
if yyr203 || yy2arr203 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
z.EncSendContainerState(codecSelfer_containerMapEnd1234)
}
}
}
}
func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yym210 := z.DecBinary()
_ = yym210
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
yyct211 := r.ContainerType()
if yyct211 == codecSelferValueTypeMap1234 {
yyl211 := r.ReadMapStart()
if yyl211 == 0 {
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
} else {
x.codecDecodeSelfFromMap(yyl211, d)
}
} else if yyct211 == codecSelferValueTypeArray1234 {
yyl211 := r.ReadArrayStart()
if yyl211 == 0 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
x.codecDecodeSelfFromArray(yyl211, d)
}
} else {
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
}
}
}
func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yys212Slc = z.DecScratchBuffer() // default slice to decode into
_ = yys212Slc
var yyhl212 bool = l >= 0
for yyj212 := 0; ; yyj212++ {
if yyhl212 {
if yyj212 >= l {
break
}
} else {
if r.CheckBreak() {
break
}
}
z.DecSendContainerState(codecSelfer_containerMapKey1234)
yys212Slc = r.DecodeBytes(yys212Slc, true, true)
yys212 := string(yys212Slc)
z.DecSendContainerState(codecSelfer_containerMapValue1234)
switch yys212 {
case "replicas":
if r.TryDecodeAsNil() {
x.Replicas = 0
} else {
x.Replicas = int32(r.DecodeInt(32))
}
case "selector":
if r.TryDecodeAsNil() {
x.Selector = ""
} else {
x.Selector = string(r.DecodeString())
}
default:
z.DecStructFieldNotFound(-1, yys212)
} // end switch yys212
} // end for yyj212
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
}
func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj215 int
var yyb215 bool
var yyhl215 bool = l >= 0
yyj215++
if yyhl215 {
yyb215 = yyj215 > l
} else {
yyb215 = r.CheckBreak()
}
if yyb215 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Replicas = 0
} else {
x.Replicas = int32(r.DecodeInt(32))
}
yyj215++
if yyhl215 {
yyb215 = yyj215 > l
} else {
yyb215 = r.CheckBreak()
}
if yyb215 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Selector = ""
} else {
x.Selector = string(r.DecodeString())
}
for {
yyj215++
if yyhl215 {
yyb215 = yyj215 > l
} else {
yyb215 = r.CheckBreak()
}
if yyb215 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj215-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
r.EncodeArrayStart(len(v))
for _, yyv218 := range v {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yy219 := &yyv218
yy219.CodecEncodeSelf(e)
}
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
}
func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yyv220 := *v
yyh220, yyl220 := z.DecSliceHelperStart()
var yyc220 bool
if yyl220 == 0 {
if yyv220 == nil {
yyv220 = []HorizontalPodAutoscaler{}
yyc220 = true
} else if len(yyv220) != 0 {
yyv220 = yyv220[:0]
yyc220 = true
}
} else if yyl220 > 0 {
var yyrr220, yyrl220 int
var yyrt220 bool
if yyl220 > cap(yyv220) {
yyrg220 := len(yyv220) > 0
yyv2220 := yyv220
yyrl220, yyrt220 = z.DecInferLen(yyl220, z.DecBasicHandle().MaxInitLen, 360)
if yyrt220 {
if yyrl220 <= cap(yyv220) {
yyv220 = yyv220[:yyrl220]
} else {
yyv220 = make([]HorizontalPodAutoscaler, yyrl220)
}
} else {
yyv220 = make([]HorizontalPodAutoscaler, yyrl220)
}
yyc220 = true
yyrr220 = len(yyv220)
if yyrg220 {
copy(yyv220, yyv2220)
}
} else if yyl220 != len(yyv220) {
yyv220 = yyv220[:yyl220]
yyc220 = true
}
yyj220 := 0
for ; yyj220 < yyrr220; yyj220++ {
yyh220.ElemContainerState(yyj220)
if r.TryDecodeAsNil() {
yyv220[yyj220] = HorizontalPodAutoscaler{}
} else {
yyv221 := &yyv220[yyj220]
yyv221.CodecDecodeSelf(d)
}
}
if yyrt220 {
for ; yyj220 < yyl220; yyj220++ {
yyv220 = append(yyv220, HorizontalPodAutoscaler{})
yyh220.ElemContainerState(yyj220)
if r.TryDecodeAsNil() {
yyv220[yyj220] = HorizontalPodAutoscaler{}
} else {
yyv222 := &yyv220[yyj220]
yyv222.CodecDecodeSelf(d)
}
}
}
} else {
yyj220 := 0
for ; !r.CheckBreak(); yyj220++ {
if yyj220 >= len(yyv220) {
yyv220 = append(yyv220, HorizontalPodAutoscaler{}) // var yyz220 HorizontalPodAutoscaler
yyc220 = true
}
yyh220.ElemContainerState(yyj220)
if yyj220 < len(yyv220) {
if r.TryDecodeAsNil() {
yyv220[yyj220] = HorizontalPodAutoscaler{}
} else {
yyv223 := &yyv220[yyj220]
yyv223.CodecDecodeSelf(d)
}
} else {
z.DecSwallow()
}
}
if yyj220 < len(yyv220) {
yyv220 = yyv220[:yyj220]
yyc220 = true
} else if yyj220 == 0 && yyv220 == nil {
yyv220 = []HorizontalPodAutoscaler{}
yyc220 = true
}
}
yyh220.End()
if yyc220 {
*v = yyv220
}
}<|fim▁end|> | func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r |
<|file_name|>modifyexperience.js<|end_file_name|><|fim▁begin|>(function (badges, $, undefined) {
"use strict";
var options = {}, geocoder, map, marker;
badges.options = function (o) {
$.extend(options, o);
};
badges.init = function () {
initializeTypeahead();
initializeImagePreview();
$(".datepicker").datepicker({ format: 'm/d/yyyy', autoclose: true });
initializeMap();
};
function initializeTypeahead() {
$("#Experience_Name").typeahead({
name: 'title',
prefetch: options.TitlesUrl,
limit: 10
});
$("#Experience_Organization").typeahead({
name: 'orgs',
prefetch: options.OrganizationsUrl,
limit: 10
});
}
function initializeImagePreview() {
$("#cover-image").change(function () {
var filesToUpload = this.files;
if (filesToUpload.length !== 1) return;
var file = filesToUpload[0];
        if (!file.type.match(/image.*/)) {
            alert("only images, please");
            return; // bail out so a non-image file is never previewed
        }
var img = document.getElementById("cover-preview");
img.src = window.URL.createObjectURL(file);
img.onload = function(e) {
window.URL.revokeObjectURL(this.src);
};
//img.height = 500;
});
}
function initializeMap() {
var davisLatLng = new google.maps.LatLng(38.5449065, -121.7405167);
geocoder = new google.maps.Geocoder();
var mapOptions = {
center: davisLatLng,
zoom: 10,
mapTypeId: google.maps.MapTypeId.ROADMAP
};
map = new google.maps.Map(document.getElementById("map-canvas"),
mapOptions);
marker = new google.maps.Marker({
map: map,
position: davisLatLng
});
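        // Re-geocode whenever the location field loses focus, then recenter
        // the map and drop a fresh marker on the result.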
$("#Experience_Location").blur(function () {
var address = this.value;
geocoder.geocode({ 'address': address }, function (results, status) {
if (status == google.maps.GeocoderStatus.OK) {
if (marker) marker.setMap(null); //clear existing marker
map.setCenter(results[0].geometry.location);
marker = new google.maps.Marker({
map: map,
position: results[0].geometry.location
});
if (results[0].geometry.viewport)
map.fitBounds(results[0].geometry.viewport);
<|fim▁hole|> });
});
}
}(window.Badges = window.Badges || {}, jQuery));<|fim▁end|> | } else {
alert('Geocode was not successful for the following reason: ' + status);
}
|
<|file_name|>dependencies.py<|end_file_name|><|fim▁begin|><|fim▁hole|>import os
import json
import requests
# Plan is to import and to checkout dependencies:<|fim▁end|> | import time |
<|file_name|>ufunc_db.py<|end_file_name|><|fim▁begin|>"""This file contains information on how to translate different ufuncs
into numba. It is a database of different ufuncs and of how each of a
ufunc's loops maps to a function that implements the inner kernel of
that ufunc (the inner kernel being the per-element function).
Use the function get_ufunc_info to get the information related to a
given ufunc.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
# this is lazily initialized to avoid circular imports
_ufunc_db = None
def _lazy_init_db():
global _ufunc_db
if _ufunc_db is None:
_ufunc_db = {}
_fill_ufunc_db(_ufunc_db)
def get_ufuncs():
"""obtain a list of supported ufuncs in the db"""
_lazy_init_db()
return _ufunc_db.keys()
def get_ufunc_info(ufunc_key):
"""get the lowering information for the ufunc with key ufunc_key.
The lowering information is a dictionary that maps from a numpy
loop string (as given by the ufunc types attribute) to a function
that handles code generation for a scalar version of the ufunc
    (that is, generates the "per element" operation).
    Raises a KeyError if the ufunc is not in the ufunc_db.
"""
_lazy_init_db()
return _ufunc_db[ufunc_key]
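# Illustrative lookup sketch (not part of the original module): once the
# table below is filled in, fetching the per-element kernel for np.add on
# double-precision operands is a plain dict lookup. `_example_lookup` is a
# hypothetical helper that exists purely to show the intended usage.
def _example_lookup():
    loops = get_ufunc_info(np.add)  # e.g. {'dd->d': ..., 'ff->f': ...}
    return loops['dd->d']           # kernel implementing float64 addition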
def _fill_ufunc_db(ufunc_db):
# some of these imports would cause a problem of circular
# imports if done at global scope when importing the numba
# module.
from . import builtins, npyfuncs, cmathimpl
ufunc_db[np.negative] = {
'?->?': builtins.bool_invert_impl,
'b->b': builtins.int_negate_impl,
'B->B': builtins.int_negate_impl,
'h->h': builtins.int_negate_impl,
'H->H': builtins.int_negate_impl,
'i->i': builtins.int_negate_impl,
'I->I': builtins.int_negate_impl,
'l->l': builtins.int_negate_impl,
'L->L': builtins.int_negate_impl,
'q->q': builtins.int_negate_impl,
'Q->Q': builtins.int_negate_impl,
'f->f': builtins.real_negate_impl,
'd->d': builtins.real_negate_impl,
'F->F': builtins.complex_negate_impl,
'D->D': builtins.complex_negate_impl,
}
ufunc_db[np.absolute] = {
'?->?': builtins.int_abs_impl,
'b->b': builtins.int_abs_impl,
'B->B': builtins.uint_abs_impl,
'h->h': builtins.int_abs_impl,
'H->H': builtins.uint_abs_impl,
'i->i': builtins.int_abs_impl,
'I->I': builtins.uint_abs_impl,
'l->l': builtins.int_abs_impl,
'L->L': builtins.uint_abs_impl,
'q->q': builtins.int_abs_impl,
'Q->Q': builtins.uint_abs_impl,
'f->f': builtins.real_abs_impl,
'd->d': builtins.real_abs_impl,
'F->f': builtins.complex_abs_impl,
'D->d': builtins.complex_abs_impl,
}
ufunc_db[np.sign] = {
'b->b': builtins.int_sign_impl,
'B->B': builtins.int_sign_impl,
'h->h': builtins.int_sign_impl,
'H->H': builtins.int_sign_impl,
'i->i': builtins.int_sign_impl,
'I->I': builtins.int_sign_impl,
'l->l': builtins.int_sign_impl,
'L->L': builtins.int_sign_impl,
'q->q': builtins.int_sign_impl,
'Q->Q': builtins.int_sign_impl,
'f->f': builtins.real_sign_impl,
'd->d': builtins.real_sign_impl,
'F->F': npyfuncs.np_complex_sign_impl,
'D->D': npyfuncs.np_complex_sign_impl,
}
ufunc_db[np.add] = {
'??->?': builtins.int_or_impl,
'bb->b': builtins.int_add_impl,
'BB->B': builtins.int_add_impl,
'hh->h': builtins.int_add_impl,
'HH->H': builtins.int_add_impl,
'ii->i': builtins.int_add_impl,
'II->I': builtins.int_add_impl,
'll->l': builtins.int_add_impl,
'LL->L': builtins.int_add_impl,
'qq->q': builtins.int_add_impl,
'QQ->Q': builtins.int_add_impl,
'ff->f': builtins.real_add_impl,
'dd->d': builtins.real_add_impl,
'FF->F': builtins.complex_add_impl,
'DD->D': builtins.complex_add_impl,
}
ufunc_db[np.subtract] = {
'??->?': builtins.int_xor_impl,
'bb->b': builtins.int_sub_impl,
'BB->B': builtins.int_sub_impl,
'hh->h': builtins.int_sub_impl,
'HH->H': builtins.int_sub_impl,
'ii->i': builtins.int_sub_impl,
'II->I': builtins.int_sub_impl,
'll->l': builtins.int_sub_impl,
'LL->L': builtins.int_sub_impl,
'qq->q': builtins.int_sub_impl,
'QQ->Q': builtins.int_sub_impl,
'ff->f': builtins.real_sub_impl,
'dd->d': builtins.real_sub_impl,
'FF->F': builtins.complex_sub_impl,
'DD->D': builtins.complex_sub_impl,
}
ufunc_db[np.multiply] = {
'??->?': builtins.int_and_impl,
'bb->b': builtins.int_mul_impl,
'BB->B': builtins.int_mul_impl,
'hh->h': builtins.int_mul_impl,
'HH->H': builtins.int_mul_impl,
'ii->i': builtins.int_mul_impl,
'II->I': builtins.int_mul_impl,
'll->l': builtins.int_mul_impl,
'LL->L': builtins.int_mul_impl,
'qq->q': builtins.int_mul_impl,
'QQ->Q': builtins.int_mul_impl,
'ff->f': builtins.real_mul_impl,
'dd->d': builtins.real_mul_impl,
'FF->F': builtins.complex_mul_impl,
'DD->D': builtins.complex_mul_impl,
}
if np.divide != np.true_divide:
ufunc_db[np.divide] = {
'bb->b': npyfuncs.np_int_sdiv_impl,
'BB->B': npyfuncs.np_int_udiv_impl,
'hh->h': npyfuncs.np_int_sdiv_impl,
'HH->H': npyfuncs.np_int_udiv_impl,
'ii->i': npyfuncs.np_int_sdiv_impl,
'II->I': npyfuncs.np_int_udiv_impl,
'll->l': npyfuncs.np_int_sdiv_impl,
'LL->L': npyfuncs.np_int_udiv_impl,
'qq->q': npyfuncs.np_int_sdiv_impl,
'QQ->Q': npyfuncs.np_int_udiv_impl,
'ff->f': npyfuncs.np_real_div_impl,
'dd->d': npyfuncs.np_real_div_impl,
'FF->F': npyfuncs.np_complex_div_impl,
'DD->D': npyfuncs.np_complex_div_impl,
}
ufunc_db[np.true_divide] = {
'bb->d': npyfuncs.np_int_truediv_impl,
'BB->d': npyfuncs.np_int_truediv_impl,
'hh->d': npyfuncs.np_int_truediv_impl,
'HH->d': npyfuncs.np_int_truediv_impl,
'ii->d': npyfuncs.np_int_truediv_impl,
'II->d': npyfuncs.np_int_truediv_impl,
'll->d': npyfuncs.np_int_truediv_impl,
'LL->d': npyfuncs.np_int_truediv_impl,
'qq->d': npyfuncs.np_int_truediv_impl,
'QQ->d': npyfuncs.np_int_truediv_impl,
'ff->f': npyfuncs.np_real_div_impl,
'dd->d': npyfuncs.np_real_div_impl,
'FF->F': npyfuncs.np_complex_div_impl,
'DD->D': npyfuncs.np_complex_div_impl,
}
ufunc_db[np.floor_divide] = {
'bb->b': npyfuncs.np_int_sdiv_impl,
'BB->B': npyfuncs.np_int_udiv_impl,
'hh->h': npyfuncs.np_int_sdiv_impl,
'HH->H': npyfuncs.np_int_udiv_impl,
'ii->i': npyfuncs.np_int_sdiv_impl,
'II->I': npyfuncs.np_int_udiv_impl,
'll->l': npyfuncs.np_int_sdiv_impl,
'LL->L': npyfuncs.np_int_udiv_impl,
'qq->q': npyfuncs.np_int_sdiv_impl,
'QQ->Q': npyfuncs.np_int_udiv_impl,
'ff->f': npyfuncs.np_real_floor_div_impl,
'dd->d': npyfuncs.np_real_floor_div_impl,
'FF->F': npyfuncs.np_complex_floor_div_impl,
'DD->D': npyfuncs.np_complex_floor_div_impl,
}
ufunc_db[np.remainder] = {
'bb->b': npyfuncs.np_int_srem_impl,
'BB->B': npyfuncs.np_int_urem_impl,
'hh->h': npyfuncs.np_int_srem_impl,
'HH->H': npyfuncs.np_int_urem_impl,
'ii->i': npyfuncs.np_int_srem_impl,
'II->I': npyfuncs.np_int_urem_impl,
'll->l': npyfuncs.np_int_srem_impl,
'LL->L': npyfuncs.np_int_urem_impl,
'qq->q': npyfuncs.np_int_srem_impl,
'QQ->Q': npyfuncs.np_int_urem_impl,
'ff->f': npyfuncs.np_real_mod_impl,
'dd->d': npyfuncs.np_real_mod_impl,
}
ufunc_db[np.fmod] = {
'bb->b': npyfuncs.np_int_fmod_impl,
'BB->B': npyfuncs.np_int_fmod_impl,
'hh->h': npyfuncs.np_int_fmod_impl,
'HH->H': npyfuncs.np_int_fmod_impl,
'ii->i': npyfuncs.np_int_fmod_impl,
'II->I': npyfuncs.np_int_fmod_impl,
'll->l': npyfuncs.np_int_fmod_impl,
'LL->L': npyfuncs.np_int_fmod_impl,
'qq->q': npyfuncs.np_int_fmod_impl,
'QQ->Q': npyfuncs.np_int_fmod_impl,
'ff->f': npyfuncs.np_real_fmod_impl,
'dd->d': npyfuncs.np_real_fmod_impl,
}
ufunc_db[np.logaddexp] = {
'ff->f': npyfuncs.np_real_logaddexp_impl,
'dd->d': npyfuncs.np_real_logaddexp_impl,
}
ufunc_db[np.logaddexp2] = {
'ff->f': npyfuncs.np_real_logaddexp2_impl,
'dd->d': npyfuncs.np_real_logaddexp2_impl,
}
ufunc_db[np.power] = {
'bb->b': npyfuncs.np_int_power_impl,
'BB->B': npyfuncs.np_int_power_impl,
'hh->h': npyfuncs.np_int_power_impl,
'HH->H': npyfuncs.np_int_power_impl,
'ii->i': npyfuncs.np_int_power_impl,
'II->I': npyfuncs.np_int_power_impl,
'll->l': npyfuncs.np_int_power_impl,
'LL->L': npyfuncs.np_int_power_impl,
'qq->q': npyfuncs.np_int_power_impl,
'QQ->Q': npyfuncs.np_int_power_impl,
'ff->f': npyfuncs.np_real_power_impl,
'dd->d': npyfuncs.np_real_power_impl,
'FF->F': npyfuncs.np_complex_power_impl,
'DD->D': npyfuncs.np_complex_power_impl,
}
ufunc_db[np.rint] = {
'f->f': npyfuncs.np_real_rint_impl,
'd->d': npyfuncs.np_real_rint_impl,
'F->F': npyfuncs.np_complex_rint_impl,
'D->D': npyfuncs.np_complex_rint_impl,
}
ufunc_db[np.conjugate] = {
'b->b': builtins.real_conjugate_impl,
'B->B': builtins.real_conjugate_impl,
'h->h': builtins.real_conjugate_impl,
'H->H': builtins.real_conjugate_impl,
'i->i': builtins.real_conjugate_impl,
'I->I': builtins.real_conjugate_impl,
'l->l': builtins.real_conjugate_impl,
'L->L': builtins.real_conjugate_impl,
'q->q': builtins.real_conjugate_impl,
'Q->Q': builtins.real_conjugate_impl,
'f->f': builtins.real_conjugate_impl,
'd->d': builtins.real_conjugate_impl,
'F->F': builtins.complex_conjugate_impl,
'D->D': builtins.complex_conjugate_impl,
}
ufunc_db[np.exp] = {
'f->f': npyfuncs.np_real_exp_impl,
'd->d': npyfuncs.np_real_exp_impl,
'F->F': npyfuncs.np_complex_exp_impl,
'D->D': npyfuncs.np_complex_exp_impl,
}
ufunc_db[np.exp2] = {
'f->f': npyfuncs.np_real_exp2_impl,
'd->d': npyfuncs.np_real_exp2_impl,
'F->F': npyfuncs.np_complex_exp2_impl,
'D->D': npyfuncs.np_complex_exp2_impl,
}
ufunc_db[np.log] = {
'f->f': npyfuncs.np_real_log_impl,
'd->d': npyfuncs.np_real_log_impl,
'F->F': npyfuncs.np_complex_log_impl,
'D->D': npyfuncs.np_complex_log_impl,
}
ufunc_db[np.log2] = {
'f->f': npyfuncs.np_real_log2_impl,
'd->d': npyfuncs.np_real_log2_impl,
'F->F': npyfuncs.np_complex_log2_impl,
'D->D': npyfuncs.np_complex_log2_impl,
}
ufunc_db[np.log10] = {
'f->f': npyfuncs.np_real_log10_impl,
'd->d': npyfuncs.np_real_log10_impl,
'F->F': npyfuncs.np_complex_log10_impl,
'D->D': npyfuncs.np_complex_log10_impl,
}
ufunc_db[np.expm1] = {
'f->f': npyfuncs.np_real_expm1_impl,
'd->d': npyfuncs.np_real_expm1_impl,
'F->F': npyfuncs.np_complex_expm1_impl,
'D->D': npyfuncs.np_complex_expm1_impl,
}
ufunc_db[np.log1p] = {
'f->f': npyfuncs.np_real_log1p_impl,
'd->d': npyfuncs.np_real_log1p_impl,
'F->F': npyfuncs.np_complex_log1p_impl,
'D->D': npyfuncs.np_complex_log1p_impl,
}
ufunc_db[np.sqrt] = {
'f->f': npyfuncs.np_real_sqrt_impl,
'd->d': npyfuncs.np_real_sqrt_impl,
'F->F': npyfuncs.np_complex_sqrt_impl,
'D->D': npyfuncs.np_complex_sqrt_impl,
}
ufunc_db[np.square] = {
'b->b': npyfuncs.np_int_square_impl,
'B->B': npyfuncs.np_int_square_impl,
'h->h': npyfuncs.np_int_square_impl,
'H->H': npyfuncs.np_int_square_impl,
'i->i': npyfuncs.np_int_square_impl,
'I->I': npyfuncs.np_int_square_impl,
'l->l': npyfuncs.np_int_square_impl,
'L->L': npyfuncs.np_int_square_impl,
'q->q': npyfuncs.np_int_square_impl,
'Q->Q': npyfuncs.np_int_square_impl,
'f->f': npyfuncs.np_real_square_impl,
'd->d': npyfuncs.np_real_square_impl,
'F->F': npyfuncs.np_complex_square_impl,
'D->D': npyfuncs.np_complex_square_impl,
}
ufunc_db[np.reciprocal] = {
'b->b': npyfuncs.np_int_reciprocal_impl,
'B->B': npyfuncs.np_int_reciprocal_impl,
'h->h': npyfuncs.np_int_reciprocal_impl,
'H->H': npyfuncs.np_int_reciprocal_impl,
'i->i': npyfuncs.np_int_reciprocal_impl,
'I->I': npyfuncs.np_int_reciprocal_impl,
'l->l': npyfuncs.np_int_reciprocal_impl,
'L->L': npyfuncs.np_int_reciprocal_impl,
'q->q': npyfuncs.np_int_reciprocal_impl,
'Q->Q': npyfuncs.np_int_reciprocal_impl,
'f->f': npyfuncs.np_real_reciprocal_impl,
'd->d': npyfuncs.np_real_reciprocal_impl,
'F->F': npyfuncs.np_complex_reciprocal_impl,
'D->D': npyfuncs.np_complex_reciprocal_impl,
}
ufunc_db[np.sin] = {
'f->f': npyfuncs.np_real_sin_impl,
'd->d': npyfuncs.np_real_sin_impl,
'F->F': npyfuncs.np_complex_sin_impl,
'D->D': npyfuncs.np_complex_sin_impl,
}
ufunc_db[np.cos] = {
'f->f': npyfuncs.np_real_cos_impl,
'd->d': npyfuncs.np_real_cos_impl,
'F->F': npyfuncs.np_complex_cos_impl,
'D->D': npyfuncs.np_complex_cos_impl,
}
ufunc_db[np.tan] = {
'f->f': npyfuncs.np_real_tan_impl,
'd->d': npyfuncs.np_real_tan_impl,
'F->F': npyfuncs.np_complex_tan_impl,
'D->D': npyfuncs.np_complex_tan_impl,
}
ufunc_db[np.arcsin] = {
'f->f': npyfuncs.np_real_asin_impl,
'd->d': npyfuncs.np_real_asin_impl,
'F->F': npyfuncs.np_complex_asin_impl,
'D->D': npyfuncs.np_complex_asin_impl,
}
ufunc_db[np.arccos] = {
'f->f': npyfuncs.np_real_acos_impl,
'd->d': npyfuncs.np_real_acos_impl,
'F->F': cmathimpl.acos_impl,
'D->D': cmathimpl.acos_impl,
}
ufunc_db[np.arctan] = {
'f->f': npyfuncs.np_real_atan_impl,
'd->d': npyfuncs.np_real_atan_impl,
'F->F': npyfuncs.np_complex_atan_impl,
'D->D': npyfuncs.np_complex_atan_impl,
}
ufunc_db[np.arctan2] = {
'ff->f': npyfuncs.np_real_atan2_impl,
'dd->d': npyfuncs.np_real_atan2_impl,
}
ufunc_db[np.hypot] = {
'ff->f': npyfuncs.np_real_hypot_impl,
'dd->d': npyfuncs.np_real_hypot_impl,
}
ufunc_db[np.sinh] = {
'f->f': npyfuncs.np_real_sinh_impl,
'd->d': npyfuncs.np_real_sinh_impl,
'F->F': npyfuncs.np_complex_sinh_impl,
'D->D': npyfuncs.np_complex_sinh_impl,
}
ufunc_db[np.cosh] = {
'f->f': npyfuncs.np_real_cosh_impl,
'd->d': npyfuncs.np_real_cosh_impl,
'F->F': npyfuncs.np_complex_cosh_impl,
'D->D': npyfuncs.np_complex_cosh_impl,
}
ufunc_db[np.tanh] = {
'f->f': npyfuncs.np_real_tanh_impl,
'd->d': npyfuncs.np_real_tanh_impl,
'F->F': npyfuncs.np_complex_tanh_impl,
'D->D': npyfuncs.np_complex_tanh_impl,
}
ufunc_db[np.arcsinh] = {
'f->f': npyfuncs.np_real_asinh_impl,
'd->d': npyfuncs.np_real_asinh_impl,
'F->F': npyfuncs.np_complex_asinh_impl,
'D->D': npyfuncs.np_complex_asinh_impl,
}<|fim▁hole|> 'd->d': npyfuncs.np_real_acosh_impl,
'F->F': npyfuncs.np_complex_acosh_impl,
'D->D': npyfuncs.np_complex_acosh_impl,
}
ufunc_db[np.arctanh] = {
'f->f': npyfuncs.np_real_atanh_impl,
'd->d': npyfuncs.np_real_atanh_impl,
'F->F': npyfuncs.np_complex_atanh_impl,
'D->D': npyfuncs.np_complex_atanh_impl,
}
ufunc_db[np.deg2rad] = {
'f->f': npyfuncs.np_real_deg2rad_impl,
'd->d': npyfuncs.np_real_deg2rad_impl,
}
ufunc_db[np.radians] = ufunc_db[np.deg2rad]
ufunc_db[np.rad2deg] = {
'f->f': npyfuncs.np_real_rad2deg_impl,
'd->d': npyfuncs.np_real_rad2deg_impl,
}
ufunc_db[np.degrees] = ufunc_db[np.rad2deg]
ufunc_db[np.floor] = {
'f->f': npyfuncs.np_real_floor_impl,
'd->d': npyfuncs.np_real_floor_impl,
}
ufunc_db[np.ceil] = {
'f->f': npyfuncs.np_real_ceil_impl,
'd->d': npyfuncs.np_real_ceil_impl,
}
ufunc_db[np.trunc] = {
'f->f': npyfuncs.np_real_trunc_impl,
'd->d': npyfuncs.np_real_trunc_impl,
}
ufunc_db[np.fabs] = {
'f->f': npyfuncs.np_real_fabs_impl,
'd->d': npyfuncs.np_real_fabs_impl,
}
# logical ufuncs
ufunc_db[np.greater] = {
'??->?': builtins.int_ugt_impl,
'bb->?': builtins.int_sgt_impl,
'BB->?': builtins.int_ugt_impl,
'hh->?': builtins.int_sgt_impl,
'HH->?': builtins.int_ugt_impl,
'ii->?': builtins.int_sgt_impl,
'II->?': builtins.int_ugt_impl,
'll->?': builtins.int_sgt_impl,
'LL->?': builtins.int_ugt_impl,
'qq->?': builtins.int_sgt_impl,
'QQ->?': builtins.int_ugt_impl,
'ff->?': builtins.real_gt_impl,
'dd->?': builtins.real_gt_impl,
'FF->?': npyfuncs.np_complex_gt_impl,
'DD->?': npyfuncs.np_complex_gt_impl,
}
ufunc_db[np.greater_equal] = {
'??->?': builtins.int_uge_impl,
'bb->?': builtins.int_sge_impl,
'BB->?': builtins.int_uge_impl,
'hh->?': builtins.int_sge_impl,
'HH->?': builtins.int_uge_impl,
'ii->?': builtins.int_sge_impl,
'II->?': builtins.int_uge_impl,
'll->?': builtins.int_sge_impl,
'LL->?': builtins.int_uge_impl,
'qq->?': builtins.int_sge_impl,
'QQ->?': builtins.int_uge_impl,
'ff->?': builtins.real_ge_impl,
'dd->?': builtins.real_ge_impl,
'FF->?': npyfuncs.np_complex_ge_impl,
'DD->?': npyfuncs.np_complex_ge_impl,
}
ufunc_db[np.less] = {
'??->?': builtins.int_ult_impl,
'bb->?': builtins.int_slt_impl,
'BB->?': builtins.int_ult_impl,
'hh->?': builtins.int_slt_impl,
'HH->?': builtins.int_ult_impl,
'ii->?': builtins.int_slt_impl,
'II->?': builtins.int_ult_impl,
'll->?': builtins.int_slt_impl,
'LL->?': builtins.int_ult_impl,
'qq->?': builtins.int_slt_impl,
'QQ->?': builtins.int_ult_impl,
'ff->?': builtins.real_lt_impl,
'dd->?': builtins.real_lt_impl,
'FF->?': npyfuncs.np_complex_lt_impl,
'DD->?': npyfuncs.np_complex_lt_impl,
}
ufunc_db[np.less_equal] = {
'??->?': builtins.int_ule_impl,
'bb->?': builtins.int_sle_impl,
'BB->?': builtins.int_ule_impl,
'hh->?': builtins.int_sle_impl,
'HH->?': builtins.int_ule_impl,
'ii->?': builtins.int_sle_impl,
'II->?': builtins.int_ule_impl,
'll->?': builtins.int_sle_impl,
'LL->?': builtins.int_ule_impl,
'qq->?': builtins.int_sle_impl,
'QQ->?': builtins.int_ule_impl,
'ff->?': builtins.real_le_impl,
'dd->?': builtins.real_le_impl,
'FF->?': npyfuncs.np_complex_le_impl,
'DD->?': npyfuncs.np_complex_le_impl,
}
ufunc_db[np.not_equal] = {
'??->?': builtins.int_ne_impl,
'bb->?': builtins.int_ne_impl,
'BB->?': builtins.int_ne_impl,
'hh->?': builtins.int_ne_impl,
'HH->?': builtins.int_ne_impl,
'ii->?': builtins.int_ne_impl,
'II->?': builtins.int_ne_impl,
'll->?': builtins.int_ne_impl,
'LL->?': builtins.int_ne_impl,
'qq->?': builtins.int_ne_impl,
'QQ->?': builtins.int_ne_impl,
'ff->?': builtins.real_ne_impl,
'dd->?': builtins.real_ne_impl,
'FF->?': npyfuncs.np_complex_ne_impl,
'DD->?': npyfuncs.np_complex_ne_impl,
}
ufunc_db[np.equal] = {
'??->?': builtins.int_eq_impl,
'bb->?': builtins.int_eq_impl,
'BB->?': builtins.int_eq_impl,
'hh->?': builtins.int_eq_impl,
'HH->?': builtins.int_eq_impl,
'ii->?': builtins.int_eq_impl,
'II->?': builtins.int_eq_impl,
'll->?': builtins.int_eq_impl,
'LL->?': builtins.int_eq_impl,
'qq->?': builtins.int_eq_impl,
'QQ->?': builtins.int_eq_impl,
'ff->?': builtins.real_eq_impl,
'dd->?': builtins.real_eq_impl,
'FF->?': npyfuncs.np_complex_eq_impl,
'DD->?': npyfuncs.np_complex_eq_impl,
}
ufunc_db[np.logical_and] = {
'??->?': npyfuncs.np_logical_and_impl,
'bb->?': npyfuncs.np_logical_and_impl,
'BB->?': npyfuncs.np_logical_and_impl,
'hh->?': npyfuncs.np_logical_and_impl,
'HH->?': npyfuncs.np_logical_and_impl,
'ii->?': npyfuncs.np_logical_and_impl,
'II->?': npyfuncs.np_logical_and_impl,
'll->?': npyfuncs.np_logical_and_impl,
'LL->?': npyfuncs.np_logical_and_impl,
'qq->?': npyfuncs.np_logical_and_impl,
'QQ->?': npyfuncs.np_logical_and_impl,
'ff->?': npyfuncs.np_logical_and_impl,
'dd->?': npyfuncs.np_logical_and_impl,
'FF->?': npyfuncs.np_complex_logical_and_impl,
'DD->?': npyfuncs.np_complex_logical_and_impl,
}
ufunc_db[np.logical_or] = {
'??->?': npyfuncs.np_logical_or_impl,
'bb->?': npyfuncs.np_logical_or_impl,
'BB->?': npyfuncs.np_logical_or_impl,
'hh->?': npyfuncs.np_logical_or_impl,
'HH->?': npyfuncs.np_logical_or_impl,
'ii->?': npyfuncs.np_logical_or_impl,
'II->?': npyfuncs.np_logical_or_impl,
'll->?': npyfuncs.np_logical_or_impl,
'LL->?': npyfuncs.np_logical_or_impl,
'qq->?': npyfuncs.np_logical_or_impl,
'QQ->?': npyfuncs.np_logical_or_impl,
'ff->?': npyfuncs.np_logical_or_impl,
'dd->?': npyfuncs.np_logical_or_impl,
'FF->?': npyfuncs.np_complex_logical_or_impl,
'DD->?': npyfuncs.np_complex_logical_or_impl,
}
ufunc_db[np.logical_xor] = {
'??->?': npyfuncs.np_logical_xor_impl,
'bb->?': npyfuncs.np_logical_xor_impl,
'BB->?': npyfuncs.np_logical_xor_impl,
'hh->?': npyfuncs.np_logical_xor_impl,
'HH->?': npyfuncs.np_logical_xor_impl,
'ii->?': npyfuncs.np_logical_xor_impl,
'II->?': npyfuncs.np_logical_xor_impl,
'll->?': npyfuncs.np_logical_xor_impl,
'LL->?': npyfuncs.np_logical_xor_impl,
'qq->?': npyfuncs.np_logical_xor_impl,
'QQ->?': npyfuncs.np_logical_xor_impl,
'ff->?': npyfuncs.np_logical_xor_impl,
'dd->?': npyfuncs.np_logical_xor_impl,
'FF->?': npyfuncs.np_complex_logical_xor_impl,
'DD->?': npyfuncs.np_complex_logical_xor_impl,
}
ufunc_db[np.logical_not] = {
'?->?': npyfuncs.np_logical_not_impl,
'b->?': npyfuncs.np_logical_not_impl,
'B->?': npyfuncs.np_logical_not_impl,
'h->?': npyfuncs.np_logical_not_impl,
'H->?': npyfuncs.np_logical_not_impl,
'i->?': npyfuncs.np_logical_not_impl,
'I->?': npyfuncs.np_logical_not_impl,
'l->?': npyfuncs.np_logical_not_impl,
'L->?': npyfuncs.np_logical_not_impl,
'q->?': npyfuncs.np_logical_not_impl,
'Q->?': npyfuncs.np_logical_not_impl,
'f->?': npyfuncs.np_logical_not_impl,
'd->?': npyfuncs.np_logical_not_impl,
'F->?': npyfuncs.np_complex_logical_not_impl,
'D->?': npyfuncs.np_complex_logical_not_impl,
}
ufunc_db[np.maximum] = {
'??->?': npyfuncs.np_logical_or_impl,
'bb->b': npyfuncs.np_int_smax_impl,
'BB->B': npyfuncs.np_int_umax_impl,
'hh->h': npyfuncs.np_int_smax_impl,
'HH->H': npyfuncs.np_int_umax_impl,
'ii->i': npyfuncs.np_int_smax_impl,
'II->I': npyfuncs.np_int_umax_impl,
'll->l': npyfuncs.np_int_smax_impl,
'LL->L': npyfuncs.np_int_umax_impl,
'qq->q': npyfuncs.np_int_smax_impl,
'QQ->Q': npyfuncs.np_int_umax_impl,
'ff->f': npyfuncs.np_real_maximum_impl,
'dd->d': npyfuncs.np_real_maximum_impl,
'FF->F': npyfuncs.np_complex_maximum_impl,
'DD->D': npyfuncs.np_complex_maximum_impl,
}
ufunc_db[np.minimum] = {
'??->?': npyfuncs.np_logical_and_impl,
'bb->b': npyfuncs.np_int_smin_impl,
'BB->B': npyfuncs.np_int_umin_impl,
'hh->h': npyfuncs.np_int_smin_impl,
'HH->H': npyfuncs.np_int_umin_impl,
'ii->i': npyfuncs.np_int_smin_impl,
'II->I': npyfuncs.np_int_umin_impl,
'll->l': npyfuncs.np_int_smin_impl,
'LL->L': npyfuncs.np_int_umin_impl,
'qq->q': npyfuncs.np_int_smin_impl,
'QQ->Q': npyfuncs.np_int_umin_impl,
'ff->f': npyfuncs.np_real_minimum_impl,
'dd->d': npyfuncs.np_real_minimum_impl,
'FF->F': npyfuncs.np_complex_minimum_impl,
'DD->D': npyfuncs.np_complex_minimum_impl,
}
ufunc_db[np.fmax] = {
'??->?': npyfuncs.np_logical_or_impl,
'bb->b': npyfuncs.np_int_smax_impl,
'BB->B': npyfuncs.np_int_umax_impl,
'hh->h': npyfuncs.np_int_smax_impl,
'HH->H': npyfuncs.np_int_umax_impl,
'ii->i': npyfuncs.np_int_smax_impl,
'II->I': npyfuncs.np_int_umax_impl,
'll->l': npyfuncs.np_int_smax_impl,
'LL->L': npyfuncs.np_int_umax_impl,
'qq->q': npyfuncs.np_int_smax_impl,
'QQ->Q': npyfuncs.np_int_umax_impl,
'ff->f': npyfuncs.np_real_fmax_impl,
'dd->d': npyfuncs.np_real_fmax_impl,
'FF->F': npyfuncs.np_complex_fmax_impl,
'DD->D': npyfuncs.np_complex_fmax_impl,
}
ufunc_db[np.fmin] = {
'??->?': npyfuncs.np_logical_and_impl,
'bb->b': npyfuncs.np_int_smin_impl,
'BB->B': npyfuncs.np_int_umin_impl,
'hh->h': npyfuncs.np_int_smin_impl,
'HH->H': npyfuncs.np_int_umin_impl,
'ii->i': npyfuncs.np_int_smin_impl,
'II->I': npyfuncs.np_int_umin_impl,
'll->l': npyfuncs.np_int_smin_impl,
'LL->L': npyfuncs.np_int_umin_impl,
'qq->q': npyfuncs.np_int_smin_impl,
'QQ->Q': npyfuncs.np_int_umin_impl,
'ff->f': npyfuncs.np_real_fmin_impl,
'dd->d': npyfuncs.np_real_fmin_impl,
'FF->F': npyfuncs.np_complex_fmin_impl,
'DD->D': npyfuncs.np_complex_fmin_impl,
}
# misc floating functions
ufunc_db[np.isnan] = {
'f->?': npyfuncs.np_real_isnan_impl,
'd->?': npyfuncs.np_real_isnan_impl,
'F->?': npyfuncs.np_complex_isnan_impl,
'D->?': npyfuncs.np_complex_isnan_impl,
}
ufunc_db[np.isinf] = {
'f->?': npyfuncs.np_real_isinf_impl,
'd->?': npyfuncs.np_real_isinf_impl,
'F->?': npyfuncs.np_complex_isinf_impl,
'D->?': npyfuncs.np_complex_isinf_impl,
}
ufunc_db[np.isfinite] = {
'f->?': npyfuncs.np_real_isfinite_impl,
'd->?': npyfuncs.np_real_isfinite_impl,
'F->?': npyfuncs.np_complex_isfinite_impl,
'D->?': npyfuncs.np_complex_isfinite_impl,
}
ufunc_db[np.signbit] = {
'f->?': npyfuncs.np_real_signbit_impl,
'd->?': npyfuncs.np_real_signbit_impl,
}
ufunc_db[np.copysign] = {
'ff->f': npyfuncs.np_real_copysign_impl,
'dd->d': npyfuncs.np_real_copysign_impl,
}
ufunc_db[np.nextafter] = {
'ff->f': npyfuncs.np_real_nextafter_impl,
'dd->d': npyfuncs.np_real_nextafter_impl,
}
ufunc_db[np.spacing] = {
'f->f': npyfuncs.np_real_spacing_impl,
'd->d': npyfuncs.np_real_spacing_impl,
}
ufunc_db[np.ldexp] = {
'fi->f': npyfuncs.np_real_ldexp_impl,
'fl->f': npyfuncs.np_real_ldexp_impl,
'di->d': npyfuncs.np_real_ldexp_impl,
'dl->d': npyfuncs.np_real_ldexp_impl,
}
# bit twiddling functions
ufunc_db[np.bitwise_and] = {
'??->?': builtins.int_and_impl,
'bb->b': builtins.int_and_impl,
'BB->B': builtins.int_and_impl,
'hh->h': builtins.int_and_impl,
'HH->H': builtins.int_and_impl,
'ii->i': builtins.int_and_impl,
'II->I': builtins.int_and_impl,
'll->l': builtins.int_and_impl,
'LL->L': builtins.int_and_impl,
'qq->q': builtins.int_and_impl,
'QQ->Q': builtins.int_and_impl,
}
ufunc_db[np.bitwise_or] = {
'??->?': builtins.int_or_impl,
'bb->b': builtins.int_or_impl,
'BB->B': builtins.int_or_impl,
'hh->h': builtins.int_or_impl,
'HH->H': builtins.int_or_impl,
'ii->i': builtins.int_or_impl,
'II->I': builtins.int_or_impl,
'll->l': builtins.int_or_impl,
'LL->L': builtins.int_or_impl,
'qq->q': builtins.int_or_impl,
'QQ->Q': builtins.int_or_impl,
}
ufunc_db[np.bitwise_xor] = {
'??->?': builtins.int_xor_impl,
'bb->b': builtins.int_xor_impl,
'BB->B': builtins.int_xor_impl,
'hh->h': builtins.int_xor_impl,
'HH->H': builtins.int_xor_impl,
'ii->i': builtins.int_xor_impl,
'II->I': builtins.int_xor_impl,
'll->l': builtins.int_xor_impl,
'LL->L': builtins.int_xor_impl,
'qq->q': builtins.int_xor_impl,
'QQ->Q': builtins.int_xor_impl,
}
ufunc_db[np.invert] = { # aka np.bitwise_not
'?->?': builtins.bool_invert_impl,
'b->b': builtins.int_invert_impl,
'B->B': builtins.int_invert_impl,
'h->h': builtins.int_invert_impl,
'H->H': builtins.int_invert_impl,
'i->i': builtins.int_invert_impl,
'I->I': builtins.int_invert_impl,
'l->l': builtins.int_invert_impl,
'L->L': builtins.int_invert_impl,
'q->q': builtins.int_invert_impl,
'Q->Q': builtins.int_invert_impl,
}
ufunc_db[np.left_shift] = {
'bb->b': builtins.int_shl_impl,
'BB->B': builtins.int_shl_impl,
'hh->h': builtins.int_shl_impl,
'HH->H': builtins.int_shl_impl,
'ii->i': builtins.int_shl_impl,
'II->I': builtins.int_shl_impl,
'll->l': builtins.int_shl_impl,
'LL->L': builtins.int_shl_impl,
'qq->q': builtins.int_shl_impl,
'QQ->Q': builtins.int_shl_impl,
}
ufunc_db[np.right_shift] = {
'bb->b': builtins.int_shr_impl,
'BB->B': builtins.int_shr_impl,
'hh->h': builtins.int_shr_impl,
'HH->H': builtins.int_shr_impl,
'ii->i': builtins.int_shr_impl,
'II->I': builtins.int_shr_impl,
'll->l': builtins.int_shr_impl,
'LL->L': builtins.int_shr_impl,
'qq->q': builtins.int_shr_impl,
'QQ->Q': builtins.int_shr_impl,
}
# Inject datetime64 support
try:
from . import npdatetime
except NotImplementedError:
# Numpy 1.6
pass
else:
ufunc_db[np.negative].update({
'm->m': npdatetime.timedelta_neg_impl,
})
ufunc_db[np.absolute].update({
'm->m': npdatetime.timedelta_abs_impl,
})
ufunc_db[np.sign].update({
'm->m': npdatetime.timedelta_sign_impl,
})
ufunc_db[np.add].update({
'mm->m': npdatetime.timedelta_add_impl,
'Mm->M': npdatetime.datetime_plus_timedelta,
'mM->M': npdatetime.timedelta_plus_datetime,
})
ufunc_db[np.subtract].update({
'mm->m': npdatetime.timedelta_sub_impl,
'Mm->M': npdatetime.datetime_minus_timedelta,
'MM->m': npdatetime.datetime_minus_datetime,
})
ufunc_db[np.multiply].update({
'mq->m': npdatetime.timedelta_times_number,
'md->m': npdatetime.timedelta_times_number,
'qm->m': npdatetime.number_times_timedelta,
'dm->m': npdatetime.number_times_timedelta,
})
if np.divide != np.true_divide:
ufunc_db[np.divide].update({
'mq->m': npdatetime.timedelta_over_number,
'md->m': npdatetime.timedelta_over_number,
'mm->d': npdatetime.timedelta_over_timedelta,
})
ufunc_db[np.true_divide].update({
'mq->m': npdatetime.timedelta_over_number,
'md->m': npdatetime.timedelta_over_number,
'mm->d': npdatetime.timedelta_over_timedelta,
})
ufunc_db[np.floor_divide].update({
'mq->m': npdatetime.timedelta_over_number,
'md->m': npdatetime.timedelta_over_number,
})
ufunc_db[np.equal].update({
'MM->?': npdatetime.datetime_eq_datetime_impl,
'mm->?': npdatetime.timedelta_eq_timedelta_impl,
})
ufunc_db[np.not_equal].update({
'MM->?': npdatetime.datetime_ne_datetime_impl,
'mm->?': npdatetime.timedelta_ne_timedelta_impl,
})
ufunc_db[np.less].update({
'MM->?': npdatetime.datetime_lt_datetime_impl,
'mm->?': npdatetime.timedelta_lt_timedelta_impl,
})
ufunc_db[np.less_equal].update({
'MM->?': npdatetime.datetime_le_datetime_impl,
'mm->?': npdatetime.timedelta_le_timedelta_impl,
})
ufunc_db[np.greater].update({
'MM->?': npdatetime.datetime_gt_datetime_impl,
'mm->?': npdatetime.timedelta_gt_timedelta_impl,
})
ufunc_db[np.greater_equal].update({
'MM->?': npdatetime.datetime_ge_datetime_impl,
'mm->?': npdatetime.timedelta_ge_timedelta_impl,
})
ufunc_db[np.maximum].update({
'MM->M': npdatetime.datetime_max_impl,
'mm->m': npdatetime.timedelta_max_impl,
})
ufunc_db[np.minimum].update({
'MM->M': npdatetime.datetime_min_impl,
'mm->m': npdatetime.timedelta_min_impl,
})
# there is no difference for datetime/timedelta in maximum/fmax
# and minimum/fmin
ufunc_db[np.fmax].update({
'MM->M': npdatetime.datetime_max_impl,
'mm->m': npdatetime.timedelta_max_impl,
})
ufunc_db[np.fmin].update({
'MM->M': npdatetime.datetime_min_impl,
'mm->m': npdatetime.timedelta_min_impl,
})<|fim▁end|> |
ufunc_db[np.arccosh] = {
'f->f': npyfuncs.np_real_acosh_impl, |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>"use strict"
var express = require('express');
var app = express();
var elasticsearch = require('elasticsearch');
var client = new elasticsearch.Client({
host: 'localhost:9200',
log: 'trace'
});
var router = express.Router();
router.get('/accidents', function(req, res) {
var query = {
index: 'wildmap',
type: 'accidents',
size: 10000,
body: {
query: {
bool: {
must: [
{
match_all: {}
}
]
}
}
}
}
var animal_type = req.query.animal_type;
var day_type = req.query.day_type;
var season = req.query.season;
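    // Optional query-string filters; the literal value "all" (or a missing
    // parameter) means "do not filter on this field".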
if(animal_type && animal_type!="all"){
query.body.query.bool.must.push({
term: {
"accidents.pin.animal_type": animal_type
}
});
}
if(day_type && day_type!="all"){
query.body.query.bool.must.push({
term: {
"accidents.pin.day_type": day_type
}
});
}
if(season && season!="all"){
query.body.query.bool.must.push({
term: {
"accidents.pin.season": season
}
});
}
console.log(query);
client.search(query).then(function (resp) {
console.log(resp.hits.hits);<|fim▁hole|> return e._source.pin;
})
res.send(response);
}, function (err) {
console.log(err.message);
res.status(500).end();
});
});
app.use('/api', router);
var port = process.env.PORT || 8080;
app.listen(port);
console.log("Backend is running on port " + port);<|fim▁end|> | var response = resp.hits.hits.map(function(e){ |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | from .wiki import * |
<|file_name|>debuginfo.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # Debug Info Module
//!
//! This module serves the purpose of generating debug symbols. We use LLVM's
//! [source level debugging](http://llvm.org/docs/SourceLevelDebugging.html)
//! features for generating the debug information. The general principle is this:
//!
//! Given the right metadata in the LLVM IR, the LLVM code generator is able to
//! create DWARF debug symbols for the given code. The
//! [metadata](http://llvm.org/docs/LangRef.html#metadata-type) is structured much
//! like DWARF *debugging information entries* (DIE), representing type information
//! such as datatype layout, function signatures, block layout, variable location
//! and scope information, etc. It is the purpose of this module to generate correct
//! metadata and insert it into the LLVM IR.
//!
//! As the exact format of metadata trees may change between different LLVM
//! versions, we now use LLVM
//! [DIBuilder](http://llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html) to
//! create metadata where possible. This will hopefully ease the adaption of this
//! module to future LLVM versions.
//!
//! The public API of the module is a set of functions that will insert the correct
//! metadata into the LLVM IR when called with the right parameters. The module is
//! thus driven from an outside client with functions like
//! `debuginfo::create_local_var_metadata(bcx: block, local: &ast::local)`.
//!
//! Internally the module will try to reuse already created metadata by utilizing a
//! cache. The way to get a shared metadata node when needed is thus to just call
//! the corresponding function in this module:
//!
//! let file_metadata = file_metadata(crate_context, path);
//!
//! The function will take care of probing the cache for an existing node for that
//! exact file path.
//!
//! All private state used by the module is stored within either the
//! CrateDebugContext struct (owned by the CrateContext) or the FunctionDebugContext
//! (owned by the FunctionContext).
//!
//! This file consists of three conceptual sections:
//! 1. The public interface of the module
//! 2. Module-internal metadata creation functions
//! 3. Minor utility functions
//!
//!
//! ## Recursive Types
//!
//! Some kinds of types, such as structs and enums can be recursive. That means that
//! the type definition of some type X refers to some other type which in turn
//! (transitively) refers to X. This introduces cycles into the type referral graph.
//! A naive algorithm doing an on-demand, depth-first traversal of this graph when
//! describing types can get trapped in an endless loop when it reaches such a
//! cycle.
//!
//! For example, the following simple type for a singly-linked list...
//!
//! ```
//! struct List {
//! value: int,
//! tail: Option<Box<List>>,
//! }
//! ```
//!
//! will generate the following callstack with a naive DFS algorithm:
//!
//! ```
//! describe(t = List)
//! describe(t = int)
//! describe(t = Option<Box<List>>)
//! describe(t = Box<List>)
//! describe(t = List) // at the beginning again...
//! ...
//! ```
//!
//! To break cycles like these, we use "forward declarations". That is, when the
//! algorithm encounters a possibly recursive type (any struct or enum), it
//! immediately creates a type description node and inserts it into the cache
//! *before* describing the members of the type. This type description is just a
//! stub (as type members are not described and added to it yet) but it allows the
//! algorithm to already refer to the type. After the stub is inserted into the
//! cache, the algorithm continues as before. If it now encounters a recursive
//! reference, it will hit the cache and does not try to describe the type anew.
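//!
//! In pseudocode (hypothetical names, purely for illustration) the pattern is:
//!
//!     let stub = create_stub_metadata(ty);
//!     cache.insert(ty, stub);              // later lookups now hit this entry
//!     let members = describe_members(ty);  // may recursively look up `ty`
//!     set_members(stub, members);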
//!
//! This behaviour is encapsulated in the 'RecursiveTypeDescription' enum, which
//! represents a kind of continuation, storing all state needed to continue
//! traversal at the type members after the type has been registered with the cache.
//! (This implementation approach might be a tad over-engineered and may change in
//! the future)
//!
//!
//! ## Source Locations and Line Information
//!
//! In addition to data type descriptions, the debugging information must also make
//! it possible to map machine code locations back to source code locations in order
//! to be useful. This functionality is also handled in this module. The following
//! functions control source mappings:
//!
//! + set_source_location()
//! + clear_source_location()
//! + start_emitting_source_locations()
//!
//! `set_source_location()` allows setting the current source location. All IR
//! instructions created after a call to this function will be linked to the given
//! source location, until another location is specified with
//! `set_source_location()` or the source location is cleared with
//! `clear_source_location()`. In the latter case, subsequent IR instructions will
//! not be linked to any source location. As you can see, this is a stateful API
//! (mimicking the one in LLVM), so be careful with source locations set by previous
//! calls. It's probably best to not rely on any specific state being present at a
//! given point in code.
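//!
//! A typical (hypothetical) usage pattern, with the argument lists elided since
//! the exact signatures live elsewhere in this module, looks like this:
//!
//!     set_source_location(/* fcx, node_id, span */);
//!     // ... emit IR for the expression; it gets linked to the given span ...
//!     clear_source_location(/* fcx */);
//!     // ... emit compiler-internal IR that carries no source location ...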
//!
//! One topic that deserves some extra attention is *function prologues*. At the
//! beginning of a function's machine code there are typically a few instructions
//! for loading argument values into allocas and checking if there's enough stack
//! space for the function to execute. This *prologue* is not visible in the source
//! code and LLVM puts a special PROLOGUE END marker into the line table at the
//! first non-prologue instruction of the function. In order to find out where the
//! prologue ends, LLVM looks for the first instruction in the function body that is
//! linked to a source location. So, when generating prologue instructions we have
//! to make sure that we don't emit source location information until the 'real'
//! function body begins. For this reason, source location emission is disabled by
//! default for any new function being translated and is only activated after a call
//! to the third function from the list above, `start_emitting_source_locations()`.
//! This function should be called right before regularly starting to translate the
//! top-level block of the given function.
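//!
//! Concretely (illustrative only), a function translation routine first emits the
//! whole prologue and only then calls:
//!
//!     start_emitting_source_locations(/* fcx */);
//!     // ... translate the top-level block; source locations flow from here on ...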
//!
//! There is one exception to the above rule: `llvm.dbg.declare` instructions must be
//! linked to the source location of the variable being declared. For function
//! parameters these `llvm.dbg.declare` instructions typically occur in the middle
//! of the prologue, however, they are ignored by LLVM's prologue detection. The
//! `create_argument_metadata()` and related functions take care of linking the
//! `llvm.dbg.declare` instructions to the correct source locations even while
//! source location emission is still disabled, so there is no need to do anything
//! special with source location handling here.
//!
//! ## Unique Type Identification
//!
//! In order for link-time optimization to work properly, LLVM needs a unique type
//! identifier that tells it across compilation units which types are the same as
//! others. This type identifier is created by TypeMap::get_unique_type_id_of_type()
//! using the following algorithm:
//!
//! (1) Primitive types have their name as ID
//! (2) Structs, enums and traits have a multipart identifier
//!
//! (1) The first part is the SVH (strict version hash) of the crate they were
//! originally defined in
//!
//! (2) The second part is the ast::NodeId of the definition in their original
//! crate
//!
//! (3) The final part is a concatenation of the type IDs of their concrete type
//! arguments if they are generic types.
//!
//! (3) Tuple-, pointer and function types are structurally identified, which means
//! that they are equivalent if their component types are equivalent (i.e. (int,
//! int) is the same regardless of which crate it is used in).
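//!
//! As a purely illustrative example (hash and node id made up), a local struct
//! `Foo<int>` would receive an ID of roughly the shape
//! `{struct 5c6ac9f0/2a<{int},>}`, following the scheme implemented in
//! `from_def_id_and_substs()` below.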
//!
//! This algorithm also provides a stable ID for types that are defined in one crate
//! but instantiated from metadata within another crate. We just have to take care
//! to always map crate and node IDs back to the original crate context.
//!
//! As a side-effect these unique type IDs also help to solve a problem arising from
//! lifetime parameters. Since lifetime parameters are completely omitted in
//! debuginfo, more than one `Ty` instance may map to the same debuginfo type
//! metadata, that is, some struct `Struct<'a>` may have N instantiations with
//! different concrete substitutions for `'a`, and thus there will be N `Ty`
//! instances for the type `Struct<'a>` even though it is not generic otherwise.
//! Unfortunately this means that we cannot use `ty::type_id()` as cheap identifier
//! for type metadata---we have done this in the past, but it led to unnecessary
//! metadata duplication in the best case and LLVM assertions in the worst. However,
//! the unique type ID as described above *can* be used as identifier. Since it is
//! comparatively expensive to construct, though, `ty::type_id()` is still used
//! additionally as an optimization for cases where the exact same type has been
//! seen before (which is most of the time).
use self::VariableAccess::*;
use self::VariableKind::*;
use self::MemberOffset::*;
use self::MemberDescriptionFactory::*;
use self::RecursiveTypeDescription::*;
use self::EnumDiscriminantInfo::*;
use self::InternalDebugLocation::*;
use llvm;
use llvm::{ModuleRef, ContextRef, ValueRef};
use llvm::debuginfo::*;
use metadata::csearch;
use middle::subst::{self, Substs};
use trans::{self, adt, machine, type_of};
use trans::common::{self, NodeIdAndSpan, CrateContext, FunctionContext, Block,
C_bytes, NormalizingClosureTyper};
use trans::_match::{BindingInfo, TrByCopy, TrByMove, TrByRef};
use trans::monomorphize;
use trans::type_::Type;
use middle::ty::{self, Ty, ClosureTyper};
use middle::pat_util;
use session::config::{self, FullDebugInfo, LimitedDebugInfo, NoDebugInfo};
use util::nodemap::{DefIdMap, NodeMap, FnvHashMap, FnvHashSet};
use util::ppaux;
use libc::{c_uint, c_longlong};
use std::ffi::CString;
use std::cell::{Cell, RefCell};
use std::ptr;
use std::rc::{Rc, Weak};
use syntax::util::interner::Interner;
use syntax::codemap::{Span, Pos};
use syntax::{ast, codemap, ast_util, ast_map, attr};
use syntax::ast_util::PostExpansionMethod;
use syntax::parse::token::{self, special_idents};
const DW_LANG_RUST: c_uint = 0x9000;
#[allow(non_upper_case_globals)]
const DW_TAG_auto_variable: c_uint = 0x100;
#[allow(non_upper_case_globals)]
const DW_TAG_arg_variable: c_uint = 0x101;
#[allow(non_upper_case_globals)]
const DW_ATE_boolean: c_uint = 0x02;
#[allow(non_upper_case_globals)]
const DW_ATE_float: c_uint = 0x04;
#[allow(non_upper_case_globals)]
const DW_ATE_signed: c_uint = 0x05;
#[allow(non_upper_case_globals)]
const DW_ATE_unsigned: c_uint = 0x07;
#[allow(non_upper_case_globals)]
const DW_ATE_unsigned_char: c_uint = 0x08;
const UNKNOWN_LINE_NUMBER: c_uint = 0;
const UNKNOWN_COLUMN_NUMBER: c_uint = 0;
// ptr::null() doesn't work :(
const UNKNOWN_FILE_METADATA: DIFile = (0 as DIFile);
const UNKNOWN_SCOPE_METADATA: DIScope = (0 as DIScope);
const FLAGS_NONE: c_uint = 0;
//=-----------------------------------------------------------------------------
// Public Interface of debuginfo module
//=-----------------------------------------------------------------------------
#[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)]
struct UniqueTypeId(ast::Name);
// The TypeMap is where the CrateDebugContext holds the type metadata nodes
// created so far. The metadata nodes are indexed by UniqueTypeId, and, for
// faster lookup, also by Ty. The TypeMap is responsible for creating
// UniqueTypeIds.
struct TypeMap<'tcx> {
// The UniqueTypeIds created so far
unique_id_interner: Interner<Rc<String>>,
// A map from UniqueTypeId to debuginfo metadata for that type. This is a 1:1 mapping.
unique_id_to_metadata: FnvHashMap<UniqueTypeId, DIType>,
// A map from types to debuginfo metadata. This is a N:1 mapping.
type_to_metadata: FnvHashMap<Ty<'tcx>, DIType>,
// A map from types to UniqueTypeId. This is a N:1 mapping.
type_to_unique_id: FnvHashMap<Ty<'tcx>, UniqueTypeId>
}
impl<'tcx> TypeMap<'tcx> {
fn new() -> TypeMap<'tcx> {
TypeMap {
unique_id_interner: Interner::new(),
type_to_metadata: FnvHashMap(),
unique_id_to_metadata: FnvHashMap(),
type_to_unique_id: FnvHashMap(),
}
}
// Adds a Ty to metadata mapping to the TypeMap. The method will fail if
// the mapping already exists.
fn register_type_with_metadata<'a>(&mut self,
cx: &CrateContext<'a, 'tcx>,
type_: Ty<'tcx>,
metadata: DIType) {
if self.type_to_metadata.insert(type_, metadata).is_some() {
cx.sess().bug(&format!("Type metadata for Ty '{}' is already in the TypeMap!",
ppaux::ty_to_string(cx.tcx(), type_))[]);
}
}
// Adds a UniqueTypeId to metadata mapping to the TypeMap. The method will
// fail if the mapping already exists.
fn register_unique_id_with_metadata(&mut self,
cx: &CrateContext,
unique_type_id: UniqueTypeId,
metadata: DIType) {
if self.unique_id_to_metadata.insert(unique_type_id, metadata).is_some() {
let unique_type_id_str = self.get_unique_type_id_as_string(unique_type_id);
cx.sess().bug(&format!("Type metadata for unique id '{}' is already in the TypeMap!",
&unique_type_id_str[])[]);
}
}
fn find_metadata_for_type(&self, type_: Ty<'tcx>) -> Option<DIType> {
self.type_to_metadata.get(&type_).cloned()
}
fn find_metadata_for_unique_id(&self, unique_type_id: UniqueTypeId) -> Option<DIType> {
self.unique_id_to_metadata.get(&unique_type_id).cloned()
}
// Get the string representation of a UniqueTypeId. This method will fail if
// the id is unknown.
fn get_unique_type_id_as_string(&self, unique_type_id: UniqueTypeId) -> Rc<String> {
let UniqueTypeId(interner_key) = unique_type_id;
self.unique_id_interner.get(interner_key)
}
// Get the UniqueTypeId for the given type. If the UniqueTypeId for the given
// type has been requested before, this is just a table lookup. Otherwise an
// ID will be generated and stored for later lookup.
fn get_unique_type_id_of_type<'a>(&mut self, cx: &CrateContext<'a, 'tcx>,
type_: Ty<'tcx>) -> UniqueTypeId {
// basic type -> {:name of the type:}
// tuple -> {tuple_(:param-uid:)*}
// struct -> {struct_:svh: / :node-id:_<(:param-uid:),*> }
// enum -> {enum_:svh: / :node-id:_<(:param-uid:),*> }
// enum variant -> {variant_:variant-name:_:enum-uid:}
// reference (&) -> {& :pointee-uid:}
// mut reference (&mut) -> {&mut :pointee-uid:}
// ptr (*) -> {* :pointee-uid:}
// mut ptr (*mut) -> {*mut :pointee-uid:}
// unique ptr (~) -> {~ :pointee-uid:}
// @-ptr (@) -> {@ :pointee-uid:}
// sized vec ([T; x]) -> {[:size:] :element-uid:}
// unsized vec ([T]) -> {[] :element-uid:}
// trait (T) -> {trait_:svh: / :node-id:_<(:param-uid:),*> }
// closure -> {<unsafe_> <once_> :store-sigil: |(:param-uid:),* <,_...>| -> \
// :return-type-uid: : (:bounds:)*}
// function -> {<unsafe_> <abi_> fn( (:param-uid:)* <,_...> ) -> \
// :return-type-uid:}
// unique vec box (~[]) -> {HEAP_VEC_BOX<:pointee-uid:>}
// gc box -> {GC_BOX<:pointee-uid:>}
match self.type_to_unique_id.get(&type_).cloned() {
Some(unique_type_id) => return unique_type_id,
None => { /* generate one */}
};
let mut unique_type_id = String::with_capacity(256);
unique_type_id.push('{');
match type_.sty {
ty::ty_bool |
ty::ty_char |
ty::ty_str |
ty::ty_int(_) |
ty::ty_uint(_) |
ty::ty_float(_) => {
push_debuginfo_type_name(cx, type_, false, &mut unique_type_id);
},
ty::ty_enum(def_id, substs) => {
unique_type_id.push_str("enum ");
from_def_id_and_substs(self, cx, def_id, substs, &mut unique_type_id);
},
ty::ty_struct(def_id, substs) => {
unique_type_id.push_str("struct ");
from_def_id_and_substs(self, cx, def_id, substs, &mut unique_type_id);
},
ty::ty_tup(ref component_types) if component_types.is_empty() => {
push_debuginfo_type_name(cx, type_, false, &mut unique_type_id);
},
ty::ty_tup(ref component_types) => {
unique_type_id.push_str("tuple ");
for &component_type in component_types {
let component_type_id =
self.get_unique_type_id_of_type(cx, component_type);
let component_type_id =
self.get_unique_type_id_as_string(component_type_id);
unique_type_id.push_str(&component_type_id[]);
}
},
ty::ty_uniq(inner_type) => {
unique_type_id.push('~');
let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type);
let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
unique_type_id.push_str(&inner_type_id[]);
},
ty::ty_ptr(ty::mt { ty: inner_type, mutbl } ) => {
unique_type_id.push('*');
if mutbl == ast::MutMutable {
unique_type_id.push_str("mut");
}
let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type);
let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
unique_type_id.push_str(&inner_type_id[]);
},
ty::ty_rptr(_, ty::mt { ty: inner_type, mutbl }) => {
unique_type_id.push('&');
if mutbl == ast::MutMutable {
unique_type_id.push_str("mut");
}
let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type);
let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
unique_type_id.push_str(&inner_type_id[]);
},
ty::ty_vec(inner_type, optional_length) => {
match optional_length {
Some(len) => {
unique_type_id.push_str(&format!("[{}]", len)[]);
}
None => {
unique_type_id.push_str("[]");
}
};
let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type);
let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
unique_type_id.push_str(&inner_type_id[]);
},
ty::ty_trait(ref trait_data) => {
unique_type_id.push_str("trait ");
let principal =
ty::erase_late_bound_regions(cx.tcx(),
&trait_data.principal);
from_def_id_and_substs(self,
cx,
principal.def_id,
principal.substs,
&mut unique_type_id);
},
ty::ty_bare_fn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => {
if unsafety == ast::Unsafety::Unsafe {
unique_type_id.push_str("unsafe ");
}
unique_type_id.push_str(abi.name());
unique_type_id.push_str(" fn(");
let sig = ty::erase_late_bound_regions(cx.tcx(), sig);
for ¶meter_type in &sig.inputs {
let parameter_type_id =
self.get_unique_type_id_of_type(cx, parameter_type);
let parameter_type_id =
self.get_unique_type_id_as_string(parameter_type_id);
unique_type_id.push_str(¶meter_type_id[]);
unique_type_id.push(',');
}
if sig.variadic {
unique_type_id.push_str("...");
}
unique_type_id.push_str(")->");
match sig.output {
ty::FnConverging(ret_ty) => {
let return_type_id = self.get_unique_type_id_of_type(cx, ret_ty);
let return_type_id = self.get_unique_type_id_as_string(return_type_id);
unique_type_id.push_str(&return_type_id[]);
}
ty::FnDiverging => {
unique_type_id.push_str("!");
}
}
},
ty::ty_closure(def_id, _, substs) => {
let typer = NormalizingClosureTyper::new(cx.tcx());
let closure_ty = typer.closure_type(def_id, substs);
self.get_unique_type_id_of_closure_type(cx,
closure_ty,
&mut unique_type_id);
},
_ => {
cx.sess().bug(&format!("get_unique_type_id_of_type() - unexpected type: {}, {:?}",
&ppaux::ty_to_string(cx.tcx(), type_)[],
type_.sty)[])
}
};
unique_type_id.push('}');
// Trim to size before storing permanently
unique_type_id.shrink_to_fit();
let key = self.unique_id_interner.intern(Rc::new(unique_type_id));
self.type_to_unique_id.insert(type_, UniqueTypeId(key));
return UniqueTypeId(key);
fn from_def_id_and_substs<'a, 'tcx>(type_map: &mut TypeMap<'tcx>,
cx: &CrateContext<'a, 'tcx>,
def_id: ast::DefId,
substs: &subst::Substs<'tcx>,
output: &mut String) {
// First, find out the 'real' def_id of the type. Items inlined from
// other crates have to be mapped back to their source.
let source_def_id = if def_id.krate == ast::LOCAL_CRATE {
match cx.external_srcs().borrow().get(&def_id.node).cloned() {
Some(source_def_id) => {
// The given def_id identifies the inlined copy of a
// type definition, let's take the source of the copy.
source_def_id
}
None => def_id
}
} else {
def_id
};
// Get the crate hash as first part of the identifier.
let crate_hash = if source_def_id.krate == ast::LOCAL_CRATE {
cx.link_meta().crate_hash.clone()
} else {
cx.sess().cstore.get_crate_hash(source_def_id.krate)
};
output.push_str(crate_hash.as_str());
output.push_str("/");
output.push_str(&format!("{:x}", def_id.node)[]);
// Maybe check that there is no self type here.
let tps = substs.types.get_slice(subst::TypeSpace);
if tps.len() > 0 {
output.push('<');
for &type_parameter in tps {
let param_type_id =
type_map.get_unique_type_id_of_type(cx, type_parameter);
let param_type_id =
type_map.get_unique_type_id_as_string(param_type_id);
output.push_str(¶m_type_id[]);
output.push(',');
}
output.push('>');
}
}
}
fn get_unique_type_id_of_closure_type<'a>(&mut self,
cx: &CrateContext<'a, 'tcx>,
closure_ty: ty::ClosureTy<'tcx>,
unique_type_id: &mut String) {
let ty::ClosureTy { unsafety,
ref sig,
abi: _ } = closure_ty;
if unsafety == ast::Unsafety::Unsafe {
unique_type_id.push_str("unsafe ");
}
unique_type_id.push_str("|");
let sig = ty::erase_late_bound_regions(cx.tcx(), sig);
for ¶meter_type in &sig.inputs {
let parameter_type_id =
self.get_unique_type_id_of_type(cx, parameter_type);
let parameter_type_id =
self.get_unique_type_id_as_string(parameter_type_id);
unique_type_id.push_str(¶meter_type_id[]);
unique_type_id.push(',');
}
if sig.variadic {
unique_type_id.push_str("...");
}
unique_type_id.push_str("|->");
match sig.output {
ty::FnConverging(ret_ty) => {
let return_type_id = self.get_unique_type_id_of_type(cx, ret_ty);
let return_type_id = self.get_unique_type_id_as_string(return_type_id);
unique_type_id.push_str(&return_type_id[]);
}
ty::FnDiverging => {
unique_type_id.push_str("!");
}
}
}
// Get the UniqueTypeId for an enum variant. Enum variants are not really
// types of their own, so they need special handling. We still need a
// UniqueTypeId for them, since to debuginfo they *are* real types.
fn get_unique_type_id_of_enum_variant<'a>(&mut self,
cx: &CrateContext<'a, 'tcx>,
enum_type: Ty<'tcx>,
variant_name: &str)
-> UniqueTypeId {
let enum_type_id = self.get_unique_type_id_of_type(cx, enum_type);
let enum_variant_type_id = format!("{}::{}",
&self.get_unique_type_id_as_string(enum_type_id)[],
variant_name);
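        // For example (illustrative): the variant `Some` of `Option<i32>`
        // gets the unique type id string of `Option<i32>` with "::Some"
        // appended.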
let interner_key = self.unique_id_interner.intern(Rc::new(enum_variant_type_id));
UniqueTypeId(interner_key)
}
}
// Returns from the enclosing function if the type metadata with the given
// unique id can be found in the type map
macro_rules! return_if_metadata_created_in_meantime {
($cx: expr, $unique_type_id: expr) => (
match debug_context($cx).type_map
.borrow()
.find_metadata_for_unique_id($unique_type_id) {
Some(metadata) => return MetadataCreationResult::new(metadata, true),
None => { /* proceed normally */ }
};
)
}
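// Illustrative usage inside a metadata creation function returning a
// MetadataCreationResult: after any call that may have recursively created
// this type's metadata, bail out early instead of creating a duplicate:
//
//     return_if_metadata_created_in_meantime!(cx, unique_type_id);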
/// A context object for maintaining all state needed by the debuginfo module.
pub struct CrateDebugContext<'tcx> {
llcontext: ContextRef,
builder: DIBuilderRef,
current_debug_location: Cell<InternalDebugLocation>,
created_files: RefCell<FnvHashMap<String, DIFile>>,
created_enum_disr_types: RefCell<DefIdMap<DIType>>,
type_map: RefCell<TypeMap<'tcx>>,
namespace_map: RefCell<FnvHashMap<Vec<ast::Name>, Rc<NamespaceTreeNode>>>,
// This collection is used to assert that composite types (structs, enums,
// ...) have their members only set once:
composite_types_completed: RefCell<FnvHashSet<DIType>>,
}
impl<'tcx> CrateDebugContext<'tcx> {
pub fn new(llmod: ModuleRef) -> CrateDebugContext<'tcx> {
debug!("CrateDebugContext::new");
let builder = unsafe { llvm::LLVMDIBuilderCreate(llmod) };
// DIBuilder inherits context from the module, so we'd better use the same one
let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) };
return CrateDebugContext {
llcontext: llcontext,
builder: builder,
current_debug_location: Cell::new(UnknownLocation),
created_files: RefCell::new(FnvHashMap()),
created_enum_disr_types: RefCell::new(DefIdMap()),
type_map: RefCell::new(TypeMap::new()),
namespace_map: RefCell::new(FnvHashMap()),
composite_types_completed: RefCell::new(FnvHashSet()),
};
}
}
pub enum FunctionDebugContext {
RegularContext(Box<FunctionDebugContextData>),
DebugInfoDisabled,
FunctionWithoutDebugInfo,
}
impl FunctionDebugContext {
fn get_ref<'a>(&'a self,
cx: &CrateContext,
span: Span)
-> &'a FunctionDebugContextData {
match *self {
FunctionDebugContext::RegularContext(box ref data) => data,
FunctionDebugContext::DebugInfoDisabled => {
cx.sess().span_bug(span,
FunctionDebugContext::debuginfo_disabled_message());
}
FunctionDebugContext::FunctionWithoutDebugInfo => {
cx.sess().span_bug(span,
FunctionDebugContext::should_be_ignored_message());
}
}
}
fn debuginfo_disabled_message() -> &'static str {
"debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!"
}
fn should_be_ignored_message() -> &'static str {
"debuginfo: Error trying to access FunctionDebugContext for function that should be \
ignored by debug info!"
}
}
struct FunctionDebugContextData {
scope_map: RefCell<NodeMap<DIScope>>,
fn_metadata: DISubprogram,
argument_counter: Cell<uint>,
source_locations_enabled: Cell<bool>,
}
enum VariableAccess<'a> {
// The llptr given is an alloca containing the variable's value
DirectVariable { alloca: ValueRef },
// The llptr given is an alloca containing the start of some pointer chain
// leading to the variable's content.
IndirectVariable { alloca: ValueRef, address_operations: &'a [i64] }
}
enum VariableKind {
ArgumentVariable(uint /*index*/),
LocalVariable,
CapturedVariable,
}
/// Create any deferred debug metadata nodes
pub fn finalize(cx: &CrateContext) {
if cx.dbg_cx().is_none() {
return;
}
debug!("finalize");
let _ = compile_unit_metadata(cx);
if needs_gdb_debug_scripts_section(cx) {
// Add a .debug_gdb_scripts section to this compile-unit. This will
// cause GDB to try and load the gdb_load_rust_pretty_printers.py file,
// which activates the Rust pretty printers for the binary this section is
// contained in.
get_or_insert_gdb_debug_scripts_section_global(cx);
}
unsafe {
llvm::LLVMDIBuilderFinalize(DIB(cx));
llvm::LLVMDIBuilderDispose(DIB(cx));
// Debuginfo generation in LLVM by default uses a higher
// version of dwarf than OS X currently understands. We can
// instruct LLVM to emit an older version of dwarf, however,
// for OS X to understand. For more info see #11352
// This can be overridden using --llvm-opts -dwarf-version,N.
// Android has the same issue (#22398)
if cx.sess().target.target.options.is_like_osx ||
cx.sess().target.target.options.is_like_android {
llvm::LLVMRustAddModuleFlag(cx.llmod(),
"Dwarf Version\0".as_ptr() as *const _,
2)
}
// Prevent bitcode readers from deleting the debug info.
let ptr = "Debug Info Version\0".as_ptr();
llvm::LLVMRustAddModuleFlag(cx.llmod(), ptr as *const _,
llvm::LLVMRustDebugMetadataVersion);
};
}
/// Creates debug information for the given global variable.
///
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_global_var_metadata(cx: &CrateContext,
node_id: ast::NodeId,
global: ValueRef) {
if cx.dbg_cx().is_none() {
return;
}
// Don't create debuginfo for globals inlined from other crates. The other
// crate should already contain debuginfo for it. More importantly, the
// global might not even exist in un-inlined form anywhere, which would lead
// to linker errors.
if cx.external_srcs().borrow().contains_key(&node_id) {
return;
}
let var_item = cx.tcx().map.get(node_id);
let (ident, span) = match var_item {
ast_map::NodeItem(item) => {
match item.node {
ast::ItemStatic(..) => (item.ident, item.span),
ast::ItemConst(..) => (item.ident, item.span),
_ => {
cx.sess()
.span_bug(item.span,
&format!("debuginfo::\
create_global_var_metadata() -
Captured var-id refers to \
unexpected ast_item variant: {:?}",
var_item)[])
}
}
},
_ => cx.sess().bug(&format!("debuginfo::create_global_var_metadata() \
- Captured var-id refers to unexpected \
ast_map variant: {:?}",
var_item)[])
};
let (file_metadata, line_number) = if span != codemap::DUMMY_SP {
let loc = span_start(cx, span);
(file_metadata(cx, &loc.file.name[]), loc.line as c_uint)
} else {
(UNKNOWN_FILE_METADATA, UNKNOWN_LINE_NUMBER)
};
let is_local_to_unit = is_node_local_to_unit(cx, node_id);
let variable_type = ty::node_id_to_type(cx.tcx(), node_id);
let type_metadata = type_metadata(cx, variable_type, span);
let namespace_node = namespace_for_item(cx, ast_util::local_def(node_id));
let var_name = token::get_ident(ident).to_string();
let linkage_name =
namespace_node.mangled_name_of_contained_item(&var_name[]);
let var_scope = namespace_node.scope;
let var_name = CString::from_slice(var_name.as_bytes());
let linkage_name = CString::from_slice(linkage_name.as_bytes());
unsafe {
llvm::LLVMDIBuilderCreateStaticVariable(DIB(cx),
var_scope,
var_name.as_ptr(),
linkage_name.as_ptr(),
file_metadata,
line_number,
type_metadata,
is_local_to_unit,
global,
ptr::null_mut());
}
}
/// Creates debug information for the given local variable.
///
/// This function assumes that there's a datum for each pattern component of the
/// local in `bcx.fcx.lllocals`.
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_local_var_metadata(bcx: Block, local: &ast::Local) {
if bcx.unreachable.get() ||
fn_should_be_ignored(bcx.fcx) ||
bcx.sess().opts.debuginfo != FullDebugInfo {
return;
}
let cx = bcx.ccx();
let def_map = &cx.tcx().def_map;
let locals = bcx.fcx.lllocals.borrow();
pat_util::pat_bindings(def_map, &*local.pat, |_, node_id, span, var_ident| {
let datum = match locals.get(&node_id) {
Some(datum) => datum,
None => {
bcx.sess().span_bug(span,
&format!("no entry in lllocals table for {}",
node_id)[]);
}
};
if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() {
cx.sess().span_bug(span, "debuginfo::create_local_var_metadata() - \
Referenced variable location is not an alloca!");
}
let scope_metadata = scope_metadata(bcx.fcx, node_id, span);
declare_local(bcx,
var_ident.node,
datum.ty,
scope_metadata,
DirectVariable { alloca: datum.val },
LocalVariable,
span);
})
}
/// Creates debug information for a variable captured in a closure.
///
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_captured_var_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
node_id: ast::NodeId,
env_pointer: ValueRef,
env_index: uint,
captured_by_ref: bool,
span: Span) {
if bcx.unreachable.get() ||
fn_should_be_ignored(bcx.fcx) ||
bcx.sess().opts.debuginfo != FullDebugInfo {
return;
}
let cx = bcx.ccx();
let ast_item = cx.tcx().map.find(node_id);
let variable_ident = match ast_item {
None => {
cx.sess().span_bug(span, "debuginfo::create_captured_var_metadata: node not found");
}
Some(ast_map::NodeLocal(pat)) | Some(ast_map::NodeArg(pat)) => {
match pat.node {
ast::PatIdent(_, ref path1, _) => {
path1.node
}
_ => {
cx.sess()
.span_bug(span,
&format!(
"debuginfo::create_captured_var_metadata() - \
Captured var-id refers to unexpected \
ast_map variant: {:?}",
ast_item)[]);
}
}
}
_ => {
cx.sess()
.span_bug(span,
&format!("debuginfo::create_captured_var_metadata() - \
Captured var-id refers to unexpected \
ast_map variant: {:?}",
ast_item)[]);
}
};
let variable_type = common::node_id_type(bcx, node_id);
let scope_metadata = bcx.fcx.debug_context.get_ref(cx, span).fn_metadata;
// env_pointer is the alloca containing the pointer to the environment,
// so its type is **EnvironmentType. In order to find out the type of
// the environment we have to "dereference" twice.
let llvm_env_data_type = common::val_ty(env_pointer).element_type()
.element_type();
let byte_offset_of_var_in_env = machine::llelement_offset(cx,
llvm_env_data_type,
env_index);
let address_operations = unsafe {
[llvm::LLVMDIBuilderCreateOpDeref(),
llvm::LLVMDIBuilderCreateOpPlus(),
byte_offset_of_var_in_env as i64,
llvm::LLVMDIBuilderCreateOpDeref()]
};
let address_op_count = if captured_by_ref {
address_operations.len()
} else {
address_operations.len() - 1
};
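    // Illustrative reading of the operation lists above: a by-value capture
    // lives at *(env alloca) + byte offset, while a by-ref capture keeps the
    // trailing OpDeref so the stored reference is followed to the actual
    // value.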
let variable_access = IndirectVariable {
alloca: env_pointer,
address_operations: &address_operations[..address_op_count]
};
declare_local(bcx,
variable_ident,
variable_type,
scope_metadata,
variable_access,
CapturedVariable,
span);
}
/// Creates debug information for a local variable introduced in the head of a
/// match-statement arm.
///
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_match_binding_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
variable_ident: ast::Ident,
binding: BindingInfo<'tcx>) {
if bcx.unreachable.get() ||
fn_should_be_ignored(bcx.fcx) ||
bcx.sess().opts.debuginfo != FullDebugInfo {
return;
}
let scope_metadata = scope_metadata(bcx.fcx, binding.id, binding.span);
let aops = unsafe {
[llvm::LLVMDIBuilderCreateOpDeref()]
};
// Regardless of the actual type (`T`) we're always passed the stack slot (alloca)
// for the binding. For ByRef bindings that's a `T*` but for ByMove bindings we
// actually have `T**`. So to get the actual variable we need to dereference once
// more. For ByCopy we just use the stack slot we created for the binding.
let var_access = match binding.trmode {
TrByCopy(llbinding) => DirectVariable {
alloca: llbinding
},
TrByMove => IndirectVariable {
alloca: binding.llmatch,
address_operations: &aops
},
TrByRef => DirectVariable {
alloca: binding.llmatch
}
};
declare_local(bcx,
variable_ident,
binding.ty,
scope_metadata,
var_access,
LocalVariable,
binding.span);
}
/// Creates debug information for the given function argument.
///
/// This function assumes that there's a datum for each pattern component of the
/// argument in `bcx.fcx.lllocals`.
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_argument_metadata(bcx: Block, arg: &ast::Arg) {
if bcx.unreachable.get() ||
fn_should_be_ignored(bcx.fcx) ||
bcx.sess().opts.debuginfo != FullDebugInfo {
return;
}
let def_map = &bcx.tcx().def_map;
let scope_metadata = bcx
.fcx
.debug_context
.get_ref(bcx.ccx(), arg.pat.span)
.fn_metadata;
let locals = bcx.fcx.lllocals.borrow();
pat_util::pat_bindings(def_map, &*arg.pat, |_, node_id, span, var_ident| {
let datum = match locals.get(&node_id) {
Some(v) => v,
None => {
bcx.sess().span_bug(span,
&format!("no entry in lllocals table for {}",
node_id)[]);
}
};
if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() {
bcx.sess().span_bug(span, "debuginfo::create_argument_metadata() - \
Referenced variable location is not an alloca!");
}
let argument_index = {
let counter = &bcx
.fcx
.debug_context
.get_ref(bcx.ccx(), span)
.argument_counter;
let argument_index = counter.get();
counter.set(argument_index + 1);
argument_index
};
declare_local(bcx,
var_ident.node,
datum.ty,
scope_metadata,
DirectVariable { alloca: datum.val },
ArgumentVariable(argument_index),
span);
})
}
pub fn get_cleanup_debug_loc_for_ast_node<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
node_id: ast::NodeId,
node_span: Span,
is_block: bool)
-> NodeIdAndSpan {
// A debug location needs two things:
// (1) A span (of which only the beginning will actually be used)
// (2) An AST node-id which will be used to look up the lexical scope
// for the location in the functions scope-map
//
// This function will calculate the debug location for compiler-generated
// cleanup calls that are executed when control-flow leaves the
// scope identified by `node_id`.
//
// For everything but block-like things we can simply take the id and span of
// the given expression, meaning that from a debugger's view cleanup code is
// executed at the same source location as the statement/expr itself.
//
// Blocks are a special case. Here we want the cleanup to be linked to the
// closing curly brace of the block. The *scope* the cleanup is executed in
// is up for debate: it could either still be *within* the block being
// cleaned up, meaning that locals from the block are still visible in the
// debugger.
// Or it could be in the scope that the block is contained in, so any locals
// from within the block are already considered out-of-scope and thus not
// accessible in the debugger anymore.
//
// The current implementation opts for the second option: cleanup of a block
// already happens in the parent scope of the block. The main reason for
// this decision is that scoping becomes control-flow dependent when variable
// shadowing is involved and it's impossible to decide statically which
// scope is actually left when the cleanup code is executed.
// In practice it shouldn't make much of a difference.
let mut cleanup_span = node_span;
if is_block {
// Not all blocks actually have curly braces (e.g. simple closure
// bodies), in which case we also just want to return the span of the
// whole expression.
let code_snippet = cx.sess().codemap().span_to_snippet(node_span);
if let Ok(code_snippet) = code_snippet {
let bytes = code_snippet.as_bytes();
if bytes.len() > 0 && &bytes[bytes.len()-1..] == b"}" {
cleanup_span = Span {
lo: node_span.hi - codemap::BytePos(1),
hi: node_span.hi,
expn_id: node_span.expn_id
};
}
}
}
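    // Illustrative example: for a block `{ let _guard = take_lock(); work() }`
    // the cleanup span computed above covers only the closing brace, so a
    // debugger attributes the drop of `_guard` to the end of the block rather
    // than to its first statement.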
NodeIdAndSpan {
id: node_id,
span: cleanup_span
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum DebugLoc {
At(ast::NodeId, Span),
None
}
impl DebugLoc {
pub fn apply(&self, fcx: &FunctionContext) {
match *self {
DebugLoc::At(node_id, span) => {
set_source_location(fcx, node_id, span);
}
DebugLoc::None => {
clear_source_location(fcx);
}
}
}
}
pub trait ToDebugLoc {
fn debug_loc(&self) -> DebugLoc;
}
impl ToDebugLoc for ast::Expr {
fn debug_loc(&self) -> DebugLoc {
DebugLoc::At(self.id, self.span)
}
}
impl ToDebugLoc for NodeIdAndSpan {
fn debug_loc(&self) -> DebugLoc {
DebugLoc::At(self.id, self.span)
}
}
impl ToDebugLoc for Option<NodeIdAndSpan> {
fn debug_loc(&self) -> DebugLoc {
match *self {
Some(NodeIdAndSpan { id, span }) => DebugLoc::At(id, span),
None => DebugLoc::None
}
}
}
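// Illustrative usage (assuming an `expr: &ast::Expr` and an
// `fcx: &FunctionContext` in scope):
//
//     expr.debug_loc().apply(fcx);
//
// This sets the source location for subsequently generated instructions, or
// clears it when the DebugLoc is None.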
/// Sets the current debug location at the beginning of the span.
///
/// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). The node_id
/// parameter is used to reliably find the correct visibility scope for the code
/// position.
pub fn set_source_location(fcx: &FunctionContext,
node_id: ast::NodeId,
span: Span) {
match fcx.debug_context {
FunctionDebugContext::DebugInfoDisabled => return,
FunctionDebugContext::FunctionWithoutDebugInfo => {
set_debug_location(fcx.ccx, UnknownLocation);
return;
}
FunctionDebugContext::RegularContext(box ref function_debug_context) => {
let cx = fcx.ccx;
debug!("set_source_location: {}", cx.sess().codemap().span_to_string(span));
if function_debug_context.source_locations_enabled.get() {
let loc = span_start(cx, span);
let scope = scope_metadata(fcx, node_id, span);
set_debug_location(cx, InternalDebugLocation::new(scope,
loc.line,
loc.col.to_usize()));
} else {
set_debug_location(cx, UnknownLocation);
}
}
}
}
/// Clears the current debug location.
///
/// Instructions generated hereafter won't be assigned a source location.
pub fn clear_source_location(fcx: &FunctionContext) {
if fn_should_be_ignored(fcx) {
return;
}
set_debug_location(fcx.ccx, UnknownLocation);
}
/// Enables emitting source locations for the given function.
///
/// Since we don't want source locations to be emitted for the function prelude,
/// they are disabled when beginning to translate a new function. This function
/// switches source location emission on and must therefore be called before the
/// first real statement/expression of the function is translated.
pub fn start_emitting_source_locations(fcx: &FunctionContext) {
match fcx.debug_context {
FunctionDebugContext::RegularContext(box ref data) => {
data.source_locations_enabled.set(true)
},
_ => { /* safe to ignore */ }
}
}
/// Creates the function-specific debug context.
///
/// Returns the FunctionDebugContext for the function which holds state needed
/// for debug info creation. The function may also return another variant of the
/// FunctionDebugContext enum which indicates why no debuginfo should be created
/// for the function.
pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
fn_ast_id: ast::NodeId,
param_substs: &Substs<'tcx>,
llfn: ValueRef) -> FunctionDebugContext {
if cx.sess().opts.debuginfo == NoDebugInfo {
return FunctionDebugContext::DebugInfoDisabled;
}
// Clear the debug location so we don't assign locations in the function prelude.
// Do this here already, in case we do an early exit from this function.
set_debug_location(cx, UnknownLocation);
if fn_ast_id == ast::DUMMY_NODE_ID {
// This is a function not linked to any source location, so don't
// generate debuginfo for it.
return FunctionDebugContext::FunctionWithoutDebugInfo;
}
let empty_generics = ast_util::empty_generics();
let fnitem = cx.tcx().map.get(fn_ast_id);
let (ident, fn_decl, generics, top_level_block, span, has_path) = match fnitem {
ast_map::NodeItem(ref item) => {
if contains_nodebug_attribute(&item.attrs) {
return FunctionDebugContext::FunctionWithoutDebugInfo;
}
match item.node {
ast::ItemFn(ref fn_decl, _, _, ref generics, ref top_level_block) => {
(item.ident, &**fn_decl, generics, &**top_level_block, item.span, true)
}
_ => {
cx.sess().span_bug(item.span,
"create_function_debug_context: item bound to non-function");
}
}
}
ast_map::NodeImplItem(ref item) => {
match **item {
ast::MethodImplItem(ref method) => {
if contains_nodebug_attribute(&method.attrs) {
return FunctionDebugContext::FunctionWithoutDebugInfo;
}
(method.pe_ident(),
method.pe_fn_decl(),
method.pe_generics(),
method.pe_body(),
method.span,
true)
}
ast::TypeImplItem(ref typedef) => {
cx.sess().span_bug(typedef.span,
"create_function_debug_context() \
called on associated type?!")
}
}
}
ast_map::NodeExpr(ref expr) => {
match expr.node {
ast::ExprClosure(_, ref fn_decl, ref top_level_block) => {
let name = format!("fn{}", token::gensym("fn"));
let name = token::str_to_ident(&name[]);
(name, &**fn_decl,
// This is not quite right. It should actually inherit
// the generics of the enclosing function.
&empty_generics,
&**top_level_block,
expr.span,
// Don't try to lookup the item path:
false)
}
_ => cx.sess().span_bug(expr.span,
"create_function_debug_context: expected an expr_fn_block here")
}
}
ast_map::NodeTraitItem(ref trait_method) => {
match **trait_method {
ast::ProvidedMethod(ref method) => {
if contains_nodebug_attribute(&method.attrs) {
return FunctionDebugContext::FunctionWithoutDebugInfo;
}
(method.pe_ident(),
method.pe_fn_decl(),
method.pe_generics(),
method.pe_body(),
method.span,
true)
}
_ => {
cx.sess()
.bug(&format!("create_function_debug_context: \
unexpected sort of node: {:?}",
fnitem)[])
}
}
}
ast_map::NodeForeignItem(..) |
ast_map::NodeVariant(..) |
ast_map::NodeStructCtor(..) => {
return FunctionDebugContext::FunctionWithoutDebugInfo;
}
_ => cx.sess().bug(&format!("create_function_debug_context: \
unexpected sort of node: {:?}",
fnitem)[])
};
// This can be the case for functions inlined from another crate
if span == codemap::DUMMY_SP {
return FunctionDebugContext::FunctionWithoutDebugInfo;
}
let loc = span_start(cx, span);
let file_metadata = file_metadata(cx, &loc.file.name[]);
let function_type_metadata = unsafe {
let fn_signature = get_function_signature(cx,
fn_ast_id,
&*fn_decl,
param_substs,
span);
llvm::LLVMDIBuilderCreateSubroutineType(DIB(cx), file_metadata, fn_signature)
};
// get_template_parameters() will append a `<...>` clause to the function
// name if necessary.
let mut function_name = String::from_str(&token::get_ident(ident));
let template_parameters = get_template_parameters(cx,
generics,
param_substs,
file_metadata,
&mut function_name);
// There is no ast_map::Path for ast::ExprClosure-type functions. For now,
// just don't put them into a namespace. In the future this could be improved
// somehow (storing a path in the ast_map, or constructing a path using the
// enclosing function).
let (linkage_name, containing_scope) = if has_path {
let namespace_node = namespace_for_item(cx, ast_util::local_def(fn_ast_id));
let linkage_name = namespace_node.mangled_name_of_contained_item(
&function_name[]);
let containing_scope = namespace_node.scope;
(linkage_name, containing_scope)
} else {
(function_name.clone(), file_metadata)
};
// Clang sets this parameter to the opening brace of the function's block,
// so let's do this too.
let scope_line = span_start(cx, top_level_block.span).line;
let is_local_to_unit = is_node_local_to_unit(cx, fn_ast_id);
let function_name = CString::from_slice(function_name.as_bytes());
let linkage_name = CString::from_slice(linkage_name.as_bytes());
let fn_metadata = unsafe {
llvm::LLVMDIBuilderCreateFunction(
DIB(cx),
containing_scope,
function_name.as_ptr(),
linkage_name.as_ptr(),
file_metadata,
loc.line as c_uint,
function_type_metadata,
is_local_to_unit,
true,
scope_line as c_uint,
FlagPrototyped as c_uint,
cx.sess().opts.optimize != config::No,
llfn,
template_parameters,
ptr::null_mut())
};
let scope_map = create_scope_map(cx,
&fn_decl.inputs,
&*top_level_block,
fn_metadata,
fn_ast_id);
// Initialize fn debug context (including scope map and namespace map)
let fn_debug_context = box FunctionDebugContextData {
scope_map: RefCell::new(scope_map),
fn_metadata: fn_metadata,
argument_counter: Cell::new(1),
source_locations_enabled: Cell::new(false),
};
return FunctionDebugContext::RegularContext(fn_debug_context);
fn get_function_signature<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
fn_ast_id: ast::NodeId,
fn_decl: &ast::FnDecl,
param_substs: &Substs<'tcx>,
error_reporting_span: Span) -> DIArray {
if cx.sess().opts.debuginfo == LimitedDebugInfo {
return create_DIArray(DIB(cx), &[]);
}
let mut signature = Vec::with_capacity(fn_decl.inputs.len() + 1);
// Return type -- llvm::DIBuilder wants this at index 0
assert_type_for_node_id(cx, fn_ast_id, error_reporting_span);
let return_type = ty::node_id_to_type(cx.tcx(), fn_ast_id);
let return_type = monomorphize::apply_param_substs(cx.tcx(),
param_substs,
&return_type);
if ty::type_is_nil(return_type) {
signature.push(ptr::null_mut())
} else {
signature.push(type_metadata(cx, return_type, codemap::DUMMY_SP));
}
// Argument types
for arg in &fn_decl.inputs {
assert_type_for_node_id(cx, arg.pat.id, arg.pat.span);
let arg_type = ty::node_id_to_type(cx.tcx(), arg.pat.id);
let arg_type = monomorphize::apply_param_substs(cx.tcx(),
param_substs,
&arg_type);
signature.push(type_metadata(cx, arg_type, codemap::DUMMY_SP));
}
return create_DIArray(DIB(cx), &signature[]);
}
fn get_template_parameters<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
generics: &ast::Generics,
param_substs: &Substs<'tcx>,
file_metadata: DIFile,
name_to_append_suffix_to: &mut String)
-> DIArray
{
let self_type = param_substs.self_ty();
let self_type = monomorphize::normalize_associated_type(cx.tcx(), &self_type);
// Only true for static default methods:
let has_self_type = self_type.is_some();
if !generics.is_type_parameterized() && !has_self_type {
return create_DIArray(DIB(cx), &[]);
}
name_to_append_suffix_to.push('<');
// The list to be filled with template parameters:
let mut template_params: Vec<DIDescriptor> =
Vec::with_capacity(generics.ty_params.len() + 1);
// Handle self type
if has_self_type {
let actual_self_type = self_type.unwrap();
// Add self type name to <...> clause of function name
let actual_self_type_name = compute_debuginfo_type_name(
cx,
actual_self_type,
true);
name_to_append_suffix_to.push_str(&actual_self_type_name[]);
if generics.is_type_parameterized() {
name_to_append_suffix_to.push_str(",");
}
// Only create type information if full debuginfo is enabled
if cx.sess().opts.debuginfo == FullDebugInfo {
let actual_self_type_metadata = type_metadata(cx,
actual_self_type,
codemap::DUMMY_SP);
let ident = special_idents::type_self;
let ident = token::get_ident(ident);
let name = CString::from_slice(ident.as_bytes());
let param_metadata = unsafe {
llvm::LLVMDIBuilderCreateTemplateTypeParameter(
DIB(cx),
file_metadata,
name.as_ptr(),
actual_self_type_metadata,
ptr::null_mut(),
0,
0)
};
template_params.push(param_metadata);
}
}
// Handle other generic parameters
let actual_types = param_substs.types.get_slice(subst::FnSpace);
for (index, &ast::TyParam{ ident, .. }) in generics.ty_params.iter().enumerate() {
let actual_type = actual_types[index];
// Add actual type name to <...> clause of function name
let actual_type_name = compute_debuginfo_type_name(cx,
actual_type,
true);
name_to_append_suffix_to.push_str(&actual_type_name[]);
if index != generics.ty_params.len() - 1 {
name_to_append_suffix_to.push_str(",");
}
// Again, only create type information if full debuginfo is enabled
if cx.sess().opts.debuginfo == FullDebugInfo {
let actual_type_metadata = type_metadata(cx, actual_type, codemap::DUMMY_SP);
let ident = token::get_ident(ident);
let name = CString::from_slice(ident.as_bytes());
let param_metadata = unsafe {
llvm::LLVMDIBuilderCreateTemplateTypeParameter(
DIB(cx),
file_metadata,
name.as_ptr(),
actual_type_metadata,
ptr::null_mut(),
0,
0)
};
template_params.push(param_metadata);
}
}
name_to_append_suffix_to.push('>');
return create_DIArray(DIB(cx), &template_params[]);
}
}
//=-----------------------------------------------------------------------------
// Module-Internal debug info creation functions
//=-----------------------------------------------------------------------------
fn is_node_local_to_unit(cx: &CrateContext, node_id: ast::NodeId) -> bool
{
// The is_local_to_unit flag indicates whether a function is local to the
// current compilation unit (i.e. if it is *static* in the C-sense). The
// *reachable* set should provide a good approximation of this, as it
// contains everything that might leak out of the current crate (by being
// externally visible or by being inlined into something externally visible).
// It might be better to use the `exported_items` set from `driver::CrateAnalysis`
// in the future, but (atm) this set is not available in the translation pass.
!cx.reachable().contains(&node_id)
}
#[allow(non_snake_case)]
fn create_DIArray(builder: DIBuilderRef, arr: &[DIDescriptor]) -> DIArray {
return unsafe {
llvm::LLVMDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32)
};
}
fn compile_unit_metadata(cx: &CrateContext) -> DIDescriptor {
let work_dir = &cx.sess().working_dir;
let compile_unit_name = match cx.sess().local_crate_source_file {
None => fallback_path(cx),
Some(ref abs_path) => {
if abs_path.is_relative() {
cx.sess().warn("debuginfo: Invalid path to crate's local root source file!");
fallback_path(cx)
} else {
match abs_path.path_relative_from(work_dir) {
Some(ref p) if p.is_relative() => {
// prepend "./" if necessary
let dotdot = b"..";
let prefix: &[u8] = &[dotdot[0], ::std::old_path::SEP_BYTE];
let mut path_bytes = p.as_vec().to_vec();
if &path_bytes[..2] != prefix &&
&path_bytes[..2] != dotdot {
path_bytes.insert(0, prefix[0]);
path_bytes.insert(1, prefix[1]);
}
CString::from_vec(path_bytes)
}
_ => fallback_path(cx)
}
}
}
};
debug!("compile_unit_metadata: {:?}", compile_unit_name);
let producer = format!("rustc version {}",
(option_env!("CFG_VERSION")).expect("CFG_VERSION"));
let compile_unit_name = compile_unit_name.as_ptr();
let work_dir = CString::from_slice(work_dir.as_vec());
let producer = CString::from_slice(producer.as_bytes());
let flags = "\0";
let split_name = "\0";
return unsafe {
llvm::LLVMDIBuilderCreateCompileUnit(
debug_context(cx).builder,
DW_LANG_RUST,
compile_unit_name,
work_dir.as_ptr(),
producer.as_ptr(),
cx.sess().opts.optimize != config::No,
            flags.as_ptr() as *const _,
            0,
split_name.as_ptr() as *const _)
};
fn fallback_path(cx: &CrateContext) -> CString {
CString::from_slice(cx.link_meta().crate_name.as_bytes())
}
}
fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
variable_ident: ast::Ident,
variable_type: Ty<'tcx>,
scope_metadata: DIScope,
variable_access: VariableAccess,
variable_kind: VariableKind,
span: Span) {
let cx: &CrateContext = bcx.ccx();
let filename = span_start(cx, span).file.name.clone();
let file_metadata = file_metadata(cx, &filename[]);
let name = token::get_ident(variable_ident);
let loc = span_start(cx, span);
let type_metadata = type_metadata(cx, variable_type, span);
let (argument_index, dwarf_tag) = match variable_kind {
ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable),
LocalVariable |
CapturedVariable => (0, DW_TAG_auto_variable)
};
let name = CString::from_slice(name.as_bytes());
match (variable_access, [].as_slice()) {
(DirectVariable { alloca }, address_operations) |
(IndirectVariable {alloca, address_operations}, _) => {
let metadata = unsafe {
llvm::LLVMDIBuilderCreateVariable(
DIB(cx),
dwarf_tag,
scope_metadata,
name.as_ptr(),
file_metadata,
loc.line as c_uint,
type_metadata,
cx.sess().opts.optimize != config::No,
0,
address_operations.as_ptr(),
address_operations.len() as c_uint,
argument_index)
};
set_debug_location(cx, InternalDebugLocation::new(scope_metadata,
loc.line,
loc.col.to_usize()));
unsafe {
let instr = llvm::LLVMDIBuilderInsertDeclareAtEnd(
DIB(cx),
alloca,
metadata,
address_operations.as_ptr(),
address_operations.len() as c_uint,
bcx.llbb);
llvm::LLVMSetInstDebugLocation(trans::build::B(bcx).llbuilder, instr);
}
}
}
match variable_kind {
ArgumentVariable(_) | CapturedVariable => {
assert!(!bcx.fcx
.debug_context
.get_ref(cx, span)
.source_locations_enabled
.get());
set_debug_location(cx, UnknownLocation);
}
_ => { /* nothing to do */ }
}
}
fn file_metadata(cx: &CrateContext, full_path: &str) -> DIFile {
match debug_context(cx).created_files.borrow().get(full_path) {
Some(file_metadata) => return *file_metadata,
None => ()
}
debug!("file_metadata: {}", full_path);
// FIXME (#9639): This needs to handle non-utf8 paths
let work_dir = cx.sess().working_dir.as_str().unwrap();
let file_name =
if full_path.starts_with(work_dir) {
&full_path[work_dir.len() + 1..full_path.len()]
} else {
full_path
};
let file_name = CString::from_slice(file_name.as_bytes());
let work_dir = CString::from_slice(work_dir.as_bytes());
let file_metadata = unsafe {
llvm::LLVMDIBuilderCreateFile(DIB(cx), file_name.as_ptr(),
work_dir.as_ptr())
};
let mut created_files = debug_context(cx).created_files.borrow_mut();
created_files.insert(full_path.to_string(), file_metadata);
return file_metadata;
}
/// Finds the scope metadata node for the given AST node.
fn scope_metadata(fcx: &FunctionContext,
node_id: ast::NodeId,
error_reporting_span: Span)
-> DIScope {
let scope_map = &fcx.debug_context
.get_ref(fcx.ccx, error_reporting_span)
.scope_map;
match scope_map.borrow().get(&node_id).cloned() {
Some(scope_metadata) => scope_metadata,
None => {
let node = fcx.ccx.tcx().map.get(node_id);
fcx.ccx.sess().span_bug(error_reporting_span,
&format!("debuginfo: Could not find scope info for node {:?}",
node)[]);
}
}
}
fn diverging_type_metadata(cx: &CrateContext) -> DIType {
unsafe {
llvm::LLVMDIBuilderCreateBasicType(
DIB(cx),
"!\0".as_ptr() as *const _,
bytes_to_bits(0),
bytes_to_bits(0),
DW_ATE_unsigned)
}
}
fn basic_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>) -> DIType {
debug!("basic_type_metadata: {:?}", t);
let (name, encoding) = match t.sty {
ty::ty_tup(ref elements) if elements.is_empty() =>
("()".to_string(), DW_ATE_unsigned),
ty::ty_bool => ("bool".to_string(), DW_ATE_boolean),
ty::ty_char => ("char".to_string(), DW_ATE_unsigned_char),
ty::ty_int(int_ty) => match int_ty {
ast::TyIs(_) => ("isize".to_string(), DW_ATE_signed),
ast::TyI8 => ("i8".to_string(), DW_ATE_signed),
ast::TyI16 => ("i16".to_string(), DW_ATE_signed),
ast::TyI32 => ("i32".to_string(), DW_ATE_signed),
ast::TyI64 => ("i64".to_string(), DW_ATE_signed)
},
ty::ty_uint(uint_ty) => match uint_ty {
ast::TyUs(_) => ("usize".to_string(), DW_ATE_unsigned),
ast::TyU8 => ("u8".to_string(), DW_ATE_unsigned),
ast::TyU16 => ("u16".to_string(), DW_ATE_unsigned),
ast::TyU32 => ("u32".to_string(), DW_ATE_unsigned),
ast::TyU64 => ("u64".to_string(), DW_ATE_unsigned)
},
ty::ty_float(float_ty) => match float_ty {
ast::TyF32 => ("f32".to_string(), DW_ATE_float),
ast::TyF64 => ("f64".to_string(), DW_ATE_float),
},
_ => cx.sess().bug("debuginfo::basic_type_metadata - t is invalid type")
};
let llvm_type = type_of::type_of(cx, t);
let (size, align) = size_and_align_of(cx, llvm_type);
let name = CString::from_slice(name.as_bytes());
let ty_metadata = unsafe {
llvm::LLVMDIBuilderCreateBasicType(
DIB(cx),
name.as_ptr(),
bytes_to_bits(size),
bytes_to_bits(align),
encoding)
};
return ty_metadata;
}
fn pointer_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
pointer_type: Ty<'tcx>,
pointee_type_metadata: DIType)
-> DIType {
let pointer_llvm_type = type_of::type_of(cx, pointer_type);
let (pointer_size, pointer_align) = size_and_align_of(cx, pointer_llvm_type);
let name = compute_debuginfo_type_name(cx, pointer_type, false);
let name = CString::from_slice(name.as_bytes());
let ptr_metadata = unsafe {
llvm::LLVMDIBuilderCreatePointerType(
DIB(cx),
pointee_type_metadata,
bytes_to_bits(pointer_size),
bytes_to_bits(pointer_align),
name.as_ptr())
};
return ptr_metadata;
}
//=-----------------------------------------------------------------------------
// Common facilities for record-like types (structs, enums, tuples)
//=-----------------------------------------------------------------------------
enum MemberOffset {
FixedMemberOffset { bytes: uint },
// For ComputedMemberOffset, the offset is read from the llvm type definition
ComputedMemberOffset
}
// Description of a type member, which can either be a regular field (as in
// structs or tuples) or an enum variant
struct MemberDescription {
name: String,
llvm_type: Type,
type_metadata: DIType,
offset: MemberOffset,
flags: c_uint
}
// A factory for MemberDescriptions. It produces a list of member descriptions
// for some record-like type. MemberDescriptionFactories are used to defer the
// creation of type member descriptions in order to break cycles arising from
// recursive type definitions.
enum MemberDescriptionFactory<'tcx> {
StructMDF(StructMemberDescriptionFactory<'tcx>),
TupleMDF(TupleMemberDescriptionFactory<'tcx>),
EnumMDF(EnumMemberDescriptionFactory<'tcx>),
VariantMDF(VariantMemberDescriptionFactory<'tcx>)
}
impl<'tcx> MemberDescriptionFactory<'tcx> {
fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-> Vec<MemberDescription> {
match *self {
StructMDF(ref this) => {
this.create_member_descriptions(cx)
}
TupleMDF(ref this) => {
this.create_member_descriptions(cx)
}
EnumMDF(ref this) => {
this.create_member_descriptions(cx)
}
VariantMDF(ref this) => {
this.create_member_descriptions(cx)
}
}
}
}
// A description of some recursive type. It is either already finished (as
// with FinalMetadata) or not yet finished but contains all the information
// needed to generate the missing parts of the description. See the documentation
// section on Recursive Types at the top of this file for more information.
enum RecursiveTypeDescription<'tcx> {
UnfinishedMetadata {
unfinished_type: Ty<'tcx>,
unique_type_id: UniqueTypeId,
metadata_stub: DICompositeType,
llvm_type: Type,
member_description_factory: MemberDescriptionFactory<'tcx>,
},
FinalMetadata(DICompositeType)
}
fn create_and_register_recursive_type_forward_declaration<'a, 'tcx>(
cx: &CrateContext<'a, 'tcx>,
unfinished_type: Ty<'tcx>,
unique_type_id: UniqueTypeId,
metadata_stub: DICompositeType,
llvm_type: Type,
member_description_factory: MemberDescriptionFactory<'tcx>)
-> RecursiveTypeDescription<'tcx> {
// Insert the stub into the TypeMap in order to allow for recursive references
let mut type_map = debug_context(cx).type_map.borrow_mut();
type_map.register_unique_id_with_metadata(cx, unique_type_id, metadata_stub);
type_map.register_type_with_metadata(cx, unfinished_type, metadata_stub);
UnfinishedMetadata {
unfinished_type: unfinished_type,
unique_type_id: unique_type_id,
metadata_stub: metadata_stub,
llvm_type: llvm_type,
member_description_factory: member_description_factory,
}
}
impl<'tcx> RecursiveTypeDescription<'tcx> {
// Finishes up the description of the type in question (mostly by providing
// descriptions of the fields of the given type) and returns the final type metadata.
fn finalize<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> MetadataCreationResult {
match *self {
FinalMetadata(metadata) => MetadataCreationResult::new(metadata, false),
UnfinishedMetadata {
unfinished_type,
unique_type_id,
metadata_stub,
llvm_type,
ref member_description_factory,
..
} => {
// Make sure that we have a forward declaration of the type in
// the TypeMap so that recursive references are possible. This
// will always be the case if the RecursiveTypeDescription has
// been properly created through the
// create_and_register_recursive_type_forward_declaration() function.
{
let type_map = debug_context(cx).type_map.borrow();
if type_map.find_metadata_for_unique_id(unique_type_id).is_none() ||
type_map.find_metadata_for_type(unfinished_type).is_none() {
cx.sess().bug(&format!("Forward declaration of potentially recursive type \
'{}' was not found in TypeMap!",
ppaux::ty_to_string(cx.tcx(), unfinished_type))
[]);
}
}
// ... then create the member descriptions ...
let member_descriptions =
member_description_factory.create_member_descriptions(cx);
// ... and attach them to the stub to complete it.
set_members_of_composite_type(cx,
metadata_stub,
llvm_type,
&member_descriptions[]);
return MetadataCreationResult::new(metadata_stub, true);
}
}
}
}
//=-----------------------------------------------------------------------------
// Structs
//=-----------------------------------------------------------------------------
// Creates MemberDescriptions for the fields of a struct
struct StructMemberDescriptionFactory<'tcx> {
fields: Vec<ty::field<'tcx>>,
is_simd: bool,
span: Span,
}
impl<'tcx> StructMemberDescriptionFactory<'tcx> {
fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-> Vec<MemberDescription> {
if self.fields.len() == 0 {
return Vec::new();
}
let field_size = if self.is_simd {
machine::llsize_of_alloc(cx, type_of::type_of(cx, self.fields[0].mt.ty)) as uint
} else {
0xdeadbeef
};
self.fields.iter().enumerate().map(|(i, field)| {
let name = if field.name == special_idents::unnamed_field.name {
"".to_string()
} else {
token::get_name(field.name).to_string()
};
let offset = if self.is_simd {
assert!(field_size != 0xdeadbeef);
FixedMemberOffset { bytes: i * field_size }
} else {
ComputedMemberOffset
};
MemberDescription {
name: name,
llvm_type: type_of::type_of(cx, field.mt.ty),
type_metadata: type_metadata(cx, field.mt.ty, self.span),
offset: offset,
flags: FLAGS_NONE,
}
}).collect()
}
}
fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
struct_type: Ty<'tcx>,
def_id: ast::DefId,
substs: &subst::Substs<'tcx>,
unique_type_id: UniqueTypeId,
span: Span)
-> RecursiveTypeDescription<'tcx> {
let struct_name = compute_debuginfo_type_name(cx, struct_type, false);
let struct_llvm_type = type_of::type_of(cx, struct_type);
let (containing_scope, _) = get_namespace_and_span_for_item(cx, def_id);
let struct_metadata_stub = create_struct_stub(cx,
struct_llvm_type,
&struct_name[],
unique_type_id,
containing_scope);
let mut fields = ty::struct_fields(cx.tcx(), def_id, substs);
// The `Ty` values returned by `ty::struct_fields` can still contain
// `ty_projection` variants, so normalize those away.
for field in &mut fields {
field.mt.ty = monomorphize::normalize_associated_type(cx.tcx(), &field.mt.ty);
}
create_and_register_recursive_type_forward_declaration(
cx,
struct_type,
unique_type_id,
struct_metadata_stub,
struct_llvm_type,
StructMDF(StructMemberDescriptionFactory {
fields: fields,
is_simd: ty::type_is_simd(cx.tcx(), struct_type),
span: span,
})
)
}
//=-----------------------------------------------------------------------------
// Tuples
//=-----------------------------------------------------------------------------
// Creates MemberDescriptions for the fields of a tuple
struct TupleMemberDescriptionFactory<'tcx> {
component_types: Vec<Ty<'tcx>>,
span: Span,
}
impl<'tcx> TupleMemberDescriptionFactory<'tcx> {
fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-> Vec<MemberDescription> {
self.component_types.iter().map(|&component_type| {
MemberDescription {
name: "".to_string(),
llvm_type: type_of::type_of(cx, component_type),
type_metadata: type_metadata(cx, component_type, self.span),
offset: ComputedMemberOffset,
flags: FLAGS_NONE,
}
}).collect()
}
}
fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
tuple_type: Ty<'tcx>,
component_types: &[Ty<'tcx>],
unique_type_id: UniqueTypeId,
span: Span)
-> RecursiveTypeDescription<'tcx> {
let tuple_name = compute_debuginfo_type_name(cx, tuple_type, false);
let tuple_llvm_type = type_of::type_of(cx, tuple_type);
create_and_register_recursive_type_forward_declaration(
cx,
tuple_type,
unique_type_id,
create_struct_stub(cx,
tuple_llvm_type,
&tuple_name[],
unique_type_id,
UNKNOWN_SCOPE_METADATA),
tuple_llvm_type,
TupleMDF(TupleMemberDescriptionFactory {
component_types: component_types.to_vec(),
span: span,
})
)
}
//=-----------------------------------------------------------------------------
// Enums
//=-----------------------------------------------------------------------------
// Describes the members of an enum value: An enum is described as a union of
// structs in DWARF. This MemberDescriptionFactory provides the description for
// the members of this union; so for every variant of the given enum, this factory
// will produce one MemberDescription (all with no name and a fixed offset of
// zero bytes).
struct EnumMemberDescriptionFactory<'tcx> {
enum_type: Ty<'tcx>,
type_rep: Rc<adt::Repr<'tcx>>,
variants: Rc<Vec<Rc<ty::VariantInfo<'tcx>>>>,
discriminant_type_metadata: Option<DIType>,
containing_scope: DIScope,
file_metadata: DIFile,
span: Span,
}
impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-> Vec<MemberDescription> {
match *self.type_rep {
adt::General(_, ref struct_defs, _) => {
let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata
.expect(""));
struct_defs
.iter()
.enumerate()
.map(|(i, struct_def)| {
let (variant_type_metadata,
variant_llvm_type,
member_desc_factory) =
describe_enum_variant(cx,
self.enum_type,
struct_def,
&*(*self.variants)[i],
discriminant_info,
self.containing_scope,
self.span);
let member_descriptions = member_desc_factory
.create_member_descriptions(cx);
set_members_of_composite_type(cx,
variant_type_metadata,
variant_llvm_type,
&member_descriptions[]);
MemberDescription {
name: "".to_string(),
llvm_type: variant_llvm_type,
type_metadata: variant_type_metadata,
offset: FixedMemberOffset { bytes: 0 },
flags: FLAGS_NONE
}
}).collect()
},
adt::Univariant(ref struct_def, _) => {
assert!(self.variants.len() <= 1);
if self.variants.len() == 0 {
vec![]
} else {
let (variant_type_metadata,
variant_llvm_type,
member_description_factory) =
describe_enum_variant(cx,
self.enum_type,
struct_def,
&*(*self.variants)[0],
NoDiscriminant,
self.containing_scope,
self.span);
let member_descriptions =
member_description_factory.create_member_descriptions(cx);
set_members_of_composite_type(cx,
variant_type_metadata,
variant_llvm_type,
&member_descriptions[]);
vec![
MemberDescription {
name: "".to_string(),
llvm_type: variant_llvm_type,
type_metadata: variant_type_metadata,
offset: FixedMemberOffset { bytes: 0 },
flags: FLAGS_NONE
}
]
}
}
adt::RawNullablePointer { nndiscr: non_null_variant_index, nnty, .. } => {
// As far as debuginfo is concerned, the pointer this enum
// represents is still wrapped in a struct. This is to make the
// DWARF representation of enums uniform.
// First create a description of the artificial wrapper struct:
let non_null_variant = &(*self.variants)[non_null_variant_index as uint];
let non_null_variant_name = token::get_name(non_null_variant.name);
// The llvm type and metadata of the pointer
let non_null_llvm_type = type_of::type_of(cx, nnty);
let non_null_type_metadata = type_metadata(cx, nnty, self.span);
// The type of the artificial struct wrapping the pointer
let artificial_struct_llvm_type = Type::struct_(cx,
&[non_null_llvm_type],
false);
// For the metadata of the wrapper struct, we need to create a
// MemberDescription of the struct's single field.
let sole_struct_member_description = MemberDescription {
name: match non_null_variant.arg_names {
Some(ref names) => token::get_ident(names[0]).to_string(),
None => "".to_string()
},
llvm_type: non_null_llvm_type,
type_metadata: non_null_type_metadata,
offset: FixedMemberOffset { bytes: 0 },
flags: FLAGS_NONE
};
let unique_type_id = debug_context(cx).type_map
.borrow_mut()
.get_unique_type_id_of_enum_variant(
cx,
self.enum_type,
&non_null_variant_name);
// Now we can create the metadata of the artificial struct
let artificial_struct_metadata =
composite_type_metadata(cx,
artificial_struct_llvm_type,
&non_null_variant_name,
unique_type_id,
&[sole_struct_member_description],
self.containing_scope,
self.file_metadata,
codemap::DUMMY_SP);
// Encode the information about the null variant in the union
// member's name.
let null_variant_index = (1 - non_null_variant_index) as uint;
let null_variant_name = token::get_name((*self.variants)[null_variant_index].name);
let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
0,
null_variant_name);
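                // Illustrative: for `Option<&u32>` this yields the member name
                // "RUST$ENCODED$ENUM$0$None" -- field 0 holds the
                // discriminating pointer and `None` is the variant encoded by
                // a null value.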
// Finally create the (singleton) list of descriptions of union
// members.
vec![
MemberDescription {
name: union_member_name,
llvm_type: artificial_struct_llvm_type,
type_metadata: artificial_struct_metadata,
offset: FixedMemberOffset { bytes: 0 },
flags: FLAGS_NONE
}
]
},
adt::StructWrappedNullablePointer { nonnull: ref struct_def,
nndiscr,
ref discrfield, ..} => {
// Create a description of the non-null variant
let (variant_type_metadata, variant_llvm_type, member_description_factory) =
describe_enum_variant(cx,
self.enum_type,
struct_def,
&*(*self.variants)[nndiscr as uint],
OptimizedDiscriminant,
self.containing_scope,
self.span);
let variant_member_descriptions =
member_description_factory.create_member_descriptions(cx);
set_members_of_composite_type(cx,
variant_type_metadata,
variant_llvm_type,
&variant_member_descriptions[]);
// Encode the information about the null variant in the union
// member's name.
let null_variant_index = (1 - nndiscr) as uint;
let null_variant_name = token::get_name((*self.variants)[null_variant_index].name);
let discrfield = discrfield.iter()
.skip(1)
.map(|x| x.to_string())
.collect::<Vec<_>>().connect("$");
let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
discrfield,
null_variant_name);
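                // Illustrative: the leading number is here replaced by the
                // '$'-separated path of field indices leading to the
                // discriminating pointer, e.g. "RUST$ENCODED$ENUM$0$1$None"
                // (indices hypothetical).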
// Create the (singleton) list of descriptions of union members.
vec![
MemberDescription {
name: union_member_name,
llvm_type: variant_llvm_type,
type_metadata: variant_type_metadata,
offset: FixedMemberOffset { bytes: 0 },
flags: FLAGS_NONE
}
]
},
adt::CEnum(..) => cx.sess().span_bug(self.span, "This should be unreachable.")
}
}
}
// Creates MemberDescriptions for the fields of a single enum variant.
struct VariantMemberDescriptionFactory<'tcx> {
args: Vec<(String, Ty<'tcx>)>,
discriminant_type_metadata: Option<DIType>,
span: Span,
}
impl<'tcx> VariantMemberDescriptionFactory<'tcx> {
fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-> Vec<MemberDescription> {
self.args.iter().enumerate().map(|(i, &(ref name, ty))| {
MemberDescription {
name: name.to_string(),
llvm_type: type_of::type_of(cx, ty),
type_metadata: match self.discriminant_type_metadata {
Some(metadata) if i == 0 => metadata,
_ => type_metadata(cx, ty, self.span)
},
offset: ComputedMemberOffset,
flags: FLAGS_NONE
}
}).collect()
}
}
#[derive(Copy)]
enum EnumDiscriminantInfo {
RegularDiscriminant(DIType),
OptimizedDiscriminant,
NoDiscriminant
}
// Returns a tuple of (1) type_metadata_stub of the variant, (2) the llvm_type
// of the variant, and (3) a MemberDescriptionFactory for producing the
// descriptions of the fields of the variant. This is a rudimentary version of a
// full RecursiveTypeDescription.
fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
enum_type: Ty<'tcx>,
struct_def: &adt::Struct<'tcx>,
variant_info: &ty::VariantInfo<'tcx>,
discriminant_info: EnumDiscriminantInfo,
containing_scope: DIScope,
span: Span)
-> (DICompositeType, Type, MemberDescriptionFactory<'tcx>) {
let variant_llvm_type =
Type::struct_(cx, &struct_def.fields
.iter()
.map(|&t| type_of::type_of(cx, t))
.collect::<Vec<_>>()
[],
struct_def.packed);
// Could do some consistency checks here: size, align, field count, discr type
let variant_name = token::get_name(variant_info.name);
let variant_name = &variant_name;
let unique_type_id = debug_context(cx).type_map
.borrow_mut()
.get_unique_type_id_of_enum_variant(
cx,
enum_type,
variant_name);
let metadata_stub = create_struct_stub(cx,
variant_llvm_type,
variant_name,
unique_type_id,
containing_scope);
// Get the argument names from the enum variant info
let mut arg_names: Vec<_> = match variant_info.arg_names {
Some(ref names) => {
names.iter()
.map(|ident| {
token::get_ident(*ident).to_string()
}).collect()
}
None => variant_info.args.iter().map(|_| "".to_string()).collect()
};
// If this is not a univariant enum, there is also the discriminant field.
match discriminant_info {
RegularDiscriminant(_) => arg_names.insert(0, "RUST$ENUM$DISR".to_string()),
_ => { /* do nothing */ }
};
// Build an array of (field name, field type) pairs to be captured in the factory closure.
let args: Vec<(String, Ty)> = arg_names.iter()
.zip(struct_def.fields.iter())
.map(|(s, &t)| (s.to_string(), t))
.collect();
let member_description_factory =
VariantMDF(VariantMemberDescriptionFactory {
args: args,
discriminant_type_metadata: match discriminant_info {
RegularDiscriminant(discriminant_type_metadata) => {
Some(discriminant_type_metadata)
}
_ => None
},
span: span,
});
(metadata_stub, variant_llvm_type, member_description_factory)
}
fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
enum_type: Ty<'tcx>,
enum_def_id: ast::DefId,
unique_type_id: UniqueTypeId,
span: Span)
-> RecursiveTypeDescription<'tcx> {
let enum_name = compute_debuginfo_type_name(cx, enum_type, false);
let (containing_scope, definition_span) = get_namespace_and_span_for_item(cx, enum_def_id);
let loc = span_start(cx, definition_span);
let file_metadata = file_metadata(cx, &loc.file.name[]);
let variants = ty::enum_variants(cx.tcx(), enum_def_id);
let enumerators_metadata: Vec<DIDescriptor> = variants
.iter()
.map(|v| {
let token = token::get_name(v.name);
let name = CString::from_slice(token.as_bytes());
unsafe {
llvm::LLVMDIBuilderCreateEnumerator(
DIB(cx),
name.as_ptr(),
v.disr_val as u64)
}
})
.collect();
let discriminant_type_metadata = |inttype| {
// We can reuse the type of the discriminant for all monomorphized
// instances of an enum because it doesn't depend on any type parameters.
        // The def_id, which uniquely identifies the enum's polytype, acts as
        // the key in this cache.
let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types
.borrow()
.get(&enum_def_id).cloned();
match cached_discriminant_type_metadata {
Some(discriminant_type_metadata) => discriminant_type_metadata,
None => {
let discriminant_llvm_type = adt::ll_inttype(cx, inttype);
let (discriminant_size, discriminant_align) =
size_and_align_of(cx, discriminant_llvm_type);
let discriminant_base_type_metadata =
type_metadata(cx,
adt::ty_of_inttype(cx.tcx(), inttype),
codemap::DUMMY_SP);
let discriminant_name = get_enum_discriminant_name(cx, enum_def_id);
let name = CString::from_slice(discriminant_name.as_bytes());
let discriminant_type_metadata = unsafe {
llvm::LLVMDIBuilderCreateEnumerationType(
DIB(cx),
containing_scope,
name.as_ptr(),
UNKNOWN_FILE_METADATA,
UNKNOWN_LINE_NUMBER,
bytes_to_bits(discriminant_size),
bytes_to_bits(discriminant_align),
create_DIArray(DIB(cx), &enumerators_metadata),
discriminant_base_type_metadata)
};
debug_context(cx).created_enum_disr_types
.borrow_mut()
.insert(enum_def_id, discriminant_type_metadata);
discriminant_type_metadata
}
}
};
let type_rep = adt::represent_type(cx, enum_type);
let discriminant_type_metadata = match *type_rep {
adt::CEnum(inttype, _, _) => {
return FinalMetadata(discriminant_type_metadata(inttype))
},
adt::RawNullablePointer { .. } |
adt::StructWrappedNullablePointer { .. } |
adt::Univariant(..) => None,
adt::General(inttype, _, _) => Some(discriminant_type_metadata(inttype)),
};
let enum_llvm_type = type_of::type_of(cx, enum_type);
let (enum_type_size, enum_type_align) = size_and_align_of(cx, enum_llvm_type);
let unique_type_id_str = debug_context(cx)
.type_map
.borrow()
.get_unique_type_id_as_string(unique_type_id);
let enum_name = CString::from_slice(enum_name.as_bytes());
let unique_type_id_str = CString::from_slice(unique_type_id_str.as_bytes());
let enum_metadata = unsafe {
llvm::LLVMDIBuilderCreateUnionType(
DIB(cx),
containing_scope,
enum_name.as_ptr(),
UNKNOWN_FILE_METADATA,
UNKNOWN_LINE_NUMBER,
bytes_to_bits(enum_type_size),
bytes_to_bits(enum_type_align),
0, // Flags
ptr::null_mut(),
0, // RuntimeLang
unique_type_id_str.as_ptr())
};
return create_and_register_recursive_type_forward_declaration(
cx,
enum_type,
unique_type_id,
enum_metadata,
enum_llvm_type,
EnumMDF(EnumMemberDescriptionFactory {
enum_type: enum_type,
type_rep: type_rep.clone(),
variants: variants,
discriminant_type_metadata: discriminant_type_metadata,
containing_scope: containing_scope,
file_metadata: file_metadata,
span: span,
}),
);
fn get_enum_discriminant_name(cx: &CrateContext,
def_id: ast::DefId)
-> token::InternedString {
let name = if def_id.krate == ast::LOCAL_CRATE {
cx.tcx().map.get_path_elem(def_id.node).name()
} else {
csearch::get_item_path(cx.tcx(), def_id).last().unwrap().name()
};
token::get_name(name)
}
}
/// Creates debug information for a composite type, that is, anything that
/// results in a LLVM struct.
///
/// Examples of Rust types to use this are: structs, tuples, boxes, vecs, and enums.
fn composite_type_metadata(cx: &CrateContext,
composite_llvm_type: Type,
composite_type_name: &str,
composite_type_unique_id: UniqueTypeId,
member_descriptions: &[MemberDescription],
containing_scope: DIScope,
// Ignore source location information as long as it
// can't be reconstructed for non-local crates.
_file_metadata: DIFile,
_definition_span: Span)
-> DICompositeType {
// Create the (empty) struct metadata node ...
let composite_type_metadata = create_struct_stub(cx,
composite_llvm_type,
composite_type_name,
composite_type_unique_id,
containing_scope);
// ... and immediately create and add the member descriptions.
set_members_of_composite_type(cx,
composite_type_metadata,
composite_llvm_type,
member_descriptions);
return composite_type_metadata;
}
fn set_members_of_composite_type(cx: &CrateContext,
composite_type_metadata: DICompositeType,
composite_llvm_type: Type,
member_descriptions: &[MemberDescription]) {
// In some rare cases LLVM metadata uniquing would lead to an existing type
// description being used instead of a new one created in create_struct_stub.
// This would cause a hard to trace assertion in DICompositeType::SetTypeArray().
// The following check makes sure that we get a better error message if this
// should happen again due to some regression.
{
let mut composite_types_completed =
debug_context(cx).composite_types_completed.borrow_mut();
if composite_types_completed.contains(&composite_type_metadata) {
let (llvm_version_major, llvm_version_minor) = unsafe {
(llvm::LLVMVersionMajor(), llvm::LLVMVersionMinor())
};
let actual_llvm_version = llvm_version_major * 1000000 + llvm_version_minor * 1000;
let min_supported_llvm_version = 3 * 1000000 + 4 * 1000;
if actual_llvm_version < min_supported_llvm_version {
cx.sess().warn(&format!("This version of rustc was built with LLVM \
{}.{}. Rustc just ran into a known \
debuginfo corruption problem that often \
occurs with LLVM versions below 3.4. \
Please use a rustc built with a newer \
version of LLVM.",
llvm_version_major,
llvm_version_minor)[]);
} else {
cx.sess().bug("debuginfo::set_members_of_composite_type() - \
Already completed forward declaration re-encountered.");
}
} else {
composite_types_completed.insert(composite_type_metadata);
}
}
let member_metadata: Vec<DIDescriptor> = member_descriptions
.iter()
.enumerate()
.map(|(i, member_description)| {
let (member_size, member_align) = size_and_align_of(cx, member_description.llvm_type);
let member_offset = match member_description.offset {
FixedMemberOffset { bytes } => bytes as u64,
ComputedMemberOffset => machine::llelement_offset(cx, composite_llvm_type, i)
};
let member_name = CString::from_slice(member_description.name.as_bytes());
unsafe {
llvm::LLVMDIBuilderCreateMemberType(
DIB(cx),
composite_type_metadata,
member_name.as_ptr(),
UNKNOWN_FILE_METADATA,
UNKNOWN_LINE_NUMBER,
bytes_to_bits(member_size),
bytes_to_bits(member_align),
bytes_to_bits(member_offset),
member_description.flags,
member_description.type_metadata)
}
})
.collect();
unsafe {
let type_array = create_DIArray(DIB(cx), &member_metadata[]);
llvm::LLVMDICompositeTypeSetTypeArray(DIB(cx), composite_type_metadata, type_array);
}
}
// A convenience wrapper around LLVMDIBuilderCreateStructType(). Does not do any
// caching, does not add any fields to the struct. This can be done later with
// set_members_of_composite_type().
fn create_struct_stub(cx: &CrateContext,
struct_llvm_type: Type,
struct_type_name: &str,
unique_type_id: UniqueTypeId,
containing_scope: DIScope)
-> DICompositeType {
let (struct_size, struct_align) = size_and_align_of(cx, struct_llvm_type);
let unique_type_id_str = debug_context(cx).type_map
.borrow()
.get_unique_type_id_as_string(unique_type_id);
let name = CString::from_slice(struct_type_name.as_bytes());
let unique_type_id = CString::from_slice(unique_type_id_str.as_bytes());
let metadata_stub = unsafe {
// LLVMDIBuilderCreateStructType() wants an empty array. A null
// pointer will lead to hard to trace and debug LLVM assertions
// later on in llvm/lib/IR/Value.cpp.
let empty_array = create_DIArray(DIB(cx), &[]);
llvm::LLVMDIBuilderCreateStructType(
DIB(cx),
containing_scope,
name.as_ptr(),
UNKNOWN_FILE_METADATA,
UNKNOWN_LINE_NUMBER,
bytes_to_bits(struct_size),
bytes_to_bits(struct_align),
0,
ptr::null_mut(),
empty_array,
0,
ptr::null_mut(),
unique_type_id.as_ptr())
};
return metadata_stub;
}
fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
unique_type_id: UniqueTypeId,
element_type: Ty<'tcx>,
len: Option<u64>,
span: Span)
-> MetadataCreationResult {
let element_type_metadata = type_metadata(cx, element_type, span);
return_if_metadata_created_in_meantime!(cx, unique_type_id);
let element_llvm_type = type_of::type_of(cx, element_type);
let (element_type_size, element_type_align) = size_and_align_of(cx, element_llvm_type);
let (array_size_in_bytes, upper_bound) = match len {
Some(len) => (element_type_size * len, len as c_longlong),
None => (0, -1)
};
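// For an unknown length (e.g. the contents of `str`) the subrange below
// gets an upper bound of -1, which DIBuilder presumably interprets as an
// unknown element count; the array size is then reported as zero bytes.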
let subrange = unsafe {
llvm::LLVMDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound)
};
let subscripts = create_DIArray(DIB(cx), &[subrange]);
let metadata = unsafe {
llvm::LLVMDIBuilderCreateArrayType(
DIB(cx),
bytes_to_bits(array_size_in_bytes),
bytes_to_bits(element_type_align),
element_type_metadata,
subscripts)
};
return MetadataCreationResult::new(metadata, false);
}
fn vec_slice_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
vec_type: Ty<'tcx>,
element_type: Ty<'tcx>,
unique_type_id: UniqueTypeId,
span: Span)
-> MetadataCreationResult {
let data_ptr_type = ty::mk_ptr(cx.tcx(), ty::mt {
ty: element_type,
mutbl: ast::MutImmutable
});
let element_type_metadata = type_metadata(cx, data_ptr_type, span);
return_if_metadata_created_in_meantime!(cx, unique_type_id);
let slice_llvm_type = type_of::type_of(cx, vec_type);
let slice_type_name = compute_debuginfo_type_name(cx, vec_type, true);
let member_llvm_types = slice_llvm_type.field_types();
assert!(slice_layout_is_correct(cx,
&member_llvm_types[],
element_type));
let member_descriptions = [
MemberDescription {
name: "data_ptr".to_string(),
llvm_type: member_llvm_types[0],
type_metadata: element_type_metadata,
offset: ComputedMemberOffset,
flags: FLAGS_NONE
},
MemberDescription {
name: "length".to_string(),
llvm_type: member_llvm_types[1],
type_metadata: type_metadata(cx, cx.tcx().types.uint, span),
offset: ComputedMemberOffset,
flags: FLAGS_NONE
},
];
assert!(member_descriptions.len() == member_llvm_types.len());
let loc = span_start(cx, span);
let file_metadata = file_metadata(cx, &loc.file.name[]);
let metadata = composite_type_metadata(cx,
slice_llvm_type,
&slice_type_name[],
unique_type_id,
&member_descriptions,
UNKNOWN_SCOPE_METADATA,
file_metadata,
span);
return MetadataCreationResult::new(metadata, false);
fn slice_layout_is_correct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
member_llvm_types: &[Type],
element_type: Ty<'tcx>)
-> bool {
member_llvm_types.len() == 2 &&
member_llvm_types[0] == type_of::type_of(cx, element_type).ptr_to() &&
member_llvm_types[1] == cx.int_type()
}
}
fn subroutine_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
unique_type_id: UniqueTypeId,
signature: &ty::PolyFnSig<'tcx>,
span: Span)
-> MetadataCreationResult
{
let signature = ty::erase_late_bound_regions(cx.tcx(), signature);
let mut signature_metadata: Vec<DIType> = Vec::with_capacity(signature.inputs.len() + 1);
// return type
signature_metadata.push(match signature.output {
ty::FnConverging(ret_ty) => match ret_ty.sty {
ty::ty_tup(ref tys) if tys.is_empty() => ptr::null_mut(),
_ => type_metadata(cx, ret_ty, span)
},
ty::FnDiverging => diverging_type_metadata(cx)
});
// regular arguments
for &argument_type in &signature.inputs {
signature_metadata.push(type_metadata(cx, argument_type, span));
}
return_if_metadata_created_in_meantime!(cx, unique_type_id);
return MetadataCreationResult::new(
unsafe {
llvm::LLVMDIBuilderCreateSubroutineType(
DIB(cx),
UNKNOWN_FILE_METADATA,
create_DIArray(DIB(cx), &signature_metadata[]))
},
false);
}
// FIXME(1563) This is all a bit of a hack because 'trait pointer' is an ill-
// defined concept. For the case of an actual trait pointer (i.e., Box<Trait>,
// &Trait), trait_object_type should be the whole thing (e.g., Box<Trait>) and
// trait_type should be the actual trait (e.g., Trait). Where the trait is part
// of a DST struct, there is no trait_object_type and the results of this
// function will be a little bit weird.
fn trait_pointer_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
trait_type: Ty<'tcx>,
trait_object_type: Option<Ty<'tcx>>,
unique_type_id: UniqueTypeId)
-> DIType {
// The implementation provided here is a stub. It makes sure that the trait
// type is assigned the correct name, size, namespace, and source location.
// But it does not describe the trait's methods.
let def_id = match trait_type.sty {
ty::ty_trait(ref data) => data.principal_def_id(),
_ => {
let pp_type_name = ppaux::ty_to_string(cx.tcx(), trait_type);
cx.sess().bug(&format!("debuginfo: Unexpected trait-object type in \
trait_pointer_metadata(): {}",
&pp_type_name[])[]);
}
};
let trait_object_type = trait_object_type.unwrap_or(trait_type);
let trait_type_name =
compute_debuginfo_type_name(cx, trait_object_type, false);
let (containing_scope, _) = get_namespace_and_span_for_item(cx, def_id);
let trait_llvm_type = type_of::type_of(cx, trait_object_type);
composite_type_metadata(cx,
trait_llvm_type,
&trait_type_name[],
unique_type_id,
&[],
containing_scope,
UNKNOWN_FILE_METADATA,
codemap::DUMMY_SP)
}
fn type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
usage_site_span: Span)
-> DIType {
// Get the unique type id of this type.
let unique_type_id = {
let mut type_map = debug_context(cx).type_map.borrow_mut();
// First, try to find the type in TypeMap. If we have seen it before, we
// can exit early here.
match type_map.find_metadata_for_type(t) {
Some(metadata) => {
return metadata;
},
None => {
// The Ty is not in the TypeMap but maybe we have already seen
// an equivalent type (e.g. only differing in region arguments).
// In order to find out, generate the unique type id and look
// that up.
let unique_type_id = type_map.get_unique_type_id_of_type(cx, t);
match type_map.find_metadata_for_unique_id(unique_type_id) {
Some(metadata) => {
// There is already an equivalent type in the TypeMap.
// Register this Ty as an alias in the cache and
// return the cached metadata.
type_map.register_type_with_metadata(cx, t, metadata);
return metadata;
},
None => {
// There really is no type metadata for this type, so
// proceed by creating it.
unique_type_id
}
}
}
}
};
debug!("type_metadata: {:?}", t);
let sty = &t.sty;
let MetadataCreationResult { metadata, already_stored_in_typemap } = match *sty {
ty::ty_bool |
ty::ty_char |
ty::ty_int(_) |
ty::ty_uint(_) |
ty::ty_float(_) => {
MetadataCreationResult::new(basic_type_metadata(cx, t), false)
}
ty::ty_tup(ref elements) if elements.is_empty() => {
MetadataCreationResult::new(basic_type_metadata(cx, t), false)
}
ty::ty_enum(def_id, _) => {
prepare_enum_metadata(cx, t, def_id, unique_type_id, usage_site_span).finalize(cx)
}
ty::ty_vec(typ, len) => {
fixed_vec_metadata(cx, unique_type_id, typ, len.map(|x| x as u64), usage_site_span)
}
ty::ty_str => {
fixed_vec_metadata(cx, unique_type_id, cx.tcx().types.i8, None, usage_site_span)
}
ty::ty_trait(..) => {
MetadataCreationResult::new(
trait_pointer_metadata(cx, t, None, unique_type_id),
false)
}
ty::ty_uniq(ty) | ty::ty_ptr(ty::mt{ty, ..}) | ty::ty_rptr(_, ty::mt{ty, ..}) => {
match ty.sty {
ty::ty_vec(typ, None) => {
vec_slice_metadata(cx, t, typ, unique_type_id, usage_site_span)
}
ty::ty_str => {
vec_slice_metadata(cx, t, cx.tcx().types.u8, unique_type_id, usage_site_span)
}
ty::ty_trait(..) => {
MetadataCreationResult::new(
trait_pointer_metadata(cx, ty, Some(t), unique_type_id),
false)
}
_ => {
let pointee_metadata = type_metadata(cx, ty, usage_site_span);
match debug_context(cx).type_map
.borrow()
.find_metadata_for_unique_id(unique_type_id) {
Some(metadata) => return metadata,
None => { /* proceed normally */ }
};
MetadataCreationResult::new(pointer_type_metadata(cx, t, pointee_metadata),
false)
}
}
}
ty::ty_bare_fn(_, ref barefnty) => {
subroutine_type_metadata(cx, unique_type_id, &barefnty.sig, usage_site_span)
}
ty::ty_closure(def_id, _, substs) => {
let typer = NormalizingClosureTyper::new(cx.tcx());
let sig = typer.closure_type(def_id, substs).sig;
subroutine_type_metadata(cx, unique_type_id, &sig, usage_site_span)
}
ty::ty_struct(def_id, substs) => {
prepare_struct_metadata(cx,
t,
def_id,
substs,
unique_type_id,
usage_site_span).finalize(cx)
}
ty::ty_tup(ref elements) => {
prepare_tuple_metadata(cx,
t,
&elements[],
unique_type_id,
usage_site_span).finalize(cx)
}
_ => {
cx.sess().bug(&format!("debuginfo: unexpected type in type_metadata: {:?}",
sty)[])
}
};
{
let mut type_map = debug_context(cx).type_map.borrow_mut();
if already_stored_in_typemap {
// Also make sure that we already have a TypeMap entry for the unique type id.
let metadata_for_uid = match type_map.find_metadata_for_unique_id(unique_type_id) {
Some(metadata) => metadata,
None => {
let unique_type_id_str =
type_map.get_unique_type_id_as_string(unique_type_id);
let error_message = format!("Expected type metadata for unique \
type id '{}' to already be in \
the debuginfo::TypeMap but it \
was not. (Ty = {})",
&unique_type_id_str[],
ppaux::ty_to_string(cx.tcx(), t));
cx.sess().span_bug(usage_site_span, &error_message[]);
}
};
match type_map.find_metadata_for_type(t) {
Some(metadata) => {
if metadata != metadata_for_uid {
let unique_type_id_str =
type_map.get_unique_type_id_as_string(unique_type_id);
let error_message = format!("Mismatch between Ty and \
UniqueTypeId maps in \
debuginfo::TypeMap. \
UniqueTypeId={}, Ty={}",
&unique_type_id_str[],
ppaux::ty_to_string(cx.tcx(), t));
cx.sess().span_bug(usage_site_span, &error_message[]);
}
}
None => {
type_map.register_type_with_metadata(cx, t, metadata);
}
}
} else {
type_map.register_type_with_metadata(cx, t, metadata);
type_map.register_unique_id_with_metadata(cx, unique_type_id, metadata);
}
}
metadata
}
struct MetadataCreationResult {
metadata: DIType,
already_stored_in_typemap: bool
}
impl MetadataCreationResult {
fn new(metadata: DIType, already_stored_in_typemap: bool) -> MetadataCreationResult {
MetadataCreationResult {
metadata: metadata,
already_stored_in_typemap: already_stored_in_typemap
}
}
}
#[derive(Copy, PartialEq)]
enum InternalDebugLocation {
KnownLocation { scope: DIScope, line: uint, col: uint },
UnknownLocation
}
impl InternalDebugLocation {
fn new(scope: DIScope, line: uint, col: uint) -> InternalDebugLocation {
KnownLocation {
scope: scope,
line: line,
col: col,
}
}
}
fn set_debug_location(cx: &CrateContext, debug_location: InternalDebugLocation) {
if debug_location == debug_context(cx).current_debug_location.get() {
return;
}
let metadata_node;
match debug_location {
KnownLocation { scope, line, .. } => {
// Always set the column to zero like Clang and GCC
let col = UNKNOWN_COLUMN_NUMBER;
debug!("setting debug location to {} {}", line, col);
unsafe {
metadata_node = llvm::LLVMDIBuilderCreateDebugLocation(
debug_context(cx).llcontext,
line as c_uint,
col as c_uint,
scope,
ptr::null_mut());
}
}
UnknownLocation => {
debug!("clearing debug location ");
metadata_node = ptr::null_mut();
}
};
unsafe {
llvm::LLVMSetCurrentDebugLocation(cx.raw_builder(), metadata_node);
}
debug_context(cx).current_debug_location.set(debug_location);
}
//=-----------------------------------------------------------------------------
// Utility Functions
//=-----------------------------------------------------------------------------
fn contains_nodebug_attribute(attributes: &[ast::Attribute]) -> bool {
attributes.iter().any(|attr| {
let meta_item: &ast::MetaItem = &*attr.node.value;
match meta_item.node {
ast::MetaWord(ref value) => &value[] == "no_debug",
_ => false
}
})
}
/// Return codemap::Loc corresponding to the beginning of the span
fn span_start(cx: &CrateContext, span: Span) -> codemap::Loc {
cx.sess().codemap().lookup_char_pos(span.lo)
}
fn size_and_align_of(cx: &CrateContext, llvm_type: Type) -> (u64, u64) {
(machine::llsize_of_alloc(cx, llvm_type), machine::llalign_of_min(cx, llvm_type) as u64)
}
fn bytes_to_bits(bytes: u64) -> u64 {
bytes * 8
}
#[inline]
fn debug_context<'a, 'tcx>(cx: &'a CrateContext<'a, 'tcx>)
-> &'a CrateDebugContext<'tcx> {
let debug_context: &'a CrateDebugContext<'tcx> = cx.dbg_cx().as_ref().unwrap();
debug_context
}
#[inline]
#[allow(non_snake_case)]
fn DIB(cx: &CrateContext) -> DIBuilderRef {
cx.dbg_cx().as_ref().unwrap().builder
}
fn fn_should_be_ignored(fcx: &FunctionContext) -> bool {
match fcx.debug_context {
FunctionDebugContext::RegularContext(_) => false,
_ => true
}
}
fn assert_type_for_node_id(cx: &CrateContext,
node_id: ast::NodeId,
error_reporting_span: Span) {
if !cx.tcx().node_types.borrow().contains_key(&node_id) {
cx.sess().span_bug(error_reporting_span,
"debuginfo: Could not find type for node id!");
}
}
fn get_namespace_and_span_for_item(cx: &CrateContext, def_id: ast::DefId)
-> (DIScope, Span) {
let containing_scope = namespace_for_item(cx, def_id).scope;
let definition_span = if def_id.krate == ast::LOCAL_CRATE {
cx.tcx().map.span(def_id.node)
} else {
// For external items there is no span information
codemap::DUMMY_SP
};
(containing_scope, definition_span)
}
// This procedure builds the *scope map* for a given function, which maps any
// given ast::NodeId in the function's AST to the correct DIScope metadata instance.
//
// This builder procedure walks the AST in execution order and keeps track of
// what belongs to which scope, creating DIScope DIEs along the way, and
// introducing *artificial* lexical scope descriptors where necessary. These
// artificial scopes allow GDB to correctly handle name shadowing.
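//
// As an illustration (hypothetical source, not from this crate):
//
//     let x = 1;          // first binding, function scope
//     let x = 2;          // an artificial scope starts here...
//     do_something(x);    // ...so `x` now resolves to the second binding
//
// Without the artificial scope, both `x` DIEs would live in the same
// lexical block and GDB could resolve the name to the wrong binding.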
fn create_scope_map(cx: &CrateContext,
args: &[ast::Arg],
fn_entry_block: &ast::Block,
fn_metadata: DISubprogram,
fn_ast_id: ast::NodeId)
-> NodeMap<DIScope> {
let mut scope_map = NodeMap();
let def_map = &cx.tcx().def_map;
struct ScopeStackEntry {
scope_metadata: DIScope,
ident: Option<ast::Ident>
}
let mut scope_stack = vec!(ScopeStackEntry { scope_metadata: fn_metadata,
ident: None });
scope_map.insert(fn_ast_id, fn_metadata);
// Push argument identifiers onto the stack so arguments integrate nicely
// with variable shadowing.
for arg in args {
pat_util::pat_bindings(def_map, &*arg.pat, |_, node_id, _, path1| {
scope_stack.push(ScopeStackEntry { scope_metadata: fn_metadata,
ident: Some(path1.node) });
scope_map.insert(node_id, fn_metadata);
})
}
// Clang creates a separate scope for function bodies, so let's do this too.
with_new_scope(cx,
fn_entry_block.span,
&mut scope_stack,
&mut scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, fn_entry_block, scope_stack, scope_map);
});
return scope_map;
// local helper functions for walking the AST.
fn with_new_scope<F>(cx: &CrateContext,
scope_span: Span,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>,
inner_walk: F) where
F: FnOnce(&CrateContext, &mut Vec<ScopeStackEntry>, &mut NodeMap<DIScope>),
{
// Create a new lexical scope and push it onto the stack
let loc = cx.sess().codemap().lookup_char_pos(scope_span.lo);
let file_metadata = file_metadata(cx, &loc.file.name[]);
let parent_scope = scope_stack.last().unwrap().scope_metadata;
let scope_metadata = unsafe {
llvm::LLVMDIBuilderCreateLexicalBlock(
DIB(cx),
parent_scope,
file_metadata,
loc.line as c_uint,
loc.col.to_usize() as c_uint)
};
scope_stack.push(ScopeStackEntry { scope_metadata: scope_metadata,
ident: None });
inner_walk(cx, scope_stack, scope_map);
// pop artificial scopes
while scope_stack.last().unwrap().ident.is_some() {
scope_stack.pop();
}
if scope_stack.last().unwrap().scope_metadata != scope_metadata {
cx.sess().span_bug(scope_span, "debuginfo: Inconsistency in scope management.");
}
scope_stack.pop();
}
fn walk_block(cx: &CrateContext,
block: &ast::Block,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
scope_map.insert(block.id, scope_stack.last().unwrap().scope_metadata);
// The interesting things here are statements and the concluding expression.
for statement in &block.stmts {
scope_map.insert(ast_util::stmt_id(&**statement),
scope_stack.last().unwrap().scope_metadata);
match statement.node {
ast::StmtDecl(ref decl, _) =>
walk_decl(cx, &**decl, scope_stack, scope_map),
ast::StmtExpr(ref exp, _) |
ast::StmtSemi(ref exp, _) =>
walk_expr(cx, &**exp, scope_stack, scope_map),
ast::StmtMac(..) => () // Ignore macros (which should be expanded anyway).
}
}
if let Some(ref exp) = block.expr {
walk_expr(cx, &**exp, scope_stack, scope_map);
}
}
fn walk_decl(cx: &CrateContext,
decl: &ast::Decl,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
match *decl {
codemap::Spanned { node: ast::DeclLocal(ref local), .. } => {
scope_map.insert(local.id, scope_stack.last().unwrap().scope_metadata);
walk_pattern(cx, &*local.pat, scope_stack, scope_map);
if let Some(ref exp) = local.init {
walk_expr(cx, &**exp, scope_stack, scope_map);
}
}
_ => ()
}
}
fn walk_pattern(cx: &CrateContext,
pat: &ast::Pat,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
let def_map = &cx.tcx().def_map;
// Unfortunately, we cannot just use pat_util::pat_bindings() or
// ast_util::walk_pat() here because we have to visit *all* nodes in
// order to put them into the scope map. The above functions don't do that.
match pat.node {
ast::PatIdent(_, ref path1, ref sub_pat_opt) => {
// Check if this is a binding. If so we need to put it on the
// scope stack and maybe introduce an artificial scope
if pat_util::pat_is_binding(def_map, &*pat) {
let ident = path1.node;
// LLVM does not properly generate 'DW_AT_start_scope' fields
// for variable DIEs. For this reason we have to introduce
// an artificial scope at bindings whenever a variable with
// the same name is declared in *any* parent scope.
//
// Otherwise the following error occurs:
//
// let x = 10;
//
// do_something(); // 'gdb print x' correctly prints 10
//
// {
// do_something(); // 'gdb print x' prints 0, because it
// // already reads the uninitialized 'x'
// // from the next line...
// let x = 100;
// do_something(); // 'gdb print x' correctly prints 100
// }
// Is there already a binding with that name?
// N.B.: this comparison must be UNhygienic... because
// gdb knows nothing about the context, so any two
// variables with the same name will cause the problem.
let need_new_scope = scope_stack
.iter()
.any(|entry| entry.ident.iter().any(|i| i.name == ident.name));
if need_new_scope {
// Create a new lexical scope and push it onto the stack
let loc = cx.sess().codemap().lookup_char_pos(pat.span.lo);
let file_metadata = file_metadata(cx, &loc.file.name[]);
let parent_scope = scope_stack.last().unwrap().scope_metadata;
let scope_metadata = unsafe {
llvm::LLVMDIBuilderCreateLexicalBlock(
DIB(cx),
parent_scope,
file_metadata,
loc.line as c_uint,
loc.col.to_usize() as c_uint)
};
scope_stack.push(ScopeStackEntry {
scope_metadata: scope_metadata,
ident: Some(ident)
});
} else {
// Push a new entry anyway so the name can be found
let prev_metadata = scope_stack.last().unwrap().scope_metadata;
scope_stack.push(ScopeStackEntry {
scope_metadata: prev_metadata,
ident: Some(ident)
});
}
}
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
if let Some(ref sub_pat) = *sub_pat_opt {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
}
ast::PatWild(_) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
}
ast::PatEnum(_, ref sub_pats_opt) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
if let Some(ref sub_pats) = *sub_pats_opt {
for p in sub_pats {
walk_pattern(cx, &**p, scope_stack, scope_map);
}
}
}
ast::PatStruct(_, ref field_pats, _) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for &codemap::Spanned {
node: ast::FieldPat { pat: ref sub_pat, .. },
..
} in field_pats.iter() {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
}
ast::PatTup(ref sub_pats) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for sub_pat in sub_pats {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
}
ast::PatBox(ref sub_pat) | ast::PatRegion(ref sub_pat, _) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
ast::PatLit(ref exp) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
walk_expr(cx, &**exp, scope_stack, scope_map);
}
ast::PatRange(ref exp1, ref exp2) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
walk_expr(cx, &**exp1, scope_stack, scope_map);
walk_expr(cx, &**exp2, scope_stack, scope_map);
}
ast::PatVec(ref front_sub_pats, ref middle_sub_pats, ref back_sub_pats) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for sub_pat in front_sub_pats {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
if let Some(ref sub_pat) = *middle_sub_pats {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
for sub_pat in back_sub_pats {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
}
ast::PatMac(_) => {
cx.sess().span_bug(pat.span, "debuginfo::create_scope_map() - \
Found unexpanded macro.");
}
}
}
fn walk_expr(cx: &CrateContext,
exp: &ast::Expr,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
scope_map.insert(exp.id, scope_stack.last().unwrap().scope_metadata);
match exp.node {
ast::ExprLit(_) |
ast::ExprBreak(_) |
ast::ExprAgain(_) |
ast::ExprPath(_) |
ast::ExprQPath(_) => {}
ast::ExprCast(ref sub_exp, _) |
ast::ExprAddrOf(_, ref sub_exp) |
ast::ExprField(ref sub_exp, _) |
ast::ExprTupField(ref sub_exp, _) |
ast::ExprParen(ref sub_exp) =>
walk_expr(cx, &**sub_exp, scope_stack, scope_map),
ast::ExprBox(ref place, ref sub_expr) => {
place.as_ref().map(
|e| walk_expr(cx, &**e, scope_stack, scope_map));
walk_expr(cx, &**sub_expr, scope_stack, scope_map);
}
ast::ExprRet(ref exp_opt) => match *exp_opt {
Some(ref sub_exp) => walk_expr(cx, &**sub_exp, scope_stack, scope_map),
None => ()
},
ast::ExprUnary(_, ref sub_exp) => {
walk_expr(cx, &**sub_exp, scope_stack, scope_map);
}
ast::ExprAssignOp(_, ref lhs, ref rhs) |
ast::ExprIndex(ref lhs, ref rhs) |
ast::ExprBinary(_, ref lhs, ref rhs) => {
walk_expr(cx, &**lhs, scope_stack, scope_map);
walk_expr(cx, &**rhs, scope_stack, scope_map);
}
ast::ExprRange(ref start, ref end) => {
start.as_ref().map(|e| walk_expr(cx, &**e, scope_stack, scope_map));
end.as_ref().map(|e| walk_expr(cx, &**e, scope_stack, scope_map));
}
ast::ExprVec(ref init_expressions) |
ast::ExprTup(ref init_expressions) => {
for ie in init_expressions {
walk_expr(cx, &**ie, scope_stack, scope_map);
}
}
ast::ExprAssign(ref sub_exp1, ref sub_exp2) |
ast::ExprRepeat(ref sub_exp1, ref sub_exp2) => {
walk_expr(cx, &**sub_exp1, scope_stack, scope_map);
walk_expr(cx, &**sub_exp2, scope_stack, scope_map);
}
ast::ExprIf(ref cond_exp, ref then_block, ref opt_else_exp) => {
walk_expr(cx, &**cond_exp, scope_stack, scope_map);
with_new_scope(cx,
then_block.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, &**then_block, scope_stack, scope_map);
});
match *opt_else_exp {
Some(ref else_exp) =>
walk_expr(cx, &**else_exp, scope_stack, scope_map),
_ => ()
}
}
ast::ExprIfLet(..) => {
cx.sess().span_bug(exp.span, "debuginfo::create_scope_map() - \
Found unexpanded if-let.");
}
ast::ExprWhile(ref cond_exp, ref loop_body, _) => {
walk_expr(cx, &**cond_exp, scope_stack, scope_map);
with_new_scope(cx,
loop_body.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, &**loop_body, scope_stack, scope_map);
})
}
ast::ExprWhileLet(..) => {
cx.sess().span_bug(exp.span, "debuginfo::create_scope_map() - \
Found unexpanded while-let.");
}
ast::ExprForLoop(..) => {
cx.sess().span_bug(exp.span, "debuginfo::create_scope_map() - \
Found unexpanded for loop.");
}
ast::ExprMac(_) => {
cx.sess().span_bug(exp.span, "debuginfo::create_scope_map() - \
Found unexpanded macro.");
}
ast::ExprLoop(ref block, _) |
ast::ExprBlock(ref block) => {
with_new_scope(cx,
block.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, &**block, scope_stack, scope_map);
})
}
ast::ExprClosure(_, ref decl, ref block) => {
with_new_scope(cx,
block.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
for &ast::Arg { pat: ref pattern, .. } in &decl.inputs {
walk_pattern(cx, &**pattern, scope_stack, scope_map);
}
walk_block(cx, &**block, scope_stack, scope_map);
})
}
ast::ExprCall(ref fn_exp, ref args) => {
walk_expr(cx, &**fn_exp, scope_stack, scope_map);
for arg_exp in args {
walk_expr(cx, &**arg_exp, scope_stack, scope_map);
}
}
ast::ExprMethodCall(_, _, ref args) => {
for arg_exp in args {
walk_expr(cx, &**arg_exp, scope_stack, scope_map);
}
}
ast::ExprMatch(ref discriminant_exp, ref arms, _) => {
walk_expr(cx, &**discriminant_exp, scope_stack, scope_map);
// For each arm we have to first walk the pattern as these might
// introduce new artificial scopes. It should be sufficient to
// walk only one pattern per arm, as they all must contain the
// same binding names.
for arm_ref in arms {
let arm_span = arm_ref.pats[0].span;
with_new_scope(cx,
arm_span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
for pat in &arm_ref.pats {
walk_pattern(cx, &**pat, scope_stack, scope_map);
}
if let Some(ref guard_exp) = arm_ref.guard {
walk_expr(cx, &**guard_exp, scope_stack, scope_map)
}
walk_expr(cx, &*arm_ref.body, scope_stack, scope_map);
})
}
}
ast::ExprStruct(_, ref fields, ref base_exp) => {
for &ast::Field { expr: ref exp, .. } in fields {
walk_expr(cx, &**exp, scope_stack, scope_map);
}
match *base_exp {
Some(ref exp) => walk_expr(cx, &**exp, scope_stack, scope_map),
None => ()
}
}
ast::ExprInlineAsm(ast::InlineAsm { ref inputs,
ref outputs,
.. }) => {
// inputs, outputs: Vec<(String, P<Expr>)>
for &(_, ref exp) in inputs {
walk_expr(cx, &**exp, scope_stack, scope_map);
}
for &(_, ref exp, _) in outputs {
walk_expr(cx, &**exp, scope_stack, scope_map);
}
}
}
}
}
//=-----------------------------------------------------------------------------
// Type Names for Debug Info
//=-----------------------------------------------------------------------------
// Compute the name of the type as it should be stored in debuginfo. Does not do
// any caching, i.e. calling the function twice with the same type will also do
// the work twice. The `qualified` parameter only affects the first level of the
// type name, further levels (i.e. type parameters) are always fully qualified.
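//
// A sketch of the expected output (paths are illustrative): for a type
// `Foo<int>` defined in `mycrate::mymod`, `qualified == false` yields
// "Foo<int>" while `qualified == true` yields "mycrate::mymod::Foo<int>";
// the type parameter is rendered fully qualified in both cases.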
fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
qualified: bool)
-> String {
let mut result = String::with_capacity(64);
push_debuginfo_type_name(cx, t, qualified, &mut result);
result
}
// Pushes the name of the type as it should be stored in debuginfo on the
// `output` String. See also compute_debuginfo_type_name().
fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
qualified: bool,
output: &mut String) {
match t.sty {
ty::ty_bool => output.push_str("bool"),
ty::ty_char => output.push_str("char"),
ty::ty_str => output.push_str("str"),
ty::ty_int(ast::TyIs(_)) => output.push_str("isize"),
ty::ty_int(ast::TyI8) => output.push_str("i8"),
ty::ty_int(ast::TyI16) => output.push_str("i16"),
ty::ty_int(ast::TyI32) => output.push_str("i32"),
ty::ty_int(ast::TyI64) => output.push_str("i64"),
ty::ty_uint(ast::TyUs(_)) => output.push_str("usize"),
ty::ty_uint(ast::TyU8) => output.push_str("u8"),
ty::ty_uint(ast::TyU16) => output.push_str("u16"),
ty::ty_uint(ast::TyU32) => output.push_str("u32"),
ty::ty_uint(ast::TyU64) => output.push_str("u64"),
ty::ty_float(ast::TyF32) => output.push_str("f32"),
ty::ty_float(ast::TyF64) => output.push_str("f64"),
ty::ty_struct(def_id, substs) |
ty::ty_enum(def_id, substs) => {
push_item_name(cx, def_id, qualified, output);
push_type_params(cx, substs, output);
},
ty::ty_tup(ref component_types) => {
output.push('(');
for &component_type in component_types {
push_debuginfo_type_name(cx, component_type, true, output);
output.push_str(", ");
}
if !component_types.is_empty() {
output.pop();
output.pop();
}
output.push(')');
},
ty::ty_uniq(inner_type) => {
output.push_str("Box<");
push_debuginfo_type_name(cx, inner_type, true, output);
output.push('>');
},
ty::ty_ptr(ty::mt { ty: inner_type, mutbl } ) => {
output.push('*');
match mutbl {
ast::MutImmutable => output.push_str("const "),
ast::MutMutable => output.push_str("mut "),
}
push_debuginfo_type_name(cx, inner_type, true, output);
},
ty::ty_rptr(_, ty::mt { ty: inner_type, mutbl }) => {
output.push('&');
if mutbl == ast::MutMutable {
output.push_str("mut ");
}
push_debuginfo_type_name(cx, inner_type, true, output);
},
ty::ty_vec(inner_type, optional_length) => {
output.push('[');
push_debuginfo_type_name(cx, inner_type, true, output);
match optional_length {
Some(len) => {
output.push_str(&format!("; {}", len));
}
None => { /* nothing to do */ }
};
output.push(']');
},
ty::ty_trait(ref trait_data) => {
let principal = ty::erase_late_bound_regions(cx.tcx(), &trait_data.principal);
push_item_name(cx, principal.def_id, false, output);
push_type_params(cx, principal.substs, output);
},
ty::ty_bare_fn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => {
if unsafety == ast::Unsafety::Unsafe {
output.push_str("unsafe ");
}
if abi != ::syntax::abi::Rust {
output.push_str("extern \"");
output.push_str(abi.name());
output.push_str("\" ");
}
output.push_str("fn(");
let sig = ty::erase_late_bound_regions(cx.tcx(), sig);
if sig.inputs.len() > 0 {
for &parameter_type in &sig.inputs {
push_debuginfo_type_name(cx, parameter_type, true, output);
output.push_str(", ");
}
output.pop();
output.pop();
}
if sig.variadic {
if sig.inputs.len() > 0 {
output.push_str(", ...");
} else {
output.push_str("...");
}
}
output.push(')');
match sig.output {
ty::FnConverging(result_type) if ty::type_is_nil(result_type) => {}
ty::FnConverging(result_type) => {
output.push_str(" -> ");
push_debuginfo_type_name(cx, result_type, true, output);
}
ty::FnDiverging => {
output.push_str(" -> !");
}
}
},
ty::ty_closure(..) => {
output.push_str("closure");
}
ty::ty_err |
ty::ty_infer(_) |
ty::ty_open(_) |
ty::ty_projection(..) |
ty::ty_param(_) => {
cx.sess().bug(&format!("debuginfo: Trying to create type name for \
unexpected type: {}", ppaux::ty_to_string(cx.tcx(), t))[]);
}
}
fn push_item_name(cx: &CrateContext,
def_id: ast::DefId,
qualified: bool,
output: &mut String) {
ty::with_path(cx.tcx(), def_id, |path| {
if qualified {
if def_id.krate == ast::LOCAL_CRATE {
output.push_str(crate_root_namespace(cx));
output.push_str("::");
}
let mut path_element_count = 0;
for path_element in path {
let name = token::get_name(path_element.name());
output.push_str(&name);
output.push_str("::");
path_element_count += 1;
}
if path_element_count == 0 {
cx.sess().bug("debuginfo: Encountered empty item path!");
}
output.pop();
output.pop();
} else {
let name = token::get_name(path.last()
.expect("debuginfo: Empty item path?")
.name());
output.push_str(&name);
}
});
}
// Pushes the type parameters in the given `Substs` to the output string.
// This ignores region parameters, since they can't reliably be
// reconstructed for items from non-local crates. For local crates, this
// would be possible but with inlining and LTO we have to use the least
// common denominator - otherwise we would run into conflicts.
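// For example (illustrative): the substitutions of a `Map<K, V>` would be
// rendered as "<K, V>", with any region (lifetime) parameters omitted.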
fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
substs: &subst::Substs<'tcx>,
output: &mut String) {
if substs.types.is_empty() {
return;
}
output.push('<');
for &type_parameter in substs.types.iter() {
push_debuginfo_type_name(cx, type_parameter, true, output);
output.push_str(", ");
}
output.pop();
output.pop();
output.push('>');
}
}
//=-----------------------------------------------------------------------------
// Namespace Handling
//=-----------------------------------------------------------------------------
struct NamespaceTreeNode {
name: ast::Name,
scope: DIScope,
parent: Option<Weak<NamespaceTreeNode>>,
}
impl NamespaceTreeNode {
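// A sketch of the mangling (names are illustrative): for a namespace path
// `foo::bar` and item name `baz`, this yields "_ZN3foo3bar3bazE", i.e.
// each component prefixed by its byte length, wrapped in Itanium-style
// _ZN...E markers.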
fn mangled_name_of_contained_item(&self, item_name: &str) -> String {
fn fill_nested(node: &NamespaceTreeNode, output: &mut String) {
match node.parent {
Some(ref parent) => fill_nested(&*parent.upgrade().unwrap(), output),
None => {}
}
let string = token::get_name(node.name);
output.push_str(&format!("{}", string.len())[]);
output.push_str(&string);
}
let mut name = String::from_str("_ZN");
fill_nested(self, &mut name);
name.push_str(&format!("{}", item_name.len())[]);
name.push_str(item_name);
name.push('E');
name
}
}
fn crate_root_namespace<'a>(cx: &'a CrateContext) -> &'a str {
&cx.link_meta().crate_name[]
}
fn namespace_for_item(cx: &CrateContext, def_id: ast::DefId) -> Rc<NamespaceTreeNode> {
ty::with_path(cx.tcx(), def_id, |path| {
// prepend crate name if not already present
let krate = if def_id.krate == ast::LOCAL_CRATE {
let crate_namespace_ident = token::str_to_ident(crate_root_namespace(cx));
Some(ast_map::PathMod(crate_namespace_ident.name))
} else {
None
};
let mut path = krate.into_iter().chain(path).peekable();
let mut current_key = Vec::new();
let mut parent_node: Option<Rc<NamespaceTreeNode>> = None;
// Create/Lookup namespace for each element of the path.
loop {
// Emulate a for loop so we can use peek below.
let path_element = match path.next() {
Some(e) => e,
None => break
};
// Ignore the name of the item (the last path element).
if path.peek().is_none() {
break;
}
let name = path_element.name();
current_key.push(name);
let existing_node = debug_context(cx).namespace_map.borrow()
.get(&current_key).cloned();
let current_node = match existing_node {
Some(existing_node) => existing_node,
None => {
// create and insert
let parent_scope = match parent_node {
Some(ref node) => node.scope,
None => ptr::null_mut()
};
let namespace_name = token::get_name(name);
let namespace_name = CString::from_slice(namespace_name
.as_bytes());
let scope = unsafe {
llvm::LLVMDIBuilderCreateNameSpace(
DIB(cx),
parent_scope,
namespace_name.as_ptr(),
// cannot reconstruct file ...
ptr::null_mut(),
// ... or line information, but that's not so important.
0)
};
let node = Rc::new(NamespaceTreeNode {
name: name,
scope: scope,
parent: parent_node.map(|parent| parent.downgrade()),
});
debug_context(cx).namespace_map.borrow_mut()
.insert(current_key.clone(), node.clone());
node
}
};
parent_node = Some(current_node);
}
match parent_node {
Some(node) => node,
None => {
cx.sess().bug(&format!("debuginfo::namespace_for_item(): \
path too short for {:?}",
def_id)[]);
}
}
})
}
//=-----------------------------------------------------------------------------
// .debug_gdb_scripts binary section
//=-----------------------------------------------------------------------------
/// Inserts a side-effect free instruction sequence that makes sure that the
/// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
pub fn insert_reference_to_gdb_debug_scripts_section_global(ccx: &CrateContext) {
if needs_gdb_debug_scripts_section(ccx) {
let empty = CString::from_slice(b"");
let gdb_debug_scripts_section_global =
get_or_insert_gdb_debug_scripts_section_global(ccx);
unsafe {
let volatile_load_instruction =
llvm::LLVMBuildLoad(ccx.raw_builder(),
gdb_debug_scripts_section_global,
empty.as_ptr());
llvm::LLVMSetVolatile(volatile_load_instruction, llvm::True);
}
}
}
/// Allocates the global variable responsible for the .debug_gdb_scripts binary
/// section.
fn get_or_insert_gdb_debug_scripts_section_global(ccx: &CrateContext)
-> llvm::ValueRef {
let section_var_name = b"__rustc_debug_gdb_scripts_section__\0";
let section_var = unsafe {
llvm::LLVMGetNamedGlobal(ccx.llmod(),
section_var_name.as_ptr() as *const _)
};
if section_var == ptr::null_mut() {
let section_name = b".debug_gdb_scripts\0";
let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0";
unsafe {
let llvm_type = Type::array(&Type::i8(ccx),
section_contents.len() as u64);
let section_var = llvm::LLVMAddGlobal(ccx.llmod(),
llvm_type.to_ref(),
section_var_name.as_ptr()
as *const _);
llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _);
llvm::LLVMSetInitializer(section_var, C_bytes(ccx, section_contents));
llvm::LLVMSetGlobalConstant(section_var, llvm::True);
llvm::LLVMSetUnnamedAddr(section_var, llvm::True);
llvm::SetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage);
// This should make sure that the whole section is not larger than
// the string it contains. Otherwise we get a warning from GDB.
llvm::LLVMSetAlignment(section_var, 1);
section_var
}
} else {
section_var
}
}
fn needs_gdb_debug_scripts_section(ccx: &CrateContext) -> bool {
let omit_gdb_pretty_printer_section =
attr::contains_name(&ccx.tcx()
.map
.krate()
.attrs,
"omit_gdb_pretty_printer_section");
!omit_gdb_pretty_printer_section &&
!ccx.sess().target.target.options.is_like_osx &&
!ccx.sess().target.target.options.is_like_windows &&
ccx.sess().opts.debuginfo != NoDebugInfo
}<|fim▁end|> | |
<|file_name|>renderer.js<|end_file_name|><|fim▁begin|>const fs = require('fs');
const electron = require('electron');
const cl = require('node-opencl');
const RUN_GAMELOOP_SYNC = true;
const GAMETICK_CL = true;
const INIT_OPENGL = false;
let canvas; // canvas dom element
let gl; // opengl context
let clCtx; // opencl context
let glProgram; // opengl shader program
let clProgram; // opencl program
let clBuildDevice;
let clQueue;
// let clKernelMove;
let clKernelGol;
let clBufferAlive;
let clBufferImageData;
let
inputIndex = 0,
outputIndex = 1;
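// Double-buffering: each tick reads the current generation from
// cellAlive[inputIndex] and writes the next one into cellAlive[outputIndex];
// the two indices are swapped after every tick.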
let locPosition; // location of position variable in frag shader
let locTexCoord; // location of texture coords variable in frag shader
let locSampler; // location of sampler in frag shader
let vertexCoordBuffer; // buffer for vertext coordinates
let texCoordBuffer; // buffer for texture coordinate
let texture; // texture
let imageData; // uint8array for texture data
let textureWidth = 1600;
let textureHeight = 900;
let gridWidth = 1600;
let gridHeight = 900;
const bytesPerPixel = 4; // bytes per pixel in imageData: R,G,B,A
const pixelTotal = textureWidth * textureHeight;
const bytesTotal = pixelTotal * bytesPerPixel;
const cellsTotal = gridWidth * gridHeight;
let cellNeighbors;
const cellAlive = [];
const frameTimes = [];
let frameTimesIndex = 0;
let lastRenderTime;
let fps = 0;
let fpsDisplay;
const FRAMETIMES_TO_KEEP = 10;
function init() {
(async () => {
initDom();
initDrawData();
if (INIT_OPENGL) {
initOpenGL();
}
initData();
await initOpenCL();
initGame();
initEvents();
startGameLoop();
render();
})();
}
function initDom() {
canvas = document.getElementById('glscreen');
fpsDisplay = document.getElementById('fps');
}
function initDrawData() {
imageData = new Uint8Array(bytesTotal);
}
function initOpenGL() {
gl = canvas.getContext('experimental-webgl');
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
gl.viewport(0, 0, gl.drawingBufferWidth, gl.drawingBufferHeight);
// init vertex buffer
vertexCoordBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vertexCoordBuffer);
gl.bufferData(
gl.ARRAY_BUFFER,
new Float32Array([
-1.0, -1.0,
1.0, -1.0,
-1.0, 1.0,
-1.0, 1.0,
1.0, -1.0,
1.0, 1.0]),
gl.STATIC_DRAW
);
// ------ SHADER SETUP
const vertexShader = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vertexShader, fs.readFileSync(__dirname + '/shader-vertex.glsl', 'utf-8'));
gl.compileShader(vertexShader);
console.log('vertexShaderLog', gl.getShaderInfoLog(vertexShader));
const fragmentShader = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(fragmentShader, fs.readFileSync(__dirname + '/shader-fragment.glsl', 'utf-8'));
gl.compileShader(fragmentShader);
console.log('fragmentShaderLog', gl.getShaderInfoLog(fragmentShader));
glProgram = gl.createProgram();
gl.attachShader(glProgram, vertexShader);
gl.attachShader(glProgram, fragmentShader);
gl.linkProgram(glProgram);
console.log('glProgramLog', gl.getProgramInfoLog(glProgram));
gl.useProgram(glProgram);
// ---
locPosition = gl.getAttribLocation(glProgram, 'a_position');
gl.enableVertexAttribArray(locPosition);
// provide texture coordinates for the rectangle.
locTexCoord = gl.getAttribLocation(glProgram, 'a_texCoord');
gl.enableVertexAttribArray(locTexCoord);
// ------ TEXTURE SETUP
texCoordBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, texCoordBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
0.0, 0.0,
1.0, 0.0,
0.0, 1.0,
0.0, 1.0,
1.0, 0.0,
1.0, 1.0]), gl.STATIC_DRAW);
// init texture to be all solid
for (let i = 0; i < pixelTotal; i++) {
const offset = i * bytesPerPixel;
imageData[offset + 3] = 255;
}
texture = gl.createTexture();
locSampler = gl.getUniformLocation(glProgram, 'u_sampler');
}
function initData() {
cellNeighbors = new Uint32Array(cellsTotal * 8);
cellAlive[0] = new Uint8Array(cellsTotal);
cellAlive[1] = new Uint8Array(cellsTotal);
// GOL: Cells
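// Precompute the indices of each cell's 8 neighbors. The
// "(+ cellsTotal) % cellsTotal" arithmetic wraps lookups at the top and
// bottom edges; at the left/right edges the x-1 / x+1 neighbors fall onto
// adjacent rows, so the board effectively wraps as a twisted torus (a
// simplification that is fine for this demo).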
let index = 0, indexNeighbors = 0;
const maxX = gridWidth - 1;
const maxY = gridHeight - 1;
for (let y = 0; y < gridHeight; y++) {
const
prevRow = (y - 1) * gridWidth,
thisRow = prevRow + gridWidth,
nextRow = thisRow + gridWidth;
for (let x = 0; x < gridWidth; x++) {
cellNeighbors[indexNeighbors++] = (prevRow + x - 1 + cellsTotal) % cellsTotal;
cellNeighbors[indexNeighbors++] = (prevRow + x + cellsTotal) % cellsTotal;
cellNeighbors[indexNeighbors++] = (prevRow + x + 1 + cellsTotal) % cellsTotal;
cellNeighbors[indexNeighbors++] = (thisRow + x - 1 + cellsTotal) % cellsTotal;
cellNeighbors[indexNeighbors++] = (thisRow + x + 1) % cellsTotal;
cellNeighbors[indexNeighbors++] = (nextRow + x - 1) % cellsTotal;
cellNeighbors[indexNeighbors++] = (nextRow + x) % cellsTotal;
cellNeighbors[indexNeighbors++] = (nextRow + x + 1) % cellsTotal;
cellAlive[0][index++] = (Math.random() > 0.85) + 0;
}
}
}
async function initOpenCL() {
// --- Init opencl
// Best case we'd init a shared opengl/opencl context here, but node-opencl doesn't currently support that
const platforms = cl.getPlatformIDs();
for(let i = 0; i < platforms.length; i++)
console.info(`Platform ${i}: ${cl.getPlatformInfo(platforms[i], cl.PLATFORM_NAME)}`);
const platform = platforms[0];
const devices = cl.getDeviceIDs(platform, cl.DEVICE_TYPE_ALL);
for(let i = 0; i < devices.length; i++)
console.info(`  Device ${i}: ${cl.getDeviceInfo(devices[i], cl.DEVICE_NAME)}`);
console.info('creating context');
clCtx = cl.createContext([cl.CONTEXT_PLATFORM, platform], devices);
// prepare opencl program
// const clProgramSource = fs.readFileSync(__dirname + '/program.opencl', 'utf-8');
// GOL
const clProgramSource = fs.readFileSync(__dirname + '/gol.opencl', 'utf-8');
clProgram = cl.createProgramWithSource(clCtx, clProgramSource);
cl.buildProgram(clProgram);
// create kernels
// build kernel for first device
clBuildDevice = cl.getContextInfo(clCtx, cl.CONTEXT_DEVICES)[0];
console.info('Using device: ' + cl.getDeviceInfo(clBuildDevice, cl.DEVICE_NAME));
try {
// clKernelMove = cl.createKernel(clProgram, 'kmove');
clKernelGol = cl.createKernel(clProgram, 'kgol');
} catch(err) {
console.error(cl.getProgramBuildInfo(clProgram, clBuildDevice, cl.PROGRAM_BUILD_LOG));
process.exit(-1);
}
// create buffers
const clBufferNeighbors = cl.createBuffer(clCtx, cl.MEM_READ_ONLY, cellNeighbors.byteLength);
clBufferAlive = [
cl.createBuffer(clCtx, cl.MEM_READ_WRITE, cellAlive[inputIndex].byteLength),
cl.createBuffer(clCtx, cl.MEM_READ_WRITE, cellAlive[outputIndex].byteLength)
];
clBufferImageData = cl.createBuffer(clCtx, cl.MEM_WRITE_ONLY, imageData.byteLength);
// args 0 and 1 are set per tick in gameTickCl() so the buffers can be swapped
// cl.setKernelArg(clKernelGol, 0, 'uchar*', clBufferAlive[0]);
// cl.setKernelArg(clKernelGol, 1, 'uchar*', clBufferAlive[1]);
cl.setKernelArg(clKernelGol, 2, 'uint*', clBufferNeighbors);
cl.setKernelArg(clKernelGol, 3, 'uchar*', clBufferImageData);
// create queue
if (cl.createCommandQueueWithProperties !== undefined) {
clQueue = cl.createCommandQueueWithProperties(clCtx, clBuildDevice, []); // OpenCL 2
} else {
clQueue = cl.createCommandQueue(clCtx, clBuildDevice, null); // OpenCL 1.x
}
process.stdout.write('enqueue writes\n');
cl.enqueueWriteBuffer(clQueue, clBufferAlive[0], true, 0, cellAlive[inputIndex].byteLength, cellAlive[inputIndex], null);
cl.enqueueWriteBuffer(clQueue, clBufferAlive[1], true, 0, cellAlive[outputIndex].byteLength, cellAlive[outputIndex], null);
process.stdout.write('writes done\n');
}
function initGame() {
}
function initEvents() {
window.addEventListener('resize', () => {
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
});
}
// ----------- GAME LOOP
let lastLoopTime;
const timePerTick = 50; // ms
let timeSinceLastLoop = 0;
let tickCounter = 0;
function startGameLoop() {
lastLoopTime = Date.now();
if (!RUN_GAMELOOP_SYNC) {
gameLoop();
}
}
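// Fixed-timestep loop: accumulate elapsed wall-clock time and run as many
// whole ticks of `timePerTick` ms as fit, carrying the remainder over to
// the next call.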
function gameLoop() {
const now = Date.now();
timeSinceLastLoop += now - lastLoopTime;
lastLoopTime = now;
while(timeSinceLastLoop > timePerTick) {
if (GAMETICK_CL) {
gameTickCl();
} else {
gameTick();
}
timeSinceLastLoop -= timePerTick;
}
if (!RUN_GAMELOOP_SYNC) {
setTimeout(gameLoop, timePerTick - timeSinceLastLoop);
}
}
function gameTickCl() {
process.stdout.write('gametick cl\n');
cl.setKernelArg(clKernelGol, 0, 'uchar*', clBufferAlive[inputIndex]);
cl.setKernelArg(clKernelGol, 1, 'uchar*', clBufferAlive[outputIndex]);
process.stdout.write('gametick cl 1\n');
cl.enqueueNDRangeKernel(clQueue, clKernelGol, 1, null, [cellsTotal], null);<|fim▁hole|> process.stdout.write('gametick cl done\n');
inputIndex = !inputIndex + 0;
outputIndex = !inputIndex + 0;
}
function gameTick() {
tickCounter++;
const input = cellAlive[inputIndex];
const output = cellAlive[outputIndex];
for (let i = 0, n = 0; i < cellsTotal; i++) {
const sum =
input[cellNeighbors[n++]]
+ input[cellNeighbors[n++]]
+ input[cellNeighbors[n++]]
+ input[cellNeighbors[n++]]
+ input[cellNeighbors[n++]]
+ input[cellNeighbors[n++]]
+ input[cellNeighbors[n++]]
+ input[cellNeighbors[n++]];
// Branch-free alternatives (unused): sum < 2 is equivalent to
// !(sum & 4294967294), i.e. !(sum & ~1); sum > 3 is equivalent to
// (sum & 12) being non-zero, since sum <= 8 and any value above 3 has
// the 4-bit or the 8-bit set.
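// Conway's life rule (B3/S23): a cell lives in the next generation iff it
// has exactly 3 live neighbors, or it is alive now and has exactly 2.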
const newAlive = (sum === 3 || (sum === 2 && input[i])) + 0;
output[i] = newAlive;
imageData[i * 4] = newAlive * 255;
}
// alive cells were already written to the red channel inside the loop above
inputIndex = !inputIndex + 0;
outputIndex = !inputIndex + 0;
}
// ----------- RENDER
function renderOpenGL() {
gl.clearColor(1.0, 0.0, 0.0, 1.0);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.vertexAttribPointer(locPosition, 2, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ARRAY_BUFFER, texCoordBuffer);
gl.vertexAttribPointer(locTexCoord, 2, gl.FLOAT, false, 0, 0);
// texture
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, textureWidth, textureHeight, 0, gl.RGBA, gl.UNSIGNED_BYTE, imageData);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.bindTexture(gl.TEXTURE_2D, null);
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.uniform1i(locSampler, 0);
// draw
gl.bindBuffer(gl.ARRAY_BUFFER, vertexCoordBuffer);
gl.drawArrays(gl.TRIANGLES, 0, 6);
// console.log(electron.screen.getCursorScreenPoint());
}
function render() {
window.requestAnimationFrame(render);
if (RUN_GAMELOOP_SYNC) {
gameLoop();
}
const now = Date.now();
if (lastRenderTime) {
frameTimes[frameTimesIndex] = now - lastRenderTime;
frameTimesIndex = (frameTimesIndex + 1) % FRAMETIMES_TO_KEEP;
if (frameTimes.length >= FRAMETIMES_TO_KEEP) {
fps = 1000 * frameTimes.length / frameTimes.reduce((pv, cv) => pv + cv);
// do not update every frame
if ((frameTimesIndex % 5) === 0) {
fpsDisplay.innerHTML = fps.toFixed(2);
}
}
}
lastRenderTime = now;
if (INIT_OPENGL) {
renderOpenGL();
}
}
init();<|fim▁end|> | process.stdout.write('gametick cl 2\n');
cl.enqueueReadBuffer(clQueue, clBufferImageData, true, 0, imageData.byteLength, imageData); |
<|file_name|>server.py<|end_file_name|><|fim▁begin|>import sys
import json
import flask
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.cors import origin
from parse import ProfileParser, IHMEParser
from flask import render_template
from flask import Response
from werkzeug.contrib.fixers import ProxyFix
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)
class Value(db.Model):
id = db.Column(db.Integer, primary_key=True)
country = db.Column(db.String(120))
code = db.Column(db.String(20))
value = db.Column(db.String(120))
def __init__(self, country, code, value):
self.country = country
self.code = code
self.value = value
def __repr__(self):
return '<Code %s Value %r>' % (self.code, self.value)
class DALY(db.Model):
id = db.Column(db.Integer, primary_key=True)
country = db.Column(db.String(120))
code = db.Column(db.String(20))
rank = db.Column(db.Integer)
cause = db.Column(db.String(30))
perc = db.Column(db.Float)
color = db.Column(db.String(10))
def __init__(self, country, code, rank, cause, perc, color):
self.country = country
self.code = code
self.rank = rank
self.cause = cause
self.perc = perc
self.color = color
def __repr__(self):
return '<Code %s>' % (self.code)
def populate_database(fn_profile, fn_daly):
parser = ProfileParser(fn_profile)
for country, data in parser.parse_profiles():
for code, value in data.items():
v = Value(country, code, value)
db.session.add(v)
db.session.commit()
parser = IHMEParser(fn_daly)
for data in parser.parse_data():
d = DALY(data["country"], data["code"], data["rank"], data["cause"], data["perc"], data["color"])
db.session.add(d)
db.session.commit()
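# Typical invocation (file paths are placeholders):
#   python server.py populate <profiles-file> <ihme-daly-file>
# where the files are whatever ProfileParser and IHMEParser expect.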
@app.route("/api/profiles", methods=["GET", "POST"])
@origin('*')
def api_profiles():
country = flask.request.values["country"]
values = dict([
(v.code, v.value) for v in Value.query.filter_by(country=country) if v.code<|fim▁hole|> response=json.dumps(values, indent=4), status=200, mimetype="application/json"
)
@app.route("/api/daly", methods=["GET", "POST"])
@origin('*')
def api_daly():
country = flask.request.values["country"]
values = [
{
"rank" : v.rank, "text" : v.cause,
"value" : v.perc, "color" : v.color
} for v in DALY.query.filter_by(country=country).order_by('rank') if v.code and v.rank <= 10
]
return Response(
response=json.dumps(values, indent=4), status=200, mimetype="application/json"
)
app.wsgi_app = ProxyFix(app.wsgi_app)
if __name__ == "__main__":
if len(sys.argv) == 4 and sys.argv[1] == "populate":
populate_database(sys.argv[2], sys.argv[3])
else:
app.run(debug=True)<|fim▁end|> | ])
return Response( |
<|file_name|>NotifyD7Networks.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <[email protected]>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# To use this service you will need a D7 Networks account from their website
# at https://d7networks.com/
#
# After you've established your account you can get your api login credentials
# (both user and password) from the API Details section from within your
# account profile area: https://d7networks.com/accounts/profile/
import re
import six
import requests
import base64
from json import dumps
from json import loads
from .NotifyBase import NotifyBase
from ..URLBase import PrivacyMode
from ..common import NotifyType
from ..utils import parse_list
from ..utils import parse_bool
from ..AppriseLocale import gettext_lazy as _
# Extend HTTP Error Messages
D7NETWORKS_HTTP_ERROR_MAP = {
401: 'Invalid Argument(s) Specified.',
403: 'Unauthorized - Authentication Failure.',
412: 'A Routing Error Occurred',
500: 'A Server-side Error Occurred Handling the Request.',
}
# Some Phone Number Detection
IS_PHONE_NO = re.compile(r'^\+?(?P<phone>[0-9\s)(+-]+)\s*$')
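# Illustrative match (number is a placeholder): '+1 (555) 123-4567' passes
# this pattern; its digits are later joined to '15551234567' before the
# length check.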
# Priorities
class D7SMSPriority(object):
"""
D7 Networks SMS Message Priority
"""
LOW = 0
MODERATE = 1
NORMAL = 2
HIGH = 3
D7NETWORK_SMS_PRIORITIES = (
D7SMSPriority.LOW,
D7SMSPriority.MODERATE,
D7SMSPriority.NORMAL,
D7SMSPriority.HIGH,
)
class NotifyD7Networks(NotifyBase):
"""
A wrapper for D7 Networks Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'D7 Networks'
# The services URL
service_url = 'https://d7networks.com/'
# All notification requests are secure
secure_protocol = 'd7sms'
# Allow 300 requests per minute.
# 60/300 = 0.2
request_rate_per_sec = 0.20
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_d7networks'
# D7 Networks batch notification URL
notify_batch_url = 'http://rest-api.d7networks.com/secure/sendbatch'
# D7 Networks single notification URL
notify_url = 'http://rest-api.d7networks.com/secure/send'
# The maximum length of the body
body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
title_maxlen = 0
# Define object templates
templates = (
'{schema}://{user}:{password}@{targets}',
)
# Define our template tokens
template_tokens = dict(NotifyBase.template_tokens, **{
'user': {
'name': _('Username'),
'type': 'string',
'required': True,
},
'password': {
'name': _('Password'),
'type': 'string',
'private': True,
'required': True,
},
'target_phone': {
'name': _('Target Phone No'),
'type': 'string',
'prefix': '+',
'regex': (r'^[0-9\s)(+-]+$', 'i'),
'map_to': 'targets',
},
'targets': {
'name': _('Targets'),
'type': 'list:string',
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'priority': {
'name': _('Priority'),
'type': 'choice:int',
'min': D7SMSPriority.LOW,
'max': D7SMSPriority.HIGH,
'values': D7NETWORK_SMS_PRIORITIES,
# The website identifies that the default priority is low; so
# this plugin will honor that same default
'default': D7SMSPriority.LOW,
},
'batch': {
'name': _('Batch Mode'),
'type': 'bool',
'default': False,
},
'to': {
'alias_of': 'targets',
},
'source': {
# Originating address. In cases where rewriting of the sender's
# address is supported or permitted by the SMS-C, this number is
# transmitted as the originating address of the message. It is
# completely optional.
'name': _('Originating Address'),
'type': 'string',
'map_to': 'source',
},
'from': {
'alias_of': 'source',
},
})
def __init__(self, targets=None, priority=None, source=None, batch=False,
**kwargs):
"""
Initialize D7 Networks Object
"""
super(NotifyD7Networks, self).__init__(**kwargs)
# The Priority of the message
if priority not in D7NETWORK_SMS_PRIORITIES:
self.priority = self.template_args['priority']['default']
else:
self.priority = priority
# Prepare Batch Mode Flag
self.batch = batch
# Setup our source address (if defined)
self.source = None \
if not isinstance(source, six.string_types) else source.strip()
# Parse our targets
self.targets = list()
for target in parse_list(targets):
# Validate targets and drop bad ones:
result = IS_PHONE_NO.match(target)
if result:
# Further check our phone # for its digit count;
# if it isn't within the 11-14 digit range, we can assume it's
# a poorly specified phone no and emit a warning
result = ''.join(re.findall(r'\d+', result.group('phone')))
if len(result) < 11 or len(result) > 14:
self.logger.warning(
'Dropped invalid phone # '
'({}) specified.'.format(target),
)
continue
# store valid phone number
self.targets.append(result)
continue
self.logger.warning(
'Dropped invalid phone # ({}) specified.'.format(target))
if len(self.targets) == 0:
msg = 'There are no valid targets identified to notify.'
self.logger.warning(msg)
raise TypeError(msg)
return
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Depending on whether we are set to batch mode or single mode this
redirects to the appropriate handling
"""
# error tracking (used for function return)
has_error = False
auth = '{user}:{password}'.format(
user=self.user, password=self.password)
if six.PY3:
# Python 3's version of b64encode() expects a byte array and not
# a string. To accommodate this, we encode the content here
auth = auth.encode('utf-8')
# Prepare our headers
headers = {
'User-Agent': self.app_id,
'Accept': 'application/json',<|fim▁hole|> # Our URL varies depending if we're doing a batch mode or not
url = self.notify_batch_url if self.batch else self.notify_url
# use the list directly
targets = list(self.targets)
while len(targets):
if self.batch:
# Prepare our payload
payload = {
'globals': {
'priority': self.priority,
'from': self.source if self.source else self.app_id,
},
'messages': [{
'to': self.targets,
'content': body,
}],
}
# Reset our targets so we don't keep going. This is required
# because we're in batch mode; we only need to loop once.
targets = []
else:
# We're not in a batch mode; so get our next target
# Get our target(s) to notify
target = targets.pop(0)
# Prepare our payload
payload = {
'priority': self.priority,
'content': body,
'to': target,
'from': self.source if self.source else self.app_id,
}
# Some Debug Logging
self.logger.debug(
'D7 Networks POST URL: {} (cert_verify={})'.format(
url, self.verify_certificate))
self.logger.debug('D7 Networks Payload: {}' .format(payload))
# Always call throttle before any remote server i/o is made
self.throttle()
try:
r = requests.post(
url,
data=dumps(payload),
headers=headers,
verify=self.verify_certificate,
)
if r.status_code not in (
requests.codes.created, requests.codes.ok):
# We had a problem
status_str = \
NotifyBase.http_response_code_lookup(
r.status_code, D7NETWORKS_HTTP_ERROR_MAP)
try:
# Update our status response if we can
json_response = loads(r.content)
status_str = json_response.get('message', status_str)
except (AttributeError, TypeError, ValueError):
# ValueError = r.content is Unparsable
# TypeError = r.content is None
# AttributeError = r is None
# We could not parse JSON response.
# We will just use the status we already have.
pass
self.logger.warning(
'Failed to send D7 Networks SMS notification to {}: '
'{}{}error={}.'.format(
', '.join(self.targets) if self.batch else target,
status_str,
', ' if status_str else '',
r.status_code))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
# Mark our failure
has_error = True
continue
else:
if self.batch:
count = len(self.targets)
try:
# Get our message delivery count if we can
json_response = loads(r.content)
count = int(json_response.get(
'data', {}).get('messageCount', -1))
except (AttributeError, TypeError, ValueError):
# ValueError = r.content is Unparsable
# TypeError = r.content is None
# AttributeError = r is None
# We could not parse JSON response. Assume that
# our delivery is okay for now.
pass
if count != len(self.targets):
has_error = True
self.logger.info(
'Sent D7 Networks batch SMS notification to '
'{} of {} target(s).'.format(
count, len(self.targets)))
else:
self.logger.info(
'Sent D7 Networks SMS notification to {}.'.format(
target))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
except requests.RequestException as e:
self.logger.warning(
'A Connection error occurred sending D7 Networks:%s ' % (
', '.join(self.targets)) + 'notification.'
)
self.logger.debug('Socket Exception: %s' % str(e))
# Mark our failure
has_error = True
continue
return not has_error
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
# Define any arguments set
args = {
'format': self.notify_format,
'overflow': self.overflow_mode,
'verify': 'yes' if self.verify_certificate else 'no',
'batch': 'yes' if self.batch else 'no',
}
if self.priority != self.template_args['priority']['default']:
args['priority'] = str(self.priority)
if self.source:
args['from'] = self.source
return '{schema}://{user}:{password}@{targets}/?{args}'.format(
schema=self.secure_protocol,
user=NotifyD7Networks.quote(self.user, safe=''),
password=self.pprint(
self.password, privacy, mode=PrivacyMode.Secret, safe=''),
targets='/'.join(
[NotifyD7Networks.quote(x, safe='') for x in self.targets]),
args=NotifyD7Networks.urlencode(args))
@staticmethod
def parse_url(url):
"""
Parses the URL and returns enough arguments that can allow
us to substantiate this object.
"""
results = NotifyBase.parse_url(url, verify_host=False)
if not results:
# We're done early as we couldn't load the results
return results
# Initialize our targets
results['targets'] = list()
# Store our first target (the one stored in the hostname)
results['targets'].append(NotifyD7Networks.unquote(results['host']))
# Get our entries; split_path() looks after unquoting content for us
# by default
results['targets'].extend(
NotifyD7Networks.split_path(results['fullpath']))
# Set our priority
if 'priority' in results['qsd'] and len(results['qsd']['priority']):
_map = {
'l': D7SMSPriority.LOW,
'0': D7SMSPriority.LOW,
'm': D7SMSPriority.MODERATE,
'1': D7SMSPriority.MODERATE,
'n': D7SMSPriority.NORMAL,
'2': D7SMSPriority.NORMAL,
'h': D7SMSPriority.HIGH,
'3': D7SMSPriority.HIGH,
}
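# e.g. both '?priority=h' and '?priority=3' resolve to D7SMSPriority.HIGH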
try:
results['priority'] = \
_map[results['qsd']['priority'][0].lower()]
except KeyError:
# No priority was set
pass
# Support the 'from' and 'source' variables so that we can set the
# source address this way too.
# The 'from' makes it easier to use yaml configuration
if 'from' in results['qsd'] and len(results['qsd']['from']):
results['source'] = \
NotifyD7Networks.unquote(results['qsd']['from'])
if 'source' in results['qsd'] and len(results['qsd']['source']):
results['source'] = \
NotifyD7Networks.unquote(results['qsd']['source'])
# Get Batch Mode Flag
results['batch'] = \
parse_bool(results['qsd'].get('batch', False))
# Support the 'to' variable so that we can support targets this way too
# The 'to' makes it easier to use yaml configuration
if 'to' in results['qsd'] and len(results['qsd']['to']):
results['targets'] += \
NotifyD7Networks.parse_list(results['qsd']['to'])
return results<|fim▁end|> | 'Authorization': 'Basic {}'.format(base64.b64encode(auth))
}
|
<|file_name|>rabbit.go<|end_file_name|><|fim▁begin|>package cmd
import (
"fmt"
"time"
"github.com/spf13/cobra"
"github.com/streadway/amqp"
)
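// amqpURL is assumed to be declared elsewhere in this package (e.g. in
// root.go); it is filled from the --amqp flag registered in init() below.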
// rabbitCmd represents the rabbit command
var rabbitCmd = &cobra.Command{
Use: "rabbit",
Short: "A brief description of your command",
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("Connecting to RabbitMQ ...")
conn, _ := amqp.Dial(amqpURL)
defer conn.Close()
ch, _ := conn.Channel()
defer ch.Close()
q, _ := ch.QueueDeclare(
"DemoQueue", //name
true, //durable
false, //delete when unused
false, //exclusive
false, //no-wait<|fim▁hole|> )
body := "Hello world!"
//Publish to the queue
ch.Publish(
"", //exchange
q.Name, //routing key
false, //mandatory
false, //immediate
amqp.Publishing{
ContentType: "text/plain",
Body: []byte(body),
})
msgs, _ := ch.Consume(
"DemoQueue", //queue
"", //consumer
true, //auto-ack
false, //exclusive
false, //no-local
false, //no-wait
nil, //args
)
msgCount := 0
go func() {
for d := range msgs {
msgCount++
fmt.Printf("\nMessage Count: %d, Message Body: %s\n", msgCount, d.Body)
}
}()
select {
case <-time.After(time.Second * 2):
fmt.Printf("Total Messages Fetched: %d\n", msgCount)
fmt.Println("No more messages in queue. Timing out...")
}
},
}
func init() {
rootCmd.AddCommand(rabbitCmd)
rabbitCmd.Flags().StringVarP(&amqpURL, "amqp", "q", "", "URL of RabbitMQ.")
}<|fim▁end|> | nil, //arguments
<|file_name|>export.py<|end_file_name|><|fim▁begin|>from flask import Blueprint
from flask import flash
from flask import make_response, render_template
from flask_login import current_user
from markupsafe import Markup
from app.helpers.data_getter import DataGetter
from app.helpers.auth import AuthManager
from app.helpers.exporters.ical import ICalExporter
from app.helpers.exporters.pentabarfxml import PentabarfExporter
from app.helpers.exporters.xcal import XCalExporter
from app.helpers.permission_decorators import can_access
event_export = Blueprint('event_export', __name__, url_prefix='/events/<int:event_id>/export')
@event_export.route('/')
@can_access
def display_export_view(event_id):
event = DataGetter.get_event(event_id)
export_jobs = DataGetter.get_export_jobs(event_id)
user = current_user
if not AuthManager.is_verified_user():
flash(Markup("Your account is unverified. "
"Please verify by clicking on the confirmation link that has been emailed to you."
'<br>Did not get the email? Please <a href="/resend_email/" class="alert-link"> '
'click here to resend the confirmation.</a>'))
return render_template(<|fim▁hole|>
@event_export.route('/pentabarf.xml')
@can_access
def pentabarf_export_view(event_id):
response = make_response(PentabarfExporter.export(event_id))
response.headers["Content-Type"] = "application/xml"
response.headers["Content-Disposition"] = "attachment; filename=pentabarf.xml"
return response
@event_export.route('/calendar.ical')
@can_access
def ical_export_view(event_id):
response = make_response(ICalExporter.export(event_id))
response.headers["Content-Type"] = "text/calendar"
response.headers["Content-Disposition"] = "attachment; filename=calendar.ics"
return response
@event_export.route('/calendar.xcs')
@can_access
def xcal_export_view(event_id):
response = make_response(XCalExporter.export(event_id))
response.headers["Content-Type"] = "text/calendar"
response.headers["Content-Disposition"] = "attachment; filename=calendar.xcs"
return response<|fim▁end|> | 'gentelella/admin/event/export/export.html', event=event, export_jobs=export_jobs,
current_user=user
)
|
<|file_name|>link_functions.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012-2015 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import scipy
from ..util.univariate_Gaussian import std_norm_cdf, std_norm_pdf
import scipy as sp
from ..util.misc import safe_exp, safe_square, safe_cube, safe_quad, safe_three_times
class GPTransformation(object):
"""
Link function class for doing non-Gaussian likelihoods approximation
:param Y: observed output (Nx1 numpy.ndarray)
.. note:: Y values allowed depend on the likelihood_function used
"""
def __init__(self):
pass
def transf(self,f):
"""
Gaussian process transformation function, latent space -> output space
"""
raise NotImplementedError
def dtransf_df(self,f):
"""
derivative of transf(f) w.r.t. f
"""
raise NotImplementedError
def d2transf_df2(self,f):
"""
second derivative of transf(f) w.r.t. f
"""
raise NotImplementedError
def d3transf_df3(self,f):
"""
third derivative of transf(f) w.r.t. f
"""
raise NotImplementedError
def to_dict(self):
raise NotImplementedError
def _to_dict(self):
return {}
@staticmethod
def from_dict(input_dict):
import copy
input_dict = copy.deepcopy(input_dict)
link_class = input_dict.pop('class')
import GPy
link_class = eval(link_class)
return link_class._from_dict(link_class, input_dict)
@staticmethod
def _from_dict(link_class, input_dict):
return link_class(**input_dict)
class Identity(GPTransformation):
"""
.. math::
g(f) = f
"""
def transf(self,f):
return f
def dtransf_df(self,f):
return np.ones_like(f)
def d2transf_df2(self,f):
return np.zeros_like(f)
def d3transf_df3(self,f):
return np.zeros_like(f)
def to_dict(self):
input_dict = super(Identity, self)._to_dict()
input_dict["class"] = "GPy.likelihoods.link_functions.Identity"
return input_dict
class Probit(GPTransformation):
"""
.. math::
g(f) = \\Phi^{-1} (mu)
"""
def transf(self,f):
return std_norm_cdf(f)
def dtransf_df(self,f):
return std_norm_pdf(f)
def d2transf_df2(self,f):
return -f * std_norm_pdf(f)
def d3transf_df3(self,f):
return (safe_square(f)-1.)*std_norm_pdf(f)
def to_dict(self):
input_dict = super(Probit, self)._to_dict()
input_dict["class"] = "GPy.likelihoods.link_functions.Probit"
return input_dict
class Cloglog(GPTransformation):
"""
Complementary log-log link
.. math::
p(f) = 1 - e^{-e^f}
or
f = \log (-\log(1-p))
"""
def transf(self,f):
ef = safe_exp(f)
return 1-np.exp(-ef)
def dtransf_df(self,f):
ef = safe_exp(f)
return np.exp(f-ef)
def d2transf_df2(self,f):
ef = safe_exp(f)
return -np.exp(f-ef)*(ef-1.)
def d3transf_df3(self,f):
ef = safe_exp(f)
ef2 = safe_square(ef)
three_times_ef = safe_three_times(ef)
r_val = np.exp(f-ef)*(1.-three_times_ef + ef2)
return r_val
class Log(GPTransformation):
"""
.. math::
g(f) = \\log(\\mu)
"""
def transf(self,f):
return safe_exp(f)
def dtransf_df(self,f):
return safe_exp(f)
def d2transf_df2(self,f):
return safe_exp(f)
def d3transf_df3(self,f):
return safe_exp(f)
class Log_ex_1(GPTransformation):
"""
.. math::
<|fim▁hole|> g(f) = \\log(\\exp(\\mu) - 1)
"""
def transf(self,f):
return scipy.special.log1p(safe_exp(f))
def dtransf_df(self,f):
ef = safe_exp(f)
return ef/(1.+ef)
def d2transf_df2(self,f):
ef = safe_exp(f)
aux = ef/(1.+ef)
return aux*(1.-aux)
def d3transf_df3(self,f):
ef = safe_exp(f)
aux = ef/(1.+ef)
daux_df = aux*(1.-aux)
return daux_df - (2.*aux*daux_df)
class Reciprocal(GPTransformation):
def transf(self,f):
return 1./f
def dtransf_df(self, f):
f2 = safe_square(f)
return -1./f2
def d2transf_df2(self, f):
f3 = safe_cube(f)
return 2./f3
def d3transf_df3(self,f):
f4 = safe_quad(f)
return -6./f4
class Heaviside(GPTransformation):
"""
.. math::
g(f) = I_{x \\geq 0}
"""
def transf(self,f):
#transformation goes here
return np.where(f>0, 1, 0)
def dtransf_df(self,f):
raise NotImplementedError("This function is not differentiable!")
def d2transf_df2(self,f):
raise NotImplementedError("This function is not differentiable!")<|fim▁end|> | |
<|file_name|>main.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright (C) Dag Henning Liodden Sørbø <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "drawingview.h"
#include <QApplication>
#include "randomgenerator.h"
#include "drawingcontroller.h"
#include "drawingsetupcontroller.h"
#include "lotwindow.h"
#include "app.h"
#include <QLibraryInfo>
#include <QTranslator>
#include "color.h"
#include "settingshandler.h"
#include "selectlanguagedialog.h"
#include "updatereminder.h"
#include "updateview.h"
#include "i18n.h"
bool setLanguageToSystemLanguage() {
QString langCountry = QLocale().system().name();
QString lang = langCountry.left(2);
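// e.g. a system locale of "nb_NO" yields lang "nb"; the check below folds
// Bokmål/Nynorsk into the single "no" translation shipped with the app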
if (lang == "nb" || lang == "nn") {
lang = "no";
}
bool langIsSupported = false;
for (int i = 0; i < NUM_LANGUAGES; ++i) {
if (LANGUAGES[i][1] == lang) {
langIsSupported = true;
}
}
if (langIsSupported) {
SettingsHandler::setLanguage(lang);
}
return langIsSupported;
}
int setupLanguage(QApplication& app) {
if (!SettingsHandler::hasLanguage()) {
bool ok = setLanguageToSystemLanguage();
if (!ok) {
SelectLanguageDialog dialog;
if (dialog.exec() == QDialog::Rejected) {
return -1;
}
}
}
QString language = SettingsHandler::language();
if (language != "en") {
QTranslator* translator = new QTranslator();
QString filename = QString(language).append(".qm");
translator->load(filename, ":/app/translations");
app.installTranslator(translator);
}
return 0;
}
int main(int argc, char *argv[])
{
RandomGenerator::init();
qRegisterMetaTypeStreamOperators<Color>("Color");
QApplication app(argc, argv);
app.setApplicationName(APPLICATION_NAME);
app.setApplicationDisplayName(APPLICATION_NAME);
app.setApplicationVersion(APPLICATION_VERSION);
app.setOrganizationName(ORG_NAME);
app.setOrganizationDomain(ORG_DOMAIN);
QIcon icon(":/gui/icons/lots.svg");
app.setWindowIcon(icon);
#ifdef Q_OS_MAC
app.setAttribute(Qt::AA_DontShowIconsInMenus, true);
#endif
SettingsHandler::initialize(ORG_NAME, APPLICATION_NAME);
int res = setupLanguage(app);
if (res != 0) {
return res;
}
DrawingSetupController setupController;
DrawingSetupDialog setupDialog(&setupController);
DrawingController controller;
DrawingView drawingView(&controller, &setupDialog);
controller.setDrawingView(&drawingView);
UpdateView updateView(&setupDialog);
UpdateReminder reminder([&](UpdateInfo info) {
if (!info.hasError && info.hasUpdate) {
updateView.setUpdateInfo(info);
updateView.show();
}
});
if (!SettingsHandler::autoUpdatesDisabled()) {
reminder.checkForUpdate();
}<|fim▁hole|> return app.exec();
}<|fim▁end|> | |
<|file_name|>AshtaVargaChartData.java<|end_file_name|><|fim▁begin|>/**
* AshtavargaChartData.java
* Created On 2006, Mar 31, 2006 5:12:23 PM
* @author E. Rajasekar
*/
package app.astrosoft.beans;
import java.util.EnumMap;
import app.astrosoft.consts.AshtavargaName;
import app.astrosoft.consts.AstrosoftTableColumn;
import app.astrosoft.consts.Rasi;
import app.astrosoft.core.Ashtavarga;
import app.astrosoft.export.Exportable;
import app.astrosoft.export.Exporter;
import app.astrosoft.ui.table.ColumnMetaData;
import app.astrosoft.ui.table.DefaultColumnMetaData;
import app.astrosoft.ui.table.Table;
import app.astrosoft.ui.table.TableData;
import app.astrosoft.ui.table.TableRowData;
public class AshtaVargaChartData extends AbstractChartData implements Exportable{
private EnumMap<Rasi, Integer> varga;
public AshtaVargaChartData(AshtavargaName name, EnumMap<Rasi, Integer> varga) {
super();
this.varga = varga;
chartName = name.toString();
int count = Ashtavarga.getCount(name);
if ( count != -1){
chartName = chartName + " ( " +String.valueOf(count) + " ) ";
}
}
public Table getChartHouseTable(final Rasi rasi) {
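// Builds an anonymous two-row, single-column Table; only row index 1
// yields the varga count for the given rasi, the other cell stays empty.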
Table ashtavargaTable = new Table(){
public TableData<TableRowData> getTableData() {
return new TableData<TableRowData>(){
public TableRowData getRow(final int index){
return new TableRowData(){
public Object getColumnData(AstrosoftTableColumn col) {
return (index == 1) ? varga.get(rasi) : null;
}
};
}
public int getRowCount() {
return 2;
}
<|fim▁hole|>
}
public ColumnMetaData getColumnMetaData() {
return colMetaData;
}
};
return ashtavargaTable;
}
@Override
public DefaultColumnMetaData getHouseTableColMetaData() {
return new DefaultColumnMetaData(AstrosoftTableColumn.C1){
@Override
public Class getColumnClass(AstrosoftTableColumn col) {
return Integer.class;
}
};
}
public EnumMap<Rasi, Integer> getVarga() {
return varga;
}
public void doExport(Exporter e) {
e.export(this);
}
}<|fim▁end|> | }; |
<|file_name|>hashsplit.py<|end_file_name|><|fim▁begin|>import math, os
from bup import _helpers, helpers
from bup.helpers import sc_page_size
_fmincore = getattr(helpers, 'fmincore', None)
BLOB_MAX = 8192*4 # 8192 is the "typical" blob size for bupsplit
BLOB_READ_SIZE = 1024*1024
MAX_PER_TREE = 256
progress_callback = None
fanout = 16
GIT_MODE_FILE = 0100644
GIT_MODE_TREE = 040000
GIT_MODE_SYMLINK = 0120000
assert(GIT_MODE_TREE != 40000) # 0xxx should be treated as octal
# The purpose of this type of buffer is to avoid copying on peek(), get(),
# and eat(). We do copy the buffer contents on put(), but that should
# be ok if we always only put() large amounts of data at a time.
class Buf:
def __init__(self):
self.data = ''
self.start = 0
def put(self, s):
if s:
self.data = buffer(self.data, self.start) + s
self.start = 0
def peek(self, count):
return buffer(self.data, self.start, count)
def eat(self, count):
self.start += count
def get(self, count):
v = buffer(self.data, self.start, count)
self.start += count
return v
def used(self):
return len(self.data) - self.start
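# A minimal usage sketch of the zero-copy contract described above
# (illustrative values):
#   b = Buf(); b.put('abcdef')
#   b.peek(3)         # buffer('abc') -- a view, no copy
#   b.eat(2)          # just advances the start offset
#   str(b.get(2))     # 'cd'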
def _fadvise_pages_done(fd, first_page, count):
assert(first_page >= 0)
assert(count >= 0)
if count > 0:
_helpers.fadvise_done(fd,
first_page * sc_page_size,
count * sc_page_size)
def _nonresident_page_regions(status_bytes, max_region_len=None):
"""Return (start_page, count) pairs in ascending start_page order for
each contiguous region of nonresident pages indicated by the
mincore() status_bytes. Limit the number of pages in each region
to max_region_len."""
assert(max_region_len is None or max_region_len > 0)
start = None
for i, x in enumerate(status_bytes):
in_core = x & helpers.MINCORE_INCORE
if start is None:
if not in_core:
start = i
else:
count = i - start
if in_core:
yield (start, count)
start = None
elif max_region_len and count >= max_region_len:
yield (start, count)
start = i
if start is not None:
yield (start, len(status_bytes) - start)
def _uncache_ours_upto(fd, offset, first_region, remaining_regions):
"""Uncache the pages of fd indicated by first_region and
remaining_regions that are before offset, where each region is a
(start_page, count) pair. The final region must have a start_page
of None."""
rstart, rlen = first_region
while rstart is not None and (rstart + rlen) * sc_page_size <= offset:
_fadvise_pages_done(fd, rstart, rlen)
rstart, rlen = next(remaining_regions, (None, None))
return (rstart, rlen)
def readfile_iter(files, progress=None):
for filenum,f in enumerate(files):
ofs = 0
b = ''
fd = rpr = rstart = rlen = None
if _fmincore and hasattr(f, 'fileno'):
fd = f.fileno()
max_chunk = max(1, (8 * 1024 * 1024) / sc_page_size)
rpr = _nonresident_page_regions(_fmincore(fd), max_chunk)
rstart, rlen = next(rpr, (None, None))
while 1:
if progress:
progress(filenum, len(b))
b = f.read(BLOB_READ_SIZE)
ofs += len(b)
if rpr:
rstart, rlen = _uncache_ours_upto(fd, ofs, (rstart, rlen), rpr)
if not b:
break
yield b
if rpr:
rstart, rlen = _uncache_ours_upto(fd, ofs, (rstart, rlen), rpr)
def _splitbuf(buf, basebits, fanbits):
while 1:
b = buf.peek(buf.used())
(ofs, bits) = _helpers.splitbuf(b)<|fim▁hole|> ofs = BLOB_MAX
level = 0
else:
level = (bits-basebits)//fanbits # integer division
buf.eat(ofs)
yield buffer(b, 0, ofs), level
else:
break
while buf.used() >= BLOB_MAX:
# limit max blob size
yield buf.get(BLOB_MAX), 0
def _hashsplit_iter(files, progress):
assert(BLOB_READ_SIZE > BLOB_MAX)
basebits = _helpers.blobbits()
fanbits = int(math.log(fanout or 128, 2))
buf = Buf()
for inblock in readfile_iter(files, progress):
buf.put(inblock)
for buf_and_level in _splitbuf(buf, basebits, fanbits):
yield buf_and_level
if buf.used():
yield buf.get(buf.used()), 0
def _hashsplit_iter_keep_boundaries(files, progress):
for real_filenum,f in enumerate(files):
if progress:
def prog(filenum, nbytes):
# the inner _hashsplit_iter doesn't know the real file count,
# so we'll replace it here.
return progress(real_filenum, nbytes)
else:
prog = None
for buf_and_level in _hashsplit_iter([f], progress=prog):
yield buf_and_level
def hashsplit_iter(files, keep_boundaries, progress):
if keep_boundaries:
return _hashsplit_iter_keep_boundaries(files, progress)
else:
return _hashsplit_iter(files, progress)
total_split = 0
def split_to_blobs(makeblob, files, keep_boundaries, progress):
global total_split
for (blob, level) in hashsplit_iter(files, keep_boundaries, progress):
sha = makeblob(blob)
total_split += len(blob)
if progress_callback:
progress_callback(len(blob))
yield (sha, len(blob), level)
def _make_shalist(l):
ofs = 0
l = list(l)
total = sum(size for mode,sha,size, in l)
vlen = len('%x' % total)
shalist = []
for (mode, sha, size) in l:
shalist.append((mode, '%0*x' % (vlen,ofs), sha))
ofs += size
assert(ofs == total)
return (shalist, total)
def _squish(maketree, stacks, n):
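# Fold the per-level stacks upward: every level below n (and any level
# that has accumulated MAX_PER_TREE entries) is combined into a single
# tree object recorded on the level above.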
i = 0
while i < n or len(stacks[i]) >= MAX_PER_TREE:
while len(stacks) <= i+1:
stacks.append([])
if len(stacks[i]) == 1:
stacks[i+1] += stacks[i]
elif stacks[i]:
(shalist, size) = _make_shalist(stacks[i])
tree = maketree(shalist)
stacks[i+1].append((GIT_MODE_TREE, tree, size))
stacks[i] = []
i += 1
def split_to_shalist(makeblob, maketree, files,
keep_boundaries, progress=None):
sl = split_to_blobs(makeblob, files, keep_boundaries, progress)
assert(fanout != 0)
if not fanout:
shal = []
for (sha,size,level) in sl:
shal.append((GIT_MODE_FILE, sha, size))
return _make_shalist(shal)[0]
else:
stacks = [[]]
for (sha,size,level) in sl:
stacks[0].append((GIT_MODE_FILE, sha, size))
_squish(maketree, stacks, level)
#log('stacks: %r\n' % [len(i) for i in stacks])
_squish(maketree, stacks, len(stacks)-1)
#log('stacks: %r\n' % [len(i) for i in stacks])
return _make_shalist(stacks[-1])[0]
def split_to_blob_or_tree(makeblob, maketree, files,
keep_boundaries, progress=None):
shalist = list(split_to_shalist(makeblob, maketree,
files, keep_boundaries, progress))
if len(shalist) == 1:
return (shalist[0][0], shalist[0][2])
elif len(shalist) == 0:
return (GIT_MODE_FILE, makeblob(''))
else:
return (GIT_MODE_TREE, maketree(shalist))
def open_noatime(name):
fd = _helpers.open_noatime(name)
try:
return os.fdopen(fd, 'rb', 1024*1024)
except:
try:
os.close(fd)
except:
pass
raise<|fim▁end|> | if ofs:
if ofs > BLOB_MAX: |
<|file_name|>ua_utils.py<|end_file_name|><|fim▁begin|>"""
Usefull method and classes not belonging anywhere and depending on opcua library
"""
from dateutil import parser
from datetime import datetime
from enum import Enum, IntEnum
import uuid
from opcua import ua
from opcua.ua.uaerrors import UaError
def val_to_string(val):
"""
convert a python object or python-opcua object to a string
that should be easy for a human to understand and modify,
and not too hard to parse back ....not easy
meant for UIs or command lines
"""
if isinstance(val, (list, tuple)):
res = []
for v in val:
res.append(val_to_string(v))
return "[" + ", ".join(res) + "]"
if hasattr(val, "to_string"):
val = val.to_string()
elif isinstance(val, ua.StatusCode):
val = val.name
elif isinstance(val, (Enum, IntEnum)):
val = val.name
elif isinstance(val, ua.DataValue):
val = variant_to_string(val.Value)
elif isinstance(val, ua.XmlElement):
val = val.Value
elif isinstance(val, str):
pass
elif isinstance(val, bytes):
val = str(val)
elif isinstance(val, datetime):
val = val.isoformat()
elif isinstance(val, (int, float)):
val = str(val)
else:
# FIXME: Some types are probably missing!
val = str(val)
return val
def variant_to_string(var):
"""
convert a variant to a string that should be easy for a human to
understand and modify, and not too hard to parse back ....not easy
meant for UIs or command lines
"""
return val_to_string(var.Value)
def string_to_val(string, vtype):
"""
Convert back a string to a python or python-opcua object
Note: no error checking is done here, supplying null strings could raise exceptions (datetime and guid)
"""
string = string.strip()
if string.startswith("["):
string = string[1:-1]
var = []
for s in string.split(","):
s = s.strip()
val = string_to_val(s, vtype)
var.append(val)
return var
if vtype == ua.VariantType.Null:
val = None
elif vtype == ua.VariantType.Boolean:
if string in ("True", "true", "on", "On", "1"):
val = True
else:
val = False
elif vtype in (ua.VariantType.SByte, ua.VariantType.Int16, ua.VariantType.Int32, ua.VariantType.Int64):
val = int(string)
elif vtype in (ua.VariantType.Byte, ua.VariantType.UInt16, ua.VariantType.UInt32, ua.VariantType.UInt64):
val = int(string)
elif vtype in (ua.VariantType.Float, ua.VariantType.Double):
val = float(string)
elif vtype == ua.VariantType.XmlElement:
val = ua.XmlElement(string)
elif vtype == ua.VariantType.String:
val = string
elif vtype == ua.VariantType.ByteString:
val = string.encode("utf-8")
elif vtype in (ua.VariantType.NodeId, ua.VariantType.ExpandedNodeId):
val = ua.NodeId.from_string(string)
elif vtype == ua.VariantType.QualifiedName:
val = ua.QualifiedName.from_string(string)
elif vtype == ua.VariantType.DateTime:
val = parser.parse(string)
elif vtype == ua.VariantType.LocalizedText:
val = ua.LocalizedText(string)
elif vtype == ua.VariantType.StatusCode:
val = ua.StatusCode(string)
elif vtype == ua.VariantType.Guid:
val = uuid.UUID(string)
else:
# FIXME: Some types are probably missing!
raise NotImplementedError
return val
def string_to_variant(string, vtype):
"""
convert back a string to a ua.Variant
"""
return ua.Variant(string_to_val(string, vtype), vtype)
def get_node_children(node, nodes=None):
"""
Get recursively all children of a node
"""
if nodes is None:
nodes = [node]
for child in node.get_children():
nodes.append(child)
get_node_children(child, nodes)
return nodes
def get_node_subtypes(node, nodes=None):
if nodes is None:
nodes = [node]
for child in node.get_children(refs=ua.ObjectIds.HasSubtype):
nodes.append(child)
get_node_subtypes(child, nodes)
return nodes
def get_node_supertypes(node, includeitself=False, skipbase=True):
"""
Return all supertype parents of the node, recursively
:param node: can be a ua.Node or ua.NodeId
:param includeitself: also include the node itself in the list
:param skipbase: don't include the toplevel one
:returns: list of ua.Node, top parent first
"""
parents = []
if includeitself:
parents.append(node)
parents.extend(_get_node_supertypes(node))
if skipbase and len(parents) > 1:
parents = parents[:-1]
return parents
def _get_node_supertypes(node):
"""
recursive implementation of get_node_supertypes
"""
basetypes = []
parent = get_node_supertype(node)
if parent:
basetypes.append(parent)
basetypes.extend(_get_node_supertypes(parent))<|fim▁hole|>
def get_node_supertype(node):
"""
return node supertype or None
"""
supertypes = node.get_referenced_nodes(refs=ua.ObjectIds.HasSubtype,
direction=ua.BrowseDirection.Inverse,
includesubtypes=True)
if supertypes:
return supertypes[0]
else:
return None
def is_child_present(node, browsename):
"""
return whether a browsename is present as a child of the provided node
:param node: node wherein to find the browsename
:param browsename: browsename to search
:returns: True if the browsename is present else False
"""
child_descs = node.get_children_descriptions()
for child_desc in child_descs:
if child_desc.BrowseName == browsename:
return True
return False
def data_type_to_variant_type(dtype_node):
"""
Given a Node datatype, find out the variant type to encode
data. This is not exactly straightforward...
"""
base = get_base_data_type(dtype_node)
if base.nodeid.Identifier != 29:
return ua.VariantType(base.nodeid.Identifier)
else:
# we have an enumeration, value is a Int32
return ua.VariantType.Int32
def get_base_data_type(datatype):
"""
Looks up the base datatype of the provided datatype Node
The base datatype is either:
A primitive type (ns=0, i<=21) or a complex one (ns=0 i>21 and i<=30) like Enum and Struct.
Args:
datatype: NodeId of a datatype of a variable
Returns:
NodeId of datatype base or None in case base datatype cannot be determined
"""
base = datatype
while base:
if base.nodeid.NamespaceIndex == 0 and isinstance(base.nodeid.Identifier, int) and base.nodeid.Identifier <= 30:
return base
base = get_node_supertype(base)
raise ua.UaError("Datatype must be a subtype of builtin types {0!s}".format(datatype))
def get_nodes_of_namespace(server, namespaces=None):
"""
Get the nodes of one or more namespaces.
Args:
server: opc ua server to use
namespaces: list of string uri or int indexes of the namespace to export
Returns:
List of nodes that are part of the provided namespaces
"""
if namespaces is None:
namespaces = []
ns_available = server.get_namespace_array()
if not namespaces:
namespaces = ns_available[1:]
elif isinstance(namespaces, (str, int)):
namespaces = [namespaces]
# make sure all namespaces are indexes (if needed convert strings to indexes)
namespace_indexes = [n if isinstance(n, int) else ns_available.index(n) for n in namespaces]
# filter nodeids based on the provided namespaces and convert each nodeid to a node
nodes = [server.get_node(nodeid) for nodeid in server.iserver.aspace.keys()
if nodeid.NamespaceIndex != 0 and nodeid.NamespaceIndex in namespace_indexes]
return nodes
def get_default_value(uatype):
if isinstance(uatype, ua.VariantType):
return ua.get_default_values(uatype)
elif hasattr(ua.VariantType, uatype):
return ua.get_default_value(getattr(ua.VariantType, uatype))
else:
return getattr(ua, uatype)()<|fim▁end|> |
return basetypes
|
<|file_name|>HelpDialog.js<|end_file_name|><|fim▁begin|>/*
* ../../../..//localization/fr/HelpDialog.js
*
* Copyright (c) 2009-2018 The MathJax Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<|fim▁hole|> *
* Copyright (c) 2009-2018 The MathJax Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
MathJax.Localization.addTranslation("fr", "HelpDialog", {
version: "2.7.5",
isLoaded: true,
strings: {
Help: "Aide MathJax",
MathJax:
"*MathJax* est une biblioth\u00E8que JavaScript qui permet aux auteurs de pages d\u2019inclure des math\u00E9matiques dans leurs pages web. En tant que lecteur, vous n\u2019avez rien besoin de faire pour que cela fonctionne.",
Browsers:
"*Navigateurs*: MathJax fonctionne avec tous les navigateurs modernes y compris Internet Explorer 6, Firefox 3, Chrome 0.2, Safari 2, Opera 9.6 et leurs versions sup\u00E9rieures ainsi que la plupart des navigateurs pour mobiles et tablettes.",
Menu:
"*Menu math*: MathJax ajoute un menu contextuel aux \u00E9quations. Cliquez-droit ou Ctrl-cliquez sur n\u2019importe quelle formule math\u00E9matique pour acc\u00E9der au menu.",
ShowMath:
"*Afficher les maths comme* vous permet d\u2019afficher le balisage source de la formule pour copier-coller (comme MathML ou dans son format d\u2019origine).",
Settings:
"*Param\u00E8tres* vous donne le contr\u00F4le sur les fonctionnalit\u00E9s de MathJax, comme la taille des math\u00E9matiques, et le m\u00E9canisme utilis\u00E9 pour afficher les \u00E9quations.",
Language:
"*Langue* vous laisse s\u00E9lectionner la langue utilis\u00E9e par MathJax pour ses menus et ses messages d\u2019avertissement.",
Zoom:
"*Zoom des maths*: Si vous avez des difficult\u00E9s \u00E0 lire une \u00E9quation, MathJax peut l\u2019agrandir pour vous aider \u00E0 mieux la voir.",
Accessibilty:
"*Accessibilit\u00E9*: MathJax travaillera automatiquement avec les lecteurs d\u2019\u00E9cran pour rendre les math\u00E9matiques accessibles aux malvoyants.",
Fonts:
"*Polices*: MathJax utilisera certaines polices math\u00E9matiques si elles sont install\u00E9es sur votre ordinateur\u202F; sinon, il utilisera les polices trouv\u00E9es sur le web. Bien que ce ne soit pas obligatoire, des polices install\u00E9es localement acc\u00E9l\u00E9reront la composition. Nous vous sugg\u00E9rons d\u2019installer les [polices STIX](%1).",
CloseDialog: "Fermer la bo\u00EEte de dialogue d\u2019aide"
}
});
MathJax.Ajax.loadComplete("[MathJax]/localization/fr/HelpDialog.js");<|fim▁end|> | /*************************************************************
*
* MathJax/localization/fr/HelpDialog.js |
<|file_name|>PackagePool.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: iso-8859-2 -*-
import string
import copy
import os
import gzip
import gtk
import commands
try:
from backports import lzma
except ImportError:
from lzma import LZMAFile as lzma
import singletons
from common import *
import common; _ = common._
from Source import *
from Package import *
from Category import *
def czfind(istr):
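# Returns the first line of istr that contains no spaces; used below to
# pick the hdlist (.cz) path out of the 'find | grep' command output.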
l = len(istr)
i = 0
word = ''
flag = False
while i < l:
if istr[i] == '\n':
if flag:
flag = False
else:
break
word = ''
elif istr[i] == ' ':
flag = True
else:
word += istr[i]
i += 1
return word
class PackagePool:
"""This class retrieves and structures every packages that are accessible
from the system."""
HASHED_LEN = 2 # used by GetUpgradeableState
def __init__(self):
self.initialized = False
def Init(self):
"""Reinitialize the inner state of the package pool. Must be called
in case of manipulating the package registry."""
self.initialized = False
self.all_packages = []
self.package_name_pool = {}
self.installed_package_names = {}
singletons.application.DisplayProgress(_('Reading package sources'))
self.RegisterUpgradablePackages()
self.all_sources = self.GetSources()
singletons.application.DisplayProgress(_('Querying installed packages'))
self.RegisterInstalledPackages()
self.RegisterInstallablePackages()
self.initialized = True
def GetSources(self):
"""Retrieve and return the Source objects containted in urpmi.cfg."""
def get_source_name(line):
"""Extract the source name from the line of the file."""
prev_chr = line[0]
name = line[0]
for c in line[1:-1]:
if c == ' ':
if prev_chr == '\\':
name += ' '
else:
break
elif c != '\\':
name += c
prev_chr = c
return name
file = open('/etc/urpmi/urpmi.cfg')
sources = []
word = ''
flag0 = False
flag1 = False
name_flag = False
while 1:
c = file.read(1)
if c == '':
break
elif c == '{':
if flag0 == False:
flag0 = True
else:
name_flag = False
name = get_source_name(word)
source = Source(name)
source.hdlist = czfind(commands.getoutput('find /var/lib/urpmi/ | grep cz | grep ' + name))
print 'HL:', source.hdlist
if source.hdlist != '':
sources.append(source)
word = ''
elif c == '}':
if flag1 == False:
flag1 = True
name_flag = True
else:
name_flag = True
elif name_flag == True and c not in ['\\', '\n']:
word += c
return sources
def GetActiveSources(self, new=False):
"""Return the active Source objects."""
if new:
all_sources = self.GetSources()
else:
all_sources = self.all_sources
return [source for source in all_sources if not source.ignore]
def RegisterInstalledPackages(self):
"""Retrieve a dictionary containing every installed packages on the system."""
file = os.popen('rpmquery --all "--queryformat=%{name}-%{version}-%{release}.%{arch}:%{size}:%{group}:%{installtime}\n"')
for line in file:
fields = line.strip().split(':')
name = fields[0]
size = int(fields[1])
category = fields[2]
btime = int(fields[3])
self.AddPackage(name, size, category, time=btime)
def RegisterInstallablePackages(self):
"""Get the list of every packages that are installable on the system."""
for source in self.GetActiveSources():
#disable gzip file = gzip.open(source.hdlist)
# print "DEBUG " + lzma.open(source.hdlist).read()
file = lzma(source.hdlist)
for line in file:
if line[:6] != '@info@':
continue
fields = line.strip()[7:].split('@')
longname = fields[0]
size = int(fields[2])
category = fields[3]
self.AddPackage(longname, size, category, source)
def RegisterUpgradablePackages(self):
upl = commands.getoutput('urpmq --auto-select --whatrequires').split()
l = len (upl)
i = 0
self.upgradable_packages = []
self.upgradable_packages_long = []
while i < l:
self.upgradable_packages.append(self.generate_shortname(upl[i]))
self.upgradable_packages_long.append(upl[i])
i += 1
def generate_shortname(self, longname):
"""Generate shortname from a longname. This is a workaround if association failed."""
print("LONGN:", longname)
pos1 = longname.rfind("-")
if pos1 > 0:
pos2 = longname[:pos1].rfind("-")
return longname[:pos2]
else:
return ""
def RegisterCategory(self, category_str, package):
"""Register category 'category' in the category tree."""
category_path = category_str.split('/')
current_category = self.category_tree
for subcategory_name in category_path:
current_category = current_category.GetSubCategory(subcategory_name)
current_category.AddPackage(package)
def AddPackage(self, longname, size, category, source=None, time=-1):
"""Add package to the registry."""
if self.package_name_pool.has_key(longname):
self.package_name_pool[longname].AddSource(source)
return
package = Package()
package.longname = longname
package.shortname = self.generate_shortname(longname) ### I added this !!!
package.size = size
package.category = category
if source:
package.AddSource(source)
package.is_installed = False
else:
package.is_installed = True
package.time = time
if len(package.longname) >= 3:
if package.longname.lower().find('lib') != -1:
package.is_library = True
else:
package.is_library = False
else:
package.is_library = False
if package.shortname in self.upgradable_packages and package.is_installed:
package.is_upgradable = True
else:
package.is_upgradable = False
self.package_name_pool[longname] = package
self.all_packages.append(package)
def GetPackagesContainingDescription(self, text):
"""Get the list of every packages that are installable on the system."""
active_sources = self.GetActiveSources() #[source for source in self.all_sources if not source.ignore]
containing_longnames = {}
for source in active_sources:<|fim▁hole|> for line in file:
if line[:9] == '@summary@':
fields = line.strip().split('@')
description = fields[2]
elif line[:6] == '@info@':
fields = line.strip().split('@')
longname = fields[2]
if description.lower().find(text) != -1:
containing_longnames[longname] = True
return containing_longnames
FILTER_PACKAGENAME = 0
FILTER_DESCRIPTION = 1
FILTER_FILENAME = 2
def GetPackagesContainingFiles(self, search_text):
pass
# active_sources = self.GetActiveSources()
# active_source_paths = ''
# containing_longnames = {}
# for source in active_sources:
# active_source_paths += escape(source.hdlist) + ' '
# command = 'parsehdlist --fileswinfo ' + active_source_paths + ' | grep ".*:files:.*'+escape(search_text)+'.*"'
# file = os.popen(command)
# for line in file:
# containing_longnames[ line.split(':')[0] ] = True
# return containing_longnames
def Filter(self, application, library, installed, noninstalled, search_mode, search_text):
"""Filter packages."""
# reset pacage registry
self.packages = []
self.category_tree = Category('root')
search_text = search_text.lower()
if search_mode == self.FILTER_DESCRIPTION:
containing_longnames = self.GetPackagesContainingDescription(search_text)
elif search_mode == self.FILTER_FILENAME:
containing_longnames = self.GetPackagesContainingFiles(search_text)
for source in self.all_sources:
source.packages = []
for package in self.all_packages:
inst = (package.is_installed and installed) or (not package.is_installed and noninstalled)
ptype = (package.is_library and library) or (not package.is_library and application)
if search_mode == self.FILTER_PACKAGENAME:
search_inc = package.longname.lower().find(search_text)!=-1
elif search_mode == self.FILTER_DESCRIPTION:
search_inc = containing_longnames.has_key(package.longname)
elif search_mode == self.FILTER_FILENAME:
search_inc = containing_longnames.has_key(package.shortname)
else:
search_inc = True
included = inst and ptype and search_inc
if included:
for source in package.sources:
source.AddPackage(package)
self.RegisterCategory(package.category, package)
self.packages.append(package)<|fim▁end|> | file = lzma.open(source.hdlist) |
<|file_name|>migrateToPostgres.ts<|end_file_name|><|fim▁begin|>/*
Copyright 2019 The Matrix.org Foundation C.I.C.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
* This script will allow you to migrate your NeDB database
* to a postgres one.
*/
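// Example invocation (hypothetical paths; argv[2..4] are read below):
//   node migrateToPostgres.js postgresql://user:pass@host/db ./nedb-data slack_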
import { Logging, MatrixUser, UserBridgeStore, RoomBridgeStore, EventBridgeStore } from "matrix-appservice-bridge";
import NeDB from "nedb";
import * as path from "path";
import { promisify } from "util";
import { NedbDatastore } from "../datastore/NedbDatastore";
import { PgDatastore } from "../datastore/postgres/PgDatastore";
import { BridgedRoom } from "../BridgedRoom";
import { SlackGhost } from "../SlackGhost";
import { Datastore } from "../datastore/Models";
import { SlackClientFactory } from "../SlackClientFactory";
Logging.configure({ console: "info" });
const log = Logging.get("script");
const POSTGRES_URL = process.argv[2];
const NEDB_DIRECTORY = process.argv[3] || "";
const USER_PREFIX = process.argv[4] || "slack_";
const main = async () => {
if (!POSTGRES_URL) {
log.error("You must specify the postgres url (ex: postgresql://user:pass@host/database");
throw Error("");
}
const pgres = new PgDatastore(POSTGRES_URL);
await pgres.ensureSchema();
const config = {
autoload: false,
};
const teamStore = new NeDB({ filename: path.join(NEDB_DIRECTORY, "teams.db"), ...config});
const roomStore = new NeDB({ filename: path.join(NEDB_DIRECTORY, "room-store.db"), ...config});
const userStore = new NeDB({ filename: path.join(NEDB_DIRECTORY, "user-store.db"), ...config});
const eventStore = new NeDB({ filename: path.join(NEDB_DIRECTORY, "event-store.db"), ...config});
try {
await promisify(teamStore.loadDatabase).bind(teamStore)();
await promisify(roomStore.loadDatabase).bind(roomStore)();
await promisify(userStore.loadDatabase).bind(userStore)();
await promisify(eventStore.loadDatabase).bind(eventStore)();
} catch (ex) {
log.error("Couldn't load datastores");
log.error("Ensure you have given the correct path to the database.");
throw ex;
}
const nedb = new NedbDatastore(
new UserBridgeStore(userStore),
new RoomBridgeStore(roomStore),
new EventBridgeStore(eventStore),
teamStore,
);
try {
const startedAt = Date.now();
await migrateFromNedb(nedb, pgres);
log.info(`Completed migration in ${Math.round(Date.now() - startedAt)}ms`);
} catch (ex) {
log.error("An error occured while migrating databases:");
log.error(ex);
log.error("Your existing databases have not been modified, but you may need to drop the postgres table and start over");
}
};
export const migrateFromNedb = async(nedb: NedbDatastore, targetDs: Datastore): Promise<void> => {
const allRooms = await nedb.getAllRooms();
const allEvents = await nedb.getAllEvents();
// the format has changed quite a bit.
const allTeams = (await nedb.getAllTeams()) as any[];
const allSlackUsers = await nedb.getAllUsers(false);
const allMatrixUsers = await nedb.getAllUsers(true);
const slackClientFactory = new SlackClientFactory(targetDs);
log.info(`Migrating ${allRooms.length} rooms`);
log.info(`Migrating ${allTeams.length} teams`);
log.info(`Migrating ${allEvents.length} events`);
log.info(`Migrating ${allSlackUsers.length} slack users`);
log.info(`Migrating ${allMatrixUsers.length} matrix users`);
const teamTokenMap: Map<string, string> = new Map(); // token -> teamId.
const preTeamMigrations = async() => Promise.all(allRooms.map(async (room) => {
// This is an old format remote
const remote = (room.remote as any);
const at = remote.slack_bot_token || remote.access_token;
if (!at) {
return;
}
try {
const teamId = await slackClientFactory.upsertTeamByToken(at);
log.info("Got team from token:", teamId);
teamTokenMap.set(at, teamId);
} catch (ex) {
log.warn("Failed to get team token for slack token:", ex);
}
}));
const teamMigrations = async() => Promise.all(allTeams.map(async (team, i) => {
if (team.bot_token && !teamTokenMap.has(team.bot_token)) {
let teamId: string;
try {
teamId = await slackClientFactory.upsertTeamByToken(team.bot_token);
} catch (ex) {
log.warn("Team token is not valid:", ex);
return;
}
log.info("Got team from token:", teamId);
teamTokenMap.set(team.bot_token, teamId);
} else {
log.info(`Skipped team (${i + 1}/${allTeams.length})`);
}
log.info(`Migrated team (${i + 1}/${allTeams.length})`);
}));
const roomMigrations = async() => Promise.all(allRooms.map(async (room, i) => {
const token = (room.remote as any).slack_bot_token;
if (!room.remote.slack_team_id && token) {
room.remote.slack_team_id = teamTokenMap.get(token);
}
await targetDs.upsertRoom(BridgedRoom.fromEntry(null as any, room));
log.info(`Migrated room ${room.id} (${i + 1}/${allRooms.length})`);
}));
const eventMigrations = async() => Promise.all(allEvents.map(async (event, i) => {
await targetDs.upsertEvent(event);
log.info(`Migrated event ${event.eventId} ${event.slackTs} (${i + 1}/${allEvents.length})`);
}));
const slackUserMigrations = async() => Promise.all(allSlackUsers.map(async (user, i) => {
if (!user.id) {
// Cannot migrate without ID.
return;
}
if (!user.slack_id || !user.team_id) {
const localpart = user.id.split(":")[0];
// XXX: we are making an assumption here that the prefix ends with _
const parts = localpart.slice(USER_PREFIX.length + 1).split("_"); // Remove any prefix.
// If we encounter more parts than expected, the domain may be underscored
while (parts.length > 2) {
parts[0] = `${parts.shift()}_${parts[0]}`;
}
const existingTeam = readyTeams.find((t) => t.domain === parts[0]);
if (!existingTeam) {
log.warn("No existing team could be found for", user.id);
return;
}
user.slack_id = parts[1];
user.team_id = existingTeam!.id;
}
const ghost = SlackGhost.fromEntry(null as any, user);
await targetDs.upsertUser(ghost);
log.info(`Migrated slack user ${user.id} (${i + 1}/${allSlackUsers.length})`);
}));
const matrixUserMigrations = async() => Promise.all(allMatrixUsers.map(async (user, i) => {
const mxUser = new MatrixUser(user.id, user as unknown as Record<string, unknown>);
await targetDs.storeMatrixUser(mxUser);
log.info(`Migrated matrix user ${mxUser.getId()} (${i + 1}/${allMatrixUsers.length})`);
}));
log.info("Starting eventMigrations");
await eventMigrations();
log.info("Finished eventMigrations");
log.info("Starting preTeamMigrations");
await preTeamMigrations();
log.info("Finished preTeamMigrations");
log.info("Starting teamMigrations");
await teamMigrations();
log.info("Finished teamMigrations");
const readyTeams = await targetDs.getAllTeams();<|fim▁hole|> await slackUserMigrations();
log.info("Finished slackUserMigrations");
log.info("Starting matrixUserMigrations");
await matrixUserMigrations();
log.info("Finished matrixUserMigrations");
};
main().then(() => {
log.info("finished");
}).catch((err) => {
log.error("failed:", err);
});<|fim▁end|> | log.info("Starting roomMigrations");
await roomMigrations();
log.info("Finished roomMigrations");
log.info("Starting slackUserMigrations"); |
<|file_name|>exercise.rs<|end_file_name|><|fim▁begin|>use regex::Regex;
use serde::Deserialize;
use std::fmt::{self, Display, Formatter};
use std::fs::{remove_file, File};
use std::io::Read;
use std::path::PathBuf;
use std::process::{self, Command, Output};
const RUSTC_COLOR_ARGS: &[&str] = &["--color", "always"];
const I_AM_DONE_REGEX: &str = r"(?m)^\s*///?\s*I\s+AM\s+NOT\s+DONE";
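// Matches marker comments such as "// I AM NOT DONE" or "/// I AM NOT DONE"
// (multi-line mode, optional leading whitespace).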
const CONTEXT: usize = 2;
fn temp_file() -> String {
format!("./temp_{}", process::id())
}
#[derive(Deserialize, Copy, Clone)]
#[serde(rename_all = "lowercase")]
pub enum Mode {
Compile,
Test,
}
#[derive(Deserialize)]
pub struct ExerciseList {
pub exercises: Vec<Exercise>,
}
#[derive(Deserialize)]
pub struct Exercise {
pub name: String,
pub path: PathBuf,
pub mode: Mode,
pub hint: String,
}
#[derive(PartialEq, Debug)]
pub enum State {
Done,
Pending(Vec<ContextLine>),
}
#[derive(PartialEq, Debug)]
pub struct ContextLine {
pub line: String,
pub number: usize,
pub important: bool,
}
impl Exercise {
pub fn compile(&self) -> Output {
match self.mode {
Mode::Compile => Command::new("rustc")
.args(&[self.path.to_str().unwrap(), "-o", &temp_file()])
.args(RUSTC_COLOR_ARGS)
.output(),
Mode::Test => Command::new("rustc")
.args(&["--test", self.path.to_str().unwrap(), "-o", &temp_file()])
.args(RUSTC_COLOR_ARGS)
.output(),
}
.expect("Failed to run 'compile' command.")
}
pub fn run(&self) -> Output {
Command::new(&temp_file())
.output()
.expect("Failed to run 'run' command")
}
pub fn clean(&self) {
let _ignored = remove_file(&temp_file());
}
pub fn state(&self) -> State {
let mut source_file =
File::open(&self.path).expect("We were unable to open the exercise file!");
let source = {
let mut s = String::new();
source_file
.read_to_string(&mut s)
.expect("We were unable to read the exercise file!");
s
};
let re = Regex::new(I_AM_DONE_REGEX).unwrap();
if !re.is_match(&source) {
return State::Done;
}
let matched_line_index = source
.lines()
.enumerate()
.filter_map(|(i, line)| if re.is_match(line) { Some(i) } else { None })
.next()
.expect("This should not happen at all");
let min_line = ((matched_line_index as i32) - (CONTEXT as i32)).max(0) as usize;
let max_line = matched_line_index + CONTEXT;
let context = source
.lines()
.enumerate()
.filter(|&(i, _)| i >= min_line && i <= max_line)
.map(|(i, line)| ContextLine {
line: line.to_string(),
number: i + 1,
important: i == matched_line_index,
})
.collect();
State::Pending(context)
}
}
impl Display for Exercise {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "{}", self.path.to_str().unwrap())
}
}
#[cfg(test)]
mod test {
use super::*;
use std::path::Path;
#[test]
fn test_clean() {
File::create(&temp_file()).unwrap();
let exercise = Exercise {
name: String::from("example"),
path: PathBuf::from("example.rs"),
mode: Mode::Test,
hint: String::from(""),
};
exercise.clean();
assert!(!Path::new(&temp_file()).exists());
}
#[test]
fn test_pending_state() {
let exercise = Exercise {<|fim▁hole|> mode: Mode::Compile,
hint: String::new(),
};
let state = exercise.state();
let expected = vec![
ContextLine {
line: "// fake_exercise".to_string(),
number: 1,
important: false,
},
ContextLine {
line: "".to_string(),
number: 2,
important: false,
},
ContextLine {
line: "// I AM NOT DONE".to_string(),
number: 3,
important: true,
},
ContextLine {
line: "".to_string(),
number: 4,
important: false,
},
ContextLine {
line: "fn main() {".to_string(),
number: 5,
important: false,
},
];
assert_eq!(state, State::Pending(expected));
}
#[test]
fn test_finished_exercise() {
let exercise = Exercise {
name: "finished_exercise".into(),
path: PathBuf::from("tests/fixture/state/finished_exercise.rs"),
mode: Mode::Compile,
hint: String::new(),
};
assert_eq!(exercise.state(), State::Done);
}
}<|fim▁end|> | name: "pending_exercise".into(),
path: PathBuf::from("tests/fixture/state/pending_exercise.rs"), |
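
The context-window logic in Exercise::state() above keeps CONTEXT lines on either side of the matched line and clamps the window start at zero. A minimal Python sketch of the same idea (the input lines are illustrative, not the real rustlings fixture):

def context_window(lines, matched_index, context=2):
    # Clamp at 0 so a match near the top does not underflow.
    lo = max(matched_index - context, 0)
    hi = matched_index + context
    return [(i + 1, line, i == matched_index)
            for i, line in enumerate(lines)
            if lo <= i <= hi]

lines = ["// fake_exercise", "", "// I AM NOT DONE", "", "fn main() {"]
assert context_window(lines, 2)[2] == (3, "// I AM NOT DONE", True)
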
<|file_name|>Li2_atomize.py<|end_file_name|><|fim▁begin|>import os
from ase.structure import molecule
from ase.io import read, write
from ase.parallel import rank
from gpaw import GPAW, restart
import warnings
# cmr calls every method it finds on ase.atoms (discovered via the inspect module),
# so deprecated methods get called as well; we silence those deprecation warnings.
warnings.filterwarnings('ignore', 'ase.atoms.*deprecated',)
import cmr
#from cmr.tools.log import Log
#cmr.logger.set_message_selection(Log.MSG_TYPE_ALL)
calculate = True
recalculate = True
analyse_from_dir = True # analyse local cmr files
upload_to_db = False # upload cmr files to the database
analyse_from_db = False # analyse database
create_group = True # group calculations belonging to a given reaction
clean = False
if create_group: assert analyse_from_dir or analyse_from_db
if analyse_from_db: assert upload_to_db
symbol = 'Li'
# define the project in order to find it in the database!
project_id = 'my first project: atomize'
vacuum = 3.5
# calculator parameters
xc = 'LDA'
mode = 'lcao'<|fim▁hole|>cmr_params_template = {
'db_keywords': [project_id],
# add project_id also as a field to support search across projects
'project_id': project_id,
# user's tags
'U_vacuum': vacuum,
'U_xc': xc,
'U_mode': mode,
'U_h': h,
}
if calculate:
# molecule
formula = symbol + '2'
# set formula name to be written into the cmr file
cmr_params = cmr_params_template.copy()
cmr_params['U_formula'] = formula
cmrfile = formula + '.cmr'
system = molecule(formula)
system.center(vacuum=vacuum)
# Note: Molecules do not need broken cell symmetry!
if 0:
system.cell[1, 1] += 0.01
system.cell[2, 2] += 0.02
# Hund rule (for atoms)
hund = (len(system) == 1)
cmr_params['U_hund'] = hund
# first calculation: LDA lcao
calc = GPAW(mode=mode, xc=xc, h=h, hund=hund, txt=formula + '.txt')
system.set_calculator(calc)
e = system.get_potential_energy()
# write gpw file
calc.write(formula)
# add total energy to users tags
cmr_params['U_potential_energy'] = e
# write the information 'as in' corresponding trajectory file
# plus cmr_params into cmr file
write(cmrfile, system, cmr_params=cmr_params)
del calc
# atom
formula = symbol
# set formula name to be written into the cmr file
cmr_params = cmr_params_template.copy()
cmr_params['U_formula'] = formula
cmrfile = formula + '.cmr'
system = molecule(formula)
system.center(vacuum=vacuum)
# Note: Li does not need broken cell symmetry! Many other atoms do!
if 0:
system.cell[1, 1] += 0.01
system.cell[2, 2] += 0.02
# Hund rule (for atoms)
hund = (len(system) == 1)
cmr_params['U_hund'] = hund
# first calculation: LDA lcao
calc = GPAW(mode=mode, xc=xc, h=h, hund=hund, txt=formula + '.txt')
system.set_calculator(calc)
e = system.get_potential_energy()
# write gpw file
calc.write(formula)
# add total energy to users tags
cmr_params['U_potential_energy'] = e
# write the information 'as in' corresponding trajectory file
# plus cmr_params into cmr file
write(cmrfile, system, cmr_params=cmr_params)
del calc
if recalculate:
# now calculate PBE energies on LDA orbitals
# molecule
formula = symbol + '2'
system, calc = restart(formula, txt=None)
ediff = calc.get_xc_difference('PBE')
cmrfile = formula + '.cmr'
# add new results to the cmrfile
data = cmr.read(cmrfile)
data.set_user_variable('U_potential_energy_PBE', data['U_potential_energy'] + ediff)
data.write(cmrfile)
del calc
# atom
formula = symbol
system, calc = restart(formula, txt=None)
ediff = calc.get_xc_difference('PBE')
cmrfile = formula + '.cmr'
# add new results to the cmrfile
data = cmr.read(cmrfile)
data.set_user_variable('U_potential_energy_PBE', data['U_potential_energy'] + ediff)
data.write(cmrfile)
del calc
if analyse_from_dir:
# analyze the results from cmr files in the local directory
from cmr.ui import DirectoryReader
# read all compounds in the project with lcao and LDA orbitals
reader = DirectoryReader(directory='.', ext='.cmr')
all = reader.find(name_value_list=[('U_mode', 'lcao'), ('U_xc', 'LDA')],
keyword_list=[project_id])
if rank == 0:
print 'results from cmr files in the local directory'
# print requested results
# column_length=0 aligns data in the table (-1 : data unaligned is default)
all.print_table(column_length=0,
columns=['U_formula', 'U_vacuum',
'U_xc', 'U_h', 'U_hund',
'U_potential_energy', 'U_potential_energy_PBE',
'ase_temperature'])
# access the results directly and calculate atomization energies
f2 = symbol + '2'
f1 = symbol
if rank == 0:
# results are accessible only on master rank
r2 = all.get('U_formula', f2)
r1 = all.get('U_formula', f1)
# calculate atomization energies (ea)
ea_LDA = 2 * r1['U_potential_energy'] - r2['U_potential_energy']
ea_PBE = 2 * r1['U_potential_energy_PBE'] - r2['U_potential_energy_PBE']
print 'atomization energy [eV] ' + xc + ' = ' + str(ea_LDA)
print 'atomization energy [eV] PBE = ' + str(ea_PBE)
if create_group:
# ea_LDA and ea_PBE define a group
group = cmr.create_group();
group.add(r1['db_hash']);
group.add(r2['db_hash']);
group.set_user_variable('U_ea_LDA', ea_LDA)
group.set_user_variable('U_ea_PBE', ea_PBE)
group.set_user_variable('U_description', 'atomization energy [eV]')
group.set_user_variable('U_reaction', '2 * ' + symbol + ' - ' + symbol + '2')
group.set_user_variable('db_keywords', [project_id])
group.set_user_variable('project_id', project_id)
group.write(symbol + '2_atomize_from_dir.cmr');
if True:
all = reader.find(keyword_list=[project_id])
if rank == 0:
print 'contents of the cmr files present in the local directory'
# print requested results
# column_length=0 aligns data in the table (-1 : data unaligned is default)
all.print_table(column_length=0,
columns=['U_formula', 'U_vacuum',
'U_xc', 'U_h', 'U_hund',
'U_potential_energy', 'U_potential_energy_PBE',
'ase_temperature', 'U_reaction', 'U_ea_LDA', 'U_ea_PBE', 'U_description'])
if upload_to_db:
# upload cmr files to the database
if rank == 0:
os.system('cmr --commit ' + symbol + '*.cmr')
if analyse_from_db:
# analyze the results from the database
# analysis can only be performed on rank 0!!
from cmr.ui import DBReader
reader = DBReader()
all = reader.find(name_value_list=[('U_mode', 'lcao'),
('U_xc', 'LDA'),
#('db_user', '')
],
keyword_list=[project_id])
if rank == 0:
print 'results from the database'
# print requested results
# column_length=0 aligns data in the table (-1 : data unaligned is default)
all.print_table(column_length=0,
columns=['U_formula', 'U_vacuum',
'U_xc', 'U_h', 'U_hund',
'U_potential_energy', 'U_potential_energy_PBE',
'ase_temperature'])
# access the results directly and calculate atomization energies
f2 = symbol + '2'
f1 = symbol
# results are accessible only on master rank
r1 = all.get('U_formula', f1)
r2 = all.get('U_formula', f2)
# check if results were successfully retrieved, otherwise we have to wait
if r1 is None or r2 is None:
print "Results are not yet in the database. Wait, and try again."
else:
# calculate atomization energies (ea)
ea_LDA = 2 * r1['U_potential_energy'] - r2['U_potential_energy']
ea_PBE = 2 * r1['U_potential_energy_PBE'] - r2['U_potential_energy_PBE']
if rank == 0:
print 'atomization energy [eV] ' + xc + ' = ' + str(ea_LDA)
print 'atomization energy [eV] PBE = ' + str(ea_PBE)
if create_group:
# ea_LDA and ea_PBE define a group
group = cmr.create_group();
group.add(r1['db_hash']);
group.add(r2['db_hash']);
group.set_user_variable('U_ea_LDA', ea_LDA)
group.set_user_variable('U_ea_PBE', ea_PBE)
group.set_user_variable('U_description', 'atomization energy [eV] (from database)')
group.set_user_variable('U_reaction', '2 * ' + symbol + ' - ' + symbol + '2')
group.set_user_variable('db_keywords', [project_id])
group.set_user_variable('project_id', project_id)
group.write(symbol + '2_atomize_from_db.cmr');
group.write(".cmr");
if True:
all = reader.find(keyword_list=[project_id])
if rank == 0:
print 'contents of the database'
# print requested results
# column_length=0 aligns data in the table (-1 : data unaligned is default)
all.print_table(column_length=0,
columns=['U_formula', 'U_vacuum',
'U_xc', 'U_h', 'U_hund',
'U_potential_energy', 'U_potential_energy_PBE',
'ase_temperature', 'U_reaction', 'U_ea_LDA', 'U_ea_PBE', 'U_description'])
if clean:
if rank == 0:
for file in [symbol + '.cmr', symbol + '.gpw', symbol + '.txt',
symbol + '2.cmr', symbol + '2.gpw', symbol + '2.txt',
symbol + '2_atomize_from_dir.cmr',
symbol + '2_atomize_from_db.cmr']:
if os.path.exists(file): os.unlink(file)<|fim▁end|> | h = 0.20
|
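
The atomization energies computed above reduce to one line of arithmetic once the total energies are known. A minimal sketch with made-up numbers standing in for the GPAW results:

def atomization_energy(e_atom, e_molecule, n_atoms=2):
    # E_a = n * E(atom) - E(molecule); positive means the molecule is bound.
    return n_atoms * e_atom - e_molecule

e_li = -1.0   # hypothetical total energy of one Li atom [eV]
e_li2 = -3.0  # hypothetical total energy of the Li2 molecule [eV]
assert atomization_energy(e_li, e_li2) == 1.0
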
<|file_name|>index.ts<|end_file_name|><|fim▁begin|>export * from './keep';
export * from './room';
<|fim▁hole|><|fim▁end|> | export * from './thing';
export * from './thingType'; |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>pub mod account;
pub mod data;
pub mod default;
pub mod func;
// pub use crate::account::{Account, BankDetail, Position, Trade, Transfer, Order, QIFI};
// pub use crate::data::{Bar, DataItem, Tick, L2X};<|fim▁hole|><|fim▁end|> | // pub use crate::func::{from_bson_, from_serde_value, from_str, from_string, to_doc};
// |
<|file_name|>11_only_specific_values.rs<|end_file_name|><|fim▁begin|>extern crate clap;
use clap::{App, Arg};
fn main() {
// If you have arguments of specific values you want to test for, you can use the
// .possible_values() method of Arg
//
// This allows you to specify the valid values for that argument. If the user does not use one of
// those specific values, they will receive a graceful exit with error message informing them
// of the mistake, and what the possible valid values are
//
// For this example, assume you want one positional argument of either "fast" or "slow"
// i.e. the only possible ways to run the program are "myprog fast" or "myprog slow"
let mode_vals = ["fast", "slow"];
let matches = App::new("myapp").about("does awesome things")
.arg(Arg::with_name("MODE")
.help("What mode to run the program in")
.index(1)
.possible_values(&mode_vals)
.required(true))
.get_matches();
// Note, it's safe to call unwrap() because the arg is required
match matches.value_of("MODE").unwrap() {
"fast" => {
// Do fast things...
},
"slow" => {
// Do slow things...
},
_ => unreachable!()<|fim▁hole|><|fim▁end|> | }
} |
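
For comparison, a rough Python analogue of .possible_values() using argparse, where choices plays the same role (the program name and values below are placeholders):

import argparse

parser = argparse.ArgumentParser(prog="myprog")
parser.add_argument("MODE", choices=["fast", "slow"],
                    help="What mode to run the program in")
args = parser.parse_args(["fast"])  # "myprog bogus" would exit with an error listing the valid values
assert args.MODE == "fast"
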
<|file_name|>test_plugins.py<|end_file_name|><|fim▁begin|>import imp
import os.path
import pkgutil
import six
import unittest
import streamlink.plugins
from streamlink import Streamlink
class PluginTestMeta(type):
def __new__(mcs, name, bases, dict):
plugin_path = os.path.dirname(streamlink.plugins.__file__)
plugins = []
for loader, pname, ispkg in pkgutil.iter_modules([plugin_path]):
file, pathname, desc = imp.find_module(pname, [plugin_path])
module = imp.load_module(pname, file, pathname, desc)
if hasattr(module, "__plugin__"):
plugins.append((pname))
session = Streamlink()
def gentest(pname):
def load_plugin_test(self):
            # Re-obtain the file handle so it is still open when load_plugin
            # runs; otherwise Python might re-open the plugin source .py with
            # ascii encoding instead of utf-8.
            # See also the open() call in imp._HackedGetData.get_data
file, pathname, desc = imp.find_module(pname, [plugin_path])
session.load_plugin(pname, file, pathname, desc)
# validate that can_handle_url does not fail<|fim▁hole|> for pname in plugins:
dict['test_{0}_load'.format(pname)] = gentest(pname)
return type.__new__(mcs, name, bases, dict)
@six.add_metaclass(PluginTestMeta)
class TestPlugins(unittest.TestCase):
"""
Test that each plugin can be loaded and does not fail when calling can_handle_url.
"""<|fim▁end|> | session.plugins[pname].can_handle_url("http://test.com")
return load_plugin_test
|
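
A stripped-down sketch of the metaclass pattern above: test methods are stamped onto the class at creation time, one per discovered name, with a factory function to avoid late binding of the loop variable. This uses Python 3 syntax, the original relies on six.add_metaclass for 2/3 compatibility, and the plugin names here are stand-ins:

import unittest

class GeneratedTestsMeta(type):
    def __new__(mcs, name, bases, namespace):
        for item in ["alpha", "beta"]:  # stand-ins for discovered plugins
            def make_test(value):       # factory captures the current value
                def test(self):
                    self.assertTrue(value.isalpha())
                return test
            namespace["test_{0}_load".format(item)] = make_test(item)
        return type.__new__(mcs, name, bases, namespace)

class TestGenerated(unittest.TestCase, metaclass=GeneratedTestsMeta):
    pass
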
<|file_name|>touch.js.uncompressed.js<|end_file_name|><|fim▁begin|><|fim▁hole|>oid sha256:1668f227ab9bbb326809d8430e0c9d1105688b38b177ab927f0528eb9593a652
size 7357<|fim▁end|> | version https://git-lfs.github.com/spec/v1 |
<|file_name|>and.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 Strahinja Val Markovic
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::{Expression, ParseState, ParseResult};
macro_rules! and( ( $ex:expr ) => ( &base::And::new( $ex ) ); );
pub struct And<'a> {
expr: &'a ( Expression + 'a )
}
impl<'a> And<'a> {
pub fn new( expr: &Expression ) -> And {
And { expr: expr }
}
}
impl<'b> Expression for And<'b> {
fn apply<'a>( &self, parse_state: &ParseState<'a> ) ->
Option< ParseResult<'a> > {
match self.expr.apply( parse_state ) {
Some( _ ) => Some( ParseResult::fromParseState( *parse_state ) ),
_ => None
}
}
}
#[cfg(test)]
mod tests {
use base;
use base::{ParseResult, Expression};
#[test]
fn And_Match_WithLiteral() {
let orig_state = input_state!( "foo" );
match and!( lit!( "foo" ) ).apply( &orig_state ) {
Some( ParseResult{ nodes, parse_state } ) => {
assert!( nodes.is_empty() );<|fim▁hole|> }
#[test]
fn And_Match_WithCharClass() {
let orig_state = input_state!( "c" );
match and!( class!( "a-z" ) ).apply( &orig_state ) {
Some( ParseResult{ nodes, parse_state } ) => {
assert!( nodes.is_empty() );
assert_eq!( parse_state, orig_state );
}
_ => panic!( "No match." )
}
}
#[test]
fn And_NoMatch() {
assert!( and!( class!( "a-z" ) ).apply( &input_state!( "0" ) ).is_none() );
assert!( and!( lit!( "x" ) ).apply( &input_state!( "y" ) ).is_none() );
}
}<|fim▁end|> | assert_eq!( parse_state, orig_state );
}
_ => panic!( "No match." )
} |
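
The And combinator above is PEG's and-predicate: it succeeds only if the wrapped expression matches at the current position, and it consumes no input either way. A rough Python analogue, with plain functions standing in for the Expression trait:

def literal(lit):
    def parse(text, pos):
        return pos + len(lit) if text.startswith(lit, pos) else None
    return parse

def and_predicate(inner):
    def parse(text, pos):
        # Succeed at the same position iff `inner` matches; consume nothing.
        return pos if inner(text, pos) is not None else None
    return parse

assert and_predicate(literal("foo"))("foobar", 0) == 0  # match, nothing consumed
assert and_predicate(literal("x"))("y", 0) is None      # no match
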
<|file_name|>TabsSpec.js<|end_file_name|><|fim▁begin|>import React from 'react';
import ReactTestUtils from 'react/lib/ReactTestUtils';
import ReactDOM from 'react-dom';
import Nav from '../src/Nav';
import NavItem from '../src/NavItem';
import Tab from '../src/Tab';
import TabPane from '../src/TabPane';
import Tabs from '../src/Tabs';
import ValidComponentChildren from '../src/utils/ValidComponentChildren';
import { render } from './helpers';
describe('<Tabs>', () => {
it('Should show the correct tab', () => {
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test" defaultActiveKey={1}>
<Tab title="Tab 1" eventKey={1}>Tab 1 content</Tab>
<Tab title="Tab 2" eventKey={2}>Tab 2 content</Tab>
</Tabs>
);
const panes = ReactTestUtils.scryRenderedComponentsWithType(instance, TabPane);
assert.ok(ReactDOM.findDOMNode(panes[0]).className.match(/\bactive\b/));
assert.ok(!ReactDOM.findDOMNode(panes[1]).className.match(/\bactive\b/));
const nav = ReactTestUtils.findRenderedComponentWithType(instance, Nav);
assert.equal(nav.context.$bs_tabContainer.activeKey, 1);
});
it('Should only show the tabs with `Tab.props.title` set', () => {
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test" defaultActiveKey={3}>
<Tab title="Tab 1" eventKey={1}>Tab 1 content</Tab>
<Tab eventKey={2}>Tab 2 content</Tab>
<Tab title="Tab 2" eventKey={3}>Tab 3 content</Tab>
</Tabs>
);
const nav = ReactTestUtils.findRenderedComponentWithType(instance, Nav);
assert.equal(ValidComponentChildren.count(nav.props.children), 2);
});
it('Should allow tab to have React components', () => {
const tabTitle = (
<strong className="special-tab">Tab 2</strong>
);
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test" defaultActiveKey={2}>
<Tab title="Tab 1" eventKey={1}>Tab 1 content</Tab>
<Tab title={tabTitle} eventKey={2}>Tab 2 content</Tab>
</Tabs>
);
const nav = ReactTestUtils.findRenderedComponentWithType(instance, Nav);
assert.ok(ReactTestUtils.findRenderedDOMComponentWithClass(nav, 'special-tab'));
});
it('Should call onSelect when tab is selected', (done) => {
function onSelect(key) {
assert.equal(key, '2');
done();
}
const tab2 = <span className="tab2">Tab2</span>;
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test" onSelect={onSelect} activeKey={1}>
<Tab title="Tab 1" eventKey="1">Tab 1 content</Tab>
<Tab title={tab2} eventKey="2">Tab 2 content</Tab><|fim▁hole|>
ReactTestUtils.Simulate.click(
ReactTestUtils.findRenderedDOMComponentWithClass(instance, 'tab2')
);
});
it('Should have children with the correct DOM properties', () => {
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test" defaultActiveKey={1}>
<Tab title="Tab 1" className="custom" eventKey={1}>Tab 1 content</Tab>
<Tab title="Tab 2" tabClassName="tcustom" eventKey={2}>Tab 2 content</Tab>
</Tabs>
);
const panes = ReactTestUtils.scryRenderedComponentsWithType(instance, Tab);
const navs = ReactTestUtils.scryRenderedComponentsWithType(instance, NavItem);
assert.ok(ReactDOM.findDOMNode(panes[0]).className.match(/\bcustom\b/));
assert.ok(ReactDOM.findDOMNode(navs[1]).className.match(/\btcustom\b/));
assert.equal(ReactDOM.findDOMNode(panes[0]).id, 'test-pane-1');
});
it('Should show the correct first tab with no active key value', () => {
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test">
<Tab title="Tab 1" eventKey={1}>Tab 1 content</Tab>
<Tab title="Tab 2" eventKey={2}>Tab 2 content</Tab>
</Tabs>
);
const panes = ReactTestUtils.scryRenderedComponentsWithType(instance, TabPane);
assert.ok(ReactDOM.findDOMNode(panes[0]).className.match(/\bactive\b/));
assert.ok(!ReactDOM.findDOMNode(panes[1]).className.match(/\bactive\b/));
const nav = ReactTestUtils.findRenderedComponentWithType(instance, Nav);
assert.equal(nav.context.$bs_tabContainer.activeKey, 1);
});
it('Should show the correct first tab with children array', () => {
const panes = [0, 1].map(index => (
<Tab
key={index}
eventKey={index}
title={`Tab #${index}`}
>
<div>
content
</div>
</Tab>
));
let instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test">
{panes}
{null}
</Tabs>
);
const nav = ReactTestUtils.findRenderedComponentWithType(instance, Nav);
assert.equal(nav.context.$bs_tabContainer.activeKey, 0);
});
it('Should show the correct tab when selected', () => {
const tab1 = <span className="tab1">Tab 1</span>;
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test" defaultActiveKey={2} animation={false}>
<Tab title={tab1} eventKey={1}>Tab 1 content</Tab>
<Tab title="Tab 2" eventKey={2}>Tab 2 content</Tab>
</Tabs>
);
const panes = ReactTestUtils.scryRenderedComponentsWithType(instance, TabPane);
ReactTestUtils.Simulate.click(
ReactTestUtils.findRenderedDOMComponentWithClass(instance, 'tab1')
);
assert.ok(ReactDOM.findDOMNode(panes[0]).className.match(/\bactive\b/));
assert.ok(!ReactDOM.findDOMNode(panes[1]).className.match(/\bactive\b/));
const nav = ReactTestUtils.findRenderedComponentWithType(instance, Nav);
assert.equal(nav.context.$bs_tabContainer.activeKey, 1);
});
it('Should mount initial tab and no others when unmountOnExit is true and animation is false', () => {
const tab1 = <span className="tab1">Tab 1</span>;
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test" defaultActiveKey={1} animation={false} unmountOnExit>
<Tab title={tab1} eventKey={1}>Tab 1 content</Tab>
<Tab title="Tab 2" eventKey={2}>Tab 2 content</Tab>
<Tab title="Tab 3" eventKey={3}>Tab 3 content</Tab>
</Tabs>
);
const panes = ReactTestUtils.scryRenderedComponentsWithType(instance, TabPane);
expect(ReactDOM.findDOMNode(panes[0])).to.exist;
expect(ReactDOM.findDOMNode(panes[1])).to.not.exist;
expect(ReactDOM.findDOMNode(panes[2])).to.not.exist;
});
it('Should mount the correct tab when selected and unmount the previous when unmountOnExit is true and animation is false', () => {
const tab1 = <span className="tab1">Tab 1</span>;
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test" defaultActiveKey={2} animation={false} unmountOnExit>
<Tab title={tab1} eventKey={1}>Tab 1 content</Tab>
<Tab title="Tab 2" eventKey={2}>Tab 2 content</Tab>
</Tabs>
);
const panes = ReactTestUtils.scryRenderedComponentsWithType(instance, TabPane);
ReactTestUtils.Simulate.click(
ReactTestUtils.findRenderedDOMComponentWithClass(instance, 'tab1')
);
expect(ReactDOM.findDOMNode(panes[0])).to.exist;
expect(ReactDOM.findDOMNode(panes[1])).to.not.exist;
const nav = ReactTestUtils.findRenderedComponentWithType(instance, Nav);
assert.equal(nav.context.$bs_tabContainer.activeKey, 1);
});
it('Should treat active key of null as nothing selected', () => {
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test" activeKey={null} onSelect={()=>{}}>
<Tab title="Tab 1" eventKey={1}>Tab 1 content</Tab>
<Tab title="Tab 2" eventKey={2}>Tab 2 content</Tab>
</Tabs>
);
const nav = ReactTestUtils.findRenderedComponentWithType(instance, Nav);
expect(nav.context.$bs_tabContainer.activeKey).to.not.exist;
});
it('Should pass default bsStyle (of "tabs") to Nav', () => {
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test" defaultActiveKey={1} animation={false}>
<Tab title="Tab 1" eventKey={1}>Tab 1 content</Tab>
<Tab title="Tab 2" eventKey={2}>Tab 2 content</Tab>
</Tabs>
);
assert.ok(ReactTestUtils.findRenderedDOMComponentWithClass(instance, 'nav-tabs'));
});
it('Should pass bsStyle to Nav', () => {
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test" bsStyle="pills" defaultActiveKey={1} animation={false}>
<Tab title="Tab 1" eventKey={1}>Tab 1 content</Tab>
<Tab title="Tab 2" eventKey={2}>Tab 2 content</Tab>
</Tabs>
);
assert.ok(ReactTestUtils.findRenderedDOMComponentWithClass(instance, 'nav-pills'));
});
it('Should pass disabled to Nav', () => {
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test" defaultActiveKey={1}>
<Tab title="Tab 1" eventKey={1}>Tab 1 content</Tab>
<Tab title="Tab 2" eventKey={2} disabled>Tab 2 content</Tab>
</Tabs>
);
assert.ok(ReactTestUtils.findRenderedDOMComponentWithClass(instance, 'disabled'));
});
it('Should not show content when clicking disabled tab', () => {
const tab1 = <span className="tab1">Tab 1</span>;
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test" defaultActiveKey={2} animation={false}>
<Tab title={tab1} eventKey={1} disabled>Tab 1 content</Tab>
<Tab title="Tab 2" eventKey={2}>Tab 2 content</Tab>
</Tabs>
);
const panes = ReactTestUtils.scryRenderedComponentsWithType(instance, TabPane);
ReactTestUtils.Simulate.click(
ReactTestUtils.findRenderedDOMComponentWithClass(instance, 'tab1')
);
assert.ok(!ReactDOM.findDOMNode(panes[0]).className.match(/\bactive\b/));
assert.ok(ReactDOM.findDOMNode(panes[1]).className.match(/\bactive\b/));
const nav = ReactTestUtils.findRenderedComponentWithType(instance, Nav);
assert.equal(nav.context.$bs_tabContainer.activeKey, 2);
});
describe('active state invariants', () => {
let mountPoint;
beforeEach(() => {
mountPoint = document.createElement('div');
document.body.appendChild(mountPoint);
});
afterEach(() => {
ReactDOM.unmountComponentAtNode(mountPoint);
document.body.removeChild(mountPoint);
});
[true, false].forEach(animation => {
it(`should correctly set "active" after Tab is removed with "animation=${animation}"`, () => {
const instance = render(
<Tabs
id="test"
activeKey={2}
animation={animation}
onSelect={() => {}}
>
<Tab title="Tab 1" eventKey={1}>Tab 1 content</Tab>
<Tab title="Tab 2" eventKey={2}>Tab 2 content</Tab>
</Tabs>
, mountPoint);
const panes = ReactTestUtils.scryRenderedComponentsWithType(instance, TabPane);
assert.ok(!ReactDOM.findDOMNode(panes[0]).className.match(/\bactive\b/));
assert.ok(ReactDOM.findDOMNode(panes[1]).className.match(/\bactive\b/));
// second tab has been removed
render(
<Tabs
id="test"
activeKey={1}
animation={animation}
onSelect={() => {}}
>
<Tab title="Tab 1" eventKey={1}>Tab 1 content</Tab>
</Tabs>
, mountPoint).refs.inner;
assert.ok(ReactDOM.findDOMNode(panes[0]).className.match(/\bactive\b/));
});
});
});
describe('Web Accessibility', () => {
let instance;
beforeEach(() => {
instance = ReactTestUtils.renderIntoDocument(
<Tabs defaultActiveKey={2} id="test">
<Tab title="Tab 1" eventKey={1}>Tab 1 content</Tab>
<Tab title="Tab 2" eventKey={2}>Tab 2 content</Tab>
</Tabs>
);
});
it('Should generate ids from parent id', () => {
const tabs = ReactTestUtils.scryRenderedComponentsWithType(instance, NavItem);
tabs.every(tab =>
assert.ok(tab.props['aria-controls'] && tab.props.id));
});
it('Should add aria-labelledby', () => {
const panes = ReactTestUtils.scryRenderedDOMComponentsWithClass(instance, 'tab-pane');
assert.equal(panes[0].getAttribute('aria-labelledby'), 'test-tab-1');
assert.equal(panes[1].getAttribute('aria-labelledby'), 'test-tab-2');
});
it('Should add aria-controls', () => {
const tabs = ReactTestUtils.scryRenderedComponentsWithType(instance, NavItem);
assert.equal(tabs[0].props['aria-controls'], 'test-pane-1');
assert.equal(tabs[1].props['aria-controls'], 'test-pane-2');
});
it('Should add role=tablist to the nav', () => {
const nav = ReactTestUtils.findRenderedComponentWithType(instance, Nav);
assert.equal(nav.props.role, 'tablist');
});
it('Should add aria-selected to the nav item for the selected tab', () => {
const tabs = ReactTestUtils.scryRenderedComponentsWithType(instance, NavItem);
const link1 = ReactTestUtils.findRenderedDOMComponentWithTag(tabs[0], 'a');
const link2 = ReactTestUtils.findRenderedDOMComponentWithTag(tabs[1], 'a');
assert.equal(link1.getAttribute('aria-selected'), 'false');
assert.equal(link2.getAttribute('aria-selected'), 'true');
});
});
it('Should not pass className to Nav', () => {
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test" bsStyle="pills" defaultActiveKey={1} animation={false}>
<Tab title="Tab 1" eventKey={1} className="my-tab-class">Tab 1 content</Tab>
<Tab title="Tab 2" eventKey={2}>Tab 2 content</Tab>
</Tabs>
);
const myTabClass = ReactTestUtils.findRenderedDOMComponentWithClass(instance, 'my-tab-class');
const myNavItem = ReactTestUtils.scryRenderedDOMComponentsWithClass(instance, 'nav-pills')[0];
assert.notDeepEqual(myTabClass, myNavItem);
});
it('Should pass className, Id, and style to Tabs', () => {
const instance = ReactTestUtils.renderIntoDocument(
<Tabs
bsStyle="pills"
defaultActiveKey={1}
animation={false}
className="my-tabs-class"
id="my-tabs-id"
style={{ opacity: 0.5 }}
/>
);
assert.equal(ReactDOM.findDOMNode(instance).getAttribute('class'), 'my-tabs-class');
assert.equal(ReactDOM.findDOMNode(instance).getAttribute('id'), 'my-tabs-id');
// Decimal point string depends on locale
assert.equal(parseFloat(ReactDOM.findDOMNode(instance).style.opacity), 0.5);
});
it('should derive bsClass from parent', () => {
const instance = ReactTestUtils.renderIntoDocument(
<Tabs id="test" bsClass="my-tabs">
<Tab eventKey={1} title="Tab 1" />
<Tab eventKey={2} title="Tab 2" bsClass="my-pane" />
</Tabs>
);
assert.lengthOf(ReactTestUtils.scryRenderedDOMComponentsWithClass(instance, 'my-tabs-pane'), 2);
assert.lengthOf(ReactTestUtils.scryRenderedDOMComponentsWithClass(instance, 'my-pane'), 0);
});
});<|fim▁end|> | </Tabs>
); |
<|file_name|>LineComment.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2013 Raquel Pau and Albert Coroleu.
*
* Walkmod is free software: you can redistribute it and/or modify it under the terms of the GNU
* Lesser General Public License as published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* Walkmod is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
* General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License along with Walkmod. If
* not, see <http://www.gnu.org/licenses/>.
*/
package org.walkmod.javalang.ast;
import org.walkmod.javalang.visitors.GenericVisitor;
import org.walkmod.javalang.visitors.VoidVisitor;
/**
* <p>
 * AST node that represents line comments.
* </p>
 * Line comments start with "//" and end at the end of the line ("\n").
*
* @author Julio Vilmar Gesser
*/
public final class LineComment extends Comment {
public LineComment() {}
public LineComment(String content) {
super(content);
}
public LineComment(int beginLine, int beginColumn, int endLine, int endColumn, String content) {
super(beginLine, beginColumn, endLine, endColumn, content);
}
@Override
public <R, A> R accept(GenericVisitor<R, A> v, A arg) {
if (!check()) {
return null;
}
return v.visit(this, arg);
}
@Override
public <A> void accept(VoidVisitor<A> v, A arg) {
if (check()) {
v.visit(this, arg);
}
}
@Override
public LineComment clone() throws CloneNotSupportedException {
return new LineComment(getContent());<|fim▁hole|> }
}<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | from .net import Net |
<|file_name|>ko.rs<|end_file_name|><|fim▁begin|>/************************************************************************
* *
* Copyright 2014 Urban Hafner, Thomas Poinsot *
* Copyright 2015 Urban Hafner, Igor Polyakov *
* *
* This file is part of Iomrascálaí. *
* *
* Iomrascálaí is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* *
* Iomrascálaí is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with Iomrascálaí. If not, see <http://www.gnu.org/licenses/>. *
* *
************************************************************************/
#![cfg(test)]
use board::Black;
use board::IllegalMove;
use board::Play;
use board::White;
use game::Game;
use ruleset::AnySizeTrompTaylor;
use sgf::Parser;
<|fim▁hole|>
#[test]
fn replaying_directly_on_a_ko_point_should_be_illegal() {
let mut g = Game::new(19, 6.5, AnySizeTrompTaylor);
g = g.play(Play(Black, 4, 4)).unwrap();
g = g.play(Play(White, 5, 4)).unwrap();
g = g.play(Play(Black, 3, 3)).unwrap();
g = g.play(Play(White, 4, 3)).unwrap();
g = g.play(Play(Black, 3, 5)).unwrap();
g = g.play(Play(White, 4, 5)).unwrap();
g = g.play(Play(Black, 2, 4)).unwrap();
g = g.play(Play(White, 3, 4)).unwrap();
let ko = g.play(Play(Black, 4, 4));
match ko {
Err(e) => assert_eq!(e, IllegalMove::Ko),
Ok(_) => panic!("expected Err")
}
}
#[test]
fn positional_super_ko_should_be_illegal() {
let parser = Parser::from_path(Path::new("fixtures/sgf/positional-superko.sgf")).unwrap();
let game = parser.game().unwrap();
let super_ko = game.play(Play(White, 2, 9));
match super_ko {
Err(e) => assert_eq!(e, IllegalMove::SuperKo),
Ok(_) => panic!("expected Err")
}
}
#[test]
fn not_a_super_ko() {
let parser = Parser::from_path(Path::new("fixtures/sgf/not-superko.sgf")).unwrap();
let game = parser.game().unwrap();
let no_super_ko = game.play(Play(Black, 1, 1));
match no_super_ko {
Err(e) => panic!("No err expected, got {}", e),
Ok(_) => {}
}
}
#[test]
fn not_a_super_ko2() {
let parser = Parser::from_path(Path::new("fixtures/sgf/not-superko2.sgf")).unwrap();
let game = parser.game().unwrap();
let no_super_ko = game.play(Play(Black, 13, 12));
match no_super_ko {
Err(e) => panic!("No err expected, got {}", e),
Ok(_) => {}
}
}<|fim▁end|> | use std::path::Path; |
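
Positional superko, as exercised by the tests above, forbids recreating any earlier whole-board position; plain ko is the special case of recreating the immediately preceding one. A toy sketch of the bookkeeping, with placeholder strings instead of a real board type:

def violates_positional_superko(seen_positions, candidate):
    # Illegal if the whole-board position after the move repeats history.
    return candidate in seen_positions

seen = {"....", ".X..", ".XO."}
assert violates_positional_superko(seen, ".X..")      # repeats history: illegal
assert not violates_positional_superko(seen, "XX..")  # fresh position: legal
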
<|file_name|>app.js<|end_file_name|><|fim▁begin|>'use strict';
var angular = require('angular');
var angularMessages = require('angular-messages');
var satellizer = require('satellizer');
var material = require('angular-material');
angular.module('sheltrApp', [
require('angular-ui-router'),
require('./controllers'),
require('./services'),
'ngMaterial',
'ngMessages',
'satellizer',
])
.run([
'$animate',
'$rootScope',
'$state',
'$stateParams',
'$auth',
function($animate, $rootScope, $state, $stateParams, $auth) {
$animate.enabled(true);
$rootScope.$state = $state;
$rootScope.$stateParams = $stateParams;
$rootScope.isAuthenticated = function() {
return $auth.isAuthenticated();
};
},
])
.config([
'$stateProvider',
'$authProvider',
'$urlRouterProvider',
function(
$stateProvider,
$authProvider,
$urlRouterProvider) {
$urlRouterProvider.otherwise('/login');
$authProvider.tokenPrefix = 'sheltr';
$authProvider.loginUrl = '/api/authenticate';
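      // Route guards used by the state resolves below: each returns a promise
      // that resolves to allow navigation, or redirects/rejects to block it.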
function skipIfLoggedIn($q, $auth) {
var deferred = $q.defer();
if ($auth.isAuthenticated()) {
deferred.reject();
} else {
deferred.resolve();
}
return deferred.promise;
}
function loginRequired($q, $location, $auth) {
var deferred = $q.defer();
if ($auth.isAuthenticated()) {
deferred.resolve();
} else {
$location.path('/login');
}
return deferred.promise;
}
function isAuthorized(permission) {
return function($q, $location, $auth) {
var deferred = $q.defer();
var payload = $auth.getPayload();
if (payload[permission]) {
deferred.resolve();
} else {
$location.path('/');
}
return deferred.promise;
};
}
$stateProvider
.state('login', {
url: '/login',
templateUrl: 'views/login.form.html',
controller: 'LoginController',
resolve: {
skipIfLoggedIn: skipIfLoggedIn,
},
})
.state('logout', {
url: '/logout',
template: null,
controller: 'LogoutController',
resolve: {
loginRequired: loginRequired,
},
})<|fim▁hole|>
.state('signup', {
url: '/applicants/new',
templateUrl: 'views/applicant.view.html',
controller: 'SignupController',
resolve: {
loginRequired: loginRequired,
},
})
.state('home', {
url: '/',
templateUrl: 'views/home.html',
controller: 'HomeController',
resolve: {
loginRequired: loginRequired,
},
})
.state('applicant', {
url: '/applicants/:id',
templateUrl: 'views/applicant.view.html',
controller: 'ApplicantController',
resolve: {
loginRequired: loginRequired,
},
})
.state('organization', {
url: '/organization',
templateUrl: 'views/org.view.html',
controller: 'OrgController',
resolve: {
adminRequired: isAuthorized('admin'),
},
});
},
]);<|fim▁end|> | |
<|file_name|>appointment_form.js<|end_file_name|><|fim▁begin|>function appointment(input)
{
Ti.include('/ui/common/helpers/dateTime.js');
Ti.include('/ui/common/database/users_db.js');
Ti.include('/ui/common/database/children_db.js');
Ti.include('/ui/common/database/relationships_db.js');
Ti.include('/ui/common/database/records_db.js');
Ti.include('/ui/common/database/incidents_db.js');
Ti.include('/ui/common/database/entries_db.js');
Ti.include('/ui/common/database/activities_db.js');
Ti.include('/ui/common/database/appointments_db.js');
Ti.include('/ui/common/database/treatments_db.js');
Ti.include('/ui/common/cloud/appcelerator/objects.js');
var appointment = {
id: input.id?input.id:null,
entry_id: input.entry_id?input.entry_id:null,
diagnosis: input.diagnosis?input.diagnosis:null,
complete: (input.complete == 1)?true:false,
date: input.date?input.date:timeFormatted(new Date).date,
time: input.time?input.time:timeFormatted(new Date).time,
symptoms: input.symptoms?input.symptoms:[],
doctor: input.doctor?input.doctor:{
name: null,
location: null,
street: null,
city: null,
state: null,
zip: null,
country: 'USA',
},
activities: input.activities?input.activities:[],
treatments: input.treatments?input.treatments:[],
}
var symptoms_string='';
for(var i=0;i < appointment.symptoms.length; i++) {
symptoms_string += appointment.symptoms[i];
if(i != appointment.symptoms.length -1) symptoms_string += ', ';
}
var window = Ti.UI.createWindow({
backgroundColor:'white',
navBarHidden: 'true',
windowSoftInputMode: Ti.UI.Android.SOFT_INPUT_ADJUST_PAN,
});
window.result = null;
var windowTitleBar = require('ui/handheld/windowNavBar');
windowTitleBar = new windowTitleBar('100%', 'Appointment', 'Cancel', 'Save');
window.add(windowTitleBar);
var warning = Ti.UI.createView({
top: 70,
width: '100%',
zIndex: 3,
height: 70,
backgroundColor: 'red',
borderColor: 'red'
});
warning.add(Ti.UI.createLabel({ text: 'NOTE: This is for personal records, it does not schedule an actual appointment',
textAlign: 'center',
color: 'white',
width: Titanium.Platform.displayCaps.platformWidth*0.90,
}));
window.add(warning);
var cancel_btn = windowTitleBar.leftNavButton;
cancel_btn.addEventListener('click', function() {
window.close();
});
var save_btn = windowTitleBar.rightNavButton;
save_btn.addEventListener('click', function() {
if(table.scrollable == false) { return; }
var name_test=false, dateTime_test=false, symptoms_test=false;
if(!isValidDateTime(date.text+' '+time.text) && complete_switcher.value == false) { alert('You may have entered a date that has already passed. Kindly recheck'); }
else { dateTime_test = true; }
//Strip the whitespace, then check that the remaining characters are all letters
var onlyLetters = /^[a-zA-Z]+$/.test((name.value || '').replace(/\s/g, ''));
if(name.value != null && name.value.length > 1 && onlyLetters) { name_test = true; }
else { alert('Doctors name must be longer than one character and contain only letters'); }
if(symptoms_field.value == null || symptoms_field.value == '') {
alert('You must list at least one symptom');
}
else { symptoms_test=true; }
if(dateTime_test && name_test && symptoms_test)
{
if(diagnosis.value != null) { diagnosis.value = diagnosis.value.replace(/'/g, "''"); } //If diagnosis exists, escape single quotes for SQL before submitting
if(appointment.id == null) {
if(!Titanium.Network.online) {
alert('Error:\n You are not connected to the internet. Cannot create new appointment');
return;
}
var entry_id = '"'+appointment.entry_id+'"';
appointment.id = insertAppointmentLocal(entry_id,appointment.date,appointment.time,diagnosis.value);
appointment.doctor.id = insertDoctorForAppointmentLocal(appointment.id,name.value,location.value,street.value,city.value,state.value,zip.value,country.value);
createObjectACS('appointments', { id: appointment.id, entry_id: appointment.entry_id,
date: appointment.date, time: appointment.time, complete: complete_switcher.value, diagnosis: diagnosis.value, });
}
else {
updateAppointmentLocal(appointment.id,appointment.date,appointment.time,diagnosis.value);
updateDoctorForAppointmentLocal(appointment.id,name.value,location.value,street.value,city.value,state.value,zip.value,country.value);
}
deleteSymptomsForAppointmentLocal(appointment.id);
appointment.symptoms.splice(0, appointment.symptoms.length);
if(symptoms_field.value != null) {
if(symptoms_field.value.length > 1) {
var final_symptoms = symptoms_field.value.split(',');
for(var i=0;i < final_symptoms.length;i++) {
if(final_symptoms[i].length < 2) continue;
final_symptoms[i] = final_symptoms[i].replace(/^\s+/, ''); // Remove preceding whitespace
insertSymptomForAppointmentLocal(appointment.id,final_symptoms[i]);
appointment.symptoms.push(final_symptoms[i]);
}
}
}
updateAppointmentCompleteStatus(appointment.id,complete_switcher.value);
updateRecordTimesForEntryLocal(appointment.entry_id,timeFormatted(new Date()).date,timeFormatted(new Date()).time);
appointment.doctor.name = name.value;
appointment.doctor.location = location.value;
appointment.doctor.street = street.value;
appointment.doctor.city = city.value;
appointment.doctor.state = state.value;
appointment.doctor.zip = zip.value;
appointment.doctor.country = country.value;
appointment.complete = complete_switcher.value;
appointment.diagnosis = diagnosis.value;
window.result = appointment;
window.close();
}
});
var table = Ti.UI.createTableView({ top: 140, separatorColor: 'transparent', });
var sectionDetails = Ti.UI.createTableViewSection({ headerTitle: 'Doctor Details(*=required)' });
sectionDetails.add(Ti.UI.createTableViewRow({ height: 45, }));
sectionDetails.add(Ti.UI.createTableViewRow({ height: 45, }));
sectionDetails.add(Ti.UI.createTableViewRow({ height: 135, }));
var name_title = Titanium.UI.createLabel({ text: '*Name', color: 'black', left: 15, font: { fontWeight: 'bold', fontSize: 18, }, });
var name = Ti.UI.createTextField({ hintText: 'eg: James Smith', color: 'black', value: appointment.doctor.name, left: '40%', width: '60%' });
var location_title = Titanium.UI.createLabel({ text: 'Location', color: 'black', left: 15, font: { fontWeight: 'bold', fontSize: 18, }, });
var location = Ti.UI.createTextField({ hintText: 'Clinic/Hospital name', color: 'black', value: appointment.doctor.location, left: '40%', width: '60%' });
var address_title = Titanium.UI.createLabel({ text: 'Address', color: 'black', left: 15, font: { fontWeight: 'bold', fontSize: 18, }, });
var street = Ti.UI.createTextField({ hintText: 'Street', color: 'black', value: appointment.doctor.street, borderColor: '#CCC', leftButtonPadding: 5, height: 45, width: '60%', left: '40%', top: 0 });
var city = Ti.UI.createTextField({ hintText: 'City', color: 'black', value: appointment.doctor.city, borderColor: '#CCC', leftButtonPadding: 5, left: '40%', height: 45, width: '40%', top: 45 });
var state = Ti.UI.createTextField({ hintText: 'State', color: 'black', value: appointment.doctor.state, borderColor: '#CCC', leftButtonPadding: 5, left: '80%', height: 45, width: '20%', top: 45 });
var zip = Ti.UI.createTextField({ hintText: 'ZIP', color: 'black', value: appointment.doctor.zip, borderColor: '#CCC', leftButtonPadding: 5, left: '40%', height: 45, width: '20%', top: 90 });
var country = Ti.UI.createTextField({ hintText: 'Country', color: 'black', value: appointment.doctor.country, borderColor: '#CCC', leftButtonPadding: 5, left: '60%', height: 45, width: '40%', top: 90 });
sectionDetails.rows[0].add(name_title);
sectionDetails.rows[0].add(name);
sectionDetails.rows[1].add(location_title);
sectionDetails.rows[1].add(location);
sectionDetails.rows[2].add(address_title);
sectionDetails.rows[2].add(street);
sectionDetails.rows[2].add(city);
sectionDetails.rows[2].add(state);
sectionDetails.rows[2].add(zip);
sectionDetails.rows[2].add(country);
var sectionDateTime = Ti.UI.createTableViewSection({ headerTitle: 'Date and Time(tap to change)' });
sectionDateTime.add(Ti.UI.createTableViewRow({ height: 45, }));
var date = Ti.UI.createLabel({ text: appointment.date, color: 'black', left: 15, font: { fontWeight: 'bold', fontSize: 18, }, });
var time = Ti.UI.createLabel({ text: appointment.time, color: 'black', left: 160, font: { fontWeight: 'bold', fontSize: 18, }, });
sectionDateTime.rows[0].add(date);
sectionDateTime.rows[0].add(time);
var sectionSymptoms = Ti.UI.createTableViewSection({ headerTitle: '*Symptoms(list using commas)' });
sectionSymptoms.add(Ti.UI.createTableViewRow({ height: 90, selectedBackgroundColor: 'white' }));
var symptoms_field = Ti.UI.createTextArea({ hintText: 'Seperate each symptom by comma', value: symptoms_string, width: '100%', top: 5, font: { fontSize: 17 }, height: 70, borderRadius: 10 });
sectionSymptoms.rows[0].add(symptoms_field);
var sectionDiagnosis = Ti.UI.createTableViewSection();
sectionDiagnosis.add(Ti.UI.createTableViewRow({ selectedBackgroundColor: 'white' }));
sectionDiagnosis.add(Ti.UI.createTableViewRow({ selectedBackgroundColor: 'white' }));
var complete_title = Ti.UI.createLabel({ text: 'Complete', color: 'black', left: 15, font: { fontWeight: 'bold', fontSize: 18, }, });
var complete_switcher = Ti.UI.createSwitch({ value: appointment.complete, left: '50%', });
var diagnosis_title = Ti.UI.createLabel({ text: 'Diagnosis', left: 15, font: { fontWeight: 'bold', fontSize: 18, }, });
var diagnosis = Ti.UI.createTextField({ hintText: 'Enter here', value: appointment.diagnosis, width: '50%', left: '50%' });
sectionDiagnosis.rows[0].add(complete_title);
sectionDiagnosis.rows[0].add(complete_switcher);
sectionDiagnosis.rows[1].add(diagnosis_title);
sectionDiagnosis.rows[1].add(diagnosis);
table.data = [sectionDateTime, sectionDetails, sectionSymptoms, sectionDiagnosis ];
window.add(table);
date.addEventListener('click', function(e) {
modalPicker = require('ui/common/helpers/modalPicker');
var modalPicker = new modalPicker(Ti.UI.PICKER_TYPE_DATE_AND_TIME,null,date.text); <|fim▁hole|> callback: function(e) {
if (e.cancel) {
} else {
//var result = timeFormatted(e.value);
date.text = e.value.toDateString();
appointment.date = date.text;
}
}
});
});
time.addEventListener('click', function(e) {
modalPicker = require('ui/common/helpers/modalPicker');
var modalPicker = new modalPicker(Ti.UI.PICKER_TYPE_DATE_AND_TIME,null,date.text+' '+time.text);
modalPicker.showTimePickerDialog({
value: new Date(date.text+' '+time.text),
callback: function(e) {
if (e.cancel) {
} else {
var result = timeFormatted(e.value);
time.text = result.time;
appointment.time = time.text;
}
}
});
});
return window;
}
module.exports = appointment;<|fim▁end|> |
modalPicker.showDatePickerDialog({
value: new Date(date.text), |
<|file_name|>BigQueryColumnMetadataReader.java<|end_file_name|><|fim▁begin|>package com.exasol.adapter.dialects.bigquery;
import java.sql.Connection;
import java.sql.Types;
import com.exasol.adapter.AdapterProperties;
import com.exasol.adapter.dialects.IdentifierConverter;
import com.exasol.adapter.jdbc.BaseColumnMetadataReader;
import com.exasol.adapter.jdbc.JdbcTypeDescription;
import com.exasol.adapter.metadata.DataType;
/**
* This class implements BigQuery-specific reading of column metadata.
*/
public class BigQueryColumnMetadataReader extends BaseColumnMetadataReader {
/**
* Create a new instance of the {@link BigQueryColumnMetadataReader}.
*
* @param connection connection to the remote data source<|fim▁hole|> * @param identifierConverter converter between source and Exasol identifiers
*/
public BigQueryColumnMetadataReader(final Connection connection, final AdapterProperties properties,
final IdentifierConverter identifierConverter) {
super(connection, properties, identifierConverter);
}
@Override
public DataType mapJdbcType(final JdbcTypeDescription jdbcTypeDescription) {
if (jdbcTypeDescription.getJdbcType() == Types.TIME) {
return DataType.createVarChar(30, DataType.ExaCharset.UTF8);
}
return super.mapJdbcType(jdbcTypeDescription);
}
}<|fim▁end|> | * @param properties user-defined adapter properties |
<|file_name|>20141017-permalinks.py<|end_file_name|><|fim▁begin|>from redwind import app, db, util
from redwind.models import Post
import itertools
db.engine.execute('alter table post add column historic_path varchar(256)')
db.engine.execute('update post set historic_path = path')
for post in Post.query.all():<|fim▁hole|> post.path = '{}/{:02d}/{}'.format(post.published.year,
post.published.month,
post.slug)
db.session.commit()<|fim▁end|> | print(post.historic_path)
if not post.slug:
post.slug = post.generate_slug() |
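
The permalink scheme introduced by the migration above is year/zero-padded-month/slug. The same format standalone, with hypothetical values:

def permalink(year, month, slug):
    return '{}/{:02d}/{}'.format(year, month, slug)

assert permalink(2014, 3, "hello-world") == "2014/03/hello-world"
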
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015-2020 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//<|fim▁hole|>// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
pub mod grep;
pub mod length;<|fim▁end|> | // This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
<|file_name|>network.js<|end_file_name|><|fim▁begin|>exports.netCheck = function() {
var url = "http://api.openbeerdatabase.com/v1/beers.json";
var client = Ti.Network.createHTTPClient({<|fim▁hole|> var data = JSON.parse(this.responseText);
var beers = data.beers;
for (var i = 0, j = beers.length; i < j; i++) {
var post = {
title : beers[i].name,
desc : beers[i].description
};
newCrud.create(post);
}
},
onerror : function(evt) {
if (!Ti.Network.online) {
alert("Could not find connection!");
var newCrud = new crud();
newCrud.read();
}
}
});
client.open("GET", url);
client.send();
};<|fim▁end|> | onload : function(evt) {
var newCrud = new crud();
newCrud.dele(); |
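
The onload/onerror pair above is a fetch-or-fallback pattern: use the network when it is available, otherwise read what was stored locally. A hedged Python sketch of just the control flow, with callables standing in for the HTTP client and the local crud store:

def fetch_or_fallback(fetch, read_cache):
    try:
        return fetch()
    except OSError:  # stands in for the onerror branch
        return read_cache()

assert fetch_or_fallback(lambda: ["fresh"], lambda: ["cached"]) == ["fresh"]

def offline():
    raise OSError("no connection")

assert fetch_or_fallback(offline, lambda: ["cached"]) == ["cached"]
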
<|file_name|>clean_mac_info_plist.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the MineCoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "MineCoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);<|fim▁hole|>print "Info.plist fresh created"<|fim▁end|> | |
<|file_name|>make_ml_output_summary.py<|end_file_name|><|fim▁begin|>import sys
import socket
import os
import os.path
from optparse import OptionParser
#import scipy as scp
import numpy as np
import matplotlib.pyplot as plt
import pylab
import genome_management.kg_file_handling as kgf
import math
def file_exists(ls,file):
for f in ls:
if(f==file):
return 1
return 0
def mkdir(dir,file):
ls_dir = os.listdir(dir)
if(not(file_exists(ls_dir,file))):
command = "mkdir %s/%s"%(dir,file)
os.system(command)
return "%s/%s"%(dir,file)
class region_info:
def __init__(self,name,chr,start,end,TID):
self.name = name
self.chr = chr
self.start = start
self.end = end
self.frequencies_by_pop = {}
self.cps_by_genome = {}
self.transcript_id = TID
self.TID = TID
self.cps_all = []
self.pop_by_genome = {}
def add_info_from_genome(self,cp,genome):
if(not(genome.pop in self.frequencies_by_pop)):
self.frequencies_by_pop[genome.pop] = []
self.frequencies_by_pop[genome.pop].append(cp)
self.cps_by_genome[genome.genome_name] = cp
self.pop_by_genome[genome.genome_name] = genome.pop
self.cps_all.append(cp)
#def get_var(self):
# self.vars = {}
#self.cps_all = np.array(self.cps_all)
# varT = self.cps_all.var()
# self.vars["all"]=varT
# self.means = {}
# meanT = self.cps_all.mean(1)
# self.means["all"] = meanT
# for pop,copies_by_pop in self.frequencies_by_pop.iteritems():
# copies_by_pop = np.array(copies_by_pop)
# self.vars[pop] = self.summary[:,pop_index].var(1)
# self.means[pop] = self.summary[:,pop_index].mean(1)
# self.vsts = {}
# self.fsts = {}
# for pop,pop_index in self.indivs_by_pop.iteritems():
# for pop_2,pop_index_2 in self.indivs_by_pop.iteritems():
# n_pop = float(pop_index.shape[0])
# n_pop_2 = float(pop_index_2.shape[0])
# both_pops = np.r_[self.indivs_by_pop[pop],self.indivs_by_pop[pop_2]]
# var_both = self.summary[:,both_pops].var(1)
# N = n_pop+n_pop_2
# self.vsts["_".join([pop,pop_2])] = (var_both - ((self.vars[pop]*n_pop+self.vars[pop_2]*n_pop_2)/N)) / var_both
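    #            (Vst as computed here: the variance of the two populations
    #            pooled, minus the size-weighted mean of the within-population
    #            variances, divided by the pooled variance.)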
<|fim▁hole|>
def make_output_file(region,region_info,outdir,cell_line_info,genome_info):
outfile_name = "%s/%s_pop_summary.csv"%(outdir,region_info.name)
FOUT = open(outfile_name,'w')
FOUT.write("indiv,cp,pop,cell lines fixed, cell lines in Nitrogen,coverage\n")
for indiv,cp in region_info.cps_by_genome.iteritems():
pop = region_info.pop_by_genome[indiv]
output = indiv in cell_line_info and cell_line_info[indiv] or ""
output = "%s,%d,%s,%s,%f\n"%(indiv,cp,pop,output,genome_info.genomes[indiv].coverage)
FOUT.write(output)
print output
def make_simple_plot(region,region_info,outdir,cell_line_info,genome_info):
plt.rc('grid',color='0.75',linestyle='l',linewidth='0.1')
f=plt.figure()
f.set_figwidth(6)
f.set_figheight(6)
axescolor = '#f6f6f6'
left, width = 0.1, 0.8
rect1 = [left, 0.1, width, 0.8] #left, bottom, width, height
ax = f.add_axes(rect1)
colors = {'Yoruba':'r','European':'b','Asian':'g'}
for indiv,cp in region_info.cps_by_genome.iteritems():
cvg = genome_info.genomes[indiv].coverage
fixed_cell_line = cell_line_info[indiv].split(",")[0].rstrip() == "yes"
liquid_nitrogen_cell_line = cell_line_info[indiv].split(",")[1].rstrip() == "yes"
color = colors[genome_info.genomes[indiv].pop]
ax.plot(np.array([cvg]),np.array([cp]),'%so'%(color))
ax.set_xlabel("cvg",size=20)
ax.set_ylabel("copy",size=20)
ax.set_title("%s"%(region_info.name),size=20)
f.savefig("%s/%s_copy_vs_cvg.pdf"%(outdir,region_info.name),format='pdf')
plt.close(1)
def make_histogram(region,region_info,outdir,great_ape_gene_hashes):
print region_info.name
plt.rc('grid',color='0.75',linestyle='l',linewidth='0.1')
f=plt.figure()
f.set_figwidth(10)
f.set_figheight(10)
nbins=0
mx=0
mn=100
do_apes=True
great_ape_cps = {}
if do_apes:
for ape,gene_hash in great_ape_gene_hashes.iteritems():
if not region_info.TID in gene_hash:
do_apes=False
print "ID does not exist for APE"
print region_info.TID
break
great_ape_cps[ape] = gene_hash[region_info.TID]
mx=int(max(great_ape_cps[ape],mx))
mn=int(min(great_ape_cps[ape],mn))
axescolor = '#f6f6f6'
left, width = 0.1, 0.8
rect1 = [left, 0.1, width, 0.8] #left, bottom, width, height
for pop,freq_info in region_info.frequencies_by_pop.iteritems():
#nbins = int(round(max(nbins,max(freq_info))))
mx=int(max(max(freq_info),mx))
mn=int(min(min(freq_info),mn))
#nbins+=1
nbins = mx-mn+1
labels = []
pop_to_hists = {}
for pop,freq_info in region_info.frequencies_by_pop.iteritems():
print pop,freq_info
pop_to_hists[pop] = np.histogram(np.array(freq_info),bins=nbins,range=[mn,mx],normed=True,new=True)[0]
print np.histogram(np.array(freq_info),bins=nbins,range=[mn,mx],normed=True,new=True)
print pop_to_hists[pop]
x = np.arange(mn,mx+1)
width=.25
print x
for i in range(x.shape[0]):
labels.append(str(x[i]))
ax = f.add_axes(rect1)
bars = {}
leg = []
leg_colors = []
lines = []
k=0
colors = ['r','g','b','o']
starty = .9
sub=.03
i=0
for pop,freqs in region_info.frequencies_by_pop.iteritems():
med = np.median(np.array(freqs))
sig2 = np.array(freqs).var()
leg.append("%s med: %d var: %.1f"%(pop,int(med),sig2))
i+=1
for pop,hist in pop_to_hists.iteritems():
bars[pop] = ax.bar(x+k*width,hist,width,color=colors[k],alpha=0.5)
leg_colors.append(colors[k])
#ax.legend(bars[pop][0],pop)
lines.append(bars[pop][0])
k+=1
ape_colors = ['orange','purple','yellow','brown']
k=0
if do_apes:
for ape,cp in great_ape_cps.iteritems():
bars_ape = ax.bar(np.array([cp]),np.array([.1]),width/2,color=ape_colors[k],alpha=.8)
leg.append("%s %f"%(ape,cp))
lines.append(bars_ape[0])
k+=1
ax.set_xticks(x+width*k/2)
ax.set_xticklabels(labels,size=20)
ax.grid(color='k',linestyle='--',linewidth=1,alpha=.3)
yticklabels = [str(x) for x in np.arange(0,1,.1)]
ax.set_yticklabels(yticklabels,size=20)
ax.set_ylabel("%",size=20)
ax.set_xlabel("cp number",size=20)
ax.legend(lines,leg)
ax.set_title("%s"%(region_info.name),size=20)
f.savefig("%s/%s_pop_hist.pdf"%(outdir,region_info.name),format='pdf')
plt.close(1)
return
k=0
for pop,ihist in percent_hists.iteritems():
percent_hists[pop] = ihist/ihist.sum()
#jhplot(x,hist,"|%s"%(colors[k]))
#hist(x)
vlines(x+float(k)/3,zeros,percent_hists[pop],color=colors[k],linewidth=7)
k+=1
leg.append(pop)
#legend(leg)
title("percent")
print leg
legend(leg)
f.get_axes()[0].xaxis.set_ticks(range(21))
#f.add_axes([0,40,0,1],xticks=[0,1,2,3,4,5,6,8,9,10,11,12,13,14,15,16,17,18,19,20],label='axis2',axisbg='g')
#[0,1,2,3,4,5,6,8,9,10,11,12,13,14,15,16,17,18,19,20])
f=figure(2)
k=0
for pop,ihist in mode_hists.iteritems():
mode_hists[pop] = ihist/ihist.sum()
#plot(x,hist,"|%s"%(colors[k]))
#hist(x)
vlines(x+float(k)/5,zeros,mode_hists[pop],color=colors[k],linewidth=7)
k+=1
legend(leg)
title("Predicted copy number %s"%(name))
xlabel("predicted copy number")
ylabel("percentage of population")
f.get_axes()[0].xaxis.set_ticks(range(21))
savefig("%smode_hist.png"%(name),format='png')
print percent_hists
print mode_hists
def load_plot_regions(fn_regions):
if fn_regions == None: return []
plot_regions = []
for line in open(fn_regions,'r').readlines():
if line[0] == "#": continue
print line
sline = line.split()
uID = "%s:"%(sline[1])
uID += ":".join(sline[2:5])
plot_regions.append(uID)
print uID
return plot_regions
def get_transcript_ids(fn_transcript_id):
print fn_transcript_id
gene_id_list = open(fn_transcript_id,'r').readlines()
transcript_ids = {}
for gene_info in gene_id_list:
(TID,name,chr,start,end,unmasked_len,GCp) = gene_info.split()
transcript_ids["%s:%s:%s"%(chr,start,end)] = {"tid":TID,"chr":chr,"start":start,"end":end,"unmasked":unmasked_len,"GC":GCp}
return transcript_ids
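# get_cp_by_gene below expects whitespace-delimited rows of
# (chr, start, end, TID, cp); e.g. (values illustrative):
#   chr1 1000 2000 ENST00000123 2.0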
def get_cp_by_gene(gene_file):
cps_by_TID = {}
for line in open(gene_file,'r').readlines():
if len(line.split()) == 0: continue
(chr,start,end,TID,cp) = line.split()
cps_by_TID[TID] = float(cp)
return cps_by_TID
def get_calkan_cp_calls(fn_great_ape_cps_files):
calkan_cp_calls = {}
if(fn_great_ape_cps_files!=None):
for line in open(fn_great_ape_cps_files,'r').readlines():
(genome,gene_file) = line.split()
calkan_cp_calls[genome] = get_cp_by_gene(gene_file)
return calkan_cp_calls
if __name__=='__main__':
opts = OptionParser()
opts.add_option('','--input_file_name',dest='input_file_name')
opts.add_option('','--input_genomes',dest='fn_input_genomes')
opts.add_option('','--outdir',dest='outdir')
opts.add_option('','--sex_pop_index',dest='fn_sex_pop_index')
#opts.add_option('','--analysis_dir',dest='fn_analysis_dir')
opts.add_option('','--input_regions',dest='input_regions',default=None)
opts.add_option('','--out_file',dest='outfile',default=None)
opts.add_option('','--regress',dest='regress',action='store_true',default=False)
opts.add_option('','--plot_regions',dest='plot_regions',default=None)
opts.add_option('','--do_plotting',action="store_true",dest='do_plotting',default=False)
opts.add_option('','--great_ape_cps_files',dest='fn_great_ape_cps_files',default=None)
opts.add_option('','--cell_line_information',dest='fn_cell_line_info',default=None)
opts.add_option('','--output_coverage',dest='output_cvg',action='store_true',default=False)
opts.add_option('','--simple_plot',dest='simple_plot',action='store_true',default=False)
opts.add_option('','--input_dir',dest='input_dir',default=None)
#opts.add_option('','--transcript_id_file',dest='fn_transcript_id')
#opts.add_option('','--call_metric',dest='outfile',default="summary")
#opts.add_option('','--out_genomes',dest='fn_out_genomes')
(o, args) = opts.parse_args()
great_ape_cps = get_calkan_cp_calls(o.fn_great_ape_cps_files)
cell_line_info = {}
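# the cell line info file is one CSV row per line: "name,cells_fixed,in_nitrogen"
# e.g. (values illustrative): NA12878,yes,no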
if o.fn_cell_line_info != None:
read_cell_line_info = open(o.fn_cell_line_info,'r').readlines()
for cell_line_line in read_cell_line_info:
(name,cells_fixed,in_nitrogen) = cell_line_line.split(",")
cell_line_info[name] = "%s,%s"%(cells_fixed,in_nitrogen.rstrip())
print cell_line_info[name]
mkdir("./",o.outdir)
print "loading genome information"
genome_info = kgf.genome_info(o.fn_input_genomes,o.fn_sex_pop_index,QC_check=o.output_cvg)
print "done"
regions_by_uID = {}
#print o.input_regions
expected_len = 0
if o.input_regions != None:
for l in open(o.input_regions,'r').readlines():
expected_len+= (l[0]!="#") and 1
input_genomes = open(o.fn_input_genomes,'r').readlines()
plot_regions = load_plot_regions(o.plot_regions)
outstr = "\t".join(["name", "chr", "start", "end", "TID"])
for input_genomes_line in input_genomes:
(genome_id,fn_wssd_dir,fn_bac_dir,chunk_dir,primary_analysis_dir) = input_genomes_line.split()
if genome_id[0] == "#": continue
genome_ob = genome_info.genomes[genome_id]
if o.input_dir is None:
input_file = "%s/%s/ml_region_analysis/%s"%(primary_analysis_dir,genome_id,o.input_file_name)
else:
input_file = "%s/%s_%s"%(o.input_dir,o.input_file_name,genome_id)
print input_file
##########check the output file exists
#if(not(os.path.exists("%s/%s/ml_region_analysis/%s"%(primary_analysis_dir,genome_id,o.input_file_name)))):
if not os.path.exists(input_file):
print "%s does not appear to exist" % (input_file)
print
print '%s may have failed previous QC or may still be running' % (genome_id)
continue
##############check the output file is of the correct length
#################here we could also put "take the first n"
#analyzed_by_ml_lines = open("%s/%s/ml_region_analysis/%s"%(primary_analysis_dir,genome_id,o.input_file_name)).readlines()
analyzed_by_ml_lines = open(input_file, "r").readlines()
if(len(analyzed_by_ml_lines) != expected_len):
print "expected:%d encountered:%d" % (expected_len, len(analyzed_by_ml_lines))
print "expected number of lines in %s does not match that in %s" % (analyzed_by_ml_lines, o.input_regions)
#continue
print "\t getting information %s" %(genome_id)
outstr += "\t%s" % genome_id
for analysis_line in analyzed_by_ml_lines:
(name,TID,chr,start,end,cp,bywnd_cp,median,ll,regressed_cp,regressed_cp_by_wnd,regressed_cp_median) = analysis_line.split()
if o.regress:
cp = float(regressed_cp_median)
else:
cp = float(median)
uID = "%s:%s:%s:%s"%(TID,chr,start,end)
if(not(uID in regions_by_uID)):
regions_by_uID[uID] = region_info(name,chr,start,end,TID)
regions_by_uID[uID].add_info_from_genome(cp,genome_ob)
outstr+="\n"
for region_uID, region_inf in regions_by_uID.iteritems():
outstr+="\t".join([region_inf.name,region_inf.chr,region_inf.start,region_inf.end,region_inf.transcript_id])
#for genome_id,genome in genome_info.genomes.iteritems():
for input_genomes_line in input_genomes:
(genome_id,fn_wssd_dir,fn_bac_dir,chunk_dir,primary_analysis_dir) = input_genomes_line.split()
if genome_id[0] =="#": continue
if genome_id in region_inf.cps_by_genome:
#print genome_id
outstr+="\t%f"%(region_inf.cps_by_genome[genome_id])
else:
print "ERROR genome_id not in region_info"
print genome_id
print region_inf.cps_by_genome
sys.exit(1)
outstr+="\n"
# print outstr
if o.outfile != None:
open("%s/%s"%(o.outdir,o.outfile),'w').write(outstr)
#print percent_hists[pop]
#print hist
# percent_hists[pop]=ihist + percent_hists[pop]
# mode_hists[pop][np.where(ihist==np.amax(ihist))[0]]+=1<|fim▁end|> | |
<|file_name|>view-service.ts<|end_file_name|><|fim▁begin|><|fim▁hole|>
@Injectable()
export class warstockrptService {
constructor(private _dataserver: DataService, private _router: Router) { }
}<|fim▁end|> | import { Injectable } from '@angular/core';
import { DataService } from '../../../_service/dataconnect';
import { Router } from '@angular/router'; |
<|file_name|>serializers.py<|end_file_name|><|fim▁begin|>from django.contrib.auth import get_user_model
from rest_framework.fields import CharField
from rest_framework.serializers import ModelSerializer
from grandchallenge.challenges.models import Challenge
from grandchallenge.components.serializers import (
ComponentInterfaceValueSerializer,
)
from grandchallenge.evaluation.models import (
Evaluation,
Phase,
Submission,
)
class UserSerializer(ModelSerializer):
class Meta:<|fim▁hole|>class ChallengeSerializer(ModelSerializer):
class Meta:
model = Challenge
fields = (
"title",
"short_name",
)
class PhaseSerializer(ModelSerializer):
challenge = ChallengeSerializer()
class Meta:
model = Phase
fields = (
"challenge",
"title",
"slug",
)
class SubmissionSerializer(ModelSerializer):
phase = PhaseSerializer()
creator = UserSerializer()
class Meta:
model = Submission
fields = (
"pk",
"phase",
"created",
"creator",
"comment",
"predictions_file",
"supplementary_file",
"supplementary_url",
)
class EvaluationSerializer(ModelSerializer):
submission = SubmissionSerializer()
outputs = ComponentInterfaceValueSerializer(many=True)
status = CharField(source="get_status_display", read_only=True)
title = CharField(read_only=True)
class Meta:
model = Evaluation
fields = (
"pk",
"method",
"submission",
"created",
"published",
"outputs",
"rank",
"rank_score",
"rank_per_metric",
"status",
"title",
)<|fim▁end|> | model = get_user_model()
fields = ("username",)
|
<|file_name|>browser_util.d.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import { NgZone } from '@angular/core';
export declare let browserDetection: BrowserDetection;
export declare class BrowserDetection {
private _overrideUa;
private _ua;
static setup(): void;
constructor(ua: string);
isFirefox: boolean;
isAndroid: boolean;
isEdge: boolean;
isIE: boolean;
isWebkit: boolean;
isIOS7: boolean;<|fim▁hole|> isOldChrome: boolean;
}
export declare function dispatchEvent(element: any, eventType: any): void;
export declare function el(html: string): HTMLElement;
export declare function normalizeCSS(css: string): string;
export declare function stringifyElement(el: any): string;
export declare function createNgZone(): NgZone;<|fim▁end|> | isSlow: boolean;
supportsNativeIntlApi: boolean;
isChromeDesktop: boolean; |
<|file_name|>hashlib.py<|end_file_name|><|fim▁begin|># $Id$
#
# Copyright (C) 2005 Gregory P. Smith ([email protected])
# Licensed to PSF under a Contributor Agreement.
#
__doc__ = """hashlib module - A common interface to many hash functions.
new(name, string='') - returns a new hash object implementing the
given hash function; initializing the hash
using the given string data.
Named constructor functions are also available, these are much faster
than using new():
md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
More algorithms may be available on your platform but the above are guaranteed
to exist. See the algorithms_guaranteed and algorithms_available attributes
to find out what algorithm names can be passed to new().
NOTE: If you want the adler32 or crc32 hash functions they are available in
the zlib module.
Choose your hash function wisely. Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
Hash objects have these methods:
- update(arg): Update the hash object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments.
- digest(): Return the digest of the strings passed to the update() method
so far. This may contain non-ASCII characters, including
NUL bytes.
- hexdigest(): Like digest() except the digest is returned as a string of
double length, containing only hexadecimal digits.
- copy(): Return a copy (clone) of the hash object. This can be used to
efficiently compute the digests of strings that share a common
initial substring.
For example, to obtain the digest of the string 'Nobody inspects the
spammish repetition':
>>> import hashlib
>>> m = hashlib.md5()
>>> m.update("Nobody inspects")
>>> m.update(" the spammish repetition")
>>> m.digest()
'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9'
More condensed:
>>> hashlib.sha224("Nobody inspects the spammish repetition").hexdigest()
'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
"""
# This tuple and __get_builtin_constructor() must be modified if a new
# always available algorithm is added.
__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
algorithms_guaranteed = set(__always_supported)
algorithms_available = set(__always_supported)
algorithms = __always_supported
__all__ = __always_supported + ('new', 'algorithms_guaranteed',
'algorithms_available', 'algorithms',
'pbkdf2_hmac')
def __get_builtin_constructor(name):
try:
if name in ('SHA1', 'sha1'):
import _sha
return _sha.new
elif name in ('MD5', 'md5'):
import _md5
return _md5.new
elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
import _sha256
bs = name[3:]
if bs == '256':
return _sha256.sha256
elif bs == '224':
return _sha256.sha224
elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
import _sha512
bs = name[3:]
if bs == '512':
return _sha512.sha512
elif bs == '384':
return _sha512.sha384
except ImportError:
pass # no extension module, this hash is unsupported.
raise ValueError('unsupported hash type ' + name)
def __get_openssl_constructor(name):
try:
f = getattr(_hashlib, 'openssl_' + name)
# Allow the C module to raise ValueError. The function will be
# defined but the hash not actually available thanks to OpenSSL.
f()
# Use the C function directly (very fast)
return f
except (AttributeError, ValueError):
return __get_builtin_constructor(name)
def __py_new(name, string=''):
"""new(name, string='') - Return a new hashing object using the named algorithm;
optionally initialized with a string.
"""
return __get_builtin_constructor(name)(string)
def __hash_new(name, string=''):
"""new(name, string='') - Return a new hashing object using the named algorithm;
optionally initialized with a string.
"""
try:
return _hashlib.new(name, string)
except ValueError:
# If the _hashlib module (OpenSSL) doesn't support the named
# hash, try using our builtin implementations.
# This allows for SHA224/256 and SHA384/512 support even though
# the OpenSSL library prior to 0.9.8 doesn't provide them.
return __get_builtin_constructor(name)(string)
<|fim▁hole|>
try:
import _hashlib
new = __hash_new
__get_hash = __get_openssl_constructor
algorithms_available = algorithms_available.union(
_hashlib.openssl_md_meth_names)
except ImportError:
new = __py_new
__get_hash = __get_builtin_constructor
for __func_name in __always_supported:
# try them all, some may not work due to the OpenSSL
# version not supporting that algorithm.
try:
globals()[__func_name] = __get_hash(__func_name)
except ValueError:
import logging
logging.exception('code for hash %s was not found.', __func_name)
try:
# OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA
from _hashlib import pbkdf2_hmac
except ImportError:
import binascii
import struct
_trans_5C = b"".join(chr(x ^ 0x5C) for x in range(256))
_trans_36 = b"".join(chr(x ^ 0x36) for x in range(256))
def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None):
"""Password based key derivation function 2 (PKCS #5 v2.0)
This Python implementations based on the hmac module about as fast
as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster
for long passwords.
"""
if not isinstance(hash_name, str):
raise TypeError(hash_name)
if not isinstance(password, (bytes, bytearray)):
password = bytes(buffer(password))
if not isinstance(salt, (bytes, bytearray)):
salt = bytes(buffer(salt))
# Fast inline HMAC implementation
inner = new(hash_name)
outer = new(hash_name)
blocksize = getattr(inner, 'block_size', 64)
if len(password) > blocksize:
password = new(hash_name, password).digest()
password = password + b'\x00' * (blocksize - len(password))
inner.update(password.translate(_trans_36))
outer.update(password.translate(_trans_5C))
def prf(msg, inner=inner, outer=outer):
# PBKDF2_HMAC uses the password as key. We can re-use the same
# digest objects and just update copies to skip initialization.
icpy = inner.copy()
ocpy = outer.copy()
icpy.update(msg)
ocpy.update(icpy.digest())
return ocpy.digest()
if iterations < 1:
raise ValueError(iterations)
if dklen is None:
dklen = outer.digest_size
if dklen < 1:
raise ValueError(dklen)
hex_format_string = "%%0%ix" % (new(hash_name).digest_size * 2)
dkey = b''
loop = 1
while len(dkey) < dklen:
prev = prf(salt + struct.pack(b'>I', loop))
rkey = int(binascii.hexlify(prev), 16)
for i in xrange(iterations - 1):
prev = prf(prev)
rkey ^= int(binascii.hexlify(prev), 16)
loop += 1
dkey += binascii.unhexlify(hex_format_string % rkey)
return dkey[:dklen]
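    # Quick sanity check (RFC 6070 test vector for PBKDF2-HMAC-SHA1):
    #   >>> import binascii
    #   >>> binascii.hexlify(pbkdf2_hmac('sha1', 'password', 'salt', 1, 20))
    #   '0c60c80f961f0e71f3a9b524af6012062fe037a6'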
# Cleanup locals()
del __always_supported, __func_name, __get_hash
del __py_new, __hash_new, __get_openssl_constructor<|fim▁end|> | |
<|file_name|>logsettings.py<|end_file_name|><|fim▁begin|>"""Get log settings."""
import os
import platform
import sys
from logging.handlers import SysLogHandler
LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
def get_logger_config(log_dir,
logging_env="no_env",
tracking_filename="tracking.log",
edx_filename="edx.log",
dev_env=False,
syslog_addr=None,
debug=False,
local_loglevel='INFO',
console_loglevel=None,
service_variant=None):
"""
Return the appropriate logging config dictionary. You should assign the
result of this to the LOGGING var in your settings. The reason it's done
this way instead of registering directly is because I didn't want to worry
about resetting the logging state if this is called multiple times when
settings are extended.
If dev_env is set to true logging will not be done via local rsyslogd,
instead, tracking and application logs will be dropped in log_dir.
"tracking_filename" and "edx_filename" are ignored unless dev_env
is set to true since otherwise logging is handled by rsyslogd.
"""
# Revert to INFO if an invalid string is passed in
if local_loglevel not in LOG_LEVELS:
local_loglevel = 'INFO'
if console_loglevel is None or console_loglevel not in LOG_LEVELS:
console_loglevel = 'DEBUG' if debug else 'INFO'
if service_variant is None:
# default to a blank string so that if SERVICE_VARIANT is not
# set we will not log to a sub directory
service_variant = ''
hostname = platform.node().split(".")[0]
syslog_format = ("[service_variant={service_variant}]"
"[%(name)s][env:{logging_env}] %(levelname)s "
"[{hostname} %(process)d] [%(filename)s:%(lineno)d] "
"- %(message)s").format(service_variant=service_variant,
logging_env=logging_env,
hostname=hostname)
handlers = ['console', 'local']
if syslog_addr:
handlers.append('syslogger-remote')
logger_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s %(levelname)s %(process)d '
'[%(name)s] %(filename)s:%(lineno)d - %(message)s',
},
'syslog_format': {'format': syslog_format},
'raw': {'format': '%(message)s'},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'handlers': {
'console': {
'level': console_loglevel,
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': sys.stderr,
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'newrelic': {
'level': 'ERROR',
'class': 'lms.lib.newrelic_logging.NewRelicHandler',
'formatter': 'raw',
}
},
'loggers': {
'tracking': {
'handlers': ['tracking'],<|fim▁hole|> },
'': {
'handlers': handlers,
'level': 'DEBUG',
'propagate': False
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
if syslog_addr:
logger_config['handlers'].update({
'syslogger-remote': {
'level': 'INFO',
'class': 'logging.handlers.SysLogHandler',
'address': syslog_addr,
'formatter': 'syslog_format',
},
})
if dev_env:
tracking_file_loc = os.path.join(log_dir, tracking_filename)
edx_file_loc = os.path.join(log_dir, edx_filename)
logger_config['handlers'].update({
'local': {
'class': 'logging.handlers.RotatingFileHandler',
'level': local_loglevel,
'formatter': 'standard',
'filename': edx_file_loc,
'maxBytes': 1024 * 1024 * 2,
'backupCount': 5,
},
'tracking': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': tracking_file_loc,
'formatter': 'raw',
'maxBytes': 1024 * 1024 * 2,
'backupCount': 5,
},
})
else:
# for production environments we will only
# log INFO and up
logger_config['loggers']['']['level'] = 'INFO'
logger_config['handlers'].update({
'local': {
'level': local_loglevel,
'class': 'logging.handlers.SysLogHandler',
'address': '/dev/log',
'formatter': 'syslog_format',
'facility': SysLogHandler.LOG_LOCAL0,
},
'tracking': {
'level': 'DEBUG',
'class': 'logging.handlers.SysLogHandler',
'address': '/dev/log',
'facility': SysLogHandler.LOG_LOCAL1,
'formatter': 'raw',
},
})
return logger_config<|fim▁end|> | 'level': 'DEBUG',
'propagate': False, |
<|file_name|>database.go<|end_file_name|><|fim▁begin|>package redis
import (
"bytes"
"encoding/gob"
"github.com/iris-framework/iris/adaptors/sessions/sessiondb/redis/service"
)
// Database is the redis database for sessions
type Database struct {
redis *service.Service
}
// New returns a new redis database
func New(cfg ...service.Config) *Database {
return &Database{redis: service.New(cfg...)}
}
// Config returns the configuration for the redis server bridge, you can change them
func (d *Database) Config() *service.Config {
return d.redis.Config
}
// Load loads the values to the underline
func (d *Database) Load(sid string) map[string]interface{} {
values := make(map[string]interface{})
if !d.redis.Connected { // check the redis connection the first time a session is loaded
d.redis.Connect()
_, err := d.redis.PingPong()
if err != nil {
if err != nil {
// don't try to get the logger, just print these to the console... atm
///TODO: Find a way to use the iris' defined logger via an optional interface to Database.
// println("Redis Connection error on Connect: " + err.Error())
// println("But don't panic, auto-switching to memory store right now!")
}
//fetch the values stored under this session id and copy them into the map
val, err := d.redis.GetBytes(sid)
if err == nil {
// deserialization errors are ignored here (see the TODO above about logging)
DeserializeBytes(val, &values)
}
return values
}
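// Example wiring (a sketch, not part of this package's API; it assumes the
// zero-value service.Config is usable):
//
//	db := New(service.Config{})
//	values := db.Load(sid)
//	values["user"] = "john"
//	db.Update(sid, values)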
// serialize converts the values to bytes for storage inside redis; serialization errors are printed to the console (not panicked on)
func serialize(values map[string]interface{}) []byte {
val, err := SerializeBytes(values)
if err != nil {
println("On redisstore.serialize: " + err.Error())
}
return val
}
// Update updates the real redis store
func (d *Database) Update(sid string, newValues map[string]interface{}) {
if len(newValues) == 0 {
go d.redis.Delete(sid)
} else {
go d.redis.Set(sid, serialize(newValues)) //set/update all the values
}
}
// SerializeBytes serializa bytes using gob encoder and returns them
func SerializeBytes(m interface{}) ([]byte, error) {
buf := new(bytes.Buffer)
enc := gob.NewEncoder(buf)
err := enc.Encode(m)
if err == nil {
return buf.Bytes(), nil<|fim▁hole|> }
return nil, err
}
// DeserializeBytes converts the bytes to an object using gob decoder
func DeserializeBytes(b []byte, m interface{}) error {
dec := gob.NewDecoder(bytes.NewBuffer(b))
return dec.Decode(m) //no reference here otherwise doesn't work because of go remote object
}<|fim▁end|> | |
<|file_name|>AbstractGuiElement.java<|end_file_name|><|fim▁begin|>/*
* This file is part of jGui API, licensed under the MIT License (MIT).
*
* Copyright (c) 2016 johni0702 <https://github.com/johni0702>
* Copyright (c) contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package de.johni0702.minecraft.gui.element;
import de.johni0702.minecraft.gui.GuiRenderer;
import de.johni0702.minecraft.gui.RenderInfo;
import de.johni0702.minecraft.gui.container.GuiContainer;
import de.johni0702.minecraft.gui.utils.lwjgl.Dimension;
import de.johni0702.minecraft.gui.utils.lwjgl.Point;
import de.johni0702.minecraft.gui.utils.lwjgl.ReadableDimension;
import de.johni0702.minecraft.gui.versions.MCVer;
import net.minecraft.client.MinecraftClient;
import net.minecraft.util.Identifier;
public abstract class AbstractGuiElement<T extends AbstractGuiElement<T>> implements GuiElement<T> {
protected static final Identifier TEXTURE = new Identifier("jgui", "gui.png");
private final MinecraftClient minecraft = MCVer.getMinecraft();
private GuiContainer container;
private GuiElement tooltip;
private boolean enabled = true;
protected Dimension minSize, maxSize;
/**
* The last size this element was render at layer 0.
* May be {@code null} when this element has not yet been rendered.
*/
private ReadableDimension lastSize;
public AbstractGuiElement() {
}
public AbstractGuiElement(GuiContainer container) {
container.addElements(null, this);
}
protected abstract T getThis();
@Override
public void layout(ReadableDimension size, RenderInfo renderInfo) {
if (size == null) {
if (getContainer() == null) {
throw new RuntimeException("Any top containers must implement layout(null, ...) themselves!");
}
getContainer().layout(size, renderInfo.layer(renderInfo.layer + getLayer()));
return;
}
if (renderInfo.layer == 0) {
lastSize = size;
}
}
@Override
public void draw(GuiRenderer renderer, ReadableDimension size, RenderInfo renderInfo) {
}
@Override
public T setEnabled(boolean enabled) {
this.enabled = enabled;
return getThis();
}
@Override
public T setEnabled() {
return setEnabled(true);
}
@Override
public T setDisabled() {
return setEnabled(false);
}
@Override
public GuiElement getTooltip(RenderInfo renderInfo) {
if (tooltip != null && lastSize != null) {
Point mouse = new Point(renderInfo.mouseX, renderInfo.mouseY);
if (container != null) {
container.convertFor(this, mouse);
}
if (mouse.getX() > 0
&& mouse.getY() > 0
&& mouse.getX() < lastSize.getWidth()
&& mouse.getY() < lastSize.getHeight()) {
return tooltip;
}
}
return null;
}
@Override
public T setTooltip(GuiElement tooltip) {
this.tooltip = tooltip;
return getThis();
}
<|fim▁hole|> }
public T setMinSize(ReadableDimension minSize) {
this.minSize = new Dimension(minSize);
return getThis();
}
public T setMaxSize(ReadableDimension maxSize) {
this.maxSize = new Dimension(maxSize);
return getThis();
}
public T setSize(ReadableDimension size) {
setMinSize(size);
return setMaxSize(size);
}
public T setSize(int width, int height) {
return setSize(new Dimension(width, height));
}
public T setWidth(int width) {
if (minSize == null) {
minSize = new Dimension(width, 0);
} else {
minSize.setWidth(width);
}
if (maxSize == null) {
maxSize = new Dimension(width, Integer.MAX_VALUE);
} else {
maxSize.setWidth(width);
}
return getThis();
}
public T setHeight(int height) {
if (minSize == null) {
minSize = new Dimension(0, height);
} else {
minSize.setHeight(height);
}
if (maxSize == null) {
maxSize = new Dimension(Integer.MAX_VALUE, height);
} else {
maxSize.setHeight(height);
}
return getThis();
}
public int getLayer() {
return 0;
}
@Override
public ReadableDimension getMinSize() {
ReadableDimension calcSize = calcMinSize();
if (minSize == null) {
return calcSize;
} else {
if (minSize.getWidth() >= calcSize.getWidth() && minSize.getHeight() >= calcSize.getHeight()) {
return minSize;
} else {
return new Dimension(
Math.max(calcSize.getWidth(), minSize.getWidth()),
Math.max(calcSize.getHeight(), minSize.getHeight())
);
}
}
}
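    // Note: the effective minimum size returned above is the per-axis maximum
    // of the user-set minSize and the calculated content size, e.g.
    // (illustrative) a calculated 40x10 combined with setMinSize(20, 20)
    // yields 40x20.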
protected abstract ReadableDimension calcMinSize();
@Override
public ReadableDimension getMaxSize() {
return maxSize == null ? new Dimension(Integer.MAX_VALUE, Integer.MAX_VALUE) : maxSize;
}
public MinecraftClient getMinecraft() {
return this.minecraft;
}
public GuiContainer getContainer() {
return this.container;
}
public boolean isEnabled() {
return this.enabled;
}
protected ReadableDimension getLastSize() {
return this.lastSize;
}
}<|fim▁end|> | @Override
public T setContainer(GuiContainer container) {
this.container = container;
return getThis(); |
<|file_name|>config.js<|end_file_name|><|fim▁begin|>var dest = './public';
var src = './src';
module.exports = {
browserSync: {
server: {
// Serve up our build folder
baseDir: dest
},
open: false,
https: false
},
sass: {
src: [
src + '/assets/css/*.{sass,scss}',
src + '/assets/css/**/*.{sass,scss}',
src + '/assets/css/**/**/*.{sass,scss}'
],
dest: dest + '/css',
settings: {
// indentedSyntax: true, // Enable .sass syntax!
imagePath: 'images' // Used by the image-url helper<|fim▁hole|> },
browserify: {
// A separate bundle will be generated for each
// bundle config in the list below
bundleConfigs: [/*{
entries: src + '/javascript/global.coffee',
dest: dest,
outputName: 'global.js',
// Additional file extentions to make optional
extensions: ['.coffee', '.hbs'],
// list of modules to make require-able externally
require: [ 'jquery', 'backbone/node_modules/underscore' ]
// See https://github.com/greypants/gulp-starter/issues/87 for note about
// why this is 'backbone/node_modules/underscore' and not 'underscore'
}, */{
entries: [ src + '/app/app.module.js' ],
dest: dest + '/js',
outputName: 'app.js',
// list of externally available modules to exclude from the bundle
external: [ 'jquery', 'underscore' ]
}]
},
moveAssets: {
src: [
src + '/*.html',
src + '/app/**/**/*.html',
src + '/assets/libs/*'
],
dest: dest
},
templateCache: {
src: src,
dest: dest
},
jshint: {
src: [
src + '/app/*.js',
src + '/app/**/*.js',
src + '/app/**/**/*.js',
src + '/app/**/**/**/*.js'
]
}
};<|fim▁end|> | } |
<|file_name|>test_unsafe_proxy.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.six import PY3
from ansible.utils.unsafe_proxy import AnsibleUnsafe, AnsibleUnsafeBytes, AnsibleUnsafeText, wrap_var
def test_wrap_var_text():
assert isinstance(wrap_var(u'foo'), AnsibleUnsafeText)
def test_wrap_var_bytes():
assert isinstance(wrap_var(b'foo'), AnsibleUnsafeBytes)
def test_wrap_var_string():
if PY3:
assert isinstance(wrap_var('foo'), AnsibleUnsafeText)
else:
assert isinstance(wrap_var('foo'), AnsibleUnsafeBytes)
def test_wrap_var_dict():
assert isinstance(wrap_var(dict(foo='bar')), dict)
assert not isinstance(wrap_var(dict(foo='bar')), AnsibleUnsafe)
assert isinstance(wrap_var(dict(foo=u'bar'))['foo'], AnsibleUnsafeText)
def test_wrap_var_dict_None():
assert wrap_var(dict(foo=None))['foo'] is None
assert not isinstance(wrap_var(dict(foo=None))['foo'], AnsibleUnsafe)
def test_wrap_var_list():
assert isinstance(wrap_var(['foo']), list)
assert not isinstance(wrap_var(['foo']), AnsibleUnsafe)
assert isinstance(wrap_var([u'foo'])[0], AnsibleUnsafeText)
def test_wrap_var_list_None():
assert wrap_var([None])[0] is None
assert not isinstance(wrap_var([None])[0], AnsibleUnsafe)
def test_wrap_var_set():
assert isinstance(wrap_var(set(['foo'])), set)
assert not isinstance(wrap_var(set(['foo'])), AnsibleUnsafe)
for item in wrap_var(set([u'foo'])):
assert isinstance(item, AnsibleUnsafeText)
def test_wrap_var_set_None():
for item in wrap_var(set([None])):
assert item is None
assert not isinstance(item, AnsibleUnsafe)
def test_wrap_var_tuple():
assert isinstance(wrap_var(('foo',)), tuple)
assert not isinstance(wrap_var(('foo',)), AnsibleUnsafe)
assert isinstance(wrap_var(('foo',))[0], AnsibleUnsafe)
def test_wrap_var_tuple_None():
assert wrap_var((None,))[0] is None
assert not isinstance(wrap_var((None,))[0], AnsibleUnsafe)
def test_wrap_var_None():
assert wrap_var(None) is None
assert not isinstance(wrap_var(None), AnsibleUnsafe)
def test_wrap_var_unsafe_text():
assert isinstance(wrap_var(AnsibleUnsafeText(u'foo')), AnsibleUnsafeText)
def test_wrap_var_unsafe_bytes():
assert isinstance(wrap_var(AnsibleUnsafeBytes(b'foo')), AnsibleUnsafeBytes)
def test_wrap_var_no_ref():
thing = {
'foo': {
'bar': 'baz'
},
'bar': ['baz', 'qux'],
'baz': ('qux',),
'none': None,
'text': 'text',
}
wrapped_thing = wrap_var(thing)
    assert thing is not wrapped_thing
    assert thing['foo'] is not wrapped_thing['foo']
    assert thing['bar'][0] is not wrapped_thing['bar'][0]
    assert thing['baz'][0] is not wrapped_thing['baz'][0]
    # wrap_var passes None through unchanged, so identity is preserved here
    assert wrapped_thing['none'] is None
    assert thing['text'] is not wrapped_thing['text']
def test_AnsibleUnsafeText():
assert isinstance(AnsibleUnsafeText(u'foo'), AnsibleUnsafe)<|fim▁hole|>def test_AnsibleUnsafeBytes():
assert isinstance(AnsibleUnsafeBytes(b'foo'), AnsibleUnsafe)<|fim▁end|> | |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
class Tip(models.Model):<|fim▁hole|> text = models.TextField(max_length=1000)
date = models.DateField(auto_now_add=True)
class Meta:
ordering = ['-date']
def __unicode__(self):
return '%s ' % (self.text)
class CurrentTip(models.Model):
index = models.IntegerField(max_length=4,default=0)
def __unicode__(self):
return '%s ' % (self.index)<|fim▁end|> | |
<|file_name|>jester_vectorize.py<|end_file_name|><|fim▁begin|>from src.utils import glove
import numpy as np
import string
class jester_vectorize():
def __init__(self, user_interactions, content, user_vector_type, content_vector_type, **support_files):
"""Set up the Jester Vectorizer.
Args:
user_interactions (rdd): The raw data of users interactions with
the system. For Jester, each "row" is as follows:
Row(joke_id, rating, user_id)
content (rdd): The raw data about the items in the dataset. For
Jester, each row is as follows: Row(joke_id, joke_text)
user_vector_type (str): The type of user vector desired. One of
'ratings', 'pos_ratings', 'ratings_to_interact', or None.
content_vector_type: The type of content vector desired. One of
'glove' or None.
support_files: Only one support file is used for this class:
glove_model: An instantiated glove model.
"""
self.user_vector_type = user_vector_type
self.content_vector_type = content_vector_type
self.user_interactions = user_interactions
self.content = content
# If no support files were passed in, initialize an empty support file
if support_files:
self.support_files = support_files
else:
self.support_files = {}
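    # Typical construction (a sketch; the RDDs and glove model are assumed to
    # be built elsewhere, and the variable names are illustrative):
    #   vec = jester_vectorize(interactions_rdd, content_rdd,
    #                          "ratings", "glove", glove_model=model)
    #   user_vectors = vec.get_user_vector()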
def get_user_vector(self):
"""Produce an RDD containing tuples of the form (user, item, rating).
There are three options when producing these user vectors:
ratings: The ratings the users assigned
pos_ratings: Only ratings > 0, all others are discarded
ratings_to_interact: Positive ratings are mapped to 1, negative to -1.
"""
uir = self.user_interactions.map(lambda row: (row.user_id, row.joke_id, row.rating))
if self.user_vector_type == 'ratings':
return uir
elif self.user_vector_type == 'pos_ratings':
return uir.filter(lambda (u, i, r): r > 0)
elif self.user_vector_type == 'ratings_to_interact':
return uir.map(lambda (u, i, r): (u, i, 1 if r > 0 else -1))
elif self.user_vector_type == 'none' or self.user_vector_type is None:
return None
else:
print "Please choose a user_vector_type between 'ratings', 'pos_ratings', 'ratings_to_interact', and 'none'"
return None
<|fim▁hole|>
glove: Use the Stanford GloVe model to sum vector ratings of all
the words in the joke.
"""
if self.content_vector_type == 'glove':
# The model is initialized by the user and passed in via the
# support_file object
glove_model = self.support_files["glove_model"]
# Transformation function
def joke_to_glove(row, glove):
vector = np.zeros(glove.vector_size)
for chunk in row.joke_text.split():
word = chunk.lower().strip(string.punctuation)
vector += glove[word]
return (row.joke_id, vector)
# Run the transformation function over the data
return self.content.map(lambda row: joke_to_glove(row, glove_model))
elif self.content_vector_type == 'none' or self.content_vector_type is None:
return None
else:
print "Please choose a content_vector_type between 'glove' or None"
return None<|fim▁end|> | def get_content_vector(self):
"""Produce an RDD containing tuples of the form (item, content_vector).
There is one method of producing content vectors: |
<|file_name|>AccumuloConstantPcjIntegrationTest.java<|end_file_name|><|fim▁begin|>package org.apache.rya.indexing.external;
import java.net.UnknownHostException;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.mock.MockInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.rya.indexing.pcj.storage.PcjException;
import org.apache.rya.indexing.pcj.storage.accumulo.PcjVarOrderFactory;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.openrdf.model.URI;
import org.openrdf.model.impl.LiteralImpl;
import org.openrdf.model.impl.URIImpl;
import org.openrdf.model.vocabulary.RDF;
import org.openrdf.model.vocabulary.RDFS;
import org.openrdf.query.BindingSet;
import org.openrdf.query.MalformedQueryException;
import org.openrdf.query.QueryEvaluationException;
import org.openrdf.query.QueryLanguage;
import org.openrdf.query.QueryResultHandlerException;
import org.openrdf.query.TupleQueryResultHandler;
import org.openrdf.query.TupleQueryResultHandlerException;
import org.openrdf.repository.RepositoryException;
import org.openrdf.repository.sail.SailRepository;
import org.openrdf.repository.sail.SailRepositoryConnection;
import org.openrdf.sail.SailException;
import com.google.common.base.Optional;
import org.apache.rya.api.persist.RyaDAOException;
import org.apache.rya.rdftriplestore.inference.InferenceEngineException;
public class AccumuloConstantPcjIntegrationTest {
private SailRepositoryConnection conn, pcjConn;
private SailRepository repo, pcjRepo;
private Connector accCon;
String prefix = "table_";
String tablename = "table_INDEX_";
URI obj, obj2, subclass, subclass2, talksTo;
@Before
public void init() throws RepositoryException,
TupleQueryResultHandlerException, QueryEvaluationException,
MalformedQueryException, AccumuloException,
AccumuloSecurityException, TableExistsException,
TableNotFoundException, RyaDAOException, InferenceEngineException,
NumberFormatException, UnknownHostException, SailException {
repo = PcjIntegrationTestingUtil.getNonPcjRepo(prefix, "instance");
conn = repo.getConnection();
pcjRepo = PcjIntegrationTestingUtil.getPcjRepo(prefix, "instance");
pcjConn = pcjRepo.getConnection();
final URI sub = new URIImpl("uri:entity");
subclass = new URIImpl("uri:class");
obj = new URIImpl("uri:obj");
talksTo = new URIImpl("uri:talksTo");
conn.add(sub, RDF.TYPE, subclass);
conn.add(sub, RDFS.LABEL, new LiteralImpl("label"));
conn.add(sub, talksTo, obj);
final URI sub2 = new URIImpl("uri:entity2");
subclass2 = new URIImpl("uri:class2");
obj2 = new URIImpl("uri:obj2");
conn.add(sub2, RDF.TYPE, subclass2);
conn.add(sub2, RDFS.LABEL, new LiteralImpl("label2"));
conn.add(sub2, talksTo, obj2);
accCon = new MockInstance("instance").getConnector("root",new PasswordToken(""));
}
@After
public void close() throws RepositoryException, AccumuloException,
AccumuloSecurityException, TableNotFoundException {
PcjIntegrationTestingUtil.closeAndShutdown(conn, repo);
PcjIntegrationTestingUtil.closeAndShutdown(pcjConn, pcjRepo);
PcjIntegrationTestingUtil.deleteCoreRyaTables(accCon, prefix);
PcjIntegrationTestingUtil.deleteIndexTables(accCon, 2, prefix);
}
@Test
public void testEvaluateTwoIndexVarInstantiate1() throws PcjException,
RepositoryException, AccumuloException, AccumuloSecurityException,
TableNotFoundException, TableExistsException,
MalformedQueryException, SailException, QueryEvaluationException,
TupleQueryResultHandlerException {
final URI superclass = new URIImpl("uri:superclass");
final URI superclass2 = new URIImpl("uri:superclass2");
conn.add(subclass, RDF.TYPE, superclass);
conn.add(subclass2, RDF.TYPE, superclass2);
conn.add(obj, RDFS.LABEL, new LiteralImpl("label"));
conn.add(obj2, RDFS.LABEL, new LiteralImpl("label2"));
conn.add(obj, RDFS.LABEL, new LiteralImpl("label"));
conn.add(obj2, RDFS.LABEL, new LiteralImpl("label2"));
final String indexSparqlString = ""//
+ "SELECT ?dog ?pig ?duck " //
+ "{" //
+ " ?pig a ?dog . "//
+ " ?pig <http://www.w3.org/2000/01/rdf-schema#label> ?duck "//
+ "}";//
final String indexSparqlString2 = ""//
+ "SELECT ?o ?f ?e ?c ?l " //
+ "{" //
+ " ?e <uri:talksTo> ?o . "//
+ " ?o <http://www.w3.org/2000/01/rdf-schema#label> ?l. "//
+ " ?c a ?f . " //
+ "}";//
final String queryString = ""//
+ "SELECT ?c ?l ?f ?o " //
+ "{" //
+ " <uri:entity> a ?c . "//
+ " <uri:entity> <http://www.w3.org/2000/01/rdf-schema#label> ?l. "//
+ " <uri:entity> <uri:talksTo> ?o . "//
+ " ?o <http://www.w3.org/2000/01/rdf-schema#label> ?l. "//
+ " ?c a ?f . " //
+ "}";//
PcjIntegrationTestingUtil.createAndPopulatePcj(conn, accCon, tablename + 1,
indexSparqlString, new String[] { "dog", "pig", "duck" },
Optional.<PcjVarOrderFactory> absent());
PcjIntegrationTestingUtil.createAndPopulatePcj(conn, accCon, tablename + 2,
indexSparqlString2, new String[] { "o", "f", "e", "c", "l" },
Optional.<PcjVarOrderFactory> absent());
final CountingResultHandler crh1 = new CountingResultHandler();
final CountingResultHandler crh2 = new CountingResultHandler();
conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString)
.evaluate(crh1);
PcjIntegrationTestingUtil.deleteCoreRyaTables(accCon, prefix);
pcjConn.prepareTupleQuery(QueryLanguage.SPARQL, queryString).evaluate(crh2);
Assert.assertEquals(crh1.getCount(), crh2.getCount());
}
@Test
public void testEvaluateThreeIndexVarInstantiate() throws PcjException,
RepositoryException, AccumuloException, AccumuloSecurityException,
TableNotFoundException, TableExistsException,
MalformedQueryException, SailException, QueryEvaluationException,
TupleQueryResultHandlerException {
final URI superclass = new URIImpl("uri:superclass");
final URI superclass2 = new URIImpl("uri:superclass2");
final URI sub = new URIImpl("uri:entity");
subclass = new URIImpl("uri:class");
obj = new URIImpl("uri:obj");
talksTo = new URIImpl("uri:talksTo");
final URI howlsAt = new URIImpl("uri:howlsAt");
final URI subType = new URIImpl("uri:subType");
conn.add(subclass, RDF.TYPE, superclass);
conn.add(subclass2, RDF.TYPE, superclass2);
conn.add(obj, RDFS.LABEL, new LiteralImpl("label"));
conn.add(obj2, RDFS.LABEL, new LiteralImpl("label2"));
conn.add(sub, howlsAt, superclass);
conn.add(superclass, subType, obj);
conn.add(obj, RDFS.LABEL, new LiteralImpl("label"));
conn.add(obj2, RDFS.LABEL, new LiteralImpl("label2"));
final String indexSparqlString = ""//
+ "SELECT ?dog ?pig ?duck " //
+ "{" //
+ " ?pig a ?dog . "//
+ " ?pig <http://www.w3.org/2000/01/rdf-schema#label> ?duck "//
+ "}";//
final String indexSparqlString2 = ""//
+ "SELECT ?o ?f ?e ?c ?l " //
+ "{" //
+ " ?e <uri:talksTo> ?o . "//
+ " ?o <http://www.w3.org/2000/01/rdf-schema#label> ?l. "//
+ " ?c a ?f . " //
+ "}";//
final String indexSparqlString3 = ""//
+ "SELECT ?wolf ?sheep ?chicken " //
+ "{" //
+ " ?wolf <uri:howlsAt> ?sheep . "//
+ " ?sheep <uri:subType> ?chicken. "//
+ "}";//
final String queryString = ""//
+ "SELECT ?c ?l ?f ?o " //
+ "{" //
+ " <uri:entity> a ?c . "//
+ " <uri:entity> <http://www.w3.org/2000/01/rdf-schema#label> ?l. "//
+ " <uri:entity> <uri:talksTo> ?o . "//
+ " ?o <http://www.w3.org/2000/01/rdf-schema#label> ?l. "//
+ " ?c a ?f . " //
+ " <uri:entity> <uri:howlsAt> ?f. "//
+ " ?f <uri:subType> <uri:obj>. "//
+ "}";//
PcjIntegrationTestingUtil.createAndPopulatePcj(conn, accCon, tablename + 1,
indexSparqlString, new String[] { "dog", "pig", "duck" },
Optional.<PcjVarOrderFactory> absent());
PcjIntegrationTestingUtil.createAndPopulatePcj(conn, accCon, tablename + 2,
indexSparqlString2, new String[] { "o", "f", "e", "c", "l" },
Optional.<PcjVarOrderFactory> absent());
PcjIntegrationTestingUtil.createAndPopulatePcj(conn, accCon, tablename + 3,
indexSparqlString3,
new String[] { "wolf", "sheep", "chicken" },
Optional.<PcjVarOrderFactory> absent());
final CountingResultHandler crh1 = new CountingResultHandler();
final CountingResultHandler crh2 = new CountingResultHandler();
conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString)
.evaluate(crh1);
PcjIntegrationTestingUtil.deleteCoreRyaTables(accCon, prefix);
pcjConn.prepareTupleQuery(QueryLanguage.SPARQL, queryString).evaluate(
crh2);
Assert.assertEquals(crh1.getCount(), crh2.getCount());
}
@Test
public void testEvaluateFilterInstantiate() throws RepositoryException,
PcjException, MalformedQueryException, SailException,
QueryEvaluationException, TableNotFoundException,
TupleQueryResultHandlerException, AccumuloException,
AccumuloSecurityException {
final URI e1 = new URIImpl("uri:e1");
final URI e2 = new URIImpl("uri:e2");
final URI e3 = new URIImpl("uri:e3");
final URI f1 = new URIImpl("uri:f1");
final URI f2 = new URIImpl("uri:f2");
final URI f3 = new URIImpl("uri:f3");
final URI g1 = new URIImpl("uri:g1");
final URI g2 = new URIImpl("uri:g2");
final URI g3 = new URIImpl("uri:g3");
conn.add(e1, talksTo, f1);
conn.add(f1, talksTo, g1);
conn.add(g1, talksTo, e1);
conn.add(e2, talksTo, f2);
conn.add(f2, talksTo, g2);
conn.add(g2, talksTo, e2);
conn.add(e3, talksTo, f3);
conn.add(f3, talksTo, g3);
conn.add(g3, talksTo, e3);
final String queryString = ""//
+ "SELECT ?x ?y ?z " //
+ "{" //
+ "Filter(?x = <uri:e1>) . " //
+ " ?x <uri:talksTo> ?y. " //<|fim▁hole|> final String indexSparqlString = ""//
+ "SELECT ?a ?b ?c ?d " //
+ "{" //
+ "Filter(?a = ?d) . " //
+ " ?a <uri:talksTo> ?b. " //
+ " ?b <uri:talksTo> ?c. " //
+ " ?c <uri:talksTo> ?d. " //
+ "}";//
PcjIntegrationTestingUtil.createAndPopulatePcj(conn, accCon, tablename + 1,
indexSparqlString, new String[] { "a", "b", "c", "d" },
Optional.<PcjVarOrderFactory> absent());
final CountingResultHandler crh1 = new CountingResultHandler();
final CountingResultHandler crh2 = new CountingResultHandler();
conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString)
.evaluate(crh1);
PcjIntegrationTestingUtil.deleteCoreRyaTables(accCon, prefix);
pcjConn.prepareTupleQuery(QueryLanguage.SPARQL, queryString).evaluate(crh2);
Assert.assertEquals(crh1.getCount(), crh2.getCount());
}
@Test
public void testEvaluateCompoundFilterInstantiate()
throws RepositoryException, PcjException, MalformedQueryException,
SailException, QueryEvaluationException,
TableNotFoundException,
TupleQueryResultHandlerException, AccumuloException, AccumuloSecurityException {
final URI e1 = new URIImpl("uri:e1");
final URI f1 = new URIImpl("uri:f1");
conn.add(e1, talksTo, e1);
conn.add(e1, talksTo, f1);
conn.add(f1, talksTo, e1);
final String queryString = ""//
+ "SELECT ?x ?y ?z " //
+ "{" //
+ "Filter(?x = <uri:e1> && ?y = <uri:e1>) . " //
+ " ?x <uri:talksTo> ?y. " //
+ " ?y <uri:talksTo> ?z. " //
+ " ?z <uri:talksTo> <uri:e1>. " //
+ "}";//
final String indexSparqlString = ""//
+ "SELECT ?a ?b ?c ?d " //
+ "{" //
+ "Filter(?a = ?d && ?b = ?d) . " //
+ " ?a <uri:talksTo> ?b. " //
+ " ?b <uri:talksTo> ?c. " //
+ " ?c <uri:talksTo> ?d. " //
+ "}";//
PcjIntegrationTestingUtil.createAndPopulatePcj(conn, accCon, tablename + 1,
indexSparqlString, new String[] { "a", "b", "c", "d" },
Optional.<PcjVarOrderFactory> absent());
final CountingResultHandler crh1 = new CountingResultHandler();
final CountingResultHandler crh2 = new CountingResultHandler();
conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString)
.evaluate(crh1);
PcjIntegrationTestingUtil.deleteCoreRyaTables(accCon, prefix);
pcjConn.prepareTupleQuery(QueryLanguage.SPARQL, queryString).evaluate(
crh2);
Assert.assertEquals(2, crh1.getCount());
Assert.assertEquals(crh1.getCount(), crh2.getCount());
}
public static class CountingResultHandler implements
TupleQueryResultHandler {
private int count = 0;
public int getCount() {
return count;
}
public void resetCount() {
count = 0;
}
@Override
public void startQueryResult(final List<String> arg0)
throws TupleQueryResultHandlerException {
}
@Override
public void handleSolution(final BindingSet arg0)
throws TupleQueryResultHandlerException {
count++;
}
@Override
public void endQueryResult() throws TupleQueryResultHandlerException {
}
@Override
public void handleBoolean(final boolean arg0)
throws QueryResultHandlerException {
}
@Override
public void handleLinks(final List<String> arg0)
throws QueryResultHandlerException {
}
}
}<|fim▁end|> | + " ?y <uri:talksTo> ?z. " //
+ " ?z <uri:talksTo> <uri:e1>. " //
+ "}";//
|
<|file_name|>project.go<|end_file_name|><|fim▁begin|>package rancher
import (
"github.com/Sirupsen/logrus"
"github.com/docker/libcompose/logger"
"github.com/docker/libcompose/lookup"
"github.com/docker/libcompose/project"
)
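// Typical usage (a sketch; assumes a populated Context):
//	p, err := NewProject(ctx)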
func NewProject(context *Context) (*project.Project, error) {
context.ConfigLookup = &lookup.FileConfigLookup{}
context.EnvironmentLookup = &lookup.OsEnvLookup{}
context.LoggerFactory = logger.NewColorLoggerFactory()
context.ServiceFactory = &RancherServiceFactory{
Context: context,
}
p := project.NewProject(&context.Context)
err := p.Parse()
if err != nil {
return nil, err<|fim▁hole|> }
if err = context.open(); err != nil {
logrus.Errorf("Failed to open project %s: %v", p.Name, err)
return nil, err
}
context.SidekickInfo = NewSidekickInfo(p)
return p, err
}<|fim▁end|> | |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>pub type clock_t = ::c_uint;
pub type suseconds_t = ::c_int;
pub type dev_t = u64;
pub type blksize_t = ::int32_t;
pub type fsblkcnt_t = ::uint64_t;
pub type fsfilcnt_t = ::uint64_t;
pub type idtype_t = ::c_int;
s! {
pub struct aiocb {
pub aio_offset: ::off_t,
pub aio_buf: *mut ::c_void,
pub aio_nbytes: ::size_t,
pub aio_fildes: ::c_int,
pub aio_lio_opcode: ::c_int,
pub aio_reqprio: ::c_int,
pub aio_sigevent: ::sigevent,
_state: ::c_int,
_errno: ::c_int,
_retval: ::ssize_t
}
pub struct dirent {
pub d_fileno: ::ino_t,
pub d_reclen: u16,
pub d_namlen: u16,
pub d_type: u8,
pub d_name: [::c_char; 512],
}
pub struct glob_t {
pub gl_pathc: ::size_t,
pub gl_matchc: ::size_t,
pub gl_offs: ::size_t,
pub gl_flags: ::c_int,
pub gl_pathv: *mut *mut ::c_char,
__unused3: *mut ::c_void,
__unused4: *mut ::c_void,
__unused5: *mut ::c_void,
__unused6: *mut ::c_void,
__unused7: *mut ::c_void,
__unused8: *mut ::c_void,
}
pub struct sigevent {
pub sigev_notify: ::c_int,
pub sigev_signo: ::c_int,
pub sigev_value: ::sigval,
__unused1: *mut ::c_void, //actually a function pointer
pub sigev_notify_attributes: *mut ::c_void
}
pub struct sigset_t {
__bits: [u32; 4],
}
pub struct stat {
pub st_dev: ::dev_t,
pub st_mode: ::mode_t,
pub st_ino: ::ino_t,
pub st_nlink: ::nlink_t,
pub st_uid: ::uid_t,
pub st_gid: ::gid_t,
pub st_rdev: ::dev_t,
pub st_atime: ::time_t,
pub st_atimensec: ::c_long,
pub st_mtime: ::time_t,
pub st_mtimensec: ::c_long,
pub st_ctime: ::time_t,
pub st_ctimensec: ::c_long,
pub st_birthtime: ::time_t,
pub st_birthtimensec: ::c_long,
pub st_size: ::off_t,
pub st_blocks: ::blkcnt_t,
pub st_blksize: ::blksize_t,
pub st_flags: ::uint32_t,
pub st_gen: ::uint32_t,
pub st_spare: [::uint32_t; 2],
}
pub struct statvfs {
pub f_flag: ::c_ulong,
pub f_bsize: ::c_ulong,
pub f_frsize: ::c_ulong,
pub f_iosize: ::c_ulong,
pub f_blocks: ::fsblkcnt_t,
pub f_bfree: ::fsblkcnt_t,
pub f_bavail: ::fsblkcnt_t,
pub f_bresvd: ::fsblkcnt_t,
pub f_files: ::fsfilcnt_t,
pub f_ffree: ::fsfilcnt_t,
pub f_favail: ::fsfilcnt_t,
pub f_fresvd: ::fsfilcnt_t,
pub f_syncreads: ::uint64_t,
pub f_syncwrites: ::uint64_t,
pub f_asyncreads: ::uint64_t,
pub f_asyncwrites: ::uint64_t,
pub f_fsidx: ::fsid_t,
pub f_fsid: ::c_ulong,
pub f_namemax: ::c_ulong,
pub f_owner: ::uid_t,
pub f_spare: [::uint32_t; 4],
pub f_fstypename: [::c_char; 32],
pub f_mntonname: [::c_char; 1024],
pub f_mntfromname: [::c_char; 1024],
}
pub struct addrinfo {
pub ai_flags: ::c_int,
pub ai_family: ::c_int,
pub ai_socktype: ::c_int,
pub ai_protocol: ::c_int,
pub ai_addrlen: ::socklen_t,
pub ai_canonname: *mut ::c_char,
pub ai_addr: *mut ::sockaddr,
pub ai_next: *mut ::addrinfo,
}
pub struct sockaddr_storage {
pub ss_len: u8,
pub ss_family: ::sa_family_t,
__ss_pad1: [u8; 6],
__ss_pad2: i64,
__ss_pad3: [u8; 112],
}
pub struct siginfo_t {
pub si_signo: ::c_int,
pub si_code: ::c_int,
pub si_errno: ::c_int,
__pad1: ::c_int,
pub si_addr: *mut ::c_void,
__pad2: [u64; 13],
}
pub struct pthread_attr_t {
pta_magic: ::c_uint,
pta_flags: ::c_int,
pta_private: *mut ::c_void,
}
pub struct pthread_mutex_t {
ptm_magic: ::c_uint,
ptm_errorcheck: ::c_uchar,
ptm_pad1: [u8; 3],
ptm_interlock: ::c_uchar,
ptm_pad2: [u8; 3],
ptm_owner: ::pthread_t,
ptm_waiters: *mut u8,
ptm_recursed: ::c_uint,
ptm_spare2: *mut ::c_void,
}
pub struct pthread_mutexattr_t {
ptma_magic: ::c_uint,
ptma_private: *mut ::c_void,
}
pub struct pthread_rwlockattr_t {
ptra_magic: ::c_uint,
ptra_private: *mut ::c_void,
}
pub struct pthread_cond_t {
ptc_magic: ::c_uint,
ptc_lock: ::c_uchar,
ptc_waiters_first: *mut u8,
ptc_waiters_last: *mut u8,
ptc_mutex: *mut ::pthread_mutex_t,
ptc_private: *mut ::c_void,
}
pub struct pthread_condattr_t {
ptca_magic: ::c_uint,
ptca_private: *mut ::c_void,
}
pub struct pthread_rwlock_t {
ptr_magic: ::c_uint,
ptr_interlock: ::c_uchar,
ptr_rblocked_first: *mut u8,
ptr_rblocked_last: *mut u8,
ptr_wblocked_first: *mut u8,
ptr_wblocked_last: *mut u8,
ptr_nreaders: ::c_uint,
ptr_owner: ::pthread_t,
ptr_private: *mut ::c_void,
}
pub struct kevent {
pub ident: ::uintptr_t,
pub filter: ::uint32_t,
pub flags: ::uint32_t,
pub fflags: ::uint32_t,
pub data: ::int64_t,
pub udata: ::intptr_t,
}
pub struct dqblk {
pub dqb_bhardlimit: ::uint32_t,
pub dqb_bsoftlimit: ::uint32_t,
pub dqb_curblocks: ::uint32_t,
pub dqb_ihardlimit: ::uint32_t,
pub dqb_isoftlimit: ::uint32_t,
pub dqb_curinodes: ::uint32_t,
pub dqb_btime: ::int32_t,
pub dqb_itime: ::int32_t,
}
pub struct Dl_info {
pub dli_fname: *const ::c_char,
pub dli_fbase: *mut ::c_void,
pub dli_sname: *const ::c_char,
pub dli_saddr: *const ::c_void,
}
pub struct lconv {
pub decimal_point: *mut ::c_char,
pub thousands_sep: *mut ::c_char,
pub grouping: *mut ::c_char,
pub int_curr_symbol: *mut ::c_char,
pub currency_symbol: *mut ::c_char,
pub mon_decimal_point: *mut ::c_char,
pub mon_thousands_sep: *mut ::c_char,
pub mon_grouping: *mut ::c_char,
pub positive_sign: *mut ::c_char,
pub negative_sign: *mut ::c_char,
pub int_frac_digits: ::c_char,
pub frac_digits: ::c_char,
pub p_cs_precedes: ::c_char,
pub p_sep_by_space: ::c_char,
pub n_cs_precedes: ::c_char,
pub n_sep_by_space: ::c_char,
pub p_sign_posn: ::c_char,
pub n_sign_posn: ::c_char,
pub int_p_cs_precedes: ::c_char,
pub int_n_cs_precedes: ::c_char,
pub int_p_sep_by_space: ::c_char,
pub int_n_sep_by_space: ::c_char,
pub int_p_sign_posn: ::c_char,
pub int_n_sign_posn: ::c_char,
}
pub struct if_data {
pub ifi_type: ::c_uchar,
pub ifi_addrlen: ::c_uchar,
pub ifi_hdrlen: ::c_uchar,
pub ifi_link_state: ::c_int,
pub ifi_mtu: u64,
pub ifi_metric: u64,
pub ifi_baudrate: u64,
pub ifi_ipackets: u64,
pub ifi_ierrors: u64,
pub ifi_opackets: u64,
pub ifi_oerrors: u64,
pub ifi_collisions: u64,
pub ifi_ibytes: u64,
pub ifi_obytes: u64,
pub ifi_imcasts: u64,
pub ifi_omcasts: u64,
pub ifi_iqdrops: u64,
pub ifi_noproto: u64,
pub ifi_lastchange: ::timespec,
}
pub struct if_msghdr {
pub ifm_msglen: ::c_ushort,
pub ifm_version: ::c_uchar,
pub ifm_type: ::c_uchar,
pub ifm_addrs: ::c_int,
pub ifm_flags: ::c_int,
pub ifm_index: ::c_ushort,
pub ifm_data: if_data,
}
}
pub const AT_FDCWD: ::c_int = -100;
pub const AT_EACCESS: ::c_int = 0x100;
pub const AT_SYMLINK_NOFOLLOW: ::c_int = 0x200;
pub const AT_SYMLINK_FOLLOW: ::c_int = 0x400;
pub const AT_REMOVEDIR: ::c_int = 0x800;
pub const LC_COLLATE_MASK: ::c_int = (1 << ::LC_COLLATE);
pub const LC_CTYPE_MASK: ::c_int = (1 << ::LC_CTYPE);
pub const LC_MONETARY_MASK: ::c_int = (1 << ::LC_MONETARY);
pub const LC_NUMERIC_MASK: ::c_int = (1 << ::LC_NUMERIC);
pub const LC_TIME_MASK: ::c_int = (1 << ::LC_TIME);
pub const LC_MESSAGES_MASK: ::c_int = (1 << ::LC_MESSAGES);
pub const LC_ALL_MASK: ::c_int = !0;
pub const ERA: ::nl_item = 52;
pub const ERA_D_FMT: ::nl_item = 53;
pub const ERA_D_T_FMT: ::nl_item = 54;
pub const ERA_T_FMT: ::nl_item = 55;
pub const ALT_DIGITS: ::nl_item = 56;
pub const O_CLOEXEC: ::c_int = 0x400000;
pub const O_ALT_IO: ::c_int = 0x40000;
pub const O_NOSIGPIPE: ::c_int = 0x1000000;
pub const O_SEARCH: ::c_int = 0x800000;
pub const O_DIRECTORY: ::c_int = 0x200000;
pub const O_DIRECT : ::c_int = 0x00080000;
pub const O_RSYNC : ::c_int = 0x00020000;
pub const MS_SYNC : ::c_int = 0x4;
pub const MS_INVALIDATE : ::c_int = 0x2;
pub const RLIM_NLIMITS: ::c_int = 12;
pub const ENOATTR : ::c_int = 93;
pub const EILSEQ : ::c_int = 85;
pub const EOVERFLOW : ::c_int = 84;
pub const ECANCELED : ::c_int = 87;
pub const EIDRM : ::c_int = 82;
pub const ENOMSG : ::c_int = 83;
pub const ENOTSUP : ::c_int = 86;
pub const ELAST : ::c_int = 96;
pub const F_DUPFD_CLOEXEC : ::c_int = 12;
pub const F_CLOSEM: ::c_int = 10;
pub const F_GETNOSIGPIPE: ::c_int = 13;
pub const F_SETNOSIGPIPE: ::c_int = 14;
pub const F_MAXFD: ::c_int = 11;
pub const IPV6_JOIN_GROUP: ::c_int = 12;
pub const IPV6_LEAVE_GROUP: ::c_int = 13;
pub const SOCK_CONN_DGRAM: ::c_int = 6;
pub const SOCK_DCCP: ::c_int = SOCK_CONN_DGRAM;
pub const SOCK_NOSIGPIPE: ::c_int = 0x40000000;
pub const SOCK_FLAGS_MASK: ::c_int = 0xf0000000;
pub const SO_SNDTIMEO: ::c_int = 0x100b;
pub const SO_RCVTIMEO: ::c_int = 0x100c;
pub const SO_ACCEPTFILTER: ::c_int = 0x1000;
pub const SO_TIMESTAMP: ::c_int = 0x2000;
pub const SO_OVERFLOWED: ::c_int = 0x1009;
pub const SO_NOHEADER: ::c_int = 0x100a;
// https://github.com/NetBSD/src/blob/trunk/sys/net/if.h#L373
pub const IFF_UP: ::c_int = 0x0001; // interface is up
pub const IFF_BROADCAST: ::c_int = 0x0002; // broadcast address valid
pub const IFF_DEBUG: ::c_int = 0x0004; // turn on debugging
pub const IFF_LOOPBACK: ::c_int = 0x0008; // is a loopback net
pub const IFF_POINTOPOINT: ::c_int = 0x0010; // interface is point-to-point link
pub const IFF_NOTRAILERS: ::c_int = 0x0020; // avoid use of trailers
pub const IFF_RUNNING: ::c_int = 0x0040; // resources allocated
pub const IFF_NOARP: ::c_int = 0x0080; // no address resolution protocol
pub const IFF_PROMISC: ::c_int = 0x0100; // receive all packets
pub const IFF_ALLMULTI: ::c_int = 0x0200; // receive all multicast packets
pub const IFF_OACTIVE: ::c_int = 0x0400; // transmission in progress
pub const IFF_SIMPLEX: ::c_int = 0x0800; // can't hear own transmissions
pub const IFF_LINK0: ::c_int = 0x1000; // per link layer defined bit
pub const IFF_LINK1: ::c_int = 0x2000; // per link layer defined bit
pub const IFF_LINK2: ::c_int = 0x4000; // per link layer defined bit
pub const IFF_MULTICAST: ::c_int = 0x8000; // supports multicast
// sys/netinet/in.h
// Protocols (RFC 1700)
// NOTE: These are in addition to the constants defined in src/unix/mod.rs
// IPPROTO_IP defined in src/unix/mod.rs
/// Hop-by-hop option header
pub const IPPROTO_HOPOPTS: ::c_int = 0;
// IPPROTO_ICMP defined in src/unix/mod.rs
/// group mgmt protocol
pub const IPPROTO_IGMP: ::c_int = 2;
/// gateway^2 (deprecated)
pub const IPPROTO_GGP: ::c_int = 3;
/// for compatibility
pub const IPPROTO_IPIP: ::c_int = 4;
// IPPROTO_TCP defined in src/unix/mod.rs
/// exterior gateway protocol
pub const IPPROTO_EGP: ::c_int = 8;
/// pup
pub const IPPROTO_PUP: ::c_int = 12;
// IPPROTO_UDP defined in src/unix/mod.rs
/// xns idp
pub const IPPROTO_IDP: ::c_int = 22;
/// tp-4 w/ class negotiation
pub const IPPROTO_TP: ::c_int = 29;
/// DCCP
pub const IPPROTO_DCCP: ::c_int = 33;
// IPPROTO_IPV6 defined in src/unix/mod.rs
/// IP6 routing header
pub const IPPROTO_ROUTING: ::c_int = 43;
/// IP6 fragmentation header
pub const IPPROTO_FRAGMENT: ::c_int = 44;
/// resource reservation
pub const IPPROTO_RSVP: ::c_int = 46;
/// General Routing Encap.
pub const IPPROTO_GRE: ::c_int = 47;
/// IP6 Encap Sec. Payload
pub const IPPROTO_ESP: ::c_int = 50;
/// IP6 Auth Header
pub const IPPROTO_AH: ::c_int = 51;
/// IP Mobility RFC 2004
pub const IPPROTO_MOBILE: ::c_int = 55;
/// IPv6 ICMP
pub const IPPROTO_IPV6_ICMP: ::c_int = 58;
// IPPROTO_ICMPV6 defined in src/unix/mod.rs
/// IP6 no next header
pub const IPPROTO_NONE: ::c_int = 59;
/// IP6 destination option
pub const IPPROTO_DSTOPTS: ::c_int = 60;
/// ISO cnlp
pub const IPPROTO_EON: ::c_int = 80;
/// Ethernet-in-IP
pub const IPPROTO_ETHERIP: ::c_int = 97;
/// encapsulation header
pub const IPPROTO_ENCAP: ::c_int = 98;
/// Protocol indep. multicast
pub const IPPROTO_PIM: ::c_int = 103;
/// IP Payload Comp. Protocol
pub const IPPROTO_IPCOMP: ::c_int = 108;
/// VRRP RFC 2338
pub const IPPROTO_VRRP: ::c_int = 112;
/// Common Address Resolution Protocol
pub const IPPROTO_CARP: ::c_int = 112;
/// L2TPv3
// TEMP: Disabled for now; this constant was added to NetBSD on 2017-02-16,
// but isn't yet supported by the NetBSD rumprun kernel image used for
// libc testing.
//pub const IPPROTO_L2TP: ::c_int = 115;
/// SCTP
pub const IPPROTO_SCTP: ::c_int = 132;
/// PFSYNC
pub const IPPROTO_PFSYNC: ::c_int = 240;
pub const IPPROTO_MAX: ::c_int = 256;
/// last return value of *_input(), meaning "all work for this packet is done".
pub const IPPROTO_DONE: ::c_int = 257;
/// sysctl placeholder for (FAST_)IPSEC
pub const CTL_IPPROTO_IPSEC: ::c_int = 258;
pub const AF_OROUTE: ::c_int = 17;
pub const AF_ARP: ::c_int = 28;
pub const pseudo_AF_KEY: ::c_int = 29;
pub const pseudo_AF_HDRCMPLT: ::c_int = 30;
pub const AF_BLUETOOTH: ::c_int = 31;
pub const AF_IEEE80211: ::c_int = 32;
pub const AF_MPLS: ::c_int = 33;
pub const AF_ROUTE: ::c_int = 34;
pub const AF_MAX: ::c_int = 35;
pub const NET_MAXID: ::c_int = AF_MAX;
pub const NET_RT_DUMP: ::c_int = 1;
pub const NET_RT_FLAGS: ::c_int = 2;
pub const NET_RT_OOIFLIST: ::c_int = 3;
pub const NET_RT_OIFLIST: ::c_int = 4;
pub const NET_RT_IFLIST: ::c_int = 5;
pub const NET_RT_MAXID: ::c_int = 6;
pub const PF_OROUTE: ::c_int = AF_OROUTE;
pub const PF_ARP: ::c_int = AF_ARP;
pub const PF_KEY: ::c_int = pseudo_AF_KEY;
pub const PF_BLUETOOTH: ::c_int = AF_BLUETOOTH;
pub const PF_MPLS: ::c_int = AF_MPLS;
pub const PF_ROUTE: ::c_int = AF_ROUTE;
pub const PF_MAX: ::c_int = AF_MAX;
pub const MSG_NBIO: ::c_int = 0x1000;
pub const MSG_WAITFORONE: ::c_int = 0x2000;
pub const MSG_NOTIFICATION: ::c_int = 0x4000;
pub const SCM_TIMESTAMP: ::c_int = 0x08;
pub const SCM_CREDS: ::c_int = 0x10;
pub const O_DSYNC : ::c_int = 0x10000;
pub const MAP_RENAME : ::c_int = 0x20;
pub const MAP_NORESERVE : ::c_int = 0x40;
pub const MAP_HASSEMAPHORE : ::c_int = 0x200;
pub const MAP_WIRED: ::c_int = 0x800;
pub const DCCP_TYPE_REQUEST: ::c_int = 0;
pub const DCCP_TYPE_RESPONSE: ::c_int = 1;
pub const DCCP_TYPE_DATA: ::c_int = 2;
pub const DCCP_TYPE_ACK: ::c_int = 3;
pub const DCCP_TYPE_DATAACK: ::c_int = 4;
pub const DCCP_TYPE_CLOSEREQ: ::c_int = 5;<|fim▁hole|>pub const DCCP_TYPE_MOVE: ::c_int = 8;
pub const DCCP_FEATURE_CC: ::c_int = 1;
pub const DCCP_FEATURE_ECN: ::c_int = 2;
pub const DCCP_FEATURE_ACKRATIO: ::c_int = 3;
pub const DCCP_FEATURE_ACKVECTOR: ::c_int = 4;
pub const DCCP_FEATURE_MOBILITY: ::c_int = 5;
pub const DCCP_FEATURE_LOSSWINDOW: ::c_int = 6;
pub const DCCP_FEATURE_CONN_NONCE: ::c_int = 8;
pub const DCCP_FEATURE_IDENTREG: ::c_int = 7;
pub const DCCP_OPT_PADDING: ::c_int = 0;
pub const DCCP_OPT_DATA_DISCARD: ::c_int = 1;
pub const DCCP_OPT_SLOW_RECV: ::c_int = 2;
pub const DCCP_OPT_BUF_CLOSED: ::c_int = 3;
pub const DCCP_OPT_CHANGE_L: ::c_int = 32;
pub const DCCP_OPT_CONFIRM_L: ::c_int = 33;
pub const DCCP_OPT_CHANGE_R: ::c_int = 34;
pub const DCCP_OPT_CONFIRM_R: ::c_int = 35;
pub const DCCP_OPT_INIT_COOKIE: ::c_int = 36;
pub const DCCP_OPT_NDP_COUNT: ::c_int = 37;
pub const DCCP_OPT_ACK_VECTOR0: ::c_int = 38;
pub const DCCP_OPT_ACK_VECTOR1: ::c_int = 39;
pub const DCCP_OPT_RECV_BUF_DROPS: ::c_int = 40;
pub const DCCP_OPT_TIMESTAMP: ::c_int = 41;
pub const DCCP_OPT_TIMESTAMP_ECHO: ::c_int = 42;
pub const DCCP_OPT_ELAPSEDTIME: ::c_int = 43;
pub const DCCP_OPT_DATACHECKSUM: ::c_int = 44;
pub const DCCP_REASON_UNSPEC: ::c_int = 0;
pub const DCCP_REASON_CLOSED: ::c_int = 1;
pub const DCCP_REASON_INVALID: ::c_int = 2;
pub const DCCP_REASON_OPTION_ERR: ::c_int = 3;
pub const DCCP_REASON_FEA_ERR: ::c_int = 4;
pub const DCCP_REASON_CONN_REF: ::c_int = 5;
pub const DCCP_REASON_BAD_SNAME: ::c_int = 6;
pub const DCCP_REASON_BAD_COOKIE: ::c_int = 7;
pub const DCCP_REASON_INV_MOVE: ::c_int = 8;
pub const DCCP_REASON_UNANSW_CH: ::c_int = 10;
pub const DCCP_REASON_FRUITLESS_NEG: ::c_int = 11;
pub const DCCP_CCID: ::c_int = 1;
pub const DCCP_CSLEN: ::c_int = 2;
pub const DCCP_MAXSEG: ::c_int = 4;
pub const DCCP_SERVICE: ::c_int = 8;
pub const DCCP_NDP_LIMIT: ::c_int = 16;
pub const DCCP_SEQ_NUM_LIMIT: ::c_int = 16777216;
pub const DCCP_MAX_OPTIONS: ::c_int = 32;
pub const DCCP_MAX_PKTS: ::c_int = 100;
pub const _PC_LINK_MAX : ::c_int = 1;
pub const _PC_MAX_CANON : ::c_int = 2;
pub const _PC_MAX_INPUT : ::c_int = 3;
pub const _PC_NAME_MAX : ::c_int = 4;
pub const _PC_PATH_MAX : ::c_int = 5;
pub const _PC_PIPE_BUF : ::c_int = 6;
pub const _PC_CHOWN_RESTRICTED : ::c_int = 7;
pub const _PC_NO_TRUNC : ::c_int = 8;
pub const _PC_VDISABLE : ::c_int = 9;
pub const _PC_SYNC_IO : ::c_int = 10;
pub const _PC_FILESIZEBITS : ::c_int = 11;
pub const _PC_SYMLINK_MAX : ::c_int = 12;
pub const _PC_2_SYMLINKS : ::c_int = 13;
pub const _PC_ACL_EXTENDED : ::c_int = 14;
pub const _PC_MIN_HOLE_SIZE : ::c_int = 15;
pub const _SC_SYNCHRONIZED_IO : ::c_int = 31;
pub const _SC_IOV_MAX : ::c_int = 32;
pub const _SC_MAPPED_FILES : ::c_int = 33;
pub const _SC_MEMLOCK : ::c_int = 34;
pub const _SC_MEMLOCK_RANGE : ::c_int = 35;
pub const _SC_MEMORY_PROTECTION : ::c_int = 36;
pub const _SC_LOGIN_NAME_MAX : ::c_int = 37;
pub const _SC_MONOTONIC_CLOCK : ::c_int = 38;
pub const _SC_CLK_TCK : ::c_int = 39;
pub const _SC_ATEXIT_MAX : ::c_int = 40;
pub const _SC_THREADS : ::c_int = 41;
pub const _SC_SEMAPHORES : ::c_int = 42;
pub const _SC_BARRIERS : ::c_int = 43;
pub const _SC_TIMERS : ::c_int = 44;
pub const _SC_SPIN_LOCKS : ::c_int = 45;
pub const _SC_READER_WRITER_LOCKS : ::c_int = 46;
pub const _SC_GETGR_R_SIZE_MAX : ::c_int = 47;
pub const _SC_GETPW_R_SIZE_MAX : ::c_int = 48;
pub const _SC_CLOCK_SELECTION : ::c_int = 49;
pub const _SC_ASYNCHRONOUS_IO : ::c_int = 50;
pub const _SC_AIO_LISTIO_MAX : ::c_int = 51;
pub const _SC_AIO_MAX : ::c_int = 52;
pub const _SC_MESSAGE_PASSING : ::c_int = 53;
pub const _SC_MQ_OPEN_MAX : ::c_int = 54;
pub const _SC_MQ_PRIO_MAX : ::c_int = 55;
pub const _SC_PRIORITY_SCHEDULING : ::c_int = 56;
pub const _SC_THREAD_DESTRUCTOR_ITERATIONS : ::c_int = 57;
pub const _SC_THREAD_KEYS_MAX : ::c_int = 58;
pub const _SC_THREAD_STACK_MIN : ::c_int = 59;
pub const _SC_THREAD_THREADS_MAX : ::c_int = 60;
pub const _SC_THREAD_ATTR_STACKADDR : ::c_int = 61;
pub const _SC_THREAD_ATTR_STACKSIZE : ::c_int = 62;
pub const _SC_THREAD_PRIORITY_SCHEDULING : ::c_int = 63;
pub const _SC_THREAD_PRIO_INHERIT : ::c_int = 64;
pub const _SC_THREAD_PRIO_PROTECT : ::c_int = 65;
pub const _SC_THREAD_PROCESS_SHARED : ::c_int = 66;
pub const _SC_THREAD_SAFE_FUNCTIONS : ::c_int = 67;
pub const _SC_TTY_NAME_MAX : ::c_int = 68;
pub const _SC_HOST_NAME_MAX : ::c_int = 69;
pub const _SC_PASS_MAX : ::c_int = 70;
pub const _SC_REGEXP : ::c_int = 71;
pub const _SC_SHELL : ::c_int = 72;
pub const _SC_SYMLOOP_MAX : ::c_int = 73;
pub const _SC_V6_ILP32_OFF32 : ::c_int = 74;
pub const _SC_V6_ILP32_OFFBIG : ::c_int = 75;
pub const _SC_V6_LP64_OFF64 : ::c_int = 76;
pub const _SC_V6_LPBIG_OFFBIG : ::c_int = 77;
pub const _SC_2_PBS : ::c_int = 80;
pub const _SC_2_PBS_ACCOUNTING : ::c_int = 81;
pub const _SC_2_PBS_CHECKPOINT : ::c_int = 82;
pub const _SC_2_PBS_LOCATE : ::c_int = 83;
pub const _SC_2_PBS_MESSAGE : ::c_int = 84;
pub const _SC_2_PBS_TRACK : ::c_int = 85;
pub const _SC_SPAWN : ::c_int = 86;
pub const _SC_SHARED_MEMORY_OBJECTS : ::c_int = 87;
pub const _SC_TIMER_MAX : ::c_int = 88;
pub const _SC_SEM_NSEMS_MAX : ::c_int = 89;
pub const _SC_CPUTIME : ::c_int = 90;
pub const _SC_THREAD_CPUTIME : ::c_int = 91;
pub const _SC_DELAYTIMER_MAX : ::c_int = 92;
// These two constants will be supported in NetBSD 8.0
// pub const _SC_SIGQUEUE_MAX : ::c_int = 93;
// pub const _SC_REALTIME_SIGNALS : ::c_int = 94;
pub const _SC_PHYS_PAGES : ::c_int = 121;
pub const _SC_NPROCESSORS_CONF : ::c_int = 1001;
pub const _SC_NPROCESSORS_ONLN : ::c_int = 1002;
pub const _SC_SCHED_RT_TS : ::c_int = 2001;
pub const _SC_SCHED_PRI_MIN : ::c_int = 2002;
pub const _SC_SCHED_PRI_MAX : ::c_int = 2003;
pub const FD_SETSIZE: usize = 0x100;
pub const ST_NOSUID: ::c_ulong = 8;
pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
ptm_magic: 0x33330003,
ptm_errorcheck: 0,
ptm_interlock: 0,
ptm_waiters: 0 as *mut _,
ptm_owner: 0,
ptm_pad1: [0; 3],
ptm_pad2: [0; 3],
ptm_recursed: 0,
ptm_spare2: 0 as *mut _,
};
pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
ptc_magic: 0x55550005,
ptc_lock: 0,
ptc_waiters_first: 0 as *mut _,
ptc_waiters_last: 0 as *mut _,
ptc_mutex: 0 as *mut _,
ptc_private: 0 as *mut _,
};
pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t {
ptr_magic: 0x99990009,
ptr_interlock: 0,
ptr_rblocked_first: 0 as *mut _,
ptr_rblocked_last: 0 as *mut _,
ptr_wblocked_first: 0 as *mut _,
ptr_wblocked_last: 0 as *mut _,
ptr_nreaders: 0,
ptr_owner: 0,
ptr_private: 0 as *mut _,
};
pub const PTHREAD_MUTEX_NORMAL: ::c_int = 0;
pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 1;
pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 2;
pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_NORMAL;
pub const EVFILT_AIO: ::uint32_t = 2;
pub const EVFILT_PROC: ::uint32_t = 4;
pub const EVFILT_READ: ::uint32_t = 0;
pub const EVFILT_SIGNAL: ::uint32_t = 5;
pub const EVFILT_TIMER: ::uint32_t = 6;
pub const EVFILT_VNODE: ::uint32_t = 3;
pub const EVFILT_WRITE: ::uint32_t = 1;
pub const EV_ADD: ::uint32_t = 0x1;
pub const EV_DELETE: ::uint32_t = 0x2;
pub const EV_ENABLE: ::uint32_t = 0x4;
pub const EV_DISABLE: ::uint32_t = 0x8;
pub const EV_ONESHOT: ::uint32_t = 0x10;
pub const EV_CLEAR: ::uint32_t = 0x20;
pub const EV_RECEIPT: ::uint32_t = 0x40;
pub const EV_DISPATCH: ::uint32_t = 0x80;
pub const EV_FLAG1: ::uint32_t = 0x2000;
pub const EV_ERROR: ::uint32_t = 0x4000;
pub const EV_EOF: ::uint32_t = 0x8000;
pub const EV_SYSFLAGS: ::uint32_t = 0xf000;
pub const NOTE_LOWAT: ::uint32_t = 0x00000001;
pub const NOTE_DELETE: ::uint32_t = 0x00000001;
pub const NOTE_WRITE: ::uint32_t = 0x00000002;
pub const NOTE_EXTEND: ::uint32_t = 0x00000004;
pub const NOTE_ATTRIB: ::uint32_t = 0x00000008;
pub const NOTE_LINK: ::uint32_t = 0x00000010;
pub const NOTE_RENAME: ::uint32_t = 0x00000020;
pub const NOTE_REVOKE: ::uint32_t = 0x00000040;
pub const NOTE_EXIT: ::uint32_t = 0x80000000;
pub const NOTE_FORK: ::uint32_t = 0x40000000;
pub const NOTE_EXEC: ::uint32_t = 0x20000000;
pub const NOTE_PDATAMASK: ::uint32_t = 0x000fffff;
pub const NOTE_PCTRLMASK: ::uint32_t = 0xf0000000;
pub const NOTE_TRACK: ::uint32_t = 0x00000001;
pub const NOTE_TRACKERR: ::uint32_t = 0x00000002;
pub const NOTE_CHILD: ::uint32_t = 0x00000004;
pub const TMP_MAX : ::c_uint = 308915776;
pub const NI_MAXHOST: ::socklen_t = 1025;
pub const RTLD_NOLOAD: ::c_int = 0x2000;
pub const RTLD_LOCAL: ::c_int = 0x200;
pub const CTL_MAXNAME: ::c_int = 12;
pub const SYSCTL_NAMELEN: ::c_int = 32;
pub const SYSCTL_DEFSIZE: ::c_int = 8;
pub const CTLTYPE_NODE: ::c_int = 1;
pub const CTLTYPE_INT: ::c_int = 2;
pub const CTLTYPE_STRING: ::c_int = 3;
pub const CTLTYPE_QUAD: ::c_int = 4;
pub const CTLTYPE_STRUCT: ::c_int = 5;
pub const CTLTYPE_BOOL: ::c_int = 6;
pub const CTLFLAG_READONLY: ::c_int = 0x00000000;
pub const CTLFLAG_READWRITE: ::c_int = 0x00000070;
pub const CTLFLAG_ANYWRITE: ::c_int = 0x00000080;
pub const CTLFLAG_PRIVATE: ::c_int = 0x00000100;
pub const CTLFLAG_PERMANENT: ::c_int = 0x00000200;
pub const CTLFLAG_OWNDATA: ::c_int = 0x00000400;
pub const CTLFLAG_IMMEDIATE: ::c_int = 0x00000800;
pub const CTLFLAG_HEX: ::c_int = 0x00001000;
pub const CTLFLAG_ROOT: ::c_int = 0x00002000;
pub const CTLFLAG_ANYNUMBER: ::c_int = 0x00004000;
pub const CTLFLAG_HIDDEN: ::c_int = 0x00008000;
pub const CTLFLAG_ALIAS: ::c_int = 0x00010000;
pub const CTLFLAG_MMAP: ::c_int = 0x00020000;
pub const CTLFLAG_OWNDESC: ::c_int = 0x00040000;
pub const CTLFLAG_UNSIGNED: ::c_int = 0x00080000;
pub const SYSCTL_VERS_MASK: ::c_int = 0xff000000;
pub const SYSCTL_VERS_0: ::c_int = 0x00000000;
pub const SYSCTL_VERS_1: ::c_int = 0x01000000;
pub const SYSCTL_VERSION: ::c_int = SYSCTL_VERS_1;
pub const CTL_EOL: ::c_int = -1;
pub const CTL_QUERY: ::c_int = -2;
pub const CTL_CREATE: ::c_int = -3;
pub const CTL_CREATESYM: ::c_int = -4;
pub const CTL_DESTROY: ::c_int = -5;
pub const CTL_MMAP: ::c_int = -6;
pub const CTL_DESCRIBE: ::c_int = -7;
pub const CTL_UNSPEC: ::c_int = 0;
pub const CTL_KERN: ::c_int = 1;
pub const CTL_VM: ::c_int = 2;
pub const CTL_VFS: ::c_int = 3;
pub const CTL_NET: ::c_int = 4;
pub const CTL_DEBUG: ::c_int = 5;
pub const CTL_HW: ::c_int = 6;
pub const CTL_MACHDEP: ::c_int = 7;
pub const CTL_USER: ::c_int = 8;
pub const CTL_DDB: ::c_int = 9;
pub const CTL_PROC: ::c_int = 10;
pub const CTL_VENDOR: ::c_int = 11;
pub const CTL_EMUL: ::c_int = 12;
pub const CTL_SECURITY: ::c_int = 13;
pub const CTL_MAXID: ::c_int = 14;
pub const KERN_OSTYPE: ::c_int = 1;
pub const KERN_OSRELEASE: ::c_int = 2;
pub const KERN_OSREV: ::c_int = 3;
pub const KERN_VERSION: ::c_int = 4;
pub const KERN_MAXVNODES: ::c_int = 5;
pub const KERN_MAXPROC: ::c_int = 6;
pub const KERN_MAXFILES: ::c_int = 7;
pub const KERN_ARGMAX: ::c_int = 8;
pub const KERN_SECURELVL: ::c_int = 9;
pub const KERN_HOSTNAME: ::c_int = 10;
pub const KERN_HOSTID: ::c_int = 11;
pub const KERN_CLOCKRATE: ::c_int = 12;
pub const KERN_VNODE: ::c_int = 13;
pub const KERN_PROC: ::c_int = 14;
pub const KERN_FILE: ::c_int = 15;
pub const KERN_PROF: ::c_int = 16;
pub const KERN_POSIX1: ::c_int = 17;
pub const KERN_NGROUPS: ::c_int = 18;
pub const KERN_JOB_CONTROL: ::c_int = 19;
pub const KERN_SAVED_IDS: ::c_int = 20;
pub const KERN_OBOOTTIME: ::c_int = 21;
pub const KERN_DOMAINNAME: ::c_int = 22;
pub const KERN_MAXPARTITIONS: ::c_int = 23;
pub const KERN_RAWPARTITION: ::c_int = 24;
pub const KERN_NTPTIME: ::c_int = 25;
pub const KERN_TIMEX: ::c_int = 26;
pub const KERN_AUTONICETIME: ::c_int = 27;
pub const KERN_AUTONICEVAL: ::c_int = 28;
pub const KERN_RTC_OFFSET: ::c_int = 29;
pub const KERN_ROOT_DEVICE: ::c_int = 30;
pub const KERN_MSGBUFSIZE: ::c_int = 31;
pub const KERN_FSYNC: ::c_int = 32;
pub const KERN_OLDSYSVMSG: ::c_int = 33;
pub const KERN_OLDSYSVSEM: ::c_int = 34;
pub const KERN_OLDSYSVSHM: ::c_int = 35;
pub const KERN_OLDSHORTCORENAME: ::c_int = 36;
pub const KERN_SYNCHRONIZED_IO: ::c_int = 37;
pub const KERN_IOV_MAX: ::c_int = 38;
pub const KERN_MBUF: ::c_int = 39;
pub const KERN_MAPPED_FILES: ::c_int = 40;
pub const KERN_MEMLOCK: ::c_int = 41;
pub const KERN_MEMLOCK_RANGE: ::c_int = 42;
pub const KERN_MEMORY_PROTECTION: ::c_int = 43;
pub const KERN_LOGIN_NAME_MAX: ::c_int = 44;
pub const KERN_DEFCORENAME: ::c_int = 45;
pub const KERN_LOGSIGEXIT: ::c_int = 46;
pub const KERN_PROC2: ::c_int = 47;
pub const KERN_PROC_ARGS: ::c_int = 48;
pub const KERN_FSCALE: ::c_int = 49;
pub const KERN_CCPU: ::c_int = 50;
pub const KERN_CP_TIME: ::c_int = 51;
pub const KERN_OLDSYSVIPC_INFO: ::c_int = 52;
pub const KERN_MSGBUF: ::c_int = 53;
pub const KERN_CONSDEV: ::c_int = 54;
pub const KERN_MAXPTYS: ::c_int = 55;
pub const KERN_PIPE: ::c_int = 56;
pub const KERN_MAXPHYS: ::c_int = 57;
pub const KERN_SBMAX: ::c_int = 58;
pub const KERN_TKSTAT: ::c_int = 59;
pub const KERN_MONOTONIC_CLOCK: ::c_int = 60;
pub const KERN_URND: ::c_int = 61;
pub const KERN_LABELSECTOR: ::c_int = 62;
pub const KERN_LABELOFFSET: ::c_int = 63;
pub const KERN_LWP: ::c_int = 64;
pub const KERN_FORKFSLEEP: ::c_int = 65;
pub const KERN_POSIX_THREADS: ::c_int = 66;
pub const KERN_POSIX_SEMAPHORES: ::c_int = 67;
pub const KERN_POSIX_BARRIERS: ::c_int = 68;
pub const KERN_POSIX_TIMERS: ::c_int = 69;
pub const KERN_POSIX_SPIN_LOCKS: ::c_int = 70;
pub const KERN_POSIX_READER_WRITER_LOCKS: ::c_int = 71;
pub const KERN_DUMP_ON_PANIC: ::c_int = 72;
pub const KERN_SOMAXKVA: ::c_int = 73;
pub const KERN_ROOT_PARTITION: ::c_int = 74;
pub const KERN_DRIVERS: ::c_int = 75;
pub const KERN_BUF: ::c_int = 76;
pub const KERN_FILE2: ::c_int = 77;
pub const KERN_VERIEXEC: ::c_int = 78;
pub const KERN_CP_ID: ::c_int = 79;
pub const KERN_HARDCLOCK_TICKS: ::c_int = 80;
pub const KERN_ARND: ::c_int = 81;
pub const KERN_SYSVIPC: ::c_int = 82;
pub const KERN_BOOTTIME: ::c_int = 83;
pub const KERN_EVCNT: ::c_int = 84;
pub const KERN_MAXID: ::c_int = 85;
pub const KERN_PROC_ALL: ::c_int = 0;
pub const KERN_PROC_PID: ::c_int = 1;
pub const KERN_PROC_PGRP: ::c_int = 2;
pub const KERN_PROC_SESSION: ::c_int = 3;
pub const KERN_PROC_TTY: ::c_int = 4;
pub const KERN_PROC_UID: ::c_int = 5;
pub const KERN_PROC_RUID: ::c_int = 6;
pub const KERN_PROC_GID: ::c_int = 7;
pub const KERN_PROC_RGID: ::c_int = 8;
pub const KERN_PROC_ARGV: ::c_int = 1;
pub const KERN_PROC_NARGV: ::c_int = 2;
pub const KERN_PROC_ENV: ::c_int = 3;
pub const KERN_PROC_NENV: ::c_int = 4;
pub const KERN_PROC_PATHNAME: ::c_int = 5;
pub const EAI_SYSTEM: ::c_int = 11;
pub const AIO_CANCELED: ::c_int = 1;
pub const AIO_NOTCANCELED: ::c_int = 2;
pub const AIO_ALLDONE: ::c_int = 3;
pub const LIO_NOP: ::c_int = 0;
pub const LIO_WRITE: ::c_int = 1;
pub const LIO_READ: ::c_int = 2;
pub const LIO_WAIT: ::c_int = 1;
pub const LIO_NOWAIT: ::c_int = 0;
pub const SIGEV_NONE: ::c_int = 0;
pub const SIGEV_SIGNAL: ::c_int = 1;
pub const SIGEV_THREAD: ::c_int = 2;
pub const WSTOPPED: ::c_int = 0x00000002; // same as WUNTRACED
pub const WCONTINUED: ::c_int = 0x00000010;
pub const WEXITED: ::c_int = 0x000000020;
pub const WNOWAIT: ::c_int = 0x00010000;
pub const P_ALL: idtype_t = 0;
pub const P_PID: idtype_t = 1;
pub const P_PGID: idtype_t = 4;
pub const B460800: ::speed_t = 460800;
pub const B921600: ::speed_t = 921600;
pub const ONOCR: ::tcflag_t = 0x20;
pub const ONLRET: ::tcflag_t = 0x40;
pub const CDTRCTS: ::tcflag_t = 0x00020000;
pub const CHWFLOW: ::tcflag_t = ::MDMBUF | ::CRTSCTS | ::CDTRCTS;
pub const SOCK_CLOEXEC: ::c_int = 0x10000000;
pub const SOCK_NONBLOCK: ::c_int = 0x20000000;
// dirfd() is a macro on netbsd to access
// the first field of the struct where dirp points to:
// http://cvsweb.netbsd.org/bsdweb.cgi/src/include/dirent.h?rev=1.36
f! {
pub fn dirfd(dirp: *mut ::DIR) -> ::c_int {
unsafe { *(dirp as *const ::c_int) }
}
pub fn WIFCONTINUED(status: ::c_int) -> bool {
status == 0xffff
}
}
extern {
pub fn aio_read(aiocbp: *mut aiocb) -> ::c_int;
pub fn aio_write(aiocbp: *mut aiocb) -> ::c_int;
pub fn aio_fsync(op: ::c_int, aiocbp: *mut aiocb) -> ::c_int;
pub fn aio_error(aiocbp: *const aiocb) -> ::c_int;
pub fn aio_return(aiocbp: *mut aiocb) -> ::ssize_t;
#[link_name = "__aio_suspend50"]
pub fn aio_suspend(aiocb_list: *const *const aiocb, nitems: ::c_int,
timeout: *const ::timespec) -> ::c_int;
pub fn aio_cancel(fd: ::c_int, aiocbp: *mut aiocb) -> ::c_int;
pub fn lio_listio(mode: ::c_int, aiocb_list: *const *mut aiocb,
nitems: ::c_int, sevp: *mut sigevent) -> ::c_int;
pub fn lutimes(file: *const ::c_char, times: *const ::timeval) -> ::c_int;
pub fn getnameinfo(sa: *const ::sockaddr,
salen: ::socklen_t,
host: *mut ::c_char,
hostlen: ::socklen_t,
serv: *mut ::c_char,
sevlen: ::socklen_t,
flags: ::c_int) -> ::c_int;
pub fn mprotect(addr: *mut ::c_void, len: ::size_t, prot: ::c_int)
-> ::c_int;
pub fn sysctl(name: *const ::c_int,
namelen: ::c_uint,
oldp: *mut ::c_void,
oldlenp: *mut ::size_t,
newp: *const ::c_void,
newlen: ::size_t)
-> ::c_int;
pub fn sysctlbyname(name: *const ::c_char,
oldp: *mut ::c_void,
oldlenp: *mut ::size_t,
newp: *const ::c_void,
newlen: ::size_t)
-> ::c_int;
#[link_name = "__kevent50"]
pub fn kevent(kq: ::c_int,
changelist: *const ::kevent,
nchanges: ::size_t,
eventlist: *mut ::kevent,
nevents: ::size_t,
timeout: *const ::timespec) -> ::c_int;
#[link_name = "__mount50"]
pub fn mount(src: *const ::c_char,
target: *const ::c_char,
flags: ::c_int,
data: *mut ::c_void,
size: ::size_t) -> ::c_int;
pub fn ptrace(request: ::c_int,
pid: ::pid_t,
addr: *mut ::c_void,
data: ::c_int) -> ::c_int;
pub fn pthread_setname_np(t: ::pthread_t,
name: *const ::c_char,
arg: *mut ::c_void) -> ::c_int;
pub fn pthread_getattr_np(native: ::pthread_t,
attr: *mut ::pthread_attr_t) -> ::c_int;
pub fn pthread_attr_getguardsize(attr: *const ::pthread_attr_t,
guardsize: *mut ::size_t) -> ::c_int;
pub fn pthread_attr_getstack(attr: *const ::pthread_attr_t,
stackaddr: *mut *mut ::c_void,
stacksize: *mut ::size_t) -> ::c_int;
#[link_name = "__sigtimedwait50"]
pub fn sigtimedwait(set: *const sigset_t,
info: *mut siginfo_t,
timeout: *const ::timespec) -> ::c_int;
pub fn sigwaitinfo(set: *const sigset_t,
info: *mut siginfo_t) -> ::c_int;
pub fn duplocale(base: ::locale_t) -> ::locale_t;
pub fn freelocale(loc: ::locale_t);
pub fn localeconv_l(loc: ::locale_t) -> *mut lconv;
pub fn newlocale(mask: ::c_int,
locale: *const ::c_char,
base: ::locale_t) -> ::locale_t;
#[link_name = "__settimeofday50"]
pub fn settimeofday(tv: *const ::timeval, tz: *const ::c_void) -> ::c_int;
}
mod other;
pub use self::other::*;<|fim▁end|> | pub const DCCP_TYPE_CLOSE: ::c_int = 6;
pub const DCCP_TYPE_RESET: ::c_int = 7; |
<|file_name|>doctest.py<|end_file_name|><|fim▁begin|>""" discover and run doctests in modules and test files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import platform
import sys
import traceback
from contextlib import contextmanager
import pytest
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import ReprFileLocation
from _pytest._code.code import TerminalRepr
from _pytest.compat import safe_getattr
from _pytest.fixtures import FixtureRequest
DOCTEST_REPORT_CHOICE_NONE = "none"
DOCTEST_REPORT_CHOICE_CDIFF = "cdiff"
DOCTEST_REPORT_CHOICE_NDIFF = "ndiff"
DOCTEST_REPORT_CHOICE_UDIFF = "udiff"
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure"
DOCTEST_REPORT_CHOICES = (
DOCTEST_REPORT_CHOICE_NONE,
DOCTEST_REPORT_CHOICE_CDIFF,
DOCTEST_REPORT_CHOICE_NDIFF,
DOCTEST_REPORT_CHOICE_UDIFF,
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,
)
# Lazy definition of runner class
RUNNER_CLASS = None
def pytest_addoption(parser):
parser.addini(
"doctest_optionflags",
"option flags for doctests",
type="args",
default=["ELLIPSIS"],
)
parser.addini(
"doctest_encoding", "encoding used for doctest files", default="utf-8"
)
group = parser.getgroup("collect")
group.addoption(
"--doctest-modules",
action="store_true",
default=False,
help="run doctests in all .py modules",
dest="doctestmodules",
)
group.addoption(
"--doctest-report",
type=str.lower,
default="udiff",
help="choose another output format for diffs on doctest failure",
choices=DOCTEST_REPORT_CHOICES,
dest="doctestreport",
)
group.addoption(
"--doctest-glob",
action="append",
default=[],
metavar="pat",
help="doctests file matching pattern, default: test*.txt",
dest="doctestglob",
)
group.addoption(
"--doctest-ignore-import-errors",
action="store_true",
default=False,
help="ignore doctest ImportErrors",
dest="doctest_ignore_import_errors",
)
group.addoption(
"--doctest-continue-on-failure",
action="store_true",
default=False,
help="for a given doctest, continue to run after the first failure",
dest="doctest_continue_on_failure",
)
def pytest_collect_file(path, parent):
config = parent.config
if path.ext == ".py":
if config.option.doctestmodules and not _is_setup_py(config, path, parent):
return DoctestModule(path, parent)
elif _is_doctest(config, path, parent):
return DoctestTextfile(path, parent)
def _is_setup_py(config, path, parent):
if path.basename != "setup.py":
return False
contents = path.read()
return "setuptools" in contents or "distutils" in contents
def _is_doctest(config, path, parent):
if path.ext in (".txt", ".rst") and parent.session.isinitpath(path):
return True
globs = config.getoption("doctestglob") or ["test*.txt"]
for glob in globs:
if path.check(fnmatch=glob):
return True
return False
class ReprFailDoctest(TerminalRepr):
def __init__(self, reprlocation_lines):
# List of (reprlocation, lines) tuples
self.reprlocation_lines = reprlocation_lines
def toterminal(self, tw):
for reprlocation, lines in self.reprlocation_lines:
for line in lines:
tw.line(line)
reprlocation.toterminal(tw)
class MultipleDoctestFailures(Exception):
def __init__(self, failures):
super(MultipleDoctestFailures, self).__init__()
self.failures = failures
def _init_runner_class():
import doctest
class PytestDoctestRunner(doctest.DebugRunner):
"""
Runner to collect failures. Note that the out variable in this case is
a list instead of a stdout-like object
"""
def __init__(
self, checker=None, verbose=None, optionflags=0, continue_on_failure=True
):
doctest.DebugRunner.__init__(
self, checker=checker, verbose=verbose, optionflags=optionflags
)
self.continue_on_failure = continue_on_failure
def report_failure(self, out, test, example, got):
failure = doctest.DocTestFailure(test, example, got)
if self.continue_on_failure:
out.append(failure)
else:
raise failure
def report_unexpected_exception(self, out, test, example, exc_info):
failure = doctest.UnexpectedException(test, example, exc_info)
if self.continue_on_failure:
out.append(failure)
else:
raise failure
return PytestDoctestRunner
def _get_runner(checker=None, verbose=None, optionflags=0, continue_on_failure=True):
# We need this in order to do a lazy import on doctest
global RUNNER_CLASS
if RUNNER_CLASS is None:
RUNNER_CLASS = _init_runner_class()
return RUNNER_CLASS(
checker=checker,
verbose=verbose,
optionflags=optionflags,
continue_on_failure=continue_on_failure,
)
class DoctestItem(pytest.Item):
def __init__(self, name, parent, runner=None, dtest=None):
super(DoctestItem, self).__init__(name, parent)
self.runner = runner
self.dtest = dtest
self.obj = None
self.fixture_request = None
def setup(self):
if self.dtest is not None:
self.fixture_request = _setup_fixtures(self)
globs = dict(getfixture=self.fixture_request.getfixturevalue)
for name, value in self.fixture_request.getfixturevalue(
"doctest_namespace"
).items():
globs[name] = value
self.dtest.globs.update(globs)
def runtest(self):
_check_all_skipped(self.dtest)
self._disable_output_capturing_for_darwin()
failures = []
self.runner.run(self.dtest, out=failures)
if failures:
raise MultipleDoctestFailures(failures)
def _disable_output_capturing_for_darwin(self):
"""
Disable output capturing. Otherwise, stdout is lost to doctest (#985)
"""
if platform.system() != "Darwin":
return
capman = self.config.pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
out, err = capman.read_global_capture()
sys.stdout.write(out)
sys.stderr.write(err)
def repr_failure(self, excinfo):
import doctest
failures = None
if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)):
failures = [excinfo.value]
elif excinfo.errisinstance(MultipleDoctestFailures):
failures = excinfo.value.failures
if failures is not None:
reprlocation_lines = []
for failure in failures:
example = failure.example
test = failure.test
filename = test.filename
if test.lineno is None:
lineno = None
else:
lineno = test.lineno + example.lineno + 1
message = type(failure).__name__
reprlocation = ReprFileLocation(filename, lineno, message)
checker = _get_checker()
report_choice = _get_report_choice(
self.config.getoption("doctestreport")
)
if lineno is not None:
lines = failure.test.docstring.splitlines(False)
# add line numbers to the left of the error message
lines = [
"%03d %s" % (i + test.lineno + 1, x)
for (i, x) in enumerate(lines)
]
# trim docstring error lines to 10
lines = lines[max(example.lineno - 9, 0) : example.lineno + 1]
else:
lines = [
"EXAMPLE LOCATION UNKNOWN, not showing all tests of that example"
]
indent = ">>>"
for line in example.source.splitlines():
lines.append("??? %s %s" % (indent, line))
indent = "..."
if isinstance(failure, doctest.DocTestFailure):
lines += checker.output_difference(
example, failure.got, report_choice
).split("\n")
else:
inner_excinfo = ExceptionInfo(failure.exc_info)
lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)]
lines += traceback.format_exception(*failure.exc_info)
reprlocation_lines.append((reprlocation, lines))
return ReprFailDoctest(reprlocation_lines)
else:
return super(DoctestItem, self).repr_failure(excinfo)
def reportinfo(self):
return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name
def _get_flag_lookup():
import doctest
return dict(
DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
ELLIPSIS=doctest.ELLIPSIS,
IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
ALLOW_UNICODE=_get_allow_unicode_flag(),
ALLOW_BYTES=_get_allow_bytes_flag(),
)
def get_optionflags(parent):
optionflags_str = parent.config.getini("doctest_optionflags")
flag_lookup_table = _get_flag_lookup()
flag_acc = 0
for flag in optionflags_str:
flag_acc |= flag_lookup_table[flag]
return flag_acc
def _get_continue_on_failure(config):
continue_on_failure = config.getvalue("doctest_continue_on_failure")
if continue_on_failure:
# We need to turn this off if we use pdb, since we should stop at
# the first failure
if config.getvalue("usepdb"):
continue_on_failure = False
return continue_on_failure
class DoctestTextfile(pytest.Module):
obj = None
def collect(self):
import doctest
# inspired by doctest.testfile; ideally we would use it directly,
# but it doesn't support passing a custom checker
encoding = self.config.getini("doctest_encoding")
text = self.fspath.read_text(encoding)
filename = str(self.fspath)
name = self.fspath.basename
globs = {"__name__": "__main__"}
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=0,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
_fix_spoof_python2(runner, encoding)
parser = doctest.DocTestParser()
test = parser.get_doctest(text, globs, name, filename, 0)
if test.examples:
yield DoctestItem(test.name, self, runner, test)
def _check_all_skipped(test):
"""raises pytest.skip() if all examples in the given DocTest have the SKIP
option set.
"""
import doctest
all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
if all_skipped:
pytest.skip("all tests skipped by +SKIP option")
def _is_mocked(obj):
"""
returns whether an object is possibly a mock object by checking for the existence of a highly improbable attribute
"""
return (
safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None)
is not None
)
@contextmanager
def _patch_unwrap_mock_aware():
"""
contextmanager which replaces ``inspect.unwrap`` with a version
that's aware of mock objects and doesn't recurse on them
"""
real_unwrap = getattr(inspect, "unwrap", None)
if real_unwrap is None:
yield
else:
def _mock_aware_unwrap(obj, stop=None):
if stop is None:
return real_unwrap(obj, stop=_is_mocked)
else:
return real_unwrap(obj, stop=lambda obj: _is_mocked(obj) or stop(obj))
inspect.unwrap = _mock_aware_unwrap
try:
yield
finally:
inspect.unwrap = real_unwrap
class DoctestModule(pytest.Module):
def collect(self):
import doctest
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""
a hackish doctest finder that overrides stdlib internals to fix a stdlib bug
https://github.com/pytest-dev/pytest/issues/3456
https://bugs.python.org/issue25532
"""
def _find(self, tests, obj, name, module, source_lines, globs, seen):
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
doctest.DocTestFinder._find(
self, tests, obj, name, module, source_lines, globs, seen
)
if self.fspath.basename == "conftest.py":
module = self.config.pluginmanager._importconftest(self.fspath)
else:
try:
module = self.fspath.pyimport()
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
pytest.skip("unable to import module %r" % self.fspath)
else:
raise
# uses internal doctest module parsing mechanism
finder = MockAwareDocTestFinder()
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=0,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests
yield DoctestItem(test.name, self, runner, test)
def _setup_fixtures(doctest_item):
"""
Used by DoctestTextfile and DoctestItem to setup fixture information.
"""
def func():
pass
doctest_item.funcargs = {}
fm = doctest_item.session._fixturemanager
doctest_item._fixtureinfo = fm.getfixtureinfo(
node=doctest_item, func=func, cls=None, funcargs=False
)
fixture_request = FixtureRequest(doctest_item)
fixture_request._fillfixtures()
return fixture_request
def _get_checker():
"""
Returns a doctest.OutputChecker subclass that takes into account the
ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES
to strip b'' prefixes.
Useful when the same doctest should run in Python 2 and Python 3.
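For example (an illustrative sketch): with the ALLOW_UNICODE flag enabled,
an expected output written as ``u'abc'`` also matches an actual output of
``'abc'``, so the same example passes under both Python versions.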
An inner class is used to avoid importing "doctest" at the module
level.
"""
if hasattr(_get_checker, "LiteralsOutputChecker"):
return _get_checker.LiteralsOutputChecker()
import doctest
import re
class LiteralsOutputChecker(doctest.OutputChecker):
"""
Copied from doctest_nose_plugin.py from the nltk project:
https://github.com/nltk/nltk
Further extended to also support byte literals.
"""
_unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
_bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
def check_output(self, want, got, optionflags):
res = doctest.OutputChecker.check_output(self, want, got, optionflags)
if res:
return True
allow_unicode = optionflags & _get_allow_unicode_flag()
allow_bytes = optionflags & _get_allow_bytes_flag()
if not allow_unicode and not allow_bytes:
return False
else: # pragma: no cover
def remove_prefixes(regex, txt):
return re.sub(regex, r"\1\2", txt)
if allow_unicode:
want = remove_prefixes(self._unicode_literal_re, want)
got = remove_prefixes(self._unicode_literal_re, got)
if allow_bytes:
want = remove_prefixes(self._bytes_literal_re, want)
got = remove_prefixes(self._bytes_literal_re, got)
res = doctest.OutputChecker.check_output(self, want, got, optionflags)
return res
_get_checker.LiteralsOutputChecker = LiteralsOutputChecker
return _get_checker.LiteralsOutputChecker()
def _get_allow_unicode_flag():
"""
Registers and returns the ALLOW_UNICODE flag.
"""
import doctest
return doctest.register_optionflag("ALLOW_UNICODE")
def _get_allow_bytes_flag():
"""
Registers and returns the ALLOW_BYTES flag.
"""
import doctest
return doctest.register_optionflag("ALLOW_BYTES")
def _get_report_choice(key):
"""
This function returns the actual `doctest` module flag value; we want to resolve it as late as possible to avoid
importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests.
"""
import doctest
return {
DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF,
DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF,
DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF,
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE,<|fim▁hole|>
def _fix_spoof_python2(runner, encoding):
"""
Installs a "SpoofOut" into the given DebugRunner so it properly deals with unicode output. This
should patch only doctests for text files because they don't have a way to declare their
encoding. Doctests in docstrings from Python modules don't have the same problem given that
Python already decoded the strings.
This fixes the problem related in issue #2434.
"""
from _pytest.compat import _PY2
if not _PY2:
return
from doctest import _SpoofOut
class UnicodeSpoof(_SpoofOut):
def getvalue(self):
result = _SpoofOut.getvalue(self)
if encoding and isinstance(result, bytes):
result = result.decode(encoding)
return result
runner._fakeout = UnicodeSpoof()
@pytest.fixture(scope="session")
def doctest_namespace():
"""
Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests.
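A usage sketch (the fixture and module names below are illustrative)::

    @pytest.fixture(autouse=True)
    def add_np(doctest_namespace):
        doctest_namespace["np"] = numpy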
"""
return dict()<|fim▁end|> | DOCTEST_REPORT_CHOICE_NONE: 0,
}[key] |
<|file_name|>binary_tree.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2015 Takeru Ohta <[email protected]>
//
// This software is released under the MIT License,
// see the LICENSE file at the top-level directory.
extern crate dawg;
use dawg::binary_tree::Builder;
#[test]
fn build() {
let mut b = Builder::new();<|fim▁hole|>}
#[test]
fn search_common_prefix() {
let trie = words()
.iter()
.fold(Builder::new(), |mut b, w| {
b.insert(w.bytes()).ok().unwrap();
b
})
.finish();
assert_eq!(0, trie.search_common_prefix("hoge".bytes()).count());
assert_eq!(vec![(0, 3)],
trie.search_common_prefix("abc".bytes()).collect::<Vec<_>>());
assert_eq!(vec![(4, 2), (5, 4)],
trie.search_common_prefix("cddrr".bytes()).collect::<Vec<_>>());
}
fn words() -> [&'static str; 7] {
["abc", "b", "bbb", "car", "cd", "cddr", "cdr"]
}<|fim▁end|> | for w in words().iter() {
assert!(b.insert(w.bytes()).is_ok());
}
assert_eq!(words().len(), b.finish().len()); |
<|file_name|>iotjs_module_timer.cpp<|end_file_name|><|fim▁begin|>/* Copyright 2015 Samsung Electronics Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "iotjs_def.h"
#include "iotjs_module_timer.h"
#include "iotjs_handlewrap.h"
namespace iotjs {
class TimerWrap : public HandleWrap {
public:
explicit TimerWrap(Environment* env, JObject& jtimer)
: HandleWrap(jtimer, reinterpret_cast<uv_handle_t*>(&_handle))
, _jcallback(NULL) {
// Initialize the timer handle.
uv_timer_init(env->loop(), &_handle);
}
// Timer timeout callback handler.
void OnTimeout();
// Timer close callback handler.
void OnClose();
// Start timer.
int Start(int64_t timeout, int64_t repeat, JObject& jcallback);
// Stop & close timer.
int Stop();
// Retrieve the javascript callback function.
JObject* jcallback() { return _jcallback; }
protected:
// timer handle.
uv_timer_t _handle;
// Javascript callback function.
JObject* _jcallback;
};
// This function is called from uv when timeout expires.
static void TimeoutHandler(uv_timer_t* handle) {
// Find timer wrap from handle.
HandleWrap* handle_wrap = HandleWrap::FromHandle((uv_handle_t*)handle);
TimerWrap* timer_wrap = reinterpret_cast<TimerWrap*>(handle_wrap);
// Call the timeout handler.
timer_wrap->OnTimeout();
}
void TimerWrap::OnTimeout() {
// Verification.
IOTJS_ASSERT(jobject().IsObject());
IOTJS_ASSERT(_jcallback != NULL);
IOTJS_ASSERT(_jcallback->IsFunction());
// Call the javascript timeout callback function.
MakeCallback(*_jcallback, jobject(), JArgList::Empty());
}
// Start timer.
int TimerWrap::Start(int64_t timeout, int64_t repeat, JObject& jcallback) {
// We should not have javascript callback handler yet.
IOTJS_ASSERT(_jcallback == NULL);
IOTJS_ASSERT(jcallback.IsFunction());
// Create a new Javascript function reference for the callback function.
_jcallback = new JObject(jcallback);
// Start uv timer.
return uv_timer_start(&_handle,
TimeoutHandler,
timeout,
repeat);
}
// This function is called from uv after timer close.
static void OnTimerClose(uv_handle_t* handle) {
// Find timer wrap from handle.
HandleWrap* handle_wrap = HandleWrap::FromHandle(handle);
TimerWrap* timer_wrap = reinterpret_cast<TimerWrap*>(handle_wrap);
// Call the close handler.
timer_wrap->OnClose();
}
void TimerWrap::OnClose() {
// If we have javascript timeout callback reference, release it.
if (_jcallback != NULL) {
delete _jcallback;
_jcallback = NULL;
}
}
int TimerWrap::Stop() {
// Close timer.
if (!uv_is_closing(__handle)) {
Close(OnTimerClose);
}
return 0;
}
JHANDLER_FUNCTION(Start) {
// Check parameters.
JHANDLER_CHECK(handler.GetThis()->IsObject());
JHANDLER_CHECK(handler.GetArgLength() >= 3);
JHANDLER_CHECK(handler.GetArg(0)->IsNumber());
JHANDLER_CHECK(handler.GetArg(1)->IsNumber());
JHANDLER_CHECK(handler.GetArg(2)->IsFunction());
JObject* jtimer = handler.GetThis();
// Take timer wrap.
TimerWrap* timer_wrap = reinterpret_cast<TimerWrap*>(jtimer->GetNative());
IOTJS_ASSERT(timer_wrap != NULL);
IOTJS_ASSERT(timer_wrap->jobject().IsObject());
// parameters.
int64_t timeout = handler.GetArg(0)->GetInt64();
int64_t repeat = handler.GetArg(1)->GetInt64();
JObject* jcallback = handler.GetArg(2);
// We do not permit double start.
JHANDLER_CHECK(timer_wrap->jcallback() == NULL);
// Start timer.
int res = timer_wrap->Start(timeout, repeat, *jcallback);
JObject ret(res);
handler.Return(ret);
return true;
}
JHANDLER_FUNCTION(Stop) {
JHANDLER_CHECK(handler.GetThis()->IsObject());
JObject* jtimer = handler.GetThis();
TimerWrap* timer_wrap = reinterpret_cast<TimerWrap*>(jtimer->GetNative());
IOTJS_ASSERT(timer_wrap != NULL);
IOTJS_ASSERT(timer_wrap->jobject().IsObject());<|fim▁hole|> int res = timer_wrap->Stop();
JObject ret(res);
handler.Return(ret);
return true;
}
JHANDLER_FUNCTION(Timer) {
JHANDLER_CHECK(handler.GetThis()->IsObject());
Environment* env = Environment::GetEnv();
JObject* jtimer = handler.GetThis();
TimerWrap* timer_wrap = new TimerWrap(env, *jtimer);
IOTJS_ASSERT(timer_wrap->jobject().IsObject());
IOTJS_ASSERT(jtimer->GetNative() != 0);
return true;
}
JObject* InitTimer() {
Module* module = GetBuiltinModule(MODULE_TIMER);
JObject* timer = module->module;
if (timer == NULL) {
timer = new JObject(Timer);
JObject prototype;
timer->SetProperty("prototype", prototype);
prototype.SetMethod("start", Start);
prototype.SetMethod("stop", Stop);
module->module = timer;
}
return timer;
}
} // namespace iotjs<|fim▁end|> |
// Stop timer. |
<|file_name|>db.js<|end_file_name|><|fim▁begin|>"use strict";
const mongoose = require('mongoose');<|fim▁hole|>
module.exports = (()=>{
mongoose.connect('mongodb://192.168.56.101:30000/blog');
let db = mongoose.connection;
db.on('error', function(err){
console.log(err);
});
db.once('open', (err)=> {
console.log('connect success');
})
})();<|fim▁end|> | |
<|file_name|>test_db_rule_enforcement.py<|end_file_name|><|fim▁begin|># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0<|fim▁hole|>#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bson
import mock
from st2common.models.db.rule_enforcement import RuleEnforcementDB
from st2common.persistence.rule_enforcement import RuleEnforcement
from st2common.transport.publishers import PoolPublisher
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2tests import DbTestCase
SKIP_DELETE = False
@mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
class RuleEnforcementModelTest(DbTestCase):
def test_ruleenforcement_crud(self):
saved = RuleEnforcementModelTest._create_save_rule_enforcement()
retrieved = RuleEnforcement.get_by_id(saved.id)
self.assertEqual(saved.rule.ref, retrieved.rule.ref,
'Same rule enforcement was not returned.')
self.assertTrue(retrieved.enforced_at is not None)
# test update
RULE_ID = str(bson.ObjectId())
self.assertEqual(retrieved.rule.id, None)
retrieved.rule.id = RULE_ID
saved = RuleEnforcement.add_or_update(retrieved)
retrieved = RuleEnforcement.get_by_id(saved.id)
self.assertEqual(retrieved.rule.id, RULE_ID,
'Update to rule enforcement failed.')
# cleanup
RuleEnforcementModelTest._delete([retrieved])
try:
retrieved = RuleEnforcement.get_by_id(saved.id)
except StackStormDBObjectNotFoundError:
retrieved = None
self.assertIsNone(retrieved, 'managed to retrieve after delete.')
@staticmethod
def _create_save_rule_enforcement():
created = RuleEnforcementDB(trigger_instance_id=str(bson.ObjectId()),
rule={'ref': 'foo_pack.foo_rule',
'uid': 'rule:foo_pack:foo_rule'},
execution_id=str(bson.ObjectId()))
return RuleEnforcement.add_or_update(created)
@staticmethod
def _delete(model_objects):
global SKIP_DELETE
if SKIP_DELETE:
return
for model_object in model_objects:
model_object.delete()<|fim▁end|> | # (the 'License'); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at |
<|file_name|>CallMacroStep.java<|end_file_name|><|fim▁begin|>package org.museautomation.core.step;
import org.jetbrains.annotations.*;
import org.museautomation.core.*;
import org.museautomation.core.context.*;
import org.museautomation.core.step.descriptor.*;
import org.museautomation.core.steptask.*;
import org.museautomation.core.values.*;
import org.museautomation.core.values.descriptor.*;
import java.util.*;
/**
* Executes the steps contained within a Macro.
*
* Note that this does NOT execute those steps within a separate variable scope, despite this class extending
* ScopedGroup. It overrides #isCreateNewVariableScope to disable that behavior. That seems a bit strange, but
* CallFunction builds on the basic function of CallMacroStep and it needs to be scoped. We need multiple-inheritance
* to do this cleanly (yuck), but this will have to suffice.
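 *
 * A usage sketch (the macro name is illustrative): a "callmacro" step whose "id" source
 * resolves to "login-macro" runs the steps of that project resource inline, sharing the
 * caller's variable scope rather than opening a new one.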
*
* @see Macro
* @author Christopher L Merrill (see LICENSE.txt for license details)
*/
@MuseTypeId("callmacro")
@MuseStepName("Macro")
@MuseInlineEditString("call macro {id}")
@MuseStepIcon("glyph:FontAwesome:EXTERNAL_LINK")
@MuseStepTypeGroup("Structure")
@MuseStepLongDescription("The 'id' source is resolved to a string and used to find the macro in the project. The steps within the macro are then executed as children of the call-macro step, within the same variable scope as the parent. This means that steps within the macro have access to the same variables as the caller.")
@MuseSubsourceDescriptor(displayName = "Macro name", description = "The name (resource id) of the macro to call", type = SubsourceDescriptor.Type.Named, name = CallMacroStep.ID_PARAM)
public class CallMacroStep extends ScopedGroup
{
@SuppressWarnings("unused") // called via reflection
public CallMacroStep(StepConfiguration config, MuseProject project)
{
super(config, project);
_config = config;
_project = project;
}
@Override
protected StepExecutionContext createStepExecutionContextForChildren(StepExecutionContext context) throws MuseExecutionError
{
String id = getStepsId(context);
ContainsStep resource = _project.getResourceStorage().getResource(id, ContainsStep.class);<|fim▁hole|>
StepConfiguration step = resource.getStep();
List<StepConfiguration> steps;
if (step.getChildren() != null && step.getChildren().size() > 0)
steps = step.getChildren();
else
{
steps = new ArrayList<>();
steps.add(step);
}
context.getStepLocator().loadSteps(steps);
context.raiseEvent(DynamicStepLoadingEventType.create(_config, steps));
return new ListOfStepsExecutionContext(context.getParent(), steps, isCreateNewVariableScope(), this);
}
/**
* Get the id of the project resource that contains the steps that should be run.
*/
@NotNull
@SuppressWarnings("WeakerAccess")
protected String getStepsId(StepExecutionContext context) throws MuseExecutionError
{
MuseValueSource id_source = getValueSource(_config, ID_PARAM, true, context.getProject());
return BaseValueSource.getValue(id_source, context, false, String.class);
}
@Override
protected boolean isCreateNewVariableScope()
{
return false;
}
protected MuseProject _project;
private StepConfiguration _config;
public final static String ID_PARAM = "id";
public final static String TYPE_ID = CallMacroStep.class.getAnnotation(MuseTypeId.class).value();
}<|fim▁end|> | if (resource == null)
throw new StepExecutionError("unable to locate project resource, id=" + id); |
<|file_name|>Sprite.ts<|end_file_name|><|fim▁begin|>module example.components {
import Component = artemis.Component;
import PooledComponent = artemis.PooledComponent;
import Pooled = artemis.annotations.Pooled;
import Point = PIXI.Point;
import Container = PIXI.Container;
import Texture = PIXI.Texture;
import ZSprite = PIXI.Sprite;
/**
* ZSprite!?! Is that SAP?
* Careful with that axe, Eugene.
*/
export enum Layer {
DEFAULT,
BACKGROUND,
TEXT,
ACTORS_1,
ACTORS_2,
ACTORS_3,
PARTICLES
// getLayerId() {
// return ordinal();
// }
}
@Pooled()
export class Sprite extends PooledComponent {
public static className = 'Sprite';
public layer:Layer;
public name:string;
public sprite_:ZSprite;
initialize(name?:string|Function, lambda?) {
switch(typeof name) {
case 'string':
this.name = <string>name;
var s = this.sprite_ = new ZSprite(Texture.fromFrame(`${this.name}.png`));
s.scale.set(1 / window.devicePixelRatio);
s.anchor.set(.5, .5);
break;
case 'function':
this.sprite_ = new ZSprite();
lambda = name;
break;
}
if (lambda) lambda(this);
}
<|fim▁hole|> }
removeFrom(layer:Container) {
layer.removeChild(this.sprite_);
}
public reset() {
this.sprite_ = null;
}
}
Sprite.prototype.layer = Layer.DEFAULT;
Sprite.prototype.name = '';
Sprite.prototype.sprite_ = null;
}<|fim▁end|> | addTo(layer:Container) {
layer.addChild(this.sprite_); |
<|file_name|>generateGeotiff.py<|end_file_name|><|fim▁begin|># This script generates GeoTiff files based on Corine land cover data
# Usage: python generateGeotiff.py berryName
# berryName is optional. If not provided all output layers are generated.
# Licensed under the MIT license
from osgeo import gdal, ogr, gdalconst
import sys
gdal.UseExceptions()
gdal.AllRegister()
# Paths for input and output. These may be adjusted as needed.
src_filename = "../../aineisto/Clc2012_FI20m.tif"
dstPath = "../../output"
berries = ["mustikka", "puolukka", "karpalo", "vadelma"]
if len(sys.argv) > 1:
berries = [sys.argv[1]]
# WARNING: these values are not based on scientific research.
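# Keys are CORINE land-cover class codes from the input raster; values are
# rough berry-suitability scores (rescaled to 0-100 further below).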
corineToBerryIndex = dict()
corineToBerryIndex["mustikka"] = dict()
corineToBerryIndex["mustikka"][24] = 70
corineToBerryIndex["mustikka"][25] = 80
corineToBerryIndex["mustikka"][27] = 50
corineToBerryIndex["mustikka"][28] = 60
corineToBerryIndex["puolukka"] = dict()
corineToBerryIndex["puolukka"][24] = 80
corineToBerryIndex["puolukka"][25] = 60
corineToBerryIndex["karpalo"] = dict()
corineToBerryIndex["karpalo"][40] = 50
corineToBerryIndex["karpalo"][42] = 80
corineToBerryIndex["vadelma"] = dict()
corineToBerryIndex["vadelma"][36] = 80
corineToBerryIndex["vadelma"][35] = 60
# Normalize values so that the highest value in output is always 100
normalizationFactor = 100.0 / 80.0
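# e.g. the highest raw score, 80, becomes int(80 * 1.25) = 100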
srcDs = gdal.Open(src_filename)
corineBand = srcDs.GetRasterBand(1)
xSize = corineBand.XSize
ySize = corineBand.YSize
print "Input raster size is ", xSize, ySize
<|fim▁hole|> dstDs.SetProjection(srcDs.GetProjection())
array = corineBand.ReadAsArray(0, 0, xSize, ySize)
for x in range(0, xSize):
indexes = corineToBerryIndex[berry]
if x % 500 == 0:
print `round(100.0 * x / xSize)` + " % of " + berry + " done"
for y in range(0, ySize):
origVal = array[y,x]
if origVal in indexes:
finalVal = int(indexes[origVal] * normalizationFactor)
else:
finalVal = 0
array[y,x] = finalVal
dstBand = dstDs.GetRasterBand(1)
dstBand.WriteArray(array, 0, 0)
# Once we're done, properly close the dataset
dstBand = None
dstDs = None
corineBand = None
srcDs = None<|fim▁end|> | for berry in berries:
driver = srcDs.GetDriver()
dstDs = driver.Create(dstPath + "/" + berry + ".tif", xSize, ySize, 1, gdal.GDT_UInt16, options = ['COMPRESS=LZW'])
dstDs.SetGeoTransform(srcDs.GetGeoTransform()) |
<|file_name|>emnist.py<|end_file_name|><|fim▁begin|>import shutil
import numpy as np
import dill
import gzip
import os
import subprocess
import struct
from array import array
import warnings
from dps import cfg
from dps.utils import image_to_string, cd, resize_image
# This link seems not to work anymore...
# emnist_url = 'https://cloudstor.aarnet.edu.au/plus/index.php/s/54h3OuGJhFLwAlQ/download'
emnist_url = 'http://www.itl.nist.gov/iaui/vip/cs_links/EMNIST/gzip.zip'
template = 'emnist-byclass-{}-{}-idx{}-ubyte.gz'
emnist_gz_names = [
template.format('test', 'images', 3),
template.format('test', 'labels', 1),
template.format('train', 'images', 3),
template.format('train', 'labels', 1)
]
def emnist_classes():
return (
[str(i) for i in range(10)]
+ [chr(i + ord('A')) for i in range(26)]
+ [chr(i + ord('a')) for i in range(26)]
)
emnist_filenames = [c + ".pklz" for c in emnist_classes()]
def _validate_emnist(path):
if not os.path.isdir(path):
return False
return set(os.listdir(path)) == set(emnist_filenames)
def _download_emnist(data_dir):
"""
Download the emnist data. Result is that a directory called "emnist_raw"
is created inside `data_dir`, containing the four gzipped EMNIST files.
Parameters
----------
data_dir: str
Path to directory where files should be stored.
"""
emnist_raw_dir = os.path.join(data_dir, "emnist_raw")
os.makedirs(emnist_raw_dir, exist_ok=True)
with cd(emnist_raw_dir):
if not os.path.exists('gzip.zip'):
print("Downloading...")
command = "wget --output-document=gzip.zip {}".format(emnist_url).split()
subprocess.run(command, check=True)
else:
print("Found existing copy of gzip.zip, not downloading.")
print("Extracting...")
for fname in emnist_gz_names:
if not os.path.exists(fname):
subprocess.run('unzip gzip.zip gzip/{}'.format(fname), shell=True, check=True)
shutil.move('gzip/{}'.format(fname), '.')
else:
print("{} already exists, skipping extraction.".format(fname))
try:
shutil.rmtree('gzip')
except FileNotFoundError:
pass
return emnist_raw_dir
def _emnist_load_helper(path_img, path_lbl):
with gzip.open(path_lbl, 'rb') as file:
magic, size = struct.unpack(">II", file.read(8))
if magic != 2049:
raise ValueError('Magic number mismatch, expected 2049,'
'got {}'.format(magic))
labels = array("B", file.read())
with gzip.open(path_img, 'rb') as file:
magic, size, rows, cols = struct.unpack(">IIII", file.read(16))
if magic != 2051:
raise ValueError('Magic number mismatch, expected 2051,'
'got {}'.format(magic))
image_data = array("B", file.read())
images = np.zeros((size, rows * cols), dtype=np.uint8)
for i in range(size):
images[i][:] = image_data[i * rows * cols:(i + 1) * rows * cols]
return np.array(images, dtype=np.uint8), np.array(labels, dtype=np.uint8)
def maybe_convert_emnist_shape(path, shape):
""" Create a version of emnist on disk that is reshaped to the desired shape.
Images are stored on disk as uint8.
"""
if shape == (28, 28):
return
shape_dir = os.path.join(path, 'emnist_{}_by_{}'.format(*shape))
if os.path.isdir(shape_dir):
return
emnist_dir = os.path.join(path, 'emnist')
print("Converting (28, 28) EMNIST dataset to {}...".format(shape))
try:
shutil.rmtree(shape_dir)
except FileNotFoundError:
pass
os.makedirs(shape_dir, exist_ok=False)
classes = ''.join(
[str(i) for i in range(10)]
+ [chr(i + ord('A')) for i in range(26)]
+ [chr(i + ord('a')) for i in range(26)]
)
for i, cls in enumerate(sorted(classes)):
with gzip.open(os.path.join(emnist_dir, str(cls) + '.pklz'), 'rb') as f:
_x = dill.load(f)
new_x = []
        for img in _x:
img = resize_image(img, shape, preserve_range=True)
new_x.append(img)
print(cls)
print(image_to_string(_x[0]))
_x = np.array(new_x, dtype=_x.dtype)
print(image_to_string(_x[0]))
path_i = os.path.join(shape_dir, cls + '.pklz')
with gzip.open(path_i, 'wb') as f:
dill.dump(_x, f, protocol=dill.HIGHEST_PROTOCOL)
def maybe_download_emnist(data_dir, quiet=0, shape=None):
"""
Download emnist data if it hasn't already been downloaded. Do some
post-processing to put it in a more useful format. End result is a directory
called `emnist-byclass` which contains a separate pklz file for each emnist
class.
Pixel values of stored images are uint8 values up to 255.
Images for each class are put into a numpy array with shape (n_images_in_class, 28, 28).
This numpy array is pickled and stored in a zip file with name <class char>.pklz.
Parameters
----------
data_dir: str
Directory where files should be stored.
"""
emnist_dir = os.path.join(data_dir, 'emnist')
if _validate_emnist(emnist_dir):
print("EMNIST data seems to be present already.")
else:
print("EMNIST data not found, downloading and processing...")
try:
shutil.rmtree(emnist_dir)
except FileNotFoundError:
pass
raw_dir = _download_emnist(data_dir)
with cd(raw_dir):
images, labels = _emnist_load_helper(emnist_gz_names[0], emnist_gz_names[1])
images1, labels1 = _emnist_load_helper(emnist_gz_names[2], emnist_gz_names[3])
with cd(data_dir):
os.makedirs('emnist', exist_ok=False)
print("Processing...")
with cd('emnist'):
x = np.concatenate((images, images1), 0)
y = np.concatenate((labels, labels1), 0)
# Give images the right orientation so that plt.imshow(x[0]) just works.
x = np.moveaxis(x.reshape(-1, 28, 28), 1, 2)
for i in sorted(set(y.flatten())):
keep = y == i
x_i = x[keep.flatten(), :]
if i >= 36:
char = chr(i-36+ord('a'))
elif i >= 10:
char = chr(i-10+ord('A'))
else:
char = str(i)
if quiet >= 2:
pass
elif quiet == 1:
print(char)
elif quiet <= 0:
print(char)
print(image_to_string(x_i[0, ...]))
file_i = char + '.pklz'
with gzip.open(file_i, 'wb') as f:
dill.dump(x_i, f, protocol=dill.HIGHEST_PROTOCOL)
if shape is not None:
maybe_convert_emnist_shape(data_dir, shape)
def load_emnist(
classes, balance=False, include_blank=False,
shape=None, n_examples=None, example_range=None, show=False, path=None):<|fim▁hole|>
Elements of `classes` pick out which emnist classes to load, but different labels
end up getting returned because most classifiers require that the labels
be in range(len(classes)). We return a dictionary `class_map` which maps from
elements of `classes` down to range(len(classes)).
    Pixel values of returned images are integers in the range 0-255, stored as uint8 arrays.
Returned X array has shape (n_images,) + shape.
Parameters
----------
path: str
Path to data directory, assumed to contain a sub-directory called `emnist`.
    classes: list of characters from the set (0-9, A-Z, a-z)
Each character is the name of a class to load.
balance: bool
If True, will ensure that all classes are balanced by removing elements
        from classes that are larger than the minimum-size class.
include_blank: bool
If True, includes an additional class that consists of blank images.
shape: (int, int)
Shape of the images.
n_examples: int
Maximum number of examples returned. If not supplied, return all available data.
example_range: pair of floats
Pair of floats specifying, for each class, the range of examples that should be used.
Each element of the pair is a number in (0, 1), and the second number should be larger.
show: bool
If True, prints out an image from each class.
"""
if path is None:
path = cfg.data_dir
maybe_download_emnist(path, shape=shape)
emnist_dir = os.path.join(path, 'emnist')
    classes = list(classes)
needs_reshape = False
if shape and shape != (28, 28):
resized_dir = os.path.join(path, 'emnist_{}_by_{}'.format(*shape))
if _validate_emnist(resized_dir):
emnist_dir = resized_dir
else:
needs_reshape = True
if example_range is not None:
assert 0.0 <= example_range[0] < example_range[1] <= 1.0
x, y = [], []
class_count = []
classes = sorted([str(s) for s in classes])
for i, cls in enumerate(classes):
with gzip.open(os.path.join(emnist_dir, str(cls) + '.pklz'), 'rb') as f:
_x = dill.load(f)
if example_range is not None:
low = int(example_range[0] * len(_x))
high = int(example_range[1] * len(_x))
_x = _x[low:high, ...]
x.append(_x)
y.extend([i] * _x.shape[0])
if show:
print(cls)
indices_to_show = np.random.choice(len(_x), size=100)
for i in indices_to_show:
print(image_to_string(_x[i]))
class_count.append(_x.shape[0])
x = np.concatenate(x, axis=0)
if include_blank:
min_class_count = min(class_count)
blanks = np.zeros((min_class_count,) + x.shape[1:], dtype=np.uint8)
x = np.concatenate((x, blanks), axis=0)
blank_idx = len(classes)
y.extend([blank_idx] * min_class_count)
blank_symbol = ' '
classes.append(blank_symbol)
y = np.array(y)
if balance:
min_class_count = min(class_count)
keep_x, keep_y = [], []
for i, cls in enumerate(classes):
keep_indices = np.nonzero(y == i)[0]
keep_indices = keep_indices[:min_class_count]
keep_x.append(x[keep_indices])
keep_y.append(y[keep_indices])
x = np.concatenate(keep_x, axis=0)
y = np.concatenate(keep_y, axis=0)
order = np.random.permutation(x.shape[0])
x = x[order]
y = y[order]
if n_examples:
x = x[:n_examples]
y = y[:n_examples]
if needs_reshape:
if x.shape[0] > 10000:
warnings.warn(
"Performing an online resize of a large number of images ({}), "
"consider creating and storing the resized dataset.".format(x.shape[0])
)
x = [resize_image(img, shape) for img in x]
x = np.uint8(x)
if show:
indices_to_show = np.random.choice(len(x), size=200)
for i in indices_to_show:
print(y[i])
print(image_to_string(x[i]))
return x, y, classes<|fim▁end|> | """ Load emnist data from disk by class. |
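A hedged usage sketch for load_emnist as defined above; the class list, split fractions and data path below are assumptions, not values taken from this source:

# Hypothetical call into load_emnist above; path and arguments are assumptions.
x, y, classes = load_emnist(
    classes=['0', '1', 'A'],    # EMNIST class names: digits and letters
    balance=True,               # trim every class down to the smallest one
    include_blank=True,         # appends an all-zero class labelled len(classes)
    shape=(14, 14),             # uses a resized on-disk copy, or an online resize
    example_range=(0.0, 0.8),   # keep the first 80% of each class (train split)
    path='/tmp/emnist_data',    # assumed directory that will hold `emnist/`
)
assert x.shape[1:] == (14, 14)
assert set(y) == set(range(len(classes)))   # labels are remapped to 0..n-1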
<|file_name|>0002_auto_20160628_1024.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations<|fim▁hole|>class Migration(migrations.Migration):
dependencies = [
('catalogue', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Cd',
new_name='Release',
),
]<|fim▁end|> | |
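For reference, RenameModel is symmetric, so undoing this change is another one-operation migration; a hypothetical reverse (not part of the app) would look like:

# Hypothetical reverse migration; Django can also auto-reverse RenameModel.
from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [('catalogue', '0002_auto_20160628_1024')]
    operations = [
        migrations.RenameModel(old_name='Release', new_name='Cd'),
    ]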
<|file_name|>JavadocCompletionProposal.java<|end_file_name|><|fim▁begin|>/*******************************************************************************
* Copyright (c) 2000, 2016 IBM Corporation and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Based on org.eclipse.jdt.internal.ui.text.javadoc.JavaDocAutoIndentStrategy
*
* Contributors:
* IBM Corporation - initial API and implementation
* Red Hat, Inc - decoupling from jdt.ui
*******************************************************************************/
package org.eclipse.jdt.ls.core.internal.contentassist;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.jdt.core.ICompilationUnit;
import org.eclipse.jdt.core.IJavaElement;
import org.eclipse.jdt.core.IJavaProject;
import org.eclipse.jdt.core.IMember;
import org.eclipse.jdt.core.IMethod;
import org.eclipse.jdt.core.ISourceRange;
import org.eclipse.jdt.core.IType;
import org.eclipse.jdt.core.JavaModelException;
import org.eclipse.jdt.core.manipulation.CodeGeneration;
import org.eclipse.jdt.internal.core.manipulation.StubUtility;
import org.eclipse.jdt.internal.corext.util.MethodOverrideTester;
import org.eclipse.jdt.internal.corext.util.SuperTypeHierarchyCache;
import org.eclipse.jdt.ls.core.internal.JDTUtils;
import org.eclipse.jdt.ls.core.internal.JavaLanguageServerPlugin;
import org.eclipse.jdt.ls.core.internal.handlers.CompletionResolveHandler;
import org.eclipse.jdt.ls.core.internal.handlers.JsonRpcHelpers;
import org.eclipse.jface.text.BadLocationException;
import org.eclipse.jface.text.IDocument;
import org.eclipse.jface.text.IRegion;
import org.eclipse.jface.text.Region;
import org.eclipse.jface.text.TextUtilities;
import org.eclipse.lsp4j.CompletionItem;
import org.eclipse.lsp4j.CompletionItemKind;
import org.eclipse.lsp4j.InsertTextFormat;
import org.eclipse.lsp4j.Range;
import org.eclipse.lsp4j.TextEdit;
public class JavadocCompletionProposal {
private static final String ASTERISK = "*";
private static final String WHITESPACES = " \t";
public static final String JAVA_DOC_COMMENT = "Javadoc comment";
public List<CompletionItem> getProposals(ICompilationUnit cu, int offset, CompletionProposalRequestor collector, IProgressMonitor monitor) throws JavaModelException {
if (cu == null) {
throw new IllegalArgumentException("Compilation unit must not be null"); //$NON-NLS-1$
}
List<CompletionItem> result = new ArrayList<>();
IDocument d = JsonRpcHelpers.toDocument(cu.getBuffer());
if (offset < 0 || d.getLength() == 0) {
return result;
}
try {
int p = (offset == d.getLength() ? offset - 1 : offset);
IRegion line = d.getLineInformationOfOffset(p);
String lineStr = d.get(line.getOffset(), line.getLength()).trim();
if (!lineStr.startsWith("/**")) {
return result;
}
if (!hasEndJavadoc(d, offset)) {
return result;
}
String text = collector.getContext().getToken() == null ? "" : new String(collector.getContext().getToken());
StringBuilder buf = new StringBuilder(text);
IRegion prefix = findPrefixRange(d, line);
String indentation = d.get(prefix.getOffset(), prefix.getLength());
int lengthToAdd = Math.min(offset - prefix.getOffset(), prefix.getLength());
buf.append(indentation.substring(0, lengthToAdd));
String lineDelimiter = TextUtilities.getDefaultLineDelimiter(d);
ICompilationUnit unit = cu;
try {
unit.reconcile(ICompilationUnit.NO_AST, false, null, null);
String string = createJavaDocTags(d, offset, indentation, lineDelimiter, unit);
if (string != null && !string.trim().equals(ASTERISK)) {
buf.append(string);
} else {
return result;
}
int nextNonWS = findEndOfWhiteSpace(d, offset, d.getLength());
if (!Character.isWhitespace(d.getChar(nextNonWS))) {
buf.append(lineDelimiter);
}
} catch (CoreException e) {
// ignore
}
final CompletionItem ci = new CompletionItem();
Range range = JDTUtils.toRange(unit, offset, 0);
boolean isSnippetSupported = JavaLanguageServerPlugin.getPreferencesManager().getClientPreferences().isCompletionSnippetsSupported();
String replacement = prepareTemplate(buf.toString(), lineDelimiter, isSnippetSupported);
ci.setTextEdit(new TextEdit(range, replacement));
ci.setFilterText(JAVA_DOC_COMMENT);
ci.setLabel(JAVA_DOC_COMMENT);
ci.setSortText(SortTextHelper.convertRelevance(0));
ci.setKind(CompletionItemKind.Snippet);
ci.setInsertTextFormat(isSnippetSupported ? InsertTextFormat.Snippet : InsertTextFormat.PlainText);
String documentation = prepareTemplate(buf.toString(), lineDelimiter, false);
if (documentation.indexOf(lineDelimiter) == 0) {
documentation = documentation.replaceFirst(lineDelimiter, "");
}
ci.setDocumentation(documentation);
Map<String, String> data = new HashMap<>(3);
data.put(CompletionResolveHandler.DATA_FIELD_URI, JDTUtils.toURI(cu));
data.put(CompletionResolveHandler.DATA_FIELD_REQUEST_ID, "0");
data.put(CompletionResolveHandler.DATA_FIELD_PROPOSAL_ID, "0");
ci.setData(data);
result.add(ci);
} catch (BadLocationException excp) {
// stop work
}
return result;
}
private String prepareTemplate(String text, String lineDelimiter, boolean addGap) {
boolean endWithLineDelimiter = text.endsWith(lineDelimiter);
String[] lines = text.split(lineDelimiter);
StringBuilder buf = new StringBuilder();
for (int i = 0; i < lines.length; i++) {
String line = lines[i];
if (addGap) {
String stripped = StringUtils.stripStart(line, WHITESPACES);
if (stripped.startsWith(ASTERISK)) {
if (!stripped.equals(ASTERISK)) {
int index = line.indexOf(ASTERISK);
buf.append(line.substring(0, index + 1));
buf.append(" ${0}");
buf.append(lineDelimiter);
}
addGap = false;
}
}
buf.append(StringUtils.stripEnd(line, WHITESPACES));
if (i < lines.length - 1 || endWithLineDelimiter) {
buf.append(lineDelimiter);
}
}
return buf.toString();
}
private IRegion findPrefixRange(IDocument document, IRegion line) throws BadLocationException {
int lineOffset = line.getOffset();
int lineEnd = lineOffset + line.getLength();
int indentEnd = findEndOfWhiteSpace(document, lineOffset, lineEnd);
if (indentEnd < lineEnd && document.getChar(indentEnd) == '*') {
indentEnd++;
while (indentEnd < lineEnd && document.getChar(indentEnd) == ' ') {
indentEnd++;
}
}
return new Region(lineOffset, indentEnd - lineOffset);
}
private int findEndOfWhiteSpace(IDocument document, int offset, int end) throws BadLocationException {
while (offset < end) {
char c = document.getChar(offset);
if (c != ' ' && c != '\t') {
return offset;
}
offset++;
}
return end;
}
private boolean hasEndJavadoc(IDocument document, int offset) throws BadLocationException {
int pos = -1;
while (offset < document.getLength()) {
char c = document.getChar(offset);
if (!Character.isWhitespace(c) && !(c == '*')) {
pos = offset;
break;
}
offset++;
}
if (document.getLength() >= pos + 2 && document.get(pos - 1, 2).equals("*/")) {
return true;
}
return false;
}
private String createJavaDocTags(IDocument document, int offset, String indentation, String lineDelimiter, ICompilationUnit unit) throws CoreException, BadLocationException {
IJavaElement element = unit.getElementAt(offset);
if (element == null) {
return null;
}
switch (element.getElementType()) {
case IJavaElement.TYPE:
return createTypeTags(document, offset, indentation, lineDelimiter, (IType) element);
case IJavaElement.METHOD:
return createMethodTags(document, offset, indentation, lineDelimiter, (IMethod) element);
default:
return null;
}
}
private String createTypeTags(IDocument document, int offset, String indentation, String lineDelimiter, IType type) throws CoreException, BadLocationException {
if (!accept(offset, type)) {
return null;
}
String[] typeParamNames = StubUtility.getTypeParameterNames(type.getTypeParameters());
String comment = CodeGeneration.getTypeComment(type.getCompilationUnit(), type.getTypeQualifiedName('.'), typeParamNames, lineDelimiter);
if (comment != null) {
return prepareTemplateComment(comment.trim(), indentation, type.getJavaProject(), lineDelimiter);
}
return null;
}
private boolean accept(int offset, IMember member) throws JavaModelException {
ISourceRange nameRange = member.getNameRange();
if (nameRange == null) {
return false;
}
int srcOffset = nameRange.getOffset();
return srcOffset > offset;
}
private String createMethodTags(IDocument document, int offset, String indentation, String lineDelimiter, IMethod method) throws CoreException, BadLocationException {
if (!accept(offset, method)) {
return null;
}
IMethod inheritedMethod = getInheritedMethod(method);
String comment = CodeGeneration.getMethodComment(method, inheritedMethod, lineDelimiter);
if (comment != null) {
comment = comment.trim();
boolean javadocComment = comment.startsWith("/**"); //$NON-NLS-1$
if (javadocComment) {
return prepareTemplateComment(comment, indentation, method.getJavaProject(), lineDelimiter);<|fim▁hole|>
private String prepareTemplateComment(String comment, String indentation, IJavaProject project, String lineDelimiter) {
// trim comment start and end if any
if (comment.endsWith("*/")) {
comment = comment.substring(0, comment.length() - 2);
}
comment = comment.trim();
if (comment.startsWith("/*")) { //$NON-NLS-1$
if (comment.length() > 2 && comment.charAt(2) == '*') {
comment = comment.substring(3); // remove '/**'
} else {
comment = comment.substring(2); // remove '/*'
}
}
// trim leading spaces, but not new lines
int nonSpace = 0;
int len = comment.length();
while (nonSpace < len && Character.getType(comment.charAt(nonSpace)) == Character.SPACE_SEPARATOR) {
nonSpace++;
}
comment = comment.substring(nonSpace);
return comment;
}
private IMethod getInheritedMethod(IMethod method) throws JavaModelException {
IType declaringType = method.getDeclaringType();
MethodOverrideTester tester = SuperTypeHierarchyCache.getMethodOverrideTester(declaringType);
return tester.findOverriddenMethod(method, true);
}
}<|fim▁end|> | }
}
return null;
} |
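The trimming in prepareTemplateComment is the fiddly part of the class above; a small Python sketch of the same steps, for illustration only:

# Python sketch mirroring prepareTemplateComment: drop '*/', then '/**' or '/*',
# then strip leading spaces (but not newlines).
def trim_javadoc(comment):
    if comment.endswith('*/'):
        comment = comment[:-2]
    comment = comment.strip()
    if comment.startswith('/*'):
        comment = comment[3:] if comment[2:3] == '*' else comment[2:]
    return comment.lstrip(' ')

assert trim_javadoc('/** Returns the foo. */') == 'Returns the foo.'
assert trim_javadoc('/* plain comment */') == 'plain comment'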
<|file_name|>authorized_keys.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, print_function
import os
import pwd
import grp
import sys
import subprocess
from .command import Command
class AuthorizedKeysCommand(Command):
"""
Get authorized keys for a user using NSS and SSSD.
"""
@staticmethod
def configure_parser(parser):
"""
Configure an argument parser with arguments for this command.
"""
parser.add_argument(
'-u', '--user',
default=os.getenv("USER", None),
required=True,
help="username")
parser.add_argument(
'--include-group',
action='store_true',
help="retrieve ssh keys for everyone in the user's primary group")
def __init__(self, config, args):
"""
Create the command.
"""
self.config = config
self.args = args
def run(self):
"""
Run the command.
"""
# verify the sssd ssh helper is available
if not os.path.exists("/bin/sss_ssh_authorizedkeys"):
print("can't locate sssd ssh helper!", file=sys.stderr)
sys.exit(1)
# determine the users we need to retrieve keys for
users = set([self.args.user])
if self.args.include_group:
try:
# retrieve the user's passwd entry
user_passwd = pwd.getpwnam(self.args.user)
except KeyError as e:
print(<|fim▁hole|>
try:
# retrieve the user's primary group
user_group = grp.getgrgid(user_passwd[3])
except KeyError as e:
print(
"failed to retrieve user's primary group: {0}".format(e),
file=sys.stderr)
sys.exit(1)
# update the list of users
users.update(user_group[3])
# retrieve user keys
for user in users:
try:
# call the SSSD SSH helper script
subprocess.check_call(["/bin/sss_ssh_authorizedkeys", user])
except subprocess.CalledProcessError:
# handle a non-zero exit code
print(
"failed to retrieve keys for user {0}".format(user),
file=sys.stderr)
sys.exit(1)<|fim▁end|> | "failed to retrieve user passwd entry: {0}".format(e),
file=sys.stderr)
sys.exit(1) |
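A minimal sketch of driving the command above directly through argparse; the username and the config=None argument are assumptions:

# Illustrative invocation of AuthorizedKeysCommand; config value is an assumption.
import argparse

parser = argparse.ArgumentParser()
AuthorizedKeysCommand.configure_parser(parser)
args = parser.parse_args(['-u', 'alice', '--include-group'])
AuthorizedKeysCommand(config=None, args=args).run()   # prints keys or exits 1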
<|file_name|>Home.js<|end_file_name|><|fim▁begin|>import React from 'react';
function Home(props) {
return (
<div id='react-home'>
<h1>BC</h1>
<h2>FreeCodeCamp Projects using React</h2>
</div>
);
}
<|fim▁hole|><|fim▁end|> | export default Home |
<|file_name|>helper.py<|end_file_name|><|fim▁begin|>"""A library of helper functions for the CherryPy test suite."""
import datetime
import io
import logging
import os
import re
import subprocess
import sys
import time
import unittest
import warnings
import portend
import pytest
import six
from cheroot.test import webtest
import cherrypy
from cherrypy._cpcompat import text_or_bytes, HTTPSConnection, ntob
from cherrypy.lib import httputil
from cherrypy.lib import gctools
log = logging.getLogger(__name__)
thisdir = os.path.abspath(os.path.dirname(__file__))
serverpem = os.path.join(os.getcwd(), thisdir, 'test.pem')
class Supervisor(object):
"""Base class for modeling and controlling servers during testing."""
def __init__(self, **kwargs):
for k, v in kwargs.items():
            if k == 'port':
                v = int(v)
            setattr(self, k, v)
def log_to_stderr(msg, level):
return sys.stderr.write(msg + os.linesep)
class LocalSupervisor(Supervisor):
"""Base class for modeling/controlling servers which run in the same
process.
When the server side runs in a different process, start/stop can dump all
state between each test module easily. When the server side runs in the
same process as the client, however, we have to do a bit more work to
ensure config and mounted apps are reset between tests.
"""
using_apache = False
using_wsgi = False
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
cherrypy.server.httpserver = self.httpserver_class
# This is perhaps the wrong place for this call but this is the only
# place that i've found so far that I KNOW is early enough to set this.
cherrypy.config.update({'log.screen': False})
engine = cherrypy.engine
if hasattr(engine, 'signal_handler'):
engine.signal_handler.subscribe()
if hasattr(engine, 'console_control_handler'):
engine.console_control_handler.subscribe()
def start(self, modulename=None):
"""Load and start the HTTP server."""
if modulename:
# Unhook httpserver so cherrypy.server.start() creates a new
# one (with config from setup_server, if declared).
cherrypy.server.httpserver = None
cherrypy.engine.start()
self.sync_apps()
def sync_apps(self):
"""Tell the server about any apps which the setup functions mounted."""
pass
def stop(self):
td = getattr(self, 'teardown', None)
if td:
td()
cherrypy.engine.exit()
servers_copy = list(six.iteritems(getattr(cherrypy, 'servers', {})))
for name, server in servers_copy:
server.unsubscribe()
del cherrypy.servers[name]
class NativeServerSupervisor(LocalSupervisor):
"""Server supervisor for the builtin HTTP server."""
httpserver_class = 'cherrypy._cpnative_server.CPHTTPServer'
using_apache = False
using_wsgi = False
def __str__(self):
return 'Builtin HTTP Server on %s:%s' % (self.host, self.port)
class LocalWSGISupervisor(LocalSupervisor):
"""Server supervisor for the builtin WSGI server."""
httpserver_class = 'cherrypy._cpwsgi_server.CPWSGIServer'
using_apache = False
using_wsgi = True
def __str__(self):
return 'Builtin WSGI Server on %s:%s' % (self.host, self.port)
def sync_apps(self):
"""Hook a new WSGI app into the origin server."""
cherrypy.server.httpserver.wsgi_app = self.get_app()
def get_app(self, app=None):
"""Obtain a new (decorated) WSGI app to hook into the origin server."""
if app is None:
app = cherrypy.tree
if self.validate:
try:
from wsgiref import validate
except ImportError:
warnings.warn(
'Error importing wsgiref. The validator will not run.')
else:
# wraps the app in the validator
app = validate.validator(app)
return app
def get_cpmodpy_supervisor(**options):
from cherrypy.test import modpy
sup = modpy.ModPythonSupervisor(**options)
sup.template = modpy.conf_cpmodpy
return sup
def get_modpygw_supervisor(**options):
from cherrypy.test import modpy
sup = modpy.ModPythonSupervisor(**options)
sup.template = modpy.conf_modpython_gateway
sup.using_wsgi = True
return sup
def get_modwsgi_supervisor(**options):
from cherrypy.test import modwsgi
return modwsgi.ModWSGISupervisor(**options)
def get_modfcgid_supervisor(**options):
from cherrypy.test import modfcgid
return modfcgid.ModFCGISupervisor(**options)
def get_modfastcgi_supervisor(**options):
from cherrypy.test import modfastcgi
return modfastcgi.ModFCGISupervisor(**options)
def get_wsgi_u_supervisor(**options):
cherrypy.server.wsgi_version = ('u', 0)
return LocalWSGISupervisor(**options)
class CPWebCase(webtest.WebCase):
script_name = ''
scheme = 'http'
available_servers = {'wsgi': LocalWSGISupervisor,
'wsgi_u': get_wsgi_u_supervisor,
'native': NativeServerSupervisor,
'cpmodpy': get_cpmodpy_supervisor,
'modpygw': get_modpygw_supervisor,
'modwsgi': get_modwsgi_supervisor,
'modfcgid': get_modfcgid_supervisor,
'modfastcgi': get_modfastcgi_supervisor,
}
default_server = 'wsgi'
@classmethod
def _setup_server(cls, supervisor, conf):
v = sys.version.split()[0]
log.info('Python version used to run this test script: %s' % v)
log.info('CherryPy version: %s' % cherrypy.__version__)
if supervisor.scheme == 'https':
ssl = ' (ssl)'
else:
ssl = ''
log.info('HTTP server version: %s%s' % (supervisor.protocol, ssl))
log.info('PID: %s' % os.getpid())
cherrypy.server.using_apache = supervisor.using_apache
cherrypy.server.using_wsgi = supervisor.using_wsgi
if sys.platform[:4] == 'java':
cherrypy.config.update({'server.nodelay': False})
if isinstance(conf, text_or_bytes):
parser = cherrypy.lib.reprconf.Parser()
conf = parser.dict_from_file(conf).get('global', {})
else:
conf = conf or {}
baseconf = conf.copy()
baseconf.update({'server.socket_host': supervisor.host,
'server.socket_port': supervisor.port,
'server.protocol_version': supervisor.protocol,
'environment': 'test_suite',
})
if supervisor.scheme == 'https':
# baseconf['server.ssl_module'] = 'builtin'
baseconf['server.ssl_certificate'] = serverpem
baseconf['server.ssl_private_key'] = serverpem
# helper must be imported lazily so the coverage tool
# can run against module-level statements within cherrypy.
# Also, we have to do "from cherrypy.test import helper",
# exactly like each test module does, because a relative import
# would stick a second instance of webtest in sys.modules,
# and we wouldn't be able to globally override the port anymore.
if supervisor.scheme == 'https':
webtest.WebCase.HTTP_CONN = HTTPSConnection
return baseconf
@classmethod
def setup_class(cls):
''
# Creates a server
conf = {
'scheme': 'http',
'protocol': 'HTTP/1.1',
'port': 54583,
'host': '127.0.0.1',
'validate': False,
'server': 'wsgi',
}
supervisor_factory = cls.available_servers.get(
conf.get('server', 'wsgi'))
if supervisor_factory is None:
raise RuntimeError('Unknown server in config: %s' % conf['server'])
supervisor = supervisor_factory(**conf)
# Copied from "run_test_suite"
cherrypy.config.reset()
baseconf = cls._setup_server(supervisor, conf)
cherrypy.config.update(baseconf)
setup_client()
if hasattr(cls, 'setup_server'):
# Clear the cherrypy tree and clear the wsgi server so that
# it can be updated with the new root
cherrypy.tree = cherrypy._cptree.Tree()
cherrypy.server.httpserver = None
cls.setup_server()
# Add a resource for verifying there are no refleaks
# to *every* test class.
cherrypy.tree.mount(gctools.GCRoot(), '/gc')
cls.do_gc_test = True
supervisor.start(cls.__module__)
cls.supervisor = supervisor
@classmethod
def teardown_class(cls):
''
if hasattr(cls, 'setup_server'):
cls.supervisor.stop()
do_gc_test = False
def test_gc(self):
if not self.do_gc_test:
return
self.getPage('/gc/stats')
try:
self.assertBody('Statistics:')
except Exception:
'Failures occur intermittently. See #1420'
def prefix(self):
return self.script_name.rstrip('/')
def base(self):
if ((self.scheme == 'http' and self.PORT == 80) or
(self.scheme == 'https' and self.PORT == 443)):
port = ''
else:
port = ':%s' % self.PORT
return '%s://%s%s%s' % (self.scheme, self.HOST, port,
self.script_name.rstrip('/'))
def exit(self):
sys.exit()
def getPage(self, url, headers=None, method='GET', body=None,
protocol=None, raise_subcls=None):
"""Open the url. Return status, headers, body.
        `raise_subcls` must be a tuple of exception classes (or a single
        exception class) that should not be treated as a socket.error,
        even if they are subclasses of socket.error, and therefore should
        not trigger a connection retry.
"""
if self.script_name:
url = httputil.urljoin(self.script_name, url)
return webtest.WebCase.getPage(self, url, headers, method, body,
protocol, raise_subcls)
def skip(self, msg='skipped '):
pytest.skip(msg)
def assertErrorPage(self, status, message=None, pattern=''):
"""Compare the response body with a built in error page.
The function will optionally look for the regexp pattern,
within the exception embedded in the error page."""
# This will never contain a traceback
page = cherrypy._cperror.get_error_page(status, message=message)
# First, test the response body without checking the traceback.
# Stick a match-all group (.*) in to grab the traceback.
def esc(text):
return re.escape(ntob(text))
epage = re.escape(page)
epage = epage.replace(
esc('<pre id="traceback"></pre>'),
esc('<pre id="traceback">') + b'(.*)' + esc('</pre>'))
m = re.match(epage, self.body, re.DOTALL)
if not m:
self._handlewebError(
'Error page does not match; expected:\n' + page)
return
# Now test the pattern against the traceback
if pattern is None:
# Special-case None to mean that there should be *no* traceback.
if m and m.group(1):
self._handlewebError('Error page contains traceback')
else:
if (m is None) or (
not re.search(ntob(re.escape(pattern), self.encoding),
m.group(1))):
msg = 'Error page does not contain %s in traceback'
self._handlewebError(msg % repr(pattern))
date_tolerance = 2
def assertEqualDates(self, dt1, dt2, seconds=None):
"""Assert abs(dt1 - dt2) is within Y seconds."""
if seconds is None:
seconds = self.date_tolerance
if dt1 > dt2:
diff = dt1 - dt2
else:
diff = dt2 - dt1
if not diff < datetime.timedelta(seconds=seconds):
raise AssertionError('%r and %r are not within %r seconds.' %
(dt1, dt2, seconds))
def _test_method_sorter(_, x, y):
"""Monkeypatch the test sorter to always run test_gc last in each suite."""
if x == 'test_gc':
return 1
if y == 'test_gc':
return -1
if x > y:
return 1
if x < y:
return -1
return 0
unittest.TestLoader.sortTestMethodsUsing = _test_method_sorter
def setup_client():
"""Set up the WebCase classes to match the server's socket settings."""
webtest.WebCase.PORT = cherrypy.server.socket_port
webtest.WebCase.HOST = cherrypy.server.socket_host
if cherrypy.server.ssl_certificate:
CPWebCase.scheme = 'https'
# --------------------------- Spawning helpers --------------------------- #
class CPProcess(object):
pid_file = os.path.join(thisdir, 'test.pid')
config_file = os.path.join(thisdir, 'test.conf')
config_template = """[global]
server.socket_host: '%(host)s'
server.socket_port: %(port)s
checker.on: False
log.screen: False
log.error_file: r'%(error_log)s'
log.access_file: r'%(access_log)s'
%(ssl)s
%(extra)s
"""
error_log = os.path.join(thisdir, 'test.error.log')
access_log = os.path.join(thisdir, 'test.access.log')
def __init__(self, wait=False, daemonize=False, ssl=False,
socket_host=None, socket_port=None):
self.wait = wait
self.daemonize = daemonize
self.ssl = ssl
self.host = socket_host or cherrypy.server.socket_host
self.port = socket_port or cherrypy.server.socket_port
def write_conf(self, extra=''):
if self.ssl:
serverpem = os.path.join(thisdir, 'test.pem')
ssl = """
server.ssl_certificate: r'%s'
server.ssl_private_key: r'%s'
""" % (serverpem, serverpem)
else:
ssl = ''
conf = self.config_template % {<|fim▁hole|> 'error_log': self.error_log,
'access_log': self.access_log,
'ssl': ssl,
'extra': extra,
}
with io.open(self.config_file, 'w', encoding='utf-8') as f:
f.write(six.text_type(conf))
def start(self, imports=None):
"""Start cherryd in a subprocess."""
portend.free(self.host, self.port, timeout=1)
args = [
'-m',
'cherrypy',
'-c', self.config_file,
'-p', self.pid_file,
]
r"""
Command for running cherryd server with autoreload enabled
Using
```
['-c',
"__requires__ = 'CherryPy'; \
import pkg_resources, re, sys; \
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]); \
sys.exit(\
pkg_resources.load_entry_point(\
'CherryPy', 'console_scripts', 'cherryd')())"]
```
doesn't work as it's impossible to reconstruct the `-c`'s contents.
Ref: https://github.com/cherrypy/cherrypy/issues/1545
"""
if not isinstance(imports, (list, tuple)):
imports = [imports]
for i in imports:
if i:
args.append('-i')
args.append(i)
if self.daemonize:
args.append('-d')
env = os.environ.copy()
# Make sure we import the cherrypy package in which this module is
# defined.
grandparentdir = os.path.abspath(os.path.join(thisdir, '..', '..'))
if env.get('PYTHONPATH', ''):
env['PYTHONPATH'] = os.pathsep.join(
(grandparentdir, env['PYTHONPATH']))
else:
env['PYTHONPATH'] = grandparentdir
self._proc = subprocess.Popen([sys.executable] + args, env=env)
if self.wait:
self.exit_code = self._proc.wait()
else:
portend.occupied(self.host, self.port, timeout=5)
# Give the engine a wee bit more time to finish STARTING
if self.daemonize:
time.sleep(2)
else:
time.sleep(1)
def get_pid(self):
if self.daemonize:
return int(open(self.pid_file, 'rb').read())
return self._proc.pid
def join(self):
"""Wait for the process to exit."""
if self.daemonize:
return self._join_daemon()
self._proc.wait()
def _join_daemon(self):
try:
try:
# Mac, UNIX
os.wait()
except AttributeError:
# Windows
try:
pid = self.get_pid()
except IOError:
# Assume the subprocess deleted the pidfile on shutdown.
pass
else:
os.waitpid(pid, 0)
except OSError:
x = sys.exc_info()[1]
if x.args != (10, 'No child processes'):
raise<|fim▁end|> | 'host': self.host,
'port': self.port, |
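For context, a typical consumer of CPWebCase follows the setup_server pattern checked in setup_class above; the app and assertions in this sketch are illustrative, not from this file:

# Illustrative CPWebCase subclass; Root and the test body are hypothetical.
import cherrypy
from cherrypy.test import helper

class SimpleCPTest(helper.CPWebCase):
    @staticmethod
    def setup_server():
        class Root(object):
            @cherrypy.expose
            def echo(self, msg):
                return msg
        cherrypy.tree.mount(Root(), '/')

    def test_echo(self):
        self.getPage('/echo?msg=hello')
        self.assertStatus('200 OK')
        self.assertBody('hello')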
<|file_name|>EDMLoop_neg_slope.py<|end_file_name|><|fim▁begin|># Import a whole load of stuff
from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
from Analysis.EDM import *
from DAQ.Environment import *
from EDMConfig import *
def saveBlockConfig(path, config):
fs = FileStream(path, FileMode.Create)
s = XmlSerializer(BlockConfig)
s.Serialize(fs,config)
fs.Close()
def loadBlockConfig(path):
fs = FileStream(path, FileMode.Open)
s = XmlSerializer(BlockConfig)
bc = s.Deserialize(fs)
fs.Close()
return bc
def writeLatestBlockNotificationFile(cluster, blockIndex):
fs = FileStream(Environs.FileSystem.Paths["settingsPath"] + "\\BlockHead\\latestBlock.txt", FileMode.Create)
sw = StreamWriter(fs)
sw.WriteLine(cluster + "\t" + str(blockIndex))
sw.Close()
fs.Close()
def checkYAGAndFix():
interlockFailed = hc.YAGInterlockFailed;
if (interlockFailed):
bh.StopPattern();
bh.StartPattern();
def printWaveformCode(bc, name):
print(name + ": " + str(bc.GetModulationByName(name).Waveform.Code) + " -- " + str(bc.GetModulationByName(name).Waveform.Inverted))
def prompt(text):
sys.stdout.write(text)
return sys.stdin.readline().strip()
def measureParametersAndMakeBC(cluster, eState, bState, rfState, scramblerV, probePolAngle, pumpPolAngle):
fileSystem = Environs.FileSystem
print("Measuring parameters ...")
bh.StopPattern()
hc.UpdateRFPowerMonitor()
hc.UpdateRFFrequencyMonitor()
bh.StartPattern()
hc.UpdateBCurrentMonitor()
hc.UpdateVMonitor()
hc.UpdateI2AOMFreqMonitor()
print("V plus: " + str(hc.CPlusMonitorVoltage * hc.CPlusMonitorScale))
print("V minus: " + str(hc.CMinusMonitorVoltage * hc.CMinusMonitorScale))
print("Bias: " + str(hc.BiasCurrent))
print("B step: " + str(abs(hc.FlipStepCurrent)))
print("DB step: " + str(abs(hc.CalStepCurrent)))
# load a default BlockConfig and customise it appropriately
settingsPath = fileSystem.Paths["settingsPath"] + "\\BlockHead\\"
bc = loadBlockConfig(settingsPath + "default.xml")
bc.Settings["cluster"] = cluster
bc.Settings["eState"] = eState
bc.Settings["bState"] = bState
bc.Settings["rfState"] = rfState
bc.Settings["phaseScramblerV"] = scramblerV
bc.Settings["probePolarizerAngle"] = probePolAngle
bc.Settings["pumpPolarizerAngle"] = pumpPolAngle
bc.Settings["ePlus"] = hc.CPlusMonitorVoltage * hc.CPlusMonitorScale
bc.Settings["eMinus"] = hc.CMinusMonitorVoltage * hc.CMinusMonitorScale
bc.GetModulationByName("B").Centre = (hc.BiasCurrent)/1000
bc.GetModulationByName("B").Step = abs(hc.FlipStepCurrent)/1000
bc.GetModulationByName("DB").Step = abs(hc.CalStepCurrent)/1000
# these next 3, seemingly redundant, lines are to preserve backward compatibility
bc.GetModulationByName("B").PhysicalCentre = (hc.BiasCurrent)/1000
bc.GetModulationByName("B").PhysicalStep = abs(hc.FlipStepCurrent)/1000
bc.GetModulationByName("DB").PhysicalStep = abs(hc.CalStepCurrent)/1000
bc.GetModulationByName("RF1A").Centre = hc.RF1AttCentre
bc.GetModulationByName("RF1A").Step = hc.RF1AttStep
bc.GetModulationByName("RF1A").PhysicalCentre = hc.RF1PowerCentre
bc.GetModulationByName("RF1A").PhysicalStep = hc.RF1PowerStep
bc.GetModulationByName("RF2A").Centre = hc.RF2AttCentre
bc.GetModulationByName("RF2A").Step = hc.RF2AttStep
bc.GetModulationByName("RF2A").PhysicalCentre = hc.RF2PowerCentre
bc.GetModulationByName("RF2A").PhysicalStep = hc.RF2PowerStep
bc.GetModulationByName("RF1F").Centre = hc.RF1FMCentre
bc.GetModulationByName("RF1F").Step = hc.RF1FMStep
bc.GetModulationByName("RF1F").PhysicalCentre = hc.RF1FrequencyCentre
bc.GetModulationByName("RF1F").PhysicalStep = hc.RF1FrequencyStep
bc.GetModulationByName("RF2F").Centre = hc.RF2FMCentre
bc.GetModulationByName("RF2F").Step = hc.RF2FMStep
bc.GetModulationByName("RF2F").PhysicalCentre = hc.RF2FrequencyCentre
bc.GetModulationByName("RF2F").PhysicalStep = hc.RF2FrequencyStep
bc.GetModulationByName("LF1").Centre = hc.FLPZTVoltage
bc.GetModulationByName("LF1").Step = hc.FLPZTStep
bc.GetModulationByName("LF1").PhysicalCentre = hc.I2LockAOMFrequencyCentre
bc.GetModulationByName("LF1").PhysicalStep = hc.I2LockAOMFrequencyStep
# generate the waveform codes
print("Generating waveform codes ...")
eWave = bc.GetModulationByName("E").Waveform
eWave.Name = "E"
lf1Wave = bc.GetModulationByName("LF1").Waveform
lf1Wave.Name = "LF1"
ws = WaveformSetGenerator.GenerateWaveforms( (eWave, lf1Wave), ("B","DB","PI","RF1A","RF2A","RF1F","RF2F") )
bc.GetModulationByName("B").Waveform = ws["B"]
bc.GetModulationByName("DB").Waveform = ws["DB"]
bc.GetModulationByName("PI").Waveform = ws["PI"]
bc.GetModulationByName("RF1A").Waveform = ws["RF1A"]
bc.GetModulationByName("RF2A").Waveform = ws["RF2A"]
bc.GetModulationByName("RF1F").Waveform = ws["RF1F"]
bc.GetModulationByName("RF2F").Waveform = ws["RF2F"]
# change the inversions of the static codes E and LF1
bc.GetModulationByName("E").Waveform.Inverted = WaveformSetGenerator.RandomBool()
bc.GetModulationByName("LF1").Waveform.Inverted = WaveformSetGenerator.RandomBool()
# print the waveform codes
# printWaveformCode(bc, "E")
# printWaveformCode(bc, "B")
# printWaveformCode(bc, "DB")
# printWaveformCode(bc, "PI")
# printWaveformCode(bc, "RF1A")
# printWaveformCode(bc, "RF2A")
# printWaveformCode(bc, "RF1F")
# printWaveformCode(bc, "RF2F")
# printWaveformCode(bc, "LF1")
# store e-switch info in block config
print("Storing E switch parameters ...")
bc.Settings["eRampDownTime"] = hc.ERampDownTime
bc.Settings["eRampDownDelay"] = hc.ERampDownDelay
bc.Settings["eBleedTime"] = hc.EBleedTime
bc.Settings["eSwitchTime"] = hc.ESwitchTime
bc.Settings["eRampUpTime"] = hc.ERampUpTime
bc.Settings["eRampUpDelay"] = hc.ERampUpDelay
# this is for legacy analysis compatibility
bc.Settings["eDischargeTime"] = hc.ERampDownTime + hc.ERampDownDelay
bc.Settings["eChargeTime"] = hc.ERampUpTime + hc.ERampUpDelay
# store the E switch asymmetry in the block
bc.Settings["E0PlusBoost"] = hc.E0PlusBoost
return bc
# lock gains
# microamps of current per volt of control input
kSteppingBiasCurrentPerVolt = 1000.0
# max change in the b-bias voltage per block
kBMaxChange = 0.05
# volts of rf*a input required per cal's worth of offset
kRFAVoltsPerCal = 3.2
kRFAMaxChange = 0.1
# volts of rf*f input required per cal's worth of offset
kRFFVoltsPerCal = 8
kRFFMaxChange = 0.1
def updateLocks(bState):
pmtChannelValues = bh.DBlock.ChannelValues[0]
# note the weird python syntax for a one element list
sigIndex = pmtChannelValues.GetChannelIndex(("SIG",))
sigValue = pmtChannelValues.GetValue(sigIndex)
bIndex = pmtChannelValues.GetChannelIndex(("B",))
bValue = pmtChannelValues.GetValue(bIndex)
#bError = pmtChannelValues.GetError(bIndex)
dbIndex = pmtChannelValues.GetChannelIndex(("DB",))
dbValue = pmtChannelValues.GetValue(dbIndex)
#dbError = pmtChannelValues.GetError(dbIndex)
rf1aIndex = pmtChannelValues.GetChannelIndex(("RF1A","DB"))
rf1aValue = pmtChannelValues.GetValue(rf1aIndex)
#rf1aError = pmtChannelValues.GetError(rf1aIndex)
rf2aIndex = pmtChannelValues.GetChannelIndex(("RF2A","DB"))
rf2aValue = pmtChannelValues.GetValue(rf2aIndex)
#rf2aError = pmtChannelValues.GetError(rf2aIndex)
rf1fIndex = pmtChannelValues.GetChannelIndex(("RF1F","DB"))
rf1fValue = pmtChannelValues.GetValue(rf1fIndex)
#rf1fError = pmtChannelValues.GetError(rf1fIndex)
rf2fIndex = pmtChannelValues.GetChannelIndex(("RF2F","DB"))
rf2fValue = pmtChannelValues.GetValue(rf2fIndex)
#rf2fError = pmtChannelValues.GetError(rf2fIndex)
lf1Index = pmtChannelValues.GetChannelIndex(("LF1",))
lf1Value = pmtChannelValues.GetValue(lf1Index)
#lf1Error = pmtChannelValues.GetError(lf1Index)
lf1dbIndex = pmtChannelValues.GetChannelIndex(("LF1","DB"))
lf1dbValue = pmtChannelValues.GetValue(lf1dbIndex)
print "SIG: " + str(sigValue)
print "B: " + str(bValue) + " DB: " + str(dbValue)
print "RF1A: " + str(rf1aValue) + " RF2A: " + str(rf2aValue)
print "RF1F: " + str(rf1fValue) + " RF2F: " + str(rf2fValue)
print "LF1: " + str(lf1Value) + " LF1.DB: " + str(lf1dbValue)
# B bias lock
# the sign of the feedback depends on the b-state
if bState:
feedbackSign = 1
else:
feedbackSign = -1
deltaBias = - (1.0/8.0) * feedbackSign * (hc.CalStepCurrent * (bValue / dbValue)) / kSteppingBiasCurrentPerVolt
deltaBias = windowValue(deltaBias, -kBMaxChange, kBMaxChange)
print "Attempting to change stepping B bias by " + str(deltaBias) + " V."
newBiasVoltage = windowValue( hc.SteppingBiasVoltage - deltaBias, 0, 5)
hc.SetSteppingBBiasVoltage( newBiasVoltage )
# RFA locks
deltaRF1A = - (1.0/3.0) * (rf1aValue / dbValue) * kRFAVoltsPerCal
deltaRF1A = windowValue(deltaRF1A, -kRFAMaxChange, kRFAMaxChange)
print "Attempting to change RF1A by " + str(deltaRF1A) + " V."
newRF1A = windowValue( hc.RF1AttCentre - deltaRF1A, hc.RF1AttStep, 5 - hc.RF1AttStep)
hc.SetRF1AttCentre( newRF1A )
#
deltaRF2A = - (1.0/3.0) * (rf2aValue / dbValue) * kRFAVoltsPerCal
deltaRF2A = windowValue(deltaRF2A, -kRFAMaxChange, kRFAMaxChange)
print "Attempting to change RF2A by " + str(deltaRF2A) + " V."
newRF2A = windowValue( hc.RF2AttCentre - deltaRF2A, hc.RF2AttStep, 5 - hc.RF2AttStep )
hc.SetRF2AttCentre( newRF2A )
# RFF locks
deltaRF1F = - (1.0/4.0) * (rf1fValue / dbValue) * kRFFVoltsPerCal
deltaRF1F = windowValue(deltaRF1F, -kRFFMaxChange, kRFFMaxChange)
print "Attempting to change RF1F by " + str(deltaRF1F) + " V."
newRF1F = windowValue( hc.RF1FMCentre - deltaRF1F, hc.RF1FMStep, 5 - hc.RF1FMStep)
hc.SetRF1FMCentre( newRF1F )
#
deltaRF2F = - (1.0/4.0) * (rf2fValue / dbValue) * kRFFVoltsPerCal
deltaRF2F = windowValue(deltaRF2F, -kRFFMaxChange, kRFFMaxChange)
print "Attempting to change RF2F by " + str(deltaRF2F) + " V."
newRF2F = windowValue( hc.RF2FMCentre - deltaRF2F, hc.RF2FMStep, 5 - hc.RF2FMStep )
hc.SetRF2FMCentre( newRF2F )
# Laser frequency lock (-ve multiplier in f0 mode and +ve in f1)
deltaLF1 = 1.25 * (lf1Value / dbValue) # I think this should be +ve (but that doesn't work)
deltaLF1 = windowValue(deltaLF1, -0.1, 0.1)
print "Attempting to change LF1 by " + str(deltaLF1) + " V."
newLF1 = windowValue( hc.FLPZTVoltage - deltaLF1, hc.FLPZTStep, 5 - hc.FLPZTStep )
hc.SetFLPZTVoltage( newLF1 )
def windowValue(value, minValue, maxValue):
if ( (value < maxValue) & (value > minValue) ):
return value
else:
if (value < minValue):
return minValue
else:
return maxValue
kTargetRotationPeriod = 10
kReZeroLeakageMonitorsPeriod = 10
r = Random()
def EDMGo():
# Setup
f = None
fileSystem = Environs.FileSystem
dataPath = fileSystem.GetDataDirectory(fileSystem.Paths["edmDataPath"])
settingsPath = fileSystem.Paths["settingsPath"] + "\\BlockHead\\"
print("Data directory is : " + dataPath)
print("")
suggestedClusterName = fileSystem.GenerateNextDataFileName()
sm.SelectProfile("Scan B")
# User inputs data
cluster = prompt("Cluster name [" + suggestedClusterName +"]: ")
if cluster == "":
cluster = suggestedClusterName
print("Using cluster " + suggestedClusterName)
eState = hc.EManualState
print("E-state: " + str(eState))
bState = hc.BManualState
print("B-state: " + str(bState))
rfState = hc.RFManualState
print("rf-state: " + str(rfState))
# this is to make sure the B current monitor is in a sensible state
hc.UpdateBCurrentMonitor()
# randomise Ramsey phase
scramblerV = 0.724774 * r.NextDouble()
hc.SetScramblerVoltage(scramblerV)
# randomise polarizations
probePolAngle = 360.0 * r.NextDouble()
hc.SetProbePolarizerAngle(probePolAngle)
pumpPolAngle = 360.0 * r.NextDouble()
hc.SetPumpPolarizerAngle(pumpPolAngle)
bc = measureParametersAndMakeBC(cluster, eState, bState, rfState, scramblerV, probePolAngle, pumpPolAngle)
# loop and take data
blockIndex = 0
maxBlockIndex = 10000
while blockIndex < maxBlockIndex:
<|fim▁hole|> # save the block config and load into blockhead
print("Saving temp config.")
bc.Settings["clusterIndex"] = blockIndex
tempConfigFile ='%(p)stemp%(c)s_%(i)s.xml' % {'p': settingsPath, 'c': cluster, 'i': blockIndex}
saveBlockConfig(tempConfigFile, bc)
System.Threading.Thread.Sleep(500)
print("Loading temp config.")
bh.LoadConfig(tempConfigFile)
# take the block and save it
print("Running ...")
bh.AcquireAndWait()
print("Done.")
blockPath = '%(p)s%(c)s_%(i)s.zip' % {'p': dataPath, 'c': cluster, 'i': blockIndex}
bh.SaveBlock(blockPath)
print("Saved block "+ str(blockIndex) + ".")
# give mma a chance to analyse the block
print("Notifying Mathematica and waiting ...")
writeLatestBlockNotificationFile(cluster, blockIndex)
System.Threading.Thread.Sleep(5000)
print("Done.")
# increment and loop
File.Delete(tempConfigFile)
checkYAGAndFix()
blockIndex = blockIndex + 1
updateLocks(bState)
# randomise Ramsey phase
scramblerV = 0.724774 * r.NextDouble()
hc.SetScramblerVoltage(scramblerV)
# randomise polarizations
probePolAngle = 360.0 * r.NextDouble()
hc.SetProbePolarizerAngle(probePolAngle)
pumpPolAngle = 360.0 * r.NextDouble()
hc.SetPumpPolarizerAngle(pumpPolAngle)
bc = measureParametersAndMakeBC(cluster, eState, bState, rfState, scramblerV, probePolAngle, pumpPolAngle)
hc.StepTarget(1)
# do things that need periodically doing
# if ((blockIndex % kTargetRotationPeriod) == 0):
# print("Rotating target.")
# hc.StepTarget(10)
pmtChannelValues = bh.DBlock.ChannelValues[0]
dbIndex = pmtChannelValues.GetChannelIndex(("DB",))
dbValue = pmtChannelValues.GetValue(dbIndex)
if (dbValue < 8.4):
print("Dodgy spot target rotation.")
hc.StepTarget(5)
if ((blockIndex % kReZeroLeakageMonitorsPeriod) == 0):
print("Recalibrating leakage monitors.")
hc.EnableEField( False )
System.Threading.Thread.Sleep(10000)
hc.EnableBleed( True )
System.Threading.Thread.Sleep(1000)
hc.EnableBleed( False )
System.Threading.Thread.Sleep(5000)
hc.CalibrateIMonitors()
hc.EnableEField( True )
bh.StopPattern()
def run_script():
EDMGo()<|fim▁end|> | print("Acquiring block " + str(blockIndex) + " ...")
|
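The lock updates in updateLocks above are clamped proportional corrections; a minimal numeric sketch of one of them (the channel ratio is made up):

# Illustrative clamped feedback, mirroring the RF1A lock arithmetic above.
def window_value(value, lo, hi):
    return min(max(value, lo), hi)

kRFAVoltsPerCal, kRFAMaxChange = 3.2, 0.1
rf1a_over_db = 0.25                                    # hypothetical channel ratio
delta = -(1.0 / 3.0) * rf1a_over_db * kRFAVoltsPerCal  # about -0.267 V requested
delta = window_value(delta, -kRFAMaxChange, kRFAMaxChange)
assert delta == -kRFAMaxChange                         # capped at 0.1 V per block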
<|file_name|>context.py<|end_file_name|><|fim▁begin|># for accessing babusca library.
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import smatrix<|fim▁hole|>import g2
import generators<|fim▁end|> | import scattering
import g1 |
<|file_name|>decorators.py<|end_file_name|><|fim▁begin|># This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from jormungandr.interfaces.v1.serializer import serialize_with
<|fim▁hole|>
def get_serializer(serpy):
return serialize_with(serpy)
def get_obj_serializer(obj):
return get_serializer(serpy=obj.output_type_serializer)<|fim▁end|> | |
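A hypothetical sketch of how get_obj_serializer relates to get_serializer; the serializer and resource classes below are assumptions, not part of this module:

# Hypothetical usage; PlaceSerializer and Places are assumed stand-ins.
import serpy

class PlaceSerializer(serpy.Serializer):
    name = serpy.StrField()

class Places(object):
    output_type_serializer = PlaceSerializer

decorated = get_obj_serializer(Places)   # same as get_serializer(serpy=PlaceSerializer)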
<|file_name|>settings.py<|end_file_name|><|fim▁begin|>import os
import django
from unipath import Path
BASE_DIR = Path(os.path.abspath(__file__))
BOOKTYPE_SITE_NAME = ''
BOOKTYPE_SITE_DIR = 'tests'
THIS_BOOKTYPE_SERVER = ''
BOOKTYPE_URL = ''
BOOKTYPE_ROOT = BASE_DIR.parent
STATIC_ROOT = BASE_DIR.parent.child("static")
STATIC_URL = '{}/static/'.format(BOOKTYPE_URL)
DATA_ROOT = BASE_DIR.parent.child("data")
DATA_URL = '{}/data/'.format(BOOKTYPE_URL)
MEDIA_ROOT = DATA_ROOT
MEDIA_URL = DATA_URL
# DEBUG
DEBUG = TEMPLATE_DEBUG = True
# PROFILE
PROFILE_ACTIVE = 'test'
if django.VERSION[:2] < (1, 6):
TEST_RUNNER = 'discover_runner.DiscoverRunner'
TEST_DISCOVER_TOP_LEVEL = BASE_DIR.parent.parent.child('lib')
TEST_DISCOVER_PATTERN = 'test*.py'
ROOT_URLCONF = 'urls'
SOUTH_TESTS_MIGRATE = False
SKIP_SOUTH_TESTS = True
<|fim▁hole|>PROFILE_IMAGE_UPLOAD_DIR = 'profile_images/'
# E-MAIL
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# CACHES
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
# DATABASE
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}
}
# REDIS
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
REDIS_PASSWORD = None
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'booktype.apps.core.middleware.SecurityMiddleware',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_celery_results',
# list of booki apps
'booki.editor',
'booktypecontrol',
# needed for translation engine
'booktype',
# list of booktype apps
'booktype.apps.core',
'booktype.apps.portal',
'booktype.apps.loadsave',
'booktype.apps.importer',
'booktype.apps.convert',
'booktype.apps.edit',
'booktype.apps.reader',
'booktype.apps.account',
'booktype.apps.themes',
'booki.messaging',
'sputnik',
)
if django.VERSION[:2] < (1, 6):
INSTALLED_APPS += ('discover_runner', )
if django.VERSION[:2] < (1, 7):
INSTALLED_APPS += ('south', )
# this is for pep8
standard_format = {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
}
# LOGGING
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': standard_format,
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
}
},
'loggers': {
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'WARN',
},
'django.db.backends': {
'handlers': ['null'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['null'],
'level': 'ERROR',
'propagate': True,
},
'booktype': {
'handlers': ['null'],
'level': 'INFO'
}
}
}
# READ CONFIGURAION
# from booki.utils import config
#
# try:
# BOOKTYPE_CONFIG = config.loadConfiguration()
# except config.ConfigurationError:
# BOOKTYPE_CONFIG = {}
BOOKTYPE_NAME = BOOKTYPE_SITE_NAME
BOOKI_NAME = BOOKTYPE_NAME
BOOKI_ROOT = BOOKTYPE_ROOT
BOOKI_URL = BOOKTYPE_URL
THIS_BOOKI_SERVER = THIS_BOOKTYPE_SERVER
BOOKI_MAINTENANCE_MODE = False<|fim▁end|> | SECRET_KEY = 'enc*ln*vp^o2p1p6of8ip9v5_tt6r#fh2-!-@pl0ur^6ul6e)l'
COVER_IMAGE_UPLOAD_DIR = 'cover_images/' |
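The LOGGING dict above is a standard dictConfig schema; a quick illustrative smoke test (assumes Django is importable, since the filter references django.utils.log):

# Illustrative smoke test: the LOGGING dict above loads cleanly via dictConfig.
import logging.config

logging.config.dictConfig(LOGGING)
logging.getLogger('booktype').info('logging configured')  # routed to the null handler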
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>use std::fs::File;
use std::io::prelude::*;
use std::collections::HashMap;
fn get_input() -> i32 {
let mut file = File::open("input.txt").unwrap();
let mut content = String::new();
file.read_to_string(&mut content).unwrap();
content.parse().unwrap()
}
#[derive(Copy, Clone, Debug)]
enum Direction {
XPos,
XNeg,
YPos,
YNeg
}
#[derive(Copy, Clone, Debug)]
struct State {
stride: i32,
left: i32,
x: i32,
y: i32,
dir: Direction
}
impl State {
fn move_next(self: &mut State) {
if self.left > 0 {
match self.dir {
Direction::XPos => self.x += 1,
Direction::XNeg => self.x -= 1,
Direction::YPos => self.y += 1,
Direction::YNeg => self.y -= 1
}
self.left -= 1;
} else {
match self.dir {
Direction::XPos => {
self.dir = Direction::YPos;
self.y += 1;
},
Direction::YPos => {
self.stride += 1;
self.dir = Direction::XNeg;
self.x -= 1;
},
Direction::XNeg => {<|fim▁hole|> },
Direction::YNeg => {
self.stride += 1;
self.dir = Direction::XPos;
self.x += 1;
}
}
self.left = self.stride - 1;
}
}
fn move_next_value(self: &mut State, grid: &mut HashMap<(i32, i32), i32>) -> i32 {
self.move_next();
let mut v = 0;
v += grid.get(&(self.x + 1, self.y)).unwrap_or(&0);
v += grid.get(&(self.x - 1, self.y)).unwrap_or(&0);
v += grid.get(&(self.x, self.y + 1)).unwrap_or(&0);
v += grid.get(&(self.x, self.y - 1)).unwrap_or(&0);
v += grid.get(&(self.x + 1, self.y + 1)).unwrap_or(&0);
v += grid.get(&(self.x + 1, self.y - 1)).unwrap_or(&0);
v += grid.get(&(self.x - 1, self.y + 1)).unwrap_or(&0);
v += grid.get(&(self.x - 1, self.y - 1)).unwrap_or(&0);
grid.insert((self.x, self.y), v);
v
}
}
fn part_1(input: i32) -> i32 {
let mut state = State { stride: 1, left: 1, x: 0, y: 0, dir: Direction::XPos };
for _ in 1 .. input {
state.move_next();
}
state.x.abs() + state.y.abs()
}
fn part_2(input: i32) -> i32 {
let mut state = State { stride: 1, left: 1, x: 0, y: 0, dir: Direction::XPos };
let mut grid = HashMap::new();
grid.insert((0, 0), 1);
let mut v = 1;
while v <= input {
v = state.move_next_value(&mut grid);
}
v
}
fn main() {
let input = get_input();
println!("Part 1 distance: {}", part_1(input));
println!("Part 2 value: {}", part_2(input));
}<|fim▁end|> | self.dir = Direction::YNeg;
self.y -= 1; |
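To sanity-check the spiral walk, here is the part_1 computation mirrored in a compact Python sketch; the asserted values are the well-known puzzle examples:

# Python sketch of part_1 above: walk the spiral, return the Manhattan distance.
def part_1(n):
    x = y = 0
    dx, dy = 1, 0              # start on the XPos leg
    stride, left = 1, 1
    for _ in range(n - 1):
        if left > 0:
            x, y = x + dx, y + dy
            left -= 1
        else:
            # Turn; the stride grows after the YPos and YNeg legs,
            # matching State::move_next in the Rust version.
            if (dx, dy) == (1, 0):
                dx, dy = 0, 1
            elif (dx, dy) == (0, 1):
                stride, dx, dy = stride + 1, -1, 0
            elif (dx, dy) == (-1, 0):
                dx, dy = 0, -1
            else:
                stride, dx, dy = stride + 1, 1, 0
            x, y = x + dx, y + dy
            left = stride - 1
    return abs(x) + abs(y)

assert part_1(1) == 0
assert part_1(12) == 3
assert part_1(23) == 2
assert part_1(1024) == 31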
<|file_name|>VoldemortNativeRequestHandler.java<|end_file_name|><|fim▁begin|>package voldemort.server.protocol.vold;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.log4j.Logger;
import voldemort.VoldemortException;
import voldemort.common.VoldemortOpCode;
import voldemort.common.nio.ByteBufferBackedInputStream;
import voldemort.common.nio.ByteBufferContainer;
import voldemort.server.RequestRoutingType;
import voldemort.server.StoreRepository;
import voldemort.server.protocol.AbstractRequestHandler;
import voldemort.server.protocol.RequestHandler;
import voldemort.server.protocol.StreamRequestHandler;
import voldemort.store.ErrorCodeMapper;
import voldemort.store.Store;
import voldemort.utils.ByteArray;
import voldemort.versioning.ObsoleteVersionException;
/**
* Server-side request handler for voldemort native client protocol
*
*
*/
public class VoldemortNativeRequestHandler extends AbstractRequestHandler implements RequestHandler {
private static final Logger logger = Logger.getLogger(VoldemortNativeRequestHandler.class);
private final int protocolVersion;
public VoldemortNativeRequestHandler(ErrorCodeMapper errorMapper,
StoreRepository repository,
int protocolVersion) {
super(errorMapper, repository);
if(protocolVersion < 0 || protocolVersion > 3)
throw new IllegalArgumentException("Unknown protocol version: " + protocolVersion);
this.protocolVersion = protocolVersion;
}
    private ClientRequestHandler getClientRequestHandler(byte opCode,
Store<ByteArray, byte[], byte[]> store)
throws IOException {
switch(opCode) {
case VoldemortOpCode.GET_OP_CODE:
return new GetRequestHandler(store, protocolVersion);
case VoldemortOpCode.GET_ALL_OP_CODE:
return new GetAllRequestHandler(store, protocolVersion);
case VoldemortOpCode.PUT_OP_CODE:
return new PutRequestHandler(store, protocolVersion);
case VoldemortOpCode.DELETE_OP_CODE:
return new DeleteRequestHandler(store, protocolVersion);
case VoldemortOpCode.GET_VERSION_OP_CODE:
return new GetVersionRequestHandler(store, protocolVersion);
default:
throw new IOException("Unknown op code: " + opCode);
}
}
@Override
public StreamRequestHandler handleRequest(final DataInputStream inputStream,
final DataOutputStream outputStream)
throws IOException {
return handleRequest(inputStream, outputStream, null);
}
private void clearBuffer(ByteBufferContainer outputContainer) {<|fim▁hole|> }
@Override
public StreamRequestHandler handleRequest(final DataInputStream inputStream,
final DataOutputStream outputStream,
final ByteBufferContainer outputContainer)
throws IOException {
long startTimeMs = -1;
long startTimeNs = -1;
if(logger.isDebugEnabled()) {
startTimeMs = System.currentTimeMillis();
startTimeNs = System.nanoTime();
}
byte opCode = inputStream.readByte();
String storeName = inputStream.readUTF();
RequestRoutingType routingType = getRoutingType(inputStream);
Store<ByteArray, byte[], byte[]> store = getStore(storeName, routingType);
if(store == null) {
clearBuffer(outputContainer);
writeException(outputStream, new VoldemortException("No store named '" + storeName
+ "'."));
return null;
}
ClientRequestHandler requestHandler = getClientRequestHandler(opCode, store);
try {
requestHandler.parseRequest(inputStream);
requestHandler.processRequest();
} catch(VoldemortException e) {
// Put generates a lot of ObsoleteVersionExceptions; suppress them, as
// they are harmless and indicate the normal mode of operation.
if(!(e instanceof ObsoleteVersionException)) {
logger.error("Store" + storeName + ". Error: " + e.getMessage());
}
clearBuffer(outputContainer);
writeException(outputStream, e);
return null;
}
// We are done with Input, clear the buffers
clearBuffer(outputContainer);
int size = requestHandler.getResponseSize();
if(outputContainer != null) {
outputContainer.growBuffer(size);
}
requestHandler.writeResponse(outputStream);
outputStream.flush();
if(logger.isDebugEnabled()) {
String debugPrefix = "OpCode " + opCode + " started at: " + startTimeMs
+ " handlerRef: " + System.identityHashCode(inputStream)
+ " Elapsed : " + (System.nanoTime() - startTimeNs) + " ns, ";
logger.debug(debugPrefix + requestHandler.getDebugMessage());
}
return null;
}
private RequestRoutingType getRoutingType(DataInputStream inputStream) throws IOException {
RequestRoutingType routingType = RequestRoutingType.NORMAL;
if(protocolVersion > 0) {
boolean isRouted = inputStream.readBoolean();
routingType = RequestRoutingType.getRequestRoutingType(isRouted, false);
}
if(protocolVersion > 1) {
int routingTypeCode = inputStream.readByte();
routingType = RequestRoutingType.getRequestRoutingType(routingTypeCode);
}
return routingType;
}
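// Informal wire layout, inferred from the read order in handleRequest and
// getRoutingType above (an editor's sketch, not an authoritative spec):
// [1-byte opCode][UTF store name][routed boolean if protocol >= 1]
// [routing-type byte if protocol >= 2][operation-specific payload]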
/**
* This is pretty ugly. We end up mimicking the request logic here, so this
* needs to stay in sync with handleRequest.
*/
@Override
public boolean isCompleteRequest(final ByteBuffer buffer) throws VoldemortException {
DataInputStream inputStream = new DataInputStream(new ByteBufferBackedInputStream(buffer));
try {
byte opCode = inputStream.readByte();
// Store Name
inputStream.readUTF();
// Store routing type
getRoutingType(inputStream);
switch(opCode) {
case VoldemortOpCode.GET_VERSION_OP_CODE:
if(!GetVersionRequestHandler.isCompleteRequest(inputStream, buffer))
return false;
break;
case VoldemortOpCode.GET_OP_CODE:
if(!GetRequestHandler.isCompleteRequest(inputStream, buffer, protocolVersion))
return false;
break;
case VoldemortOpCode.GET_ALL_OP_CODE:
if(!GetAllRequestHandler.isCompleteRequest(inputStream, buffer, protocolVersion))
return false;
break;
case VoldemortOpCode.PUT_OP_CODE: {
if(!PutRequestHandler.isCompleteRequest(inputStream, buffer, protocolVersion))
return false;
break;
}
case VoldemortOpCode.DELETE_OP_CODE: {
if(!DeleteRequestHandler.isCompleteRequest(inputStream, buffer))
return false;
break;
}
default:
throw new VoldemortException(" Unrecognized Voldemort OpCode " + opCode);
}
// This should not happen, if we reach here and if buffer has more
// data, there is something wrong.
if(buffer.hasRemaining()) {
logger.info(" Probably a client bug, Discarding additional bytes in isCompleteRequest. Opcode "
+ opCode
+ " remaining bytes " + buffer.remaining());
}
return true;
} catch(IOException e) {
// This could also occur if the various methods we call into
// re-throw a corrupted value error as some other type of exception.
// For example, updating the position on a buffer past its limit
// throws an InvalidArgumentException.
if(logger.isDebugEnabled())
logger.debug("Probable partial read occurred causing exception", e);
return false;
}
}
private void writeException(DataOutputStream stream, VoldemortException e) throws IOException {
short code = getErrorMapper().getCode(e);
stream.writeShort(code);
stream.writeUTF(e.getMessage());
stream.flush();
}
}<|fim▁end|> | if(outputContainer != null) {
outputContainer.getBuffer().clear();
} |
<|file_name|>png_unittest.py<|end_file_name|><|fim▁begin|># Copyright (C) 2012 Balazs Ankes ([email protected]) University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for png.py."""
import unittest
from png import PNGChecker
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.systemhost_mock import MockSystemHost
class PNGCheckerTest(unittest.TestCase):<|fim▁hole|> """Tests PNGChecker class."""
def test_init(self):
"""Test __init__() method."""
def mock_handle_style_error(self):
pass
checker = PNGChecker("test/config", mock_handle_style_error, MockSystemHost())
self.assertEqual(checker._file_path, "test/config")
self.assertEqual(checker._handle_style_error, mock_handle_style_error)
def test_check(self):
errors = []
def mock_handle_style_error(line_number, category, confidence, message):
error = (line_number, category, confidence, message)
errors.append(error)
fs = MockFileSystem()
file_path = "foo.png"
fs.write_binary_file(file_path, "Dummy binary data")
errors = []
checker = PNGChecker(file_path, mock_handle_style_error, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 0)
file_path = "foo-expected.png"
fs.write_binary_file(file_path, "Dummy binary data")
errors = []
checker = PNGChecker(file_path, mock_handle_style_error, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0], (0, 'image/png', 5, 'Image lacks a checksum. Generate pngs using run-webkit-tests to ensure they have a checksum.'))<|fim▁end|> | |
<|file_name|>map.go<|end_file_name|><|fim▁begin|>package hashmap
// Map is a persistent associative data structure mapping keys to values. It
// is immutable, and supports near-O(1) operations to create modified version of
// the map that shares the underlying data structure. Because it is immutable,
// all of its methods are safe for concurrent use.
type Map interface {
Len() int
// Index returns whether there is a value associated with the given key, and
// that value or nil.
Index(k interface{}) (interface{}, bool)
// Assoc returns an almost identical map, with the given key associated with
// the given value.
Assoc(k, v interface{}) Map
// Dissoc returns an almost identical map, with the given key associated
// with no value.
Dissoc(k interface{}) Map
// Iterator returns an iterator over the map.<|fim▁hole|>}
// Iterator is an iterator over map elements. It can be used like this:
//
// for it := m.Iterator(); it.HasElem(); it.Next() {
// key, value := it.Elem()
// // do something with elem...
// }
type Iterator interface {
// Elem returns the current key-value pair.
Elem() (interface{}, interface{})
// HasElem returns whether the iterator is pointing to an element.
HasElem() bool
// Next moves the iterator to the next position.
Next()
}
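// Illustrative use of the interfaces above (editor's sketch; `Empty` is a
// hypothetical empty-map value assumed to be provided elsewhere in this
// package):
//
//	m := Empty.Assoc("a", 1).Assoc("b", 2)
//	v, ok := m.Index("a") // v == 1, ok == true
//	m2 := m.Dissoc("a")   // m itself is unchanged
//	_ = HasKey(m2, "a")   // false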
// HasKey reports whether a Map has the given key.
func HasKey(m Map, k interface{}) bool {
_, ok := m.Index(k)
return ok
}<|fim▁end|> | Iterator() Iterator |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>
#[macro_use] extern crate libeuler;
/// 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
///
/// What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
fn main() {
solutions!{
inputs: (max_factor: i64 = 20)
sol naive {
let primes = prime_factors_less_than(&max_factor);
let mut needed_factors = Vec::new();
for factors in primes.iter() {
let mut f = needed_factors.clone();
let still_needed: Vec<&i64> = factors.iter()
.filter(|&fac| {
if f.contains(fac) {
let pos = f.iter().position(|x| x == fac).unwrap();
f.swap_remove(pos);
false
} else {
true
}
}).collect();
for v in still_needed {
needed_factors.push(v.clone());
}
}
needed_factors.iter().map(|&i| i).product::<i64>()
}
};
}
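// Editor's note: the expected answer is lcm(1..20); by prime factorization,
// 2^4 * 3^2 * 5 * 7 * 11 * 13 * 17 * 19 = 232792560. The factor-merging loop
// above reproduces this by keeping, per prime, the highest multiplicity seen
// in any factors(i). (prime_factors_less_than stops below max_factor, which
// is harmless here: 20 = 2^2 * 5 adds no new prime power.)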
fn factors(value: &i64) -> Vec<i64> {
let mut factor = 2;
let mut v = value.clone();
let mut retval = Vec::new();
while v > 1 {
if v % factor == 0 {
retval.push(factor);
v /= factor;
} else {
factor += 1;
}<|fim▁hole|>}
fn prime_factors_less_than(max: &i64) -> Vec<Vec<i64>> {
let mut retval = Vec::new();
for i in 1..*max {
retval.push(factors(&i));
}
retval
}<|fim▁end|> | }
retval |
<|file_name|>pattern_match.rs<|end_file_name|><|fim▁begin|>use std::env;
fn main() {
// args().nth(0) is the program name, so read the first real argument at index 1.
let a: i32 = env::args().nth(1).unwrap().parse().unwrap();
let b = match a {
1 => true,
2 => true,
3 | 4 => true,
_ => false,
};
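// Editor's note: the match above could also be written `1 | 2 | 3 | 4 => true`
// in a single arm, or replaced entirely by `let b = matches!(a, 1..=4);`.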
println!("{}", b);<|fim▁hole|>}<|fim▁end|> | |
<|file_name|>update.contribution.ts<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import 'vs/platform/update/common/update.config.contribution';
import { localize } from 'vs/nls';
import { Registry } from 'vs/platform/registry/common/platform';
import { IWorkbenchContributionsRegistry, Extensions as WorkbenchExtensions } from 'vs/workbench/common/contributions';
import { IWorkbenchActionRegistry, Extensions as ActionExtensions } from 'vs/workbench/common/actions';
import { SyncActionDescriptor, MenuRegistry, MenuId } from 'vs/platform/actions/common/actions';
import { ShowCurrentReleaseNotesAction, ProductContribution, UpdateContribution, CheckForVSCodeUpdateAction, CONTEXT_UPDATE_STATE, SwitchProductQualityContribution } from 'vs/workbench/contrib/update/browser/update';
import { LifecyclePhase } from 'vs/platform/lifecycle/common/lifecycle';
import product from 'vs/platform/product/common/product';
import { StateType } from 'vs/platform/update/common/update';
const workbench = Registry.as<IWorkbenchContributionsRegistry>(WorkbenchExtensions.Workbench);
workbench.registerWorkbenchContribution(ProductContribution, LifecyclePhase.Restored);
workbench.registerWorkbenchContribution(UpdateContribution, LifecyclePhase.Restored);
workbench.registerWorkbenchContribution(SwitchProductQualityContribution, LifecyclePhase.Restored);
const actionRegistry = Registry.as<IWorkbenchActionRegistry>(ActionExtensions.WorkbenchActions);
// Editor<|fim▁hole|>actionRegistry
.registerWorkbenchAction(SyncActionDescriptor.from(ShowCurrentReleaseNotesAction), `${product.nameShort}: Show Release Notes`, product.nameShort);
actionRegistry
.registerWorkbenchAction(SyncActionDescriptor.from(CheckForVSCodeUpdateAction), `${product.nameShort}: Check for Update`, product.nameShort, CONTEXT_UPDATE_STATE.isEqualTo(StateType.Idle));
// Menu
if (ShowCurrentReleaseNotesAction.AVAILABE) {
MenuRegistry.appendMenuItem(MenuId.MenubarHelpMenu, {
group: '1_welcome',
command: {
id: ShowCurrentReleaseNotesAction.ID,
title: localize({ key: 'miReleaseNotes', comment: ['&& denotes a mnemonic'] }, "&&Release Notes")
},
order: 4
});
}<|fim▁end|> | |
<|file_name|>users-routes.Spec.js<|end_file_name|><|fim▁begin|>"use strict";
var chai = require('chai');<|fim▁hole|>
var user1 = {
username: 'Laura',
password: 'tiger',
address: {
street: '123 Main St',
city: 'Beaverton',
state: 'OR',
zip: 97007,
},
_id: '',
};
chai.use(chaiHttp);
function chaiRequest() {
return chai.request(`localhost:${port}`);
}
describe('Single Resource REST API', function() {
before(function(done) {
app.listen(port, done);
});
it('POST /users request should add a user to DB', function(done) {
chaiRequest()
.post('/users')
.send(user1)
.end(function(err, res) {
expect(res).to.have.status(200);
//expect(res.text).to.have.string('Welcome to');
expect(res.body).to.have.property('_id');
user1._id = res.body._id;
expect(res.body.username).to.equal(user1.username);
done();
});
});
it('GET /users/:id request for user1 ID should user1 from DB', function(done) {
chaiRequest()
.get('/users/' + user1._id)
.end(function(err, res) {
expect(res).to.have.status(200);
expect(res.body._id).to.equal(user1._id);
expect(res.body.username).to.equal(user1.username);
done();
});
});
it('GET /users request should return all users from DB', function(done) {
chaiRequest()
.get('/users')
.end(function(err, res) {
expect(res).to.have.status(200);
expect(res.body.length).to.be.above(0);
done();
});
});
it('GET /users/:id request for INVALID ID should return empty object', function(done) {
chaiRequest()
.get('/users/999999')
.end(function(err, res) {
expect(res.body).to.be.empty;
done();
});
});
// it('PUT /users/:id request for user1 ID should update user1 password in DB', function(done) {
// chaiRequest()
// .put('/users/' + user1._id)
// .send({password: 'NewPassword'})
// .end(function(err, res) {
// console.log(res.redirects);
// expect(res).to.have.status(200);
// expect(res.body._id).to.equal(user1._id);
// expect(res.body.password).to.equal('NewPassword');
// done();
// });
// });
it('DELETE /users/:id should delete user1 from DB', function(done) {
chaiRequest()
.del('/users/' + user1._id)
.end(function(err, res) {
expect(err).to.be.null;
expect(res).to.have.status(200);
expect(res).to.be.json;
expect(res.body.message).to.equal('ID: ' + user1._id + ' deleted from DB');
done();
});
});
});<|fim▁end|> | var chaiHttp = require('chai-http');
var expect = require('chai').expect;
var app = require('../app');
var port = 3001; |
<|file_name|>code_generator.py<|end_file_name|><|fim▁begin|>#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.<|fim▁hole|># the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
""" A code generator (needed by ModToolAdd) """
from templates import Templates
import Cheetah.Template
from util_functions import str_to_fancyc_comment
from util_functions import str_to_python_comment
from util_functions import strip_default_values
from util_functions import strip_arg_types
from util_functions import strip_arg_types_grc
class GRMTemplate(Cheetah.Template.Template):
""" An extended template class """
def __init__(self, src, searchList):
self.grtypelist = {
'sync': 'sync_block',
'sink': 'sync_block',
'source': 'sync_block',
'decimator': 'sync_decimator',
'interpolator': 'sync_interpolator',
'general': 'block',
'tagged_stream': 'tagged_stream_block',
'hier': 'hier_block2',
'noblock': ''}
searchList['str_to_fancyc_comment'] = str_to_fancyc_comment
searchList['str_to_python_comment'] = str_to_python_comment
searchList['strip_default_values'] = strip_default_values
searchList['strip_arg_types'] = strip_arg_types
searchList['strip_arg_types_grc'] = strip_arg_types_grc
Cheetah.Template.Template.__init__(self, src, searchList=searchList)
self.grblocktype = self.grtypelist[searchList['blocktype']]
if searchList['is_component']:
self.include_dir_prefix = "gnuradio/" + searchList['modname']
else:
self.include_dir_prefix = searchList['modname']
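# Illustrative call of get_template below (editor's sketch; the template id
# and argument values are hypothetical -- real ids live in templates.py and
# ModToolAdd supplies the search-list keys):
# cpp = get_template('block_impl_cpp', modname='howto', blocktype='sync',
# is_component=False)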
def get_template(tpl_id, **kwargs):
""" Return the template given by tpl_id, parsed through Cheetah """
return str(GRMTemplate(Templates[tpl_id], searchList=kwargs))<|fim▁end|> | #
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>pub mod mpsc;
pub mod notify;
pub mod broadcast;
mod mutex;
<|fim▁hole|>pub use self::mutex::Mutex;<|fim▁end|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.