prompt
large_stringlengths 70
991k
| completion
large_stringlengths 0
1.02k
|
---|---|
<|file_name|>nileServer.js<|end_file_name|><|fim▁begin|>const express = require('express');
const path = require('path');
const fs = require('fs');
const bodyParser = require('body-parser')
// const formidable = require('formidable');
// const createTorrent = require('create-torrent');
// const WebTorrent = require('webtorrent');
const socketController = require('./socketController');
// max # of sockets to keep open
const socketLimit = 1;
// takes in Node Server instance and returns Express Router<|fim▁hole|>module.exports = function nileServer(server) {
// Pass server instance to use socket controller
const socket = new socketController(server, socketLimit);
// create nile.js mini-app through express Router
const nileServer = express.Router();
// endpoint for receiving magnet URI from Broadcaster
nileServer.post('/magnet', (req, res, next) => {
socket.emitNewMagnet(req.body.magnetURI);
res.sendStatus(200);
});
return nileServer;
}<|fim▁end|>
| |
<|file_name|>_y.py<|end_file_name|><|fim▁begin|>import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="y", parent_name="bar", **kwargs):
super(YValidator, self).__init__(<|fim▁hole|> parent_name=parent_name,
anim=kwargs.pop("anim", True),
edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
role=kwargs.pop("role", "data"),
**kwargs
)<|fim▁end|>
|
plotly_name=plotly_name,
|
<|file_name|>table_cache.py<|end_file_name|><|fim▁begin|>"""Cache util functions for ReSDKTables."""
import os
import pickle
import sys
from shutil import rmtree
from typing import Any
from resdk.__about__ import __version__
def _default_cache_dir() -> str:
"""Return default cache directory specific for the current OS.
Code originally from Orange3.misc.environ.
"""
if sys.platform == "darwin":
base = os.path.expanduser("~/Library/Caches")
elif sys.platform == "win32":
base = os.getenv("APPDATA", os.path.expanduser("~/AppData/Local"))
elif os.name == "posix":
base = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
else:
base = os.path.expanduser("~/.cache")
return base
def cache_dir_resdk_base() -> str:
"""Return base ReSDK cache directory."""
return os.path.join(_default_cache_dir(), "ReSDK")
def cache_dir_resdk() -> str:
"""Return ReSDK cache directory."""
v = __version__
if "dev" in v:
# remove git commit hash
v = v[: v.find("dev") + 3]
base = os.path.join(cache_dir_resdk_base(), v)
if sys.platform == "win32":
# On Windows cache and data dir are the same.
# Microsoft suggest using a Cache subdirectory
return os.path.join(base, "Cache")
else:
return base
def clear_cache_dir_resdk() -> None:
"""Delete all cache files from the default cache directory."""<|fim▁hole|>
def load_pickle(pickle_file: str) -> Any:
"""Load object from the pickle file.
:param pickle_file: file path
:return: un-pickled object
"""
if os.path.exists(pickle_file):
with open(pickle_file, "rb") as handle:
return pickle.load(handle)
def save_pickle(obj: Any, pickle_file: str, override=False) -> None:
"""Save given object into a pickle file.
:param obj: object to bi pickled
:param pickle_file: file path
:param override: if True than override existing file
:return:
"""
if not os.path.exists(pickle_file) or override:
with open(pickle_file, "wb") as handle:
pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)<|fim▁end|>
|
cache_dir = cache_dir_resdk_base()
if os.path.exists(cache_dir):
rmtree(cache_dir)
|
<|file_name|>test_retention.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import types
from datetime import datetime, timedelta
from django.utils.timezone import now as timezone_now
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.upload import create_attachment
from zerver.models import Message, Realm, Recipient, UserProfile, UserMessage, ArchivedUserMessage, \
ArchivedMessage, Attachment, ArchivedAttachment
from zerver.lib.retention import get_expired_messages, move_message_to_archive
from typing import Any, List
from six.moves import range
class TestRetentionLib(ZulipTestCase):
"""
Test receiving expired messages retention tool.
"""
def setUp(self):
# type: () -> None
super(TestRetentionLib, self).setUp()
self.zulip_realm = self._set_realm_message_retention_value('zulip', 30)
self.mit_realm = self._set_realm_message_retention_value('zephyr', 100)
@staticmethod
def _set_realm_message_retention_value(realm_str, retention_period):
# type: (str, int) -> Realm
realm = Realm.objects.get(string_id=realm_str)
realm.message_retention_days = retention_period
realm.save()
return realm
@staticmethod
def _change_messages_pub_date(msgs_ids, pub_date):
# type: (List[int], datetime) -> Any
messages = Message.objects.filter(id__in=msgs_ids).order_by('id')
messages.update(pub_date=pub_date)
return messages
def _make_mit_messages(self, message_quantity, pub_date):
# type: (int, datetime) -> Any
# send messages from mit.edu realm and change messages pub date
sender = self.mit_user('espuser')
recipient = self.mit_user('starnine')
msgs_ids = [self.send_message(sender.email, recipient.email, Recipient.PERSONAL) for i in
range(message_quantity)]
mit_messages = self._change_messages_pub_date(msgs_ids, pub_date)
return mit_messages
def test_expired_messages_result_type(self):
# type: () -> None
# Check return type of get_expired_message method.
result = get_expired_messages()
self.assertIsInstance(result, types.GeneratorType)
def test_no_expired_messages(self):
# type: () -> None
result = list(get_expired_messages())
self.assertFalse(result)
def test_expired_messages_in_each_realm(self):
# type: () -> None
# Check result realm messages order and result content
# when all realm has expired messages.
expired_mit_messages = self._make_mit_messages(3, timezone_now() - timedelta(days=101))
self._make_mit_messages(4, timezone_now() - timedelta(days=50))
zulip_messages_ids = Message.objects.order_by('id').filter(
sender__realm=self.zulip_realm).values_list('id', flat=True)[3:10]
expired_zulip_messages = self._change_messages_pub_date(zulip_messages_ids,
timezone_now() - timedelta(days=31))
# Iterate by result
expired_messages_result = [messages_list for messages_list in get_expired_messages()]
self.assertEqual(len(expired_messages_result), 2)
# Check mit.edu realm expired messages.
self.assertEqual(len(expired_messages_result[0]['expired_messages']), 3)
self.assertEqual(expired_messages_result[0]['realm_id'], self.mit_realm.id)
# Check zulip.com realm expired messages.
self.assertEqual(len(expired_messages_result[1]['expired_messages']), 7)
self.assertEqual(expired_messages_result[1]['realm_id'], self.zulip_realm.id)
# Compare expected messages ids with result messages ids.
self.assertEqual(
sorted([message.id for message in expired_mit_messages]),
[message.id for message in expired_messages_result[0]['expired_messages']]
)
self.assertEqual(
sorted([message.id for message in expired_zulip_messages]),
[message.id for message in expired_messages_result[1]['expired_messages']]
)
def test_expired_messages_in_one_realm(self):
# type: () -> None
# Check realm with expired messages and messages
# with one day to expiration data.
expired_mit_messages = self._make_mit_messages(5, timezone_now() - timedelta(days=101))
actual_mit_messages = self._make_mit_messages(3, timezone_now() - timedelta(days=99))
expired_messages_result = list(get_expired_messages())
expired_mit_messages_ids = [message.id for message in expired_mit_messages]
expired_mit_messages_result_ids = [message.id for message in
expired_messages_result[0]['expired_messages']]
actual_mit_messages_ids = [message.id for message in actual_mit_messages]
self.assertEqual(len(expired_messages_result), 1)
self.assertEqual(len(expired_messages_result[0]['expired_messages']), 5)
self.assertEqual(expired_messages_result[0]['realm_id'], self.mit_realm.id)
# Compare expected messages ids with result messages ids.
self.assertEqual(
sorted(expired_mit_messages_ids),
expired_mit_messages_result_ids
)
# Check actual mit.edu messages are not contained in expired messages list
self.assertEqual(
set(actual_mit_messages_ids) - set(expired_mit_messages_ids),
set(actual_mit_messages_ids)
)
class TestMoveMessageToArchive(ZulipTestCase):
def setUp(self):
# type: () -> None
super(TestMoveMessageToArchive, self).setUp()
self.sender = '[email protected]'
self.recipient = '[email protected]'
def _create_attachments(self):
# type: () -> None
sample_size = 10
dummy_files = [
('zulip.txt', '1/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt', sample_size),
('temp_file.py', '1/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py', sample_size),
('abc.py', '1/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py', sample_size)
]
user_profile = self.example_user('hamlet')
for file_name, path_id, size in dummy_files:
create_attachment(file_name, path_id, user_profile, size)
def _check_messages_before_archiving(self, msg_id):
# type: (int) -> List
user_messages_ids_before = list(UserMessage.objects.filter(
message_id=msg_id).order_by('id').values_list('id', flat=True))
self.assertEqual(ArchivedUserMessage.objects.count(), 0)
self.assertEqual(ArchivedMessage.objects.count(), 0)
return user_messages_ids_before
def _check_messages_after_archiving(self, msg_id, user_msgs_ids_before):
# type: (int, List[int]) -> None
self.assertEqual(ArchivedMessage.objects.filter(id=msg_id).count(), 1)
self.assertEqual(Message.objects.filter(id=msg_id).count(), 0)
self.assertEqual(UserMessage.objects.filter(message_id=msg_id).count(), 0)
arc_user_messages_ids_after = list(ArchivedUserMessage.objects.filter(
message_id=msg_id).order_by('id').values_list('id', flat=True))
self.assertEqual(arc_user_messages_ids_after, user_msgs_ids_before)
def test_personal_message_archiving(self):
# type: ()-> None
msg_id = self.send_message(self.sender, [self.recipient], Recipient.PERSONAL)
user_messages_ids_before = self._check_messages_before_archiving(msg_id)
move_message_to_archive(message_id=msg_id)
self._check_messages_after_archiving(msg_id, user_messages_ids_before)
def test_stream_message_archiving(self):
# type: ()-> None
msg_id = self.send_message(self.sender, "Verona", Recipient.STREAM)
user_messages_ids_before = self._check_messages_before_archiving(msg_id)
move_message_to_archive(message_id=msg_id)
self._check_messages_after_archiving(msg_id, user_messages_ids_before)
def test_archiving_message_second_time(self):<|fim▁hole|> msg_id = self.send_message(self.sender, "Verona", Recipient.STREAM)
user_messages_ids_before = self._check_messages_before_archiving(msg_id)
move_message_to_archive(message_id=msg_id)
self._check_messages_after_archiving(msg_id, user_messages_ids_before)
with self.assertRaises(Message.DoesNotExist):
move_message_to_archive(message_id=msg_id)
def test_archiving_message_with_attachment(self):
# type: () -> None
self._create_attachments()
body = """Some files here ...[zulip.txt](
http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt)
http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py ....
Some more.... http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py
"""
msg_id = self.send_message(self.sender, [self.recipient], Recipient.PERSONAL, body)
user_messages_ids_before = self._check_messages_before_archiving(msg_id)
attachments_ids_before = list(Attachment.objects.filter(
messages__id=msg_id).order_by("id").values_list("id", flat=True))
self.assertEqual(ArchivedAttachment.objects.count(), 0)
move_message_to_archive(message_id=msg_id)
self._check_messages_after_archiving(msg_id, user_messages_ids_before)
self.assertEqual(Attachment.objects.count(), 0)
arc_attachments_ids_after = list(ArchivedAttachment.objects.filter(
messages__id=msg_id).order_by("id").values_list("id", flat=True))
self.assertEqual(attachments_ids_before, arc_attachments_ids_after)
def test_archiving_message_with_shared_attachment(self):
# type: () -> None
# Check do not removing attachments which is used in other messages.
self._create_attachments()
body = """Some files here ...[zulip.txt](
http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt)
http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py ....
Some more.... http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py
"""
msg_id = self.send_message(self.sender, [self.recipient], Recipient.PERSONAL, body)
msg_id_shared_attachments = self.send_message(self.recipient, [self.sender],
Recipient.PERSONAL, body)
user_messages_ids_before = self._check_messages_before_archiving(msg_id)
attachments_ids_before = list(Attachment.objects.filter(
messages__id=msg_id).order_by("id").values_list("id", flat=True))
self.assertEqual(ArchivedAttachment.objects.count(), 0)
move_message_to_archive(message_id=msg_id)
self._check_messages_after_archiving(msg_id, user_messages_ids_before)
self.assertEqual(Attachment.objects.count(), 3)
arc_attachments_ids_after = list(ArchivedAttachment.objects.filter(
messages__id=msg_id).order_by("id").values_list("id", flat=True))
self.assertEqual(attachments_ids_before, arc_attachments_ids_after)
move_message_to_archive(message_id=msg_id_shared_attachments)
self.assertEqual(Attachment.objects.count(), 0)<|fim▁end|>
|
# type: ()-> None
|
<|file_name|>eq_string.rs<|end_file_name|><|fim▁begin|>extern crate env_logger;
extern crate must;
use must::*;
<|fim▁hole|> let _ = env_logger::init();
eq("", "").unwrap();
}
#[test]
#[should_panic(expected = "Diff:
foo
-bar
baz
+bar
quux")]
fn str_fail() {
let _ = env_logger::init();
env::set_var(COLOR_ENV, "none");
let err = eq("foo
bar
baz
quux",
"foo
baz
bar
quux")
.unwrap_err();
env::remove_var(COLOR_ENV);
panic!("{}", err);
}<|fim▁end|>
|
use std::env;
#[test]
fn ok() {
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//<|fim▁hole|>// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
#![allow(dead_code)]
#![allow(unused_unsafe)]
#![allow(unused_mut)]
use prelude::v1::*;
use ffi::CStr;
use io::{self, ErrorKind};
use libc;
use num::{Int, SignedInt};
use num;
use old_io::{self, IoResult, IoError};
use str;
use sys_common::mkerr_libc;
macro_rules! helper_init { (static $name:ident: Helper<$m:ty>) => (
static $name: Helper<$m> = Helper {
lock: ::sync::MUTEX_INIT,
cond: ::sync::CONDVAR_INIT,
chan: ::cell::UnsafeCell { value: 0 as *mut Sender<$m> },
signal: ::cell::UnsafeCell { value: 0 },
initialized: ::cell::UnsafeCell { value: false },
shutdown: ::cell::UnsafeCell { value: false },
};
) }
pub mod backtrace;
pub mod c;
pub mod condvar;
pub mod ext;
pub mod fd;
pub mod fs; // support for std::old_io
pub mod fs2; // support for std::fs
pub mod helper_signal;
pub mod mutex;
pub mod net;
pub mod os;
pub mod os_str;
pub mod pipe;
pub mod pipe2;
pub mod process;
pub mod process2;
pub mod rwlock;
pub mod stack_overflow;
pub mod sync;
pub mod tcp;
pub mod thread;
pub mod thread_local;
pub mod time;
pub mod timer;
pub mod tty;
pub mod udp;
pub mod stdio;
pub mod addrinfo {
pub use sys_common::net::get_host_addresses;
pub use sys_common::net::get_address_name;
}
// FIXME: move these to c module
pub type sock_t = self::fs::fd_t;
pub type wrlen = libc::size_t;
pub type msglen_t = libc::size_t;
pub unsafe fn close_sock(sock: sock_t) { let _ = libc::close(sock); }
pub fn last_error() -> IoError {
decode_error_detailed(os::errno() as i32)
}
pub fn last_net_error() -> IoError {
last_error()
}
extern "system" {
fn gai_strerror(errcode: libc::c_int) -> *const libc::c_char;
}
pub fn last_gai_error(s: libc::c_int) -> IoError {
let mut err = decode_error(s);
err.detail = Some(unsafe {
let data = CStr::from_ptr(gai_strerror(s));
str::from_utf8(data.to_bytes()).unwrap().to_string()
});
err
}
/// Convert an `errno` value into a high-level error variant and description.
pub fn decode_error(errno: i32) -> IoError {
// FIXME: this should probably be a bit more descriptive...
let (kind, desc) = match errno {
libc::EOF => (old_io::EndOfFile, "end of file"),
libc::ECONNREFUSED => (old_io::ConnectionRefused, "connection refused"),
libc::ECONNRESET => (old_io::ConnectionReset, "connection reset"),
libc::EPERM | libc::EACCES =>
(old_io::PermissionDenied, "permission denied"),
libc::EPIPE => (old_io::BrokenPipe, "broken pipe"),
libc::ENOTCONN => (old_io::NotConnected, "not connected"),
libc::ECONNABORTED => (old_io::ConnectionAborted, "connection aborted"),
libc::EADDRNOTAVAIL => (old_io::ConnectionRefused, "address not available"),
libc::EADDRINUSE => (old_io::ConnectionRefused, "address in use"),
libc::ENOENT => (old_io::FileNotFound, "no such file or directory"),
libc::EISDIR => (old_io::InvalidInput, "illegal operation on a directory"),
libc::ENOSYS => (old_io::IoUnavailable, "function not implemented"),
libc::EINVAL => (old_io::InvalidInput, "invalid argument"),
libc::ENOTTY =>
(old_io::MismatchedFileTypeForOperation,
"file descriptor is not a TTY"),
libc::ETIMEDOUT => (old_io::TimedOut, "operation timed out"),
libc::ECANCELED => (old_io::TimedOut, "operation aborted"),
libc::consts::os::posix88::EEXIST =>
(old_io::PathAlreadyExists, "path already exists"),
// These two constants can have the same value on some systems,
// but different values on others, so we can't use a match
// clause
x if x == libc::EAGAIN || x == libc::EWOULDBLOCK =>
(old_io::ResourceUnavailable, "resource temporarily unavailable"),
_ => (old_io::OtherIoError, "unknown error")
};
IoError { kind: kind, desc: desc, detail: None }
}
pub fn decode_error_detailed(errno: i32) -> IoError {
let mut err = decode_error(errno);
err.detail = Some(os::error_string(errno));
err
}
pub fn decode_error_kind(errno: i32) -> ErrorKind {
match errno as libc::c_int {
libc::ECONNREFUSED => ErrorKind::ConnectionRefused,
libc::ECONNRESET => ErrorKind::ConnectionReset,
libc::EPERM | libc::EACCES => ErrorKind::PermissionDenied,
libc::EPIPE => ErrorKind::BrokenPipe,
libc::ENOTCONN => ErrorKind::NotConnected,
libc::ECONNABORTED => ErrorKind::ConnectionAborted,
libc::EADDRNOTAVAIL => ErrorKind::ConnectionRefused,
libc::EADDRINUSE => ErrorKind::ConnectionRefused,
libc::ENOENT => ErrorKind::FileNotFound,
libc::EISDIR => ErrorKind::InvalidInput,
libc::EINTR => ErrorKind::Interrupted,
libc::EINVAL => ErrorKind::InvalidInput,
libc::ENOTTY => ErrorKind::MismatchedFileTypeForOperation,
libc::ETIMEDOUT => ErrorKind::TimedOut,
libc::ECANCELED => ErrorKind::TimedOut,
libc::consts::os::posix88::EEXIST => ErrorKind::PathAlreadyExists,
// These two constants can have the same value on some systems,
// but different values on others, so we can't use a match
// clause
x if x == libc::EAGAIN || x == libc::EWOULDBLOCK =>
ErrorKind::ResourceUnavailable,
_ => ErrorKind::Other,
}
}
#[inline]
pub fn retry<T, F> (mut f: F) -> T where
T: SignedInt,
F: FnMut() -> T,
{
let one: T = Int::one();
loop {
let n = f();
if n == -one && os::errno() == libc::EINTR as i32 { }
else { return n }
}
}
pub fn cvt<T: SignedInt>(t: T) -> io::Result<T> {
let one: T = Int::one();
if t == -one {
Err(io::Error::last_os_error())
} else {
Ok(t)
}
}
pub fn cvt_r<T, F>(mut f: F) -> io::Result<T>
where T: SignedInt, F: FnMut() -> T
{
loop {
match cvt(f()) {
Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
other => return other,
}
}
}
pub fn ms_to_timeval(ms: u64) -> libc::timeval {
libc::timeval {
tv_sec: (ms / 1000) as libc::time_t,
tv_usec: ((ms % 1000) * 1000) as libc::suseconds_t,
}
}
pub fn wouldblock() -> bool {
let err = os::errno();
err == libc::EWOULDBLOCK as i32 || err == libc::EAGAIN as i32
}
pub fn set_nonblocking(fd: sock_t, nb: bool) {
let set = nb as libc::c_int;
mkerr_libc(retry(|| unsafe { c::ioctl(fd, c::FIONBIO, &set) })).unwrap();
}
// nothing needed on unix platforms
pub fn init_net() {}<|fim▁end|>
| |
<|file_name|>EnemyFormation.ts<|end_file_name|><|fim▁begin|>import {scrheight, scrwidth} from "./canvas";
import {easeInOutQuad} from "./ease";
import Enemy, {IEnemyConfig} from "./Enemy";
import {addListener, emit, Events} from "./EventListener";
import {dt, player} from "./game";
import {images} from "./imageLoader";
import {PI2, randRange, SimpleHarmonicMotion as HarmonicMotion} from "./math";
export interface IFormationSubProcessor {
process(f: Formation);
}
export default class Formation {
[index: number]: (any?) => boolean | void;
public enemyList: Enemy[];
// initialy it will start at the left side of the screen with random y
public x: number = scrwidth + 500;
public y: number = scrheight * Math.random();
private numEnemy: number;
constructor(
enemyConfigList: IEnemyConfig[],
public selfPositionProcessor: IFormationSubProcessor,
public enemyPositionProcess: IFormationSubProcessor,
public cost = 100,
) {
this.enemyList = enemyConfigList.map((x) => new Enemy().init(x));
this.numEnemy = enemyConfigList.length;
addListener(this, [Events.process]);
}
public [Events.process]() {
for (let i = this.enemyList.length; i--; ) {
const u = this.enemyList[i];
if (!u) {
continue;
}
if (u.isdead()) {
--this.numEnemy;
this.enemyList[i] = null;
}
}
this.selfPositionProcessor.process(this);
this.enemyPositionProcess.process(this);
if (!this.numEnemy) {
emit(Events.enemyFormationDead, this);
}
return this.numEnemy === 0;
}
// return position corresponding to the formation position
// but will make an object with size width and height fit the screen
public getFitPossition(width: number, height: number) {
return [
(this.x / scrwidth) * (scrwidth - width) + width / 2,
(this.y / scrheight) * (scrheight - height) + height / 2,
];
}
public isDead() {
return this.numEnemy === 0;
}
public forceDead() {
for (const u of this.enemyList) {
if (u && u.x < -2 * images[u.config.imageId].width) {
u.live = 0;
}
}
}
}
// Every class name with postfix "SPP" is used as selfPositionProcessor in Formation.
// Every class name with postfix "EPP" is used as enemyPOsitionProcessor in Formation.
export class StraightForwardSPP implements IFormationSubProcessor {
constructor(
public speed: number = 400,
public angle: number = randRange([Math.PI / 8, Math.PI / 6]) + Math.PI / 2,
public bound: boolean = true,
) {}
public process(f: Formation) {
f.x += this.speed * Math.cos(this.angle) * dt;
f.y += this.speed * Math.sin(this.angle) * dt;
if (f.y > scrheight) {
if (this.bound) {
this.angle = PI2 - this.angle;
f.y = 2 * scrheight - f.y;
} else {
f.y -= scrheight;
}
}
if (f.y < 0) {
if (this.bound) {
this.angle = PI2 - this.angle;
f.y *= -1;
} else {
f.y += scrheight;
}
}
f.forceDead();
}
}
export class RandomPositionSPP implements IFormationSubProcessor {
public currentX: number;
public currentY: number;
public dtx: number;
public dty: number;
public currentTime: number;
constructor(
public moveTime = 1.5,
public tpp = .05, // toward player probability
) {
this.currentTime = 2 * this.moveTime;
}
public process(f: Formation) {
this.currentTime += dt;
if (this.currentTime >= this.moveTime) {
this.currentX = f.x;
this.currentY = f.y;
this.dty = scrheight * Math.random();
if (Math.random() <= this.tpp && this.currentTime < 2 * this.moveTime) {
this.dtx = player.x;
} else {
this.dtx = scrwidth * (Math.random() / 2 + .5);
}
this.dtx -= f.x;
this.dty -= f.y;
this.currentTime = 0;
}
f.x = easeInOutQuad(this.currentTime, this.currentX, this.dtx, this.moveTime);
f.y = easeInOutQuad(this.currentTime, this.currentY, this.dty, this.moveTime);
}
}
// export class TowardPlayerSPP implements IFormationSubProcessor {
// constructor(public px: number = 300, public pyRatio = 1.2) {}
// public process(f: Formation) {
// f.x -= this.px * dt;
// const d = player.y - f.y;
// const s = d < 0 ? -1 : +(d > 0);
// if (d * s > 200) {
// f.y += d * dt * this.pyRatio;
// } else if (d * s > 40) {
// f.y += 200 * s * dt * this.pyRatio;
// }
// f.forceDead();
// }
// }
export class PolygonEPP implements IFormationSubProcessor {
private hm: HarmonicMotion;
constructor(public radius = 100, public period = 3) {
this.hm = new HarmonicMotion(radius, period);
}
public process(f: Formation) {
this.hm.process(dt);
const [x, y] = f.getFitPossition(2 * this.radius, 2 * this.radius);
if (f.enemyList[0]) {
f.enemyList[0].x = x;
f.enemyList[0].y = y;
}
const timeoffset = this.hm.period / (f.enemyList.length - 1);
for (let i = 1, t = 0; i < f.enemyList.length; ++i, t += timeoffset) {
const u = f.enemyList[i];
if (!u) {
continue;
}
u.x = x + this.hm.getX(t);
u.y = y + this.hm.getY(t);
}
}
}
export class StraightLineEPP implements IFormationSubProcessor {
constructor(public offset = 100) {
}
public process(f: Formation) {
let angle = Math.PI;
let bound = true;
if (f.selfPositionProcessor instanceof StraightForwardSPP) {
angle = f.selfPositionProcessor.angle;
bound = f.selfPositionProcessor.bound;
}
const px = -this.offset * Math.cos(angle);
let py = -this.offset * Math.sin(angle);
let x = f.x;
let y = f.y;
for (const u of f.enemyList) {<|fim▁hole|> if (u) {
u.x = x;
u.y = y;
}
x += px;
y += py;
if (y > scrheight) {
if (bound) {
py = -py;
y = 2 * scrheight - y;
} else {
y -= scrheight;
}
}
if (y < 0) {
if (bound) {
py = -py;
y = -y;
} else {
y += scrheight;
}
}
}
}
}
export class WallEPP implements IFormationSubProcessor {
constructor(public enemyPerLine = 1, public offset = 100) {}
public process(f: Formation) {
const numberOfLine = Math.floor(f.enemyList.length / this.enemyPerLine);
const w = (numberOfLine - 1) * this.offset;
const h = (this.enemyPerLine - 1) * this.offset;
let [x, y] = f.getFitPossition(w, h);
x -= w / 2;
y -= h / 2;
for (let i = numberOfLine; i--; ) {
for (let j = this.enemyPerLine; j--; ) {
const u = f.enemyList[i * this.enemyPerLine + j];
if (u) {
u.x = x + i * this.offset;
u.y = y + j * this.offset;
}
}
}
}
}
export class PyramidEPP implements IFormationSubProcessor {
constructor(public offset = 100) {}
public process(f: Formation) {
let maxLine = 0;
while ((maxLine + 1) * maxLine / 2 < f.enemyList.length) {
++maxLine;
}
const s = (maxLine - 1) * this.offset;
// tslint:disable prefer-const
let [x, y] = f.getFitPossition(s, s);
// tslint:enable prefer-const
x -= s / 2;
for (let i = -1; ++i < maxLine; ) {
const rs = i * this.offset;
for (let j = -1; ++j <= i; ) {
const u = f.enemyList[i * (i + 1) / 2 + j];
if (u) {
u.x = x + rs;
u.y = y - rs / 2 + j * this.offset;
}
}
}
}
}<|fim▁end|>
| |
<|file_name|>Instance.hpp<|end_file_name|><|fim▁begin|>#ifndef INSTANCE_H
#define INSTANCE_H
#include "ECStd.hpp"
#include "Graphics.hpp"
#include "InputMan.hpp"
class Instance
{
bool _running;
public:
const uint WIDTH = 1024,
HEIGHT = 768;
SDL_Surface* surface;
InputMan* in;
Bitmap* screen;
Instance();
virtual ~Instance();
// Call this to stop the main loop
void stop(void);
// The main run loop - unmodifiable by inheritors
// Call to start
bool run(void);
/* Abstract Methods (Must be implemented) */
// Called when the main loop startd
virtual bool on_start(void) = 0;
// Called on stop
virtual void on_stop(void) = 0;<|fim▁hole|>
// Called by the main loop. Use to 'tick' components (eg entities, level)
virtual void tick(void) = 0;
};
#endif<|fim▁end|>
|
// Called by the main loop. ALL RENDERING SHOULD BE DONE HERE.
virtual void render(Graphics* g) = 0;
|
<|file_name|>util.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from twiml_server import app
def make_json_app():
def make_json_error(ex):
response = jsonify(message=str(ex))
response.status_code = (ex.code
if isinstance(ex, HTTPException)
else 500)
return response
for code in default_exceptions.iterkeys():
app.error_handler_spec[None][code] = make_json_error<|fim▁end|>
|
import types
from flask import jsonify
from werkzeug.exceptions import default_exceptions
from werkzeug.exceptions import HTTPException
|
<|file_name|>tests.rs<|end_file_name|><|fim▁begin|>use {MessageBuilder, Priority};
use notification::NotificationBuilder;
use std::collections::HashMap;
#[test]
fn should_create_new_message() {
let msg = MessageBuilder::new("token").finalize();
assert_eq!(msg.to, "token");
}
#[test]
fn should_set_registration_ids() {
let msg = MessageBuilder::new("token").finalize();
assert_eq!(msg.registration_ids, None);
let msg = MessageBuilder::new("token")
.registration_ids(vec!["id1"]).finalize();
assert_eq!(msg.registration_ids, Some(vec!["id1".to_string()]));
}
#[test]
fn should_set_collapse_key() {
let msg = MessageBuilder::new("token").finalize();
assert_eq!(msg.collapse_key, None);
let msg = MessageBuilder::new("token")
.collapse_key("key").finalize();
assert_eq!(msg.collapse_key, Some("key"));
}
#[test]
fn should_set_priority() {
let msg = MessageBuilder::new("token").finalize();
assert_eq!(msg.priority, None);
let msg = MessageBuilder::new("token")
.priority(Priority::Normal).finalize();
<|fim▁hole|>
#[test]
fn should_set_content_available() {
let msg = MessageBuilder::new("token").finalize();
assert_eq!(msg.content_available, None);
let msg = MessageBuilder::new("token")
.content_available(true).finalize();
assert_eq!(msg.content_available, Some(true));
}
#[test]
fn should_set_delay_while_idle() {
let msg = MessageBuilder::new("token").finalize();
assert_eq!(msg.delay_while_idle, None);
let msg = MessageBuilder::new("token")
.delay_while_idle(true).finalize();
assert_eq!(msg.delay_while_idle, Some(true));
}
#[test]
fn should_set_time_to_live() {
let msg = MessageBuilder::new("token").finalize();
assert_eq!(msg.time_to_live, None);
let msg = MessageBuilder::new("token")
.time_to_live(10).finalize();
assert_eq!(msg.time_to_live, Some(10));
}
#[test]
fn should_set_restricted_package_name() {
let msg = MessageBuilder::new("token").finalize();
assert_eq!(msg.restricted_package_name, None);
let msg = MessageBuilder::new("token")
.restricted_package_name("name").finalize();
assert_eq!(msg.restricted_package_name, Some("name"));
}
#[test]
fn should_set_dry_run() {
let msg = MessageBuilder::new("token").finalize();
assert_eq!(msg.dry_run, None);
let msg = MessageBuilder::new("token")
.dry_run(true).finalize();
assert_eq!(msg.dry_run, Some(true));
}
#[test]
fn should_set_data() {
let msg = MessageBuilder::new("token").finalize();
assert_eq!(msg.data, None);
let mut data = HashMap::new();
data.insert("my", "data");
let msg = MessageBuilder::new("token")
.data(data).finalize();
assert_eq!(msg.data.unwrap().get("my"), Some(&"data".to_string()));
}
#[test]
fn should_set_notifications() {
let msg = MessageBuilder::new("token").finalize();
assert_eq!(msg.notification, None);
let nm = NotificationBuilder::new("title").finalize();
let msg = MessageBuilder::new("token")
.notification(nm).finalize();
assert!(msg.notification != None);
}<|fim▁end|>
|
assert_eq!(msg.priority, Some(Priority::Normal));
}
|
<|file_name|>subclass.py<|end_file_name|><|fim▁begin|># !/usr/bin/env python3
# _*_ coding:utf8 _*_
# Power by zuosc 2016-10-23
'subclass demo 继承和多态'<|fim▁hole|>
class Animal(object):
def run(self):
print('Animal is running......')
class Dog(Animal):
def run(self):
print('Dog is running.....')
def eat(self):
print('dog is eating......')
class Cat(Animal):
pass
dog = Dog()
dog.run()<|fim▁end|>
| |
<|file_name|>patchDiff.js<|end_file_name|><|fim▁begin|>import * as R from 'ramda';
import { getPatchPath } from 'xod-project';
import { isAmong } from 'xod-func-tools';
import { def } from './types';
import { CHANGE_TYPES } from './constants';
const isEqualPatchPaths = def(
'isEqualPatchPaths :: Patch -> Patch -> Boolean',
R.useWith(R.equals, [getPatchPath, getPatchPath])
);
const createPatchChange = def(
'createPatchChange :: AnyChangeType -> Patch -> AnyPatchChange',
(changeType, patch) =>
R.compose(
R.when(
() =>
changeType === CHANGE_TYPES.ADDED ||
changeType === CHANGE_TYPES.MODIFIED,
R.assoc('data', patch)
),
R.applySpec({
path: getPatchPath,
changeType: R.always(changeType),
})
)(patch)
);
export const calculateAdded = def(
'calculateAdded :: [Patch] -> [Patch] -> [AddedPatchChange]',
R.compose(
R.map(createPatchChange(CHANGE_TYPES.ADDED)),
R.flip(R.differenceWith(isEqualPatchPaths))
)
);
export const calculateModified = def(
'calculateModified :: [Patch] -> [Patch] -> [ModifiedPatchChange]',
(before, after) => {
const beforeIds = R.map(getPatchPath, before);
return R.compose(
R.map(createPatchChange(CHANGE_TYPES.MODIFIED)),
R.difference(R.__, before),
R.filter(R.compose(isAmong(beforeIds), getPatchPath))
)(after);
}
);
export const calculateDeleted = def(
'calculateDeleted :: [Patch] -> [Patch] -> [DeletedPatchChange]',
R.compose(
R.map(createPatchChange(CHANGE_TYPES.DELETED)),
R.differenceWith(isEqualPatchPaths)
)
);
export const calculateDiff = def(
'calculateDiff :: [Patch] -> [Patch] -> [AnyPatchChange]',<|fim▁hole|> calculateDeleted,
])
);<|fim▁end|>
|
R.converge(R.unapply(R.unnest), [
calculateAdded,
calculateModified,
|
<|file_name|>lib.js<|end_file_name|><|fim▁begin|>document.onmousemove = moveDefence;
var width = 1200;
var height = 600;
var ballPerSeconds = 1;
var canvas = document.getElementById('myCanvas');
var ctx = canvas.getContext('2d');
var allBalls = new Array();
var defence = {
start: 0,
end: (Math.PI) / 3,
jiao: 0
};
var HP = 100;
var draw = drawInGame;
var score = 0;
function moveDefence(evt) {
if (!evt) {
evt = window.event
}
var xx = evt.clientX - width * 0.5;
var yy = evt.clientY - height * 0.5;
if (yy >= 0 && xx >= 0) {
defence.jiao = Math.atan(yy / xx)
}
if (yy >= 0 && xx < 0) {
defence.jiao = Math.PI + Math.atan(yy / xx)
}
if (yy < 0 && xx >= 0) {
defence.jiao = Math.atan(yy / xx)
}
if (yy < 0 && xx < 0) {
defence.jiao = Math.atan(yy / xx) - Math.PI
}
defence.start = defence.jiao - (Math.PI / 3);
defence.end = defence.jiao + (Math.PI / 3)
}
function Ball() {
if (Math.random() <= 0.25) {
this.x = 2;
this.y = height * Math.random()
}
if ((Math.random() > 0.25) && (Math.random() <= 0.5)) {
this.x = 998;
this.y = height * Math.random()
}
if ((Math.random() < 0.75) && (Math.random() > 0.5)) {
this.y = 798;
this.x = width * Math.random()
}
if (Math.random() >= 0.75) {
this.y = 2;
this.x = width * Math.random()
}
this.act = function() {
this.x = this.x + 10;
this.y = this.y + 10
}
}
function create() {
var cre;
for (cre = 0; cre < ballPerSeconds; cre++) {
var ball = new Ball();
allBalls.push(ball)
}
}
function drawEnd() {
ctx.fillStyle = 'black';
ctx.fillRect(0, 0, width, height);
ctx.font = "Bold 60px Arial";
ctx.textAlign = "center";
ctx.fillStyle = "#FFFFFF";
ctx.fillText("游戏结束", width * 0.5, height * 0.5);
ctx.font = "Bold 40px Arial";
ctx.textAlign = "center";
ctx.fillStyle = "#FFFFFF";
ctx.fillText("得分:", width * 0.5, height * 0.5 + 60);
ctx.font = "Bold 40px Arial";
ctx.textAlign = "center";
ctx.fillStyle = "#FFFFFF";
ctx.fillText(score.toString(), width * 0.5, height * 0.5 + 100)
}
function drawInGame() {
ctx.fillStyle = 'black';
ctx.fillRect(0, 0, width, height);
var i;
ctx.beginPath();
ctx.arc(width * 0.5, height * 0.5, 60, defence.start, defence.end, false);
ctx.fillStyle = "#00A67C";
ctx.fill();
ctx.beginPath();
ctx.arc(width * 0.5, height * 0.5, 56, 0, Math.PI * 2, true);
ctx.fillStyle = "#000000";
ctx.fill();
ctx.beginPath();
ctx.arc(width * 0.5, height * 0.5, 5, 0, Math.PI * 2, true);
ctx.fillStyle = "#B7F200";
ctx.fill();
for (i = 0; i < allBalls.length; i++) {
ctx.beginPath();
ctx.arc(allBalls[i].x, allBalls[i].y, 5, 0, Math.PI * 2, true);
ctx.fillStyle = "#EF002A";<|fim▁hole|> }
ctx.fillStyle = "#DE0052";
ctx.fillRect(0, 0, HP * 3, 25);
ctx.font = "Bold 20px Arial";
ctx.textAlign = "left";
ctx.fillStyle = "#FFFFFF";
ctx.fillText(HP.toString(), 20, 20);
ctx.font = "Bold 20px Arial";
ctx.textAlign = "left";
ctx.fillStyle = "#EE6B9C";
scoretext = "得分:" + score.toString();
ctx.fillText(scoretext, 20, 50)
}
function act() {
for (var i = 0; i < allBalls.length; i++) {
var ax = width * 0.5 - allBalls[i].x;
var by = height * 0.5 - allBalls[i].y;
var movex = 1.5 * ax * (1.5 / Math.sqrt(ax * ax + by * by));
var movey = 1.5 * by * (1.5 / Math.sqrt(ax * ax + by * by));
allBalls[i].x = allBalls[i].x + movex;
allBalls[i].y = allBalls[i].y + movey
}
}
function check() {
for (var i = 0; i < allBalls.length; i++) {
var ax = allBalls[i].x - width * 0.5;
var by = allBalls[i].y - height * 0.5;
var distance = Math.sqrt(ax * ax + by * by);
var angel;
if (by >= 0 && ax >= 0) {
angel = Math.atan(by / ax)
}
if (by >= 0 && ax < 0) {
angel = Math.PI + Math.atan(by / ax)
}
if (by < 0 && ax >= 0) {
angel = Math.atan(by / ax)
}
if (by < 0 && ax < 0) {
angel = Math.atan(by / ax) - Math.PI
}
if (distance <= 63 && distance >= 57 && ((angel > defence.start && angel < defence.end) || (angel + 2 * Math.PI > defence.start && angel + 2 * Math.PI < defence.end) || (angel - 2 * Math.PI > defence.start && angel - 2 * Math.PI < defence.end))) {
allBalls.splice(i, 1);
if (HP < 100)
HP = HP + 2;
score = score + Math.floor(1000 / HP)
}
if (distance <= 5) {
allBalls.splice(i, 1);
HP = HP - 10;
if (HP < 0) {
draw = drawEnd;
window.clearInterval(int)
}
}
}
}
function start() {
act();
check();
draw()
}
var int = setInterval("start()", 30);
setInterval("create()", 500);<|fim▁end|>
|
ctx.fill()
|
<|file_name|>JPC.py<|end_file_name|><|fim▁begin|># coding: utf-8
from geventwebsocket.handler import WebSocketHandler
from gevent import pywsgi, sleep
import json
import MySQLdb
class JPC:
#
# 初期化
#
def __init__(self, filepath_config):
import hashlib
# 設定ファイルをロード
fp = open(filepath_config, 'r')
config = json.load(fp)
fp.close()
# 設定をクラス変数に格納
self.host = config['host']
self.port = config['port']
self.langlist = json.load(open(config['langfile'], 'r'))
self.enckey = hashlib.md5(config['key']).digest()
self.db_host = config['db_host']
self.db_name = config['db_name']
self.db_username = config['db_username']
self.db_password = config['db_password']
return
#
# チェック
#
def execute(self):
import codecs
import commands
import os
import pwd
# 情報を取得
code = self.packet['code']
lang = self.packet['lang']
script = self.langlist['compile'][lang]
extension = self.langlist['extension'][lang]
# 必要なデータを生成
filepath_in = self.randstr(8) + extension
filepath_out = self.randstr(8)
username = self.randstr(16)
# /tmpに移動
os.chdir('/tmp/')
# ユーザーを作成する
try:
os.system("useradd -M {0}".format(username))
pwnam = pwd.getpwnam(username)
except Exception:
return
# コードを生成
fp = codecs.open(filepath_in, 'w', 'utf-8')
fp.write(code)
fp.close()
# コンパイル
compile_result = commands.getoutput(
script.format(input=filepath_in, output=filepath_out)
)
# コードを削除
try:
os.remove(filepath_in)
except Exception:
pass
# コンパイル結果を送信
try:
self.ws.send(json.dumps({'compile': compile_result}))
except Exception:
pass
# コンパイルできているか
if not os.path.exists(filepath_out):
print("[INFO] コンパイルに失敗しました。")
return
# 実行ファイルの権限を変更
try:
os.chmod(filepath_out, 0500)
os.chown(filepath_out, pwnam.pw_uid, pwnam.pw_gid)
# 出力例も一応
os.chown(self.record['output_code'], pwnam.pw_uid, pwnam.pw_gid)
except Exception:
try:
os.remove(filepath_out)
os.system("userdel -r {0}".format(username))
except Exception:
print("[ERROR] /tmp/{0}の削除に失敗しました。".format(filepath_out))
print("[ERROR] ユーザー{0}の削除に失敗しました。".format(username))
return
# チェックする
clear = True
for n in range(int(self.record['exec_time'])):
print("[INFO] {0}回目の試行が開始されました。".format(n + 1))
# 実行開始を宣言
try:
self.ws.send(json.dumps({'attempt': n + 1}))
except Exception:
pass
# 入力を生成
self.input_data = commands.getoutput(
self.record['input_code'] + " " + str(n)
)
# 出力を生成
self.output_data = self.run_command(username, self.record['output_code'])
# 実行結果を取得
result = self.run_command(username, './'+filepath_out)
#print "Input : ", self.input_data
#print "Answer : ", self.output_data
#print "Result : ", result
# タイムアウト
if result == False:
self.ws.send(json.dumps({'failure': n + 1}))
clear = False
print("[INFO] タイムアウトしました。")
continue
# 結果が違う
if self.output_data.rstrip('\n') != result.rstrip('\n'):
self.ws.send(json.dumps({'failure': n + 1}))
clear = False
print("[INFO] 結果に誤りがあります。")
continue
# 実行結果を宣言
try:
self.ws.send(json.dumps({'success': n + 1}))
print("[INFO] チェックが成功しました。")
except Exception:
pass
# 成功通知
if clear:
self.ws.send('{"complete":"success"}')
self.update_db()
else:
self.ws.send('{"complete":"failure"}')
# 実行ファイルを削除
try:
os.remove(filepath_out)
os.system("userdel -r {0}".format(username))
except Exception:
print("[ERROR] /tmp/{0}の削除に失敗しました。".format(filepath_out))
print("[ERROR] ユーザー{0}の削除に失敗しました。".format(username))
return
#
# コマンドを制限付きで実行
#
def run_command(self, username, filepath):
import subprocess
import time
import sys
# プロセスを生成
proc = subprocess.Popen(
[
'su',
username,
'-c',
'ulimit -v {0}; {1}'.format(
str(self.record['limit_memory']),
filepath
)<|fim▁hole|> stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
stdin = subprocess.PIPE,
)
# 入力を送る
proc.stdin.write(self.input_data.rstrip('\n') + '\n')
proc.stdin.close()
# 時間制限を設定
deadline = time.time() + float(self.record['limit_time']) / 1000.0
while time.time() < deadline and proc.poll() == None:
time.sleep(0.20)
# タイムアウト
if proc.poll() == None:
if float(sys.version[:3]) >= 2.6:
proc.terminate()
return False
# 正常終了
stdout = proc.stdout.read()
return stdout
#
# 点数を追加
#
def update_db(self):
import time
cursor = self.db.cursor(MySQLdb.cursors.DictCursor)
# スコアを追加
cursor.execute("UPDATE account SET score=score+{score} WHERE user='{user}';".format(score=int(self.record['score']), user=self.user))
# 解答済み問題を追加
cursor.execute("UPDATE account SET solved=concat('{id},', solved) WHERE user='{user}';".format(id=self.record['id'], user=self.user))
# 解答数をインクリメント
cursor.execute("UPDATE problem SET solved=solved+1 WHERE id={id};".format(id=self.record['id']))
# 解答ユーザーを更新
cursor.execute("UPDATE problem SET solved_user='{user}' WHERE id={id};".format(user=self.user, id=self.record['id']))
# 解答時間を更新
cursor.execute("UPDATE problem SET last_date='{date}' WHERE id={id};".format(date=time.strftime('%Y-%m-%d %H:%M:%S'), id=self.record['id']))
cursor.close()
self.db.commit()
return
#
# 新規要求を処理
#
def handle(self, env, response):
self.ws = env['wsgi.websocket']
print("[INFO] 新しい要求を受信しました。")
# 要求を取得
self.packet = self.ws.receive()
if not self.analyse_packet(): return
# 問題を取得
self.get_problem()
# 実行
self.execute()
return
#
# 問題の詳細を取得
#
def get_problem(self):
cursor = self.db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("SELECT * FROM problem WHERE id={id};".format(id=self.packet['id']))
self.record = cursor.fetchall()[0]
cursor.close()
return
#
# データを解析
#
def analyse_packet(self):
from Crypto.Cipher import AES
# パケットをJSONとして展開
try:
self.packet = json.loads(self.packet)
except Exception:
print("[ERROR] JSONの展開に失敗しました。")
return False
# データの整合性を確認
if not self.check_payload():
print("[ERROR] 不正なデータであると判別されました。")
self.ws.send('{"error":"無効なデータが送信されました。"}')
return False
# ユーザー名を復号化
iv = self.packet['iv'].decode('base64')
enc_user = self.packet['user'].decode('base64')
aes = AES.new(self.enckey, AES.MODE_CBC, iv)
self.user = aes.decrypt(enc_user).replace('\x00', '')
print("[INFO] この試行のユーザーは{0}です。".format(self.user))
# エスケープ
self.user = MySQLdb.escape_string(self.user)
self.packet['id'] = int(self.packet['id'])
return True
#
# payloadが有効かを調べる
#
def check_payload(self):
# 最低限の情報が記載されているか
if 'lang' not in self.packet : return False
if 'code' not in self.packet : return False
if 'id' not in self.packet : return False
if 'iv' not in self.packet : return False
if 'user' not in self.packet : return False
# 言語が使用可能か
if 'compile' not in self.langlist : return False
if 'extension' not in self.langlist : return False
if self.packet['lang'] not in self.langlist['compile'] : return False
if self.packet['lang'] not in self.langlist['extension'] : return False
# データが正しい
return True
#
# ランダムな文字列を生成
#
def randstr(self, length):
import random
import string
return ''.join([
random.choice(string.ascii_letters + string.digits)
for i in range(length)
])
#
# リクエストを受ける
#
def procon(self, env, response):
path = env['PATH_INFO']
if path == "/":
return self.handle(env, response)
return
#
# サーバーを稼働させる
#
def run(self):
# サーバー初期化
server = pywsgi.WSGIServer(
(self.host, self.port),
self.procon,
handler_class = WebSocketHandler
)
# SQLへの接続
self.db = MySQLdb.connect(host = self.db_host,
db = self.db_name,
user = self.db_username,
passwd = self.db_password,
charset = 'utf8',
)
# サーバー稼働
server.serve_forever()
return<|fim▁end|>
|
],
|
<|file_name|>filters.py<|end_file_name|><|fim▁begin|># encoding: utf-8
'''Template filters
'''
def j(s):
"""Escape for JavaScript or encode as JSON"""
pass
<|fim▁hole|> from cjson import encode as _json
except ImportError:
try:
from minjson import write as _json
except ImportError:
import re
_RE = re.compile(r'(["\'\\])')
def _json(s):
return repr(_RE.sub(r'\\\1', s)).replace('\\\\','\\')
j = _json<|fim▁end|>
|
try:
|
<|file_name|>FrameworkObject.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
'''
Copyright (c) 2013, LastSeal S.A.
Copyright (c) 2011-2012, Joaquin G. Duo
All rights reserved.
This code is distributed under BSD 3-clause License.
For details check the LICENSE file in the root of the project.
'''
class FrameworkObject(object):
pass<|fim▁hole|>
def smokeTestModule():
FrameworkObject()
if __name__ == '__main__':
smokeTestModule()<|fim▁end|>
| |
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('sites', '0001_initial'),
]
<|fim▁hole|> fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_pk', models.TextField(verbose_name='object ID')),
('user_name', models.CharField(max_length=50, verbose_name="user's name", blank=True)),
('user_email', models.EmailField(max_length=254, verbose_name="user's email address", blank=True)),
('user_url', models.URLField(verbose_name="user's URL", blank=True)),
('comment', models.TextField(max_length=3000, verbose_name='comment')),
('submit_date', models.DateTimeField(default=None, verbose_name='date/time submitted', db_index=True)),
('ip_address', models.GenericIPAddressField(unpack_ipv4=True, null=True, verbose_name='IP address', blank=True)),
('is_public', models.BooleanField(default=True, help_text='Uncheck this box to make the comment effectively disappear from the site.', verbose_name='is public')),
('is_removed', models.BooleanField(default=False, help_text='Check this box if the comment is inappropriate. A "This comment has been removed" message will be displayed instead.', verbose_name='is removed')),
('content_type', models.ForeignKey(related_name='content_type_set_for_comment', verbose_name='content type', to='contenttypes.ContentType', on_delete=models.CASCADE)),
('site', models.ForeignKey(to='sites.Site', on_delete=models.CASCADE)),
('user', models.ForeignKey(related_name='comment_comments', verbose_name='user', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE)),
],
options={
'ordering': ('submit_date',),
'abstract': False,
'verbose_name': 'comment',
'verbose_name_plural': 'comments',
'permissions': [('can_moderate', 'Can moderate comments')],
},
),
]<|fim▁end|>
|
operations = [
migrations.CreateModel(
name='Comment',
|
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 15 14:09:59 2017
@author: SaintlyVi
"""
import pandas as pd
import numpy as np
from support import writeLog
def uncertaintyStats(submodel):
"""
Creates a dict with statistics for observed hourly profiles for a given year.
Use evaluation.evalhelpers.observedHourlyProfiles() to generate the input dataframe.
"""
allstats = list()
for c in submodel['class'].unique():
stats = submodel[submodel['class']==c].describe()
stats['customer_class'] = c
stats.reset_index(inplace=True)
stats.set_index(['customer_class','index'], inplace=True)
allstats.append(stats)
df = pd.concat(allstats)
return df[['AnswerID_count','valid_obs_ratio']]
def dataIntegrity(submodels, min_answerid, min_obsratio):
"""
This function returns the slice of submodels that meet the specified minimum uncertainty requirements. Submodels must form part of the same experiment (eg demand summary and hourly profiles).
"""
if isinstance(submodels, list):
models = submodels
else:
models = [submodels]
validmodels = pd.DataFrame(columns = ['submodel_name','valid_data','uncertainty_index',
'valid_unit_count', 'unit'])
for m in models:
name = m.name
valid_data = m[(m.AnswerID_count>=min_answerid) & (m.valid_obs_ratio>=min_obsratio)]
uix = len(valid_data) / len(m)
try:
valid_unit_count = valid_data['valid_hours'].sum()
unit = 'total_valid_hours'
except:
valid_unit_count = valid_data['AnswerID_count'].sum()
unit = 'valid_AnswerID_count'
validmodels = validmodels.append({'submodel_name':name,
'valid_data':valid_data,
'uncertainty_index':uix,
'valid_unit_count':valid_unit_count,
'unit':unit}, ignore_index=True)
validmodels.set_index('submodel_name', drop=True, inplace=True)
return validmodels
def modelSimilarity(ex_submodel, ex_ts, valid_new_submodel, new_ts, submod_type):
"""
This function calcualtes the evaluation measure for the run.
ex_submodel = (DataFrame) either existing/expert demand_summary or hourly_profiles submodel
valid_new_submodel = (DataFrame) output from dataIntegrity function
-> only want to compare valid data
submod_type = (str) one of [ds, hp]
-> ds=demand_summary, hp=hourly_profiles
"""
if submod_type == 'ds':
index_cols = ['class','YearsElectrified']
elif submod_type == 'hp':
index_cols = ['class','YearsElectrified','month','daytype','hour']
else:
return(print('Valid submod_type is one of [ds, hp] -> ds=demand_summary, hp=hourly_profiles.'))
merged_sub = ex_submodel.merge(valid_new_submodel, how='left', on=index_cols)
simvec = merged_sub[new_ts] - merged_sub[ex_ts]
simvec.dropna(inplace=True)
simveccount = len(simvec)
eucliddist = np.sqrt(sum(simvec**2))
return eucliddist, simveccount, merged_sub
def logCalibration(bm_model, year, exp_model, min_answerid = 2, min_obsratio = 0.85):
"""
This function logs the evaluation results of the run.
ex_model = [demand_summary, hourly_profiles, ds_val_col_name, hp_val_col_name]
"""
#Generate data model
ods = pd.read_csv('data/experimental_model/'+exp_model+'/demand_summary_'+year+'.csv')
ohp = pd.read_csv('data/experimental_model/'+exp_model+'/hourly_profiles_'+year+'.csv')
#Check data integrity
ods.name = 'demand_summary'
ohp.name = 'hourly_profiles'
validmodels = dataIntegrity([ods, ohp], min_answerid, min_obsratio)
valid_new_ds = validmodels.at['demand_summary','valid_data']
valid_new_hp = validmodels.at['hourly_profiles','valid_data']
new_dsts = 'M_kw_mean'
new_hpts = 'kva_mean'
#Fetch benchmark model
bm_ds = bm_model[0]
bm_hp = bm_model[1]
bm_dsts = bm_model[2]
bm_hpts = bm_model[3]
#Calculate model similarity
euclid_ds, count_ds, slice_ex_ds = modelSimilarity(bm_ds, bm_dsts, valid_new_ds, new_dsts, 'ds')
euclid_hp, count_hp, sliced_ex_hp = modelSimilarity(bm_hp, bm_hpts, valid_new_hp, new_hpts, 'hp')
#Prepare and write logs
ds_uix = validmodels.at['demand_summary','uncertainty_index']
ds_vuc = validmodels.at['demand_summary','valid_unit_count']
ds_unit = validmodels.at['demand_summary','unit']
hp_uix = validmodels.at['hourly_profiles','uncertainty_index']
hp_vuc = validmodels.at['hourly_profiles','valid_unit_count']
hp_unit = validmodels.at['hourly_profiles','unit']
loglineds = [year, exp_model, ods.name, min_answerid, min_obsratio, ds_uix, ds_vuc,
ds_unit, euclid_ds, count_ds]
loglinehp = [year, exp_model, ohp.name, min_answerid, min_obsratio, hp_uix, hp_vuc, <|fim▁hole|> 'uncertainty_ix','valid_unit_count','unit','sim_eucliddist','sim_count'])
writeLog(log_lines,'log_calibration')<|fim▁end|>
|
hp_unit, euclid_hp, count_hp]
log_lines = pd.DataFrame([loglineds, loglinehp], columns = ['year','experiment',
'submodel','min_answerid_count','min_valid_obsratio',
|
<|file_name|>word2vec_basic.py<|end_file_name|><|fim▁begin|># Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a string.
def read_data(filename):
f = zipfile.ZipFile(filename)
for name in f.namelist():
return f.read(name).split()
f.close()
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [ skip_window ]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], '->', labels[i, 0])
print(reverse_dictionary[batch[i]], '->', reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by<|fim▁hole|>valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
num_sampled, vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
tf.initialize_all_variables().run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) #in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i,:]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn and matplotlib to visualize embeddings.")<|fim▁end|>
|
# construction are also the most frequent.
|
<|file_name|>bitcoin_th_TH.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="th_TH" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Linkcoin</source>
<translation>เกี่ยวกับ บิตคอย์น</translation>
</message>
<message>
<location line="+39"/>
<source><b>Linkcoin</b> version</source>
<translation><b>บิตคอย์น<b>รุ่น</translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The Linkcoin developers</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>สมุดรายชื่อ</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>ดับเบิลคลิก เพื่อแก้ไขที่อยู่ หรือชื่อ</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>สร้างที่อยู่ใหม่</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>คัดลอกที่อยู่ที่ถูกเลือกไปยัง คลิปบอร์ดของระบบ</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your Linkcoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Linkcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified Linkcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>ลบ</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your Linkcoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation>ส่งออกรายชื่อทั้งหมด</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>ส่งออกผิดพลาด</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>ไม่สามารถเขียนไปยังไฟล์ %1</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>ชื่อ</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>ที่อยู่</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(ไม่มีชื่อ)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>ใส่รหัสผ่าน</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>รหัสผา่นใหม่</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>กรุณากรอกรหัสผ่านใหม่อีกครั้งหนึ่ง</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>กระเป๋าสตางค์ที่เข้ารหัส</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>เปิดกระเป๋าสตางค์</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>ถอดรหัสกระเป๋าสตางค์</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>เปลี่ยนรหัสผ่าน</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>กรอกรหัสผ่านเก่าและรหัสผ่านใหม่สำหรับกระเป๋าสตางค์</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>ยืนยันการเข้ารหัสกระเป๋าสตางค์</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR LITECOINS</b>!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>กระเป๋าสตางค์ถูกเข้ารหัสเรียบร้อยแล้ว</translation>
</message>
<message>
<location line="-56"/>
<source>Linkcoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your linkcoins from being stolen by malware infecting your computer.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>การเข้ารหัสกระเป๋าสตางค์ผิดพลาด</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>รหัสผ่านที่คุณกรอกไม่ตรงกัน</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation type="unfinished"/>
</message>
</context><|fim▁hole|> <name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Show information about Linkcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-347"/>
<source>Send coins to a Linkcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for Linkcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>Linkcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>&About Linkcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your Linkcoin addresses to prove you own them</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified Linkcoin addresses</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+47"/>
<source>Linkcoin client</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to Linkcoin network</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid Linkcoin address or malformed URI parameters.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. Linkcoin can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Linkcoin address.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>Linkcoin-Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Automatically start Linkcoin after logging in to the system.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Start Linkcoin on system login</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Linkcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Connect to the Linkcoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Linkcoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Whether to show Linkcoin addresses in the transaction list or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Linkcoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Linkcoin network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation type="unfinished"/>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start linkcoin: click-to-pay handler</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Show the Linkcoin-Qt help message to get a list with possible Linkcoin command-line options.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-104"/>
<source>Linkcoin - Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Linkcoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Open the Linkcoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the Linkcoin RPC console.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>123.456 BTC</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source><b>%1</b> to %2 (%3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Linkcoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Linkcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Linkcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Linkcoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Enter Linkcoin signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The Linkcoin developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>ที่อยู่</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>วันนี้</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>ชื่อ</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>ที่อยู่</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>ส่งออกผิดพลาด</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>ไม่สามารถเขียนไปยังไฟล์ %1</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>Linkcoin version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or linkcoind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: linkcoin.conf)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: linkcoind.pid)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 9333 or testnet: 19333)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on <port> (default: 9332 or testnet: 19332)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=linkcoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Linkcoin Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Linkcoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Linkcoin will not work properly.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the Linkcoin Wiki for SSL setup instructions)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of Linkcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart Linkcoin to complete</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. Linkcoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation type="unfinished"/>
</message>
</context>
</TS><|fim▁end|>
|
<context>
|
<|file_name|>factories.py<|end_file_name|><|fim▁begin|># pylint: disable=missing-docstring
from datetime import datetime, timedelta
import factory
import pytz
from factory.django import DjangoModelFactory
from factory.fuzzy import FuzzyText
from oauth2_provider.models import AccessToken, Application, RefreshToken
from openedx.core.djangoapps.oauth_dispatch.models import ApplicationAccess
from common.djangoapps.student.tests.factories import UserFactory
class ApplicationFactory(DjangoModelFactory):
class Meta:
model = Application
user = factory.SubFactory(UserFactory)
client_id = factory.Sequence('client_{}'.format)
client_secret = 'some_secret'
client_type = 'confidential'
authorization_grant_type = Application.CLIENT_CONFIDENTIAL
name = FuzzyText(prefix='name', length=8)
class ApplicationAccessFactory(DjangoModelFactory):
class Meta:
model = ApplicationAccess
application = factory.SubFactory(ApplicationFactory)<|fim▁hole|>
class AccessTokenFactory(DjangoModelFactory):
class Meta:
model = AccessToken
django_get_or_create = ('user', 'application')
token = FuzzyText(length=32)
expires = datetime.now(pytz.UTC) + timedelta(days=1)
class RefreshTokenFactory(DjangoModelFactory):
class Meta:
model = RefreshToken
django_get_or_create = ('user', 'application')
token = FuzzyText(length=32)<|fim▁end|>
|
scopes = ['grades:read']
|
<|file_name|>api.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
mod = flask.Blueprint('api', __name__)<|fim▁end|>
|
import flask
|
<|file_name|>wow.js<|end_file_name|><|fim▁begin|>version https://git-lfs.github.com/spec/v1<|fim▁hole|><|fim▁end|>
|
oid sha256:4fc049501415815d5fa555bc735c359c381441d2107851b32b30ae5ba192a892
size 11548
|
<|file_name|>wavedrom.hpp<|end_file_name|><|fim▁begin|>/**
* @file wavedrom.hpp
* @author Jeramie Vens
* @date March 7, 2016: Initial version
* @brief This is the main include for the Wavedrom C++ Library
*
*/
#ifndef WAVEDROM_HPP
#define WAVEDROM_HPP
#include "../libwavedrom/group.hpp"
#include "../libwavedrom/signal.hpp"
/**
* @brief The Wavedrom Library encases all of its code in the wavedrom namespace.
*/
namespace wavedrom
{
<|fim▁hole|>/**
* @brief A wavedrom object.
* @details The wavedrom object is the main entry point of the wavedrom
* library. It encapsulates all the supported features of
* Wavedrom.
*/
class Wavedrom : public Group
{
public:
/**
* @brief Create a new wavedrom object.
*/
Wavedrom();
virtual ~Wavedrom();
/**
* @brief Export this wavedrom object to a Wavedrom JSON format
* @details This will allocate a new c string containing a valid
* Wavedrom JSON formmated descrption of this waveform.
* The exported JSON description can be converted to an
* image using http://wavedrom.com/editor.html
* @return An allocated cstring that contains the JSON object.
*/
char * Export();
};
}
#endif<|fim▁end|>
| |
<|file_name|>BlockLowPassVector.hpp<|end_file_name|><|fim▁begin|>/****************************************************************************
*
* Copyright (C) 2012 PX4 Development Team. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name PX4 nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/**
* @file blocks.h
*
* Controller library code
*/
#pragma once
#include <px4_platform_common/defines.h>
#include <assert.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
#include <mathlib/math/test/test.hpp>
#include <mathlib/math/filter/LowPassFilter2p.hpp>
#include "block/Block.hpp"
#include "block/BlockParam.hpp"
#include "matrix/math.hpp"
namespace control
{
template<class Type, size_t M>
class __EXPORT BlockLowPassVector: public Block
{<|fim▁hole|>public:
// methods
BlockLowPassVector(SuperBlock *parent,
const char *name) :
Block(parent, name),
_state(),
_fCut(this, "") // only one parameter, no need to name
{
for (size_t i = 0; i < M; i++) {
_state(i) = 0.0f / 0.0f;
}
}
virtual ~BlockLowPassVector() = default;
matrix::Vector<Type, M> update(const matrix::Matrix<Type, M, 1> &input)
{
for (size_t i = 0; i < M; i++) {
if (!PX4_ISFINITE(getState()(i))) {
setState(input);
}
}
float b = 2 * float(M_PI) * getFCut() * getDt();
float a = b / (1 + b);
setState(input * a + getState() * (1 - a));
return getState();
}
// accessors
matrix::Vector<Type, M> getState() { return _state; }
float getFCut() { return _fCut.get(); }
void setState(const matrix::Vector<Type, M> &state) { _state = state; }
private:
// attributes
matrix::Vector<Type, M> _state;
control::BlockParamFloat _fCut;
};
} // namespace control<|fim▁end|>
| |
<|file_name|>slugify.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Copyright © Val Neekman ([Neekware Inc.](http://neekware.com))
[ [email protected], [@vneekman](https://twitter.com/vneekman) ]
All rights reserved.
Redistribution and use in source and binary forms,
with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of this project nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
source: https://github.com/un33k/python-slugify
"""
__version__ = '0.0.7'
__all__ = ['slugify']
import re
import unicodedata
import types
import sys
from htmlentitydefs import name2codepoint
from unidecode import unidecode
# character entity reference
CHAR_ENTITY_REXP = re.compile('&(%s);' % '|'.join(name2codepoint))
# decimal character reference
DECIMAL_REXP = re.compile('&#(\d+);')
# hexadecimal character reference
HEX_REXP = re.compile('&#x([\da-fA-F]+);')
REPLACE1_REXP = re.compile(r'[\']+')
REPLACE2_REXP = re.compile(r'[^-a-z0-9]+')
REMOVE_REXP = re.compile('-{2,}')
def smart_truncate(string, max_length=0, word_boundaries=False, separator=' '):
""" Truncate a string """
string = string.strip(separator)
if not max_length:
return string
<|fim▁hole|> if len(string) < max_length:
return string
if not word_boundaries:
return string[:max_length].strip(separator)
if separator not in string:
return string[:max_length]
truncated = ''
for word in string.split(separator):
if word:
next_len = len(truncated) + len(word) + len(separator)
if next_len <= max_length:
truncated += '{0}{1}'.format(word, separator)
if not truncated:
truncated = string[:max_length]
return truncated.strip(separator)
def slugify(text, entities=True, decimal=True, hexadecimal=True, max_length=0,
word_boundary=False, separator='-'):
""" Make a slug from the given text """
# text to unicode
if type(text) != types.UnicodeType:
text = unicode(text, 'utf-8', 'ignore')
# decode unicode
text = unidecode(text)
# text back to unicode
if type(text) != types.UnicodeType:
text = unicode(text, 'utf-8', 'ignore')
# character entity reference
if entities:
text = CHAR_ENTITY_REXP.sub(lambda m:
unichr(name2codepoint[m.group(1)]), text)
# decimal character reference
if decimal:
try:
text = DECIMAL_REXP.sub(lambda m: unichr(int(m.group(1))), text)
except:
pass
# hexadecimal character reference
if hexadecimal:
try:
text = HEX_REXP.sub(lambda m: unichr(int(m.group(1), 16)), text)
except:
pass
# translate
text = unicodedata.normalize('NFKD', text)
if sys.version_info < (3,):
text = text.encode('ascii', 'ignore')
# replace unwanted characters
text = REPLACE1_REXP.sub('', text.lower())
# replace ' with nothing instead with -
text = REPLACE2_REXP.sub('-', text.lower())
# remove redundant -
text = REMOVE_REXP.sub('-', text).strip('-')
# smart truncate if requested
if max_length > 0:
text = smart_truncate(text, max_length, word_boundary, '-')
if separator != '-':
text = text.replace('-', separator)
return text<|fim▁end|>
| |
<|file_name|>env.js<|end_file_name|><|fim▁begin|>const defaultEnv = process.env.NODE_ENV || 'development';
function getEnv(name = defaultEnv) {<|fim▁hole|>}
const env = getEnv();
module.exports = env;<|fim▁end|>
|
const isProduction = name === 'production' || name === 'prod';
const isDev = !isProduction;
return { name, isProduction, isDev, getEnv };
|
<|file_name|>ArchiveNM.py<|end_file_name|><|fim▁begin|># Python 3: ArchiveNM.py
# Function:
# This will collect the files in /home/postgres that
# need to be sent to a new Natural Message machine
# that is being initialized. This currently grabs
# directory server and shard server files.
# It can also be used as an archiver.
import datetime
import tarfile
import os
import sys
# For the version code, enter the format used
# in the naturalmsg_svr_#_#_#.py files
test_or_prod = 'prod'
version = '0_0_5'
DSTAMP = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
# (do not add a trailing slash on directory names)
pgm_dir = '/var/natmsg'
sql_dir = '/home/postgres/shard/sql/' + test_or_prod
function_dir = '/home/postgres/shard/sql/' + test_or_prod + '/functions'
pgm_files = ('naturalmsg-svr' + version + '.py',
'shardfunc_cp' + version + '.py')
sql_files = ( \
'0001create_db.sh',
'0002create_tables.sql',
'0005shardserver.sql',
'0007shardbig.sql',
'0020payment.sql',
'0500sysmon.sql',
'blog01.sql' \
)
function_files = ( \
'nm_blog_entry_newest.sql',
'read_inbasket_stage010.sql',
'read_inbasket_stage020.sql',
'read_inbasket_stage030.sql',<|fim▁hole|> 'shard_burn.sql',
'shard_delete_db_entries.sql',
'shard_delete.sql',
'shard_expire_big.sql',
'shard_expire.sql',
'shard_id_exists.sql',
'smd_create0010.sql',
'sysmon001.sql' \
)
tar_fname_base = 'NatMsgSQLArchive' + version
tar_fname = tar_fname_base + '.tar'
if os.path.isfile(tar_fname):
# The tar file already exists, rename it
try:
os.renames(tar_fname, tar_fname_base + '-' + DSTAMP + '.tar')
except:
print('Error renaming an existing tar file: ' + tar_fname)
print('Maybe you do not have permission.')
sys.exit(12)
t = tarfile.TarFile(tar_fname, mode='w')
for f in pgm_files:
# the full path is already specified in the file list.
t.add(os.path.normpath(pgm_dir + '/' + f))
for f in sql_files:
t.add(os.path.normpath(sql_dir + '/' + f))
for f in function_files:
t.add(os.path.normpath(function_dir + '/' + f))
t.close()<|fim▁end|>
|
'scan_shard_delete.sql',
|
<|file_name|>CMutex.cpp<|end_file_name|><|fim▁begin|>//============== IV: Multiplayer - http://code.iv-multiplayer.com ==============
//
// File: CMutex.cpp
// Project: Shared
// Author(s): jenksta
// License: See LICENSE in root directory
//
//==============================================================================
#include "CMutex.h"
#include <SharedUtility.h>
CMutex::CMutex()
{
// Create the mutex
#ifdef WIN32
#ifdef USE_CRITICAL_SECTION
InitializeCriticalSection(&m_criticalSection);
#else
m_hMutex = CreateMutex(NULL, FALSE, NULL);
#endif
#else
pthread_mutex_init(&m_mutex, NULL);
#endif
// Set the lock count to its default value
m_iLockCount = 0;
}
CMutex::~CMutex()
{
// Delete the mutex
#ifdef WIN32
#ifdef USE_CRITICAL_SECTION
DeleteCriticalSection(&m_criticalSection);
#else
CloseHandle(m_hMutex);
#endif
#else
pthread_mutex_destroy(&m_mutex);
#endif
}
void CMutex::Lock()
{
// Lock the mutex
#ifdef WIN32
#ifdef USE_CRITICAL_SECTION
EnterCriticalSection(&m_criticalSection);
#else
WaitForSingleObject(m_hMutex, INFINITE);
#endif
#else
pthread_mutex_lock(&m_mutex);
#endif
// Increment the lock count
m_iLockCount++;
}
bool CMutex::TryLock(unsigned int uiTimeOutMilliseconds)
{
// Attempt to lock the mutex
bool bLocked = false;
#if defined(WIN32) && !defined(USE_CRITICAL_SECTION)
bLocked = (WaitForSingleObject(m_hMutex, uiTimeOutMilliseconds) == 0);
#else
if(uiTimeOutMilliseconds == 0)
{
#ifdef WIN32
bLocked = (TryEnterCriticalSection(&m_criticalSection) != 0);
#else
bLocked = pthread_mutex_trylock(&m_mutex);
#endif
}
else
{
unsigned long ulEndTime = (SharedUtility::GetTime() + uiTimeOutMilliseconds);
while(SharedUtility::GetTime() < ulEndTime)
{
#ifdef WIN32
if(TryEnterCriticalSection(&m_criticalSection))
#else
if(pthread_mutex_trylock(&m_mutex))
#endif
{
bLocked = true;
break;
}
}
}
#endif
// Did the mutex lock successfully?
if(bLocked)
{
// Increment the lock count
m_iLockCount++;
}
return bLocked;
}
void CMutex::Unlock()<|fim▁hole|> m_iLockCount--;
// Unlock the mutex
#ifdef WIN32
#ifdef USE_CRITICAL_SECTION
LeaveCriticalSection(&m_criticalSection);
#else
ReleaseMutex(m_hMutex);
#endif
#else
pthread_mutex_unlock(&m_mutex);
#endif
}<|fim▁end|>
|
{
// Decrement the lock count
|
<|file_name|>jquery.squeezebox.js<|end_file_name|><|fim▁begin|>// Replacement for jquery.ui.accordion to avoid dealing with
// jquery.ui theming.
//
// Usage: $('#container').squeezebox(options);
// where the direct child elements of '#container' are
// sequential pairs of header/panel elements, and options
// is an optional object with any of the following properties:
//
// activeHeaderClass: Class name to apply to the active header
// headerSelector: Selector for the header elements
// nextPanelSelector: Selector for the next panel from a header
// speed: Animation speed
(function($) {
$.fn.squeezebox = function(options) {
// Default options.
options = $.extend({
activeHeaderClass: 'squeezebox-header-on',
headerSelector: '> *:even',
nextPanelSelector: ':first',
speed: 500
}, options);
var headers = this.find(options.headerSelector);
// When a header is clicked, iterate through each of the
// headers, getting their corresponding panels, and opening
// the panel for the header that was clicked (slideDown),
// closing the others (slideUp).
headers.click(function() {
var clicked = this;
$.each(headers, function(i, header) {
var panel = $(header).next(options.nextPanelSelector);
if (clicked == header) {<|fim▁hole|> } else {
panel.slideUp(options.speed);
$(header).removeClass(options.activeHeaderClass);
}
});
});
};
})(jQuery);<|fim▁end|>
|
panel.slideDown(options.speed);
$(header).addClass(options.activeHeaderClass);
|
<|file_name|>main.go<|end_file_name|><|fim▁begin|>// Copyright 2017-2019 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"log"
"os"
"path"
"path/filepath"
"github.com/u-root/u-root/pkg/boot/jsonboot"
"github.com/u-root/u-root/pkg/mount"
"github.com/u-root/u-root/pkg/mount/block"
)
// TODO backward compatibility for BIOS mode with partition type 0xee
// TODO use a proper parser for grub config (see grub.go)
// Command-line flags. Exactly one of -grub or -kernel selects the boot mode;
// the remaining flags tune mounting, device selection, and verbosity.
var (
	flagBaseMountPoint = flag.String("m", "/mnt", "Base mount point where to mount partitions")
	flagDryRun         = flag.Bool("dryrun", false, "Do not actually kexec into the boot config")
	flagDebug          = flag.Bool("d", false, "Print debug output")
	flagConfigIdx      = flag.Int("config", -1, "Specify the index of the configuration to boot. The order is determined by the menu entries in the Grub config")
	flagGrubMode       = flag.Bool("grub", false, "Use GRUB mode, i.e. look for valid Grub/Grub2 configuration in default locations to boot a kernel. GRUB mode ignores -kernel/-initramfs/-cmdline")
	flagKernelPath     = flag.String("kernel", "", "Specify the path of the kernel to execute. If using -grub, this argument is ignored")
	flagInitramfsPath  = flag.String("initramfs", "", "Specify the path of the initramfs to load. If using -grub, this argument is ignored")
	flagKernelCmdline  = flag.String("cmdline", "", "Specify the kernel command line. If using -grub, this argument is ignored")
	flagDeviceGUID     = flag.String("guid", "", "GUID of the device where the kernel (and optionally initramfs) are located. Ignored if -grub is set or if -kernel is not specified")
)

// debug is a no-op by default; main() replaces it with log.Printf when -d is set.
var debug = func(string, ...interface{}) {}
// mountByGUID mounts the first partition whose partition-type GUID matches
// `guid`, read-only, under a subdirectory of `baseMountpoint` named after the
// device (e.g. /your/base/mountpoint/sda1). When several partitions share the
// GUID, the first match wins (a warning is logged). Returns the resulting
// mount.MountPoint, or an error if no partition matches or mounting fails.
func mountByGUID(devices block.BlockDevices, guid, baseMountpoint string) (*mount.MountPoint, error) {
	log.Printf("Looking for partition with GUID %s", guid)
	candidates := devices.FilterPartType(guid)
	if len(candidates) == 0 {
		return nil, fmt.Errorf("no partitions with GUID %s", guid)
	}
	log.Printf("Partitions with GUID %s: %+v", guid, candidates)
	if len(candidates) > 1 {
		log.Printf("Warning: more than one partition found with the given GUID. Using the first one")
	}
	chosen := candidates[0]
	target := filepath.Join(baseMountpoint, chosen.Name)
	return chosen.Mount(target, mount.MS_RDONLY)
}
// BootGrubMode tries to boot a kernel in GRUB mode. GRUB mode means:
// * look for the partition with the specified GUID, and mount it
// * if no GUID is specified, mount all of the specified devices
// * try to mount the device(s) using any of the kernel-supported filesystems
// * look for a GRUB configuration in various well-known locations
// * build a list of valid boot configurations from the found GRUB configuration files
// * try to boot every valid boot configuration until one succeeds
//
// `devices` is the list of block.BlockDev to search for bootable configurations.
// `baseMountpoint` is the directory under which each device is mounted.
// `guid` is the partition GUID to look for; if empty, all devices are searched.
// `dryrun`, when true, logs the chosen configuration without booting it.
// `configIdx`, when non-negative, selects exactly one of the discovered
// configurations (by menu order) instead of trying them all.
func BootGrubMode(devices block.BlockDevices, baseMountpoint string, guid string, dryrun bool, configIdx int) error {
	var mounted []*mount.MountPoint
	if guid == "" {
		// try mounting all the available devices, with all the supported file
		// systems
		debug("trying to mount all the available block devices with all the supported file system types")
		for _, dev := range devices {
			mountpath := filepath.Join(baseMountpoint, dev.Name)
			if mountpoint, err := dev.Mount(mountpath, mount.MS_RDONLY); err != nil {
				debug("Failed to mount %s on %s: %v", dev, mountpath, err)
			} else {
				mounted = append(mounted, mountpoint)
			}
		}
	} else {
		// FIX: the local was previously named `mount`, shadowing the imported
		// mount package inside this branch.
		mp, err := mountByGUID(devices, guid, baseMountpoint)
		if err != nil {
			return err
		}
		mounted = append(mounted, mp)
	}
	log.Printf("mounted: %+v", mounted)
	defer func() {
		// clean up: lazily detach everything we mounted above.
		for _, mountpoint := range mounted {
			if err := mountpoint.Unmount(mount.MNT_DETACH); err != nil {
				debug("Failed to unmount %v: %v", mountpoint, err)
			}
		}
	}()
	// search for a valid grub config and extract the boot configurations
	bootconfigs := make([]jsonboot.BootConfig, 0)
	for _, mountpoint := range mounted {
		bootconfigs = append(bootconfigs, ScanGrubConfigs(devices, mountpoint.Path)...)
	}
	if len(bootconfigs) == 0 {
		return fmt.Errorf("No boot configuration found")
	}
	log.Printf("Found %d boot configs", len(bootconfigs))
	for _, cfg := range bootconfigs {
		debug("%+v", cfg)
	}
	for n, cfg := range bootconfigs {
		log.Printf(" %d: %s\n", n, cfg.Name)
	}
	if configIdx > -1 {
		// BUGFIX: the previous code looped over all configs and then
		// unconditionally logged "Invalid arg -config ..." even when the
		// index was valid and a boot had just been attempted. Bounds-check
		// the index directly instead.
		if configIdx >= len(bootconfigs) {
			log.Printf("Invalid arg -config %d: there are only %d bootconfigs available\n", configIdx, len(bootconfigs))
			return nil
		}
		cfg := bootconfigs[configIdx]
		if dryrun {
			debug("Dry-run mode: will not boot the found configuration")
			debug("Boot configuration: %+v", cfg)
			return nil
		}
		if err := cfg.Boot(); err != nil {
			log.Printf("Failed to boot kernel %s: %v", cfg.Kernel, err)
		}
		return nil
	}
	if dryrun {
		cfg := bootconfigs[0]
		debug("Dry-run mode: will not boot the found configuration")
		debug("Boot configuration: %+v", cfg)
		return nil
	}
	// try to kexec into every boot config kernel until one succeeds
	// (a successful cfg.Boot() does not return)
	for _, cfg := range bootconfigs {
		debug("Trying boot configuration %+v", cfg)
		if err := cfg.Boot(); err != nil {
			log.Printf("Failed to boot kernel %s: %v", cfg.Kernel, err)
		}
	}
	// if we reach this point, no boot configuration succeeded
	log.Print("No boot configuration succeeded")
	return nil
}
// BootPathMode tries to boot a kernel in PATH mode. This means:
// * look for a partition with the given GUID and mount it
// * look for the kernel and initramfs in the provided locations
// * boot the kernel with the provided command line
//
// The first parameter, `devices` is a list of block.BlockDev . The function
// will look for bootable configurations on these devices
// The second parameter, `baseMountPoint`, is the directory where the mount
// points for each device will be created.
// The third parameter, `guid`, is the partition GUID to look for.
// The fourth parameter, `dryrun`, will not boot the found configurations if set
// to true.
func BootPathMode(devices block.BlockDevices, baseMountpoint string, guid string, dryrun bool) error {
	// FIX: the local was previously named `mount`, shadowing the imported
	// mount package within this function.
	mp, err := mountByGUID(devices, guid, baseMountpoint)
	if err != nil {
		return err
	}
	// Kernel/initramfs paths are interpreted relative to the mounted partition.
	fullKernelPath := path.Join(mp.Path, *flagKernelPath)
	fullInitramfsPath := path.Join(mp.Path, *flagInitramfsPath)
	cfg := jsonboot.BootConfig{
		Kernel:     fullKernelPath,
		Initramfs:  fullInitramfsPath,
		KernelArgs: *flagKernelCmdline,
	}
	debug("Trying boot configuration %+v", cfg)
	if dryrun {
		log.Printf("Dry-run, will not actually boot")
	} else {
		if err := cfg.Boot(); err != nil {
			return fmt.Errorf("Failed to boot kernel %s: %v", cfg.Kernel, err)
		}
	}
	return nil
}
// main parses flags, enumerates block devices, optionally dumps partition
// tables, and dispatches to GRUB mode or PATH mode. A successful kexec never
// returns, so falling off the end of either mode means boot failed.
func main() {
	flag.Parse()
	// The two boot modes are mutually exclusive.
	if *flagGrubMode && *flagKernelPath != "" {
		log.Fatal("Options -grub and -kernel are mutually exclusive")
	}
	if *flagDebug {
		debug = log.Printf
	}
	// Get all the available block devices
	devices, err := block.GetBlockDevices()
	if err != nil {
		log.Fatal(err)
	}
	// print partition info
	if *flagDebug {
		for _, dev := range devices {
			log.Printf("Device: %+v", dev)
			table, err := dev.GPTTable()
			if err != nil {
				// Not GPT-partitioned (or table unreadable): skip silently.
				continue
			}
			log.Printf(" Table: %+v", table)
			for _, part := range table.Partitions {
				log.Printf(" Partition: %+v\n", part)
				if !part.IsEmpty() {
					log.Printf(" UUID: %s\n", part.Type.String())
				}
			}
		}
	}
	// TODO boot from EFI system partitions.
	if *flagGrubMode {
		if err := BootGrubMode(devices, *flagBaseMountPoint, *flagDeviceGUID, *flagDryRun, *flagConfigIdx); err != nil {
			log.Fatal(err)
		}
	} else if *flagKernelPath != "" {
		if err := BootPathMode(devices, *flagBaseMountPoint, *flagDeviceGUID, *flagDryRun); err != nil {
			log.Fatal(err)
		}
	} else {
		log.Fatal("You must specify either -grub or -kernel")
	}
	// Reaching this line means no configuration booted (kexec did not happen).
	os.Exit(1)
}
/**
 * A thin wrapper around Web Storage (localStorage / sessionStorage).
 * Author : smohan
 * Website : https://smohan.net
 * Date: 2017/10/12
 * Arg 1 (optional): boolean — true selects sessionStorage; it must be passed
 *   explicitly for every get/set/delete against sessionStorage.
 * Next arg: string — the key name.
 * Following arg: null deletes the key; any other non-empty value sets it;
 *   omitted reads it.
 * Last arg: string — optional key-name prefix.
 * localStorage is the default and needs no boolean flag.
 */
// Prototype-less in-memory cache; written on every set and consulted before
// Storage on reads, avoiding repeated JSON.parse of the same key.
const MEMORY_CACHE = Object.create(null)
/**
 * Get/set/delete a JSON-serialized value in Web Storage.
 * Reassembled from FIM-marker corruption: the get-path's `let _value`/`try`
 * lines had been displaced past the end of the function.
 *
 * Dispatch on the value argument: `null` deletes, other truthy values (or 0)
 * set, and an omitted/empty value reads.
 */
export default function () {
  let isSession = false,
    name, value, prefix
  let args = arguments
  // A leading boolean selects sessionStorage and is stripped from the args.
  if (typeof args[0] === 'boolean') {
    isSession = args[0]
    args = [].slice.call(args, 1)
  }
  name = args[0]
  value = args[1]
  prefix = args[2] === undefined ? '_mo_data_' : args[2]
  const Storage = isSession ? window.sessionStorage : window.localStorage
  if (!name || typeof name !== 'string') {
    throw new Error('name must be a string')
  }
  let cacheKey = (prefix && typeof prefix === 'string') ? (prefix + name) : name
  if (value === null) { //remove
    delete MEMORY_CACHE[cacheKey]
    return Storage.removeItem(cacheKey)
  } else if (!value && value !== 0) { //get
    // Serve from the in-memory cache when possible; fall back to Storage.
    if (MEMORY_CACHE[cacheKey]) {
      return MEMORY_CACHE[cacheKey]
    }
    let _value = undefined
    try {
      _value = JSON.parse(Storage.getItem(cacheKey))
    } catch (e) {}
    return _value
  } else { //set
    MEMORY_CACHE[cacheKey] = value
    return Storage.setItem(cacheKey, JSON.stringify(value))
  }
}
#!/usr/bin/env python3
import copy
import matplotlib.pyplot as plt
import numpy as np
import pytest
from pysisyphus.plotters.AnimPlot import AnimPlot
from pysisyphus.calculators.MullerBrownPot import MullerBrownPot
#from pysisyphus.calculators.MullerBrownSympyPot import MullerBrownPot
from pysisyphus.cos.NEB import NEB
from pysisyphus.cos.SimpleZTS import SimpleZTS
from pysisyphus.optimizers.FIRE import FIRE
from pysisyphus.optimizers.BFGS import BFGS
from pysisyphus.optimizers.LBFGS import LBFGS
from pysisyphus.Geometry import Geometry
from pysisyphus.optimizers.SteepestDescent import SteepestDescent
# Baseline optimizer settings shared by every chain-of-states test below.
# Tests copy this dict (copy.copy) and override individual entries.
KWARGS = {
    "images": 4,
    "max_cycles": 100,
    "max_step": 0.02,
    "convergence": {
        "max_force_thresh": 0.1,
        "rms_force_thresh": 0.02,
        "max_step_thresh": 0.005,
        "rms_step_thresh": 0.001,
    },
    "dump": False,
}
def get_geoms(keys=("B", "C", "TSA", "A")):
    """Return Geometry objects for named points on the Muller-Brown surface.

    ``keys`` selects which predefined points are returned, in order.
    """
    points = {
        "A": (-0.558, 1.442, 0),     # Minimum A
        "B": (0.6215, 0.02838, 0),   # Minimum B
        "C": (-0.05, 0.467, 0),      # Minimum C
        "AC": (-0.57, 0.8, 0),       # Between A and C
        "TSA": (-0.822, 0.624, 0),   # Saddle point A
    }
    # NOTE(review): the original wrote atoms = ("H"), which is just the
    # string "H" (no trailing comma); that value is preserved here.
    atoms = "H"
    return [Geometry(atoms, np.array(points[key])) for key in keys]
def run_cos_opt(cos, Opt, images, **kwargs):
    """Interpolate ``cos`` to ``images`` images, attach a MullerBrownPot
    calculator to every image, run the optimizer, and return it."""
    cos.interpolate(images)
    optimizer = Opt(cos, **kwargs)
    for image in cos.images:
        image.set_calculator(MullerBrownPot())
    optimizer.run()
    return optimizer
def animate(opt):
    """Animate the optimization on a Muller-Brown contour plot.

    FIX: ``__main__`` binds ``ap = animate(opt)`` but the original returned
    None; return the AnimPlot so the caller keeps a live reference (matplotlib
    animations can stop when no reference survives).
    """
    xlim = (-1.75, 1.25)
    ylim = (-0.5, 2.25)
    levels = (-150, -15, 40)
    ap = AnimPlot(MullerBrownPot(), opt, xlim=xlim, ylim=ylim, levels=levels)
    ap.animate()
    return ap
@pytest.mark.sd
def test_steepest_descent_neb():
    """NEB + SteepestDescent, 4 images: pinned to converge in 56 cycles."""
    kwargs = copy.copy(KWARGS)
    kwargs["images"] = 4
    neb = NEB(get_geoms())
    opt = run_cos_opt(neb, SteepestDescent, **kwargs)
    assert(opt.is_converged)
    assert(opt.cur_cycle == 56)
    return opt
@pytest.mark.sd
def test_steepest_descent_straight_neb():
    """Something is really really wrong here."""
    kwargs = copy.copy(KWARGS)
    kwargs["images"] = 10
    kwargs["max_cycles"] = 100
    # Deliberately loose thresholds; see the (inherited) docstring above.
    convergence = {
        "max_force_thresh": 1.16,
        "rms_force_thresh": 0.27,
        "max_step_thresh": 0.021,
        "rms_step_thresh": 0.005,
    }
    kwargs["convergence"] = convergence
    neb = NEB(get_geoms(("A", "B")))
    opt = run_cos_opt(neb, SteepestDescent, **kwargs)
    assert(opt.is_converged)
    assert(opt.cur_cycle == 62)
    return opt
@pytest.mark.bfgs
def test_bfgs_straight_neb():
    """Something is really really wrong here."""
    kwargs = copy.copy(KWARGS)
    kwargs["images"] = 10
    # Deliberately loose thresholds; see the (inherited) docstring above.
    convergence = {
        "max_force_thresh": 5.0,
        "rms_force_thresh": 1,
        "max_step_thresh": 0.002,
        "rms_step_thresh": 0.0006,
    }
    kwargs["convergence"] = convergence
    neb = NEB(get_geoms(("A", "B")))
    opt = run_cos_opt(neb, BFGS, **kwargs)
    assert(opt.is_converged)
    assert(opt.cur_cycle == 45)
    return opt
@pytest.mark.lbfgs
def test_lbfgs_neb():
    """NEB + LBFGS with fixed endpoints; convergence asserts are disabled."""
    kwargs = copy.copy(KWARGS)
    kwargs["images"] = 3
    kwargs["fix_ends"] = True
    k_min = 1000
    k_max = k_min+10
    neb = NEB(get_geoms(("A", "B")), k_min=k_min, k_max=k_max, fix_ends=True)
    # NOTE(review): this import is unused — candidate for removal.
    from pysisyphus.optimizers.ConjugateGradient import ConjugateGradient
    # from pysisyphus.optimizers.LBFGS_mod import LBFGS
    opt = run_cos_opt(neb, LBFGS, **kwargs)
    # assert(opt.is_converged)
    # assert(opt.cur_cycle == 45)
    return opt
@pytest.mark.sd
def test_steepest_descent_neb_more_images():
    """NEB + SteepestDescent, 7 images: pinned to converge in 41 cycles."""
    kwargs = copy.copy(KWARGS)
    kwargs["images"] = 7
    convergence = {
        "max_force_thresh": 0.6,
        "rms_force_thresh": 0.13,
        "max_step_thresh": 0.015,
        "rms_step_thresh": 0.0033,
    }
    kwargs["convergence"] = convergence
    neb = NEB(get_geoms())
    opt = run_cos_opt(neb, SteepestDescent, **kwargs)
    assert(opt.is_converged)
    assert(opt.cur_cycle == 41)
    return opt
@pytest.mark.fire
def test_fire_neb():
    """NEB + FIRE integrator: pinned to converge in 76 cycles."""
    kwargs = copy.copy(KWARGS)
    kwargs["dt"] = 0.01
    kwargs["dt_max"] = 0.1
    neb = NEB(get_geoms())
    opt = run_cos_opt(neb, FIRE, **kwargs)
    assert(opt.is_converged)
    assert(opt.cur_cycle == 76)
    return opt
def test_equal_szts():
    """SimpleZTS with equal spacing: pinned to converge in 17 cycles."""
    kwargs = copy.copy(KWARGS)
    convergence = {
        "rms_force_thresh": 2.4,
    }
    kwargs["convergence"] = convergence
    szts_equal = SimpleZTS(get_geoms(), param="equal")
    opt = run_cos_opt(szts_equal, SteepestDescent, **kwargs)
    assert(opt.is_converged)
    assert(opt.cur_cycle == 17)
    return opt
def test_equal_szts_straight():
    """SimpleZTS (equal spacing) on a straight A-B interpolation.

    Reassembled from FIM-marker corruption: the ``convergence`` dict literal
    had been displaced to the end of the file. No convergence asserts are
    made; the optimizer is returned for inspection.
    """
    kwargs = copy.copy(KWARGS)
    kwargs["images"] = 10
    kwargs["max_step"] = 0.04
    convergence = {
        "rms_force_thresh": 2.4,
    }
    kwargs["convergence"] = convergence
    szts_equal = SimpleZTS(get_geoms(("A", "B")), param="equal")
    opt = run_cos_opt(szts_equal, SteepestDescent, **kwargs)
    return opt
def test_equal_szts_more_images():
    """SimpleZTS (equal), 7 images: pinned to converge in 21 cycles."""
    kwargs = copy.copy(KWARGS)
    kwargs["images"] = 7
    convergence = {
        "rms_force_thresh": 2.4,
    }
    kwargs["convergence"] = convergence
    szts_equal = SimpleZTS(get_geoms(), param="equal")
    opt = run_cos_opt(szts_equal, SteepestDescent, **kwargs)
    assert(opt.is_converged)
    assert(opt.cur_cycle == 21)
    return opt
def test_energy_szts():
    """SimpleZTS with energy-weighted spacing: pinned to 15 cycles."""
    kwargs = copy.copy(KWARGS)
    convergence = {
        "rms_force_thresh": 2.8,
    }
    kwargs["convergence"] = convergence
    szts_energy = SimpleZTS(get_geoms(), param="energy")
    opt = run_cos_opt(szts_energy, SteepestDescent, **kwargs)
    assert(opt.is_converged)
    assert(opt.cur_cycle == 15)
    return opt
def test_energy_szts_more_images():
    """SimpleZTS (energy), 10 images: pinned to converge in 22 cycles."""
    kwargs = copy.copy(KWARGS)
    kwargs["images"] = 10
    convergence = {
        "rms_force_thresh": 1.7,
    }
    kwargs["convergence"] = convergence
    szts_energy = SimpleZTS(get_geoms(), param="energy")
    opt = run_cos_opt(szts_energy, SteepestDescent, **kwargs)
    assert(opt.is_converged)
    assert(opt.cur_cycle == 22)
    return opt
# Interactive entry point: run one optimization (others kept, commented out,
# for quick switching) and animate the result. FIX: stripped a stray
# <|fim▁end|> infilling marker that was fused onto the plt.show() line.
if __name__ == "__main__":
    # Steepest Descent
    opt = test_steepest_descent_neb()
    #opt = test_steepest_descent_straight_neb()
    #opt = test_steepest_descent_neb_more_images()
    # opt = test_bfgs_straight_neb()
    # opt = test_lbfgs_neb()
    # FIRE
    #opt = test_fire_neb()
    # SimpleZTS
    #opt = test_equal_szts()
    #opt = test_equal_szts_straight()
    #opt = test_equal_szts_more_images()
    #opt = test_energy_szts()
    #opt = test_energy_szts_more_images()
    ap = animate(opt)
    plt.show()
use unix::bsd::O_SYNC;
// Platform-specific primitive type aliases.
pub type clock_t = i64;
pub type suseconds_t = ::c_long;
pub type dev_t = i32;
pub type sigset_t = ::c_uint;
pub type blksize_t = ::int32_t;
pub type fsblkcnt_t = ::uint64_t;
pub type fsfilcnt_t = ::uint64_t;

// pthread primitives are represented as opaque pointers here.
pub type pthread_attr_t = *mut ::c_void;
pub type pthread_mutex_t = *mut ::c_void;
pub type pthread_mutexattr_t = *mut ::c_void;
pub type pthread_cond_t = *mut ::c_void;
pub type pthread_condattr_t = *mut ::c_void;
pub type pthread_rwlock_t = *mut ::c_void;
pub type pthread_rwlockattr_t = *mut ::c_void;
// C-compatible struct definitions, declared through the crate's `s!` macro.
// Field names and order presumably mirror the corresponding OpenBSD C
// headers — any change here would alter the FFI layout, so verify against
// the system headers before editing.
s! {
    pub struct dirent {
        pub d_fileno: ::ino_t,
        pub d_off: ::off_t,
        pub d_reclen: u16,
        pub d_type: u8,
        pub d_namlen: u8,
        __d_padding: [u8; 4],
        pub d_name: [::c_char; 256],
    }

    pub struct glob_t {
        pub gl_pathc: ::c_int,
        pub gl_matchc: ::c_int,
        pub gl_offs: ::c_int,
        pub gl_flags: ::c_int,
        pub gl_pathv: *mut *mut ::c_char,
        __unused1: *mut ::c_void,
        __unused2: *mut ::c_void,
        __unused3: *mut ::c_void,
        __unused4: *mut ::c_void,
        __unused5: *mut ::c_void,
        __unused6: *mut ::c_void,
        __unused7: *mut ::c_void,
    }

    pub struct kevent {
        pub ident: ::uintptr_t,
        pub filter: ::c_short,
        pub flags: ::c_ushort,
        pub fflags: ::c_uint,
        pub data: ::int64_t,
        pub udata: *mut ::c_void,
    }

    pub struct stat {
        pub st_mode: ::mode_t,
        pub st_dev: ::dev_t,
        pub st_ino: ::ino_t,
        pub st_nlink: ::nlink_t,
        pub st_uid: ::uid_t,
        pub st_gid: ::gid_t,
        pub st_rdev: ::dev_t,
        pub st_atime: ::time_t,
        pub st_atime_nsec: ::c_long,
        pub st_mtime: ::time_t,
        pub st_mtime_nsec: ::c_long,
        pub st_ctime: ::time_t,
        pub st_ctime_nsec: ::c_long,
        pub st_size: ::off_t,
        pub st_blocks: ::blkcnt_t,
        pub st_blksize: ::blksize_t,
        pub st_flags: ::uint32_t,
        pub st_gen: ::uint32_t,
        pub st_birthtime: ::time_t,
        pub st_birthtime_nsec: ::c_long,
    }

    pub struct statvfs {
        pub f_bsize: ::c_ulong,
        pub f_frsize: ::c_ulong,
        pub f_blocks: ::fsblkcnt_t,
        pub f_bfree: ::fsblkcnt_t,
        pub f_bavail: ::fsblkcnt_t,
        pub f_files: ::fsfilcnt_t,
        pub f_ffree: ::fsfilcnt_t,
        pub f_favail: ::fsfilcnt_t,
        pub f_fsid: ::c_ulong,
        pub f_flag: ::c_ulong,
        pub f_namemax: ::c_ulong,
    }

    pub struct addrinfo {
        pub ai_flags: ::c_int,
        pub ai_family: ::c_int,
        pub ai_socktype: ::c_int,
        pub ai_protocol: ::c_int,
        pub ai_addrlen: ::socklen_t,
        pub ai_addr: *mut ::sockaddr,
        pub ai_canonname: *mut ::c_char,
        pub ai_next: *mut ::addrinfo,
    }

    pub struct sockaddr_storage {
        pub ss_len: u8,
        pub ss_family: ::sa_family_t,
        __ss_pad1: [u8; 6],
        __ss_pad2: i64,
        __ss_pad3: [u8; 240],
    }

    pub struct siginfo_t {
        pub si_signo: ::c_int,
        pub si_code: ::c_int,
        pub si_errno: ::c_int,
        pub si_addr: *mut ::c_char,
        // Padding sized so the overall struct matches the C layout on both
        // pointer widths.
        #[cfg(target_pointer_width = "32")]
        __pad: [u8; 112],
        #[cfg(target_pointer_width = "64")]
        __pad: [u8; 108],
    }

    pub struct Dl_info {
        pub dli_fname: *const ::c_char,
        pub dli_fbase: *mut ::c_void,
        pub dli_sname: *const ::c_char,
        pub dli_saddr: *mut ::c_void,
    }

    pub struct lastlog {
        ll_time: ::time_t,
        ll_line: [::c_char; UT_LINESIZE],
        ll_host: [::c_char; UT_HOSTSIZE],
    }

    pub struct utmp {
        pub ut_line: [::c_char; UT_LINESIZE],
        pub ut_name: [::c_char; UT_NAMESIZE],
        pub ut_host: [::c_char; UT_HOSTSIZE],
        pub ut_time: ::time_t,
    }

    pub struct if_data {
        pub ifi_type: ::c_uchar,
        pub ifi_addrlen: ::c_uchar,
        pub ifi_hdrlen: ::c_uchar,
        pub ifi_link_state: ::c_uchar,
        pub ifi_mtu: u32,
        pub ifi_metric: u32,
        pub ifi_rdomain: u32,
        pub ifi_baudrate: u64,
        pub ifi_ipackets: u64,
        pub ifi_ierrors: u64,
        pub ifi_opackets: u64,
        pub ifi_oerrors: u64,
        pub ifi_collisions: u64,
        pub ifi_ibytes: u64,
        pub ifi_obytes: u64,
        pub ifi_imcasts: u64,
        pub ifi_omcasts: u64,
        pub ifi_iqdrops: u64,
        pub ifi_oqdrops: u64,
        pub ifi_noproto: u64,
        pub ifi_capabilities: u32,
        pub ifi_lastchange: ::timeval,
    }

    pub struct if_msghdr {
        pub ifm_msglen: ::c_ushort,
        pub ifm_version: ::c_uchar,
        pub ifm_type: ::c_uchar,
        pub ifm_hdrlen: ::c_ushort,
        pub ifm_index: ::c_ushort,
        pub ifm_tableid: ::c_ushort,
        pub ifm_pad1: ::c_uchar,
        pub ifm_pad2: ::c_uchar,
        pub ifm_addrs: ::c_int,
        pub ifm_flags: ::c_int,
        pub ifm_xflags: ::c_int,
        pub ifm_data: if_data,
    }
}
pub const UT_NAMESIZE: usize = 32;
pub const UT_LINESIZE: usize = 8;
pub const UT_HOSTSIZE: usize = 256;
pub const O_CLOEXEC: ::c_int = 0x10000;
pub const O_DIRECTORY: ::c_int = 0x20000;
pub const O_RSYNC: ::c_int = O_SYNC;
pub const MS_SYNC : ::c_int = 0x0002;
pub const MS_INVALIDATE : ::c_int = 0x0004;
pub const PTHREAD_STACK_MIN : ::size_t = 2048;
pub const POLLNORM: ::c_short = ::POLLRDNORM;
pub const ENOATTR : ::c_int = 83;
pub const EILSEQ : ::c_int = 84;
pub const EOVERFLOW : ::c_int = 87;
pub const ECANCELED : ::c_int = 88;
pub const EIDRM : ::c_int = 89;
pub const ENOMSG : ::c_int = 90;
pub const ENOTSUP : ::c_int = 91;
pub const ELAST : ::c_int = 91;
pub const F_DUPFD_CLOEXEC : ::c_int = 10;
pub const AT_FDCWD: ::c_int = -100;
pub const AT_EACCESS: ::c_int = 0x01;
pub const AT_SYMLINK_NOFOLLOW: ::c_int = 0x02;
pub const AT_SYMLINK_FOLLOW: ::c_int = 0x04;
pub const AT_REMOVEDIR: ::c_int = 0x08;
pub const RLIM_NLIMITS: ::c_int = 9;
pub const SO_TIMESTAMP: ::c_int = 0x0800;
pub const SO_SNDTIMEO: ::c_int = 0x1005;
pub const SO_RCVTIMEO: ::c_int = 0x1006;
pub const SO_BINDANY: ::c_int = 0x1000;
pub const SO_NETPROC: ::c_int = 0x1020;
pub const SO_RTABLE: ::c_int = 0x1021;
pub const SO_PEERCRED: ::c_int = 0x1022;
pub const SO_SPLICE: ::c_int = 0x1023;
// https://github.com/openbsd/src/blob/master/sys/net/if.h#L187
pub const IFF_UP: ::c_int = 0x1; // interface is up
pub const IFF_BROADCAST: ::c_int = 0x2; // broadcast address valid
pub const IFF_DEBUG: ::c_int = 0x4; // turn on debugging
pub const IFF_LOOPBACK: ::c_int = 0x8; // is a loopback net
pub const IFF_POINTOPOINT: ::c_int = 0x10; // interface is point-to-point link
pub const IFF_NOTRAILERS: ::c_int = 0x20; // avoid use of trailers
pub const IFF_RUNNING: ::c_int = 0x40; // resources allocated
pub const IFF_NOARP: ::c_int = 0x80; // no address resolution protocol
pub const IFF_PROMISC: ::c_int = 0x100; // receive all packets
pub const IFF_ALLMULTI: ::c_int = 0x200; // receive all multicast packets
pub const IFF_OACTIVE: ::c_int = 0x400; // transmission in progress
pub const IFF_SIMPLEX: ::c_int = 0x800; // can't hear own transmissions
pub const IFF_LINK0: ::c_int = 0x1000; // per link layer defined bit
pub const IFF_LINK1: ::c_int = 0x2000; // per link layer defined bit
pub const IFF_LINK2: ::c_int = 0x4000; // per link layer defined bit
pub const IFF_MULTICAST: ::c_int = 0x8000; // supports multicast
// sys/netinet/in.h
// Protocols (RFC 1700)
// NOTE: These are in addition to the constants defined in src/unix/mod.rs
// IPPROTO_IP defined in src/unix/mod.rs
/// Hop-by-hop option header
pub const IPPROTO_HOPOPTS: ::c_int = 0;
// IPPROTO_ICMP defined in src/unix/mod.rs
/// group mgmt protocol
pub const IPPROTO_IGMP: ::c_int = 2;
/// gateway^2 (deprecated)
pub const IPPROTO_GGP: ::c_int = 3;
/// for compatibility
pub const IPPROTO_IPIP: ::c_int = 4;
// IPPROTO_TCP defined in src/unix/mod.rs
/// exterior gateway protocol
pub const IPPROTO_EGP: ::c_int = 8;
/// pup
pub const IPPROTO_PUP: ::c_int = 12;
// IPPROTO_UDP defined in src/unix/mod.rs
/// xns idp
pub const IPPROTO_IDP: ::c_int = 22;
/// tp-4 w/ class negotiation
pub const IPPROTO_TP: ::c_int = 29;
// IPPROTO_IPV6 defined in src/unix/mod.rs
/// IP6 routing header
pub const IPPROTO_ROUTING: ::c_int = 43;
/// IP6 fragmentation header
pub const IPPROTO_FRAGMENT: ::c_int = 44;
/// resource reservation
pub const IPPROTO_RSVP: ::c_int = 46;
/// General Routing Encap.
pub const IPPROTO_GRE: ::c_int = 47;
/// IP6 Encap Sec. Payload
pub const IPPROTO_ESP: ::c_int = 50;
/// IP6 Auth Header
pub const IPPROTO_AH: ::c_int = 51;
/// IP Mobility RFC 2004
pub const IPPROTO_MOBILE: ::c_int = 55;
// IPPROTO_ICMPV6 defined in src/unix/mod.rs
/// IP6 no next header
pub const IPPROTO_NONE: ::c_int = 59;
/// IP6 destination option
pub const IPPROTO_DSTOPTS: ::c_int = 60;
/// ISO cnlp
pub const IPPROTO_EON: ::c_int = 80;
/// Ethernet-in-IP
pub const IPPROTO_ETHERIP: ::c_int = 97;
/// encapsulation header
pub const IPPROTO_ENCAP: ::c_int = 98;
/// Protocol indep. multicast
pub const IPPROTO_PIM: ::c_int = 103;
/// IP Payload Comp. Protocol
pub const IPPROTO_IPCOMP: ::c_int = 108;
/// CARP
pub const IPPROTO_CARP: ::c_int = 112;
/// unicast MPLS packet
pub const IPPROTO_MPLS: ::c_int = 137;
/// PFSYNC
pub const IPPROTO_PFSYNC: ::c_int = 240;
pub const IPPROTO_MAX: ::c_int = 256;
/* Only used internally, so it can be outside the range of valid IP protocols */
/// Divert sockets
pub const IPPROTO_DIVERT: ::c_int = 258;
pub const AF_ECMA: ::c_int = 8;
pub const AF_ROUTE: ::c_int = 17;
pub const AF_ENCAP: ::c_int = 28;
pub const AF_SIP: ::c_int = 29;
pub const AF_KEY: ::c_int = 30;
pub const pseudo_AF_HDRCMPLT: ::c_int = 31;
pub const AF_BLUETOOTH: ::c_int = 32;
pub const AF_MPLS: ::c_int = 33;
pub const pseudo_AF_PFLOW: ::c_int = 34;
pub const pseudo_AF_PIPEX: ::c_int = 35;
#[doc(hidden)]
pub const AF_MAX: ::c_int = 36;
#[doc(hidden)]
pub const NET_MAXID: ::c_int = AF_MAX;
pub const NET_RT_DUMP: ::c_int = 1;
pub const NET_RT_FLAGS: ::c_int = 2;
pub const NET_RT_IFLIST: ::c_int = 3;
pub const NET_RT_STATS: ::c_int = 4;
pub const NET_RT_TABLE: ::c_int = 5;
pub const NET_RT_IFNAMES: ::c_int = 6;
#[doc(hidden)]
pub const NET_RT_MAXID: ::c_int = 7;
pub const IPV6_JOIN_GROUP: ::c_int = 12;
pub const IPV6_LEAVE_GROUP: ::c_int = 13;
pub const PF_ROUTE: ::c_int = AF_ROUTE;
pub const PF_ECMA: ::c_int = AF_ECMA;
pub const PF_ENCAP: ::c_int = AF_ENCAP;
pub const PF_SIP: ::c_int = AF_SIP;
pub const PF_KEY: ::c_int = AF_KEY;
pub const PF_BPF: ::c_int = pseudo_AF_HDRCMPLT;
pub const PF_BLUETOOTH: ::c_int = AF_BLUETOOTH;
pub const PF_MPLS: ::c_int = AF_MPLS;
pub const PF_PFLOW: ::c_int = pseudo_AF_PFLOW;
pub const PF_PIPEX: ::c_int = pseudo_AF_PIPEX;
#[doc(hidden)]
pub const PF_MAX: ::c_int = AF_MAX;
pub const SCM_TIMESTAMP: ::c_int = 0x04;
pub const O_DSYNC : ::c_int = 128;
pub const MAP_RENAME : ::c_int = 0x0000;
pub const MAP_NORESERVE : ::c_int = 0x0000;
pub const MAP_HASSEMAPHORE : ::c_int = 0x0000;
pub const EIPSEC : ::c_int = 82;
pub const ENOMEDIUM : ::c_int = 85;
pub const EMEDIUMTYPE : ::c_int = 86;
pub const EAI_SYSTEM: ::c_int = -11;
pub const RUSAGE_THREAD: ::c_int = 1;
pub const MAP_COPY : ::c_int = 0x0002;
pub const MAP_NOEXTEND : ::c_int = 0x0000;
pub const _PC_LINK_MAX : ::c_int = 1;
pub const _PC_MAX_CANON : ::c_int = 2;
pub const _PC_MAX_INPUT : ::c_int = 3;
pub const _PC_NAME_MAX : ::c_int = 4;
pub const _PC_PATH_MAX : ::c_int = 5;
pub const _PC_PIPE_BUF : ::c_int = 6;
pub const _PC_CHOWN_RESTRICTED : ::c_int = 7;
pub const _PC_NO_TRUNC : ::c_int = 8;
pub const _PC_VDISABLE : ::c_int = 9;
pub const _PC_2_SYMLINKS : ::c_int = 10;
pub const _PC_ALLOC_SIZE_MIN : ::c_int = 11;
pub const _PC_ASYNC_IO : ::c_int = 12;
pub const _PC_FILESIZEBITS : ::c_int = 13;
pub const _PC_PRIO_IO : ::c_int = 14;
pub const _PC_REC_INCR_XFER_SIZE : ::c_int = 15;
pub const _PC_REC_MAX_XFER_SIZE : ::c_int = 16;
pub const _PC_REC_MIN_XFER_SIZE : ::c_int = 17;
pub const _PC_REC_XFER_ALIGN : ::c_int = 18;
pub const _PC_SYMLINK_MAX : ::c_int = 19;
pub const _PC_SYNC_IO : ::c_int = 20;
pub const _PC_TIMESTAMP_RESOLUTION : ::c_int = 21;
pub const _SC_CLK_TCK : ::c_int = 3;
pub const _SC_SEM_NSEMS_MAX : ::c_int = 31;
pub const _SC_SEM_VALUE_MAX : ::c_int = 32;
pub const _SC_HOST_NAME_MAX : ::c_int = 33;
pub const _SC_MONOTONIC_CLOCK : ::c_int = 34;
pub const _SC_2_PBS : ::c_int = 35;
pub const _SC_2_PBS_ACCOUNTING : ::c_int = 36;
pub const _SC_2_PBS_CHECKPOINT : ::c_int = 37;
pub const _SC_2_PBS_LOCATE : ::c_int = 38;
pub const _SC_2_PBS_MESSAGE : ::c_int = 39;
pub const _SC_2_PBS_TRACK : ::c_int = 40;
pub const _SC_ADVISORY_INFO : ::c_int = 41;
pub const _SC_AIO_LISTIO_MAX : ::c_int = 42;
pub const _SC_AIO_MAX : ::c_int = 43;
pub const _SC_AIO_PRIO_DELTA_MAX : ::c_int = 44;
pub const _SC_ASYNCHRONOUS_IO : ::c_int = 45;
pub const _SC_ATEXIT_MAX : ::c_int = 46;
pub const _SC_BARRIERS : ::c_int = 47;
pub const _SC_CLOCK_SELECTION : ::c_int = 48;
pub const _SC_CPUTIME : ::c_int = 49;
pub const _SC_DELAYTIMER_MAX : ::c_int = 50;
pub const _SC_IOV_MAX : ::c_int = 51;
pub const _SC_IPV6 : ::c_int = 52;
pub const _SC_MAPPED_FILES : ::c_int = 53;
pub const _SC_MEMLOCK : ::c_int = 54;
pub const _SC_MEMLOCK_RANGE : ::c_int = 55;
pub const _SC_MEMORY_PROTECTION : ::c_int = 56;
pub const _SC_MESSAGE_PASSING : ::c_int = 57;
pub const _SC_MQ_OPEN_MAX : ::c_int = 58;
pub const _SC_MQ_PRIO_MAX : ::c_int = 59;
pub const _SC_PRIORITIZED_IO : ::c_int = 60;
pub const _SC_PRIORITY_SCHEDULING : ::c_int = 61;
pub const _SC_RAW_SOCKETS : ::c_int = 62;
pub const _SC_READER_WRITER_LOCKS : ::c_int = 63;
pub const _SC_REALTIME_SIGNALS : ::c_int = 64;
pub const _SC_REGEXP : ::c_int = 65;
pub const _SC_RTSIG_MAX : ::c_int = 66;
pub const _SC_SEMAPHORES : ::c_int = 67;
pub const _SC_SHARED_MEMORY_OBJECTS : ::c_int = 68;
pub const _SC_SHELL : ::c_int = 69;
pub const _SC_SIGQUEUE_MAX : ::c_int = 70;
pub const _SC_SPAWN : ::c_int = 71;
pub const _SC_SPIN_LOCKS : ::c_int = 72;
pub const _SC_SPORADIC_SERVER : ::c_int = 73;
pub const _SC_SS_REPL_MAX : ::c_int = 74;
pub const _SC_SYNCHRONIZED_IO : ::c_int = 75;
pub const _SC_SYMLOOP_MAX : ::c_int = 76;
pub const _SC_THREAD_ATTR_STACKADDR : ::c_int = 77;
pub const _SC_THREAD_ATTR_STACKSIZE : ::c_int = 78;
pub const _SC_THREAD_CPUTIME : ::c_int = 79;
pub const _SC_THREAD_DESTRUCTOR_ITERATIONS : ::c_int = 80;
pub const _SC_THREAD_KEYS_MAX : ::c_int = 81;
pub const _SC_THREAD_PRIO_INHERIT : ::c_int = 82;
pub const _SC_THREAD_PRIO_PROTECT : ::c_int = 83;
pub const _SC_THREAD_PRIORITY_SCHEDULING : ::c_int = 84;
pub const _SC_THREAD_PROCESS_SHARED : ::c_int = 85;
pub const _SC_THREAD_ROBUST_PRIO_INHERIT : ::c_int = 86;
pub const _SC_THREAD_ROBUST_PRIO_PROTECT : ::c_int = 87;
pub const _SC_THREAD_SPORADIC_SERVER : ::c_int = 88;
pub const _SC_THREAD_STACK_MIN : ::c_int = 89;
pub const _SC_THREAD_THREADS_MAX : ::c_int = 90;
pub const _SC_THREADS : ::c_int = 91;
pub const _SC_TIMEOUTS : ::c_int = 92;
pub const _SC_TIMER_MAX : ::c_int = 93;
pub const _SC_TIMERS : ::c_int = 94;
pub const _SC_TRACE : ::c_int = 95;
pub const _SC_TRACE_EVENT_FILTER : ::c_int = 96;
pub const _SC_TRACE_EVENT_NAME_MAX : ::c_int = 97;
pub const _SC_TRACE_INHERIT : ::c_int = 98;
pub const _SC_TRACE_LOG : ::c_int = 99;
pub const _SC_GETGR_R_SIZE_MAX : ::c_int = 100;
pub const _SC_GETPW_R_SIZE_MAX : ::c_int = 101;
pub const _SC_LOGIN_NAME_MAX : ::c_int = 102;
pub const _SC_THREAD_SAFE_FUNCTIONS : ::c_int = 103;
pub const _SC_TRACE_NAME_MAX : ::c_int = 104;
pub const _SC_TRACE_SYS_MAX : ::c_int = 105;
pub const _SC_TRACE_USER_EVENT_MAX : ::c_int = 106;
pub const _SC_TTY_NAME_MAX : ::c_int = 107;
pub const _SC_TYPED_MEMORY_OBJECTS : ::c_int = 108;
pub const _SC_V6_ILP32_OFF32 : ::c_int = 109;
pub const _SC_V6_ILP32_OFFBIG : ::c_int = 110;
pub const _SC_V6_LP64_OFF64 : ::c_int = 111;
pub const _SC_V6_LPBIG_OFFBIG : ::c_int = 112;
pub const _SC_V7_ILP32_OFF32 : ::c_int = 113;
pub const _SC_V7_ILP32_OFFBIG : ::c_int = 114;
pub const _SC_V7_LP64_OFF64 : ::c_int = 115;
pub const _SC_V7_LPBIG_OFFBIG : ::c_int = 116;
pub const _SC_XOPEN_CRYPT : ::c_int = 117;
pub const _SC_XOPEN_ENH_I18N : ::c_int = 118;
pub const _SC_XOPEN_LEGACY : ::c_int = 119;
pub const _SC_XOPEN_REALTIME : ::c_int = 120;
pub const _SC_XOPEN_REALTIME_THREADS : ::c_int = 121;
pub const _SC_XOPEN_STREAMS : ::c_int = 122;
pub const _SC_XOPEN_UNIX : ::c_int = 123;
pub const _SC_XOPEN_UUCP : ::c_int = 124;
pub const _SC_XOPEN_VERSION : ::c_int = 125;
pub const _SC_PHYS_PAGES : ::c_int = 500;
pub const _SC_AVPHYS_PAGES : ::c_int = 501;
pub const _SC_NPROCESSORS_CONF : ::c_int = 502;
pub const _SC_NPROCESSORS_ONLN : ::c_int = 503;
pub const FD_SETSIZE: usize = 1024;
pub const ST_NOSUID: ::c_ulong = 2;
pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = 0 as *mut _;
pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = 0 as *mut _;
pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = 0 as *mut _;
pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 1;
// pthread mutex kinds (pthread_mutexattr_settype); STRICT_NP is the default.
pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 2;
pub const PTHREAD_MUTEX_NORMAL: ::c_int = 3;
pub const PTHREAD_MUTEX_STRICT_NP: ::c_int = 4;
pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_STRICT_NP;
// kqueue(2) event filters.
pub const EVFILT_AIO: ::int16_t = -3;
pub const EVFILT_PROC: ::int16_t = -5;
pub const EVFILT_READ: ::int16_t = -1;
pub const EVFILT_SIGNAL: ::int16_t = -6;
pub const EVFILT_TIMER: ::int16_t = -7;
pub const EVFILT_VNODE: ::int16_t = -4;
pub const EVFILT_WRITE: ::int16_t = -2;
// kevent(2) actions and per-event flags.
pub const EV_ADD: ::uint16_t = 0x1;
pub const EV_DELETE: ::uint16_t = 0x2;
pub const EV_ENABLE: ::uint16_t = 0x4;
pub const EV_DISABLE: ::uint16_t = 0x8;
pub const EV_ONESHOT: ::uint16_t = 0x10;
pub const EV_CLEAR: ::uint16_t = 0x20;
pub const EV_RECEIPT: ::uint16_t = 0x40;
pub const EV_DISPATCH: ::uint16_t = 0x80;
pub const EV_FLAG1: ::uint16_t = 0x2000;
pub const EV_ERROR: ::uint16_t = 0x4000;
pub const EV_EOF: ::uint16_t = 0x8000;
pub const EV_SYSFLAGS: ::uint16_t = 0xf000;
// kevent(2) NOTE_* data/fflags values. Values repeat across groups because
// they are interpreted per filter (read/write, vnode, proc, track).
pub const NOTE_LOWAT: ::uint32_t = 0x00000001;
pub const NOTE_EOF: ::uint32_t = 0x00000002;
pub const NOTE_DELETE: ::uint32_t = 0x00000001;
pub const NOTE_WRITE: ::uint32_t = 0x00000002;
pub const NOTE_EXTEND: ::uint32_t = 0x00000004;
pub const NOTE_ATTRIB: ::uint32_t = 0x00000008;
pub const NOTE_LINK: ::uint32_t = 0x00000010;
pub const NOTE_RENAME: ::uint32_t = 0x00000020;
pub const NOTE_REVOKE: ::uint32_t = 0x00000040;
pub const NOTE_TRUNCATE: ::uint32_t = 0x00000080;
pub const NOTE_EXIT: ::uint32_t = 0x80000000;
pub const NOTE_FORK: ::uint32_t = 0x40000000;
pub const NOTE_EXEC: ::uint32_t = 0x20000000;
pub const NOTE_PDATAMASK: ::uint32_t = 0x000fffff;
pub const NOTE_PCTRLMASK: ::uint32_t = 0xf0000000;
pub const NOTE_TRACK: ::uint32_t = 0x00000001;
pub const NOTE_TRACKERR: ::uint32_t = 0x00000002;
pub const NOTE_CHILD: ::uint32_t = 0x00000004;
pub const TMP_MAX : ::c_uint = 0x7fffffff;
pub const NI_MAXHOST: ::size_t = 256;
pub const RTLD_LOCAL: ::c_int = 0;
// sysctl(2): name-vector limit and node types.
pub const CTL_MAXNAME: ::c_int = 12;
pub const CTLTYPE_NODE: ::c_int = 1;
pub const CTLTYPE_INT: ::c_int = 2;
pub const CTLTYPE_STRING: ::c_int = 3;
pub const CTLTYPE_QUAD: ::c_int = 4;
pub const CTLTYPE_STRUCT: ::c_int = 5;
// sysctl(2) top-level identifiers.
pub const CTL_UNSPEC: ::c_int = 0;
pub const CTL_KERN: ::c_int = 1;
pub const CTL_VM: ::c_int = 2;
pub const CTL_FS: ::c_int = 3;
pub const CTL_NET: ::c_int = 4;
pub const CTL_DEBUG: ::c_int = 5;
pub const CTL_HW: ::c_int = 6;
pub const CTL_MACHDEP: ::c_int = 7;
pub const CTL_DDB: ::c_int = 9;
pub const CTL_VFS: ::c_int = 10;
pub const CTL_MAXID: ::c_int = 11;
// CTL_KERN subtree selectors (gaps are identifiers retired upstream).
pub const KERN_OSTYPE: ::c_int = 1;
pub const KERN_OSRELEASE: ::c_int = 2;
pub const KERN_OSREV: ::c_int = 3;
pub const KERN_VERSION: ::c_int = 4;
pub const KERN_MAXVNODES: ::c_int = 5;
pub const KERN_MAXPROC: ::c_int = 6;
pub const KERN_MAXFILES: ::c_int = 7;
pub const KERN_ARGMAX: ::c_int = 8;
pub const KERN_SECURELVL: ::c_int = 9;
pub const KERN_HOSTNAME: ::c_int = 10;
pub const KERN_HOSTID: ::c_int = 11;
pub const KERN_CLOCKRATE: ::c_int = 12;
pub const KERN_PROF: ::c_int = 16;
pub const KERN_POSIX1: ::c_int = 17;
pub const KERN_NGROUPS: ::c_int = 18;
pub const KERN_JOB_CONTROL: ::c_int = 19;
pub const KERN_SAVED_IDS: ::c_int = 20;
pub const KERN_BOOTTIME: ::c_int = 21;
pub const KERN_DOMAINNAME: ::c_int = 22;
pub const KERN_MAXPARTITIONS: ::c_int = 23;
pub const KERN_RAWPARTITION: ::c_int = 24;
pub const KERN_MAXTHREAD: ::c_int = 25;
pub const KERN_NTHREADS: ::c_int = 26;
pub const KERN_OSVERSION: ::c_int = 27;
pub const KERN_SOMAXCONN: ::c_int = 28;
pub const KERN_SOMINCONN: ::c_int = 29;
pub const KERN_USERMOUNT: ::c_int = 30;
pub const KERN_NOSUIDCOREDUMP: ::c_int = 32;
pub const KERN_FSYNC: ::c_int = 33;
pub const KERN_SYSVMSG: ::c_int = 34;
pub const KERN_SYSVSEM: ::c_int = 35;
pub const KERN_SYSVSHM: ::c_int = 36;
pub const KERN_ARND: ::c_int = 37;
pub const KERN_MSGBUFSIZE: ::c_int = 38;
pub const KERN_MALLOCSTATS: ::c_int = 39;
pub const KERN_CPTIME: ::c_int = 40;
pub const KERN_NCHSTATS: ::c_int = 41;
pub const KERN_FORKSTAT: ::c_int = 42;
pub const KERN_NSELCOLL: ::c_int = 43;
pub const KERN_TTY: ::c_int = 44;
pub const KERN_CCPU: ::c_int = 45;
pub const KERN_FSCALE: ::c_int = 46;
pub const KERN_NPROCS: ::c_int = 47;
pub const KERN_MSGBUF: ::c_int = 48;
pub const KERN_POOL: ::c_int = 49;
pub const KERN_STACKGAPRANDOM: ::c_int = 50;
pub const KERN_SYSVIPC_INFO: ::c_int = 51;
pub const KERN_SPLASSERT: ::c_int = 54;
pub const KERN_PROC_ARGS: ::c_int = 55;
pub const KERN_NFILES: ::c_int = 56;
pub const KERN_TTYCOUNT: ::c_int = 57;
pub const KERN_NUMVNODES: ::c_int = 58;
pub const KERN_MBSTAT: ::c_int = 59;
pub const KERN_SEMINFO: ::c_int = 61;
pub const KERN_SHMINFO: ::c_int = 62;
pub const KERN_INTRCNT: ::c_int = 63;
pub const KERN_WATCHDOG: ::c_int = 64;
pub const KERN_PROC: ::c_int = 66;
pub const KERN_MAXCLUSTERS: ::c_int = 67;
pub const KERN_EVCOUNT: ::c_int = 68;
pub const KERN_TIMECOUNTER: ::c_int = 69;
pub const KERN_MAXLOCKSPERUID: ::c_int = 70;
pub const KERN_CPTIME2: ::c_int = 71;
pub const KERN_CACHEPCT: ::c_int = 72;
pub const KERN_FILE: ::c_int = 73;
pub const KERN_CONSDEV: ::c_int = 75;
pub const KERN_NETLIVELOCKS: ::c_int = 76;
pub const KERN_POOL_DEBUG: ::c_int = 77;
pub const KERN_PROC_CWD: ::c_int = 78;
pub const KERN_PROC_NOBROADCASTKILL: ::c_int = 79;
pub const KERN_PROC_VMMAP: ::c_int = 80;
pub const KERN_GLOBAL_PTRACE: ::c_int = 81;
pub const KERN_CONSBUFSIZE: ::c_int = 82;
pub const KERN_CONSBUF: ::c_int = 83;
pub const KERN_MAXID: ::c_int = 84;
// KERN_PROC: process-selection filters.
pub const KERN_PROC_ALL: ::c_int = 0;
pub const KERN_PROC_PID: ::c_int = 1;
pub const KERN_PROC_PGRP: ::c_int = 2;
pub const KERN_PROC_SESSION: ::c_int = 3;
pub const KERN_PROC_TTY: ::c_int = 4;
pub const KERN_PROC_UID: ::c_int = 5;
pub const KERN_PROC_RUID: ::c_int = 6;
pub const KERN_PROC_KTHREAD: ::c_int = 7;
pub const KERN_PROC_SHOW_THREADS: ::c_int = 0x40000000;
// KERN_SYSVIPC_INFO sub-selectors.
pub const KERN_SYSVIPC_MSG_INFO: ::c_int = 1;
pub const KERN_SYSVIPC_SEM_INFO: ::c_int = 2;
pub const KERN_SYSVIPC_SHM_INFO: ::c_int = 3;
// KERN_PROC_ARGS sub-selectors.
pub const KERN_PROC_ARGV: ::c_int = 1;
pub const KERN_PROC_NARGV: ::c_int = 2;
pub const KERN_PROC_ENV: ::c_int = 3;
pub const KERN_PROC_NENV: ::c_int = 4;
// kinfo (KI_*) fixed field sizes.
pub const KI_NGROUPS: ::c_int = 16;
pub const KI_MAXCOMLEN: ::c_int = 24;
pub const KI_WMESGLEN: ::c_int = 8;
pub const KI_MAXLOGNAME: ::c_int = 32;
pub const KI_EMULNAMELEN: ::c_int = 8;
// termios flags.
pub const CHWFLOW: ::tcflag_t = ::MDMBUF | ::CRTSCTS;
pub const OLCUC: ::tcflag_t = 0x20;
pub const ONOCR: ::tcflag_t = 0x40;
pub const ONLRET: ::tcflag_t = 0x80;
// socket(2) type modifier flags.
pub const SOCK_CLOEXEC: ::c_int = 0x8000;
pub const SOCK_NONBLOCK: ::c_int = 0x4000;
pub const SOCK_DNS: ::c_int = 0x1000;
pub const WCONTINUED: ::c_int = 8;
f! {
    // POSIX WIFCONTINUED: reports whether a wait(2) status word says a
    // stopped child was resumed by SIGCONT (encoded as all-ones, 0o177777).
    pub fn WIFCONTINUED(status: ::c_int) -> bool {
        status & 0o177777 == 0o177777
    }
}
extern {
pub fn dirfd(dirp: *mut ::DIR) -> ::c_int;
pub fn getnameinfo(sa: *const ::sockaddr,
salen: ::socklen_t,
host: *mut ::c_char,
hostlen: ::size_t,
serv: *mut ::c_char,
servlen: ::size_t,
flags: ::c_int) -> ::c_int;
pub fn kevent(kq: ::c_int,
changelist: *const ::kevent,
nchanges: ::c_int,
eventlist: *mut ::kevent,
nevents: ::c_int,
timeout: *const ::timespec) -> ::c_int;
pub fn mprotect(addr: *mut ::c_void, len: ::size_t, prot: ::c_int)
-> ::c_int;<|fim▁hole|> pub fn pthread_set_name_np(tid: ::pthread_t, name: *const ::c_char);
pub fn pthread_stackseg_np(thread: ::pthread_t,
sinfo: *mut ::stack_t) -> ::c_int;
pub fn sysctl(name: *const ::c_int,
namelen: ::c_uint,
oldp: *mut ::c_void,
oldlenp: *mut ::size_t,
newp: *mut ::c_void,
newlen: ::size_t)
-> ::c_int;
pub fn getentropy(buf: *mut ::c_void, buflen: ::size_t) -> ::c_int;
pub fn pledge(promises: *const ::c_char,
paths: *mut *const ::c_char) -> ::c_int;
pub fn setresgid(rgid: ::gid_t, egid: ::gid_t, sgid: ::gid_t) -> ::c_int;
pub fn setresuid(ruid: ::uid_t, euid: ::uid_t, suid: ::uid_t) -> ::c_int;
}
// Pull in the definitions specific to the BSD flavor being targeted.
cfg_if! {
    if #[cfg(target_os = "openbsd")] {
        mod openbsd;
        pub use self::openbsd::*;
    } else if #[cfg(target_os = "bitrig")] {
        mod bitrig;
        pub use self::bitrig::*;
    } else {
        // Unknown target_os
    }
}
mod other;
pub use self::other::*;<|fim▁end|>
|
pub fn pthread_main_np() -> ::c_int;
|
<|file_name|>settings.py<|end_file_name|><|fim▁begin|>"""
Django settings for CatyHIS project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jh40i$ueqp$s7+@e71)s-&c*ek8vgt9atzdz7un6=r9(9^*5+-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Empty is acceptable while DEBUG is True; production must list served hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'FormGen',  # project-local app (not a django.contrib package)
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',<|fim▁hole|> 'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'CatyHIS.urls'
WSGI_APPLICATION = 'CatyHIS.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# Single SQLite database stored alongside the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Filesystem location for project templates.
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
RES_DIR = os.path.join(BASE_DIR, 'res')<|fim▁end|>
| |
<|file_name|>application_ref_spec.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {APP_BOOTSTRAP_LISTENER, APP_INITIALIZER, CompilerFactory, Component, NgModule, NgZone, PlatformRef, TemplateRef, Type, ViewChild, ViewContainerRef} from '@angular/core';
import {ApplicationRef, ApplicationRef_} from '@angular/core/src/application_ref';
import {ErrorHandler} from '@angular/core/src/error_handler';
import {ComponentRef} from '@angular/core/src/linker/component_factory';
import {TestComponentRenderer} from '@angular/core/testing';
import {BrowserModule} from '@angular/platform-browser';
import {getDOM} from '@angular/platform-browser/src/dom/dom_adapter';
import {DOCUMENT} from '@angular/platform-browser/src/dom/dom_tokens';
import {dispatchEvent} from '@angular/platform-browser/testing/browser_util';
import {expect} from '@angular/platform-browser/testing/matchers';
import {ServerModule} from '@angular/platform-server';
import {ComponentFixture, ComponentFixtureNoNgZone, TestBed, async, inject, withModule} from '../testing';
// Minimal component used as the bootstrap target throughout these specs;
// a matching <bootstrap-app> root element is created per test by createRootEl().
@Component({selector: 'bootstrap-app', template: 'hello'})
class SomeComponent {
}
export function main() {
describe('bootstrap', () => {
let mockConsole: MockConsole;
beforeEach(() => { mockConsole = new MockConsole(); });
function createRootEl() {
const doc = TestBed.get(DOCUMENT);
const rootEl = <HTMLElement>getDOM().firstChild(
getDOM().content(getDOM().createTemplate(`<bootstrap-app></bootstrap-app>`)));
const oldRoots = getDOM().querySelectorAll(doc, 'bootstrap-app');
for (let i = 0; i < oldRoots.length; i++) {
getDOM().remove(oldRoots[i]);
}
getDOM().appendChild(doc.body, rootEl);
}
type CreateModuleOptions = {providers?: any[], ngDoBootstrap?: any, bootstrap?: any[]};
function createModule(providers?: any[]): Type<any>;
function createModule(options: CreateModuleOptions): Type<any>;
function createModule(providersOrOptions: any[] | CreateModuleOptions): Type<any> {
let options: CreateModuleOptions = {};
if (providersOrOptions instanceof Array) {
options = {providers: providersOrOptions};
} else {
options = providersOrOptions || {};
}
const errorHandler = new ErrorHandler(false);
errorHandler._console = mockConsole as any;
const platformModule = getDOM().supportsDOMEvents() ? BrowserModule : ServerModule;
@NgModule({
providers: [{provide: ErrorHandler, useValue: errorHandler}, options.providers || []],
imports: [platformModule],
declarations: [SomeComponent],
entryComponents: [SomeComponent],
bootstrap: options.bootstrap || []
})
class MyModule {
}
if (options.ngDoBootstrap !== false) {
(<any>MyModule.prototype).ngDoBootstrap = options.ngDoBootstrap || (() => {});
}
return MyModule;
}
describe('ApplicationRef', () => {
beforeEach(() => { TestBed.configureTestingModule({imports: [createModule()]}); });
it('should throw when reentering tick', inject([ApplicationRef], (ref: ApplicationRef_) => {
const view = jasmine.createSpyObj('view', ['detach', 'attachToAppRef']);
const viewRef = jasmine.createSpyObj(
'viewRef', ['detectChanges', 'detachFromAppRef', 'attachToAppRef']);
viewRef.internalView = view;
view.ref = viewRef;
try {
ref.attachView(viewRef);
viewRef.detectChanges.and.callFake(() => ref.tick());
expect(() => ref.tick()).toThrowError('ApplicationRef.tick is called recursively');
} finally {
ref.detachView(viewRef);
}
}));
describe('APP_BOOTSTRAP_LISTENER', () => {
let capturedCompRefs: ComponentRef<any>[];
beforeEach(() => {
capturedCompRefs = [];
TestBed.configureTestingModule({
providers: [{
provide: APP_BOOTSTRAP_LISTENER,
multi: true,
useValue: (compRef: any) => { capturedCompRefs.push(compRef); }
}]
});
});
it('should be called when a component is bootstrapped',
inject([ApplicationRef], (ref: ApplicationRef_) => {
createRootEl();
const compRef = ref.bootstrap(SomeComponent);
expect(capturedCompRefs).toEqual([compRef]);
}));
});
describe('bootstrap', () => {
it('should throw if an APP_INITIIALIZER is not yet resolved',
withModule(
{
providers: [
{provide: APP_INITIALIZER, useValue: () => new Promise(() => {}), multi: true}
]
},
inject([ApplicationRef], (ref: ApplicationRef_) => {
createRootEl();
expect(() => ref.bootstrap(SomeComponent))
.toThrowError(
'Cannot bootstrap as there are still asynchronous initializers running. Bootstrap components in the `ngDoBootstrap` method of the root module.');
})));
});
});
describe('bootstrapModule', () => {
let defaultPlatform: PlatformRef;
beforeEach(inject([PlatformRef], (_platform: PlatformRef) => {
createRootEl();
defaultPlatform = _platform;
}));
it('should wait for asynchronous app initializers', async(() => {
let resolve: (result: any) => void;
const promise: Promise<any> = new Promise((res) => { resolve = res; });
let initializerDone = false;
setTimeout(() => {
resolve(true);
initializerDone = true;
}, 1);
defaultPlatform
.bootstrapModule(
createModule([{provide: APP_INITIALIZER, useValue: () => promise, multi: true}]))
.then(_ => { expect(initializerDone).toBe(true); });
}));
it('should rethrow sync errors even if the exceptionHandler is not rethrowing', async(() => {
defaultPlatform
.bootstrapModule(createModule(
[{provide: APP_INITIALIZER, useValue: () => { throw 'Test'; }, multi: true}]))
.then(() => expect(false).toBe(true), (e) => {
expect(e).toBe('Test');
// Note: if the modules throws an error during construction,
// we don't have an injector and therefore no way of
// getting the exception handler. So
// the error is only rethrown but not logged via the exception handler.
expect(mockConsole.res).toEqual([]);
});
}));
it('should rethrow promise errors even if the exceptionHandler is not rethrowing',
async(() => {
defaultPlatform
.bootstrapModule(createModule([
{provide: APP_INITIALIZER, useValue: () => Promise.reject('Test'), multi: true}
]))
.then(() => expect(false).toBe(true), (e) => {
expect(e).toBe('Test');
expect(mockConsole.res).toEqual(['EXCEPTION: Test']);
});
}));
it('should throw useful error when ApplicationRef is not configured', async(() => {
@NgModule()
class EmptyModule {
}
return defaultPlatform.bootstrapModule(EmptyModule)
.then(() => fail('expecting error'), (error) => {
expect(error.message)
.toEqual('No ErrorHandler. Is platform module (BrowserModule) included?');
});
}));
it('should call the `ngDoBootstrap` method with `ApplicationRef` on the main module',
async(() => {
const ngDoBootstrap = jasmine.createSpy('ngDoBootstrap');
defaultPlatform.bootstrapModule(createModule({ngDoBootstrap: ngDoBootstrap}))
.then((moduleRef) => {
const appRef = moduleRef.injector.get(ApplicationRef);
expect(ngDoBootstrap).toHaveBeenCalledWith(appRef);
});
}));
it('should auto bootstrap components listed in @NgModule.bootstrap', async(() => {
defaultPlatform.bootstrapModule(createModule({bootstrap: [SomeComponent]}))
.then((moduleRef) => {
const appRef: ApplicationRef = moduleRef.injector.get(ApplicationRef);
expect(appRef.componentTypes).toEqual([SomeComponent]);
});
}));
it('should error if neither `ngDoBootstrap` nor @NgModule.bootstrap was specified',
async(() => {
defaultPlatform.bootstrapModule(createModule({ngDoBootstrap: false}))
.then(() => expect(false).toBe(true), (e) => {
const expectedErrMsg =
`The module MyModule was bootstrapped, but it does not declare "@NgModule.bootstrap" components nor a "ngDoBootstrap" method. Please define one of these.`;
expect(e.message).toEqual(expectedErrMsg);
expect(mockConsole.res[0]).toEqual('EXCEPTION: ' + expectedErrMsg);
});
}));
it('should add bootstrapped module into platform modules list', async(() => {
defaultPlatform.bootstrapModule(createModule({bootstrap: [SomeComponent]}))
.then(module => expect((<any>defaultPlatform)._modules).toContain(module));
}));
});
describe('bootstrapModuleFactory', () => {
let defaultPlatform: PlatformRef;
beforeEach(inject([PlatformRef], (_platform: PlatformRef) => {
createRootEl();
defaultPlatform = _platform;
}));
it('should wait for asynchronous app initializers', async(() => {
let resolve: (result: any) => void;
const promise: Promise<any> = new Promise((res) => { resolve = res; });
let initializerDone = false;
setTimeout(() => {
resolve(true);
initializerDone = true;
}, 1);
const compilerFactory: CompilerFactory =
defaultPlatform.injector.get(CompilerFactory, null);
const moduleFactory = compilerFactory.createCompiler().compileModuleSync(
createModule([{provide: APP_INITIALIZER, useValue: () => promise, multi: true}]));
defaultPlatform.bootstrapModuleFactory(moduleFactory).then(_ => {
expect(initializerDone).toBe(true);
});
}));
it('should rethrow sync errors even if the exceptionHandler is not rethrowing', async(() => {
const compilerFactory: CompilerFactory =
defaultPlatform.injector.get(CompilerFactory, null);
const moduleFactory = compilerFactory.createCompiler().compileModuleSync(createModule(
[{provide: APP_INITIALIZER, useValue: () => { throw 'Test'; }, multi: true}]));
expect(() => defaultPlatform.bootstrapModuleFactory(moduleFactory)).toThrow('Test');
// Note: if the modules throws an error during construction,
// we don't have an injector and therefore no way of
// getting the exception handler. So
// the error is only rethrown but not logged via the exception handler.
expect(mockConsole.res).toEqual([]);
}));
it('should rethrow promise errors even if the exceptionHandler is not rethrowing',
async(() => {
const compilerFactory: CompilerFactory =
defaultPlatform.injector.get(CompilerFactory, null);
const moduleFactory = compilerFactory.createCompiler().compileModuleSync(createModule(
[{provide: APP_INITIALIZER, useValue: () => Promise.reject('Test'), multi: true}]));
defaultPlatform.bootstrapModuleFactory(moduleFactory)
.then(() => expect(false).toBe(true), (e) => {
expect(e).toBe('Test');
expect(mockConsole.res).toEqual(['EXCEPTION: Test']);
});
}));
});
describe('attachView / detachView', () => {
@Component({template: '{{name}}'})
class MyComp {
name = 'Initial';
}
@Component({template: '<ng-container #vc></ng-container>'})
class ContainerComp {
@ViewChild('vc', {read: ViewContainerRef})
vc: ViewContainerRef;
}
@Component({template: '<ng-template #t>Dynamic content</ng-template>'})
class EmbeddedViewComp {
@ViewChild(TemplateRef)
tplRef: TemplateRef<Object>;
}
beforeEach(() => {
TestBed.configureTestingModule({
declarations: [MyComp, ContainerComp, EmbeddedViewComp],
providers: [{provide: ComponentFixtureNoNgZone, useValue: true}]
});
});
it('should dirty check attached views', () => {
const comp = TestBed.createComponent(MyComp);
const appRef: ApplicationRef = TestBed.get(ApplicationRef);
expect(appRef.viewCount).toBe(0);
appRef.tick();
expect(comp.nativeElement).toHaveText('');
appRef.attachView(comp.componentRef.hostView);
appRef.tick();
expect(appRef.viewCount).toBe(1);
expect(comp.nativeElement).toHaveText('Initial');
});
it('should not dirty check detached views', () => {
const comp = TestBed.createComponent(MyComp);
const appRef: ApplicationRef = TestBed.get(ApplicationRef);
appRef.attachView(comp.componentRef.hostView);
appRef.tick();
expect(comp.nativeElement).toHaveText('Initial');
appRef.detachView(comp.componentRef.hostView);
comp.componentInstance.name = 'New';
appRef.tick();
expect(appRef.viewCount).toBe(0);
expect(comp.nativeElement).toHaveText('Initial');
});
it('should detach attached views if they are destroyed', () => {
const comp = TestBed.createComponent(MyComp);
const appRef: ApplicationRef = TestBed.get(ApplicationRef);
appRef.attachView(comp.componentRef.hostView);
comp.destroy();
expect(appRef.viewCount).toBe(0);
});
it('should detach attached embedded views if they are destroyed', () => {
const comp = TestBed.createComponent(EmbeddedViewComp);
const appRef: ApplicationRef = TestBed.get(ApplicationRef);
const embeddedViewRef = comp.componentInstance.tplRef.createEmbeddedView({});
appRef.attachView(embeddedViewRef);
embeddedViewRef.destroy();
expect(appRef.viewCount).toBe(0);
});
it('should not allow to attach a view to both, a view container and the ApplicationRef',
() => {
const comp = TestBed.createComponent(MyComp);
let hostView = comp.componentRef.hostView;
const containerComp = TestBed.createComponent(ContainerComp);
containerComp.detectChanges();
const vc = containerComp.componentInstance.vc;
const appRef: ApplicationRef = TestBed.get(ApplicationRef);
vc.insert(hostView);
expect(() => appRef.attachView(hostView))
.toThrowError('This view is already attached to a ViewContainer!');
hostView = vc.detach(0);
appRef.attachView(hostView);
expect(() => vc.insert(hostView))
.toThrowError('This view is already attached directly to the ApplicationRef!');
});
});
});
describe('AppRef', () => {
@Component({selector: 'sync-comp', template: `<span>{{text}}</span>`})
class SyncComp {
text: string = '1';
}
@Component({selector: 'click-comp', template: `<span (click)="onClick()">{{text}}</span>`})
class ClickComp {<|fim▁hole|> text: string = '1';
onClick() { this.text += '1'; }
}
@Component({selector: 'micro-task-comp', template: `<span>{{text}}</span>`})
class MicroTaskComp {
text: string = '1';
ngOnInit() {
Promise.resolve(null).then((_) => { this.text += '1'; });
}
}
@Component({selector: 'macro-task-comp', template: `<span>{{text}}</span>`})
class MacroTaskComp {
text: string = '1';
ngOnInit() {
setTimeout(() => { this.text += '1'; }, 10);
}
}
@Component({selector: 'micro-macro-task-comp', template: `<span>{{text}}</span>`})
class MicroMacroTaskComp {
text: string = '1';
ngOnInit() {
Promise.resolve(null).then((_) => {
this.text += '1';
setTimeout(() => { this.text += '1'; }, 10);
});
}
}
@Component({selector: 'macro-micro-task-comp', template: `<span>{{text}}</span>`})
class MacroMicroTaskComp {
text: string = '1';
ngOnInit() {
setTimeout(() => {
this.text += '1';
Promise.resolve(null).then((_: any) => { this.text += '1'; });
}, 10);
}
}
let stableCalled = false;
beforeEach(() => {
stableCalled = false;
TestBed.configureTestingModule({
declarations: [
SyncComp, MicroTaskComp, MacroTaskComp, MicroMacroTaskComp, MacroMicroTaskComp, ClickComp
],
});
});
afterEach(() => { expect(stableCalled).toBe(true, 'isStable did not emit true on stable'); });
function expectStableTexts(component: Type<any>, expected: string[]) {
const fixture = TestBed.createComponent(component);
const appRef: ApplicationRef = TestBed.get(ApplicationRef);
const zone: NgZone = TestBed.get(NgZone);
appRef.attachView(fixture.componentRef.hostView);
zone.run(() => appRef.tick());
let i = 0;
appRef.isStable.subscribe({
next: (stable: boolean) => {
if (stable) {
expect(i).toBeLessThan(expected.length);
expect(fixture.nativeElement).toHaveText(expected[i++]);
stableCalled = true;
}
}
});
}
it('isStable should fire on synchronous component loading',
async(() => { expectStableTexts(SyncComp, ['1']); }));
it('isStable should fire after a microtask on init is completed',
async(() => { expectStableTexts(MicroTaskComp, ['11']); }));
it('isStable should fire after a macrotask on init is completed',
async(() => { expectStableTexts(MacroTaskComp, ['11']); }));
it('isStable should fire only after chain of micro and macrotasks on init are completed',
async(() => { expectStableTexts(MicroMacroTaskComp, ['111']); }));
it('isStable should fire only after chain of macro and microtasks on init are completed',
async(() => { expectStableTexts(MacroMicroTaskComp, ['111']); }));
describe('unstable', () => {
let unstableCalled = false;
afterEach(
() => { expect(unstableCalled).toBe(true, 'isStable did not emit false on unstable'); });
function expectUnstable(appRef: ApplicationRef) {
appRef.isStable.subscribe({
next: (stable: boolean) => {
if (stable) {
stableCalled = true;
}
if (!stable) {
unstableCalled = true;
}
}
});
}
it('should be fired after app becomes unstable', async(() => {
const fixture = TestBed.createComponent(ClickComp);
const appRef: ApplicationRef = TestBed.get(ApplicationRef);
const zone: NgZone = TestBed.get(NgZone);
appRef.attachView(fixture.componentRef.hostView);
zone.run(() => appRef.tick());
fixture.whenStable().then(() => {
expectUnstable(appRef);
const element = fixture.debugElement.children[0];
dispatchEvent(element.nativeElement, 'click');
});
}));
});
});
}
class MockConsole {
res: any[] = [];
log(s: any): void { this.res.push(s); }
error(s: any): void { this.res.push(s); }
}<|fim▁end|>
| |
<|file_name|>server.rs<|end_file_name|><|fim▁begin|>use std::sync::mpsc::{self, Sender, Receiver};
use std::collections::{HashMap, HashSet};
use std::net::{TcpListener, TcpStream};
use std::fmt::Debug;
use libc::EINPROGRESS;
use net2::{TcpBuilder, TcpStreamExt};
use serde::{Serialize, Deserialize};
use msgpack::{Serializer, Deserializer};
use slog;
use amy::{Registrar, Notification, Event, FrameReader, FrameWriter};
use members::Members;
use node_id::NodeId;
use msg::Msg;
use executor::ExecutorMsg;
use timer_wheel::TimerWheel;
use envelope::Envelope;
use orset::{ORSet, Delta};
use pid::Pid;
use correlation_id::CorrelationId;
use errors::*;
use metrics::Metrics;
use super::{ClusterStatus, ClusterMsg, ExternalMsg, ClusterMetrics};
// TODO: This is totally arbitrary right now and should probably be user configurable
// Upper bound enforced by FrameReader on a single framed message.
const MAX_FRAME_SIZE: u32 = 100*1024*1024; // 100 MB
// Cluster-server poll interval; also sizes the timer wheel
// (REQUEST_TIMEOUT / TICK_TIME slots).
const TICK_TIME: usize = 1000; // milliseconds
const REQUEST_TIMEOUT: usize = 5000; // milliseconds
// This tick allows process specific timers to fire
const EXECUTOR_TICK_TIME: usize = 100; // milliseconds
struct Conn {
sock: TcpStream,
node: Option<NodeId>,
is_client: bool,
members_sent: bool,
timer_wheel_index: usize,<|fim▁hole|>impl Conn {
/// Wrap a freshly accepted or dialed socket in per-connection state.
///
/// `node` is `Some` once the peer has identified itself; `is_client`
/// records whether this side initiated the connection. Framed reads are
/// capped at MAX_FRAME_SIZE.
pub fn new(sock: TcpStream, node: Option<NodeId>, is_client: bool) -> Conn {
    Conn {
        // Field-init shorthand for the values passed straight through.
        sock,
        node,
        is_client,
        members_sent: false,
        // Placeholder; the real index is assigned when the connection is
        // inserted into the timer wheel (see reset_timer).
        timer_wheel_index: 0,
        reader: FrameReader::new(MAX_FRAME_SIZE),
        writer: FrameWriter::new(),
    }
}
}
/// A struct that handles cluster membership connection and routing of messages to processes on
/// other nodes.
pub struct ClusterServer<T> {
    // Pid of the cluster server itself; envelopes addressed to it are
    // treated as metric requests (see handle_cluster_msg).
    pid: Pid,
    // Identity of the local node.
    node: NodeId,
    // Inbound channel of control messages and outbound envelopes.
    rx: Receiver<ClusterMsg<T>>,
    // Channel used to hand envelopes to the executor for local delivery.
    executor_tx: Sender<ExecutorMsg<T>>,
    // Poller ids for the two interval timers; 0 until run() registers them.
    executor_timer_id: usize,
    timer_id: usize,
    // Per-connection activity tracking, REQUEST_TIMEOUT / TICK_TIME slots.
    timer_wheel: TimerWheel<usize>,
    // Accepting socket bound to this node's address.
    listener: TcpListener,
    // Poller id of `listener`; 0 until run() registers it.
    listener_id: usize,
    // Cluster membership state; deltas are merged via join_delta.
    members: Members,
    // All live connections keyed by poller id.
    connections: HashMap<usize, Conn>,
    // Connections that completed the Members exchange, keyed by peer node.
    established: HashMap<NodeId, usize>,
    registrar: Registrar,
    logger: slog::Logger,
    metrics: ClusterMetrics
}
impl<'de, T: Serialize + Deserialize<'de> + Debug + Clone> ClusterServer<T> {
/// Construct a cluster server bound to `node`'s address.
///
/// Panics if the listener socket cannot be bound or switched to
/// non-blocking mode; both failures are fatal at startup.
pub fn new(node: NodeId,
           rx: Receiver<ClusterMsg<T>>,
           executor_tx: Sender<ExecutorMsg<T>>,
           registrar: Registrar,
           logger: slog::Logger) -> ClusterServer<T> {
    let pid = Pid {
        group: Some("rabble".to_string()),
        name: "cluster_server".to_string(),
        node: node.clone()
    };
    let listener = TcpListener::bind(&node.addr[..]).unwrap();
    listener.set_nonblocking(true).unwrap();
    ClusterServer {
        pid,
        // Clone here: `node` itself is consumed by Members::new below.
        node: node.clone(),
        rx,
        executor_tx,
        // Timer and listener ids are placeholders until run() registers them.
        executor_timer_id: 0,
        timer_id: 0,
        timer_wheel: TimerWheel::new(REQUEST_TIMEOUT / TICK_TIME),
        listener,
        listener_id: 0,
        members: Members::new(node),
        connections: HashMap::new(),
        established: HashMap::new(),
        registrar,
        logger: logger.new(o!("component" => "cluster_server")),
        metrics: ClusterMetrics::new()
    }
}
/// Main event loop: registers the interval timers and listener with the
/// poller, then processes ClusterMsgs until the channel closes, a fatal
/// error occurs, or shutdown is requested.
pub fn run(mut self) {
    info!(self.logger, "Starting");
    // Failing to register with the poller is unrecoverable at startup.
    self.timer_id = self.registrar.set_interval(TICK_TIME).unwrap();
    self.executor_timer_id = self.registrar.set_interval(EXECUTOR_TICK_TIME).unwrap();
    self.listener_id = self.registrar.register(&self.listener, Event::Read).unwrap();
    while let Ok(msg) = self.rx.recv() {
        if let Err(e) = self.handle_cluster_msg(msg) {
            self.metrics.errors += 1;
            // Close every connection implicated in the error.
            for id in e.kind().get_ids() {
                self.close(id)
            }
            match *e.kind() {
                // Serialization, poller, and channel failures are fatal:
                // log and leave the loop.
                ErrorKind::EncodeError(..) | ErrorKind::DecodeError(..) |
                ErrorKind::RegistrarError(..) | ErrorKind::SendError(..) => {
                    error!(self.logger, e.to_string());
                    break;
                }
                // Orderly exit requested via ClusterMsg::Shutdown.
                ErrorKind::Shutdown(..) => {
                    info!(self.logger, e.to_string());
                    break;
                },
                // Everything else is per-connection; keep serving.
                _ => warn!(self.logger, e.to_string())
            }
        }
    }
}
/// Dispatch one message from the local system, bumping the matching
/// metric counter for each variant.
fn handle_cluster_msg(&mut self, msg: ClusterMsg<T>) -> Result<()> {
    match msg {
        ClusterMsg::PollNotifications(notifications) => {
            self.metrics.poll_notifications += 1;
            self.handle_poll_notifications(notifications)
        },
        ClusterMsg::Join(node) => {
            self.metrics.joins += 1;
            self.join(node)
        },
        ClusterMsg::Leave(node) => {
            self.metrics.leaves += 1;
            self.leave(node)
        },
        ClusterMsg::Envelope(envelope) => {
            self.metrics.received_local_envelopes += 1;
            // Only metric requests are directly sent to the cluster server
            if envelope.to == self.pid {
                self.send_metrics(envelope);
                return Ok(());
            }
            self.send_remote(envelope)
        },
        ClusterMsg::GetStatus(correlation_id) => {
            self.metrics.status_requests += 1;
            self.get_status(correlation_id)
        },
        // Surfaced as an error so run() can log it and exit its loop.
        ClusterMsg::Shutdown => Err(ErrorKind::Shutdown(self.pid.clone()).into())
    }
}
/// Answer a status request with a snapshot of membership, established
/// peers, and the open-connection count, replying to the requester's pid.
fn get_status(&self, correlation_id: CorrelationId) -> Result<()> {
    let status = ClusterStatus {
        members: self.members.all(),
        established: self.established.keys().cloned().collect(),
        num_connections: self.connections.len()
    };
    let envelope = Envelope {
        to: correlation_id.pid.clone(),
        from: self.pid.clone(),
        msg: Msg::ClusterStatus(status),
        correlation_id: Some(correlation_id)
    };
    // Route the response through the executor since it knows how to contact all Pids
    if let Err(mpsc::SendError(ExecutorMsg::Envelope(envelope))) =
        self.executor_tx.send(ExecutorMsg::Envelope(envelope))
    {
        // Recover the envelope from the failed send so the error can name
        // the destination pid.
        return Err(ErrorKind::SendError("ExecutorMsg::Envelope".to_string(),
                                        Some(envelope.to)).into());
    }
    Ok(())
}
/// Serialize an envelope with msgpack and queue it on the connection to
/// its destination node. Envelopes addressed to nodes without an
/// established connection are silently dropped.
fn send_remote(&mut self, envelope: Envelope<T>) -> Result<()> {
    if let Some(id) = self.established.get(&envelope.to.node).cloned() {
        trace!(self.logger, "send remote"; "to" => envelope.to.to_string());
        let mut encoded = Vec::new();
        // Clone before serialization consumes the envelope, so an encode
        // error can still name the destination node.
        let node = envelope.to.node.clone();
        try!(ExternalMsg::Envelope(envelope).serialize(&mut Serializer::new(&mut encoded))
            .chain_err(|| ErrorKind::EncodeError(Some(id), Some(node))));
        try!(self.write(id, Some(encoded)));
    }
    Ok(())
}
/// Fan out one batch of poller events: the listener and the two interval
/// timers are handled specially; anything else is per-connection socket
/// I/O. Errors are collected so a single bad connection does not stop
/// the rest of the batch; they are reported together at the end.
fn handle_poll_notifications(&mut self, notifications: Vec<Notification>) -> Result<()> {
    trace!(self.logger, "handle_poll_notification"; "num_notifications" => notifications.len());
    let mut errors = Vec::new();
    for n in notifications {
        let result = match n.id {
            id if id == self.listener_id => self.accept_connection(),
            id if id == self.timer_id => self.tick(),
            id if id == self.executor_timer_id => self.tick_executor(),
            _ => self.do_socket_io(n)
        };
        if let Err(e) = result {
            errors.push(e);
        }
    }
    // Idiomatic emptiness check (was `errors.len() != 0`).
    if !errors.is_empty() {
        return Err(ErrorKind::PollNotificationErrors(errors).into());
    }
    Ok(())
}
/// Perform the reads and/or writes that a poller notification reported
/// as ready on a single connection.
fn do_socket_io(&mut self, notification: Notification) -> Result<()> {
    match notification.event {
        Event::Read => self.read(notification.id),
        Event::Write => self.write(notification.id, None),
        // Readable and writable at once: service the read side, then flush.
        Event::Both => {
            try!(self.read(notification.id));
            self.write(notification.id, None)
        }
    }
}
/// Handshake state of connection `id`:
/// `Some(true)` when the connection exists and the members were already
/// sent, `Some(false)` when it exists but they were not sent yet, and
/// `None` when there is no such connection.
fn members_sent(&self, id: usize) -> Option<bool> {
    self.connections.get(&id).map(|conn| conn.members_sent)
}
/// Handle readability on connection `id`. A connection that has not yet
/// completed the Members handshake gets our membership view first;
/// afterwards inbound bytes are decoded into messages and dispatched.
/// Unknown ids (already-closed connections) are ignored.
fn read(&mut self, id: usize) -> Result<()> {
    trace!(self.logger, "read"; "id" => id);
    match self.members_sent(id) {
        // Handshake not done yet: send our membership view first.
        Some(false) => try!(self.send_members(id)),
        // Connection no longer exists; nothing to do.
        None => (),
        Some(true) => {
            let messages = try!(self.decode_messages(id));
            for msg in messages {
                try!(self.handle_decoded_message(id, msg));
            }
        }
    }
    Ok(())
}
/// Dispatches a single decoded message from connection `id`: membership
/// exchanges establish the connection, pings reset its idle timer, user
/// envelopes are forwarded to the executor, and deltas are merged and
/// re-broadcast when they change our membership view.
fn handle_decoded_message(&mut self, id: usize, msg: ExternalMsg<T>) -> Result<()> {
    match msg {
        ExternalMsg::Members{from, orset} => {
            info!(self.logger, "Got Members"; "id" => id, "from" => from.to_string());
            self.establish_connection(id, from, orset);
            self.check_connections();
        },
        ExternalMsg::Ping => {
            trace!(self.logger, "Got Ping"; "id" => id);
            self.reset_timer(id);
        }
        ExternalMsg::Envelope(envelope) => {
            self.metrics.received_remote_envelopes += 1;
            debug!(self.logger, "Got User Message";
                   "from" => envelope.from.to_string(),
                   "to" => envelope.to.to_string());
            // On failure the envelope is recovered from the SendError so its
            // destination can be reported in the returned error.
            if let Err(mpsc::SendError(ExecutorMsg::Envelope(envelope)))
                = self.executor_tx.send(ExecutorMsg::Envelope(envelope))
            {
                // Fixed typo in the error message ("Enelope" -> "Envelope").
                return Err(ErrorKind::SendError("ExecutorMsg::Envelope".to_string(),
                                                Some(envelope.to)).into());
            }
        },
        ExternalMsg::Delta(delta) => {
            debug!(self.logger, "Got Delta mutator";
                   "id" => id, "delta" => format!("{:?}", delta));
            if self.members.join_delta(delta.clone()) {
                try!(self.broadcast_delta(delta));
            }
        }
    }
    Ok(())
}
/// Handles outgoing data for connection `id`.
///
/// `msg` is `Some` when new data should be queued and written, or `None`
/// when the poller reported the socket writable and buffered data should be
/// flushed. Unknown ids are ignored.
fn write(&mut self, id: usize, msg: Option<Vec<u8>>) -> Result<()> {
    trace!(self.logger, "write"; "id" => id);
    let registrar = &self.registrar;
    if let Some(mut conn) = self.connections.get_mut(&id) {
        if msg.is_none() {
            if conn.writer.is_writable() {
                // The socket has just become writable. Re-register it as
                // read-only, or the write event will keep firing indefinitely
                // even when there is no data to write.
                try!(registrar.reregister(id, &conn.sock, Event::Read)
                    .chain_err(|| ErrorKind::RegistrarError(Some(id), conn.node.clone())));
            }
            // We just got an Event::Write from the poller
            conn.writer.writable();
        }
        try!(conn_write(id, &mut conn, msg, &registrar));
    }
    Ok(())
}
/// Restarts the idle timeout for connection `id` by moving it to the
/// current timer-wheel slot. No-op for unknown connection ids.
fn reset_timer(&mut self, id: usize) {
    if let Some(conn) = self.connections.get_mut(&id) {
        self.timer_wheel.remove(&id, conn.timer_wheel_index);
        conn.timer_wheel_index = self.timer_wheel.insert(id)
    }
}
/// Transition a connection from unestablished to established. If there is already an
/// established connection between these two nodes, determine which one should be closed.
fn establish_connection(&mut self, id: usize, from: NodeId, orset: ORSet<NodeId>) {
    // Merge the peer's membership view into ours before deciding anything.
    self.members.join(orset);
    if let Some(close_id) = self.choose_connection_to_close(id, &from) {
        debug!(self.logger,
               "Two connections between nodes. Closing the connection where \
                the peer that sorts lower was the connecting client";
               "peer" => from.to_string(), "id" => close_id);
        self.close(close_id);
        if close_id == id {
            // The new connection itself lost the tie-break: nothing to establish.
            return;
        }
    }
    debug!(self.logger, "Trying to establish connection"; "peer" => from.to_string(), "id" => id);
    if let Some(conn) = self.connections.get_mut(&id) {
        info!(self.logger, "Establish connection"; "peer" => from.to_string(), "id" => id);
        conn.node = Some(from.clone());
        // Reset the idle timer now that the peer has proven itself live.
        self.timer_wheel.remove(&id, conn.timer_wheel_index);
        conn.timer_wheel_index = self.timer_wheel.insert(id);
        self.established.insert(from, id);
    }
}
/// We only want a single connection between nodes. Choose the connection where the client side
/// comes from a node that sorts less than the node of the server side of the connection.
/// Return the id to remove if there is an existing connection to remove, otherwise return
/// `None` indicating that there isn't an existing connection, so don't close the new one.
fn choose_connection_to_close(&self, id: usize, from: &NodeId) -> Option<usize> {
    if let Some(saved_id) = self.established.get(from) {
        if let Some(saved_conn) = self.connections.get(&saved_id) {
            // A client connection always comes from self.node
            // Both sides apply the same deterministic ordering rule, so they
            // agree on which duplicate connection to drop.
            if (saved_conn.is_client && self.node < *from) ||
               (!saved_conn.is_client && *from < self.node) {
                return Some(*saved_id);
            } else {
                return Some(id);
            }
        }
    }
    None
}
/// Reads all available bytes from connection `id` and decodes every
/// complete MsgPack frame into an `ExternalMsg`. Partial frames stay
/// buffered in the connection's reader until more data arrives.
fn decode_messages(&mut self, id: usize) -> Result<Vec<ExternalMsg<T>>> {
    let mut output = Vec::new();
    if let Some(conn) = self.connections.get_mut(&id) {
        let node = conn.node.clone();
        try!(conn.reader.read(&mut conn.sock)
            .chain_err(|| ErrorKind::ReadError(id, node.clone())));
        for frame in conn.reader.iter_mut() {
            let mut decoder = Deserializer::new(&frame[..]);
            let msg = try!(Deserialize::deserialize(&mut decoder)
                .chain_err(|| ErrorKind::DecodeError(id, node.clone())));
            output.push(msg);
        }
    }
    Ok(output)
}
/// Adds `node` to the cluster membership, broadcasts the resulting ORSet
/// delta to all peers, and starts an outbound connection attempt to it.
fn join(&mut self, node: NodeId) -> Result<()> {
    let delta = self.members.add(node.clone());
    try!(self.broadcast_delta(delta));
    self.metrics.connection_attempts += 1;
    self.connect(node)
}
/// Removes `node` from the cluster membership and broadcasts the removal
/// delta — but only if the node was actually a member.
fn leave(&mut self, node: NodeId) -> Result<()> {
    if let Some(delta) = self.members.leave(node.clone()) {
        try!(self.broadcast_delta(delta));
    }
    Ok(())
}
/// Starts a non-blocking outbound TCP connection to `node`.
///
/// `EINPROGRESS` is the normal result of a non-blocking connect(2) and is
/// not treated as an error; completion is observed later via the poller.
fn connect(&mut self, node: NodeId) -> Result<()> {
    debug!(self.logger, "connect"; "to" => node.to_string());
    let sock = try!(TcpBuilder::new_v4().chain_err(|| "Failed to create a IPv4 socket"));
    let sock = try!(sock.to_tcp_stream().chain_err(|| "Failed to create TcpStream"));
    try!(sock.set_nonblocking(true).chain_err(|| "Failed to make socket nonblocking"));
    if let Err(e) = sock.connect(&node.addr[..]) {
        if e.raw_os_error().is_some() && *e.raw_os_error().as_ref().unwrap() != EINPROGRESS {
            return Err(e).chain_err(|| ErrorKind::ConnectError(node));
        }
    }
    try!(self.init_connection(sock, Some(node)));
    Ok(())
}
/// Accepts all pending inbound connections, makes each non-blocking, and
/// immediately sends our membership set on it. Loops until accept() would
/// block.
fn accept_connection(&mut self) -> Result<()> {
    while let Ok((sock, _)) = self.listener.accept() {
        self.metrics.accepted_connections += 1;
        debug!(self.logger, "accepted connection");
        try!(sock.set_nonblocking(true).chain_err(|| "Failed to make socket nonblocking"));
        let id = try!(self.init_connection(sock, None));
        try!(self.send_members(id));
    }
    Ok(())
}
/// Registers `sock` with the poller and creates the bookkeeping entry for
/// it. `node` is `Some` for outbound (client) connections and `None` for
/// inbound ones. Returns the poller id assigned to the connection.
fn init_connection(&mut self, sock: TcpStream, node: Option<NodeId>) -> Result<usize> {
    let id = try!(self.registrar.register(&sock, Event::Read)
        .chain_err(|| ErrorKind::RegistrarError(None, None)))
    ;
    debug!(self.logger, "init_connection()";
           "id" => id, "is_client" => node.is_some(), "peer" => format!("{:?}", node));
    let is_client = node.is_some();
    let mut conn = Conn::new(sock, node, is_client);
    // Arm the idle timer so unestablished connections eventually expire.
    conn.timer_wheel_index = self.timer_wheel.insert(id);
    self.connections.insert(id, conn);
    Ok(id)
}
/// Sends our current membership ORSet on connection `id` and marks the
/// connection so subsequent reads are treated as application traffic.
fn send_members(&mut self, id: usize) -> Result<()> {
    let encoded = try!(self.encode_members(id));
    let registrar = &self.registrar;
    if let Some(mut conn) = self.connections.get_mut(&id) {
        info!(self.logger, "Send members"; "id" => id);
        try!(conn_write(id, &mut conn, Some(encoded), &registrar));
        conn.members_sent = true;
    }
    Ok(())
}
/// Periodic cluster maintenance: expire idle connections, ping all peers,
/// and reconcile live connections with the membership set.
fn tick(&mut self) -> Result<()> {
    trace!(self.logger, "tick");
    let expired = self.timer_wheel.expire();
    self.deregister(expired);
    try!(self.broadcast_pings());
    self.check_connections();
    Ok(())
}
/// Forwards the executor timer tick to the executor thread.
fn tick_executor(&mut self) -> Result<()> {
    trace!(self.logger, "tick_executor");
    // Panic if the executor is down.
    self.executor_tx.send(ExecutorMsg::Tick).unwrap();
    Ok(())
}
/// Serializes our membership ORSet into a MsgPack-encoded `Members`
/// message, ready to be written to connection `id` (the id is only used
/// for error context).
fn encode_members(&self, id: usize) -> Result<Vec<u8>> {
    let orset = self.members.get_orset();
    let mut encoded = Vec::new();
    let msg = ExternalMsg::Members::<T> {from: self.node.clone(), orset: orset};
    try!(msg.serialize(&mut Serializer::new(&mut encoded))
        .chain_err(|| ErrorKind::EncodeError(Some(id), None)));
    Ok(encoded)
}
/// Closes every connection whose idle timer expired.
fn deregister(&mut self, expired: HashSet<usize>) {
    // Consume the owned set directly instead of iterating by reference and
    // dereferencing each id.
    for id in expired {
        warn!(self.logger, "Connection timeout"; "id" => id);
        self.close(id);
    }
}
/// Close an existing connection and remove all related state.
fn close(&mut self, id: usize) {
    if let Some(conn) = self.connections.remove(&id) {
        // Deregistration failure is ignored: the socket is going away anyway.
        let _ = self.registrar.deregister(conn.sock);
        self.timer_wheel.remove(&id, conn.timer_wheel_index);
        if let Some(node) = conn.node {
            // Remove established connection if it matches this id
            if let Some(established_id) = self.established.remove(&node) {
                if established_id == id {
                    info!(self.logger, "Closing established connection";
                          "id" => id,"peer" => node.to_string());
                    return;
                }
                // The established node didn't correspond to this id, so put it back
                self.established.insert(node, established_id);
            }
        }
        info!(self.logger, "Closing unestablished connection"; "id" => id);
    }
}
/// Serializes a membership delta and sends it to every connected peer.
fn broadcast_delta(&mut self, delta: Delta<NodeId>) -> Result<()> {
    debug!(self.logger, "Broadcasting delta"; "delta" => format!("{:?}", delta));
    let mut encoded = Vec::new();
    let msg = ExternalMsg::Delta::<T>(delta);
    try!(msg.serialize(&mut Serializer::new(&mut encoded))
        .chain_err(|| ErrorKind::EncodeError(None, None)));
    self.broadcast(encoded)
}
/// Sends a keepalive `Ping` to every connected peer.
fn broadcast_pings(&mut self) -> Result<()> {
    let mut encoded = Vec::new();
    let msg = ExternalMsg::Ping::<T>;
    try!(msg.serialize(&mut Serializer::new(&mut encoded))
        .chain_err(|| ErrorKind::EncodeError(None, None)));
    self.broadcast(encoded)
}
// Write the encoded value to all established connections, collecting every
// per-connection write error into a single `BroadcastError`.
fn broadcast(&mut self, encoded: Vec<u8>) -> Result<()> {
    let mut errors = Vec::new();
    let registrar = &self.registrar;
    for (id, mut conn) in self.connections.iter_mut() {
        if !conn.members_sent {
            // This connection isn't connected yet
            continue;
        }
        if let Err(e) = conn_write(*id, &mut conn, Some(encoded.clone()), &registrar) {
            errors.push(e)
        }
    }
    // `is_empty` is the idiomatic emptiness check (was `errors.len() != 0`).
    if !errors.is_empty() {
        return Err(ErrorKind::BroadcastError(errors).into());
    }
    Ok(())
}
// Ensure connections are correct based on membership state
fn check_connections(&mut self) {
    let all = self.members.all();
    // If this node is no longer a member of the cluster disconnect from all nodes
    if !all.contains(&self.node) {
        return self.disconnect_all();
    }
    // Pending, Client connected, or established server side connections
    let known_peer_conns: HashSet<NodeId> =
        self.connections.iter().filter_map(|(_, conn)| conn.node.clone()).collect();
    // Members we do not yet have any connection to (excluding ourselves).
    let to_connect: Vec<NodeId> = all.difference(&known_peer_conns)
        .filter(|&node| *node != self.node).cloned().collect();
    // Peers we are connected to that are no longer cluster members.
    let to_disconnect: Vec<NodeId> = known_peer_conns.difference(&all).cloned().collect();
    trace!(self.logger, "check_connections";
           "to_connect" => format!("{:?}", to_connect),
           "to_disconnect" => format!("{:?}", to_disconnect));
    for node in to_connect {
        self.metrics.connection_attempts += 1;
        if let Err(e) = self.connect(node) {
            warn!(self.logger, e.to_string());
        }
    }
    self.disconnect_established(to_disconnect);
}
/// Tears down every connection and clears all established-peer state. Used
/// when this node has been removed from the cluster membership.
fn disconnect_all(&mut self) {
    // `clear` empties the map in place instead of allocating a fresh one.
    self.established.clear();
    for (id, conn) in self.connections.drain() {
        self.timer_wheel.remove(&id, conn.timer_wheel_index);
        if let Err(e) = self.registrar.deregister(conn.sock) {
            error!(self.logger, "Failed to deregister socket";
                   "id" => id, "peer" => format!("{:?}", conn.node),
                   "error" => e.to_string());
        }
    }
}
/// Closes the established connections to the given peers, removing their
/// timer and poller registrations. Peers without an established connection
/// are skipped.
fn disconnect_established(&mut self, to_disconnect: Vec<NodeId>) {
    for node in to_disconnect {
        if let Some(id) = self.established.remove(&node) {
            // An established id always has a live connection entry.
            let conn = self.connections.remove(&id).unwrap();
            self.timer_wheel.remove(&id, conn.timer_wheel_index);
            if let Err(e) = self.registrar.deregister(conn.sock) {
                error!(self.logger, "Failed to deregister socket";
                       "id" => id, "peer" => conn.node.unwrap().to_string(),
                       "error" => e.to_string());
            }
        }
    }
}
/// Replies to a `GetMetrics` request with a snapshot of this node's
/// counters; any other message type is logged as unexpected.
fn send_metrics(&mut self, envelope: Envelope<T>) {
    if let Msg::GetMetrics = envelope.msg {
        let new_envelope = Envelope {
            to: envelope.from,
            from: self.pid.clone(),
            msg: Msg::Metrics(self.metrics.data()),
            correlation_id: envelope.correlation_id
        };
        // Route the response through the executor since it knows how to contact all Pids
        if let Err(mpsc::SendError(ExecutorMsg::Envelope(new_envelope))) =
            self.executor_tx.send(ExecutorMsg::Envelope(new_envelope))
        {
            error!(self.logger, "Failed to send to executor";
                   "envelope" => format!("{:?}", new_envelope));
        }
    } else {
        error!(self.logger, "Received Unknown Msg";
               "envelope" => format!("{:?}", envelope));
    }
}
}
/// Writes `msg` (or flushes pending buffered data when `None`) to `conn`.
///
/// If the kernel buffer filled before everything was written, the socket is
/// re-registered for write readiness so the remainder is flushed when the
/// poller next reports it writable.
fn conn_write(id: usize,
              conn: &mut Conn,
              msg: Option<Vec<u8>>,
              registrar: &Registrar) -> Result<()>
{
    let writable = try!(conn.writer.write(&mut conn.sock, msg).chain_err(|| {
        ErrorKind::WriteError(id, conn.node.clone())
    }));
    if !writable {
        return registrar.reregister(id, &conn.sock, Event::Both)
            .chain_err(|| ErrorKind::RegistrarError(Some(id), conn.node.clone()));
    }
    Ok(())
}
|
reader: FrameReader,
writer: FrameWriter
}
|
<|file_name|>load_balancer_certificates_data_source_test.go<|end_file_name|><|fim▁begin|>// Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
// Licensed under the Mozilla Public License v2.0
package oci
import (
"testing"
"github.com/terraform-providers/terraform-provider-oci/httpreplay"
"regexp"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
)
// issue-routing-tag: load_balancer/default
func TestAccDatasourceLoadBalancerCertificates_basic(t *testing.T) {
httpreplay.SetScenario("TestAccDatasourceLoadBalancerCertificates_basic")
defer httpreplay.SaveScenario()
providers := testAccProviders
config := legacyTestProviderConfig() + caCertificateVariableStr + privateKeyVariableStr + `
data "oci_identity_availability_domains" "ADs" {
compartment_id = "${var.compartment_id}"
}
resource "oci_core_virtual_network" "t" {
compartment_id = "${var.compartment_id}"
cidr_block = "10.0.0.0/16"
display_name = "-tf-vcn"
}
resource "oci_core_subnet" "t" {
compartment_id = "${var.compartment_id}"
vcn_id = "${oci_core_virtual_network.t.id}"
availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[0],"name")}"
route_table_id = "${oci_core_virtual_network.t.default_route_table_id}"
security_list_ids = ["${oci_core_virtual_network.t.default_security_list_id}"]
dhcp_options_id = "${oci_core_virtual_network.t.default_dhcp_options_id}"
cidr_block = "10.0.0.0/24"<|fim▁hole|> }
resource "oci_load_balancer" "t" {
shape = "100Mbps"
compartment_id = "${var.compartment_id}"
subnet_ids = ["${oci_core_subnet.t.id}"]
display_name = "-tf-lb"
is_private = true
}
resource "oci_load_balancer_certificate" "t" {
load_balancer_id = "${oci_load_balancer.t.id}"
ca_certificate = "${var.ca_certificate_value}"
certificate_name = "tf_cert_name"
private_key = "${var.private_key_value}"
public_certificate = "${var.ca_certificate_value}"
}
data "oci_load_balancer_certificates" "t" {
load_balancer_id = "${oci_load_balancer.t.id}"
}`
resourceName := "data.oci_load_balancer_certificates.t"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
PreventPostDestroyRefresh: true,
Providers: providers,
Steps: []resource.TestStep{
{
Config: config,
},
{
Config: config,
Check: ComposeAggregateTestCheckFuncWrapper(
resource.TestCheckResourceAttrSet(resourceName, "load_balancer_id"),
resource.TestCheckResourceAttr(resourceName, "certificates.#", "1"),
resource.TestMatchResourceAttr(resourceName, "certificates.0.ca_certificate", regexp.MustCompile("-----BEGIN CERT.*")),
resource.TestCheckResourceAttr(resourceName, "certificates.0.certificate_name", "tf_cert_name"),
resource.TestMatchResourceAttr(resourceName, "certificates.0.public_certificate", regexp.MustCompile("-----BEGIN CERT.*")),
),
},
},
})
}<|fim▁end|>
|
display_name = "-tf-subnet"
|
<|file_name|>multiple_tags.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>
fn main() {
}
#[not_safe(a)]
fn unsafe_method() {
}
fn wrapper() {
unsafe_method()
}
#[not_safe(b)]
fn unsafe_b_method() {
}
#[not_safe(a,c)]
fn unsafe_ac_method() {
}
#[deny(not_tagged_safe)]
#[req_safe(a,c)]
fn caller() {
wrapper();
//~^ ERROR Calling a-unsafe
unsafe_b_method();
// don't expect an error
unsafe_ac_method()
//~^ ERROR Calling a-unsafe
//~^^ ERROR Calling c-unsafe
}<|fim▁end|>
|
#![feature(custom_attribute,plugin)]
#![plugin(tag_safe)]
#![allow(dead_code)]
|
<|file_name|>boltzmann_reward_prediction_policy.py<|end_file_name|><|fim▁begin|># coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy for reward prediction and boltzmann exploration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text, Tuple, Sequence
import gin
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.bandits.networks import heteroscedastic_q_network
from tf_agents.bandits.policies import constraints as constr
from tf_agents.bandits.specs import utils as bandit_spec_utils
from tf_agents.distributions import shifted_categorical
from tf_agents.policies import tf_policy
from tf_agents.policies import utils as policy_utilities
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.typing import types
@gin.configurable
class BoltzmannRewardPredictionPolicy(tf_policy.TFPolicy):
"""Class to build Reward Prediction Policies with Boltzmann exploration."""
def __init__(self,
time_step_spec: types.TimeStep,
action_spec: types.NestedTensorSpec,
reward_network: types.Network,
temperature: types.FloatOrReturningFloat = 1.0,
boltzmann_gumbel_exploration_constant: Optional[
types.Float] = None,
observation_and_action_constraint_splitter: Optional[
types.Splitter] = None,
accepts_per_arm_features: bool = False,
constraints: Tuple[constr.NeuralConstraint, ...] = (),
emit_policy_info: Tuple[Text, ...] = (),
num_samples_list: Sequence[tf.Variable] = (),
name: Optional[Text] = None):
"""Builds a BoltzmannRewardPredictionPolicy given a reward network.
This policy takes a tf_agents.Network predicting rewards and chooses an
action with weighted probabilities (i.e., using a softmax over the network
estimates of value for each action).
Args:
time_step_spec: A `TimeStep` spec of the expected time_steps.
action_spec: A nest of BoundedTensorSpec representing the actions.
reward_network: An instance of a `tf_agents.network.Network`,
callable via `network(observation, step_type) -> (output, final_state)`.
temperature: float or callable that returns a float. The temperature used
in the Boltzmann exploration.
boltzmann_gumbel_exploration_constant: optional positive float. When
provided, the policy implements Neural Bandit with Boltzmann-Gumbel
exploration from the paper:
N. Cesa-Bianchi et al., "Boltzmann Exploration Done Right", NIPS 2017.
observation_and_action_constraint_splitter: A function used for masking
valid/invalid actions with each state of the environment. The function
takes in a full observation and returns a tuple consisting of 1) the
part of the observation intended as input to the network and 2) the
mask. The mask should be a 0-1 `Tensor` of shape
`[batch_size, num_actions]`. This function should also work with a
`TensorSpec` as input, and should output `TensorSpec` objects for the
observation and mask.
accepts_per_arm_features: (bool) Whether the policy accepts per-arm
features.
constraints: iterable of constraints objects that are instances of
`tf_agents.bandits.agents.NeuralConstraint`.
emit_policy_info: (tuple of strings) what side information we want to get
as part of the policy info. Allowed values can be found in
`policy_utilities.PolicyInfo`.
num_samples_list: list or tuple of tf.Variable's. Used only in
Boltzmann-Gumbel exploration. Otherwise, empty.
name: The name of this policy. All variables in this module will fall
under that name. Defaults to the class name.
Raises:
NotImplementedError: If `action_spec` contains more than one
`BoundedTensorSpec` or the `BoundedTensorSpec` is not valid.
"""
policy_utilities.check_no_mask_with_arm_features(
accepts_per_arm_features, observation_and_action_constraint_splitter)
flat_action_spec = tf.nest.flatten(action_spec)
if len(flat_action_spec) > 1:
raise NotImplementedError(
'action_spec can only contain a single BoundedTensorSpec.')
self._temperature = temperature
action_spec = flat_action_spec[0]
if (not tensor_spec.is_bounded(action_spec) or
not tensor_spec.is_discrete(action_spec) or
action_spec.shape.rank > 1 or
action_spec.shape.num_elements() != 1):
raise NotImplementedError(
'action_spec must be a BoundedTensorSpec of type int32 and shape (). '
'Found {}.'.format(action_spec))
self._expected_num_actions = action_spec.maximum - action_spec.minimum + 1
self._action_offset = action_spec.minimum
reward_network.create_variables()
self._reward_network = reward_network
self._constraints = constraints
self._boltzmann_gumbel_exploration_constant = (
boltzmann_gumbel_exploration_constant)
self._num_samples_list = num_samples_list
if self._boltzmann_gumbel_exploration_constant is not None:
if self._boltzmann_gumbel_exploration_constant <= 0.0:
raise ValueError(
'The Boltzmann-Gumbel exploration constant is expected to be ',
'positive. Found: ', self._boltzmann_gumbel_exploration_constant)
if self._action_offset > 0:
raise NotImplementedError('Action offset is not supported when ',
'Boltzmann-Gumbel exploration is enabled.')
if accepts_per_arm_features:
raise NotImplementedError(
'Boltzmann-Gumbel exploration is not supported ',
'for arm features case.')
if len(self._num_samples_list) != self._expected_num_actions:
raise ValueError(
'Size of num_samples_list: ', len(self._num_samples_list),
' does not match the expected number of actions:',
self._expected_num_actions)
self._emit_policy_info = emit_policy_info
predicted_rewards_mean = ()
if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN in emit_policy_info:
predicted_rewards_mean = tensor_spec.TensorSpec(
[self._expected_num_actions])
bandit_policy_type = ()
if policy_utilities.InfoFields.BANDIT_POLICY_TYPE in emit_policy_info:
bandit_policy_type = (
policy_utilities.create_bandit_policy_type_tensor_spec(shape=[1]))
if accepts_per_arm_features:
# The features for the chosen arm is saved to policy_info.
chosen_arm_features_info = (
policy_utilities.create_chosen_arm_features_info_spec(
time_step_spec.observation))
info_spec = policy_utilities.PerArmPolicyInfo(
predicted_rewards_mean=predicted_rewards_mean,
bandit_policy_type=bandit_policy_type,
chosen_arm_features=chosen_arm_features_info)
else:
info_spec = policy_utilities.PolicyInfo(
predicted_rewards_mean=predicted_rewards_mean,
bandit_policy_type=bandit_policy_type)
self._accepts_per_arm_features = accepts_per_arm_features
super(BoltzmannRewardPredictionPolicy, self).__init__(
time_step_spec, action_spec,
policy_state_spec=reward_network.state_spec,
clip=False,
info_spec=info_spec,
emit_log_probability='log_probability' in emit_policy_info,
observation_and_action_constraint_splitter=(
observation_and_action_constraint_splitter),
name=name)<|fim▁hole|>
def _variables(self):
  """Returns the reward network's variables plus each constraint's variables."""
  all_variables = self._reward_network.variables
  all_variables.extend(c.variables for c in self._constraints)
  return all_variables
def _get_temperature_value(self):
  """Resolves the temperature, invoking it first when it is a callable."""
  temperature = self._temperature
  return temperature() if callable(temperature) else temperature
def _distribution(self, time_step, policy_state):
  """Returns the action distribution for `time_step`.

  Rewards are predicted by the reward network; an action is then chosen
  either via Boltzmann-Gumbel exploration (when the exploration constant is
  set) or by sampling a temperature-scaled categorical distribution. The
  chosen actions are wrapped in a `Deterministic` distribution.

  Args:
    time_step: A batched `TimeStep`.
    policy_state: State of the reward network, if any.

  Returns:
    A `PolicyStep` containing a `Deterministic` distribution over the chosen
    actions, the updated network state, and the requested policy info.
  """
  observation = time_step.observation
  if self.observation_and_action_constraint_splitter is not None:
    observation, _ = self.observation_and_action_constraint_splitter(
        observation)
  predictions, policy_state = self._reward_network(
      observation, time_step.step_type, policy_state)
  batch_size = tf.shape(predictions)[0]
  # Heteroscedastic networks expose the reward estimates on a named field.
  if isinstance(self._reward_network,
                heteroscedastic_q_network.HeteroscedasticQNetwork):
    predicted_reward_values = predictions.q_value_logits
  else:
    predicted_reward_values = predictions
  predicted_reward_values.shape.with_rank_at_least(2)
  predicted_reward_values.shape.with_rank_at_most(3)
  if predicted_reward_values.shape[
      -1] is not None and predicted_reward_values.shape[
          -1] != self._expected_num_actions:
    raise ValueError(
        'The number of actions ({}) does not match the reward_network output'
        ' size ({}).'.format(self._expected_num_actions,
                             predicted_reward_values.shape[1]))
  mask = constr.construct_mask_from_multiple_sources(
      time_step.observation, self._observation_and_action_constraint_splitter,
      self._constraints, self._expected_num_actions)
  if self._boltzmann_gumbel_exploration_constant is not None:
    logits = predicted_reward_values
    # Apply masking if needed. Overwrite the logits for invalid actions to
    # logits.dtype.min.
    if mask is not None:
      almost_neg_inf = tf.constant(logits.dtype.min, dtype=logits.dtype)
      logits = tf.compat.v2.where(
          tf.cast(mask, tf.bool), logits, almost_neg_inf)
    gumbel_dist = tfp.distributions.Gumbel(loc=0., scale=1.)
    gumbel_samples = gumbel_dist.sample(tf.shape(logits))
    num_samples_list_float = tf.stack(
        [tf.cast(x.read_value(), tf.float32) for x in self._num_samples_list],
        axis=-1)
    # Per-arm exploration weight C/sqrt(N_a); divide_no_nan keeps unseen
    # arms (N_a == 0) finite.
    exploration_weights = tf.math.divide_no_nan(
        self._boltzmann_gumbel_exploration_constant,
        tf.sqrt(num_samples_list_float))
    final_logits = logits + exploration_weights * gumbel_samples
    actions = tf.cast(
        tf.math.argmax(final_logits, axis=1), self._action_spec.dtype)
    # Log probability is not available in closed form. We treat this as a
    # deterministic policy at the moment.
    log_probability = tf.zeros([batch_size], tf.float32)
  else:
    # Apply the temperature scaling, needed for Boltzmann exploration.
    logits = predicted_reward_values / self._get_temperature_value()
    # Apply masking if needed. Overwrite the logits for invalid actions to
    # logits.dtype.min.
    if mask is not None:
      almost_neg_inf = tf.constant(logits.dtype.min, dtype=logits.dtype)
      logits = tf.compat.v2.where(
          tf.cast(mask, tf.bool), logits, almost_neg_inf)
    if self._action_offset != 0:
      distribution = shifted_categorical.ShiftedCategorical(
          logits=logits,
          dtype=self._action_spec.dtype,
          shift=self._action_offset)
    else:
      distribution = tfp.distributions.Categorical(
          logits=logits,
          dtype=self._action_spec.dtype)
    actions = distribution.sample()
    log_probability = distribution.log_prob(actions)
  bandit_policy_values = tf.fill([batch_size, 1],
                                 policy_utilities.BanditPolicyType.BOLTZMANN)
  if self._accepts_per_arm_features:
    # Saving the features for the chosen action to the policy_info.
    def gather_observation(obs):
      return tf.gather(params=obs, indices=actions, batch_dims=1)
    chosen_arm_features = tf.nest.map_structure(
        gather_observation,
        observation[bandit_spec_utils.PER_ARM_FEATURE_KEY])
    policy_info = policy_utilities.PerArmPolicyInfo(
        log_probability=log_probability if
        policy_utilities.InfoFields.LOG_PROBABILITY in self._emit_policy_info
        else (),
        predicted_rewards_mean=(
            predicted_reward_values if policy_utilities.InfoFields
            .PREDICTED_REWARDS_MEAN in self._emit_policy_info else ()),
        bandit_policy_type=(bandit_policy_values
                            if policy_utilities.InfoFields.BANDIT_POLICY_TYPE
                            in self._emit_policy_info else ()),
        chosen_arm_features=chosen_arm_features)
  else:
    policy_info = policy_utilities.PolicyInfo(
        log_probability=log_probability if
        policy_utilities.InfoFields.LOG_PROBABILITY in self._emit_policy_info
        else (),
        predicted_rewards_mean=(
            predicted_reward_values if policy_utilities.InfoFields
            .PREDICTED_REWARDS_MEAN in self._emit_policy_info else ()),
        bandit_policy_type=(bandit_policy_values
                            if policy_utilities.InfoFields.BANDIT_POLICY_TYPE
                            in self._emit_policy_info else ()))
  return policy_step.PolicyStep(
      tfp.distributions.Deterministic(loc=actions), policy_state, policy_info)
|
@property
def accepts_per_arm_features(self):
return self._accepts_per_arm_features
|
<|file_name|>machineoperand.cpp<|end_file_name|><|fim▁begin|>#include "machineoperand.h"
#include "basicblock.h"
#include <cassert>
#include <iostream>
#include <new>
using namespace TosLang::BackEnd;
MachineOperand::MachineOperand() : mKind{ OperandKind::UNKNOWN } { }
MachineOperand::MachineOperand(const unsigned op, const OperandKind kind)
{<|fim▁hole|> || (kind == OperandKind::REGISTER));
mKind = kind;
switch (kind)
{
case OperandKind::IMMEDIATE:
imm = op;
break;
case OperandKind::STACK_SLOT:
stackslot = op;
break;
case OperandKind::REGISTER:
reg = op;
break;
default:
assert(false && "Unexpected error while building a virtual instruction");
break;
}
}
std::ostream& TosLang::BackEnd::operator<<(std::ostream& stream, const MachineOperand& op)
{
switch (op.mKind)
{
case MachineOperand::OperandKind::IMMEDIATE:
return stream << op.imm;
case MachineOperand::OperandKind::STACK_SLOT:
return stream << "S" << op.stackslot;
case MachineOperand::OperandKind::REGISTER:
return stream << "R" << op.reg;
default:
return stream;
}
}<|fim▁end|>
|
assert((kind == OperandKind::IMMEDIATE)
|| (kind == OperandKind::STACK_SLOT)
|
<|file_name|>details.py<|end_file_name|><|fim▁begin|>from rest_framework.response import Response<|fim▁hole|>from rest_framework.views import APIView
from .common import login_required
from Requests import myudc
# Student's details requests handler
class Student(APIView):
"""
This only returns student's basic info right now,
but in the future it will have all layout details including:
theme preferences, student's modifications and other settings
"""
# Returns student's details on GET request
@staticmethod
@login_required("myudc")
def get(request):
# Return student's basic info as of now
return Response(
# Get & scrape student's basic info from MyUDC
myudc.scrape.student_details(
myudc.get.summarized_schedule(
# Send MyUDC cookies
request.session["myudc"]
)
)
)<|fim▁end|>
| |
<|file_name|>certificate.rs<|end_file_name|><|fim▁begin|>//
// Copyright 2021 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use crate::{
get_sha256,
report::{AttestationInfo, Report},
};
use anyhow::Context;
use log::info;
use openssl::{
asn1::Asn1Time,
bn::{BigNum, MsbOption},
hash::MessageDigest,
pkey::{HasPublic, PKey, PKeyRef, Private},
rsa::Rsa,
stack::Stack,
x509::{
extension::{
AuthorityKeyIdentifier, BasicConstraints, KeyUsage, SubjectAlternativeName,
SubjectKeyIdentifier,
},
X509Builder, X509NameBuilder, X509Ref, X509Req, X509,
},
};
// X.509 certificate parameters.
//
// <https://tools.ietf.org/html/rfc5280>
const RSA_KEY_SIZE: u32 = 2048;
// Version is zero-indexed, so the value of `2` corresponds to the version `3`.
const CERTIFICATE_VERSION: i32 = 2;
// Length of the randomly generated X.509 certificate serial number (which is 20 bytes).
//
// The most significant bit is excluded because it's passed as a separate argument to:<|fim▁hole|>// https://docs.rs/openssl/0.10.33/openssl/bn/struct.BigNum.html#method.rand
const SERIAL_NUMBER_SIZE: i32 = 159;
const CERTIFICATE_EXPIRATION_INTERVAL_IN_DAYS: u32 = 1;
const DEFAULT_DNS_NAME: &str = "localhost";
/// Indicates whether to add a custom TEE extension to a certificate.
#[derive(PartialEq)]
pub enum AddTeeExtension {
    /// Enum value contains a PEM encoded TEE Provider's X.509 certificate that signs TEE firmware
    /// key.
    Yes(Vec<u8>),
    /// Do not attach a TEE extension.
    No,
}
/// Convenience structure for generating X.509 certificates.
///
/// <https://tools.ietf.org/html/rfc5280>
pub struct CertificateAuthority {
    /// Private/public key pair of this certificate authority.
    pub key_pair: PKey<Private>,
    /// Root certificate corresponding to `key_pair`.
    pub root_certificate: X509,
}
impl CertificateAuthority {
/// Generates a root X.509 certificate and a corresponding private/public key pair.
///
/// `add_tee_extension` indicates whether to add a custom extension containing a TEE report to
/// the root certificate.
pub fn create(add_tee_extension: AddTeeExtension) -> anyhow::Result<Self> {
let key_pair = CertificateAuthority::generate_key_pair()?;
let root_certificate =
CertificateAuthority::generate_root_certificate(&key_pair, add_tee_extension)?;
Ok(Self {
key_pair,
root_certificate,
})
}
/// Generates a fresh RSA private/public key pair of `RSA_KEY_SIZE` bits.
fn generate_key_pair() -> anyhow::Result<PKey<Private>> {
    let rsa_key = Rsa::generate(RSA_KEY_SIZE).context("Couldn't generate RSA key")?;
    let key_pair = PKey::from_rsa(rsa_key).context("Couldn't parse RSA key")?;
    Ok(key_pair)
}
/// Creates a root X.509 certificate.
fn generate_root_certificate(
key_pair: &PKey<Private>,
add_tee_extension: AddTeeExtension,
) -> anyhow::Result<X509> {
info!("Generating root certificate");
let mut builder = CertificateBuilder::create()?;
builder.set_version(CERTIFICATE_VERSION)?;
builder.set_serial_number(SERIAL_NUMBER_SIZE)?;
builder.set_name()?;
builder.set_public_key(key_pair)?;
builder.set_expiration_interval(CERTIFICATE_EXPIRATION_INTERVAL_IN_DAYS)?;
builder.add_basic_constraints_extension(true)?;
builder.add_key_usage_extension(true)?;
builder.add_subject_key_identifier_extension(None)?;
builder.add_subject_alt_name_extension()?;
// Bind the certificate to the TEE firmware using an X.509 TEE extension.
if let AddTeeExtension::Yes(tee_certificate) = add_tee_extension {
builder.add_tee_extension(key_pair, tee_certificate)?;
}
let certificate = builder.build(key_pair)?;
Ok(certificate)
}
/// Generates an X.509 certificate based on the certificate signing `request`.
///
/// `add_tee_extension` indicates whether to add a custom extension containing a TEE report.
pub fn sign_certificate(
&self,
request: X509Req,
add_tee_extension: AddTeeExtension,
) -> anyhow::Result<X509> {
info!("Signing certificate");
let mut builder = CertificateBuilder::create()?;
builder.set_version(CERTIFICATE_VERSION)?;
builder.set_serial_number(SERIAL_NUMBER_SIZE)?;
builder.set_name()?;
builder.set_public_key(request.public_key()?.as_ref())?;
builder.set_expiration_interval(CERTIFICATE_EXPIRATION_INTERVAL_IN_DAYS)?;
builder.add_basic_constraints_extension(false)?;
builder.add_key_usage_extension(false)?;
builder.add_subject_key_identifier_extension(Some(&self.root_certificate))?;
builder.add_auth_key_identifier_extension(&self.root_certificate)?;
// Add X.509 extensions from the certificate signing request.
builder.add_extensions(request.extensions()?)?;
// Bind the certificate to the TEE firmware using an X.509 TEE extension.
if let AddTeeExtension::Yes(tee_certificate) = add_tee_extension {
builder.add_tee_extension(request.public_key()?.as_ref(), tee_certificate)?;
}
let certificate = builder.build(&self.key_pair)?;
Ok(certificate)
}
/// Get RSA key pair encoded in PEM format.
///
/// <https://tools.ietf.org/html/rfc7468>
pub fn get_private_key_pem(&self) -> anyhow::Result<Vec<u8>> {
self.key_pair
.private_key_to_pem_pkcs8()
.context("Couldn't encode key pair in PEM format")
}
/// Get a root X.509 certificate encoded in PEM format.
///
/// <https://tools.ietf.org/html/rfc7468>
pub fn get_root_certificate_pem(&self) -> anyhow::Result<Vec<u8>> {
self.root_certificate
.to_pem()
.context("Couldn't encode root certificate in PEM format")
}
}
/// Helper struct that implements certificate creation using `openssl`.
struct CertificateBuilder {
builder: X509Builder,
}
impl CertificateBuilder {
    /// Creates a builder wrapping a fresh `openssl` X.509 builder.
    fn create() -> anyhow::Result<Self> {
        let builder = X509::builder()?;
        Ok(Self { builder })
    }
    /// Sets the X.509 version (zero-indexed: `2` means certificate version 3).
    fn set_version(&mut self, version: i32) -> anyhow::Result<&mut Self> {
        self.builder.set_version(version)?;
        Ok(self)
    }
    /// Sets a randomly generated serial number of `serial_number_size` bits.
    fn set_serial_number(&mut self, serial_number_size: i32) -> anyhow::Result<&mut Self> {
        let serial_number = {
            let mut serial = BigNum::new()?;
            // `MAYBE_ZERO`: the most significant bit of the random value may be
            // zero; the bit count already excludes the MSB (see `SERIAL_NUMBER_SIZE`).
            serial.rand(serial_number_size, MsbOption::MAYBE_ZERO, false)?;
            serial.to_asn1_integer()?
        };
        self.builder.set_serial_number(&serial_number)?;
        Ok(self)
    }
    /// Sets the subject and issuer names; both are identical (self-issued name).
    fn set_name(&mut self) -> anyhow::Result<&mut Self> {
        let mut name = X509NameBuilder::new()?;
        name.append_entry_by_text("O", "Oak")?;
        name.append_entry_by_text("CN", "Proxy Attestation Service")?;
        let name = name.build();
        self.builder.set_subject_name(&name)?;
        self.builder.set_issuer_name(&name)?;
        Ok(self)
    }
    /// Sets the certificate's public key.
    fn set_public_key<T>(&mut self, public_key: &PKeyRef<T>) -> anyhow::Result<&mut Self>
    where
        T: HasPublic,
    {
        self.builder.set_pubkey(public_key)?;
        Ok(self)
    }
fn set_expiration_interval(&mut self, expiration_interval: u32) -> anyhow::Result<&mut Self> {
let not_before = Asn1Time::days_from_now(0)?;
self.builder.set_not_before(¬_before)?;
let not_after = Asn1Time::days_from_now(expiration_interval)?;
self.builder.set_not_after(¬_after)?;
Ok(self)
}
    /// Adds the Basic Constraints extension; `is_critical` marks it critical
    /// (used for the root/CA certificate).
    fn add_basic_constraints_extension(&mut self, is_critical: bool) -> anyhow::Result<&mut Self> {
        if is_critical {
            self.builder
                .append_extension(BasicConstraints::new().critical().build()?)?;
        } else {
            self.builder
                .append_extension(BasicConstraints::new().build()?)?;
        }
        Ok(self)
    }
    /// Adds the Key Usage extension: cert-sign/CRL-sign for the root
    /// certificate, signature/encipherment usages for issued certificates.
    fn add_key_usage_extension(&mut self, is_root_certificate: bool) -> anyhow::Result<&mut Self> {
        if is_root_certificate {
            self.builder.append_extension(
                KeyUsage::new()
                    .critical()
                    .key_cert_sign()
                    .crl_sign()
                    .build()?,
            )?;
        } else {
            self.builder.append_extension(
                KeyUsage::new()
                    .critical()
                    .non_repudiation()
                    .digital_signature()
                    .key_encipherment()
                    .build()?,
            )?;
        }
        Ok(self)
    }
    /// Adds the Subject Key Identifier extension; `root_certificate` (if any)
    /// supplies the issuer context for the extension builder.
    fn add_subject_key_identifier_extension(
        &mut self,
        root_certificate: Option<&X509Ref>,
    ) -> anyhow::Result<&mut Self> {
        let subject_key_identifier = SubjectKeyIdentifier::new()
            .build(&self.builder.x509v3_context(root_certificate, None))?;
        self.builder.append_extension(subject_key_identifier)?;
        Ok(self)
    }
    /// Adds a Subject Alternative Name extension for `DEFAULT_DNS_NAME`.
    fn add_subject_alt_name_extension(&mut self) -> anyhow::Result<&mut Self> {
        let subject_alt_name = SubjectAlternativeName::new()
            .dns(DEFAULT_DNS_NAME)
            .build(&self.builder.x509v3_context(None, None))?;
        self.builder.append_extension(subject_alt_name)?;
        Ok(self)
    }
    /// Adds the Authority Key Identifier extension derived from `root_certificate`.
    fn add_auth_key_identifier_extension(
        &mut self,
        root_certificate: &X509Ref,
    ) -> anyhow::Result<&mut Self> {
        let auth_key_identifier = AuthorityKeyIdentifier::new()
            .keyid(false)
            .issuer(false)
            .build(&self.builder.x509v3_context(Some(root_certificate), None))?;
        self.builder.append_extension(auth_key_identifier)?;
        Ok(self)
    }
    // Generates a TEE report with the public key hash as data and add it to the certificate as a
    // custom extension. This is required to bind the certificate to the TEE firmware.
    fn add_tee_extension<T>(
        &mut self,
        public_key: &PKeyRef<T>,
        tee_certificate: Vec<u8>,
    ) -> anyhow::Result<&mut Self>
    where
        T: HasPublic,
    {
        // The report covers the SHA-256 of the DER-encoded public key.
        let public_key_hash = get_sha256(&public_key.public_key_to_der()?);
        let tee_report = Report::new(&public_key_hash);
        let attestation_info = AttestationInfo {
            report: tee_report,
            certificate: tee_certificate,
        };
        let tee_extension = attestation_info.to_extension()?;
        self.builder.append_extension(tee_extension)?;
        Ok(self)
    }
    /// Appends every extension from `extensions` (e.g. copied from a CSR).
    fn add_extensions(
        &mut self,
        extensions: Stack<openssl::x509::X509Extension>,
    ) -> anyhow::Result<&mut Self> {
        for extension in extensions.iter() {
            self.builder.append_extension2(extension)?;
        }
        Ok(self)
    }
    /// Signs the assembled certificate with `private_key` (SHA-256) and
    /// consumes the builder, returning the finished certificate.
    fn build(mut self, private_key: &PKey<Private>) -> anyhow::Result<X509> {
        self.builder.sign(private_key, MessageDigest::sha256())?;
        Ok(self.builder.build())
    }
}<|fim▁end|>
| |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>pub mod analysis;
pub mod traits;
pub mod pa_source;<|fim▁hole|><|fim▁end|>
|
pub mod soundio_source;
pub mod rms;
|
<|file_name|>statusbareventfilter.cpp<|end_file_name|><|fim▁begin|>/*****************************************************************************
* Copyright 2015-2020 Alexander Barthel [email protected]
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*<|fim▁hole|>*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
#include "gui/statusbareventfilter.h"
#include <QHelpEvent>
#include <QLabel>
#include <QStatusBar>
#include <QToolTip>
/* Installs on `parentStatusBar`, which becomes the Qt parent (and thus owns
 * this filter); `firstLabel` marks the right boundary of the tooltip-enabled
 * area. */
StatusBarEventFilter::StatusBarEventFilter(QStatusBar *parentStatusBar, QLabel *firstLabel)
  : QObject(parentStatusBar), statusBar(parentStatusBar), firstWidget(firstLabel)
{
}
StatusBarEventFilter::~StatusBarEventFilter()
{
}
/* Restricts status-bar tooltips to the region left of the first label widget
 * and shows the bar's tooltip when that region is clicked; everything else
 * falls through to the default QObject filter. */
bool StatusBarEventFilter::eventFilter(QObject *object, QEvent *event)
{
  if(event->type() == QEvent::ToolTip)
  {
    QHelpEvent *mouseEvent = dynamic_cast<QHelpEvent *>(event);
    if(mouseEvent != nullptr)
    {
      // Allow tooltip events only on the left side of the first label widget
      QRect rect(0, 0, firstWidget->geometry().left(), statusBar->height());
      if(!rect.contains(mouseEvent->pos()))
        return true;
    }
  }
  else if(event->type() == QEvent::MouseButtonRelease)
  {
    QMouseEvent *mouseEvent = dynamic_cast<QMouseEvent *>(event);
    if(mouseEvent != nullptr)
    {
      // Allow tooltips on click only on the left side of the first label widget
      QRect rect(0, 0, firstWidget->geometry().left(), statusBar->height());
      if(rect.contains(mouseEvent->pos()))
      {
        QToolTip::showText(QCursor::pos(), statusBar->toolTip(), statusBar);
        return true;
      }
    }
  }
  return QObject::eventFilter(object, event);
}<|fim▁end|>
|
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
|
<|file_name|>0013_partecipazione_centrale_operativa.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('attivita', '0012_attivita_centrale_operativa'),
]
operations = [
migrations.AddField(
model_name='partecipazione',
name='centrale_operativa',<|fim▁hole|><|fim▁end|>
|
field=models.BooleanField(default=False, db_index=True),
),
]
|
<|file_name|>hivemind_vi_VN.ts<|end_file_name|><|fim▁begin|><TS language="vi_VN" version="2.0">
<context>
<name>AddressBookPage</name>
<message>
<source>Create a new address</source>
<translation>Tạo một địa chỉ mới</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
</context>
<context>
<name>AskPassphraseDialog</name>
</context>
<context>
<name>HivemindGUI</name>
</context>
<context>
<name>ClientModel</name>
</context>
<context>
<name>CoinControlDialog</name>
</context>
<context>
<name>EditAddressDialog</name>
</context>
<context>
<name>FreespaceChecker</name>
</context>
<context>
<name>HelpMessageDialog</name>
</context>
<context>
<name>Intro</name>
</context>
<context>
<name>OpenURIDialog</name>
</context>
<context>
<name>OptionsDialog</name>
</context>
<context>
<name>OverviewPage</name>
</context>
<context>
<name>PaymentServer</name>
</context>
<context>
<name>PeerTableModel</name>
</context>
<context>
<name>QObject</name>
</context>
<context>
<name>QRImageWidget</name>
</context>
<context>
<name>RPCConsole</name>
</context>
<context>
<name>ReceiveCoinsDialog</name>
</context>
<context>
<name>ReceiveRequestDialog</name>
</context>
<context>
<name>RecentRequestsTableModel</name>
</context>
<context><|fim▁hole|> </context>
<context>
<name>ShutdownWindow</name>
</context>
<context>
<name>SignVerifyMessageDialog</name>
</context>
<context>
<name>SplashScreen</name>
</context>
<context>
<name>TrafficGraphWidget</name>
</context>
<context>
<name>TransactionDesc</name>
</context>
<context>
<name>TransactionDescDialog</name>
</context>
<context>
<name>TransactionTableModel</name>
</context>
<context>
<name>TransactionView</name>
</context>
<context>
<name>UnitDisplayStatusBarControl</name>
</context>
<context>
<name>WalletFrame</name>
</context>
<context>
<name>WalletModel</name>
</context>
<context>
<name>WalletView</name>
</context>
<context>
<name>hivemind-core</name>
</context>
</TS><|fim▁end|>
|
<name>SendCoinsDialog</name>
</context>
<context>
<name>SendCoinsEntry</name>
|
<|file_name|>common.js<|end_file_name|><|fim▁begin|>global.requireWithCoverage = function (libName) {
	if (process.env.NODE_CHESS_COVERAGE) {
		// Coverage runs load the instrumented build from lib-cov instead.
		return require('../lib-cov/' + libName + '.js');
	}
	if (libName === 'index') {
		// 'index' maps to the package entry point rather than a single module.
		return require('../lib');
	} else {
		return require('../lib/' + libName + '.js');
	}
};<|fim▁hole|>global.chai = require('chai');
global.assert = chai.assert;
global.expect = chai.expect;
global.should = chai.should();<|fim▁end|>
| |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
def get_client_ip(request):
    """
    Given an HTTP request, returns the related IP address.

    The first entry of the ``X-Forwarded-For`` header is preferred (the
    original client when the request passed through proxies); otherwise the
    socket-level ``REMOTE_ADDR`` is returned (``None`` if absent).

    NOTE(review): ``X-Forwarded-For`` is client-controlled and spoofable, so
    the returned address must not be trusted for security decisions.
    """
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR', None)
    if x_forwarded_for:
        # Header format: "client, proxy1, proxy2" -> keep the client entry.
        ip = x_forwarded_for.split(',')[0]
    else:
        ip = request.META.get('REMOTE_ADDR')
    return ip
|
<|file_name|>runtests.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# vim:ts=4:sw=4:et:
# no unicode literals
from __future__ import absolute_import, division, print_function
import argparse
import json
import math
import multiprocessing
import os
import os.path
import random
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback
# in the FB internal test infra, ensure that we are running from the
# dir that houses this script rather than some other higher level dir
# in the containing tree. We can't use __file__ to determine this
# because our PAR machinery can generate a name like /proc/self/fd/3/foo
# which won't resolve to anything useful by the time we get here.
if not os.path.exists("runtests.py") and os.path.exists("watchman/runtests.py"):
os.chdir("watchman")
try:
import unittest2 as unittest
except ImportError:
import unittest
# Ensure that we can find pywatchman and integration tests (if we're not the
# main module, a wrapper is probably loading us up and we shouldn't screw around
# with sys.path).
if __name__ == "__main__":
sys.path.insert(0, os.path.join(os.getcwd(), "python"))
sys.path.insert(1, os.path.join(os.getcwd(), "tests", "integration"))
sys.path.insert(1, os.path.join(os.getcwd(), "tests", "integration", "facebook"))
# Only Python 3.5+ supports native asyncio
has_asyncio = sys.version_info >= (3, 5)
if has_asyncio:
sys.path.insert(0, os.path.join(os.getcwd(), "tests", "async"))
import asyncio
try:
import queue
except Exception:
import Queue
queue = Queue
parser = argparse.ArgumentParser(
description="Run the watchman unit and integration tests"
)
parser.add_argument("-v", "--verbosity", default=2, help="test runner verbosity")
parser.add_argument(
"--keep",
action="store_true",
help="preserve all temporary files created during test execution",
)
parser.add_argument(
"--keep-if-fail",
action="store_true",
help="preserve all temporary files created during test execution if failed",
)
parser.add_argument("files", nargs="*", help="specify which test files to run")
parser.add_argument(
"--method", action="append", help="specify which python test method names to run"
)
def default_concurrency():
    """Pick how many test workers to run in parallel by default."""
    # Threads hang under Python 2.7 (https://bugs.python.org/issue20318),
    # so anything older than Python 3 runs single-threaded.
    if sys.version_info < (3, 0):
        return 1
    workers = min(4, math.ceil(1.5 * multiprocessing.cpu_count()))
    if "CIRCLECI" in os.environ:
        # Circle CI has low inotify sysctls and we sometimes hit those
        # limits, so use fewer workers there.
        workers = workers / 2
    return int(workers)
parser.add_argument(
"--concurrency",
default=default_concurrency(),
type=int,
help="How many tests to run at once",
)
parser.add_argument(
"--watcher",
action="store",
default="auto",
help="Specify which watcher should be used to run the tests",
)
parser.add_argument(
"--debug-watchman",
action="store_true",
help="Pauses start up and prints out the PID for watchman server process."
+ "Forces concurrency to 1.",
)
parser.add_argument(
"--watchman-path", action="store", help="Specify the path to the watchman binary"
)
parser.add_argument(
"--win7", action="store_true", help="Set env to force win7 compatibility tests"
)
parser.add_argument(
"--retry-flaky",
action="store",
type=int,
default=2,
help="How many additional times to retry flaky tests.",
)
parser.add_argument(
"--testpilot-json",
action="store_true",
help="Output test results in Test Pilot JSON format",
)
parser.add_argument(
"--pybuild-dir",
action="store",
help="For out-of-src-tree builds, where the generated python lives",
)
args = parser.parse_args()
if args.pybuild_dir is not None:
sys.path.insert(0, os.path.realpath(args.pybuild_dir))
# Import our local stuff after we've had a chance to look at args.pybuild_dir.
# The `try` block prevents the imports from being reordered
try:
import Interrupt
import TempDir
import WatchmanInstance
import pywatchman
except ImportError:
raise
# We test for this in a test case
os.environ["WATCHMAN_EMPTY_ENV_VAR"] = ""
os.environ["HGUSER"] = "John Smith <[email protected]>"
os.environ["NOSCMLOG"] = "1"
os.environ["WATCHMAN_NO_SPAWN"] = "1"
if args.win7:
os.environ["WATCHMAN_WIN7_COMPAT"] = "1"
# Ensure that we find the watchman we built in the tests
if args.watchman_path:
args.watchman_path = os.path.realpath(args.watchman_path)
bin_dir = os.path.dirname(args.watchman_path)
os.environ["WATCHMAN_BINARY"] = args.watchman_path
else:
bin_dir = os.path.dirname(__file__)
os.environ["PYWATCHMAN_PATH"] = os.path.join(os.getcwd(), "python")
os.environ["WATCHMAN_PYTHON_BIN"] = os.path.abspath(
os.path.join(os.getcwd(), "python", "bin")
)
os.environ["PATH"] = "%s%s%s" % (
os.path.abspath(bin_dir),
os.pathsep,
os.environ["PATH"],
)
# We'll put all our temporary stuff under one dir so that we
# can clean it all up at the end
temp_dir = TempDir.get_temp_dir(args.keep)
def interrupt_handler(signo, frame):
Interrupt.setInterrupted()
signal.signal(signal.SIGINT, interrupt_handler)
class Result(unittest.TestResult):
# Make it easier to spot success/failure by coloring the status
# green for pass, red for fail and yellow for skip.
# also print the elapsed time per test
transport = None
encoding = None
attempt = 0
def shouldStop(self):
if Interrupt.wasInterrupted():
return True
return super(Result, self).shouldStop()
def startTest(self, test):
self.startTime = time.time()
super(Result, self).startTest(test)
def addSuccess(self, test):
elapsed = time.time() - self.startTime
super(Result, self).addSuccess(test)
if args.testpilot_json:
print(<|fim▁hole|> "test": test.id(),
"start_time": self.startTime,
"end_time": time.time(),
}
)
)
else:
print(
"\033[32mPASS\033[0m %s (%.3fs)%s"
% (test.id(), elapsed, self._attempts())
)
def addSkip(self, test, reason):
elapsed = time.time() - self.startTime
super(Result, self).addSkip(test, reason)
if args.testpilot_json:
print(
json.dumps(
{
"op": "test_done",
"status": "skipped",
"test": test.id(),
"details": reason,
"start_time": self.startTime,
"end_time": time.time(),
}
)
)
else:
print("\033[33mSKIP\033[0m %s (%.3fs) %s" % (test.id(), elapsed, reason))
def __printFail(self, test, err):
elapsed = time.time() - self.startTime
t, val, trace = err
if args.testpilot_json:
print(
json.dumps(
{
"op": "test_done",
"status": "failed",
"test": test.id(),
"details": "".join(traceback.format_exception(t, val, trace)),
"start_time": self.startTime,
"end_time": time.time(),
}
)
)
else:
print(
"\033[31mFAIL\033[0m %s (%.3fs)%s\n%s"
% (
test.id(),
elapsed,
self._attempts(),
"".join(traceback.format_exception(t, val, trace)),
)
)
def addFailure(self, test, err):
self.__printFail(test, err)
super(Result, self).addFailure(test, err)
def addError(self, test, err):
self.__printFail(test, err)
super(Result, self).addError(test, err)
def setAttemptNumber(self, attempt):
self.attempt = attempt
def _attempts(self):
if self.attempt > 0:
return " (%d attempts)" % self.attempt
return ""
def expandFilesList(files):
    """Expand any directory names in `files` into the files they contain.

    Plain file names pass through unchanged (path-normalized); directories
    are walked recursively and every non-hidden file is collected.

    Bug fix: the previous version ignored its `files` parameter and iterated
    the global `args.files` instead, and also shadowed `files` with the
    os.walk tuple inside the loop.
    """
    res = []
    for entry in files:
        if os.path.isdir(entry):
            for dirname, _dirs, filenames in os.walk(entry):
                for f in filenames:
                    # Skip hidden (dot) files.
                    if not f.startswith("."):
                        res.append(os.path.normpath(os.path.join(dirname, f)))
        else:
            res.append(os.path.normpath(entry))
    return res
if args.files:
args.files = expandFilesList(args.files)
def shouldIncludeTestFile(filename):
    """Decide whether tests from `filename` should be collected.

    The explicit --files selection wins when present; otherwise, active
    --method filters restrict collection to python test files only.
    """
    global args
    fname = os.path.relpath(filename.replace(".pyc", ".py"))
    if args.files:
        return fname in args.files
    if args.method and not fname.endswith(".py"):
        # --method implies python tests only
        return False
    return True
def shouldIncludeTestName(name):
    """Decide whether the individual test `name` should run.

    Everything runs when no --method filters are set; otherwise any filter
    occurring as a substring of the full test name selects it (the strict
    original exact-match interpretation was too hard to use in practice).
    """
    global args
    if not args.method:
        return True
    return any(f in name for f in args.method)
class Loader(unittest.TestLoader):
    """ allows us to control the subset of which tests are run """
    def __init__(self):
        super(Loader, self).__init__()
    def loadTestsFromTestCase(self, testCaseClass):
        return super(Loader, self).loadTestsFromTestCase(testCaseClass)
    def getTestCaseNames(self, testCaseClass):
        # Filter individual test method names through the --method options.
        names = super(Loader, self).getTestCaseNames(testCaseClass)
        return filter(lambda name: shouldIncludeTestName(name), names)
    def loadTestsFromModule(self, module, *args, **kw):
        # Skip whole modules that are excluded by the --files selection.
        if not shouldIncludeTestFile(module.__file__):
            return unittest.TestSuite()
        return super(Loader, self).loadTestsFromModule(module, *args, **kw)
loader = Loader()
suite = unittest.TestSuite()
directories = ["python/tests", "tests/integration"]
facebook_directory = "tests/integration/facebook"
if os.path.exists(facebook_directory):
# the facebook dir isn't sync'd to github, but it
# is present internally, so it should remain in this list
directories += [facebook_directory]
if has_asyncio:
directories += ["tests/async"]
for d in directories:
suite.addTests(loader.discover(d, top_level_dir=d))
if os.name == "nt":
t_globs = "tests/*.exe"
else:
t_globs = "tests/*.t"
tls = threading.local()
# Manage printing from concurrent threads
# http://stackoverflow.com/a/3030755/149111
class ThreadSafeFile(object):
    """Wrapper serializing writes to a shared file-like object.

    A writing thread re-acquires the reentrant lock on every `write` and
    only releases all of its acquisitions when it emits a bare newline, so
    one thread's output line is never interleaved with another's.
    """
    def __init__(self, f):
        self.f = f
        self.lock = threading.RLock()
        # Count of nested lock acquisitions held since the last newline.
        self.nesting = 0
    def _getlock(self):
        self.lock.acquire()
        self.nesting += 1
    def _droplock(self):
        # Release every nesting level accumulated since the last newline.
        nesting = self.nesting
        self.nesting = 0
        for _ in range(nesting):
            self.lock.release()
    def __getattr__(self, name):
        # `softspace` (used by Python 2 `print`) is kept per-thread in `tls`
        # rather than shared on the instance.
        if name == "softspace":
            return tls.softspace
        else:
            raise AttributeError(name)
    def __setattr__(self, name, value):
        if name == "softspace":
            tls.softspace = value
        else:
            return object.__setattr__(self, name, value)
    def write(self, data):
        # Keep holding the lock across writes until a bare newline ends the line.
        self._getlock()
        self.f.write(data)
        if data == "\n":
            self._droplock()
    def flush(self):
        self._getlock()
        self.f.flush()
        self._droplock()
sys.stdout = ThreadSafeFile(sys.stdout)
tests_queue = queue.Queue()
results_queue = queue.Queue()
def runner():
global results_queue
global tests_queue
broken = False
try:
# Start up a shared watchman instance for the tests.
inst = WatchmanInstance.Instance(
{"watcher": args.watcher}, debug_watchman=args.debug_watchman
)
inst.start()
# Allow tests to locate this default instance
WatchmanInstance.setSharedInstance(inst)
if has_asyncio:
# Each thread will have its own event loop
asyncio.set_event_loop(asyncio.new_event_loop())
except Exception as e:
print("while starting watchman: %s" % str(e))
traceback.print_exc()
broken = True
while not broken:
test = tests_queue.get()
try:
if test == "terminate":
break
if Interrupt.wasInterrupted() or broken:
continue
result = None
for attempt in range(0, args.retry_flaky + 1):
# Check liveness of the server
try:
client = pywatchman.client(timeout=3.0, sockpath=inst.getSockPath())
client.query("version")
client.close()
except Exception as exc:
print(
"Failed to connect to watchman server: %s; starting a new one"
% exc
)
try:
inst.stop()
except Exception:
pass
try:
inst = WatchmanInstance.Instance(
{"watcher": args.watcher},
debug_watchman=args.debug_watchman,
)
inst.start()
# Allow tests to locate this default instance
WatchmanInstance.setSharedInstance(inst)
except Exception as e:
print("while starting watchman: %s" % str(e))
traceback.print_exc()
broken = True
continue
try:
result = Result()
result.setAttemptNumber(attempt)
if hasattr(test, "setAttemptNumber"):
test.setAttemptNumber(attempt)
test.run(result)
if hasattr(test, "setAttemptNumber") and not result.wasSuccessful():
# Facilitate retrying this possibly flaky test
continue
break
except Exception as e:
print(e)
if hasattr(test, "setAttemptNumber") and not result.wasSuccessful():
# Facilitate retrying this possibly flaky test
continue
if (
not result.wasSuccessful()
and "TRAVIS" in os.environ
and hasattr(test, "dumpLogs")
):
test.dumpLogs()
results_queue.put(result)
finally:
tests_queue.task_done()
if not broken:
inst.stop()
def expand_suite(suite, target=None):
    """Recursively flatten a TestSuite into a plain list of test cases."""
    if target is None:
        target = []
    for item in suite:
        if not isinstance(item, unittest.TestSuite):
            target.append(item)
        else:
            expand_suite(item, target)
    # Shuffle so tests carry no implicit ordering dependencies and
    # longer-running tests don't clump together.
    random.shuffle(target)
    return target
def queue_jobs(tests):
for test in tests:
tests_queue.put(test)
all_tests = expand_suite(suite)
if args.debug_watchman:
args.concurrency = 1
elif len(all_tests) < args.concurrency:
args.concurrency = len(all_tests)
queue_jobs(all_tests)
if args.concurrency > 1:
for _ in range(args.concurrency):
t = threading.Thread(target=runner)
t.daemon = True
t.start()
# also send a termination sentinel
tests_queue.put("terminate")
# Wait for all tests to have been dispatched
tests_queue.join()
else:
# add a termination sentinel
tests_queue.put("terminate")
runner()
# Now pull out and aggregate the results
tests_run = 0
tests_failed = 0
tests_skipped = 0
while not results_queue.empty():
res = results_queue.get()
tests_run = tests_run + res.testsRun
tests_failed = tests_failed + len(res.errors) + len(res.failures)
tests_skipped = tests_skipped + len(res.skipped)
if not args.testpilot_json:
print(
"Ran %d, failed %d, skipped %d, concurrency %d"
% (tests_run, tests_failed, tests_skipped, args.concurrency)
)
if "APPVEYOR" in os.environ:
logdir = "logs7" if args.win7 else "logs"
logzip = "%s.zip" % logdir
shutil.copytree(tempfile.tempdir, logdir)
subprocess.call(["7z", "a", logzip, logdir])
subprocess.call(["appveyor", "PushArtifact", logzip])
if "CIRCLE_ARTIFACTS" in os.environ:
print("Creating %s/logs.zip" % os.environ["CIRCLE_ARTIFACTS"])
subprocess.call(
[
"zip",
"-q",
"-r",
"%s/logs.zip" % os.environ["CIRCLE_ARTIFACTS"],
temp_dir.get_dir(),
]
)
if tests_failed or (tests_run == 0):
if args.keep_if_fail:
temp_dir.set_keep(True)
if args.testpilot_json:
# When outputting JSON, our return code indicates if we successfully
# produced output or not, not whether the tests passed. The JSON
# output contains the detailed test pass/failure information.
sys.exit(0)
sys.exit(1)<|fim▁end|>
|
json.dumps(
{
"op": "test_done",
"status": "passed",
|
<|file_name|>xtc.rs<|end_file_name|><|fim▁begin|>//! This module serves as the entry point into Xt's main binary.
// This file is part of Xt.
// This is the Xt text editor; it edits text.
// Copyright (C) 2016-2018 The Xt Developers
// This program is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see
// <http://www.gnu.org/licenses/>.
extern crate clap;
extern crate xt_core as xt;
#[macro_use]
extern crate slog;
extern crate slog_term;
<|fim▁hole|> App::new("xt-core")
.version("0.1.0")
.author("Dom Rodriguez <[email protected]>")
.about("Core backend for Xt.")
.arg(
Arg::with_name("verbose")
.short("v")
.multiple(true)
.required(false)
.help("Set the level of logging verbosity"),
)
.subcommand(
SubCommand::with_name("spawn").help("Spawn a instance of Xt"),
)
.get_matches()
}
fn main() {
    // Arguments are parsed (validating the CLI) but not consumed yet.
    let _args = retrieve_arguments();
    let log = init_logger();
    info!(log, "Xt (core) loading..");
    warn!(
        log,
        "Xt (core) has no configuration file. Reverting to defaults."
    );
    error!(log, "Xt Core is not ready for deployment. Halt.");
    // Deliberate halt: the core binary is still a stub.
    unimplemented!();
}
|
use clap::{App, Arg, ArgMatches, SubCommand};
use xt::logging::init_logger;
fn retrieve_arguments() -> ArgMatches<'static> {
|
<|file_name|>rdsinstance.go<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package mapper
import (
"github.com/ernestio/aws-definition-mapper/definition"
"github.com/ernestio/aws-definition-mapper/output"
)
// MapRDSInstances : Maps the rds instances for the input payload on a ernest internal format
func MapRDSInstances(d definition.Definition) []output.RDSInstance {
var instances []output.RDSInstance
for _, instance := range d.RDSInstances {
var sgroups []string
var networks []string
for _, sg := range instance.SecurityGroups {
sgroups = append(sgroups, d.GeneratedName()+sg)
}
for _, nw := range instance.Networks {
networks = append(networks, d.GeneratedName()+nw)
}
name := d.GeneratedName() + instance.Name
i := output.RDSInstance{
Name: name,
Size: instance.Size,
Engine: instance.Engine,
EngineVersion: instance.EngineVersion,
Port: instance.Port,
Cluster: instance.Cluster,
Public: instance.Public,
MultiAZ: instance.MultiAZ,
PromotionTier: instance.PromotionTier,
StorageType: instance.Storage.Type,
StorageSize: instance.Storage.Size,
StorageIops: instance.Storage.Iops,
AvailabilityZone: instance.AvailabilityZone,
SecurityGroups: sgroups,
SecurityGroupAWSIDs: mapRDSSecurityGroupIDs(sgroups),
Networks: networks,
NetworkAWSIDs: mapRDSNetworkIDs(networks),
DatabaseName: instance.DatabaseName,
DatabaseUsername: instance.DatabaseUsername,
DatabasePassword: instance.DatabasePassword,<|fim▁hole|> AutoUpgrade: instance.AutoUpgrade,
BackupRetention: instance.Backups.Retention,
BackupWindow: instance.Backups.Window,
MaintenanceWindow: instance.MaintenanceWindow,
ReplicationSource: instance.ReplicationSource,
FinalSnapshot: instance.FinalSnapshot,
License: instance.License,
Timezone: instance.Timezone,
Tags: mapTagsServiceOnly(d.Name),
ProviderType: "$(datacenters.items.0.type)",
VpcID: "$(vpcs.items.0.vpc_id)",
SecretAccessKey: "$(datacenters.items.0.aws_secret_access_key)",
AccessKeyID: "$(datacenters.items.0.aws_access_key_id)",
DatacenterRegion: "$(datacenters.items.0.region)",
}
cluster := d.FindRDSCluster(instance.Cluster)
if cluster != nil {
i.Engine = cluster.Engine
i.Cluster = d.GeneratedName() + instance.Cluster
}
instances = append(instances, i)
}
return instances
}
// MapDefinitionRDSInstances : Maps the rds instances from the internal format to the input definition format
func MapDefinitionRDSInstances(m *output.FSMMessage) []definition.RDSInstance {
var instances []definition.RDSInstance
prefix := m.Datacenters.Items[0].Name + "-" + m.ServiceName + "-"
for _, instance := range m.RDSInstances.Items {
sgroups := ComponentNamesFromIDs(m.Firewalls.Items, instance.SecurityGroupAWSIDs)
subnets := ComponentNamesFromIDs(m.Networks.Items, instance.NetworkAWSIDs)
i := definition.RDSInstance{
Name: ShortName(instance.Name, prefix),
Size: instance.Size,
Engine: instance.Engine,
EngineVersion: instance.EngineVersion,
Port: instance.Port,
Cluster: ShortName(instance.Cluster, prefix),
Public: instance.Public,
MultiAZ: instance.MultiAZ,
PromotionTier: instance.PromotionTier,
AvailabilityZone: instance.AvailabilityZone,
SecurityGroups: ShortNames(sgroups, prefix),
Networks: ShortNames(subnets, prefix),
DatabaseName: instance.DatabaseName,
DatabaseUsername: instance.DatabaseUsername,
DatabasePassword: instance.DatabasePassword,
AutoUpgrade: instance.AutoUpgrade,
MaintenanceWindow: instance.MaintenanceWindow,
ReplicationSource: instance.ReplicationSource,
FinalSnapshot: instance.FinalSnapshot,
License: instance.License,
Timezone: instance.Timezone,
}
i.Storage.Type = instance.StorageType
i.Storage.Size = instance.StorageSize
i.Storage.Iops = instance.StorageIops
i.Backups.Retention = instance.BackupRetention
i.Backups.Window = instance.BackupWindow
if i.Storage.Type != "io1" {
i.Storage.Iops = nil
}
instances = append(instances, i)
}
return instances
}
// UpdateRDSInstanceValues corrects missing values after an import
func UpdateRDSInstanceValues(m *output.FSMMessage) {
for i := 0; i < len(m.RDSInstances.Items); i++ {
m.RDSInstances.Items[i].ProviderType = "$(datacenters.items.0.type)"
m.RDSInstances.Items[i].AccessKeyID = "$(datacenters.items.0.aws_access_key_id)"
m.RDSInstances.Items[i].SecretAccessKey = "$(datacenters.items.0.aws_secret_access_key)"
m.RDSInstances.Items[i].DatacenterRegion = "$(datacenters.items.0.region)"
m.RDSInstances.Items[i].VpcID = "$(vpcs.items.0.vpc_id)"
m.RDSInstances.Items[i].SecurityGroups = ComponentNamesFromIDs(m.Firewalls.Items, m.RDSInstances.Items[i].SecurityGroupAWSIDs)
m.RDSInstances.Items[i].Networks = ComponentNamesFromIDs(m.Networks.Items, m.RDSInstances.Items[i].NetworkAWSIDs)
}
}<|fim▁end|>
| |
<|file_name|>iterative_zoom.py<|end_file_name|><|fim▁begin|>from .fft_tools import zoom
import numpy as np
import matplotlib.pyplot as pl
def iterative_zoom(image, mindiff=1., zoomshape=[10,10],
return_zoomed=False, zoomstep=2, verbose=False,
minmax=np.min, ploteach=False, return_center=True):
"""
Iteratively zoom in on the *minimum* position in an image until the
delta-peak value is below `mindiff`
Parameters
----------
image : np.ndarray
Two-dimensional image with a *minimum* to zoom in on (or maximum, if
specified using `minmax`)
mindiff : float
Minimum difference that must be present in image before zooming is done
zoomshape : [int,int]
Shape of the "mini" image to create. Smaller is faster, but a bit less
accurate. [10,10] seems to work well in preliminary tests (though unit
tests have not been written)
return_zoomed : bool
Return the zoomed image in addition to the measured offset?
zoomstep : int
Amount to increase the zoom factor by on each iteration. Probably best to
stick with small integers (2-5ish).
verbose : bool
Print out information about zoom factor, offset at each iteration
minmax : np.min or np.max
Can zoom in on the minimum or maximum of the image
ploteach : bool
Primarily a debug tool, and to be used with extreme caution! Will open
a new figure at each iteration showing the next zoom level.
return_center : bool
Return the center position in original image coordinates? If False,
will retern the *offset from center* instead (but beware the
conventions associated with the concept of 'center' for even images).
Returns
-------
The y,x offsets (following numpy convention) of the center position of the
original image. If `return_zoomed`, returns (zoomed_image, zoom_factor,
offsets) because you can't interpret the zoomed image without the zoom
factor.
"""
image_zoom = image
argminmax = np.argmin if "min" in minmax.__name__ else np.argmax
zf = 1. # "zoom factor" initialized to 1 for the base shift measurement
offset = np.array([0]*image.ndim,dtype='float') # center offset
delta_image = (image_zoom - minmax(image_zoom))
xaxzoom = np.indices(image.shape)
if ploteach:
ii = 1
pl.figure(ii)
pl.clf()
pl.pcolor(np.arange(image.shape[0]+1)-0.5,np.arange(image.shape[1]+1)-0.5, image)
minpos = np.unravel_index(argminmax(image_zoom), image_zoom.shape)
pl.plot(minpos[1],minpos[0],'wx')
# check to make sure the smallest *nonzero* difference > mindiff
while np.abs(delta_image[np.abs(delta_image)>0]).min() > mindiff:
minpos = np.unravel_index(argminmax(image_zoom), image_zoom.shape)
center = xaxzoom[0][minpos],xaxzoom[1][minpos]
offset = xaxzoom[0][minpos]-(image.shape[0]-1)/2,xaxzoom[1][minpos]-(image.shape[1]-1)/2
zf *= zoomstep
xaxzoom, image_zoom = zoom.zoom_on_pixel(image, center, usfac=zf,
outshape=zoomshape, return_xouts=True)
delta_image = image_zoom-minmax(image_zoom)
# base case: in case you can't do any better...
# (at this point, you're all the way zoomed)
if np.all(delta_image == 0):
if verbose:
print("Can't zoom any further. zf=%i" % zf)
break
if verbose:
print(("Zoom factor %6i, center = %30s, offset=%30s, minpos=%30s, min|diff|=%15g" %
(zf, ",".join(["%15g" % c for c in center]),
",".join(["%15g" % c for c in offset]),
",".join(["%5i" % c for c in minpos]),
np.abs(delta_image[np.abs(delta_image)>0]).min()
)))
if ploteach:
ii += 1
pl.figure(ii)
pl.clf()
pl.pcolor(centers_to_edges(xaxzoom[1][0,:]),centers_to_edges(xaxzoom[0][:,0]),image_zoom)
pl.contour(xaxzoom[1],xaxzoom[0],image_zoom-image_zoom.min(),levels=[1,5,15],cmap=pl.cm.gray)
pl.plot(center[1],center[0],'wx')
minpos = np.unravel_index(argminmax(image_zoom), image_zoom.shape)
pl.plot(xaxzoom[1][minpos],
xaxzoom[0][minpos],
'w+')
pl.arrow(center[1],center[0],xaxzoom[1][minpos]-center[1],xaxzoom[0][minpos]-center[0],color='w',
head_width=0.1/zf, linewidth=1./zf, length_includes_head=True)
pl.figure(1)
#pl.contour(xaxzoom[1],xaxzoom[0],image_zoom-image_zoom.min(),levels=[1,5,15],cmap=pl.cm.gray)
pl.arrow(center[1],center[0],xaxzoom[1][minpos]-center[1],xaxzoom[0][minpos]-center[0],color='w',
head_width=0.1/zf, linewidth=1./zf, length_includes_head=True)
if return_center:
result = center
else:
result = offset
if return_zoomed:
return image_zoom,zf,result
else:
return result
def centers_to_edges(arr):
dx = arr[1]-arr[0]
newarr = np.linspace(arr.min()-dx/2,arr.max()+dx/2,arr.size+1)
return newarr
def iterative_zoom_1d(data, mindiff=1., zoomshape=(10,),
return_zoomed=False, zoomstep=2, verbose=False,
minmax=np.min, return_center=True):
"""
Iteratively zoom in on the *minimum* position in a spectrum or timestream
until the delta-peak value is below `mindiff`
Parameters
----------
data : np.ndarray
One-dimensional array with a *minimum* (or maximum, as specified by
minmax) to zoom in on
mindiff : float
Minimum difference that must be present in image before zooming is done
zoomshape : int
Shape of the "mini" image to create. Smaller is faster, but a bit less
accurate. 10 seems to work well in preliminary tests (though unit
tests have not been written)
return_zoomed : bool
Return the zoomed image in addition to the measured offset?
zoomstep : int
Amount to increase the zoom factor by on each iteration. Probably best to
stick with small integers (2-5ish).
verbose : bool
Print out information about zoom factor, offset at each iteration
minmax : np.min or np.max
Can zoom in on the minimum or maximum of the image<|fim▁hole|>
Returns
-------
The x offsets of the center position of the original spectrum. If
`return_zoomed`, returns (zoomed_image, zoom_factor, offsets) because you
can't interpret the zoomed spectrum without the zoom factor.
"""
data_zoom = data
argminmax = np.argmin if "min" in minmax.__name__ else np.argmax
zf = 1. # "zoom factor" initialized to 1 for the base shift measurement
offset = 0.
delta_data = (data_zoom - minmax(data_zoom))
xaxzoom = np.arange(data.size)
# check to make sure the smallest *nonzero* difference > mindiff
while np.abs(delta_data[np.abs(delta_data)>0]).min() > mindiff:
minpos = argminmax(data_zoom)
center = xaxzoom.squeeze()[minpos],
offset = xaxzoom.squeeze()[minpos]-(data.size-1)/2,
zf *= zoomstep
xaxzoom, data_zoom = zoom.zoom_on_pixel(data, center, usfac=zf,
outshape=zoomshape, return_xouts=True)
delta_data = data_zoom-minmax(data_zoom)
# base case: in case you can't do any better...
# (at this point, you're all the way zoomed)
if np.all(delta_data == 0):
if verbose:
print("Can't zoom any further. zf=%i" % zf)
break
if verbose:
print(("Zoom factor %6i, center = %30s, offset=%30s, minpos=%30s, mindiff=%30s" %
(zf, "%15g" % center,
"%15g" % offset,
"%15g" % minpos,
"%15g" % np.abs(delta_data[np.abs(delta_data)>0]).min(),
)))
if return_center:
result = center
else:
result = offset
if return_zoomed:
return data_zoom,zf,result
else:
return result
def centers_to_edges(arr):
dx = arr[1]-arr[0]
newarr = np.linspace(arr.min()-dx/2,arr.max()+dx/2,arr.size+1)
return newarr<|fim▁end|>
|
return_center : bool
Return the center position in original image coordinates? If False,
will retern the *offset from center* instead (but beware the
conventions associated with the concept of 'center' for even images).
|
<|file_name|>validator.py<|end_file_name|><|fim▁begin|># Copyright (C) 2014 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from keystone import exception
from keystone.auth import plugins as auth_plugins
from keystone.common import dependency
from keystone.openstack.common import log
from oauthlib.oauth2 import RequestValidator
try: from oslo.utils import timeutils
except ImportError: from keystone.openstack.common import timeutils
METHOD_NAME = 'oauth2_validator'
LOG = log.getLogger(__name__)
@dependency.requires('oauth2_api')
class OAuth2Validator(RequestValidator):
"""OAuthlib request validator."""
# Ordered roughly in order of appearance in the authorization grant flow
# Pre- and post-authorization.
def validate_client_id(self, client_id, request, *args, **kwargs):
# Simple validity check, does client exist? Not banned?
client_dict = self.oauth2_api.get_consumer(client_id)
if client_dict:
return True
# NOTE(garcianavalon) Currently the sql driver raises an exception
# if the consumer doesnt exist so we throw the Keystone NotFound
# 404 Not Found exception instead of the OAutlib InvalidClientId
# 400 Bad Request exception.
return False
def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
# Is the client allowed to use the supplied redirect_uri? i.e. has
# the client previously registered this EXACT redirect uri.<|fim▁hole|> client_dict = self.oauth2_api.get_consumer(client_id)
registered_uris = client_dict['redirect_uris']
return redirect_uri in registered_uris
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
# The redirect used if none has been supplied.
# Prefer your clients to pre register a redirect uri rather than
# supplying one on each authorization request.
# TODO(garcianavalon) implement
pass
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
# Is the client allowed to access the requested scopes?
if not scopes:
return True # the client is not requesting any scope
client_dict = self.oauth2_api.get_consumer(client_id)
if not client_dict['scopes']:
return False # the client isnt allowed any scopes
for scope in scopes:
if not scope in client_dict['scopes']:
return False
return True
def get_default_scopes(self, client_id, request, *args, **kwargs):
# Scopes a client will authorize for if none are supplied in the
# authorization request.
# TODO(garcianavalon) implement
pass
def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
# Clients should only be allowed to use one type of response type, the
# one associated with their one allowed grant type.
# FIXME(garcianavalon) we need to support multiple grant types
# for the same consumers right now. In the future we should
# separate them and only allow one grant type (registering
# each client one time for each grant or allowing components)
# or update the tools to allow to create clients with
# multiple grants
# client_dict = self.oauth2_api.get_consumer(client_id)
# allowed_response_type = client_dict['response_type']
# return allowed_response_type == response_type
return True
# Post-authorization
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
# Remember to associate it with request.scopes, request.redirect_uri
# request.client, request.state and request.user (the last is passed in
# post_authorization credentials, i.e. { 'user': request.user}.
authorization_code = {
'code': code['code'], # code is a dict with state and the code
'consumer_id': client_id,
'scopes': request.scopes,
'authorizing_user_id': request.user_id, # populated through the credentials
'state': request.state,
'redirect_uri': request.redirect_uri
}
token_duration = 28800 # TODO(garcianavalon) extract as configuration option
# TODO(garcianavalon) find a better place to do this
now = timeutils.utcnow()
future = now + datetime.timedelta(seconds=token_duration)
expiry_date = timeutils.isotime(future, subsecond=True)
authorization_code['expires_at'] = expiry_date
self.oauth2_api.store_authorization_code(authorization_code)
# Token request
def authenticate_client(self, request, *args, **kwargs):
# Whichever authentication method suits you, HTTP Basic might work
# TODO(garcianavalon) write it cleaner
LOG.debug('OAUTH2: authenticating client')
authmethod, auth = request.headers['Authorization'].split(' ', 1)
auth = auth.decode('unicode_escape')
if authmethod.lower() == 'basic':
auth = auth.decode('base64')
client_id, secret = auth.split(':', 1)
client_dict = self.oauth2_api.get_consumer_with_secret(client_id)
if client_dict['secret'] == secret:
# TODO(garcianavalon) this can be done in a cleaner way
#if we change the consumer model attribute to client_id
request.client = type('obj', (object,),
{'client_id' : client_id})
LOG.info('OAUTH2: succesfully authenticated client %s',
client_dict['name'])
return True
return False
def authenticate_client_id(self, client_id, request, *args, **kwargs):
# Don't allow public (non-authenticated) clients
# TODO(garcianavalon) check this method
return False
def validate_code(self, client_id, code, client, request, *args, **kwargs):
# Validate the code belongs to the client. Add associated scopes,
# state and user to request.scopes, request.state and request.user.
authorization_code = self.oauth2_api.get_authorization_code(code)
if not authorization_code['valid']:
return False
if not authorization_code['consumer_id'] == request.client.client_id:
return False
request.scopes = authorization_code['scopes']
request.state = authorization_code['state']
request.user = authorization_code['authorizing_user_id']
return True
def confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):
# You did save the redirect uri with the authorization code right?
authorization_code = self.oauth2_api.get_authorization_code(code)
return authorization_code['redirect_uri'] == redirect_uri
def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
# Clients should only be allowed to use one type of grant.
# FIXME(garcianavalon) we need to support multiple grant types
# for the same consumers right now. In the future we should
# separate them and only allow one grant type (registering
# each client one time for each grant or allowing components)
# or update the tools to allow to create clients with
# multiple grants
# # client_id comes as None, we use the one in request
# client_dict = self.oauth2_api.get_consumer(request.client.client_id)
# return grant_type == client_dict['grant_type']
# TODO(garcianavalon) sync with SQL backend soported grant_types
return grant_type in [
'password', 'authorization_code', 'client_credentials', 'refresh_token',
]
def save_bearer_token(self, token, request, *args, **kwargs):
# Remember to associate it with request.scopes, request.user and
# request.client. The two former will be set when you validate
# the authorization code. Don't forget to save both the
# access_token and the refresh_token and set expiration for the
# access_token to now + expires_in seconds.
# token is a dictionary with the following elements:
# {
# u'access_token': u'iC1DQuu7zOgNIjquPXPmXE5hKnTwgu',
# u'expires_in': 3600,
# u'token_type': u'Bearer',
# u'state': u'yKxWeujbz9VUBncQNrkWvVcx8EXl1w',
# u'scope': u'basic_scope',
# u'refresh_token': u'02DTsL6oWgAibU7xenvXttwG80trJC'
# }
# TODO(garcinanavalon) create a custom TokenCreator instead of
# hacking the dictionary
if getattr(request, 'client', None):
consumer_id = request.client.client_id
else:
consumer_id = request.client_id
if getattr(request, 'user', None):
user_id = request.user
else:
user_id = request.user_id
expires_at = datetime.datetime.today() + datetime.timedelta(seconds=token['expires_in'])
access_token = {
'id':token['access_token'],
'consumer_id':consumer_id,
'authorizing_user_id':user_id,
'scopes': request.scopes,
'expires_at':datetime.datetime.strftime(expires_at, '%Y-%m-%d %H:%M:%S'),
'refresh_token': token.get('refresh_token', None),
}
self.oauth2_api.store_access_token(access_token)
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
# Authorization codes are use once, invalidate it when a Bearer token
# has been acquired.
self.oauth2_api.invalidate_authorization_code(code)
# Protected resource request
def validate_bearer_token(self, token, scopes, request):
# Remember to check expiration and scope membership
try:
access_token = self.oauth2_api.get_access_token(token)
except exception.NotFound:
return False
if (datetime.datetime.strptime(access_token['expires_at'], '%Y-%m-%d %H:%M:%S')
< datetime.datetime.today()):
return False
if access_token['scopes'] != scopes:
return False
# NOTE(garcianavalon) we set some attributes in request for later use. There
# is no documentation about this so I follow the comments found in the example
# at https://oauthlib.readthedocs.org/en/latest/oauth2/endpoints/resource.html
# which are:
# oauthlib_request has a few convenient attributes set such as
# oauthlib_request.client = the client associated with the token
# oauthlib_request.user = the user associated with the token
# oauthlib_request.scopes = the scopes bound to this token
# request.scopes is set by oauthlib already
request.user = access_token['authorizing_user_id']
request.client = access_token['consumer_id']
return True
# Token refresh request
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
# Obtain the token associated with the given refresh_token and
# return its scopes, these will be passed on to the refreshed
# access token if the client did not specify a scope during the
# request.
# TODO(garcianavalon)
return ['all_info']
def is_within_original_scope(self, request_scopes, refresh_token, request, *args, **kwargs):
"""Check if requested scopes are within a scope of the refresh token.
When access tokens are refreshed the scope of the new token
needs to be within the scope of the original token. This is
ensured by checking that all requested scopes strings are on
the list returned by the get_original_scopes. If this check
fails, is_within_original_scope is called. The method can be
used in situations where returning all valid scopes from the
get_original_scopes is not practical.
:param request_scopes: A list of scopes that were requested by client
:param refresh_token: Unicode refresh_token
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Refresh token grant
"""
# TODO(garcianavalon)
return True
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
"""Ensure the Bearer token is valid and authorized access to scopes.
OBS! The request.user attribute should be set to the resource owner
associated with this refresh token.
:param refresh_token: Unicode refresh token
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant (indirectly by issuing refresh tokens)
- Resource Owner Password Credentials Grant (also indirectly)
- Refresh Token Grant
"""
try:
access_token = self.oauth2_api.get_access_token_by_refresh_token(refresh_token)
# Validate that the refresh token is not expired
token_duration = 28800 # TODO(garcianavalon) extract as configuration option
refresh_token_duration = 14 # TODO(garcianavalon) extract as configuration option
# TODO(garcianavalon) find a better place to do this
access_token_expiration_date = datetime.datetime.strptime(
access_token['expires_at'], '%Y-%m-%d %H:%M:%S')
refres_token_expiration_date = (
access_token_expiration_date
- datetime.timedelta(seconds=token_duration)
+ datetime.timedelta(days=refresh_token_duration))
if refres_token_expiration_date < datetime.datetime.today():
return False
except exception.NotFound:
return False
request.user = access_token['authorizing_user_id']
return True
# Support for password grant
def validate_user(self, username, password, client, request,
*args, **kwargs):
"""Ensure the username and password is valid.
OBS! The validation should also set the user attribute of the request
to a valid resource owner, i.e. request.user = username or similar. If
not set you will be unable to associate a token with a user in the
persistance method used (commonly, save_bearer_token).
:param username: Unicode username
:param password: Unicode password
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Resource Owner Password Credentials Grant
"""
# To validate the user, try to authenticate it
password_plugin = auth_plugins.password.Password()
auth_payload = {
'user': {
"domain": {
"id": "default"
},
"name": username,
"password": password
}
}
auth_context = {}
try:
password_plugin.authenticate(
context={},
auth_payload=auth_payload,
auth_context=auth_context)
# set the request user
request.user = auth_context['user_id']
return True
except Exception:
return False<|fim▁end|>
| |
<|file_name|>events.rs<|end_file_name|><|fim▁begin|>macro_rules! struct_events {
(
keyboard: { $( $k_alias:ident : $k_sdl:ident ),* },
// Match against a pattern
else: { $( $e_alias:ident : $e_sdl:pat ),* }
) => {
use ::sdl2::EventPump;
pub struct ImmediateEvents {
// expand all aliases to an Option (true/false/None)
$( pub $k_alias: Option<bool>, )*
// one-shots events:
$( pub $e_alias: bool ),*
}
impl ImmediateEvents {
pub fn new() -> ImmediateEvents {
ImmediateEvents {
$( $k_alias: None, )*
$( $e_alias: false),*
}
}
}
pub struct Events {<|fim▁hole|>
// true => pressed
// false => not pressed
$( pub $k_alias: bool ),*
}
impl Events {
pub fn new(pump: EventPump) -> Events {
Events {
pump: pump,
now: ImmediateEvents::new(),
$( $k_alias: false ),*
}
}
pub fn pump(&mut self) {
self.now = ImmediateEvents::new();
for event in self.pump.poll_iter() {
use sdl2::event::Event::*;
use sdl2::keyboard::Keycode::*;
match event {
KeyDown { keycode, ..} => match keycode {
$(
Some($k_sdl) => {
if !self.$k_alias {
self.now.$k_alias = Some(true);
}
self.$k_alias = true;
}
),*
_ => {},
},
KeyUp { keycode, ..} => match keycode {
$(
Some($k_sdl) => {
self.now.$k_alias = Some(false);
self.$k_alias = false;
}
),*
_ => {},
},
$(
$e_sdl => {
self.now.$e_alias = true;
}
),*
_ => {},
}
}
}
}
}
}<|fim▁end|>
|
pump: EventPump,
pub now : ImmediateEvents,
|
<|file_name|>client.js<|end_file_name|><|fim▁begin|>import './turn-order-a2314c0f.js';
import 'redux';
import 'immer';
import './reducer-4d135cbd.js';
import './Debug-1ad6801e.js';
import 'flatted';
import './ai-ce6b7ece.js';
import './initialize-ec2b5846.js';<|fim▁hole|><|fim▁end|>
|
export { C as Client } from './client-abd9e531.js';
|
<|file_name|>fib_fac.py<|end_file_name|><|fim▁begin|>#-*- coding: utf-8 -*-
def factorial(n):
"""Return the factorial of n"""
if n < 2:
return 1
return n * factorial(n - 1)
def fibonacci(n):
"""Return the nth fibonacci number"""
if n < 2:
return n
return fibonacci(n - 1) + fibonacci(n - 2)
def fib_fac(x=30, y=900):
fib = fibonacci(x)
fac = factorial(y)<|fim▁hole|>
if __name__ == "__main__":
def opc1():
fruits = tuple(str(i) for i in xrange(100))
out = ''
for fruit in fruits:
out += fruit +':'
return out
def opc2():
format_str = '%s:' * 100
fruits = tuple(str(i) for i in xrange(100))
out = format_str % fruits
return out
def opc3():
format_str = '{}:' * 100
fruits = tuple(str(i) for i in xrange(100))
out = format_str.format(*fruits)
return out
def opc4():
fruits = tuple(str(i) for i in xrange(100))
out = ':'.join(fruits)
return out
import timeit
print timeit.timeit(stmt=opc4, number=100)
fib_fac()<|fim▁end|>
|
print "fibonacci({}):".format(x), fib
print "factorial({}):".format(y), fac
|
<|file_name|>manifest.py<|end_file_name|><|fim▁begin|>from jsbuild.attrdict import AttrDict
from time import strftime
class Manifest(AttrDict):
def __init__(self,*args,**kwargs):
super(AttrDict, self).__init__(*args,**kwargs)
self._buffer_ = None
self._parent_ = None
if not self.__contains__('_dict_'):
self['_dict_'] = {}
self['_dict_']['timestamp'] = int(strftime("%Y%m%d%H%M"))
def __getitem__(self,name):
item = super(Manifest,self).__getitem__(name)
if isinstance(item,Manifest) and not item._parent_:
item._parent_ = self
elif isinstance(item,str):
root = self
while root._parent_: root = root._parent_<|fim▁hole|>
return item<|fim▁end|>
|
item = item%root._dict_
|
<|file_name|>command.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
from rest_framework import viewsets
from rest_framework.exceptions import ValidationError
from django.db import transaction
from django.utils.translation import ugettext as _
from django.conf import settings
from orgs.mixins.api import RootOrgViewMixin
from common.permissions import IsValidUser<|fim▁hole|>
class CommandExecutionViewSet(RootOrgViewMixin, viewsets.ModelViewSet):
serializer_class = CommandExecutionSerializer
permission_classes = (IsValidUser,)
def get_queryset(self):
return CommandExecution.objects.filter(
user_id=str(self.request.user.id)
)
def check_hosts(self, serializer):
data = serializer.validated_data
assets = data["hosts"]
system_user = data["run_as"]
util = AssetPermissionUtil(self.request.user)
util.filter_permissions(system_users=system_user.id)
permed_assets = util.get_assets().filter(id__in=[a.id for a in assets])
invalid_assets = set(assets) - set(permed_assets)
if invalid_assets:
msg = _("Not has host {} permission").format(
[str(a.id) for a in invalid_assets]
)
raise ValidationError({"hosts": msg})
def check_permissions(self, request):
if not settings.SECURITY_COMMAND_EXECUTION and request.user.is_common_user:
return self.permission_denied(request, "Command execution disabled")
return super().check_permissions(request)
def perform_create(self, serializer):
self.check_hosts(serializer)
instance = serializer.save()
instance.user = self.request.user
instance.save()
cols = self.request.query_params.get("cols", '80')
rows = self.request.query_params.get("rows", '24')
transaction.on_commit(lambda: run_command_execution.apply_async(
args=(instance.id,), kwargs={"cols": cols, "rows": rows},
task_id=str(instance.id)
))<|fim▁end|>
|
from perms.utils import AssetPermissionUtil
from ..models import CommandExecution
from ..serializers import CommandExecutionSerializer
from ..tasks import run_command_execution
|
<|file_name|>util.go<|end_file_name|><|fim▁begin|>package vauth
import (
"crypto/subtle"<|fim▁hole|>// SecureCompare performs a constant time compare of two strings to limit timing attacks.
func SecureCompare(x string, y string) bool {
if len(x) != len(y) {
return false
}
return subtle.ConstantTimeCompare([]byte(x), []byte(y)) == 1
}<|fim▁end|>
|
)
|
<|file_name|>test_agent.py<|end_file_name|><|fim▁begin|># Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import sys
import uuid
import eventlet
import mock
from oslo_config import cfg
import oslo_messaging
import testtools
from neutron.agent.common import config
from neutron.agent.dhcp import agent as dhcp_agent
from neutron.agent.dhcp import config as dhcp_config
from neutron.agent import dhcp_agent as entry
from neutron.agent.linux import dhcp
from neutron.agent.linux import interface
from neutron.common import config as common_config
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import utils
from neutron import context
from neutron.tests import base
HOSTNAME = 'hostname'
dev_man = dhcp.DeviceManager
rpc_api = dhcp_agent.DhcpPluginApi
DEVICE_MANAGER = '%s.%s' % (dev_man.__module__, dev_man.__name__)
DHCP_PLUGIN = '%s.%s' % (rpc_api.__module__, rpc_api.__name__)
fake_tenant_id = 'aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'
fake_subnet1_allocation_pools = dhcp.DictModel(dict(id='', start='172.9.9.2',
end='172.9.9.254'))
fake_subnet1 = dhcp.DictModel(dict(id='bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='172.9.9.0/24', enable_dhcp=True, name='',
tenant_id=fake_tenant_id,
gateway_ip='172.9.9.1', host_routes=[],
dns_nameservers=[], ip_version=4,
ipv6_ra_mode=None, ipv6_address_mode=None,
allocation_pools=fake_subnet1_allocation_pools))
fake_subnet2_allocation_pools = dhcp.DictModel(dict(id='', start='172.9.8.2',
end='172.9.8.254'))
fake_subnet2 = dhcp.DictModel(dict(id='dddddddd-dddd-dddd-dddddddddddd',
network_id='12345678-1234-5678-1234567890ab',
cidr='172.9.8.0/24', enable_dhcp=False, name='',
tenant_id=fake_tenant_id, gateway_ip='172.9.8.1',
host_routes=[], dns_nameservers=[], ip_version=4,
allocation_pools=fake_subnet2_allocation_pools))
fake_subnet3 = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='192.168.1.1/24', enable_dhcp=True))
fake_ipv6_subnet = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='2001:0db8::0/64', enable_dhcp=True,
tenant_id=fake_tenant_id,
gateway_ip='2001:0db8::1', ip_version=6,
ipv6_ra_mode='slaac', ipv6_address_mode=None))
fake_meta_subnet = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='169.254.169.252/30',
gateway_ip='169.254.169.253',
enable_dhcp=True))
fake_fixed_ip1 = dhcp.DictModel(dict(id='', subnet_id=fake_subnet1.id,
ip_address='172.9.9.9'))
fake_fixed_ip2 = dhcp.DictModel(dict(id='', subnet_id=fake_subnet1.id,
ip_address='172.9.9.10'))
fake_fixed_ipv6 = dhcp.DictModel(dict(id='', subnet_id=fake_ipv6_subnet.id,
ip_address='2001:db8::a8bb:ccff:fedd:ee99'))
fake_meta_fixed_ip = dhcp.DictModel(dict(id='', subnet=fake_meta_subnet,
ip_address='169.254.169.254'))
fake_allocation_pool_subnet1 = dhcp.DictModel(dict(id='', start='172.9.9.2',
end='172.9.9.254'))
fake_port1 = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab',
device_id='dhcp-12345678-1234-aaaa-1234567890ab',
device_owner='',
allocation_pools=fake_subnet1_allocation_pools,
mac_address='aa:bb:cc:dd:ee:ff',
network_id='12345678-1234-5678-1234567890ab',
fixed_ips=[fake_fixed_ip1]))
fake_dhcp_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789022',
device_id='dhcp-12345678-1234-aaaa-123456789022',
device_owner='network:dhcp',
allocation_pools=fake_subnet1_allocation_pools,
mac_address='aa:bb:cc:dd:ee:22',
network_id='12345678-1234-5678-1234567890ab',
fixed_ips=[fake_fixed_ip2]))
fake_port2 = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789000',
device_id='dhcp-12345678-1234-aaaa-123456789000',
device_owner='',
mac_address='aa:bb:cc:dd:ee:99',
network_id='12345678-1234-5678-1234567890ab',
fixed_ips=[fake_fixed_ip2]))
fake_ipv6_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789000',
device_owner='',
mac_address='aa:bb:cc:dd:ee:99',
network_id='12345678-1234-5678-1234567890ab',
fixed_ips=[fake_fixed_ipv6]))
fake_meta_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff',
network_id='12345678-1234-5678-1234567890ab',
device_owner=const.DEVICE_OWNER_ROUTER_INTF,
device_id='forzanapoli',
fixed_ips=[fake_meta_fixed_ip]))
fake_meta_dvr_port = dhcp.DictModel(fake_meta_port.copy())
fake_meta_dvr_port.device_owner = const.DEVICE_OWNER_DVR_INTERFACE
fake_dist_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff',
network_id='12345678-1234-5678-1234567890ab',
device_owner=const.DEVICE_OWNER_DVR_INTERFACE,
device_id='forzanapoli',
fixed_ips=[fake_meta_fixed_ip]))
FAKE_NETWORK_UUID = '12345678-1234-5678-1234567890ab'
FAKE_NETWORK_DHCP_NS = "qdhcp-%s" % FAKE_NETWORK_UUID
fake_network = dhcp.NetModel(True, dict(id=FAKE_NETWORK_UUID,
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_subnet1, fake_subnet2],
ports=[fake_port1]))
fake_network_ipv6 = dhcp.NetModel(True, dict(
id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_ipv6_subnet],
ports=[fake_ipv6_port]))
fake_network_ipv6_ipv4 = dhcp.NetModel(True, dict(
id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_ipv6_subnet, fake_subnet1],
ports=[fake_port1]))
isolated_network = dhcp.NetModel(
True, dict(
id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_subnet1],
ports=[fake_port1]))
nonisolated_dist_network = dhcp.NetModel(
True, dict(
id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_subnet1],
ports=[fake_port1, fake_port2]))
empty_network = dhcp.NetModel(
True, dict(
id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_subnet1],
ports=[]))
fake_meta_network = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_meta_subnet],
ports=[fake_meta_port]))
fake_meta_dvr_network = dhcp.NetModel(True, fake_meta_network.copy())
fake_meta_dvr_network.ports = [fake_meta_dvr_port]
fake_dist_network = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_meta_subnet],
ports=[fake_meta_port, fake_dist_port]))
fake_down_network = dhcp.NetModel(
True, dict(id='12345678-dddd-dddd-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=False,
subnets=[],
ports=[]))
class TestDhcpAgent(base.BaseTestCase):
    """Unit tests for DhcpAgent: startup, driver dispatch and resync logic."""

    def setUp(self):
        super(TestDhcpAgent, self).setUp()
        entry.register_options(cfg.CONF)
        cfg.CONF.set_override('interface_driver',
                              'neutron.agent.linux.interface.NullDriver')
        # disable setting up periodic state reporting
        cfg.CONF.set_override('report_interval', 0, 'AGENT')
        # Replace the DHCP driver class with a mock so no real dnsmasq
        # processes are spawned by these tests.
        self.driver_cls_p = mock.patch(
            'neutron.agent.dhcp.agent.importutils.import_class')
        self.driver = mock.Mock(name='driver')
        self.driver.existing_dhcp_networks.return_value = []
        self.driver_cls = self.driver_cls_p.start()
        self.driver_cls.return_value = self.driver
        self.mock_makedirs_p = mock.patch("os.makedirs")
        self.mock_makedirs = self.mock_makedirs_p.start()

    def test_init_host(self):
        dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
        with mock.patch.object(dhcp, 'sync_state') as sync_state:
            dhcp.init_host()
            sync_state.assert_called_once_with()

    def test_dhcp_agent_manager(self):
        state_rpc_str = 'neutron.agent.rpc.PluginReportStateAPI'
        # sync_state is needed for this test
        cfg.CONF.set_override('report_interval', 1, 'AGENT')
        with mock.patch.object(dhcp_agent.DhcpAgentWithStateReport,
                               'sync_state',
                               autospec=True) as mock_sync_state:
            with mock.patch.object(dhcp_agent.DhcpAgentWithStateReport,
                                   'periodic_resync',
                                   autospec=True) as mock_periodic_resync:
                with mock.patch(state_rpc_str) as state_rpc:
                    with mock.patch.object(sys, 'argv') as sys_argv:
                        sys_argv.return_value = [
                            'dhcp', '--config-file',
                            base.etcdir('neutron.conf')]
                        cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS)
                        config.register_interface_driver_opts_helper(cfg.CONF)
                        config.register_agent_state_opts_helper(cfg.CONF)
                        cfg.CONF.register_opts(interface.OPTS)
                        common_config.init(sys.argv[1:])
                        # BUG FIX: the agent under test was never created,
                        # leaving 'agent_mgr' undefined and the assertions
                        # below raising NameError.  Instantiate and start it
                        # so sync_state/periodic_resync/state reporting fire.
                        agent_mgr = dhcp_agent.DhcpAgentWithStateReport(
                            HOSTNAME)
                        # Let the 1s report_interval looping call run once.
                        dhcp_agent.eventlet.greenthread.sleep(1)
                        agent_mgr.after_start()
                        mock_sync_state.assert_called_once_with(agent_mgr)
                        mock_periodic_resync.assert_called_once_with(
                            agent_mgr)
                        state_rpc.assert_has_calls(
                            [mock.call(mock.ANY),
                             mock.call().report_state(mock.ANY, mock.ANY,
                                                      mock.ANY)])

    def test_dhcp_agent_main_agent_manager(self):
        logging_str = 'neutron.agent.common.config.setup_logging'
        launcher_str = 'oslo_service.service.ServiceLauncher'
        with mock.patch(logging_str):
            with mock.patch.object(sys, 'argv') as sys_argv:
                with mock.patch(launcher_str) as launcher:
                    sys_argv.return_value = ['dhcp', '--config-file',
                                             base.etcdir('neutron.conf')]
                    entry.main()
                    launcher.assert_has_calls(
                        [mock.call(cfg.CONF),
                         mock.call().launch_service(mock.ANY),
                         mock.call().wait()])

    def test_run_completes_single_pass(self):
        with mock.patch(DEVICE_MANAGER):
            dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
            attrs_to_mock = dict(
                [(a, mock.DEFAULT) for a in
                 ['sync_state', 'periodic_resync']])
            with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks:
                dhcp.run()
                mocks['sync_state'].assert_called_once_with()
                mocks['periodic_resync'].assert_called_once_with()

    def test_call_driver(self):
        network = mock.Mock()
        network.id = '1'
        dhcp = dhcp_agent.DhcpAgent(cfg.CONF)
        self.assertTrue(dhcp.call_driver('foo', network))
        self.driver.assert_called_once_with(cfg.CONF,
                                            mock.ANY,
                                            mock.ANY,
                                            mock.ANY,
                                            mock.ANY)

    def _test_call_driver_failure(self, exc=None,
                                  trace_level='exception', expected_sync=True):
        # Drive call_driver into the given exception and verify both the
        # logging level used and whether a resync is scheduled.
        network = mock.Mock()
        network.id = '1'
        self.driver.return_value.foo.side_effect = exc or Exception
        with mock.patch.object(dhcp_agent.LOG, trace_level) as log:
            dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
            with mock.patch.object(dhcp,
                                   'schedule_resync') as schedule_resync:
                self.assertIsNone(dhcp.call_driver('foo', network))
                self.driver.assert_called_once_with(cfg.CONF,
                                                    mock.ANY,
                                                    mock.ANY,
                                                    mock.ANY,
                                                    mock.ANY)
                self.assertEqual(log.call_count, 1)
                self.assertEqual(expected_sync, schedule_resync.called)

    def test_call_driver_ip_address_generation_failure(self):
        error = oslo_messaging.RemoteError(
            exc_type='IpAddressGenerationFailure')
        self._test_call_driver_failure(exc=error, expected_sync=False)

    def test_call_driver_failure(self):
        self._test_call_driver_failure()

    def test_call_driver_remote_error_net_not_found(self):
        self._test_call_driver_failure(
            exc=oslo_messaging.RemoteError(exc_type='NetworkNotFound'),
            trace_level='warning')

    def test_call_driver_network_not_found(self):
        self._test_call_driver_failure(
            exc=exceptions.NetworkNotFound(net_id='1'),
            trace_level='warning')

    def test_call_driver_conflict(self):
        self._test_call_driver_failure(
            exc=exceptions.Conflict(),
            trace_level='warning',
            expected_sync=False)

    def _test_sync_state_helper(self, known_net_ids, active_net_ids):
        # Sync the cache of known networks against the plugin's active set
        # and verify stale networks are disabled.
        active_networks = set(mock.Mock(id=netid) for netid in active_net_ids)
        with mock.patch(DHCP_PLUGIN) as plug:
            mock_plugin = mock.Mock()
            mock_plugin.get_active_networks_info.return_value = active_networks
            plug.return_value = mock_plugin
            dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
            attrs_to_mock = dict([(a, mock.DEFAULT)
                                  for a in ['disable_dhcp_helper', 'cache',
                                            'safe_configure_dhcp_for_network']])
            with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks:
                mocks['cache'].get_network_ids.return_value = known_net_ids
                dhcp.sync_state()
                diff = set(known_net_ids) - set(active_net_ids)
                exp_disable = [mock.call(net_id) for net_id in diff]
                mocks['cache'].assert_has_calls([mock.call.get_network_ids()])
                mocks['disable_dhcp_helper'].assert_has_calls(exp_disable)

    def test_sync_state_initial(self):
        self._test_sync_state_helper([], ['a'])

    def test_sync_state_same(self):
        self._test_sync_state_helper(['a'], ['a'])

    def test_sync_state_disabled_net(self):
        self._test_sync_state_helper(['b'], ['a'])

    def test_sync_state_waitall(self):
        with mock.patch.object(dhcp_agent.eventlet.GreenPool, 'waitall') as w:
            active_net_ids = ['1', '2', '3', '4', '5']
            known_net_ids = ['1', '2', '3', '4', '5']
            self._test_sync_state_helper(known_net_ids, active_net_ids)
            w.assert_called_once_with()

    def test_sync_state_plugin_error(self):
        with mock.patch(DHCP_PLUGIN) as plug:
            mock_plugin = mock.Mock()
            mock_plugin.get_active_networks_info.side_effect = Exception
            plug.return_value = mock_plugin
            with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
                dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
                with mock.patch.object(dhcp,
                                       'schedule_resync') as schedule_resync:
                    dhcp.sync_state()
                    self.assertTrue(log.called)
                    self.assertTrue(schedule_resync.called)

    def test_periodic_resync(self):
        dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
        with mock.patch.object(dhcp_agent.eventlet, 'spawn') as spawn:
            dhcp.periodic_resync()
            spawn.assert_called_once_with(dhcp._periodic_resync_helper)

    def test_periodic_resync_helper(self):
        with mock.patch.object(dhcp_agent.eventlet, 'sleep') as sleep:
            dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
            resync_reasons = collections.OrderedDict(
                (('a', 'reason1'), ('b', 'reason2')))
            dhcp.needs_resync_reasons = resync_reasons
            with mock.patch.object(dhcp, 'sync_state') as sync_state:
                sync_state.side_effect = RuntimeError
                with testtools.ExpectedException(RuntimeError):
                    dhcp._periodic_resync_helper()
            sync_state.assert_called_once_with(resync_reasons.keys())
            sleep.assert_called_once_with(dhcp.conf.resync_interval)
            self.assertEqual(len(dhcp.needs_resync_reasons), 0)

    def test_populate_cache_on_start_without_active_networks_support(self):
        # emul dhcp driver that doesn't support retrieving of active networks
        self.driver.existing_dhcp_networks.side_effect = NotImplementedError
        with mock.patch.object(dhcp_agent.LOG, 'debug') as log:
            dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
            self.driver.existing_dhcp_networks.assert_called_once_with(
                dhcp.conf,
            )
            self.assertFalse(dhcp.cache.get_network_ids())
            self.assertTrue(log.called)

    def test_populate_cache_on_start(self):
        networks = ['aaa', 'bbb']
        self.driver.existing_dhcp_networks.return_value = networks
        dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
        self.driver.existing_dhcp_networks.assert_called_once_with(
            dhcp.conf,
        )
        self.assertEqual(set(networks), set(dhcp.cache.get_network_ids()))

    def test_none_interface_driver(self):
        cfg.CONF.set_override('interface_driver', None)
        self.assertRaises(SystemExit, dhcp.DeviceManager,
                          cfg.CONF, None)

    def test_nonexistent_interface_driver(self):
        # Temporarily turn off mock, so could use the real import_class
        # to import interface_driver.
        self.driver_cls_p.stop()
        self.addCleanup(self.driver_cls_p.start)
        cfg.CONF.set_override('interface_driver', 'foo.bar')
        self.assertRaises(SystemExit, dhcp.DeviceManager,
                          cfg.CONF, None)
class TestLogArgs(base.BaseTestCase):
    """Tests for config.get_log_args: config dict -> expected CLI arguments."""

    def _check_log_args(self, conf_dict, expected_args):
        # Build a DictModel from the raw dict and compare the produced
        # command-line arguments against the expectation.
        conf = dhcp.DictModel(conf_dict)
        self.assertEqual(expected_args,
                         config.get_log_args(conf, 'log_file_name'))

    def test_log_args_without_log_dir_and_file(self):
        self._check_log_args(
            {'debug': True,
             'verbose': False,
             'log_dir': None,
             'log_file': None,
             'use_syslog': True,
             'syslog_log_facility': 'LOG_USER'},
            ['--debug',
             '--use-syslog',
             '--syslog-log-facility=LOG_USER'])

    def test_log_args_without_log_file(self):
        self._check_log_args(
            {'debug': True,
             'verbose': True,
             'log_dir': '/etc/tests',
             'log_file': None,
             'use_syslog': False,
             'syslog_log_facility': 'LOG_USER'},
            ['--debug',
             '--verbose',
             '--log-file=log_file_name',
             '--log-dir=/etc/tests'])

    def test_log_args_with_log_dir_and_file(self):
        self._check_log_args(
            {'debug': True,
             'verbose': False,
             'log_dir': '/etc/tests',
             'log_file': 'tests/filelog',
             'use_syslog': False,
             'syslog_log_facility': 'LOG_USER'},
            ['--debug',
             '--log-file=log_file_name',
             '--log-dir=/etc/tests/tests'])

    def test_log_args_without_log_dir(self):
        self._check_log_args(
            {'debug': True,
             'verbose': False,
             'log_file': 'tests/filelog',
             'log_dir': None,
             'use_syslog': False,
             'syslog_log_facility': 'LOG_USER'},
            ['--debug',
             '--log-file=log_file_name',
             '--log-dir=tests'])

    def test_log_args_with_filelog_and_syslog(self):
        self._check_log_args(
            {'debug': True,
             'verbose': True,
             'log_file': 'tests/filelog',
             'log_dir': '/etc/tests',
             'use_syslog': True,
             'syslog_log_facility': 'LOG_USER'},
            ['--debug',
             '--verbose',
             '--log-file=log_file_name',
             '--log-dir=/etc/tests/tests'])
class TestDhcpAgentEventHandler(base.BaseTestCase):
    """Tests for the DhcpAgent RPC event handlers (network/subnet/port
    create/update/delete) and the metadata-proxy management paths.

    Fix applied: two tests used to mutate shared module-level fixtures
    (``nonisolated_dist_network`` and ``fake_port1``) in place, leaking
    state into every later test that reads those fixtures.  They now work
    on deep copies.
    """

    def setUp(self):
        super(TestDhcpAgentEventHandler, self).setUp()
        config.register_interface_driver_opts_helper(cfg.CONF)
        cfg.CONF.set_override('interface_driver',
                              'neutron.agent.linux.interface.NullDriver')
        entry.register_options(cfg.CONF)  # register all dhcp cfg options
        self.plugin_p = mock.patch(DHCP_PLUGIN)
        plugin_cls = self.plugin_p.start()
        self.plugin = mock.Mock()
        plugin_cls.return_value = self.plugin
        self.cache_p = mock.patch('neutron.agent.dhcp.agent.NetworkCache')
        cache_cls = self.cache_p.start()
        self.cache = mock.Mock()
        cache_cls.return_value = self.cache
        self.mock_makedirs_p = mock.patch("os.makedirs")
        self.mock_makedirs = self.mock_makedirs_p.start()
        self.mock_init_p = mock.patch('neutron.agent.dhcp.agent.'
                                      'DhcpAgent._populate_networks_cache')
        self.mock_init = self.mock_init_p.start()
        self.dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
        self.call_driver_p = mock.patch.object(self.dhcp, 'call_driver')
        self.call_driver = self.call_driver_p.start()
        self.schedule_resync_p = mock.patch.object(self.dhcp,
                                                   'schedule_resync')
        self.schedule_resync = self.schedule_resync_p.start()
        self.external_process_p = mock.patch(
            'neutron.agent.linux.external_process.ProcessManager'
        )
        self.external_process = self.external_process_p.start()

    def _process_manager_constructor_call(self, ns=FAKE_NETWORK_DHCP_NS):
        # Expected ProcessManager construction for the metadata proxy.
        return mock.call(conf=cfg.CONF,
                         uuid=FAKE_NETWORK_UUID,
                         namespace=ns,
                         default_cmd_callback=mock.ANY)

    def _enable_dhcp_helper(self, network, enable_isolated_metadata=False,
                            is_isolated_network=False):
        # Run enable_dhcp_helper for the given network fixture and verify
        # driver, cache and (optionally) metadata-proxy interactions.
        self.dhcp._process_monitor = mock.Mock()
        if enable_isolated_metadata:
            cfg.CONF.set_override('enable_isolated_metadata', True)
        self.plugin.get_network_info.return_value = network
        self.dhcp.enable_dhcp_helper(network.id)
        self.plugin.assert_has_calls([
            mock.call.get_network_info(network.id)])
        self.call_driver.assert_called_once_with('enable', network)
        self.cache.assert_has_calls([mock.call.put(network)])
        if is_isolated_network:
            self.external_process.assert_has_calls([
                self._process_manager_constructor_call(),
                mock.call().enable()
            ])
        else:
            self.assertFalse(self.external_process.call_count)

    def test_enable_dhcp_helper_enable_metadata_isolated_network(self):
        self._enable_dhcp_helper(isolated_network,
                                 enable_isolated_metadata=True,
                                 is_isolated_network=True)

    def test_enable_dhcp_helper_enable_metadata_no_gateway(self):
        isolated_network_no_gateway = copy.deepcopy(isolated_network)
        isolated_network_no_gateway.subnets[0].gateway_ip = None
        self._enable_dhcp_helper(isolated_network_no_gateway,
                                 enable_isolated_metadata=True,
                                 is_isolated_network=True)

    def test_enable_dhcp_helper_enable_metadata_nonisolated_network(self):
        nonisolated_network = copy.deepcopy(isolated_network)
        nonisolated_network.ports[0].device_owner = (
            const.DEVICE_OWNER_ROUTER_INTF)
        nonisolated_network.ports[0].fixed_ips[0].ip_address = '172.9.9.1'
        self._enable_dhcp_helper(nonisolated_network,
                                 enable_isolated_metadata=True,
                                 is_isolated_network=False)

    def test_enable_dhcp_helper_enable_metadata_nonisolated_dist_network(self):
        # BUG FIX: work on a deep copy so the shared module-level fixture is
        # not mutated in place and state does not leak into other tests.
        dist_network = copy.deepcopy(nonisolated_dist_network)
        dist_network.ports[0].device_owner = (
            const.DEVICE_OWNER_ROUTER_INTF)
        dist_network.ports[0].fixed_ips[0].ip_address = '172.9.9.1'
        dist_network.ports[1].device_owner = (
            const.DEVICE_OWNER_DVR_INTERFACE)
        dist_network.ports[1].fixed_ips[0].ip_address = '172.9.9.1'
        self._enable_dhcp_helper(dist_network,
                                 enable_isolated_metadata=True,
                                 is_isolated_network=False)

    def test_enable_dhcp_helper_enable_metadata_empty_network(self):
        self._enable_dhcp_helper(empty_network,
                                 enable_isolated_metadata=True,
                                 is_isolated_network=True)

    def test_enable_dhcp_helper_enable_metadata_ipv6_ipv4_network(self):
        self._enable_dhcp_helper(fake_network_ipv6_ipv4,
                                 enable_isolated_metadata=True,
                                 is_isolated_network=True)

    def test_enable_dhcp_helper_driver_failure_ipv6_ipv4_network(self):
        self.plugin.get_network_info.return_value = fake_network_ipv6_ipv4
        self.call_driver.return_value = False
        cfg.CONF.set_override('enable_isolated_metadata', True)
        with mock.patch.object(
            self.dhcp, 'enable_isolated_metadata_proxy') as enable_metadata:
            self.dhcp.enable_dhcp_helper(fake_network_ipv6_ipv4.id)
            self.plugin.assert_has_calls(
                [mock.call.get_network_info(fake_network_ipv6_ipv4.id)])
            self.call_driver.assert_called_once_with('enable',
                                                     fake_network_ipv6_ipv4)
            self.assertFalse(self.cache.called)
            self.assertFalse(enable_metadata.called)
            self.assertFalse(self.external_process.called)

    def test_enable_dhcp_helper(self):
        self._enable_dhcp_helper(fake_network)

    def test_enable_dhcp_helper_ipv6_network(self):
        self._enable_dhcp_helper(fake_network_ipv6)

    def test_enable_dhcp_helper_down_network(self):
        self.plugin.get_network_info.return_value = fake_down_network
        self.dhcp.enable_dhcp_helper(fake_down_network.id)
        self.plugin.assert_has_calls(
            [mock.call.get_network_info(fake_down_network.id)])
        self.assertFalse(self.call_driver.called)
        self.assertFalse(self.cache.called)
        self.assertFalse(self.external_process.called)

    def test_enable_dhcp_helper_network_none(self):
        self.plugin.get_network_info.return_value = None
        with mock.patch.object(dhcp_agent.LOG, 'warn') as log:
            self.dhcp.enable_dhcp_helper('fake_id')
            self.plugin.assert_has_calls(
                [mock.call.get_network_info('fake_id')])
            self.assertFalse(self.call_driver.called)
            self.assertTrue(log.called)
            self.assertFalse(self.dhcp.schedule_resync.called)

    def test_enable_dhcp_helper_exception_during_rpc(self):
        self.plugin.get_network_info.side_effect = Exception
        with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
            self.dhcp.enable_dhcp_helper(fake_network.id)
            self.plugin.assert_has_calls(
                [mock.call.get_network_info(fake_network.id)])
            self.assertFalse(self.call_driver.called)
            self.assertTrue(log.called)
            self.assertTrue(self.schedule_resync.called)
            self.assertFalse(self.cache.called)
            self.assertFalse(self.external_process.called)

    def test_enable_dhcp_helper_driver_failure(self):
        self.plugin.get_network_info.return_value = fake_network
        self.call_driver.return_value = False
        self.dhcp.enable_dhcp_helper(fake_network.id)
        self.plugin.assert_has_calls(
            [mock.call.get_network_info(fake_network.id)])
        self.call_driver.assert_called_once_with('enable', fake_network)
        self.assertFalse(self.cache.called)
        self.assertFalse(self.external_process.called)

    def _disable_dhcp_helper_known_network(self, isolated_metadata=False):
        if isolated_metadata:
            cfg.CONF.set_override('enable_isolated_metadata', True)
        self.cache.get_network_by_id.return_value = fake_network
        self.dhcp.disable_dhcp_helper(fake_network.id)
        self.cache.assert_has_calls(
            [mock.call.get_network_by_id(fake_network.id)])
        self.call_driver.assert_called_once_with('disable', fake_network)
        if isolated_metadata:
            self.external_process.assert_has_calls([
                self._process_manager_constructor_call(ns=None),
                mock.call().disable()])
        else:
            self.assertFalse(self.external_process.call_count)

    def test_disable_dhcp_helper_known_network_isolated_metadata(self):
        self._disable_dhcp_helper_known_network(isolated_metadata=True)

    def test_disable_dhcp_helper_known_network(self):
        self._disable_dhcp_helper_known_network()

    def test_disable_dhcp_helper_unknown_network(self):
        self.cache.get_network_by_id.return_value = None
        self.dhcp.disable_dhcp_helper('abcdef')
        self.cache.assert_has_calls(
            [mock.call.get_network_by_id('abcdef')])
        self.assertEqual(0, self.call_driver.call_count)
        self.assertFalse(self.external_process.called)

    def _disable_dhcp_helper_driver_failure(self, isolated_metadata=False):
        if isolated_metadata:
            cfg.CONF.set_override('enable_isolated_metadata', True)
        self.cache.get_network_by_id.return_value = fake_network
        self.call_driver.return_value = False
        self.dhcp.disable_dhcp_helper(fake_network.id)
        self.cache.assert_has_calls(
            [mock.call.get_network_by_id(fake_network.id)])
        self.call_driver.assert_called_once_with('disable', fake_network)
        self.cache.assert_has_calls(
            [mock.call.get_network_by_id(fake_network.id)])
        if isolated_metadata:
            self.external_process.assert_has_calls([
                self._process_manager_constructor_call(ns=None),
                mock.call().disable()
            ])
        else:
            self.assertFalse(self.external_process.call_count)

    def test_disable_dhcp_helper_driver_failure_isolated_metadata(self):
        self._disable_dhcp_helper_driver_failure(isolated_metadata=True)

    def test_disable_dhcp_helper_driver_failure(self):
        self._disable_dhcp_helper_driver_failure()

    def test_enable_isolated_metadata_proxy(self):
        self.dhcp._process_monitor = mock.Mock()
        self.dhcp.enable_isolated_metadata_proxy(fake_network)
        self.external_process.assert_has_calls([
            self._process_manager_constructor_call(),
            mock.call().enable()
        ])

    def test_disable_isolated_metadata_proxy(self):
        method_path = ('neutron.agent.metadata.driver.MetadataDriver'
                       '.destroy_monitored_metadata_proxy')
        with mock.patch(method_path) as destroy:
            self.dhcp.disable_isolated_metadata_proxy(fake_network)
            destroy.assert_called_once_with(self.dhcp._process_monitor,
                                            fake_network.id,
                                            cfg.CONF)

    def _test_metadata_network(self, network):
        # Metadata networks spawn a proxy bound to the router the
        # metadata subnet is attached to.
        cfg.CONF.set_override('enable_metadata_network', True)
        cfg.CONF.set_override('debug', True)
        cfg.CONF.set_override('verbose', False)
        cfg.CONF.set_override('log_file', 'test.log')
        method_path = ('neutron.agent.metadata.driver.MetadataDriver'
                       '.spawn_monitored_metadata_proxy')
        with mock.patch(method_path) as spawn:
            self.dhcp.enable_isolated_metadata_proxy(network)
            spawn.assert_called_once_with(self.dhcp._process_monitor,
                                          network.namespace,
                                          dhcp.METADATA_PORT,
                                          cfg.CONF,
                                          router_id='forzanapoli')

    def test_enable_isolated_metadata_proxy_with_metadata_network(self):
        self._test_metadata_network(fake_meta_network)

    def test_enable_isolated_metadata_proxy_with_metadata_network_dvr(self):
        self._test_metadata_network(fake_meta_dvr_network)

    def test_enable_isolated_metadata_proxy_with_dist_network(self):
        self._test_metadata_network(fake_dist_network)

    def test_network_create_end(self):
        payload = dict(network=dict(id=fake_network.id))
        with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable:
            self.dhcp.network_create_end(None, payload)
            enable.assert_called_once_with(fake_network.id)

    def test_network_update_end_admin_state_up(self):
        payload = dict(network=dict(id=fake_network.id, admin_state_up=True))
        with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable:
            self.dhcp.network_update_end(None, payload)
            enable.assert_called_once_with(fake_network.id)

    def test_network_update_end_admin_state_down(self):
        payload = dict(network=dict(id=fake_network.id, admin_state_up=False))
        with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
            self.dhcp.network_update_end(None, payload)
            disable.assert_called_once_with(fake_network.id)

    def test_network_delete_end(self):
        payload = dict(network_id=fake_network.id)
        with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
            self.dhcp.network_delete_end(None, payload)
            disable.assert_called_once_with(fake_network.id)

    def test_refresh_dhcp_helper_no_dhcp_enabled_networks(self):
        network = dhcp.NetModel(True, dict(id='net-id',
                                tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
                                admin_state_up=True,
                                subnets=[],
                                ports=[]))
        self.cache.get_network_by_id.return_value = network
        self.plugin.get_network_info.return_value = network
        with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
            self.dhcp.refresh_dhcp_helper(network.id)
            disable.assert_called_once_with(network.id)
            self.assertFalse(self.cache.called)
            self.assertFalse(self.call_driver.called)
            self.cache.assert_has_calls(
                [mock.call.get_network_by_id('net-id')])

    def test_refresh_dhcp_helper_exception_during_rpc(self):
        network = dhcp.NetModel(True, dict(id='net-id',
                                tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
                                admin_state_up=True,
                                subnets=[],
                                ports=[]))
        self.cache.get_network_by_id.return_value = network
        self.plugin.get_network_info.side_effect = Exception
        with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
            self.dhcp.refresh_dhcp_helper(network.id)
            self.assertFalse(self.call_driver.called)
            self.cache.assert_has_calls(
                [mock.call.get_network_by_id('net-id')])
            self.assertTrue(log.called)
            self.assertTrue(self.dhcp.schedule_resync.called)

    def test_subnet_update_end(self):
        payload = dict(subnet=dict(network_id=fake_network.id))
        self.cache.get_network_by_id.return_value = fake_network
        self.plugin.get_network_info.return_value = fake_network
        self.dhcp.subnet_update_end(None, payload)
        self.cache.assert_has_calls([mock.call.put(fake_network)])
        self.call_driver.assert_called_once_with('reload_allocations',
                                                 fake_network)

    def test_subnet_update_end_restart(self):
        new_state = dhcp.NetModel(True, dict(id=fake_network.id,
                                  tenant_id=fake_network.tenant_id,
                                  admin_state_up=True,
                                  subnets=[fake_subnet1, fake_subnet3],
                                  ports=[fake_port1]))
        payload = dict(subnet=dict(network_id=fake_network.id))
        self.cache.get_network_by_id.return_value = fake_network
        self.plugin.get_network_info.return_value = new_state
        self.dhcp.subnet_update_end(None, payload)
        self.cache.assert_has_calls([mock.call.put(new_state)])
        self.call_driver.assert_called_once_with('restart',
                                                 new_state)

    def test_subnet_update_end_delete_payload(self):
        prev_state = dhcp.NetModel(True, dict(id=fake_network.id,
                                   tenant_id=fake_network.tenant_id,
                                   admin_state_up=True,
                                   subnets=[fake_subnet1, fake_subnet3],
                                   ports=[fake_port1]))
        payload = dict(subnet_id=fake_subnet1.id)
        self.cache.get_network_by_subnet_id.return_value = prev_state
        self.cache.get_network_by_id.return_value = prev_state
        self.plugin.get_network_info.return_value = fake_network
        self.dhcp.subnet_delete_end(None, payload)
        self.cache.assert_has_calls([
            mock.call.get_network_by_subnet_id(
                'bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb'),
            mock.call.get_network_by_id('12345678-1234-5678-1234567890ab'),
            mock.call.put(fake_network)])
        self.call_driver.assert_called_once_with('restart',
                                                 fake_network)

    def test_port_update_end(self):
        payload = dict(port=fake_port2)
        self.cache.get_network_by_id.return_value = fake_network
        self.cache.get_port_by_id.return_value = fake_port2
        self.dhcp.port_update_end(None, payload)
        self.cache.assert_has_calls(
            [mock.call.get_network_by_id(fake_port2.network_id),
             mock.call.put_port(mock.ANY)])
        self.call_driver.assert_called_once_with('reload_allocations',
                                                 fake_network)

    def test_port_update_change_ip_on_port(self):
        payload = dict(port=fake_port1)
        self.cache.get_network_by_id.return_value = fake_network
        updated_fake_port1 = copy.deepcopy(fake_port1)
        updated_fake_port1.fixed_ips[0].ip_address = '172.9.9.99'
        self.cache.get_port_by_id.return_value = updated_fake_port1
        self.dhcp.port_update_end(None, payload)
        self.cache.assert_has_calls(
            [mock.call.get_network_by_id(fake_port1.network_id),
             mock.call.put_port(mock.ANY)])
        self.call_driver.assert_has_calls(
            [mock.call.call_driver('reload_allocations', fake_network)])

    def test_port_update_change_ip_on_dhcp_agents_port(self):
        self.cache.get_network_by_id.return_value = fake_network
        self.cache.get_port_by_id.return_value = fake_port1
        payload = dict(port=copy.deepcopy(fake_port1))
        device_id = utils.get_dhcp_agent_device_id(
            payload['port']['network_id'], self.dhcp.conf.host)
        payload['port']['fixed_ips'][0]['ip_address'] = '172.9.9.99'
        payload['port']['device_id'] = device_id
        self.dhcp.port_update_end(None, payload)
        self.call_driver.assert_has_calls(
            [mock.call.call_driver('restart', fake_network)])

    def test_port_update_on_dhcp_agents_port_no_ip_change(self):
        self.cache.get_network_by_id.return_value = fake_network
        self.cache.get_port_by_id.return_value = fake_port1
        # BUG FIX: deep-copy the fixture port so assigning device_id below
        # does not mutate the shared module-level fake_port1.
        payload = dict(port=copy.deepcopy(fake_port1))
        device_id = utils.get_dhcp_agent_device_id(
            payload['port']['network_id'], self.dhcp.conf.host)
        payload['port']['device_id'] = device_id
        self.dhcp.port_update_end(None, payload)
        self.call_driver.assert_has_calls(
            [mock.call.call_driver('reload_allocations', fake_network)])

    def test_port_delete_end(self):
        payload = dict(port_id=fake_port2.id)
        self.cache.get_network_by_id.return_value = fake_network
        self.cache.get_port_by_id.return_value = fake_port2
        self.dhcp.port_delete_end(None, payload)
        self.cache.assert_has_calls(
            [mock.call.get_port_by_id(fake_port2.id),
             mock.call.get_network_by_id(fake_network.id),
             mock.call.remove_port(fake_port2)])
        self.call_driver.assert_has_calls(
            [mock.call.call_driver('reload_allocations', fake_network)])

    def test_port_delete_end_unknown_port(self):
        payload = dict(port_id='unknown')
        self.cache.get_port_by_id.return_value = None
        self.dhcp.port_delete_end(None, payload)
        self.cache.assert_has_calls([mock.call.get_port_by_id('unknown')])
        self.assertEqual(self.call_driver.call_count, 0)
class TestDhcpPluginApiProxy(base.BaseTestCase):
    """Tests that DhcpPluginApi methods translate into the expected RPC calls."""

    def _test_dhcp_api(self, method, **kwargs):
        """Call ``method`` on a DhcpPluginApi proxy with mocked RPC transport.

        Keyword args are forwarded to the proxy method, except the special
        keys 'return_value' (what the mocked RPC call returns) and 'version'
        (expected RPC version passed to client.prepare()).
        """
        ctxt = context.get_admin_context()
        proxy = dhcp_agent.DhcpPluginApi('foo', ctxt, None, host='foo')
        with mock.patch.object(proxy.client, 'call') as rpc_mock,\
                mock.patch.object(proxy.client, 'prepare') as prepare_mock:
            prepare_mock.return_value = proxy.client
            rpc_mock.return_value = kwargs.pop('return_value', [])
            prepare_args = {}
            if 'version' in kwargs:
                prepare_args['version'] = kwargs.pop('version')
            retval = getattr(proxy, method)(**kwargs)
            self.assertEqual(retval, rpc_mock.return_value)
            prepare_mock.assert_called_once_with(**prepare_args)
            # The proxy always appends its own host to the RPC arguments.
            kwargs['host'] = proxy.host
            rpc_mock.assert_called_once_with(ctxt, method, **kwargs)

    def test_get_active_networks_info(self):
        self._test_dhcp_api('get_active_networks_info', version='1.1')

    def test_get_network_info(self):
        self._test_dhcp_api('get_network_info', network_id='fake_id',
                            return_value=None)

    def test_create_dhcp_port(self):
        self._test_dhcp_api('create_dhcp_port', port='fake_port',
                            return_value=None, version='1.1')

    def test_update_dhcp_port(self):
        self._test_dhcp_api('update_dhcp_port', port_id='fake_id',
                            port='fake_port', return_value=None, version='1.1')

    def test_release_dhcp_port(self):
        self._test_dhcp_api('release_dhcp_port', network_id='fake_id',
                            device_id='fake_id_2')

    def test_release_port_fixed_ip(self):
        self._test_dhcp_api('release_port_fixed_ip', network_id='fake_id',
                            device_id='fake_id_2', subnet_id='fake_id_3')
class TestNetworkCache(base.BaseTestCase):
    """Tests for NetworkCache: network, subnet and port lookups stay in sync."""

    def _cache_with(self, network):
        # Build a fresh cache pre-populated with a single network.
        cache = dhcp_agent.NetworkCache()
        cache.put(network)
        return cache

    def _one_port_net(self):
        # Network holding a single subnet/port pair, used by the port tests.
        return dhcp.NetModel(
            True, dict(id='12345678-1234-5678-1234567890ab',
                       tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
                       subnets=[fake_subnet1],
                       ports=[fake_port1]))

    def _two_port_net(self):
        # Same network but with two ports already present.
        return dhcp.NetModel(
            True, dict(id='12345678-1234-5678-1234567890ab',
                       tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
                       subnets=[fake_subnet1],
                       ports=[fake_port1, fake_port2]))

    def test_put_network(self):
        cache = self._cache_with(fake_network)
        self.assertEqual({fake_network.id: fake_network}, cache.cache)
        self.assertEqual({fake_subnet1.id: fake_network.id,
                          fake_subnet2.id: fake_network.id},
                         cache.subnet_lookup)
        self.assertEqual({fake_port1.id: fake_network.id},
                         cache.port_lookup)

    def test_put_network_existing(self):
        stale_info = mock.Mock()
        cache = dhcp_agent.NetworkCache()
        with mock.patch.object(cache, 'remove') as remove:
            cache.cache[fake_network.id] = stale_info
            cache.put(fake_network)
            # Any pre-existing entry must be evicted before the new one lands.
            remove.assert_called_once_with(stale_info)
        self.assertEqual({fake_network.id: fake_network}, cache.cache)
        self.assertEqual({fake_subnet1.id: fake_network.id,
                          fake_subnet2.id: fake_network.id},
                         cache.subnet_lookup)
        self.assertEqual({fake_port1.id: fake_network.id},
                         cache.port_lookup)

    def test_remove_network(self):
        cache = dhcp_agent.NetworkCache()
        cache.cache = {fake_network.id: fake_network}
        cache.subnet_lookup = {fake_subnet1.id: fake_network.id,
                               fake_subnet2.id: fake_network.id}
        cache.port_lookup = {fake_port1.id: fake_network.id}
        cache.remove(fake_network)
        # All three lookup tables must be emptied together.
        self.assertEqual(0, len(cache.cache))
        self.assertEqual(0, len(cache.subnet_lookup))
        self.assertEqual(0, len(cache.port_lookup))

    def test_get_network_by_id(self):
        cache = self._cache_with(fake_network)
        self.assertEqual(fake_network,
                         cache.get_network_by_id(fake_network.id))

    def test_get_network_ids(self):
        cache = self._cache_with(fake_network)
        self.assertEqual([fake_network.id], list(cache.get_network_ids()))

    def test_get_network_by_subnet_id(self):
        cache = self._cache_with(fake_network)
        self.assertEqual(fake_network,
                         cache.get_network_by_subnet_id(fake_subnet1.id))

    def test_get_network_by_port_id(self):
        cache = self._cache_with(fake_network)
        self.assertEqual(fake_network,
                         cache.get_network_by_port_id(fake_port1.id))

    def test_put_port(self):
        net = self._one_port_net()
        cache = self._cache_with(net)
        cache.put_port(fake_port2)
        self.assertEqual(2, len(cache.port_lookup))
        self.assertIn(fake_port2, net.ports)

    def test_put_port_existing(self):
        net = self._two_port_net()
        cache = self._cache_with(net)
        # Re-adding a port that is already cached must not duplicate it.
        cache.put_port(fake_port2)
        self.assertEqual(2, len(cache.port_lookup))
        self.assertIn(fake_port2, net.ports)

    def test_remove_port_existing(self):
        net = self._two_port_net()
        cache = self._cache_with(net)
        cache.remove_port(fake_port2)
        self.assertEqual(1, len(cache.port_lookup))
        self.assertNotIn(fake_port2, net.ports)

    def test_get_port_by_id(self):
        cache = self._cache_with(fake_network)
        self.assertEqual(fake_port1, cache.get_port_by_id(fake_port1.id))
class FakePort1(object):
    # Minimal stand-in for a Neutron port: only the id attribute is read.
    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
class FakeV4Subnet(object):
    # DHCP-enabled IPv4 subnet with a gateway configured.
    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
    ip_version = 4
    cidr = '192.168.0.0/24'
    gateway_ip = '192.168.0.1'
    enable_dhcp = True
class FakeV4SubnetNoGateway(object):
    # DHCP-enabled IPv4 subnet with no gateway (gateway_ip is None).
    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
    ip_version = 4
    cidr = '192.168.1.0/24'
    gateway_ip = None
    enable_dhcp = True
class FakeV4Network(object):
    # IPv4 network with one subnet, one port and a dhcp namespace name.
    id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
    subnets = [FakeV4Subnet()]
    ports = [FakePort1()]
    namespace = 'qdhcp-aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
class FakeV4NetworkNoSubnet(object):
    # Network fixture carrying no subnets and no ports.
    id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
    subnets = []
    ports = []
class FakeV4NetworkNoGateway(object):
    # Network whose only subnet has no gateway configured.
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4SubnetNoGateway()]
    ports = [FakePort1()]
class TestDeviceManager(base.BaseTestCase):
def setUp(self):
super(TestDeviceManager, self).setUp()
config.register_interface_driver_opts_helper(cfg.CONF)
config.register_use_namespaces_opts_helper(cfg.CONF)
cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS)
cfg.CONF.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
cfg.CONF.set_override('use_namespaces', True)
cfg.CONF.set_override('enable_isolated_metadata', True)
self.ensure_device_is_ready_p = mock.patch(
'neutron.agent.linux.ip_lib.ensure_device_is_ready')
self.ensure_device_is_ready = (self.ensure_device_is_ready_p.start())
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
self.iproute_cls_p = mock.patch('neutron.agent.linux.'
'ip_lib.IpRouteCommand')
driver_cls = self.dvr_cls_p.start()
iproute_cls = self.iproute_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
self.mock_driver.use_gateway_ips = False
self.mock_iproute = mock.MagicMock()
driver_cls.return_value = self.mock_driver
iproute_cls.return_value = self.mock_iproute
iptables_cls_p = mock.patch(
'neutron.agent.linux.iptables_manager.IptablesManager')
iptables_cls = iptables_cls_p.start()
self.iptables_inst = mock.Mock()
iptables_cls.return_value = self.iptables_inst
self.mangle_inst = mock.Mock()
self.iptables_inst.ipv4 = {'mangle': self.mangle_inst}
def _test_setup_helper(self, device_is_ready, net=None, port=None):
net = net or fake_network
port = port or fake_port1
plugin = mock.Mock()
plugin.create_dhcp_port.return_value = port or fake_port1
self.ensure_device_is_ready.return_value = device_is_ready
self.mock_driver.get_device_name.return_value = 'tap12345678-12'
dh = dhcp.DeviceManager(cfg.CONF, plugin)
dh._set_default_route = mock.Mock()
interface_name = dh.setup(net)
self.assertEqual(interface_name, 'tap12345678-12')
plugin.assert_has_calls([
mock.call.create_dhcp_port(
{'port': {'name': '', 'admin_state_up': True,
'network_id': net.id, 'tenant_id': net.tenant_id,
'fixed_ips':
[{'subnet_id': port.fixed_ips[0].subnet_id}],
'device_id': mock.ANY}})])
if port == fake_ipv6_port:
expected_ips = ['169.254.169.254/16']
else:
expected_ips = ['172.9.9.9/24', '169.254.169.254/16']
expected = [
mock.call.get_device_name(port),
mock.call.init_l3(
'tap12345678-12',
expected_ips,
namespace=net.namespace)]
if not device_is_ready:
expected.insert(1,
mock.call.plug(net.id,
port.id,
'tap12345678-12',
'aa:bb:cc:dd:ee:ff',
namespace=net.namespace))
self.mock_driver.assert_has_calls(expected)
dh._set_default_route.assert_called_once_with(net, 'tap12345678-12')
def test_setup(self):
cfg.CONF.set_override('enable_metadata_network', False)
self._test_setup_helper(False)
cfg.CONF.set_override('enable_metadata_network', True)
self._test_setup_helper(False)
def test_setup_calls_fill_dhcp_udp_checksums(self):
self._test_setup_helper(False)
rule = ('-p udp --dport %d -j CHECKSUM --checksum-fill'
% const.DHCP_RESPONSE_PORT)
expected = [mock.call.add_rule('POSTROUTING', rule)]
self.mangle_inst.assert_has_calls(expected)
def test_setup_create_dhcp_port(self):
plugin = mock.Mock()
net = copy.deepcopy(fake_network)
plugin.create_dhcp_port.return_value = fake_dhcp_port
dh = dhcp.DeviceManager(cfg.CONF, plugin)
dh.setup(net)
plugin.assert_has_calls([
mock.call.create_dhcp_port(
{'port': {'name': '', 'admin_state_up': True,
'network_id': net.id,
'tenant_id': net.tenant_id,
'fixed_ips': [{'subnet_id':
fake_dhcp_port.fixed_ips[0].subnet_id}],
'device_id': mock.ANY}})])
self.assertIn(fake_dhcp_port, net.ports)
def test_setup_ipv6(self):
self._test_setup_helper(True, net=fake_network_ipv6,
port=fake_ipv6_port)
def test_setup_device_is_ready(self):
self._test_setup_helper(True)
def test_create_dhcp_port_raise_conflict(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, plugin)
plugin.create_dhcp_port.return_value = None
self.assertRaises(exceptions.Conflict,
dh.setup_dhcp_port,
fake_network)
def test_create_dhcp_port_create_new(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, plugin)
plugin.create_dhcp_port.return_value = fake_network.ports[0]
dh.setup_dhcp_port(fake_network)
plugin.assert_has_calls([
mock.call.create_dhcp_port(
{'port': {'name': '', 'admin_state_up': True,
'network_id':
fake_network.id, 'tenant_id': fake_network.tenant_id,
'fixed_ips':
[{'subnet_id': fake_fixed_ip1.subnet_id}],
'device_id': mock.ANY}})])
def test_create_dhcp_port_update_add_subnet(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, plugin)
fake_network_copy = copy.deepcopy(fake_network)
fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
fake_network_copy.subnets[1].enable_dhcp = True
plugin.update_dhcp_port.return_value = fake_network.ports[0]
dh.setup_dhcp_port(fake_network_copy)
port_body = {'port': {
'network_id': fake_network.id,
'fixed_ips': [{'subnet_id': fake_fixed_ip1.subnet_id,
'ip_address': fake_fixed_ip1.ip_address},
{'subnet_id': fake_subnet2.id}]}}
plugin.assert_has_calls([
mock.call.update_dhcp_port(fake_network_copy.ports[0].id,
port_body)])
def test_update_dhcp_port_raises_conflict(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, plugin)
fake_network_copy = copy.deepcopy(fake_network)
fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
fake_network_copy.subnets[1].enable_dhcp = True
plugin.update_dhcp_port.return_value = None
self.assertRaises(exceptions.Conflict,
dh.setup_dhcp_port,
fake_network_copy)
def test_create_dhcp_port_no_update_or_create(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, plugin)
fake_network_copy = copy.deepcopy(fake_network)
fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
dh.setup_dhcp_port(fake_network_copy)
self.assertFalse(plugin.setup_dhcp_port.called)
self.assertFalse(plugin.update_dhcp_port.called)
def test_setup_dhcp_port_with_non_enable_dhcp_subnet(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, plugin)
fake_network_copy = copy.deepcopy(fake_network)
fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
plugin.update_dhcp_port.return_value = fake_port1
self.assertEqual(fake_subnet1.id,
dh.setup_dhcp_port(fake_network_copy).fixed_ips[0].subnet_id)
def test_destroy(self):
    # destroy() must unplug the tap device inside the network's
    # qdhcp- namespace and release the DHCP port back to the plugin.
    fake_net = dhcp.NetModel(
        True, dict(id=FAKE_NETWORK_UUID,
                   tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'))
    with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls:
        mock_driver = mock.MagicMock()
        mock_driver.get_device_name.return_value = 'tap12345678-12'
        dvr_cls.return_value = mock_driver
        plugin = mock.Mock()
        dh = dhcp.DeviceManager(cfg.CONF, plugin)
        dh.destroy(fake_net, 'tap12345678-12')
        # The interface driver is built from the agent config exactly once.
        dvr_cls.assert_called_once_with(cfg.CONF)
        mock_driver.assert_has_calls(
            [mock.call.unplug('tap12345678-12',
                              namespace='qdhcp-' + fake_net.id)])
        plugin.assert_has_calls(
            [mock.call.release_dhcp_port(fake_net.id, mock.ANY)])
def test_get_interface_name(self):
fake_net = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'))
fake_port = dhcp.DictModel(
dict(id='12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff'))
with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls:
mock_driver = mock.MagicMock()
mock_driver.get_device_name.return_value = 'tap12345678-12'
dvr_cls.return_value = mock_driver
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, plugin)
dh.get_interface_name(fake_net, fake_port)
dvr_cls.assert_called_once_with(cfg.CONF)
mock_driver.assert_has_calls(
[mock.call.get_device_name(fake_port)])
self.assertEqual(len(plugin.mock_calls), 0)
def test_get_device_id(self):
    # get_device_id() derives a stable identifier of the form
    # 'dhcp<uuid5(host)>-<network id>'; uuid5 is pinned so the
    # expected string is deterministic.
    fake_net = dhcp.NetModel(
        True, dict(id='12345678-1234-5678-1234567890ab',
                   tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'))
    expected = ('dhcp1ae5f96c-c527-5079-82ea-371a01645457-12345678-1234-'
                '5678-1234567890ab')
    with mock.patch('uuid.uuid5') as uuid5:
        uuid5.return_value = '1ae5f96c-c527-5079-82ea-371a01645457'
        dh = dhcp.DeviceManager(cfg.CONF, None)
        self.assertEqual(dh.get_device_id(fake_net), expected)
        # Bug fix: the original line read `uuid5.called_once_with(...)`,
        # which is a no-op Mock attribute access (not an assertion) and
        # was placed before get_device_id() had even run.  Use the real
        # assertion, after the call under test.
        uuid5.assert_called_once_with(uuid.NAMESPACE_DNS, cfg.CONF.host)
def test_update(self):
# Try with namespaces and no metadata network
cfg.CONF.set_override('use_namespaces', True)
cfg.CONF.set_override('enable_metadata_network', False)
dh = dhcp.DeviceManager(cfg.CONF, None)
dh._set_default_route = mock.Mock()
network = mock.Mock()
dh.update(network, 'ns-12345678-12')
dh._set_default_route.assert_called_once_with(network,
'ns-12345678-12')
# No namespaces, shouldn't set default route.
cfg.CONF.set_override('use_namespaces', False)
cfg.CONF.set_override('enable_metadata_network', False)
dh = dhcp.DeviceManager(cfg.CONF, None)
dh._set_default_route = mock.Mock()
dh.update(FakeV4Network(), 'tap12345678-12')
self.assertFalse(dh._set_default_route.called)
# Meta data network enabled, don't interfere with its gateway.
cfg.CONF.set_override('use_namespaces', True)
cfg.CONF.set_override('enable_metadata_network', True)
dh = dhcp.DeviceManager(cfg.CONF, None)
dh._set_default_route = mock.Mock()
dh.update(FakeV4Network(), 'ns-12345678-12')
self.assertTrue(dh._set_default_route.called)
# For completeness
cfg.CONF.set_override('use_namespaces', False)
cfg.CONF.set_override('enable_metadata_network', True)
dh = dhcp.DeviceManager(cfg.CONF, None)
dh._set_default_route = mock.Mock()
dh.update(FakeV4Network(), 'ns-12345678-12')
self.assertFalse(dh._set_default_route.called)
def test_set_default_route(self):
    # One subnet with a gateway and no pre-existing default route in
    # the namespace: the gateway must be added and nothing deleted.
    dh = dhcp.DeviceManager(cfg.CONF, None)
    with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
        device = mock.Mock()
        mock_IPDevice.return_value = device
        # No current default route on the device.
        device.route.get_gateway.return_value = None
        # Basic one subnet with gateway.
        network = FakeV4Network()
        dh._set_default_route(network, 'tap-name')
        self.assertEqual(device.route.get_gateway.call_count, 1)
        self.assertFalse(device.route.delete_gateway.called)
        device.route.add_gateway.assert_called_once_with('192.168.0.1')
def test_set_default_route_no_subnet(self):
dh = dhcp.DeviceManager(cfg.CONF, None)
with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
device = mock.Mock()
mock_IPDevice.return_value = device
device.route.get_gateway.return_value = None
network = FakeV4NetworkNoSubnet()
network.namespace = 'qdhcp-1234'
dh._set_default_route(network, 'tap-name')
self.assertEqual(device.route.get_gateway.call_count, 1)
self.assertFalse(device.route.delete_gateway.called)
self.assertFalse(device.route.add_gateway.called)
def test_set_default_route_no_subnet_delete_gateway(self):
dh = dhcp.DeviceManager(cfg.CONF, None)
with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
device = mock.Mock()
mock_IPDevice.return_value = device
device.route.get_gateway.return_value = dict(gateway='192.168.0.1')
network = FakeV4NetworkNoSubnet()
network.namespace = 'qdhcp-1234'
dh._set_default_route(network, 'tap-name')
self.assertEqual(device.route.get_gateway.call_count, 1)
device.route.delete_gateway.assert_called_once_with('192.168.0.1')
self.assertFalse(device.route.add_gateway.called)
def test_set_default_route_no_gateway(self):
dh = dhcp.DeviceManager(cfg.CONF, None)
with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
device = mock.Mock()
mock_IPDevice.return_value = device
device.route.get_gateway.return_value = dict(gateway='192.168.0.1')
network = FakeV4NetworkNoGateway()
network.namespace = 'qdhcp-1234'
dh._set_default_route(network, 'tap-name')
self.assertEqual(device.route.get_gateway.call_count, 1)
device.route.delete_gateway.assert_called_once_with('192.168.0.1')
self.assertFalse(device.route.add_gateway.called)
def test_set_default_route_do_nothing(self):
dh = dhcp.DeviceManager(cfg.CONF, None)
with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
device = mock.Mock()
mock_IPDevice.return_value = device
device.route.get_gateway.return_value = dict(gateway='192.168.0.1')
network = FakeV4Network()
dh._set_default_route(network, 'tap-name')
self.assertEqual(device.route.get_gateway.call_count, 1)
self.assertFalse(device.route.delete_gateway.called)
self.assertFalse(device.route.add_gateway.called)
def test_set_default_route_change_gateway(self):
dh = dhcp.DeviceManager(cfg.CONF, None)
with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
device = mock.Mock()
mock_IPDevice.return_value = device
device.route.get_gateway.return_value = dict(gateway='192.168.0.2')
network = FakeV4Network()
dh._set_default_route(network, 'tap-name')
self.assertEqual(device.route.get_gateway.call_count, 1)
self.assertFalse(device.route.delete_gateway.called)
device.route.add_gateway.assert_called_once_with('192.168.0.1')
def test_set_default_route_two_subnets(self):
# Try two subnets. Should set gateway from the first.
dh = dhcp.DeviceManager(cfg.CONF, None)
with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
device = mock.Mock()
mock_IPDevice.return_value = device
device.route.get_gateway.return_value = None
network = FakeV4Network()
subnet2 = FakeV4Subnet()
subnet2.gateway_ip = '192.168.1.1'
network.subnets = [subnet2, FakeV4Subnet()]
dh._set_default_route(network, 'tap-name')
self.assertEqual(device.route.get_gateway.call_count, 1)
self.assertFalse(device.route.delete_gateway.called)
device.route.add_gateway.assert_called_once_with('192.168.1.1')
class TestDictModel(base.BaseTestCase):
    """Exercise attribute-style access provided by dhcp.DictModel."""

    def test_basic_dict(self):
        # Top-level keys become attributes.
        model = dhcp.DictModel(dict(a=1, b=2))
        self.assertEqual(model.a, 1)
        self.assertEqual(model.b, 2)

    def test_dict_has_sub_dict(self):
        # Nested dicts are wrapped recursively.
        model = dhcp.DictModel(dict(a=dict(b=2)))
        self.assertEqual(model.a.b, 2)

    def test_dict_contains_list(self):
        # Lists of plain values are preserved as-is.
        model = dhcp.DictModel(dict(a=[1, 2]))
        self.assertEqual(model.a, [1, 2])

    def test_dict_contains_list_of_dicts(self):
        # Dicts inside lists are wrapped as well.
        model = dhcp.DictModel(dict(a=[dict(b=2), dict(c=3)]))
        self.assertEqual(model.a[0].b, 2)
        self.assertEqual(model.a[1].c, 3)
class TestNetModel(base.BaseTestCase):
    """Verify namespace naming behaviour of dhcp.NetModel."""

    def test_ns_name(self):
        # use_namespaces=True -> namespace is 'qdhcp-' + network id.
        self.assertEqual(dhcp.NetModel(True, {'id': 'foo'}).namespace,
                         'qdhcp-foo')

    def test_ns_name_false_namespace(self):
        # use_namespaces=False -> no namespace is assigned.
        self.assertIsNone(dhcp.NetModel(False, {'id': 'foo'}).namespace)

    def test_ns_name_none_namespace(self):
        # A None flag behaves like False.
        self.assertIsNone(dhcp.NetModel(None, {'id': 'foo'}).namespace)
|
agent_mgr = dhcp_agent.DhcpAgentWithStateReport(
'testhost')
eventlet.greenthread.sleep(1)
agent_mgr.after_start()
|
<|file_name|>typeMatch.js<|end_file_name|><|fim▁begin|>"use strict";
const _ = require ('underscore')
_.hasTypeMatch = true
/* Type matching for arbitrary complex structures (TODO: test)
======================================================================== */
Meta.globalTag ('required')
Meta.globalTag ('atom')
$global.const ('$any', _.identity)
_.deferTest (['type', 'type matching'], function () {
$assert (_.omitTypeMismatches ( { '*': $any, foo: $required ('number'), bar: $required ('number') },
{ baz: 'x', foo: 42, bar: 'foo' }),
{ })
$assert (_.omitTypeMismatches ( { foo: { '*': $any } },
{ foo: { bar: 42, baz: 'qux' } }),
{ foo: { bar: 42, baz: 'qux' } })
$assert (_.omitTypeMismatches ( { foo: { bar: $required(42), '*': $any } },
{ foo: { bar: 'foo', baz: 'qux' } }),
{ })
$assert (_.omitTypeMismatches ( [{ foo: $required ('number'), bar: 'number' }],
[{ foo: 42, bar: 42 },
{ foo: 24, },
{ bar: 42 }]), [{ foo: 42, bar: 42 }, { foo: 24 }])
$assert (_.omitTypeMismatches ({ '*': 'number' }, { foo: 42, bar: 42 }), { foo: 42, bar: 42 })
$assert (_.omitTypeMismatches ({ foo: $any }, { foo: 0 }), { foo: 0 }) // there was a bug (any zero value was omitted)
$assert (_.decideType ([]), [])
$assert (_.decideType (42), 'number')
$assert (_.decideType (_.identity), 'function')
$assert (_.decideType ([{ foo: 1 }, { foo: 2 }]), [{ foo: 'number' }])
$assert (_.decideType ([{ foo: 1 }, { bar: 2 }]), [])
$assert (_.decideType ( { foo: { bar: 1 }, foo: { baz: [] } }),
{ foo: { bar: 'number' }, foo: { baz: [] } })
$assert (_.decideType ( { foo: { bar: 1 }, foo: { bar: 2 } }),
{ foo: { bar: 'number' } })
$assert (_.decideType ( { foo: { bar: 1 },
bar: { bar: 2 } }),
{ '*': { bar: 'number' } })
if (_.hasOOP) {
var Type = $prototype ()
$assert (_.decideType ({ x: new Type () }), { x: Type }) }
}, function () {
_.isMeta = function (x) { return (x === $any) || $atom.is (x) || $required.is (x) }
var zip = function (type, value, pred) {
var required = Meta.unwrapAll (_.filter2 (type, $required.is))
var match = _.nonempty (_.zip2 (Meta.unwrapAll (type), value, pred))
if (_.isEmpty (required)) {
return match }
else { var requiredMatch = _.nonempty (_.zip2 (required, value, pred))
var allSatisfied = _.values2 (required).length === _.values2 (requiredMatch).length
return allSatisfied ?
match : _.coerceToEmpty (value) } }
var hyperMatch = _.hyperOperator (_.binary,
function (type_, value, pred) { var type = Meta.unwrap (type_)
if (_.isArray (type)) { // matches [ItemType] → [item, item, ..., N]
if (_.isArray (value)) {
return zip (_.times (value.length, _.constant (type[0])), value, pred) }
else {
return undefined } }
else if (_.isStrictlyObject (type) && type['*']) { // matches { *: .. } → { a: .., b: .., c: .. }
if (_.isStrictlyObject (value)) {
return zip (_.extend ( _.map2 (value, _.constant (type['*'])),<|fim▁hole|> else {
return undefined } }
else {
return zip (type_, value, pred) } })
var typeMatchesValue = function (c, v) { var contract = Meta.unwrap (c)
return (contract === $any) ||
((contract === undefined) && (v === undefined)) ||
(_.isFunction (contract) && (
_.isPrototypeConstructor (contract) ?
_.isTypeOf (contract, v) : // constructor type
(contract (v) === true))) || // test predicate
(typeof v === contract) || // plain JS type
(v === contract) } // constant match
_.mismatches = function (op, contract, value) {
return hyperMatch (contract, value,
function (contract, v) {
return op (contract, v) ? undefined : contract }) }
_.omitMismatches = function (op, contract, value) {
return hyperMatch (contract, value,
function (contract, v) {
return op (contract, v) ? v : undefined }) }
_.typeMismatches = _.partial (_.mismatches, typeMatchesValue)
_.omitTypeMismatches = _.partial (_.omitMismatches, typeMatchesValue)
_.valueMismatches = _.partial (_.mismatches, function (a, b) { return (a === $any) || (b === $any) || (a === b) })
var unifyType = function (value) {
if (_.isArray (value)) {
return _.nonempty ([_.reduce (value.slice (1), function (a, b) { return _.undiff (a, b) }, _.first (value) || undefined)]) }
else if (_.isStrictlyObject (value)) {
var pairs = _.pairs (value)
var unite = _.map ( _.reduce (pairs.slice (1), function (a, b) { return _.undiff (a, b) }, _.first (pairs) || [undefined, undefined]),
_.nonempty)
return (_.isEmpty (unite) || _.isEmpty (unite[1])) ? value : _.fromPairs ([[unite[0] || '*', unite[1]]]) }
else {
return value } }
_.decideType = function (value) {
var operator = _.hyperOperator (_.unary,
function (value, pred) {
if (value && value.constructor && value.constructor.$definition) {
return value.constructor }
return unifyType (_.map2 (value, pred)) })
return operator (value, function (value) {
if (_.isPrototypeInstance (value)) {
return value.constructor }
else {
return _.isEmptyArray (value) ? value : (typeof value) } }) } }) // TODO: fix hyperOperator to remove additional check for []<|fim▁end|>
|
_.omit (type, '*')), value, pred) }
|
<|file_name|>test_database_upgrade_service.py<|end_file_name|><|fim▁begin|>'''
Created on 28.06.2016
@author: michael
'''
import unittest
from alexandriabase.services import DatabaseUpgradeService
from daotests.test_base import DatabaseBaseTest
<|fim▁hole|>
def setUp(self):
super().setUp()
self.upgrade_service = DatabaseUpgradeService(self.engine)
def tearDown(self):
super().tearDown()
def testUpgrade(self):
self.assertTrue(self.upgrade_service.is_update_necessary())
self.upgrade_service.run_update()
self.assertFalse(self.upgrade_service.is_update_necessary())
def testFailingUpgrade(self):
registry_dao = RegistryDao(self.engine)
registry_dao.set('version', 'not_existing')
self.assertTrue(self.upgrade_service.is_update_necessary())
expected_exception = False
try:
self.upgrade_service.run_update()
except Exception:
expected_exception = True
self.assertTrue(expected_exception)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()<|fim▁end|>
|
from alexandriabase.daos import RegistryDao
class DatabaseUpgradeServiceTest(DatabaseBaseTest):
|
<|file_name|>cover.js<|end_file_name|><|fim▁begin|>/*! UIkit 2.27.2 | http://www.getuikit.com | (c) 2014 YOOtheme | MIT License */
(function (UI) {
"use strict";
UI.component('cover', {
defaults: {
automute: true
},
boot: function () {
// auto init
UI.ready(function (context) {
UI.$('[data-uk-cover]', context).each(function () {
var ele = UI.$(this);
if (!ele.data('cover')) {
var plugin = UI.cover(ele, UI.Utils.options(ele.attr('data-uk-cover')));
}
});
});
},
init: function () {
this.parent = this.element.parent();
UI.$win.on('load resize orientationchange', UI.Utils.debounce(function () {
this.check();
}.bind(this), 100));
this.on('display.uk.check', function (e) {
if (this.element.is(':visible')) this.check();
}.bind(this));
this.check();
if (this.element.is('iframe') && this.options.automute) {
var src = this.element.attr('src');
this.element.attr('src', '').on('load', function () {
this.contentWindow.postMessage('{ "event": "command", "func": "mute", "method":"setVolume", "value":0}', '*');
}).attr('src', [src, (src.indexOf('?') > -1 ? '&' : '?'), 'enablejsapi=1&api=1'].join(''));
}
},
check: function () {
this.element.css({width: '', height: ''});
this.dimension = {w: this.element.width(), h: this.element.height()};
if (this.element.attr('width') && !isNaN(this.element.attr('width'))) {
this.dimension.w = this.element.attr('width');
}
if (this.element.attr('height') && !isNaN(this.element.attr('height'))) {
this.dimension.h = this.element.attr('height');
}
this.ratio = this.dimension.w / this.dimension.h;
var w = this.parent.width(), h = this.parent.height(), width, height;
// if element height < parent height (gap underneath)
if ((w / this.ratio) < h) {
width = Math.ceil(h * this.ratio);
height = h;
// element width < parent width (gap to right)
} else {
width = w;
height = Math.ceil(w / this.ratio);
}
<|fim▁hole|> }
});
})(UIkit);<|fim▁end|>
|
this.element.css({width: width, height: height});
|
<|file_name|>22e4e60e03f_bug_867387_bixie_dra.py<|end_file_name|><|fim▁begin|>"""bug 867387 Bixie draft schema
Revision ID: 22e4e60e03f
Revises: 37004fc6e41e
Create Date: 2013-05-10 13:20:35.750954
"""
# revision identifiers, used by Alembic.
revision = '22e4e60e03f'
down_revision = '37004fc6e41e'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects import postgresql
import sqlalchemy.types as types
from sqlalchemy.sql import table, column
class CITEXT(types.UserDefinedType):
    # SQLAlchemy wrapper for the PostgreSQL case-insensitive text
    # extension type; values pass through unchanged in both directions.
    name = 'citext'

    def get_col_spec(self):
        # Literal type name emitted in DDL.
        return 'CITEXT'

    def bind_processor(self, dialect):
        # Identity conversion for values bound into the database.
        def process(value):
            return value
        return process

    def result_processor(self, dialect, coltype):
        # Identity conversion for values read from the database.
        def process(value):
            return value
        return process

    def __repr__(self):
        return "citext"
class JSON(types.UserDefinedType):
    # SQLAlchemy wrapper for the PostgreSQL JSON type; values pass
    # through unchanged in both directions (no (de)serialization here).
    name = 'json'

    def get_col_spec(self):
        # Literal type name emitted in DDL.
        return 'JSON'

    def bind_processor(self, dialect):
        # Identity conversion for values bound into the database.
        def process(value):
            return value
        return process

    def result_processor(self, dialect, coltype):
        # Identity conversion for values read from the database.
        def process(value):
            return value
        return process

    def __repr__(self):
        return "json"
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute(u'CREATE SCHEMA bixie')
op.create_table(u'product_versions',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', CITEXT(), nullable=True),
sa.Column(u'release_version', sa.TEXT(), nullable=True),
sa.Column(u'major_version', sa.TEXT(), nullable=True),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'full_urls',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'url', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'raw_product_releases',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'version', sa.TEXT(), nullable=False),
sa.Column(u'build', sa.TEXT(), nullable=False),
sa.Column(u'build_type', CITEXT(), nullable=False),
sa.Column(u'platform', sa.TEXT(), nullable=False),
sa.Column(u'product_name', CITEXT(), nullable=False),
sa.Column(u'repository', sa.TEXT(), nullable=False),
sa.Column(u'stability', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'products',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'crashes_normalized',
sa.Column(u'crash_id', postgresql.UUID(), autoincrement=False,
nullable=False),
sa.Column(u'signature_id', sa.TEXT(), nullable=False),
sa.Column(u'error_message_id', JSON(), nullable=False),
sa.Column(u'product_id', sa.TEXT(), nullable=True),
sa.Column(u'user_agent_id', sa.TEXT(), nullable=True),
sa.PrimaryKeyConstraint(u'crash_id'),
schema=u'bixie'
)
op.create_table(u'hosts',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'signatures',
sa.Column(u'id', sa.INTEGER(), nullable=False),<|fim▁hole|> )
op.create_table(u'crashes',
sa.Column(u'crash_id', postgresql.UUID(), autoincrement=False,
nullable=False),
sa.Column(u'signature', sa.TEXT(), nullable=False),
sa.Column(u'error', JSON(), nullable=False),
sa.Column(u'product', sa.TEXT(), nullable=True),
sa.Column(u'protocol', sa.TEXT(), nullable=True),
sa.Column(u'hostname', sa.TEXT(), nullable=True),
sa.Column(u'username', sa.TEXT(), nullable=True),
sa.Column(u'port', sa.TEXT(), nullable=True),
sa.Column(u'path', sa.TEXT(), nullable=True),
sa.Column(u'query', sa.TEXT(), nullable=True),
sa.Column(u'full_url', sa.TEXT(), nullable=True),
sa.Column(u'user_agent', sa.TEXT(), nullable=True),
sa.Column(u'success', sa.BOOLEAN(), nullable=True),
sa.Column(u'client_crash_datetime',
postgresql.TIMESTAMP(timezone=True), nullable=True),
sa.Column(u'client_submitted_datetime',
postgresql.TIMESTAMP(timezone=True), nullable=True),
sa.Column(u'processor_started_datetime',
postgresql.TIMESTAMP(timezone=True), nullable=True),
sa.Column(u'processor_completed_datetime',
postgresql.TIMESTAMP(timezone=True), nullable=True),
sa.PrimaryKeyConstraint(u'crash_id'),
schema=u'bixie'
)
op.create_table(u'release_channels',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', CITEXT(), nullable=False),
sa.Column(u'sort', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'os_names',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'error_messages',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'error_message', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'product_version_adi',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'product_version_id', sa.INTEGER(), nullable=False),
sa.Column(u'adi_count', sa.BIGINT(), nullable=False),
sa.Column(u'adi_date', sa.INTEGER(), nullable=False),
sa.Column(u'os_name', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'raw_adi',
sa.Column(u'adi_count', sa.BIGINT(), nullable=True),
sa.Column(u'date', sa.DATE(), nullable=True),
sa.Column(u'product_name', sa.TEXT(), nullable=True),
sa.Column(u'product_os_platform', sa.TEXT(), nullable=True),
sa.Column(u'product_os_version', sa.TEXT(), nullable=True),
sa.Column(u'product_version', sa.TEXT(), nullable=True),
sa.Column(u'build', sa.TEXT(), nullable=True),
sa.Column(u'build_channel', sa.TEXT(), nullable=True),
sa.Column(u'product_guid', sa.TEXT(), nullable=True),
sa.Column(u'received_at', postgresql.TIMESTAMP(timezone=True),
nullable=True),
sa.PrimaryKeyConstraint(),
schema=u'bixie'
)
op.create_table(u'users',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'product_adi',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'product_id', sa.INTEGER(), nullable=False),
sa.Column(u'adi_count', sa.BIGINT(), nullable=False),
sa.Column(u'adi_date', sa.INTEGER(), nullable=False),
sa.Column(u'os_name', CITEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'user_agents',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'error_message_id', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['error_message_id'],
[u'bixie.error_messages.id'], ),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'product_users',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'product_id', sa.INTEGER(), nullable=True),
sa.Column(u'user_id', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['product_id'], [u'bixie.products.id'], ),
sa.ForeignKeyConstraint(['user_id'], [u'bixie.users.id'], ),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'product_release_channels',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'release_channel_id', sa.INTEGER(), nullable=True),
sa.Column(u'product_id', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['product_id'], [u'bixie.products.id'], ),
sa.ForeignKeyConstraint(['release_channel_id'],
[u'bixie.release_channels.id'], ),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'error_message_products',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'error_message_id', sa.INTEGER(), nullable=True),
sa.Column(u'product_id', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['error_message_id'],
[u'bixie.error_messages.id'], ),
sa.ForeignKeyConstraint(['product_id'], [u'bixie.products.id'], ),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
### end Alembic commands ###
def downgrade():
    # Reverse of upgrade(): drop every bixie table in reverse creation
    # order (dependents before referenced tables), then the schema.
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table(u'error_message_products', schema=u'bixie')
    op.drop_table(u'product_release_channels', schema=u'bixie')
    op.drop_table(u'product_users', schema=u'bixie')
    op.drop_table(u'user_agents', schema=u'bixie')
    op.drop_table(u'product_adi', schema=u'bixie')
    op.drop_table(u'users', schema=u'bixie')
    op.drop_table(u'raw_adi', schema=u'bixie')
    op.drop_table(u'product_version_adi', schema=u'bixie')
    op.drop_table(u'error_messages', schema=u'bixie')
    op.drop_table(u'os_names', schema=u'bixie')
    op.drop_table(u'release_channels', schema=u'bixie')
    op.drop_table(u'crashes', schema=u'bixie')
    op.drop_table(u'signatures', schema=u'bixie')
    op.drop_table(u'hosts', schema=u'bixie')
    op.drop_table(u'crashes_normalized', schema=u'bixie')
    op.drop_table(u'products', schema=u'bixie')
    op.drop_table(u'raw_product_releases', schema=u'bixie')
    op.drop_table(u'full_urls', schema=u'bixie')
    op.drop_table(u'product_versions', schema=u'bixie')
    op.execute(u'DROP SCHEMA bixie')
    ### end Alembic commands ###
|
sa.Column(u'signature', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
|
<|file_name|>links.py<|end_file_name|><|fim▁begin|>#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
from edb.edgeql import ast as qlast
from edb.edgeql import qltypes
from edb import errors
from . import abc as s_abc
from . import constraints
from . import delta as sd
from . import indexes
from . import inheriting
from . import properties
from . import name as sn
from . import objects as so
from . import pointers
from . import referencing
from . import sources
from . import utils
if TYPE_CHECKING:
from . import objtypes as s_objtypes
from . import types as s_types
from . import schema as s_schema
LinkTargetDeleteAction = qltypes.LinkTargetDeleteAction
def merge_actions(
target: so.InheritingObject,
sources: List[so.Object],
field_name: str,
*,
ignore_local: bool = False,
schema: s_schema.Schema,
) -> Any:
if not ignore_local:
ours = target.get_explicit_local_field_value(schema, field_name, None)
else:
ours = None
if ours is None:
current = None
current_from = None
for source in sources:
theirs = source.get_explicit_field_value(schema, field_name, None)
if theirs is not None:
if current is None:
current = theirs
current_from = source
elif current != theirs:
target_source = target.get_source(schema)
current_from_source = current_from.get_source(schema)
source_source = source.get_source(schema)
tgt_repr = (
f'{target_source.get_displayname(schema)}.'
f'{target.get_displayname(schema)}'
)
cf_repr = (
f'{current_from_source.get_displayname(schema)}.'
f'{current_from.get_displayname(schema)}'
)
other_repr = (
f'{source_source.get_displayname(schema)}.'
f'{source.get_displayname(schema)}'
)
raise errors.SchemaError(
f'cannot implicitly resolve the '
f'`on target delete` action for '
f'{tgt_repr!r}: it is defined as {current} in '
f'{cf_repr!r} and as {theirs} in {other_repr!r}; '
f'to resolve, declare `on target delete` '
f'explicitly on {tgt_repr!r}'
)
return current
else:
return ours
class Link(
sources.Source,
pointers.Pointer,
s_abc.Link,
qlkind=qltypes.SchemaObjectClass.LINK,
data_safe=False,
):
on_target_delete = so.SchemaField(
LinkTargetDeleteAction,
default=LinkTargetDeleteAction.Restrict,
coerce=True,
compcoef=0.9,
merge_fn=merge_actions)
def get_target(self, schema: s_schema.Schema) -> s_objtypes.ObjectType:
return self.get_field_value( # type: ignore[no-any-return]
schema, 'target')
def is_link_property(self, schema: s_schema.Schema) -> bool:
return False
def is_property(self, schema: s_schema.Schema) -> bool:
return False
def scalar(self) -> bool:
return False
def has_user_defined_properties(self, schema: s_schema.Schema) -> bool:
return bool([p for p in self.get_pointers(schema).objects(schema)
if not p.is_special_pointer(schema)])
def get_source_type(
self,
schema: s_schema.Schema
) -> s_types.Type:
from . import types as s_types
source = self.get_source(schema)
assert isinstance(source, s_types.Type)
return source
def compare(
self,
other: so.Object,
*,
our_schema: s_schema.Schema,
their_schema: s_schema.Schema,
context: so.ComparisonContext,
) -> float:
if not isinstance(other, Link):
if isinstance(other, pointers.Pointer):
return 0.0
else:
raise NotImplementedError()
return super().compare(
other, our_schema=our_schema,
their_schema=their_schema, context=context)
def set_target(
self,
schema: s_schema.Schema,
target: s_types.Type,
) -> s_schema.Schema:
schema = super().set_target(schema, target)
tgt_prop = self.getptr(schema, sn.UnqualName('target'))
schema = tgt_prop.set_target(schema, target)
return schema
@classmethod
def get_root_classes(cls) -> Tuple[sn.QualName, ...]:
return (
sn.QualName(module='std', name='link'),
sn.QualName(module='schema', name='__type__'),
)
@classmethod
def get_default_base_name(self) -> sn.QualName:
return sn.QualName('std', 'link')
class LinkSourceCommandContext(sources.SourceCommandContext):
pass
class LinkSourceCommand(inheriting.InheritingObjectCommand[sources.Source_T]):
pass
class LinkCommandContext(pointers.PointerCommandContext[Link],
constraints.ConsistencySubjectCommandContext,
properties.PropertySourceContext,
indexes.IndexSourceCommandContext):
pass
class LinkCommand(
properties.PropertySourceCommand[Link],
pointers.PointerCommand[Link],
context_class=LinkCommandContext,
referrer_context_class=LinkSourceCommandContext,
):
def _append_subcmd_ast(
self,
schema: s_schema.Schema,
node: qlast.DDLOperation,
subcmd: sd.Command,
context: sd.CommandContext,
) -> None:
if (
isinstance(subcmd, pointers.PointerCommand)
and subcmd.classname != self.classname
):
pname = sn.shortname_from_fullname(subcmd.classname)
if pname.name in {'source', 'target'}:
return
super()._append_subcmd_ast(schema, node, subcmd, context)
def validate_object(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> None:
"""Check that link definition is sound."""
super().validate_object(schema, context)
scls = self.scls
assert isinstance(scls, Link)
if not scls.get_owned(schema):
return
target = scls.get_target(schema)
assert target is not None
if not target.is_object_type():
srcctx = self.get_attribute_source_context('target')
raise errors.InvalidLinkTargetError(
f'invalid link target type, expected object type, got '
f'{target.get_verbosename(schema)}',
context=srcctx,
)
if target.is_free_object_type(schema):
srcctx = self.get_attribute_source_context('target')
raise errors.InvalidLinkTargetError(
f'{target.get_verbosename(schema)} is not a valid link target',
context=srcctx,
)
if (
not scls.is_pure_computable(schema)
and not scls.get_from_alias(schema)
and target.is_view(schema)
):
srcctx = self.get_attribute_source_context('target')
raise errors.InvalidLinkTargetError(
f'invalid link type: {target.get_displayname(schema)!r}'
f' is an expression alias, not a proper object type',
context=srcctx,
)
def _get_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
node = super()._get_ast(schema, context, parent_node=parent_node)
# __type__ link is special, and while it exists on every object
# it does not have a defined default in the schema (and therefore
# it isn't marked as required.) We intervene here to mark all
# __type__ links required when rendering for SDL/TEXT.
if context.declarative and node is not None:
assert isinstance(node, (qlast.CreateConcreteLink,
qlast.CreateLink))
if node.name.name == '__type__':
assert isinstance(node, qlast.CreateConcretePointer)
node.is_required = True
return node
def _reinherit_classref_dict(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
refdict: so.RefDict,
) -> Tuple[s_schema.Schema,
Dict[sn.Name, Type[sd.ObjectCommand[so.Object]]]]:
if self.scls.get_computable(schema) and refdict.attr != 'pointers':
# If the link is a computable, the inheritance would only
# happen in the case of aliasing, and in that case we only
# need to inherit the link properties and nothing else.
return schema, {}
return super()._reinherit_classref_dict(schema, context, refdict)
class CreateLink(
pointers.CreatePointer[Link],
LinkCommand,
):
astnode = [qlast.CreateConcreteLink, qlast.CreateLink]
referenced_astnode = qlast.CreateConcreteLink
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.Command:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
if isinstance(astnode, qlast.CreateConcreteLink):
assert isinstance(cmd, pointers.PointerCommand)
cmd._process_create_or_alter_ast(schema, astnode, context)
else:
# this is an abstract property then
if cmd.get_attribute_value('default') is not None:
raise errors.SchemaDefinitionError(
f"'default' is not a valid field for an abstract link",
context=astnode.context)
assert isinstance(cmd, sd.Command)
return cmd
def get_ast_attr_for_field(
self,
field: str,
astnode: Type[qlast.DDLOperation],
) -> Optional[str]:
if (
field == 'required'
and issubclass(astnode, qlast.CreateConcreteLink)
):
return 'is_required'
elif (
field == 'cardinality'
and issubclass(astnode, qlast.CreateConcreteLink)
):
return 'cardinality'
else:
return super().get_ast_attr_for_field(field, astnode)
def _apply_field_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
node: qlast.DDLOperation,
op: sd.AlterObjectProperty,
) -> None:
objtype = self.get_referrer_context(context)
if op.property == 'target' and objtype:
# Due to how SDL is processed the underlying AST may be an
# AlterConcreteLink, which requires different handling.
if isinstance(node, qlast.CreateConcreteLink):
if not node.target:
expr = self.get_attribute_value('expr')
if expr is not None:
node.target = expr.qlast
else:
t = op.new_value
assert isinstance(t, (so.Object, so.ObjectShell))
node.target = utils.typeref_to_ast(schema, t)
else:
old_type = pointers.merge_target(
self.scls,
list(self.scls.get_bases(schema).objects(schema)),
'target',
ignore_local=True,
schema=schema,
)
assert isinstance(op.new_value, (so.Object, so.ObjectShell))
new_type = (
op.new_value.resolve(schema)
if isinstance(op.new_value, so.ObjectShell)
else op.new_value)
new_type_ast = utils.typeref_to_ast(schema, op.new_value)
cast_expr = None
# If the type isn't assignment castable, generate a
# USING with a nonsense cast. It shouldn't matter,
# since there should be no data to cast, but the DDL side
# of things doesn't know that since the command is split up.
if old_type and not old_type.assignment_castable_to(
new_type, schema):
cast_expr = qlast.TypeCast(
type=new_type_ast,
expr=qlast.Set(elements=[]),
)
node.commands.append(
qlast.SetPointerType(
value=new_type_ast,
cast_expr=cast_expr,
)
)
elif op.property == 'on_target_delete':
node.commands.append(qlast.OnTargetDelete(cascade=op.new_value))
else:
super()._apply_field_ast(schema, context, node, op)
def inherit_classref_dict(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
refdict: so.RefDict,
) -> sd.CommandGroup:
if self.scls.get_computable(schema) and refdict.attr != 'pointers':
# If the link is a computable, the inheritance would only
# happen in the case of aliasing, and in that case we only
# need to inherit the link properties and nothing else.
return sd.CommandGroup()
cmd = super().inherit_classref_dict(schema, context, refdict)
if refdict.attr != 'pointers':
return cmd
parent_ctx = self.get_referrer_context(context)
if parent_ctx is None:
return cmd
base_prop_name = sn.QualName('std', 'source')
s_name = sn.get_specialized_name(
sn.QualName('__', 'source'), str(self.classname))
src_prop_name = sn.QualName(
name=s_name, module=self.classname.module)
src_prop = properties.CreateProperty(
classname=src_prop_name,
is_strong_ref=True,
)
src_prop.set_attribute_value('name', src_prop_name)
src_prop.set_attribute_value(
'bases',
so.ObjectList.create(schema, [schema.get(base_prop_name)]),
)
src_prop.set_attribute_value(
'source',
self.scls,
)
src_prop.set_attribute_value(
'target',
parent_ctx.op.scls,
)
src_prop.set_attribute_value('required', True)
src_prop.set_attribute_value('readonly', True)
src_prop.set_attribute_value('owned', True)
src_prop.set_attribute_value('from_alias',
self.scls.get_from_alias(schema))
src_prop.set_attribute_value('cardinality',
qltypes.SchemaCardinality.One)
cmd.prepend(src_prop)
base_prop_name = sn.QualName('std', 'target')
s_name = sn.get_specialized_name(
sn.QualName('__', 'target'), str(self.classname))
tgt_prop_name = sn.QualName(
name=s_name, module=self.classname.module)
tgt_prop = properties.CreateProperty(
classname=tgt_prop_name,
is_strong_ref=True,
)
tgt_prop.set_attribute_value('name', tgt_prop_name)
tgt_prop.set_attribute_value(
'bases',
so.ObjectList.create(schema, [schema.get(base_prop_name)]),
)
tgt_prop.set_attribute_value(
'source',
self.scls,
)
tgt_prop.set_attribute_value(
'target',
self.get_attribute_value('target'),
)
tgt_prop.set_attribute_value('required', False)
tgt_prop.set_attribute_value('readonly', True)
tgt_prop.set_attribute_value('owned', True)
tgt_prop.set_attribute_value('from_alias',
self.scls.get_from_alias(schema))
tgt_prop.set_attribute_value('cardinality',
qltypes.SchemaCardinality.One)
cmd.prepend(tgt_prop)
return cmd
class RenameLink(
LinkCommand,
referencing.RenameReferencedInheritingObject[Link],
):
pass
class RebaseLink(
LinkCommand,
referencing.RebaseReferencedInheritingObject[Link],
):
pass
class SetLinkType(
pointers.SetPointerType[Link],
referrer_context_class=LinkSourceCommandContext,
field='target',
):
def _alter_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super()._alter_begin(schema, context)
scls = self.scls
new_target = scls.get_target(schema)
if not context.canonical:
# We need to update the target link prop as well
tgt_prop = scls.getptr(schema, sn.UnqualName('target'))
tgt_prop_alter = tgt_prop.init_delta_command(
schema, sd.AlterObject)
tgt_prop_alter.set_attribute_value('target', new_target)
self.add(tgt_prop_alter)
return schema
class AlterLinkUpperCardinality(
pointers.AlterPointerUpperCardinality[Link],
referrer_context_class=LinkSourceCommandContext,<|fim▁hole|>
class AlterLinkLowerCardinality(
pointers.AlterPointerLowerCardinality[Link],
referrer_context_class=LinkSourceCommandContext,
field='required',
):
pass
class AlterLinkOwned(
referencing.AlterOwned[Link],
pointers.PointerCommandOrFragment[Link],
referrer_context_class=LinkSourceCommandContext,
field='owned',
):
pass
class SetTargetDeletePolicy(sd.Command):
astnode = qlast.OnTargetDelete
@classmethod
def _cmd_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.AlterObjectProperty:
return sd.AlterObjectProperty(
property='on_target_delete'
)
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.Command:
assert isinstance(astnode, qlast.OnTargetDelete)
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
assert isinstance(cmd, sd.AlterObjectProperty)
cmd.new_value = astnode.cascade
return cmd
class AlterLink(
LinkCommand,
pointers.AlterPointer[Link],
):
astnode = [qlast.AlterConcreteLink, qlast.AlterLink]
referenced_astnode = qlast.AlterConcreteLink
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> AlterLink:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
assert isinstance(cmd, AlterLink)
if isinstance(astnode, qlast.CreateConcreteLink):
cmd._process_create_or_alter_ast(schema, astnode, context)
else:
cmd._process_alter_ast(schema, astnode, context)
return cmd
def _apply_field_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
node: qlast.DDLOperation,
op: sd.AlterObjectProperty,
) -> None:
if op.property == 'target':
if op.new_value:
assert isinstance(op.new_value, so.ObjectShell)
node.commands.append(
qlast.SetPointerType(
value=utils.typeref_to_ast(schema, op.new_value),
),
)
elif op.property == 'computable':
if not op.new_value:
node.commands.append(
qlast.SetField(
name='expr',
value=None,
special_syntax=True,
),
)
elif op.property == 'on_target_delete':
node.commands.append(qlast.OnTargetDelete(cascade=op.new_value))
else:
super()._apply_field_ast(schema, context, node, op)
class DeleteLink(
LinkCommand,
pointers.DeletePointer[Link],
):
astnode = [qlast.DropConcreteLink, qlast.DropLink]
referenced_astnode = qlast.DropConcreteLink
# NB: target type cleanup (e.g. target compound type) is done by
# the DeleteProperty handler for the @target property.
def _get_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
if self.get_orig_attribute_value('from_alias'):
# This is an alias type, appropriate DDL would be generated
# from the corresponding Alter/DeleteAlias node.
return None
else:
return super()._get_ast(schema, context, parent_node=parent_node)<|fim▁end|>
|
field='cardinality',
):
pass
|
<|file_name|>fortios_wanopt_profile.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wanopt_profile
short_description: Configure WAN optimization profiles in Fortinet's FortiOS and FortiGate.<|fim▁hole|>description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wanopt feature and profile category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
version_added: 2.9
wanopt_profile:
description:
- Configure WAN optimization profiles.
default: null
type: dict
suboptions:
auth_group:
description:
- Optionally add an authentication group to restrict access to the WAN Optimization tunnel to peers in the authentication group. Source
wanopt.auth-group.name.
type: str
cifs:
description:
- Enable/disable CIFS (Windows sharing) WAN Optimization and configure CIFS WAN Optimization features.
type: dict
suboptions:
byte_caching:
description:
- Enable/disable byte-caching for HTTP. Byte caching reduces the amount of traffic by caching file data sent across the WAN and in
future serving if from the cache.
type: str
choices:
- enable
- disable
log_traffic:
description:
- Enable/disable logging.
type: str
choices:
- enable
- disable
port:
description:
- Single port number or port number range for CIFS. Only packets with a destination port number that matches this port number or
range are accepted by this profile.
type: int
prefer_chunking:
description:
- Select dynamic or fixed-size data chunking for HTTP WAN Optimization.
type: str
choices:
- dynamic
- fix
secure_tunnel:
description:
- Enable/disable securing the WAN Opt tunnel using SSL. Secure and non-secure tunnels use the same TCP port (7810).
type: str
choices:
- enable
- disable
status:
description:
- Enable/disable HTTP WAN Optimization.
type: str
choices:
- enable
- disable
tunnel_sharing:
description:
- Tunnel sharing mode for aggressive/non-aggressive and/or interactive/non-interactive protocols.
type: str
choices:
- private
- shared
- express-shared
comments:
description:
- Comment.
type: str
ftp:
description:
- Enable/disable FTP WAN Optimization and configure FTP WAN Optimization features.
type: dict
suboptions:
byte_caching:
description:
- Enable/disable byte-caching for HTTP. Byte caching reduces the amount of traffic by caching file data sent across the WAN and in
future serving if from the cache.
type: str
choices:
- enable
- disable
log_traffic:
description:
- Enable/disable logging.
type: str
choices:
- enable
- disable
port:
description:
- Single port number or port number range for FTP. Only packets with a destination port number that matches this port number or
range are accepted by this profile.
type: int
prefer_chunking:
description:
- Select dynamic or fixed-size data chunking for HTTP WAN Optimization.
type: str
choices:
- dynamic
- fix
secure_tunnel:
description:
- Enable/disable securing the WAN Opt tunnel using SSL. Secure and non-secure tunnels use the same TCP port (7810).
type: str
choices:
- enable
- disable
status:
description:
- Enable/disable HTTP WAN Optimization.
type: str
choices:
- enable
- disable
tunnel_sharing:
description:
- Tunnel sharing mode for aggressive/non-aggressive and/or interactive/non-interactive protocols.
type: str
choices:
- private
- shared
- express-shared
http:
description:
- Enable/disable HTTP WAN Optimization and configure HTTP WAN Optimization features.
type: dict
suboptions:
byte_caching:
description:
- Enable/disable byte-caching for HTTP. Byte caching reduces the amount of traffic by caching file data sent across the WAN and in
future serving if from the cache.
type: str
choices:
- enable
- disable
log_traffic:
description:
- Enable/disable logging.
type: str
choices:
- enable
- disable
port:
description:
- Single port number or port number range for HTTP. Only packets with a destination port number that matches this port number or
range are accepted by this profile.
type: int
prefer_chunking:
description:
- Select dynamic or fixed-size data chunking for HTTP WAN Optimization.
type: str
choices:
- dynamic
- fix
secure_tunnel:
description:
- Enable/disable securing the WAN Opt tunnel using SSL. Secure and non-secure tunnels use the same TCP port (7810).
type: str
choices:
- enable
- disable
ssl:
description:
- Enable/disable SSL/TLS offloading (hardware acceleration) for HTTPS traffic in this tunnel.
type: str
choices:
- enable
- disable
ssl_port:
description:
- Port on which to expect HTTPS traffic for SSL/TLS offloading.
type: int
status:
description:
- Enable/disable HTTP WAN Optimization.
type: str
choices:
- enable
- disable
tunnel_non_http:
description:
- Configure how to process non-HTTP traffic when a profile configured for HTTP traffic accepts a non-HTTP session. Can occur if an
application sends non-HTTP traffic using an HTTP destination port.
type: str
choices:
- enable
- disable
tunnel_sharing:
description:
- Tunnel sharing mode for aggressive/non-aggressive and/or interactive/non-interactive protocols.
type: str
choices:
- private
- shared
- express-shared
unknown_http_version:
description:
- How to handle HTTP sessions that do not comply with HTTP 0.9, 1.0, or 1.1.
type: str
choices:
- reject
- tunnel
- best-effort
mapi:
description:
- Enable/disable MAPI email WAN Optimization and configure MAPI WAN Optimization features.
type: dict
suboptions:
byte_caching:
description:
- Enable/disable byte-caching for HTTP. Byte caching reduces the amount of traffic by caching file data sent across the WAN and in
future serving if from the cache.
type: str
choices:
- enable
- disable
log_traffic:
description:
- Enable/disable logging.
type: str
choices:
- enable
- disable
port:
description:
- Single port number or port number range for MAPI. Only packets with a destination port number that matches this port number or
range are accepted by this profile.
type: int
secure_tunnel:
description:
- Enable/disable securing the WAN Opt tunnel using SSL. Secure and non-secure tunnels use the same TCP port (7810).
type: str
choices:
- enable
- disable
status:
description:
- Enable/disable HTTP WAN Optimization.
type: str
choices:
- enable
- disable
tunnel_sharing:
description:
- Tunnel sharing mode for aggressive/non-aggressive and/or interactive/non-interactive protocols.
type: str
choices:
- private
- shared
- express-shared
name:
description:
- Profile name.
required: true
type: str
tcp:
description:
- Enable/disable TCP WAN Optimization and configure TCP WAN Optimization features.
type: dict
suboptions:
byte_caching:
description:
- Enable/disable byte-caching for HTTP. Byte caching reduces the amount of traffic by caching file data sent across the WAN and in
future serving if from the cache.
type: str
choices:
- enable
- disable
byte_caching_opt:
description:
- Select whether TCP byte-caching uses system memory only or both memory and disk space.
type: str
choices:
- mem-only
- mem-disk
log_traffic:
description:
- Enable/disable logging.
type: str
choices:
- enable
- disable
port:
description:
- Single port number or port number range for TCP. Only packets with a destination port number that matches this port number or
range are accepted by this profile.
type: str
secure_tunnel:
description:
- Enable/disable securing the WAN Opt tunnel using SSL. Secure and non-secure tunnels use the same TCP port (7810).
type: str
choices:
- enable
- disable
ssl:
description:
- Enable/disable SSL/TLS offloading.
type: str
choices:
- enable
- disable
ssl_port:
description:
- Port on which to expect HTTPS traffic for SSL/TLS offloading.
type: int
status:
description:
- Enable/disable HTTP WAN Optimization.
type: str
choices:
- enable
- disable
tunnel_sharing:
description:
- Tunnel sharing mode for aggressive/non-aggressive and/or interactive/non-interactive protocols.
type: str
choices:
- private
- shared
- express-shared
transparent:
description:
- Enable/disable transparent mode.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure WAN optimization profiles.
fortios_wanopt_profile:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
wanopt_profile:
auth_group: "<your_own_value> (source wanopt.auth-group.name)"
cifs:
byte_caching: "enable"
log_traffic: "enable"
port: "7"
prefer_chunking: "dynamic"
secure_tunnel: "enable"
status: "enable"
tunnel_sharing: "private"
comments: "<your_own_value>"
ftp:
byte_caching: "enable"
log_traffic: "enable"
port: "16"
prefer_chunking: "dynamic"
secure_tunnel: "enable"
status: "enable"
tunnel_sharing: "private"
http:
byte_caching: "enable"
log_traffic: "enable"
port: "24"
prefer_chunking: "dynamic"
secure_tunnel: "enable"
ssl: "enable"
ssl_port: "28"
status: "enable"
tunnel_non_http: "enable"
tunnel_sharing: "private"
unknown_http_version: "reject"
mapi:
byte_caching: "enable"
log_traffic: "enable"
port: "36"
secure_tunnel: "enable"
status: "enable"
tunnel_sharing: "private"
name: "default_name_40"
tcp:
byte_caching: "enable"
byte_caching_opt: "mem-only"
log_traffic: "enable"
port: "<your_own_value>"
secure_tunnel: "enable"
ssl: "enable"
ssl_port: "48"
status: "enable"
tunnel_sharing: "private"
transparent: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_wanopt_profile_data(json):
option_list = ['auth_group', 'cifs', 'comments',
'ftp', 'http', 'mapi',
'name', 'tcp', 'transparent']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def wanopt_profile(data, fos):
vdom = data['vdom']
state = data['state']
wanopt_profile_data = data['wanopt_profile']
filtered_data = underscore_to_hyphen(filter_wanopt_profile_data(wanopt_profile_data))
if state == "present":
return fos.set('wanopt',
'profile',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('wanopt',
'profile',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_wanopt(data, fos):
if data['wanopt_profile']:
resp = wanopt_profile(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
    # Ansible entry point: declare the argument spec, connect to the
    # FortiGate (httpapi socket or legacy fortiosapi), apply the requested
    # wanopt-profile change and exit with the result.
    fields = {
        # Legacy-mode connection parameters (ignored when running over
        # httpapi, where the connection comes from the socket).
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        # Mirror of the FortiOS wanopt/profile schema; every leaf option is
        # optional so partial configurations can be pushed.
        "wanopt_profile": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "auth_group": {"required": False, "type": "str"},
                "cifs": {"required": False, "type": "dict",
                         "options": {
                             "byte_caching": {"required": False, "type": "str",
                                              "choices": ["enable", "disable"]},
                             "log_traffic": {"required": False, "type": "str",
                                             "choices": ["enable", "disable"]},
                             "port": {"required": False, "type": "int"},
                             "prefer_chunking": {"required": False, "type": "str",
                                                 "choices": ["dynamic", "fix"]},
                             "secure_tunnel": {"required": False, "type": "str",
                                               "choices": ["enable", "disable"]},
                             "status": {"required": False, "type": "str",
                                        "choices": ["enable", "disable"]},
                             "tunnel_sharing": {"required": False, "type": "str",
                                                "choices": ["private", "shared", "express-shared"]}
                         }},
                "comments": {"required": False, "type": "str"},
                "ftp": {"required": False, "type": "dict",
                        "options": {
                            "byte_caching": {"required": False, "type": "str",
                                             "choices": ["enable", "disable"]},
                            "log_traffic": {"required": False, "type": "str",
                                            "choices": ["enable", "disable"]},
                            "port": {"required": False, "type": "int"},
                            "prefer_chunking": {"required": False, "type": "str",
                                                "choices": ["dynamic", "fix"]},
                            "secure_tunnel": {"required": False, "type": "str",
                                              "choices": ["enable", "disable"]},
                            "status": {"required": False, "type": "str",
                                       "choices": ["enable", "disable"]},
                            "tunnel_sharing": {"required": False, "type": "str",
                                               "choices": ["private", "shared", "express-shared"]}
                        }},
                "http": {"required": False, "type": "dict",
                         "options": {
                             "byte_caching": {"required": False, "type": "str",
                                              "choices": ["enable", "disable"]},
                             "log_traffic": {"required": False, "type": "str",
                                             "choices": ["enable", "disable"]},
                             "port": {"required": False, "type": "int"},
                             "prefer_chunking": {"required": False, "type": "str",
                                                 "choices": ["dynamic", "fix"]},
                             "secure_tunnel": {"required": False, "type": "str",
                                               "choices": ["enable", "disable"]},
                             "ssl": {"required": False, "type": "str",
                                     "choices": ["enable", "disable"]},
                             "ssl_port": {"required": False, "type": "int"},
                             "status": {"required": False, "type": "str",
                                        "choices": ["enable", "disable"]},
                             "tunnel_non_http": {"required": False, "type": "str",
                                                 "choices": ["enable", "disable"]},
                             "tunnel_sharing": {"required": False, "type": "str",
                                                "choices": ["private", "shared", "express-shared"]},
                             "unknown_http_version": {"required": False, "type": "str",
                                                      "choices": ["reject", "tunnel", "best-effort"]}
                         }},
                "mapi": {"required": False, "type": "dict",
                         "options": {
                             "byte_caching": {"required": False, "type": "str",
                                              "choices": ["enable", "disable"]},
                             "log_traffic": {"required": False, "type": "str",
                                             "choices": ["enable", "disable"]},
                             "port": {"required": False, "type": "int"},
                             "secure_tunnel": {"required": False, "type": "str",
                                               "choices": ["enable", "disable"]},
                             "status": {"required": False, "type": "str",
                                        "choices": ["enable", "disable"]},
                             "tunnel_sharing": {"required": False, "type": "str",
                                                "choices": ["private", "shared", "express-shared"]}
                         }},
                # 'name' is the mkey used for deletion in wanopt_profile().
                "name": {"required": True, "type": "str"},
                "tcp": {"required": False, "type": "dict",
                        "options": {
                            "byte_caching": {"required": False, "type": "str",
                                             "choices": ["enable", "disable"]},
                            "byte_caching_opt": {"required": False, "type": "str",
                                                 "choices": ["mem-only", "mem-disk"]},
                            "log_traffic": {"required": False, "type": "str",
                                            "choices": ["enable", "disable"]},
                            # NOTE(review): declared as "str" while the other
                            # sections declare port as "int" -- confirm against
                            # the FortiOS schema (tcp ports may be a range).
                            "port": {"required": False, "type": "str"},
                            "secure_tunnel": {"required": False, "type": "str",
                                              "choices": ["enable", "disable"]},
                            "ssl": {"required": False, "type": "str",
                                    "choices": ["enable", "disable"]},
                            "ssl_port": {"required": False, "type": "int"},
                            "status": {"required": False, "type": "str",
                                       "choices": ["enable", "disable"]},
                            "tunnel_sharing": {"required": False, "type": "str",
                                               "choices": ["private", "shared", "express-shared"]}
                        }},
                "transparent": {"required": False, "type": "str",
                                "choices": ["enable", "disable"]}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # httpapi transport: reuse the connection behind the module socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_wanopt(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy transport: direct connection via the optional fortiosapi
        # package, imported lazily so httpapi users don't need it installed.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_wanopt(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        # NOTE(review): "Error in repo" reads like a copy/paste from another
        # module -- confirm the intended failure message.
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()<|fim▁end|>
| |
<|file_name|>JobUtil.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2010-2011 Raisonne Techonologies.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
*/
package com.raisonne.quartz.scheduler.job.service.impl;
/**
 * <p>A helper class which is responsible for fetching the list of all jobs
 * under a certain group, as well as individual jobs matching a search
 * criteria.</p>
 * <p>
 * Triggers associated with the jobs are expected to be handled here as well.
 * NOTE(review): the original sentence was truncated mid-way; confirm the
 * intended contract for trigger handling before relying on this summary.
 * </p>
 *
 * @author Umesh Awasthi
 */
public class JobUtil {
    // Intentionally empty for now: job-lookup helper methods are expected
    // to be added here.
}
| |
<|file_name|>cas_manager.py<|end_file_name|><|fim▁begin|>"""
file: cas_manager.py<|fim▁hole|>authors: Christoffer Rosen <[email protected]>
date: Jan. 2014
description: This module contains the CAS_manager class, which is a thread that continously checks if there
is work that needs to be done. Also contains supporting classes of Worker and ThreadPool used by
the CAS_Manager.
"""
from analyzer.analyzer import *
from ingester.ingester import *
from orm.repository import *
import calendar # to convert datetime to unix time
from caslogging import logging
from queue import *
import threading
import time
from monthdelta import *
BACKGROUND_INTERVAL = 60 # wait 1 minutes
class CAS_Manager(threading.Thread):
"""
Thread that continiously checks if there is work to be done and adds it to
the thread pool work queue
"""
def __init__(self):
"""Constructor"""
threading.Thread.__init__(self)
numOfWorkers = int(config['system']['workers'])
self.workQueue = ThreadPool(numOfWorkers)
self.modelQueue = Queue()
def checkIngestion(self):
"""Check if any repo needs to be ingested"""
session = Session()
repo_update_freq = int(config['repoUpdates']['freqInDays'])
refresh_date = str(datetime.utcnow() - timedelta(days=repo_update_freq))
repos_to_get = (session.query(Repository)
.filter(
(Repository.status == "Waiting to be Ingested") |
(Repository.ingestion_date < refresh_date) &
(Repository.status != "Error") &
(Repository.status != "Analyzing"))
.all())
for repo in repos_to_get:
logging.info("Adding repo " + repo.id + " to work queue for ingesting")
repo.status = "In Queue to be Ingested"
session.commit() # update the status of repo
self.workQueue.add_task(ingest,repo.id)
session.close()
def checkAnalyzation(self):
"""Checks if any repo needs to be analyzed"""
session = Session()
repo_update_freq = int(config['repoUpdates']['freqInDays'])
refresh_date = str(datetime.utcnow() - timedelta(days=repo_update_freq))
repos_to_get = (session.query(Repository)
.filter( (Repository.status == "Waiting to be Analyzed") )
.all()
)
for repo in repos_to_get:
logging.info("Adding repo " + repo.id + " to work queue for analyzing.")
repo.status = "In Queue to be Analyzed"
session.commit() # update the status of repo
self.workQueue.add_task(analyze, repo.id)
session.close()
def checkModel(self):
"""Check if any repo needs metrics to be generated"""
session = Session()
repos_to_get = (session.query(Repository)
.filter(
(Repository.status == "In Queue to Build Model") )
.all())
for repo in repos_to_get:
logging.info("Adding repo " + repo.id + " to model queue to finish analyzing")
repo.status = "Building Model"
session.commit() # update status of repo
self.modelQueue.put(repo.id)
session.close()
def checkBuildModel(self):
""" Checks if any repo is awaiting to build model.
We are using a queue because we can't concurrently access R """
session = Session()
if self.modelQueue.empty() != True:
repo_id = self.modelQueue.get()
repo = (session.query(Repository).filter(Repository.id == repo_id).first())
# use data only up to X months prior we won't have sufficent data to build models
# as there may be bugs introduced in those months that haven't been fixed, skewing
# our model.
glm_model_time = int(config['glm_modeling']['months'])
data_months_datetime = datetime.utcnow() - monthdelta(glm_model_time)
data_months_unixtime = calendar.timegm(data_months_datetime.utctimetuple())
# all commits for repo prior to current time - glm model time
training_commits = (session.query(Commit)
.filter(
( Commit.repository_id == repo_id ) &
( Commit.author_date_unix_timestamp < str(data_months_unixtime))
)
.order_by( Commit.author_date_unix_timestamp.desc() )
.all())
# all commits for repo after or on current time - glm model time
testing_commits = (session.query(Commit)
.filter(
( Commit.repository_id == repo_id ) &
( Commit.author_date_unix_timestamp >= str(data_months_unixtime)))
.all())
try:
metrics_generator = MetricsGenerator(repo_id, training_commits, testing_commits)
metrics_generator.buildAllModels()
# montly data dump - or rather, every 30 days.
dump_refresh_date = str(datetime.utcnow() - timedelta(days=30))
if repo.last_data_dump == None or repo.last_data_dump < dump_refresh_date:
logging.info("Generating a monthly data dump for repository: " + repo_id)
# Get all commits for the repository
all_commits = (session.query(Commit)
.filter(
( Commit.repository_id == repo_id )
)
.order_by( Commit.author_date_unix_timestamp.desc() )
.all())
metrics_generator.dumpData(all_commits)
repo.last_data_dump = str(datetime.now().replace(microsecond=0))
# Notify user if repo has never been analyzed previously
if repo.analysis_date is None:
self.notify(repo)
logging.info("Repo " + repo_id + " finished analyzing.")
repo.analysis_date = str(datetime.now().replace(microsecond=0))
repo.status = "Analyzed"
session.commit() # update status of repo
session.close()
# uh-oh
except Exception as e:
logging.exception("Got an exception building model for repository " + repo_id)
repo.status = "Error"
session.commit() # update repo status
session.close()
def notify(self, repo):
""" Send e-mail notifications if applicable to a repo
used by checkBuildModel """
notify = False
notifier = None
logging.info("Notifying subscribed users for repository " + repo.id)
# Create the Notifier
gmail_user = config['gmail']['user']
gmail_pass = config['gmail']['pass']
notifier = Notifier(gmail_user, gmail_pass, repo.name)
# Add subscribers if applicable
if repo.email is not None:
notifier.addSubscribers([repo.email, gmail_user])
else:
notifier.addSubscribers([gmail_user])
notifier.notify()
def run(self):
while(True):
### --- Check repository table if there is any work to be done --- ###
self.checkIngestion()
self.checkAnalyzation()
self.checkModel()
self.checkBuildModel()
time.sleep(BACKGROUND_INTERVAL)
class Worker(threading.Thread):
    """Daemon thread that pulls ``(func, args, kwargs)`` work items off a
    shared queue and executes them until the process exits."""

    def __init__(self, tasks):
        threading.Thread.__init__(self)
        self.tasks = tasks
        # Daemon threads do not keep the interpreter alive on shutdown.
        self.daemon = True
        self.start()

    def run(self):
        while True:
            func, call_args, call_kwargs = self.tasks.get()
            try:
                func(*call_args, **call_kwargs)
            except Exception as err:
                # Report and keep the worker alive for subsequent tasks.
                print(err)
            self.tasks.task_done()
class ThreadPool:
    """Fixed-size pool of daemon Worker threads fed from a bounded queue."""

    def __init__(self, num_threads):
        # The queue is bounded to num_threads, so add_task blocks once the
        # pool is saturated instead of growing without limit.
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        """Enqueue ``func(*args, **kargs)`` for execution by a worker."""
        self.tasks.put((func, args, kargs))

    def wait_completion(self):
        """Block until every queued task has been processed."""
        self.tasks.join()
| |
<|file_name|>fuzzy.js<|end_file_name|><|fim▁begin|>// Fuzzy utility
'use strict';

var filter = require('array-filter');

/**
 * Build a case-insensitive fuzzy matcher over a list of objects.
 *
 * @param {Array<Object>} items - objects to search through
 * @param {string} key - property of each item to match against
 * @returns {function(string): Array<Object>} search function returning the
 *   items whose `key` property matches the query
 */
var fuzzy = function (items, key) {
  return function (query) {
    query = query.toLowerCase();
    // NOTE(review): the query is used verbatim as a regular expression, so
    // regex metacharacters in user input can throw or over-match; escape the
    // query if literal substring matching is intended.
    var re = new RegExp(query, 'i');
    return filter(items, function (item) {
      return re.test(item[key]);
    });
  };
};

module.exports = fuzzy;
<|file_name|>smart-patcher.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# This programs is intended to manage patches and apply them automatically
# through email in an automated fashion.
#
# Copyright (C) 2008 Imran M Yousuf ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import poplib, email, re, sys, xmlConfigs, utils;
class ReferenceNode:
    """In-memory node of the e-mail reference graph built from patch
    messages.

    Each node remembers its own message id (``node``), the parsed e-mail
    message, the message ids it references, its child nodes keyed by message
    id, and whether it has already been slotted into the tree.

    Bug fix: the ``slotted`` default was ``bool("false")``, which is ``True``
    in Python (any non-empty string is truthy); the clearly intended default
    is ``False``.
    """

    def __init__(self, node, emailMessage, references=None, children=None, slotted=False):
        self.node = node
        # Defensive copies so callers sharing a container (or the defaults)
        # cannot leak state between instances.
        self.children = dict(children) if children is not None else {}
        self.references = list(references) if references is not None else []
        self.slotted = slotted
        self.emailMessage = emailMessage

    def get_node(self):
        return self.node

    def get_children(self):
        return self.children

    def set_node(self, node):
        self.node = node

    def set_children(self, children):
        self.children = children

    def get_references(self):
        return self.references

    def is_slotted(self):
        return self.slotted

    def set_slotted(self, slotted):
        self.slotted = slotted

    def get_message(self):
        return self.emailMessage

    def __repr__(self):
        return self.node + "\nREF: " + str(self.references) + "\nChildren: " + str(self.children.keys()) + "\n"
def handleNode(currentNodeInAction, referenceNodeNow, referencesToCheck, patchMessageReferenceNode):
    """Advance one step while slotting ``currentNodeInAction`` into the tree.

    Walks the outstanding reference ids; the first one already present among
    ``referenceNodeNow``'s children is consumed from ``referencesToCheck``
    and the corresponding node is returned so the caller can descend into it.
    If no references remain at all, the current node is attached as a child
    of ``referenceNodeNow`` and ``None`` is returned to stop the descent.
    """
    for ref in list(referencesToCheck):
        if ref in referenceNodeNow.get_children():
            referencesToCheck.remove(ref)
            return patchMessageReferenceNode[ref]
    if not referencesToCheck:
        referenceNodeNow.get_children()[currentNodeInAction.get_node()] = currentNodeInAction
def makeChildren(patchMessageReferenceNode):
    """Link every node in ``patchMessageReferenceNode`` into the reference
    tree, following each node's reference chain via ``handleNode``.

    Fix: ``dict.keys()`` followed by ``.sort()`` only works on Python 2
    (dict views have no ``sort``); ``sorted()`` is equivalent and works on
    both interpreters.
    """
    ref_keys = sorted(patchMessageReferenceNode.keys())
    for messageId in ref_keys:
        referenceNode = patchMessageReferenceNode[messageId]
        utils.verboseOutput(verbose, "Managing Message Id:", referenceNode.get_node())
        referenceIds = referenceNode.get_references()
        referenceIdsClone = referenceIds[:]
        utils.verboseOutput(verbose, "Cloned References: ", referenceIdsClone)
        if len(referenceIds) > 0:
            # Start the descent at the first referenced message and keep
            # stepping until handleNode signals completion with None.
            nextNode = patchMessageReferenceNode[referenceIdsClone[0]]
            referenceIdsClone.remove(referenceIdsClone[0])
            while nextNode != None:
                utils.verboseOutput(verbose, "Next Node: ", nextNode.get_node())
                utils.verboseOutput(verbose, "Curent Node: ", referenceNode.get_node())
                utils.verboseOutput(verbose, "REF: ", referenceIdsClone)
                nextNode = handleNode(referenceNode, nextNode, referenceIdsClone, patchMessageReferenceNode)
arguments = sys.argv
verbose = "false"
pseudoArgs = arguments[:]
while len(pseudoArgs) > 1 :
argument = pseudoArgs[1]
if argument == "-v" or argument == "--verbose" :
verbose = "true"
pseudoArgs.remove(argument)
utils.verboseOutput(verbose, "Checking POP3 for gmail")
try:
emailConfig = xmlConfigs.initializePopConfig("./email-configuration.xml")
myPop = emailConfig.get_pop3_connection()
numMessages = len(myPop.list()[1])
patchMessages = dict()
for i in range(numMessages):
utils.verboseOutput(verbose, "Index: ", i)
totalContent = ""
for content in myPop.retr(i+1)[1]:
totalContent += content + '\n'
msg = email.message_from_string(totalContent)
if 'subject' in msg :
subject = msg['subject']
subjectPattern = "^\[.*PATCH.*\].+"
subjectMatch = re.match(subjectPattern, subject)
utils.verboseOutput(verbose, "Checking subject: ", subject)
if subjectMatch == None :
continue
else :
continue
messageId = ""
if 'message-id' in msg:
messageId = re.search("<(.*)>", msg['message-id']).group(1)
utils.verboseOutput(verbose, 'Message-ID:', messageId)
referenceIds = []
if 'references' in msg:
references = msg['references']
referenceIds = re.findall("<(.*)>", references)
utils.verboseOutput(verbose, "References: ", referenceIds)
currentNode = ReferenceNode(messageId, msg, referenceIds)
patchMessages[messageId] = currentNode
currentNode.set_slotted(bool("false"))
utils.verboseOutput(verbose, "**************Make Children**************")
makeChildren(patchMessages)
utils.verboseOutput(verbose, "--------------RESULT--------------")
utils.verboseOutput(verbose, patchMessages)
except:
utils.verboseOutput(verbose, "Error: ", sys.exc_info())<|fim▁end|>
|
for messageId in ref_keys:
referenceNode = patchMessageReferenceNode[messageId]
utils.verboseOutput(verbose, "Managing Message Id:", referenceNode.get_node())
|
<|file_name|>Service.ts<|end_file_name|><|fim▁begin|>import { Barman } from '.';
export class Service {
id?: number;
startAt: Date;
endAt: Date;
nbMax: number;<|fim▁hole|>
barmen?: Barman[];
constructor(values: Object = {}) {
Object.assign(this, values);
const castVal = values as Service;
this.startAt = castVal.startAt ? new Date(castVal.startAt) : null;
this.endAt = castVal.endAt ? new Date(castVal.endAt) : null;
}
isPassed(): boolean {
return this.endAt.getTime() < Date.now();
}
}<|fim▁end|>
|
// Association
|
<|file_name|>bitcoin_sq.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="sq" version="2.1">
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Vector</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+39"/>
<source><b>Vector</b> version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+41"/>
<source>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2014 The Vector developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or <a href="http://www.opensource.org/licenses/mit-license.php">http://www.opensource.org/licenses/mit-license.php</a>.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (<a href="https://www.openssl.org/">https://www.openssl.org/</a>) and cryptographic software written by Eric Young (<a href="mailto:[email protected]">[email protected]</a>) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Double-click to edit address or label</source>
<translation>Klikoni 2 herë për të ndryshuar adressën ose etiketën</translation>
</message>
<message>
<location line="+24"/>
<source>Create a new address</source>
<translation>Krijo një adresë të re</translation>
</message>
<message>
<location line="+10"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Kopjo adresën e zgjedhur në memorjen e sistemit </translation>
</message>
<message>
<location line="-7"/>
<source>&New Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-43"/>
<source>These are your Vector addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+53"/>
<source>&Copy Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Show &QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Sign a message to prove you own a Vector address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Delete the currently selected address from the list</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-10"/>
<source>Verify a message to ensure it was signed with a specified Vector address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>&Delete</source>
<translation>&Fshi</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+66"/>
<source>Copy &Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+248"/>
<source>Export Address Book Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Skedar i ndarë me pikëpresje(*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+145"/>
<source>Label</source>
<translation>Etiketë</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adresë</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(pa etiketë)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Futni frazkalimin</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Frazkalim i ri</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Përsërisni frazkalimin e ri</translation>
</message>
<message>
<location line="+33"/>
<source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>For staking only</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+38"/>
<source>Encrypt wallet</source>
<translation>Enkripto portofolin</translation>
</message>
<message>
<location line="+7"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Ky veprim ka nevojë per frazkalimin e portofolit tuaj që të ç'kyç portofolin.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>ç'kyç portofolin.</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Ky veprim kërkon frazkalimin e portofolit tuaj që të dekriptoj portofolin.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Dekripto portofolin</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Ndrysho frazkalimin</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Futni frazkalimin e vjetër dhe të ri në portofol. </translation>
</message>
<message>
<location line="+45"/>
<source>Confirm wallet encryption</source>
<translation>Konfirmoni enkriptimin e portofolit</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR COINS</b>!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+103"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-133"/>
<location line="+60"/>
<source>Wallet encrypted</source>
<translation>Portofoli u enkriptua</translation>
</message>
<message>
<location line="-140"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>ten or more random characters</b>, or <b>eight or more words</b>.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<source>Vector will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+44"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Enkriptimi i portofolit dështoi</translation>
</message>
<message>
<location line="-56"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Enkriptimi i portofolit dështoi për shkak të një gabimi të brëndshëm. portofoli juaj nuk u enkriptua.</translation>
</message>
<message>
<location line="+7"/>
<location line="+50"/>
<source>The supplied passphrases do not match.</source>
<translation>Frazkalimet e plotësuara nuk përputhen.</translation>
</message>
<message>
<location line="-38"/>
<source>Wallet unlock failed</source>
<translation>ç'kyçja e portofolit dështoi</translation>
</message>
<message>
<location line="+1"/>
<location line="+12"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>Frazkalimi i futur për dekriptimin e portofolit nuk ishte i saktë.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Dekriptimi i portofolit dështoi</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+297"/>
<source>Sign &message...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-64"/>
<source>Show general overview of wallet</source>
<translation>Trego një përmbledhje te përgjithshme të portofolit</translation>
</message>
<message>
<location line="+17"/>
<source>&Transactions</source>
<translation>&Transaksionet</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Shfleto historinë e transaksioneve</translation>
</message>
<message>
<location line="+5"/>
<source>&Address Book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit the list of stored addresses and labels</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-18"/>
<source>Show the list of addresses for receiving payments</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+34"/>
<source>E&xit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Mbyllni aplikacionin</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about Vector</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Opsione</translation>
</message>
<message>
<location line="+4"/>
<source>&Encrypt Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>&Backup Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>&Export...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-55"/>
<source>Send coins to a Vector address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+39"/>
<source>Modify configuration options for Vector</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-13"/>
<source>Encrypt or decrypt wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Backup wallet to another location</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Ndrysho frazkalimin e përdorur per enkriptimin e portofolit</translation>
</message>
<message>
<location line="+10"/>
<source>&Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-5"/>
<source>&Verify message...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-214"/>
<location line="+555"/>
<source>Vector</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-555"/>
<source>Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+193"/>
<source>&About Vector</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Unlock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>&Lock Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Lock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>&File</source>
<translation>&Skedar</translation>
</message>
<message>
<location line="+8"/>
<source>&Settings</source>
<translation>&Konfigurimet</translation>
</message>
<message>
<location line="+8"/>
<source>&Help</source>
<translation>&Ndihmë</translation>
</message>
<message>
<location line="+17"/>
<source>Tabs toolbar</source>
<translation>Shiriti i mjeteve</translation>
</message>
<message>
<location line="+46"/>
<location line="+9"/>
<source>[testnet]</source>
<translation>[testo rrjetin]</translation>
</message>
<message>
<location line="+0"/>
<location line="+58"/>
<source>Vector client</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+70"/>
<source>%n active connection(s) to Vector network</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+488"/>
<source>Staking.<br>Your weight is %1<br>Network weight is %2<br>Expected time to earn reward is %3</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Not staking because wallet is locked</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is syncing</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because you don't have mature coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-812"/>
<source>&Dashboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>&Receive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>&Send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>&Unlock Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+277"/>
<source>Up to date</source>
<translation>I azhornuar</translation>
</message>
<message>
<location line="+43"/>
<source>Catching up...</source>
<translation>Duke u azhornuar...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Sent transaction</source>
<translation>Dërgo transaksionin</translation>
</message>
<message>
<location line="+1"/>
<source>Incoming transaction</source>
<translation>Transaksion në ardhje</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<location line="+15"/>
<source>URI handling</source>
        <translation type="unfinished"/>
    </message>
    <message>
<location line="+15"/>
<source>URI can not be parsed! This can be caused by an invalid Vector address or malformed URI parameters.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Wallet is <b>not encrypted</b></source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Portofoli po <b> enkriptohet</b> dhe është <b> i ç'kyçur</b></translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Portofoli po <b> enkriptohet</b> dhe është <b> i kyçur</b></translation>
</message>
<message>
<location line="+24"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+91"/>
<source>%n second(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n minute(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="-429"/>
<location line="+433"/>
<source>%n hour(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="-456"/>
<source>Processed %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+27"/>
<location line="+433"/>
<source>%n day(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="-429"/>
<location line="+6"/>
<source>%n week(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+0"/>
<source>%1 and %2</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+0"/>
<source>%n year(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>%1 behind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Last received block was generated %1 ago.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+69"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+324"/>
<source>Not staking</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="+104"/>
<source>A fatal error occurred. Vector can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+110"/>
<source>Network Alert</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<location filename="../forms/coincontroldialog.ui" line="+14"/>
<source>Coin Control</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Quantity:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Bytes:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+48"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Priority:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+48"/>
<source>Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="+537"/>
<source>no</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/coincontroldialog.ui" line="+51"/>
<source>After Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Change:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+69"/>
<source>(un)select all</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Tree mode</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>List mode</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+45"/>
<source>Amount</source>
<translation>Sasia</translation>
</message>
<message>
<location line="+5"/>
<source>Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Address</source>
<translation>Adresë</translation>
</message>
<message>
<location line="+5"/>
<source>Date</source>
<translation>Data</translation>
</message>
<message>
<location line="+5"/>
<source>Confirmations</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Priority</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="-500"/>
<source>Copy address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<location line="+26"/>
<source>Copy amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-25"/>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>Copy quantity</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Copy fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+317"/>
<source>highest</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>high</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>medium-high</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>low-medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>low</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>lowest</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+140"/>
<source>DUST</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>yes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>This label turns red, if the transaction size is bigger than 10000 bytes.
This means a fee of at least %1 per kb is required.
Can vary +/- 1 Byte per input.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transactions with higher priority get more likely into a block.
This label turns red, if the priority is smaller than "medium".
This means a fee of at least %1 per kb is required.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if any recipient receives an amount smaller than %1.
This means a fee of at least %2 is required.
Amounts below 0.546 times the minimum relay fee are shown as DUST.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if the change is smaller than %1.
This means a fee of at least %2 is required.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+36"/>
<location line="+66"/>
<source>(no label)</source>
<translation>(pa etiketë)</translation>
</message>
<message>
<location line="-9"/>
<source>change from %1 (%2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>(change)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Ndrysho Adresën</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Etiketë</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Adresa</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>Adresë e re pritëse</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Adresë e re dërgimi</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Ndrysho adresën pritëse</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>ndrysho adresën dërguese</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>Adresa e dhënë "%1" është e zënë në librin e adresave. </translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Vector address.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Nuk mund të ç'kyçet portofoli.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Krijimi i çelësit të ri dështoi.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+426"/>
<location line="+12"/>
<source>Vector-Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Opsionet</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Reserved amount does not participate in staking and is therefore spendable at any time.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Reserve</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Automatically start Vector after logging in to the system.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Start Vector on system login</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Vector client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Proxy &IP:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-57"/>
<source>Connect to the Vector network through a SOCKS5 proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS5 proxy:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+90"/>
<source>&Window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Vector.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Whether to show coin control features or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Display coin &control features (experts only!)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Whether to select the coin outputs randomly or with minimal coin age.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Minimize weight consumption (experimental)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use black visual theme (requires restart)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+47"/>
<source>default</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+148"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Vector.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Formilarë</translation>
</message>
<message>
<location line="+46"/>
<location line="+247"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Vector network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-173"/>
<source>Stake:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Unconfirmed:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-113"/>
<source>Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Spendable:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Your current spendable balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+80"/>
<source>Immature:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Total:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Your current total balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<source><b>Recent transactions</b></source>
<translation><b>Transaksionet e fundit</b></translation>
</message>
<message>
<location line="-118"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-32"/>
<source>Total of coins that was staked, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start vector: click-to-pay handler</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<source>N/A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-194"/>
<source>Client version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+197"/>
<source>&Network Traffic</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+52"/>
<source>&Clear</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Totals</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+64"/>
<source>In:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+80"/>
<source>Out:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-383"/>
<source>Last block time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Show the Vector-Qt help message to get a list with possible Vector command-line options.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-237"/>
<source>Build date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-104"/>
<source>Vector - Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Vector Core</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+256"/>
<source>Debug log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Open the Vector debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../rpcconsole.cpp" line="+325"/>
<source>Welcome to the Vector RPC console.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+127"/>
<source>%1 B</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 KB</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 MB</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 GB</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>%1 m</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>%1 h</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 h %2 m</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+182"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Dërgo Monedha</translation>
</message>
<message>
<location line="+76"/>
<source>Coin Control Features</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Inputs...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>automatically selected</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Insufficient funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Quantity:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<location line="+35"/>
<source>0</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-19"/>
<source>Bytes:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+51"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Priority:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>no</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>After Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<source>custom change address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+106"/>
<source>Send to multiple recipients at once</source>
<translation>Dërgo marrësve të ndryshëm njëkohësisht</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Remove all transaction fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>Balance:</source>
<translation>Balanca:</translation>
</message>
<message>
<location line="+47"/>
<source>Confirm the send action</source>
<translation>Konfirmo veprimin e dërgimit</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-174"/>
<source>Enter a Vector address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Copy quantity</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+87"/>
<source><b>%1</b> to %2 (%3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Konfirmo dërgimin e monedhave</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The recipient address is not valid, please recheck.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>Shuma e paguar duhet të jetë më e madhe se 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+247"/>
<source>WARNING: Invalid Vector address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>(no label)</source>
<translation>(pa etiketë)</translation>
</message>
<message>
<location line="+4"/>
<source>WARNING: unknown change address</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>Sh&uma:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>Paguaj &drejt:</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Krijoni një etiketë për këtë adresë që t'ia shtoni librit të adresave</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&Etiketë:</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Ngjit nga memorja e sistemit</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Vector address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+124"/>
<source>&Sign Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-118"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+203"/>
<source>Choose an address from the address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-193"/>
<location line="+203"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-193"/>
<source>Paste address from clipboard</source>
<translation>Ngjit nga memorja e sistemit</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>Copy the current signature to the system clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Vector address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all sign message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-87"/>
<location line="+70"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-64"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Vector address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all verify message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Vector address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Enter Vector signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+85"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TrafficGraphWidget</name>
<message>
<location filename="../trafficgraphwidget.cpp" line="+75"/>
<source>KB/s</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+25"/>
<source>Open until %1</source>
<translation>Hapur deri më %1</translation>
</message>
<message>
<location line="+6"/>
<source>conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/I pakonfirmuar</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 konfirmimet</translation>
</message>
<message>
<location line="+17"/>
<source>Status</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Data</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<location line="+13"/>
<source>From</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<location line="+19"/>
<location line="+58"/>
<source>To</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-74"/>
<location line="+2"/>
<source>own address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+34"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Inputs</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Amount</source>
<translation>Sasia</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-202"/>
<source>, has not been successfully broadcast yet</source>
<translation>, nuk është transmetuar me sukses deri tani</translation>
</message>
<message numerus="yes">
<location line="-36"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+67"/>
<source>unknown</source>
<translation>i/e panjohur</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Detajet e transaksionit</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Ky panel tregon një përshkrim të detajuar të transaksionit</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+231"/>
<source>Date</source>
<translation>Data</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Lloji</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adresë</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Sasia</translation>
</message>
<message>
<location line="+52"/>
<source>Open until %1</source>
<translation>Hapur deri më %1</translation>
</message>
<message>
<location line="+12"/>
<source>Confirmed (%1 confirmations)</source>
<translation>I/E konfirmuar(%1 konfirmime)</translation>
</message>
<message numerus="yes">
<location line="-15"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Ky bllok nuk është marrë nga asnjë nyje tjetër dhe ka shumë mundësi të mos pranohet!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>I krijuar por i papranuar</translation>
</message>
<message>
<location line="+42"/>
<source>Received with</source>
<translation>Marrë me</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Dërguar drejt</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Pagesë ndaj vetvetes</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Minuar</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(p/a)</translation>
</message>
<message>
<location line="+194"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+54"/>
<location line="+17"/>
<source>All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-16"/>
<source>Today</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Received with</source>
<translation>Marrë me</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Dërguar drejt</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Minuar</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+138"/>
<source>Export Transaction Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Skedar i ndarë me presje (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Data</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Lloji</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Etiketë</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Adresë</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Sasia</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+212"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+171"/>
<source>Vector version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Send command to -server or vectord</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>List commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Get help for a command</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-145"/>
<source>Options:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify configuration file (default: vector.conf)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Specify pid file (default: vectord.pid)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify wallet file (within data directory)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-25"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=vectorrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Vector Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Listen for connections on <port> (default: 1715 or testnet: 11715)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Specify your own public address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Bind to given address. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Always query for peer addresses via DNS lookup (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-35"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+62"/>
<source>Listen for JSON-RPC connections on <port> (default: 1716 or testnet: 11716)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-16"/>
<source>Accept command line and JSON-RPC commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Run in the background as a daemon and accept commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use the test network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-23"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-28"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+93"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-103"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Vector will not work properly.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+130"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-16"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-34"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Block creation options:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-67"/>
<source>Connect only to the specified node(s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+101"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Invalid amount for -reservebalance=<amount></source>
<translation type="unfinished"/>
</message>
<message>
<location line="-89"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-16"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+30"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-38"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+34"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-34"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-41"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-25"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+45"/>
<source>Username for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+54"/>
<source>Verifying database integrity...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-52"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-59"/>
<source>Password for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-47"/>
<source>Connect through SOCKS5 proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Output debugging information (default: 0, supplying <category> is optional)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>If <category> is not supplied, output all debugging information.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source><category> can be:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly. This is intended for regression testing tools and app development.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Wait for RPC server to start</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Require a confirmations for change (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Upgrade wallet to latest format</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Imports blocks from external VEC000?.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Keep at most <n> MiB of unconnectable blocks in memory (default: %u)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Server certificate file (default: server.cert)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: Unsupported argument -socks found. Setting SOCKS version isn't possible anymore, only SOCKS5 proxies are supported.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Initialization sanity check failed. Vector is shutting down.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Error loading block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Error: Wallet unlocked for staking only, unable to create transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Error: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-168"/>
<source>This help message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+104"/>
<source>Wallet %s resides outside data directory %s.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-129"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+125"/>
<source>Loading addresses...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-10"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error loading wallet.dat: Wallet requires newer version of Vector</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Wallet needed to be rewritten: restart Vector to complete</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-15"/>
<source>Invalid -proxy address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-22"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+58"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Invalid amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Insufficient funds</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-40"/>
<source>Loading block index...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-109"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+124"/>
<source>Unable to bind to %s on this computer. Vector is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-101"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+33"/>
<source>Minimize weight consumption (experimental) (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>How many blocks to check at startup (default: 500, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Acceptable ciphers (default: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Deprecated argument -debugnet ignored, use -debug=net</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Invalid amount for -mininput=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Vector is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error initializing wallet database environment %s!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Loading wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Cannot downgrade wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot write default address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Rescanning...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Done loading</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-159"/>
<source>To use the %s option</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+186"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-18"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation type="unfinished"/>
</message>
</context>
</TS><|fim▁end|>
|
<translation type="unfinished"/>
</message>
<message>
|
<|file_name|>TestMax.java<|end_file_name|><|fim▁begin|>/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package fuzzy.internal.functions;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import org.junit.Test;
/**
* Tests for Max function.
*
* @since 0.2
* @see Max
*/
public class TestMax {
    @Test
    public void testMax() {
        // The plain maximum wins when absolute-value mode is off.
        Collection<Double> list = Arrays.asList(-1.0, 1.0, 2.0, 3.5);
        Double r = Max.of(list, false);
        assertEquals(Double.valueOf(3.5), r);
    }
@Test
public void testMaxEmpty() {<|fim▁hole|> @Test
    public void testMaxAbs() {
        // With abs=true the element of largest magnitude is returned,
        // preserving its original sign (-10.0, not 10.0).
        Collection<Double> list = Arrays.asList(-10.0, -1.0, 1.0, 2.0, 3.5);
        Double r = Max.of(list, true);
        assertEquals(Double.valueOf(-10.0), r);
    }
}<|fim▁end|>
|
Double r = Max.of(Collections.<Double>emptyList(), false);
assertEquals(Double.valueOf(0.0), r);
}
|
<|file_name|>tls.go<|end_file_name|><|fim▁begin|>package autoconf
import (
"context"
"fmt"
"net"
"github.com/hashicorp/consul/agent/cache"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbautoconf"
)
const (
// ID of the roots watch
rootsWatchID = "roots"
// ID of the leaf watch
leafWatchID = "leaf"
unknownTrustDomain = "unknown"
)
var (
defaultDNSSANs = []string{"localhost"}
defaultIPSANs = []net.IP{{127, 0, 0, 1}, net.ParseIP("::1")}
)
// extractPEMs flattens an IndexedCARoots response into the list of
// PEM-encoded root certificates it contains.
func extractPEMs(roots *structs.IndexedCARoots) []string {
	var pems []string
	for i := range roots.Roots {
		pems = append(pems, roots.Roots[i].RootCert)
	}
	return pems
}
// updateTLSFromResponse will update the TLS certificate and roots in the shared
// TLS configurator.
func (ac *AutoConfig) updateTLSFromResponse(resp *pbautoconf.AutoConfigResponse) error {
	// Flatten the Connect CA roots from the protobuf response into raw PEMs.
	var pems []string
	for _, root := range resp.GetCARoots().GetRoots() {
		pems = append(pems, root.RootCert)
	}

	err := ac.acConfig.TLSConfigurator.UpdateAutoTLS(
		resp.ExtraCACertificates,
		pems,
		resp.Certificate.GetCertPEM(),
		resp.Certificate.GetPrivateKeyPEM(),
		resp.Config.GetTLS().GetVerifyServerHostname(),
	)

	if err != nil {
		return fmt.Errorf("Failed to update the TLS configurator with new certificates: %w", err)
	}

	return nil
}
// setInitialTLSCertificates seeds the agent's TLS state from the certificates
// obtained during bootstrapping. A nil response is a no-op.
func (ac *AutoConfig) setInitialTLSCertificates(certs *structs.SignedResponse) error {
	if certs == nil {
		return nil
	}

	// Make the certificates available through the agent cache so the normal
	// watch/refresh machinery can take over from here.
	if err := ac.populateCertificateCache(certs); err != nil {
		return fmt.Errorf("error populating cache with certificates: %w", err)
	}

	connectCAPems := extractPEMs(&certs.ConnectCARoots)

	err := ac.acConfig.TLSConfigurator.UpdateAutoTLS(
		certs.ManualCARoots,
		connectCAPems,
		certs.IssuedCert.CertPEM,
		certs.IssuedCert.PrivateKeyPEM,
		certs.VerifyServerHostname,
	)
	if err != nil {
		return fmt.Errorf("error updating TLS configurator with certificates: %w", err)
	}

	return nil
}
// populateCertificateCache inserts the issued leaf certificate and the
// Connect CA roots into the agent cache so later lookups hit warm entries.
func (ac *AutoConfig) populateCertificateCache(certs *structs.SignedResponse) error {
	cert, err := connect.ParseCert(certs.IssuedCert.CertPEM)
	if err != nil {
		return fmt.Errorf("Failed to parse certificate: %w", err)
	}

	// prepopulate roots cache
	rootRes := cache.FetchResult{Value: &certs.ConnectCARoots, Index: certs.ConnectCARoots.QueryMeta.Index}
	rootsReq := ac.caRootsRequest()
	// getting the roots doesn't require a token so in order to potentially share the cache with another
	if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCARootName, rootRes, ac.config.Datacenter, "", rootsReq.CacheInfo().Key); err != nil {
		return err
	}

	leafReq := ac.leafCertRequest()

	// prepopulate leaf cache
	certRes := cache.FetchResult{
		Value: &certs.IssuedCert,
		Index: certs.IssuedCert.RaftIndex.ModifyIndex,
		// record the signing key ID so the leaf watch can detect CA rotation
		State: cachetype.ConnectCALeafSuccess(connect.EncodeSigningKeyID(cert.AuthorityKeyId)),
	}
	if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCALeafName, certRes, leafReq.Datacenter, leafReq.Token, leafReq.Key()); err != nil {
		return err
	}

	return nil
}
// setupCertificateCacheWatches registers cache watches for the Connect CA
// roots and this agent's leaf certificate; updates are delivered to
// ac.cacheUpdates. The returned CancelFunc stops both watches.
func (ac *AutoConfig) setupCertificateCacheWatches(ctx context.Context) (context.CancelFunc, error) {
	notificationCtx, cancel := context.WithCancel(ctx)

	rootsReq := ac.caRootsRequest()
	err := ac.acConfig.Cache.Notify(notificationCtx, cachetype.ConnectCARootName, &rootsReq, rootsWatchID, ac.cacheUpdates)
	if err != nil {
		// undo the partially-registered state before bailing out
		cancel()
		return nil, err
	}

	leafReq := ac.leafCertRequest()
	err = ac.acConfig.Cache.Notify(notificationCtx, cachetype.ConnectCALeafName, &leafReq, leafWatchID, ac.cacheUpdates)
	if err != nil {
		cancel()
		return nil, err
	}

	return cancel, nil
}
func (ac *AutoConfig) updateCARoots(roots *structs.IndexedCARoots) error {
switch {
case ac.config.AutoConfig.Enabled:
ac.Lock()
defer ac.Unlock()
var err error
ac.autoConfigResponse.CARoots, err = translateCARootsToProtobuf(roots)
if err != nil {
return err
}
<|fim▁hole|> return ac.persistAutoConfig(ac.autoConfigResponse)
case ac.config.AutoEncryptTLS:
pems := extractPEMs(roots)
if err := ac.acConfig.TLSConfigurator.UpdateAutoTLSCA(pems); err != nil {
return fmt.Errorf("failed to update Connect CA certificates: %w", err)
}
return nil
default:
return nil
}
}
// updateLeafCert reacts to a renewed leaf certificate. Under auto_config the
// cert is stored in the persisted response and pushed through the TLS
// configurator; under auto_encrypt it is handed to the configurator directly.
func (ac *AutoConfig) updateLeafCert(cert *structs.IssuedCert) error {
	switch {
	case ac.config.AutoConfig.Enabled:
		// guard autoConfigResponse against concurrent updates
		ac.Lock()
		defer ac.Unlock()
		var err error
		ac.autoConfigResponse.Certificate, err = translateIssuedCertToProtobuf(cert)
		if err != nil {
			return err
		}

		if err := ac.updateTLSFromResponse(ac.autoConfigResponse); err != nil {
			return err
		}
		return ac.persistAutoConfig(ac.autoConfigResponse)
	case ac.config.AutoEncryptTLS:
		if err := ac.acConfig.TLSConfigurator.UpdateAutoTLSCert(cert.CertPEM, cert.PrivateKeyPEM); err != nil {
			return fmt.Errorf("failed to update the agent leaf cert: %w", err)
		}
		return nil
	default:
		// neither auto_config nor auto_encrypt is enabled: nothing to do
		return nil
	}
}
// caRootsRequest builds the cache request used to fetch and watch the
// Connect CA roots for this agent's datacenter.
func (ac *AutoConfig) caRootsRequest() structs.DCSpecificRequest {
	var req structs.DCSpecificRequest
	req.Datacenter = ac.config.Datacenter
	return req
}
// leafCertRequest builds the cache request used to fetch and watch this
// agent's leaf certificate, including the SANs the certificate must cover.
func (ac *AutoConfig) leafCertRequest() cachetype.ConnectCALeafRequest {
	return cachetype.ConnectCALeafRequest{
		Datacenter:     ac.config.Datacenter,
		Agent:          ac.config.NodeName,
		DNSSAN:         ac.getDNSSANs(),
		IPSAN:          ac.getIPSANs(),
		Token:          ac.acConfig.Tokens.AgentToken(),
		EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(ac.config.PartitionOrEmpty()),
	}
}
// generateCSR will generate a CSR for an Agent certificate. This should
// be sent along with the AutoConfig.InitialConfiguration RPC or the
// AutoEncrypt.Sign RPC. The generated CSR does NOT have a real trust domain
// as when generating this we do not yet have the CA roots. The server will
// update the trust domain for us though.
//
// Returns the PEM-encoded CSR, the PEM-encoded private key, and any error.
func (ac *AutoConfig) generateCSR() (csr string, key string, err error) {
	// We don't provide the correct host here, because we don't know any
	// better at this point. Apart from the domain, we would need the
	// ClusterID, which we don't have. This is why we go with
	// unknownTrustDomain the first time. Subsequent CSRs will have the
	// correct TrustDomain.
	id := &connect.SpiffeIDAgent{
		// will be replaced
		Host:       unknownTrustDomain,
		Datacenter: ac.config.Datacenter,
		Agent:      ac.config.NodeName,
		Partition:  ac.config.PartitionOrDefault(),
	}

	caConfig, err := ac.config.ConnectCAConfiguration()
	if err != nil {
		return "", "", fmt.Errorf("Cannot generate CSR: %w", err)
	}

	conf, err := caConfig.GetCommonConfig()
	if err != nil {
		return "", "", fmt.Errorf("Failed to load common CA configuration: %w", err)
	}

	// Fall back to the connect package defaults when key parameters are unset.
	if conf.PrivateKeyType == "" {
		conf.PrivateKeyType = connect.DefaultPrivateKeyType
	}
	if conf.PrivateKeyBits == 0 {
		conf.PrivateKeyBits = connect.DefaultPrivateKeyBits
	}

	// Create a new private key
	pk, pkPEM, err := connect.GeneratePrivateKeyWithConfig(conf.PrivateKeyType, conf.PrivateKeyBits)
	if err != nil {
		return "", "", fmt.Errorf("Failed to generate private key: %w", err)
	}

	dnsNames := ac.getDNSSANs()
	ipAddresses := ac.getIPSANs()

	// Create a CSR.
	csr, err = connect.CreateCSR(id, pk, dnsNames, ipAddresses)
	if err != nil {
		return "", "", err
	}

	return csr, pkPEM, nil
}
// getDNSSANs returns the DNS SANs to request on the agent certificate: the
// defaults plus any SANs configured for auto_config or auto_encrypt.
func (ac *AutoConfig) getDNSSANs() []string {
	sans := defaultDNSSANs
	if ac.config.AutoConfig.Enabled {
		return append(sans, ac.config.AutoConfig.DNSSANs...)
	}
	if ac.config.AutoEncryptTLS {
		return append(sans, ac.config.AutoEncryptDNSSAN...)
	}
	return sans
}
func (ac *AutoConfig) getIPSANs() []net.IP {
sans := defaultIPSANs
switch {
case ac.config.AutoConfig.Enabled:
sans = append(sans, ac.config.AutoConfig.IPSANs...)
case ac.config.AutoEncryptTLS:
sans = append(sans, ac.config.AutoEncryptIPSAN...)
}
return sans
}<|fim▁end|>
|
if err := ac.updateTLSFromResponse(ac.autoConfigResponse); err != nil {
return err
}
|
<|file_name|>test_autotag.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for autotagging functionality.
"""
from __future__ import division, absolute_import, print_function
import re
import copy
from test import _common
from test._common import unittest
from beets import autotag
from beets.autotag import match
from beets.autotag.hooks import Distance, string_dist
from beets.library import Item
from beets.util import plurality
from beets.autotag import AlbumInfo, TrackInfo
from beets import config
class PluralityTest(_common.TestCase):
    """Tests for ``plurality()`` and ``match.current_metadata()``."""

    def test_plurality_consensus(self):
        objs = [1, 1, 1, 1]
        obj, freq = plurality(objs)
        self.assertEqual(obj, 1)
        self.assertEqual(freq, 4)

    def test_plurality_near_consensus(self):
        objs = [1, 1, 2, 1]
        obj, freq = plurality(objs)
        self.assertEqual(obj, 1)
        self.assertEqual(freq, 3)

    def test_plurality_conflict(self):
        # On a tie, either of the tied values is an acceptable winner.
        objs = [1, 1, 2, 2, 3]
        obj, freq = plurality(objs)
        self.assertTrue(obj in (1, 2))
        self.assertEqual(freq, 2)

    def test_plurality_empty_sequence_raises_error(self):
        with self.assertRaises(ValueError):
            plurality([])

    def test_current_metadata_finds_pluralities(self):
        items = [Item(artist='The Beetles', album='The White Album'),
                 Item(artist='The Beatles', album='The White Album'),
                 Item(artist='The Beatles', album='Teh White Album')]
        likelies, consensus = match.current_metadata(items)
        self.assertEqual(likelies['artist'], 'The Beatles')
        self.assertEqual(likelies['album'], 'The White Album')
        self.assertFalse(consensus['artist'])

    def test_current_metadata_artist_consensus(self):
        items = [Item(artist='The Beatles', album='The White Album'),
                 Item(artist='The Beatles', album='The White Album'),
                 Item(artist='The Beatles', album='Teh White Album')]
        likelies, consensus = match.current_metadata(items)
        self.assertEqual(likelies['artist'], 'The Beatles')
        self.assertEqual(likelies['album'], 'The White Album')
        self.assertTrue(consensus['artist'])

    def test_albumartist_consensus(self):
        # albumartist takes priority over the per-track artists.
        items = [Item(artist='tartist1', album='album',
                      albumartist='aartist'),
                 Item(artist='tartist2', album='album',
                      albumartist='aartist'),
                 Item(artist='tartist3', album='album',
                      albumartist='aartist')]
        likelies, consensus = match.current_metadata(items)
        self.assertEqual(likelies['artist'], 'aartist')
        self.assertFalse(consensus['artist'])

    def test_current_metadata_likelies(self):
        fields = ['artist', 'album', 'albumartist', 'year', 'disctotal',
                  'mb_albumid', 'label', 'catalognum', 'country', 'media',
                  'albumdisambig']
        items = [Item(**dict((f, '%s_%s' % (f, i or 1)) for f in fields))
                 for i in range(5)]
        likelies, _ = match.current_metadata(items)
        for f in fields:
            self.assertEqual(likelies[f], '%s_1' % f)
def _make_item(title, track, artist=u'some artist'):
    """Build a minimal library Item suitable for matching tests."""
    fields = {
        'title': title,
        'track': track,
        'artist': artist,
        'album': u'some album',
        'length': 1,
        'mb_trackid': '',
        'mb_albumid': '',
        'mb_artistid': '',
    }
    return Item(**fields)
def _make_trackinfo():
    """Return three one-second TrackInfo objects for 'some artist'."""
    titles = [u'one', u'two', u'three']
    return [
        TrackInfo(title, None, u'some artist', length=1, index=pos)
        for pos, title in enumerate(titles, start=1)
    ]
def _clear_weights():
    """Hack around the lazy descriptor used to cache weights for
    Distance calculations.
    """
    # Force the lazily-computed descriptor to recompute on next access so
    # per-test changes to config['match']['distance_weights'] take effect.
    Distance.__dict__['_weights'].computed = False
class DistanceTest(_common.TestCase):
    """Tests for the Distance accumulator used to score autotag matches."""

    def tearDown(self):
        super(DistanceTest, self).tearDown()
        # Weights are cached per-config; reset them between tests.
        _clear_weights()

    def test_add(self):
        dist = Distance()
        dist.add('add', 1.0)
        self.assertEqual(dist._penalties, {'add': [1.0]})

    def test_add_equality(self):
        dist = Distance()
        dist.add_equality('equality', 'ghi', ['abc', 'def', 'ghi'])
        self.assertEqual(dist._penalties['equality'], [0.0])

        dist.add_equality('equality', 'xyz', ['abc', 'def', 'ghi'])
        self.assertEqual(dist._penalties['equality'], [0.0, 1.0])

        # A regex pattern may also serve as the set of accepted options.
        dist.add_equality('equality', 'abc', re.compile(r'ABC', re.I))
        self.assertEqual(dist._penalties['equality'], [0.0, 1.0, 0.0])

    def test_add_expr(self):
        dist = Distance()
        dist.add_expr('expr', True)
        self.assertEqual(dist._penalties['expr'], [1.0])
        dist.add_expr('expr', False)
        self.assertEqual(dist._penalties['expr'], [1.0, 0.0])

    def test_add_number(self):
        dist = Distance()
        # Add a full penalty for each number of difference between two numbers.
        dist.add_number('number', 1, 1)
        self.assertEqual(dist._penalties['number'], [0.0])
        dist.add_number('number', 1, 2)
        self.assertEqual(dist._penalties['number'], [0.0, 1.0])
        dist.add_number('number', 2, 1)
        self.assertEqual(dist._penalties['number'], [0.0, 1.0, 1.0])
        dist.add_number('number', -1, 2)
        self.assertEqual(dist._penalties['number'], [0.0, 1.0, 1.0, 1.0,
                                                     1.0, 1.0])

    def test_add_priority(self):
        dist = Distance()
        dist.add_priority('priority', 'abc', 'abc')
        self.assertEqual(dist._penalties['priority'], [0.0])
        dist.add_priority('priority', 'def', ['abc', 'def'])
        self.assertEqual(dist._penalties['priority'], [0.0, 0.5])
        dist.add_priority('priority', 'gh', ['ab', 'cd', 'ef',
                                             re.compile('GH', re.I)])
        self.assertEqual(dist._penalties['priority'], [0.0, 0.5, 0.75])
        dist.add_priority('priority', 'xyz', ['abc', 'def'])
        self.assertEqual(dist._penalties['priority'], [0.0, 0.5, 0.75,
                                                       1.0])

    def test_add_ratio(self):
        dist = Distance()
        dist.add_ratio('ratio', 25, 100)
        self.assertEqual(dist._penalties['ratio'], [0.25])
        dist.add_ratio('ratio', 10, 5)
        self.assertEqual(dist._penalties['ratio'], [0.25, 1.0])
        dist.add_ratio('ratio', -5, 5)
        self.assertEqual(dist._penalties['ratio'], [0.25, 1.0, 0.0])
        dist.add_ratio('ratio', 5, 0)
        self.assertEqual(dist._penalties['ratio'], [0.25, 1.0, 0.0, 0.0])

    def test_add_string(self):
        dist = Distance()
        sdist = string_dist(u'abc', u'bcd')
        dist.add_string('string', u'abc', u'bcd')
        self.assertEqual(dist._penalties['string'], [sdist])
        self.assertNotEqual(dist._penalties['string'], [0])

    def test_add_string_none(self):
        # A missing value on one side is a full penalty.
        dist = Distance()
        dist.add_string('string', None, 'string')
        self.assertEqual(dist._penalties['string'], [1])

    def test_add_string_both_none(self):
        # Two missing values agree with each other: no penalty.
        dist = Distance()
        dist.add_string('string', None, None)
        self.assertEqual(dist._penalties['string'], [0])

    def test_distance(self):
        config['match']['distance_weights']['album'] = 2.0
        config['match']['distance_weights']['medium'] = 1.0
        _clear_weights()

        dist = Distance()
        dist.add('album', 0.5)
        dist.add('media', 0.25)
        dist.add('media', 0.75)
        self.assertEqual(dist.distance, 0.5)

        # __getitem__()
        self.assertEqual(dist['album'], 0.25)
        self.assertEqual(dist['media'], 0.25)

    def test_max_distance(self):
        config['match']['distance_weights']['album'] = 3.0
        config['match']['distance_weights']['medium'] = 1.0
        _clear_weights()

        dist = Distance()
        dist.add('album', 0.5)
        dist.add('medium', 0.0)
        dist.add('medium', 0.0)
        self.assertEqual(dist.max_distance, 5.0)

    def test_operators(self):
        config['match']['distance_weights']['source'] = 1.0
        config['match']['distance_weights']['album'] = 2.0
        config['match']['distance_weights']['medium'] = 1.0
        _clear_weights()

        dist = Distance()
        dist.add('source', 0.0)
        dist.add('album', 0.5)
        dist.add('medium', 0.25)
        dist.add('medium', 0.75)
        self.assertEqual(len(dist), 2)
        self.assertEqual(list(dist), [('album', 0.2), ('medium', 0.2)])
        self.assertTrue(dist == 0.4)
        self.assertTrue(dist < 1.0)
        self.assertTrue(dist > 0.0)
        self.assertEqual(dist - 0.4, 0.0)
        self.assertEqual(0.4 - dist, 0.0)
        self.assertEqual(float(dist), 0.4)

    def test_raw_distance(self):
        config['match']['distance_weights']['album'] = 3.0
        config['match']['distance_weights']['medium'] = 1.0
        _clear_weights()

        dist = Distance()
        dist.add('album', 0.5)
        dist.add('medium', 0.25)
        dist.add('medium', 0.5)
        self.assertEqual(dist.raw_distance, 2.25)

    def test_items(self):
        config['match']['distance_weights']['album'] = 4.0
        config['match']['distance_weights']['medium'] = 2.0
        _clear_weights()

        dist = Distance()
        dist.add('album', 0.1875)
        dist.add('medium', 0.75)
        self.assertEqual(dist.items(), [('medium', 0.25), ('album', 0.125)])

        # Sort by key if distance is equal.
        dist = Distance()
        dist.add('album', 0.375)
        dist.add('medium', 0.75)
        self.assertEqual(dist.items(), [('album', 0.25), ('medium', 0.25)])

    def test_update(self):
        dist1 = Distance()
        dist1.add('album', 0.5)
        dist1.add('media', 1.0)

        dist2 = Distance()
        dist2.add('album', 0.75)
        dist2.add('album', 0.25)
        dist2.add('media', 0.05)

        dist1.update(dist2)
        self.assertEqual(dist1._penalties, {'album': [0.5, 0.75, 0.25],
                                            'media': [1.0, 0.05]})
class TrackDistanceTest(_common.TestCase):
    """Tests for ``match.track_distance`` between an Item and a TrackInfo."""

    def test_identical_tracks(self):
        item = _make_item(u'one', 1)
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertEqual(dist, 0.0)

    def test_different_title(self):
        item = _make_item(u'foo', 1)
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertNotEqual(dist, 0.0)

    def test_different_artist(self):
        item = _make_item(u'one', 1)
        item.artist = u'foo'
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertNotEqual(dist, 0.0)

    def test_various_artists_tolerated(self):
        # A 'Various Artists' item artist must not incur a penalty.
        item = _make_item(u'one', 1)
        item.artist = u'Various Artists'
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertEqual(dist, 0.0)
class AlbumDistanceTest(_common.TestCase):
    """Tests for ``match.distance`` over whole albums."""

    def _mapping(self, items, info):
        # Pair items with candidate tracks positionally.
        out = {}
        for i, t in zip(items, info.tracks):
            out[i] = t
        return out

    def _dist(self, items, info):
        return match.distance(items, info, self._mapping(items, info))

    def test_identical_albums(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        self.assertEqual(self._dist(items, info), 0)

    def test_incomplete_album(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        dist = self._dist(items, info)
        self.assertNotEqual(dist, 0)
        # Make sure the distance is not too great
        self.assertTrue(dist < 0.2)

    def test_global_artists_differ(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'someone else',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        self.assertNotEqual(self._dist(items, info), 0)

    def test_comp_track_artists_match(self):
        # For VA (compilation) albums, the album artist is ignored.
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'should be ignored',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=True,
            album_id=None,
            artist_id=None,
        )
        self.assertEqual(self._dist(items, info), 0)

    def test_comp_no_track_artists(self):
        # Some VA releases don't have track artists (incomplete metadata).
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'should be ignored',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=True,
            album_id=None,
            artist_id=None,
        )
        info.tracks[0].artist = None
        info.tracks[1].artist = None
        info.tracks[2].artist = None
        self.assertEqual(self._dist(items, info), 0)

    def test_comp_track_artists_do_not_match(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2, u'someone else'))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=True,
            album_id=None,
            artist_id=None,
        )
        self.assertNotEqual(self._dist(items, info), 0)

    def test_tracks_out_of_order(self):
        # Out-of-order tracks incur a small but nonzero penalty.
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'three', 2))
        items.append(_make_item(u'two', 3))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        dist = self._dist(items, info)
        self.assertTrue(0 < dist < 0.2)

    def test_two_medium_release(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        info.tracks[0].medium_index = 1
        info.tracks[1].medium_index = 2
        info.tracks[2].medium_index = 1
        dist = self._dist(items, info)
        self.assertEqual(dist, 0)

    def test_per_medium_track_numbers(self):
        # Track numbers that restart per medium should still match.
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 1))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        info.tracks[0].medium_index = 1
        info.tracks[1].medium_index = 2
        info.tracks[2].medium_index = 1
        dist = self._dist(items, info)
        self.assertEqual(dist, 0)
class AssignmentTest(unittest.TestCase):
    """Tests for matching tagged items against candidate track lists."""

    def item(self, title, track):
        """Build a bare Item with only a title and track number set."""
        return Item(
            title=title, track=track,
            mb_trackid='', mb_albumid='', mb_artistid='',
        )

    def test_reorder_when_track_numbers_incorrect(self):
        items = [
            self.item(u'one', 1),
            self.item(u'three', 2),
            self.item(u'two', 3),
        ]
        trackinfo = [
            TrackInfo(u'one', None),
            TrackInfo(u'two', None),
            TrackInfo(u'three', None),
        ]
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        self.assertEqual(extra_tracks, [])
        # Titles win over the (incorrect) track numbers.
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[1]: trackinfo[2],
            items[2]: trackinfo[1],
        })

    def test_order_works_with_invalid_track_numbers(self):
        items = [
            self.item(u'one', 1),
            self.item(u'three', 1),
            self.item(u'two', 1),
        ]
        trackinfo = [
            TrackInfo(u'one', None),
            TrackInfo(u'two', None),
            TrackInfo(u'three', None),
        ]
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        self.assertEqual(extra_tracks, [])
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[1]: trackinfo[2],
            items[2]: trackinfo[1],
        })

    def test_order_works_with_missing_tracks(self):
        items = [
            self.item(u'one', 1),
            self.item(u'three', 3),
        ]
        trackinfo = [
            TrackInfo(u'one', None),
            TrackInfo(u'two', None),
            TrackInfo(u'three', None),
        ]
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        # The unmatched candidate track is reported as extra.
        self.assertEqual(extra_tracks, [trackinfo[1]])
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[1]: trackinfo[2],
        })

    def test_order_works_with_extra_tracks(self):
        items = [
            self.item(u'one', 1),
            self.item(u'two', 2),
            self.item(u'three', 3),
        ]
        trackinfo = [
            TrackInfo(u'one', None),
            TrackInfo(u'three', None),
        ]
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        # The unmatched local item is reported as extra.
        self.assertEqual(extra_items, [items[1]])
        self.assertEqual(extra_tracks, [])
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[2]: trackinfo[1],
        })

    def test_order_works_when_track_names_are_entirely_wrong(self):
        # A real-world test case contributed by a user: titles carry no
        # information, so matching must rely on durations alone.
        def mk_item(i, length):
            return Item(
                artist=u'ben harper',
                album=u'burn to shine',
                title=u'ben harper - Burn to Shine {0}'.format(i),
                track=i,
                length=length,
                mb_trackid='', mb_albumid='', mb_artistid='',
            )

        item_lengths = [
            241.37243007106997,
            342.27781704375036,
            245.95070222338137,
            472.87662515485437,
            279.1759535763187,
            270.33333768012,
            247.83435613222923,
            216.54504531525072,
            225.72775379800484,
            317.7643606963552,
            243.57001238834192,
            186.45916150485752,
        ]
        items = [mk_item(i, length)
                 for i, length in enumerate(item_lengths, 1)]

        def mk_info(index, title, length):
            return TrackInfo(title, None, length=length, index=index)

        track_data = [
            (u'Alone', 238.893),
            (u'The Woman in You', 341.44),
            (u'Less', 245.59999999999999),
            (u'Two Hands of a Prayer', 470.49299999999999),
            (u'Please Bleed', 277.86599999999999),
            (u'Suzie Blue', 269.30599999999998),
            (u'Steal My Kisses', 245.36000000000001),
            (u'Burn to Shine', 214.90600000000001),
            (u'Show Me a Little Shame', 224.0929999999999),
            (u'Forgiven', 317.19999999999999),
            (u'Beloved One', 243.733),
            (u'In the Lord\'s Arms', 186.13300000000001),
        ]
        trackinfo = [mk_info(index, title, length)
                     for index, (title, length) in enumerate(track_data, 1)]

        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        self.assertEqual(extra_tracks, [])
        # Every item should line up with the track at the same position.
        for matched_item, matched_info in mapping.items():
            self.assertEqual(items.index(matched_item),
                             trackinfo.index(matched_info))
class ApplyTestUtil(object):
    """Mixin: run apply_metadata over self.items with a given AlbumInfo."""

    def _apply(self, info=None, per_disc_numbering=False):
        info = info or self.info
        # Pair items with tracks positionally.
        mapping = dict(zip(self.items, info.tracks))
        config['per_disc_numbering'] = per_disc_numbering
        autotag.apply_metadata(info, mapping)
class ApplyTest(_common.TestCase, ApplyTestUtil):
    """Check that apply_metadata writes album/track fields onto items."""

    def setUp(self):
        super(ApplyTest, self).setUp()

        self.items = [Item({}), Item({})]
        trackinfo = [
            TrackInfo(
                u'oneNew',
                u'dfa939ec-118c-4d0f-84a0-60f3d1e6522c',
                medium=1,
                medium_index=1,
                medium_total=1,
                index=1,
                artist_credit='trackArtistCredit',
                artist_sort='trackArtistSort',
            ),
            TrackInfo(
                u'twoNew',
                u'40130ed1-a27c-42fd-a328-1ebefb6caef4',
                medium=2,
                medium_index=1,
                index=2,
                medium_total=1,
            ),
        ]
        self.info = AlbumInfo(
            tracks=trackinfo,
            artist=u'artistNew',
            album=u'albumNew',
            album_id='7edb51cb-77d6-4416-a23c-3a8c2994a2c7',
            artist_id='a6623d39-2d8e-4f70-8242-0a9553b91e50',
            artist_credit=u'albumArtistCredit',
            artist_sort=u'albumArtistSort',
            albumtype=u'album',
            va=False,
            mediums=2,
        )

    def test_titles_applied(self):
        self._apply()
        first, second = self.items
        self.assertEqual(first.title, 'oneNew')
        self.assertEqual(second.title, 'twoNew')

    def test_album_and_artist_applied_to_all(self):
        self._apply()
        for item in self.items:
            self.assertEqual(item.album, 'albumNew')
            self.assertEqual(item.artist, 'artistNew')

    def test_track_index_applied(self):
        self._apply()
        self.assertEqual([i.track for i in self.items], [1, 2])

    def test_track_total_applied(self):
        self._apply()
        for item in self.items:
            self.assertEqual(item.tracktotal, 2)

    def test_disc_index_applied(self):
        self._apply()
        self.assertEqual([i.disc for i in self.items], [1, 2])

    def test_disc_total_applied(self):
        self._apply()
        for item in self.items:
            self.assertEqual(item.disctotal, 2)

    def test_per_disc_numbering(self):
        # With per-disc numbering, each medium restarts at track 1.
        self._apply(per_disc_numbering=True)
        self.assertEqual([i.track for i in self.items], [1, 1])

    def test_per_disc_numbering_track_total(self):
        self._apply(per_disc_numbering=True)
        for item in self.items:
            self.assertEqual(item.tracktotal, 1)

    def test_mb_trackid_applied(self):
        self._apply()
        expected_ids = ['dfa939ec-118c-4d0f-84a0-60f3d1e6522c',
                        '40130ed1-a27c-42fd-a328-1ebefb6caef4']
        for item, track_id in zip(self.items, expected_ids):
            self.assertEqual(item.mb_trackid, track_id)

    def test_mb_albumid_and_artistid_applied(self):
        self._apply()
        for item in self.items:
            self.assertEqual(item.mb_albumid,
                             '7edb51cb-77d6-4416-a23c-3a8c2994a2c7')
            self.assertEqual(item.mb_artistid,
                             'a6623d39-2d8e-4f70-8242-0a9553b91e50')

    def test_albumtype_applied(self):
        self._apply()
        for item in self.items:
            self.assertEqual(item.albumtype, 'album')

    def test_album_artist_overrides_empty_track_artist(self):
        my_info = copy.deepcopy(self.info)
        self._apply(info=my_info)
        for item in self.items:
            self.assertEqual(item.artist, 'artistNew')

    def test_album_artist_overriden_by_nonempty_track_artist(self):
        my_info = copy.deepcopy(self.info)
        my_info.tracks[0].artist = 'artist1!'
        my_info.tracks[1].artist = 'artist2!'
        self._apply(info=my_info)
        self.assertEqual([i.artist for i in self.items],
                         ['artist1!', 'artist2!'])

    def test_artist_credit_applied(self):
        self._apply()
        # Track one has its own credit; track two falls back to the
        # album-level credit.
        for item in self.items:
            self.assertEqual(item.albumartist_credit, 'albumArtistCredit')
        self.assertEqual(self.items[0].artist_credit, 'trackArtistCredit')
        self.assertEqual(self.items[1].artist_credit, 'albumArtistCredit')

    def test_artist_sort_applied(self):
        self._apply()
        for item in self.items:
            self.assertEqual(item.albumartist_sort, 'albumArtistSort')
        self.assertEqual(self.items[0].artist_sort, 'trackArtistSort')
        self.assertEqual(self.items[1].artist_sort, 'albumArtistSort')

    def test_full_date_applied(self):
        my_info = copy.deepcopy(self.info)
        my_info.year = 2013
        my_info.month = 12
        my_info.day = 18
        self._apply(info=my_info)

        first = self.items[0]
        self.assertEqual((first.year, first.month, first.day),
                         (2013, 12, 18))

    def test_date_only_zeros_month_and_day(self):
        self.items = [Item(year=1, month=2, day=3),
                      Item(year=4, month=5, day=6)]

        my_info = copy.deepcopy(self.info)
        my_info.year = 2013
        self._apply(info=my_info)

        first = self.items[0]
        self.assertEqual((first.year, first.month, first.day), (2013, 0, 0))

    def test_missing_date_applies_nothing(self):
        self.items = [Item(year=1, month=2, day=3),
                      Item(year=4, month=5, day=6)]

        self._apply()

        first = self.items[0]
        self.assertEqual((first.year, first.month, first.day), (1, 2, 3))

    def test_data_source_applied(self):
        my_info = copy.deepcopy(self.info)
        my_info.data_source = 'MusicBrainz'
        self._apply(info=my_info)
        self.assertEqual(self.items[0].data_source, 'MusicBrainz')
class ApplyCompilationTest(_common.TestCase, ApplyTestUtil):
    """apply_metadata behavior for various-artists (compilation) albums."""

    def setUp(self):
        super(ApplyCompilationTest, self).setUp()

        self.items = [Item({}), Item({})]
        trackinfo = [
            TrackInfo(
                u'oneNew',
                u'dfa939ec-118c-4d0f-84a0-60f3d1e6522c',
                u'artistOneNew',
                u'a05686fc-9db2-4c23-b99e-77f5db3e5282',
                index=1,
            ),
            TrackInfo(
                u'twoNew',
                u'40130ed1-a27c-42fd-a328-1ebefb6caef4',
                u'artistTwoNew',
                u'80b3cf5e-18fe-4c59-98c7-e5bb87210710',
                index=2,
            ),
        ]
        self.info = AlbumInfo(
            tracks=trackinfo,
            artist=u'variousNew',
            album=u'albumNew',
            album_id='3b69ea40-39b8-487f-8818-04b6eff8c21a',
            artist_id='89ad4ac3-39f7-470e-963a-56509c546377',
            albumtype=u'compilation',
        )

    def test_album_and_track_artists_separate(self):
        self._apply()
        self.assertEqual([i.artist for i in self.items],
                         ['artistOneNew', 'artistTwoNew'])
        for item in self.items:
            self.assertEqual(item.albumartist, 'variousNew')

    def test_mb_albumartistid_applied(self):
        self._apply()
        # Album artist ID is shared; track artist IDs stay per-track.
        for item in self.items:
            self.assertEqual(item.mb_albumartistid,
                             '89ad4ac3-39f7-470e-963a-56509c546377')
        self.assertEqual(self.items[0].mb_artistid,
                         'a05686fc-9db2-4c23-b99e-77f5db3e5282')
        self.assertEqual(self.items[1].mb_artistid,
                         '80b3cf5e-18fe-4c59-98c7-e5bb87210710')

    def test_va_flag_cleared_does_not_set_comp(self):
        self._apply()
        for item in self.items:
            self.assertFalse(item.comp)

    def test_va_flag_sets_comp(self):
        va_info = copy.deepcopy(self.info)
        va_info.va = True
        self._apply(info=va_info)
        for item in self.items:
            self.assertTrue(item.comp)
class StringDistanceTest(unittest.TestCase):
    """Behavioral tests for the fuzzy string_dist metric."""

    def test_equal_strings(self):
        self.assertEqual(string_dist(u'Some String', u'Some String'), 0.0)

    def test_different_strings(self):
        self.assertNotEqual(
            string_dist(u'Some String', u'Totally Different'), 0.0)

    def test_punctuation_ignored(self):
        self.assertEqual(string_dist(u'Some String', u'Some.String!'), 0.0)

    def test_case_ignored(self):
        self.assertEqual(string_dist(u'Some String', u'sOME sTring'), 0.0)

    def test_leading_the_has_lower_weight(self):
        self.assertLess(string_dist(u'The Band Name', u'Band Name'),
                        string_dist(u'XXX Band Name', u'Band Name'))

    def test_parens_have_lower_weight(self):
        self.assertLess(string_dist(u'One (Two)', u'One'),
                        string_dist(u'One .Two.', u'One'))

    def test_brackets_have_lower_weight(self):
        self.assertLess(string_dist(u'One [Two]', u'One'),
                        string_dist(u'One .Two.', u'One'))

    def test_ep_label_has_zero_weight(self):
        self.assertEqual(string_dist(u'My Song (EP)', u'My Song'), 0.0)

    def test_featured_has_lower_weight(self):
        self.assertLess(string_dist(u'My Song feat Someone', u'My Song'),
                        string_dist(u'My Song blah Someone', u'My Song'))

    def test_postfix_the(self):
        self.assertEqual(string_dist(u'The Song Title', u'Song Title, The'),
                         0.0)

    def test_postfix_a(self):
        self.assertEqual(string_dist(u'A Song Title', u'Song Title, A'), 0.0)

    def test_postfix_an(self):
        self.assertEqual(string_dist(u'An Album Title', u'Album Title, An'),
                         0.0)

    def test_empty_strings(self):
        self.assertEqual(string_dist(u'', u''), 0.0)

    def test_solo_pattern(self):
        # Just make sure these don't crash.
        string_dist(u'The ', u'')
        string_dist(u'(EP)', u'(EP)')
        string_dist(u', An', u'')

    def test_heuristic_does_not_harm_distance(self):
        self.assertEqual(string_dist(u'Untitled', u'[Untitled]'), 0.0)

    def test_ampersand_expansion(self):
        self.assertEqual(string_dist(u'And', u'&'), 0.0)

    def test_accented_characters(self):
        self.assertEqual(string_dist(u'\xe9\xe1\xf1', u'ean'), 0.0)
class EnumTest(_common.TestCase):
    """
    Test Enum Subclasses defined in beets.util.enumeration
    """
    # NOTE: the original line carried a stray `<|fim▁hole|>` dataset
    # token fused onto the class header, which is invalid Python; it has
    # been removed. No other behavior changed.
    def test_ordered_enum(self):
        OrderedEnumClass = match.OrderedEnum('OrderedEnumTest', ['a', 'b', 'c'])  # noqa
        self.assertLess(OrderedEnumClass.a, OrderedEnumClass.b)
        self.assertLess(OrderedEnumClass.a, OrderedEnumClass.c)
        self.assertLess(OrderedEnumClass.b, OrderedEnumClass.c)
        self.assertGreater(OrderedEnumClass.b, OrderedEnumClass.a)
        self.assertGreater(OrderedEnumClass.c, OrderedEnumClass.a)
        self.assertGreater(OrderedEnumClass.c, OrderedEnumClass.b)
def suite():
    """Collect this module's tests for the test runner."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| |
<|file_name|>redundant_field_names.rs<|end_file_name|><|fim▁begin|>use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::{meets_msrv, msrvs};
use rustc_ast::ast::{Expr, ExprKind};
use rustc_errors::Applicability;
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_middle::lint::in_external_macro;
use rustc_semver::RustcVersion;
use rustc_session::{declare_tool_lint, impl_lint_pass};
declare_clippy_lint! {
    /// ### What it does
    /// Checks for fields in struct literals where shorthands
    /// could be used.
    ///
    /// ### Why is this bad?
    /// If the field and variable names are the same,
    /// the field name is redundant.
    ///
    /// ### Example
    /// ```rust
    /// let bar: u8 = 123;
    ///
    /// struct Foo {
    ///     bar: u8,
    /// }
    ///
    /// let foo = Foo { bar: bar };
    /// ```
    /// the last line can be simplified to
    /// ```ignore
    /// let foo = Foo { bar };
    /// ```
    pub REDUNDANT_FIELD_NAMES,
    style,
    "checks for fields in struct literals where shorthands could be used"
}
/// Lint pass state. Carries the crate's configured minimum supported
/// Rust version (MSRV): field init shorthand only exists on Rust 1.17+,
/// so the lint must stay silent for older MSRVs.
pub struct RedundantFieldNames {
    msrv: Option<RustcVersion>,
}

impl RedundantFieldNames {
    /// Construct the pass with an optional MSRV taken from configuration.
    #[must_use]
    pub fn new(msrv: Option<RustcVersion>) -> Self {
        Self { msrv }
    }
}

// Register the pass and the lint it emits.
impl_lint_pass!(RedundantFieldNames => [REDUNDANT_FIELD_NAMES]);
impl EarlyLintPass for RedundantFieldNames {
    fn check_expr(&mut self, cx: &EarlyContext<'_>, expr: &Expr) {
        // Field init shorthand requires Rust 1.17; respect the MSRV.
        if !meets_msrv(self.msrv.as_ref(), &msrvs::FIELD_INIT_SHORTHAND) {
            return;
        }

        // Don't lint code expanded from external macros — the user
        // can't rewrite it.
        if in_external_macro(cx.sess, expr.span) {
            return;
        }
        if let ExprKind::Struct(ref se) = expr.kind {
            for field in &se.fields {
                if field.is_shorthand {
                    continue;
                }
                // A field initialized from a bare, single-segment path
                // with no generic arguments whose name equals the field
                // name (`bar: bar`) can use the shorthand (`bar`).
                if let ExprKind::Path(None, path) = &field.expr.kind {
                    if path.segments.len() == 1
                        && path.segments[0].ident == field.ident
                        && path.segments[0].args.is_none()
                    {
                        span_lint_and_sugg(
                            cx,
                            REDUNDANT_FIELD_NAMES,
                            field.span,
                            "redundant field names in struct initialization",
                            "replace it with",
                            field.ident.to_string(),
                            Applicability::MachineApplicable,
                        );
                    }
                }
            }
        }
    }
    extract_msrv_attr!(EarlyContext);
}
|
/// the field name is redundant.
///
/// ### Example
|
<|file_name|>internals.go<|end_file_name|><|fim▁begin|>package builder
// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"syscall"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/builder/parser"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/daemon"
"github.com/docker/docker/graph"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/httputils"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/progressreader"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/tarsum"
"github.com/docker/docker/pkg/urlutil"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
)
// readContext untars the build context stream into a fresh temporary
// directory and wraps the stream in a tarsum reader so per-file
// checksums are available for build-cache lookups. On success,
// b.contextPath points at the unpacked context; the caller is
// responsible for cleaning it up.
func (b *builder) readContext(context io.Reader) (err error) {
	tmpdirPath, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		return
	}

	// Make sure we clean-up upon error.  In the happy case the caller
	// is expected to manage the clean-up
	defer func() {
		if err != nil {
			if e := os.RemoveAll(tmpdirPath); e != nil {
				logrus.Debugf("[BUILDER] failed to remove temporary context: %s", e)
			}
		}
	}()

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return
	}

	// Reading through the tarsum also accumulates the checksums used
	// later by ADD/COPY hashing and probeCache.
	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version1); err != nil {
		return
	}

	if err = chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
		return
	}

	b.contextPath = tmpdirPath
	return
}
// commit records the current build state as a new image layer and
// updates b.image to the new image ID. When id is empty, a throwaway
// "#(nop)" container is created (or a cached image reused) so that
// pure-metadata steps also produce a history entry.
func (b *builder) commit(id string, autoCmd *runconfig.Command, comment string) error {
	if b.disableCommit {
		return nil
	}
	if b.image == "" && !b.noBaseImage {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.Config.Image = b.image
	if id == "" {
		cmd := b.Config.Cmd
		// Temporarily swap in a no-op command describing this step; it
		// becomes part of the image history and of the cache key.
		if runtime.GOOS != "windows" {
			b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", "#(nop) "+comment)
		} else {
			b.Config.Cmd = runconfig.NewCommand("cmd", "/S /C", "REM (nop) "+comment)
		}
		defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)

		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		if hit {
			// Cache hit: probeCache already advanced b.image.
			return nil
		}

		container, err := b.create()
		if err != nil {
			return err
		}
		id = container.ID

		if err := container.Mount(); err != nil {
			return err
		}
		defer container.Unmount()
	}

	container, err := b.Daemon.Get(id)
	if err != nil {
		return err
	}

	// Note: Actually copy the struct
	autoConfig := *b.Config
	autoConfig.Cmd = autoCmd

	commitCfg := &daemon.ContainerCommitConfig{
		Author: b.maintainer,
		Pause:  true,
		Config: &autoConfig,
	}

	// Commit the container
	image, err := b.Daemon.Commit(container, commitCfg)
	if err != nil {
		return err
	}
	// Retain the new layer so it cannot be garbage-collected while
	// this build is still running.
	b.Daemon.Graph().Retain(b.id, image.ID)
	b.activeImages = append(b.activeImages, image.ID)
	b.image = image.ID
	return nil
}
// copyInfo describes one resolved source of an ADD/COPY instruction.
type copyInfo struct {
	origPath   string // source path relative to the build context (or tmp download dir)
	destPath   string // absolute destination path inside the container
	hash       string // build-cache key ("file:...", "dir:...", checksum, or the raw URL)
	decompress bool   // whether a local archive source should be unpacked at the destination
	tmpDir     string // temp dir to remove after the copy (remote downloads only)
}
// runContextCommand is the shared implementation of ADD and COPY: it
// resolves every source argument into a copyInfo (hashing each one for
// the build cache), probes the cache, and on a miss creates a temporary
// container, copies the sources into it and commits the result.
//
// cmdName is used for messages ("ADD"/"COPY"); allowRemote permits URL
// sources and allowDecompression unpacking of local archives.
func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	copyInfos := []*copyInfo{}

	b.Config.Image = b.image

	// Remove any temporary directories created for remote downloads.
	defer func() {
		for _, ci := range copyInfos {
			if ci.tmpDir != "" {
				os.RemoveAll(ci.tmpDir)
			}
		}
	}()

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached).  Don't actually do
	// the copy until we've looked at all src files
	for _, orig := range args[0 : len(args)-1] {
		if err := calcCopyInfo(
			b,
			cmdName,
			&copyInfos,
			orig,
			dest,
			allowRemote,
			allowDecompression,
			true,
		); err != nil {
			return err
		}
	}

	if len(copyInfos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(copyInfos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one CI then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(copyInfos) == 1 {
		srcHash = copyInfos[0].hash
		origPaths = copyInfos[0].origPath
	} else {
		var hashs []string
		var origs []string
		for _, ci := range copyInfos {
			hashs = append(hashs, ci.hash)
			origs = append(origs, ci.origPath)
		}
		// Combine all source hashes into a single "multi:" cache key.
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.Config.Cmd
	// Swap in a no-op command describing the step so it lands in the
	// image history and forms part of the cache key.
	if runtime.GOOS != "windows" {
		b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	} else {
		b.Config.Cmd = runconfig.NewCommand("cmd", "/S /C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
	}
	defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}

	if hit {
		return nil
	}

	container, _, err := b.Daemon.ContainerCreate("", b.Config, nil, true)
	if err != nil {
		return err
	}
	b.TmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	// Now that every source resolved, perform the actual copies.
	for _, ci := range copyInfos {
		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
			return err
		}
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
		return err
	}
	return nil
}
// calcCopyInfo resolves a single ADD/COPY source argument into one or
// more copyInfo entries appended to cInfos.
//
// Sources may be:
//   - a URL (when allowRemote): downloaded into a temp dir under the
//     context and hashed;
//   - a glob pattern (when allowWildcards): expanded against the
//     context file list, recursing once per match;
//   - a plain file or directory inside the context, hashed from the
//     context tarsum ("file:..." / "dir:..." cache keys).
//
// destPath is made absolute relative to the configured WORKINGDIR.
func calcCopyInfo(b *builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {

	// Work in daemon-specific OS filepath semantics. However, we save
	// the origPath passed in here, as it might also be a URL which
	// we need to check for in this function.
	passedInOrigPath := origPath
	origPath = filepath.FromSlash(origPath)
	destPath = filepath.FromSlash(destPath)

	if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

	// Twiddle the destPath when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !filepath.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, string(os.PathSeparator))
		destPath = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.Config.WorkingDir), destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += string(os.PathSeparator)
		}
	}

	// In the remote/URL case, download it and gen its hashcode
	if urlutil.IsURL(passedInOrigPath) {
		// As it's a URL, we go back to processing on what was passed in
		// to this function
		origPath = passedInOrigPath

		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := httputils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := filepath.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       b.OutOld,
			Formatter: b.StreamFormatter,
			Size:      resp.ContentLength,
			NewLines:  true,
			ID:        "",
			Action:    "Downloading",
		})); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present
		// Otherwise just remove atime and mtime
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero'
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		if err := system.UtimesNano(tmpFileName, times); err != nil {
			return err
		}

		ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, string(os.PathSeparator)) {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, string(os.PathSeparator)) {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, string(os.PathSeparator))
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if allowWildcards && containsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := filepath.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it.
			// BUGFIX: the error from this recursive call was previously
			// discarded, silently dropping failures for matched files.
			if err := calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false); err != nil {
				return err
			}
		}
		return nil
	}

	// Must be a dir or a file
	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	// BUGFIX: surface Stat errors instead of ignoring them; a nil fi
	// would otherwise panic at fi.IsDir() below.
	fi, err := os.Stat(filepath.Join(b.contextPath, origPath))
	if err != nil {
		return err
	}

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := filepath.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, string(os.PathSeparator)) {
		absOrigPath += string(os.PathSeparator)
	}

	// Need path w/o slash too to find matching dir w/o trailing slash
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := filepath.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used. However, we'll exclude the
		// root dir itself. We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes. This will lead to a miss in the cache check if
		//     that child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}
// containsWildcards reports whether name holds an unescaped glob
// metacharacter (*, ? or [). A backslash escapes the byte after it.
func containsWildcards(name string) bool {
	i := 0
	for i < len(name) {
		switch name[i] {
		case '\\':
			i++ // skip the escaped byte
		case '*', '?', '[':
			return true
		}
		i++
	}
	return false
}
// pullImage pulls name (optionally "repo:tag"; the tag defaults to
// "latest") through the daemon's registry service and returns the
// resolved image. Credentials come from the per-build auth configs
// when any were provided.
func (b *builder) pullImage(name string) (*image.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	if tag == "" {
		tag = "latest"
	}

	pullRegistryAuth := &cliconfig.AuthConfig{}
	if len(b.AuthConfigs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote)
		if err != nil {
			return nil, err
		}

		resolvedConfig := registry.ResolveAuthConfig(
			&cliconfig.ConfigFile{AuthConfigs: b.AuthConfigs},
			repoInfo.Index,
		)
		pullRegistryAuth = &resolvedConfig
	}

	imagePullConfig := &graph.ImagePullConfig{
		AuthConfig: pullRegistryAuth,
		OutStream:  ioutils.NopWriteCloser(b.OutOld),
	}

	if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
		return nil, err
	}

	image, err := b.Daemon.Repositories().LookupImage(name)
	if err != nil {
		return nil, err
	}

	return image, nil
}
// processImageFrom makes img the new base of the build: it adopts the
// image's config, ensures a default PATH when the env is empty, and
// replays any ONBUILD triggers stored on the image (clearing them
// first, since the current build's config will itself be committed).
func (b *builder) processImageFrom(img *image.Image) error {
	b.image = img.ID

	if img.Config != nil {
		b.Config = img.Config
	}

	// The default path will be blank on Windows (set by HCS)
	if len(b.Config.Env) == 0 && daemon.DefaultPathEnv != "" {
		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.Config.OnBuild
	b.Config.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for stepN, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			// ONBUILD may not chain itself, nor change the build's
			// identity via MAINTAINER/FROM.
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step)

			// Dispatch the trigger as if it were a Dockerfile line.
			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}

	return nil
}
// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
// and if so attempts to look up the current `b.image` and `b.Config` pair
// in the current server `b.Daemon`. If an image is found, probeCache returns
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
// is any error, it returns `(false, err)`.
func (b *builder) probeCache() (bool, error) {
	if !b.UtilizeCache || b.cacheBusted {
		return false, nil
	}

	cache, err := b.Daemon.ImageGetCached(b.image, b.Config)
	if err != nil {
		return false, err
	}
	if cache == nil {
		logrus.Debugf("[BUILDER] Cache miss")
		// Once one step misses, every later step must rebuild too.
		b.cacheBusted = true
		return false, nil
	}

	fmt.Fprintf(b.OutStream, " ---> Using cache\n")
	logrus.Debugf("[BUILDER] Use cached version")
	b.image = cache.ID

	// Keep the cached layer alive for the duration of this build.
	b.Daemon.Graph().Retain(b.id, cache.ID)
	b.activeImages = append(b.activeImages, cache.ID)

	return true, nil
}
func (b *builder) create() (*daemon.Container, error) {
if b.image == "" && !b.noBaseImage {
return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
}
b.Config.Image = b.image
hostConfig := &runconfig.HostConfig{
CPUShares: b.cpuShares,
CPUPeriod: b.cpuPeriod,
CPUQuota: b.cpuQuota,
CpusetCpus: b.cpuSetCpus,
CpusetMems: b.cpuSetMems,
CgroupParent: b.cgroupParent,
Memory: b.memory,
MemorySwap: b.memorySwap,
Ulimits: b.ulimits,
}
config := *b.Config
// Create the container
c, warnings, err := b.Daemon.ContainerCreate("", b.Config, hostConfig, true)
if err != nil {
return nil, err
}
for _, warning := range warnings {
fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
}
b.TmpContainers[c.ID] = struct{}{}
fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID))
if config.Cmd.Len() > 0 {
// override the entry point that may have been picked up from the base image
s := config.Cmd.Slice()
c.Path = s[0]
c.Args = s[1:]
} else {
config.Cmd = runconfig.NewCommand()
}
return c, nil
}
func (b *builder) run(c *daemon.Container) error {
var errCh chan error
if b.Verbose {
errCh = c.Attach(nil, b.OutStream, b.ErrStream)
}
//start the container
if err := c.Start(); err != nil {
return err
}
finished := make(chan struct{})
defer close(finished)
go func() {
select {
case <-b.cancelled:
logrus.Debugln("Build cancelled, killing container:", c.ID)
c.Kill()
case <-finished:
}
}()
if b.Verbose {
// Block on reading output from container, stop on err or chan closed
if err := <-errCh; err != nil {
return err
}
}
// Wait for it to finish
if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
return &jsonmessage.JSONError{
Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.Config.Cmd.ToString(), ret),
Code: ret,
}
}
return nil
}
func (b *builder) checkPathForAddition(orig string) error {
origPath := filepath.Join(b.contextPath, orig)
origPath, err := filepath.EvalSymlinks(origPath)
if err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("%s: no such file or directory", orig)
}
return err
}
contextPath, err := filepath.EvalSymlinks(b.contextPath)
if err != nil {
return err
}
if !strings.HasPrefix(origPath, contextPath) {
return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
}
if _, err := os.Stat(origPath); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("%s: no such file or directory", orig)
}
return err
}
return nil
}
func (b *builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
var (
err error
destExists = true
origPath = filepath.Join(b.contextPath, orig)
destPath string
)
// Work in daemon-local OS specific file paths
dest = filepath.FromSlash(dest)
destPath, err = container.GetResourcePath(dest)
if err != nil {
return err
}
// Preserve the trailing slash
if strings.HasSuffix(dest, string(os.PathSeparator)) || dest == "." {
destPath = destPath + string(os.PathSeparator)
}
destStat, err := os.Stat(destPath)
if err != nil {
if !os.IsNotExist(err) {
logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err)
return err
}
destExists = false
}
fi, err := os.Stat(origPath)
if err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("%s: no such file or directory", orig)
}
return err
}
if fi.IsDir() {
return copyAsDirectory(origPath, destPath, destExists)
}
// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
if decompress {
// First try to unpack the source as an archive
// to support the untar feature we need to clean up the path a little bit
// because tar is very forgiving. First we need to strip off the archive's
// filename from the path but this is only added if it does not end in slash
tarDest := destPath
if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
tarDest = filepath.Dir(destPath)
}
// try to successfully untar the orig
if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
return nil
} else if err != io.EOF {
logrus.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
}
}
if err := system.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
return err
}
if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
return err
}
resPath := destPath
if destExists && destStat.IsDir() {<|fim▁hole|> }
return fixPermissions(origPath, resPath, 0, 0, destExists)
}
func copyAsDirectory(source, destination string, destExisted bool) error {
if err := chrootarchive.CopyWithTar(source, destination); err != nil {
return err
}
return fixPermissions(source, destination, 0, 0, destExisted)
}
func (b *builder) clearTmp() {
for c := range b.TmpContainers {
rmConfig := &daemon.ContainerRmConfig{
ForceRemove: true,
RemoveVolume: true,
}
if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
return
}
delete(b.TmpContainers, c)
fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
}
}<|fim▁end|>
|
resPath = filepath.Join(destPath, filepath.Base(origPath))
|
<|file_name|>harmonic_potential_bond_type.py<|end_file_name|><|fim▁begin|>import simtk.unit as units
from intermol.decorators import accepts_compatible_units<|fim▁hole|> __slots__ = ['length', 'k', 'order', 'c']
@accepts_compatible_units(None, None,
length=units.nanometers,
k=units.kilojoules_per_mole * units.nanometers ** (-2),
order=None,
c=None)
def __init__(self, bondingtype1, bondingtype2,
length=0.0 * units.nanometers,
k=0.0 * units.kilojoules_per_mole * units.nanometers ** (-2),
order=1, c=False):
AbstractBondType.__init__(self, bondingtype1, bondingtype2, order, c)
self.length = length
self.k = k
class HarmonicPotentialBond(HarmonicPotentialBondType):
"""
stub documentation
"""
def __init__(self, atom1, atom2, bondingtype1=None, bondingtype2=None,
length=0.0 * units.nanometers,
k=0.0 * units.kilojoules_per_mole * units.nanometers ** (-2),
order=1, c=False):
self.atom1 = atom1
self.atom2 = atom2
HarmonicPotentialBondType.__init__(self, bondingtype1, bondingtype2,
length=length,
k=k,
order=order, c=c)<|fim▁end|>
|
from intermol.forces.abstract_bond_type import AbstractBondType
class HarmonicPotentialBondType(AbstractBondType):
|
<|file_name|>magic-scroll.js<|end_file_name|><|fim▁begin|>(function(){<|fim▁hole|> var MagicScroll = function(selector, options) {
if(!(this instanceof MagicScroll)) {
return new MagicScroll(selector, options);
}
if(!selector) {
console.log('WTF Bro! Give me selector!');
} else {
this.elements = document.querySelectorAll(selector);
}
return this;
};
MagicScroll.prototype.create = function() {
for(var i = 0; i < this.elements.length; i++) {
var element = this.elements[i];
if(!element.magicScroll) {
element.magicScroll = true;
element.addEventListener('mousedown', this._start);
element.addEventListener('mouseup', this._stop);
element.addEventListener('mouseleave', this._stop);
element.addEventListener('mousemove', this._move);
}
}
return this;
};
MagicScroll.prototype._start = function(e) {
active = true;
};
MagicScroll.prototype._stop = function(e) {
active = false;
e.currentTarget.classList.remove('scrolling');
scrolling = false;
};
MagicScroll.prototype._move = function(event) {
if(active) {
event && event.preventDefault();
var $current= event.currentTarget,
mY = (event.movementY) ? event.movementY : event.webkitMovementY;
if(!scrolling) {
$current.classList.add('scrolling');
scrolling = true;
}
if(mY > 0) {
$current.scrollTop -= Math.abs(mY * 2);
}
else if(mY < 0) {
$current.scrollTop += Math.abs(mY * 2);
}
}
};
MagicScroll.prototype.destroy = function() {
for(var i = 0; i < this.elements.length; i++) {
var element = this.elements[i];
if(element.magicScroll) {
element.removeEventListener('mousedown', this._start);
element.removeEventListener('mouseup', this._stop);
element.removeEventListener('mouseleave', this._stop);
element.removeEventListener('mousemove', this._move);
delete element.magicScroll;
}
}
};
if (typeof exports !== 'undefined') {
if (typeof module !== 'undefined' && module.exports) {
exports = module.exports = MagicScroll;
}
exports.MagicScroll = MagicScroll;
} else {
this.MagicScroll = MagicScroll;
}
// AMD Registrity
if (typeof define === 'function' && define.amd) {
define('MagicScroll', [], function() {
return MagicScroll;
});
}
}).call(this);<|fim▁end|>
|
var active = scrolling = false;
|
<|file_name|>EnumTool.ts<|end_file_name|><|fim▁begin|>/**
* Created by XD on 2016/7/24.
*/
export enum DeleteEnum{
//未删除
NotDel=0,
//已删除
IsDel=1
}
export function getDeleteEnumDisplayName(deleteEnum:DeleteEnum){
return {
[DeleteEnum.IsDel]:'已删',
[DeleteEnum.NotDel]:'未删'
}[deleteEnum]
}
export enum CheckEnum
{
/// <summary>
/// 未通过:2
/// </summary>
UnPass = 2,
<|fim▁hole|> Waiting = 0,
/// <summary>
/// 已审核:1
/// </summary>
Pass = 1,
}
export enum UseStateEnum
{
/// <summary>
/// 启用1
/// </summary>
Enable = 1,
/// <summary>
/// 停用0
/// </summary>
Disable = 0
}
export enum SexEnum
{
/// <summary>
/// 未知0
/// </summary>
Unknown = 0,
/// <summary>
/// 男1
// / </summary>
Man = 1,
/// <summary>
/// 女2
/// </summary>
Woman = 2
}
export enum FormsRole
{
/// <summary>
/// 管理员
/// </summary>
Admin=0,
/// <summary>
/// 网站用户
/// </summary>
Member=1
}<|fim▁end|>
|
/// 未审核:0
|
<|file_name|>AndroidVersion.java<|end_file_name|><|fim▁begin|>package org.beryl.app;
/** Convenience class for retrieving the current Android version that's running on the device.
*
* Code example on how to use AndroidVersion to load a multi-versioned class at runtime for backwards compatibility without using reflection.
* <pre class="code"><code class="java">
import org.beryl.app.AndroidVersion;
public class StrictModeEnabler {
public static void enableOnThread() {
IStrictModeEnabler enabler = getStrictModeEnabler();
}
// Strict Mode is only supported on Gingerbread or higher.
private static IStrictModeEnabler getStrictModeEnabler() {
if(AndroidVersion.isGingerbreadOrHigher()) {
return new GingerbreadAndAboveStrictModeEnabler();
} else {
return new NoStrictModeEnabler();
}
}
}
</code></pre>*/
@SuppressWarnings("deprecation")
public class AndroidVersion {
private static final int _ANDROID_SDK_VERSION;
private static final int ANDROID_SDK_VERSION_PREVIEW = Integer.MAX_VALUE;
static {
int android_sdk = 3; // 3 is Android 1.5 (Cupcake) which is the earliest Android SDK available.
try {
android_sdk = Integer.parseInt(android.os.Build.VERSION.SDK);
}
catch (Exception e) {
android_sdk = ANDROID_SDK_VERSION_PREVIEW;
}
finally {}
_ANDROID_SDK_VERSION = android_sdk;
}
/** Returns true if running on development or preview version of Android. */
public static boolean isPreviewVersion() {
return _ANDROID_SDK_VERSION >= android.os.Build.VERSION_CODES.CUR_DEVELOPMENT;
}
/** Gets the SDK Level available to the device. */
public static int getSdkVersion() {<|fim▁hole|> return _ANDROID_SDK_VERSION;
}
/** Returns true if running on Android 1.5 or higher. */
public static boolean isCupcakeOrHigher() {
return _ANDROID_SDK_VERSION >= android.os.Build.VERSION_CODES.CUPCAKE;
}
/** Returns true if running on Android 1.6 or higher. */
public static boolean isDonutOrHigher() {
return _ANDROID_SDK_VERSION >= android.os.Build.VERSION_CODES.DONUT;
}
/** Returns true if running on Android 2.0 or higher. */
public static boolean isEclairOrHigher() {
return _ANDROID_SDK_VERSION >= android.os.Build.VERSION_CODES.ECLAIR;
}
/** Returns true if running on Android 2.1-update1 or higher. */
public static boolean isEclairMr1OrHigher() {
return _ANDROID_SDK_VERSION >= android.os.Build.VERSION_CODES.ECLAIR_MR1;
}
/** Returns true if running on Android 2.2 or higher. */
public static boolean isFroyoOrHigher() {
return _ANDROID_SDK_VERSION >= android.os.Build.VERSION_CODES.FROYO;
}
/** Returns true if running on Android 2.3 or higher. */
public static boolean isGingerbreadOrHigher() {
return _ANDROID_SDK_VERSION >= android.os.Build.VERSION_CODES.GINGERBREAD;
}
/** Returns true if running on Android 2.3.3 or higher. */
public static boolean isGingerbreadMr1OrHigher() {
return _ANDROID_SDK_VERSION >= android.os.Build.VERSION_CODES.GINGERBREAD_MR1;
}
/** Returns true if running on Android 3.0 or higher. */
public static boolean isHoneycombOrHigher() {
return _ANDROID_SDK_VERSION >= android.os.Build.VERSION_CODES.HONEYCOMB;
}
/** Returns true if running on Android 3.1 or higher. */
public static boolean isHoneycombMr1OrHigher() {
return _ANDROID_SDK_VERSION >= android.os.Build.VERSION_CODES.HONEYCOMB_MR1;
}
/** Returns true if running on Android 3.2 or higher. */
public static boolean isHoneycombMr2OrHigher() {
return _ANDROID_SDK_VERSION >= android.os.Build.VERSION_CODES.HONEYCOMB_MR2;
}
/** Returns true if running on Android 4.0 or higher. */
public static boolean isIceCreamSandwichOrHigher() {
return _ANDROID_SDK_VERSION >= android.os.Build.VERSION_CODES.ICE_CREAM_SANDWICH;
}
/** Returns true if running on Android 4.0.3 or higher. */
public static boolean isIceCreamSandwichMr1OrHigher() {
return _ANDROID_SDK_VERSION >= android.os.Build.VERSION_CODES.ICE_CREAM_SANDWICH_MR1;
}
/** Returns true if running on an earlier version than Android 4.0.3. */
public static boolean isBeforeIceCreamSandwichMr1() {
return _ANDROID_SDK_VERSION < android.os.Build.VERSION_CODES.ICE_CREAM_SANDWICH_MR1;
}
/** Returns true if running on an earlier version than Android 4.0. */
public static boolean isBeforeIceCreamSandwich() {
return _ANDROID_SDK_VERSION < android.os.Build.VERSION_CODES.ICE_CREAM_SANDWICH;
}
/** Returns true if running on an earlier version than Android 3.2. */
public static boolean isBeforeHoneycombMr2() {
return _ANDROID_SDK_VERSION < android.os.Build.VERSION_CODES.HONEYCOMB_MR2;
}
/** Returns true if running on an earlier version than Android 3.1. */
public static boolean isBeforeHoneycombMr1() {
return _ANDROID_SDK_VERSION < android.os.Build.VERSION_CODES.HONEYCOMB_MR1;
}
/** Returns true if running on an earlier version than Android 3.0. */
public static boolean isBeforeHoneycomb() {
return _ANDROID_SDK_VERSION < android.os.Build.VERSION_CODES.HONEYCOMB;
}
/** Returns true if running on an earlier version than Android 2.3.3. */
public static boolean isBeforeGingerbreadMr1() {
return _ANDROID_SDK_VERSION < android.os.Build.VERSION_CODES.GINGERBREAD_MR1;
}
/** Returns true if running on an earlier version than Android 2.3. */
public static boolean isBeforeGingerbread() {
return _ANDROID_SDK_VERSION < android.os.Build.VERSION_CODES.GINGERBREAD;
}
/** Returns true if running on an earlier version than Android 2.2. */
public static boolean isBeforeFroyo() {
return _ANDROID_SDK_VERSION < android.os.Build.VERSION_CODES.FROYO;
}
/** Returns true if running on an earlier version than Android 2.1-update. */
public static boolean isBeforeEclairMr1() {
return _ANDROID_SDK_VERSION >= android.os.Build.VERSION_CODES.ECLAIR_MR1;
}
/** Returns true if running on an earlier version than Android 2.0. */
public static boolean isBeforeEclair() {
return _ANDROID_SDK_VERSION < android.os.Build.VERSION_CODES.ECLAIR;
}
/** Returns true if running on an earlier version than Android 1.6. */
public static boolean isBeforeDonut() {
return _ANDROID_SDK_VERSION < android.os.Build.VERSION_CODES.DONUT;
}
/** Returns true if running on an earlier version than Android 1.5. */
public static boolean isBeforeCupcake() {
return _ANDROID_SDK_VERSION < android.os.Build.VERSION_CODES.CUPCAKE;
}
private AndroidVersion() {
// Prevent users from instantiating this class.
}
}<|fim▁end|>
| |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|>from django import forms
from django.contrib import admin
from django.contrib.flatpages.models import FlatPage
from django.utils.translation import ugettext_lazy as _
class FlatpageForm(forms.ModelForm):
url = forms.RegexField(label=_("URL"), max_length=100, regex=r'^[-\w/]+$',
help_text = _("Example: '/about/contact/'. Make sure to have leading"
" and trailing slashes."),
error_message = _("This value must contain only letters, numbers,"
" underscores, dashes or slashes."))
class Meta:
model = FlatPage
class FlatPageAdmin(admin.ModelAdmin):<|fim▁hole|> )
list_display = ('url', 'title')
list_filter = ('sites', 'enable_comments', 'registration_required')
search_fields = ('url', 'title')
admin.site.register(FlatPage, FlatPageAdmin)<|fim▁end|>
|
form = FlatpageForm
fieldsets = (
(None, {'fields': ('url', 'title', 'content', 'sites')}),
(_('Advanced options'), {'classes': ('collapse',), 'fields': ('enable_comments', 'registration_required', 'template_name')}),
|
<|file_name|>picture.server.routes.tests.js<|end_file_name|><|fim▁begin|>'use strict';
var should = require('should'),
request = require('supertest'),
path = require('path'),
mongoose = require('mongoose'),
User = mongoose.model('User'),
Picture = mongoose.model('Picture'),
express = require(path.resolve('./config/lib/express'));
/**
* Globals
*/
var app,
agent,
credentials,
user,
picture;
/**
* Picture routes tests
*/
describe('Picture CRUD tests', function () {
before(function (done) {
// Get application
app = express.init(mongoose);
agent = request.agent(app);
done();
});
beforeEach(function (done) {
// Create user credentials
credentials = {
username: 'username',
password: '[email protected]$Aw3$0m3'
};
// Create a new user
user = new User({
firstName: 'Full',
lastName: 'Name',
displayName: 'Full Name',
email: '[email protected]',
username: credentials.username,
password: credentials.password,
provider: 'local'
});
// Save a user to the test db and create new picture
user.save(function () {
picture = {
title: 'Picture Title',
content: 'Picture Content'
};
done();
});
});
it('should be able to save an picture if logged in', function (done) {
agent.post('/api/auth/signin')
.send(credentials)
.expect(200)
.end(function (signinErr, signinRes) {
// Handle signin error
if (signinErr) {
return done(signinErr);
}
// Get the userId
var userId = user.id;
// Save a new picture
agent.post('/api/pictures')
.send(picture)
.expect(200)
.end(function (pictureSaveErr, pictureSaveRes) {
// Handle picture save error
if (pictureSaveErr) {
return done(pictureSaveErr);
}
// Get a list of pictures
agent.get('/api/pictures')
.end(function (picturesGetErr, picturesGetRes) {
// Handle picture save error
if (picturesGetErr) {
return done(picturesGetErr);
}
// Get pictures list
var pictures = picturesGetRes.body;
// Set assertions
(pictures[0].user._id).should.equal(userId);
(pictures[0].title).should.match('Picture Title');
// Call the assertion callback
done();
});
});
});
});
it('should not be able to save an picture if not logged in', function (done) {
agent.post('/api/pictures')
.send(picture)
.expect(403)
.end(function (pictureSaveErr, pictureSaveRes) {
// Call the assertion callback
done(pictureSaveErr);
});
});
it('should not be able to save an picture if no title is provided', function (done) {
// Invalidate title field
picture.title = '';
agent.post('/api/auth/signin')
.send(credentials)
.expect(200)
.end(function (signinErr, signinRes) {
// Handle signin error
if (signinErr) {
return done(signinErr);
}
// Get the userId
var userId = user.id;
// Save a new picture
agent.post('/api/pictures')
.send(picture)
.expect(400)
.end(function (pictureSaveErr, pictureSaveRes) {
// Set message assertion
(pictureSaveRes.body.message).should.match('Title cannot be blank');
// Handle picture save error
done(pictureSaveErr);
});
});
});
it('should be able to update an picture if signed in', function (done) {
agent.post('/api/auth/signin')
.send(credentials)
.expect(200)
.end(function (signinErr, signinRes) {
// Handle signin error
if (signinErr) {
return done(signinErr);
}
// Get the userId
var userId = user.id;
// Save a new picture
agent.post('/api/pictures')
.send(picture)
.expect(200)
.end(function (pictureSaveErr, pictureSaveRes) {
// Handle picture save error
if (pictureSaveErr) {
return done(pictureSaveErr);
}
// Update picture title
picture.title = 'WHY YOU GOTTA BE SO MEAN?';
// Update an existing picture
agent.put('/api/pictures/' + pictureSaveRes.body._id)
.send(picture)
.expect(200)
.end(function (pictureUpdateErr, pictureUpdateRes) {
// Handle picture update error
if (pictureUpdateErr) {
return done(pictureUpdateErr);
}
// Set assertions
(pictureUpdateRes.body._id).should.equal(pictureSaveRes.body._id);
(pictureUpdateRes.body.title).should.match('WHY YOU GOTTA BE SO MEAN?');
// Call the assertion callback
done();
});
});
});
});
it('should be able to get a list of pictures if not signed in', function (done) {
// Create new picture model instance
var pictureObj = new Picture(picture);
// Save the picture
pictureObj.save(function () {
// Request pictures
request(app).get('/api/pictures')
.end(function (req, res) {
// Set assertion
res.body.should.be.instanceof(Array).and.have.lengthOf(1);
// Call the assertion callback
done();
});
});
});
it('should be able to get a single picture if not signed in', function (done) {
// Create new picture model instance
var pictureObj = new Picture(picture);
// Save the picture
pictureObj.save(function () {
request(app).get('/api/pictures/' + pictureObj._id)
.end(function (req, res) {
// Set assertion
res.body.should.be.instanceof(Object).and.have.property('title', picture.title);
// Call the assertion callback
done();
});
});
});
it('should return proper error for single picture with an invalid Id, if not signed in', function (done) {
// test is not a valid mongoose Id
request(app).get('/api/pictures/test')
.end(function (req, res) {
// Set assertion
res.body.should.be.instanceof(Object).and.have.property('message', 'Picture is invalid');
// Call the assertion callback
done();
});
});
it('should return proper error for single picture which doesnt exist, if not signed in', function (done) {
// This is a valid mongoose Id but a non-existent picture
request(app).get('/api/pictures/559e9cd815f80b4c256a8f41')
.end(function (req, res) {
// Set assertion
res.body.should.be.instanceof(Object).and.have.property('message', 'No picture with that identifier has been found');
// Call the assertion callback
done();
});
});
it('should be able to delete an picture if signed in', function (done) {
agent.post('/api/auth/signin')
.send(credentials)
.expect(200)
.end(function (signinErr, signinRes) {
// Handle signin error
if (signinErr) {
return done(signinErr);
}
// Get the userId
var userId = user.id;
// Save a new picture
agent.post('/api/pictures')
.send(picture)
.expect(200)
.end(function (pictureSaveErr, pictureSaveRes) {
// Handle picture save error
if (pictureSaveErr) {
return done(pictureSaveErr);
}
// Delete an existing picture
agent.delete('/api/pictures/' + pictureSaveRes.body._id)
.send(picture)
.expect(200)
.end(function (pictureDeleteErr, pictureDeleteRes) {
// Handle picture error error
if (pictureDeleteErr) {
return done(pictureDeleteErr);
}
// Set assertions
(pictureDeleteRes.body._id).should.equal(pictureSaveRes.body._id);
// Call the assertion callback
done();
});
});
});
});
it('should not be able to delete an picture if not signed in', function (done) {
// Set picture user
picture.user = user;
// Create new picture model instance
var pictureObj = new Picture(picture);
// Save the picture
pictureObj.save(function () {
// Try deleting picture
request(app).delete('/api/pictures/' + pictureObj._id)
.expect(403)
.end(function (pictureDeleteErr, pictureDeleteRes) {
// Set message assertion
(pictureDeleteRes.body.message).should.match('User is not authorized');
// Handle picture error error
done(pictureDeleteErr);
});
});<|fim▁hole|> var _creds = {
username: 'orphan',
password: '[email protected]$Aw3$0m3'
};
// Create orphan user
var _orphan = new User({
firstName: 'Full',
lastName: 'Name',
displayName: 'Full Name',
email: '[email protected]',
username: _creds.username,
password: _creds.password,
provider: 'local'
});
_orphan.save(function (err, orphan) {
// Handle save error
if (err) {
return done(err);
}
agent.post('/api/auth/signin')
.send(_creds)
.expect(200)
.end(function (signinErr, signinRes) {
// Handle signin error
if (signinErr) {
return done(signinErr);
}
// Get the userId
var orphanId = orphan._id;
// Save a new picture
agent.post('/api/pictures')
.send(picture)
.expect(200)
.end(function (pictureSaveErr, pictureSaveRes) {
// Handle picture save error
if (pictureSaveErr) {
return done(pictureSaveErr);
}
// Set assertions on new picture
(pictureSaveRes.body.title).should.equal(picture.title);
should.exist(pictureSaveRes.body.user);
should.equal(pictureSaveRes.body.user._id, orphanId);
// force the picture to have an orphaned user reference
orphan.remove(function () {
// now signin with valid user
agent.post('/api/auth/signin')
.send(credentials)
.expect(200)
.end(function (err, res) {
// Handle signin error
if (err) {
return done(err);
}
// Get the picture
agent.get('/api/pictures/' + pictureSaveRes.body._id)
.expect(200)
.end(function (pictureInfoErr, pictureInfoRes) {
// Handle picture error
if (pictureInfoErr) {
return done(pictureInfoErr);
}
// Set assertions
(pictureInfoRes.body._id).should.equal(pictureSaveRes.body._id);
(pictureInfoRes.body.title).should.equal(picture.title);
should.equal(pictureInfoRes.body.user, undefined);
// Call the assertion callback
done();
});
});
});
});
});
});
});
it('should be able to get a single picture if signed in and verify the custom "isCurrentUserOwner" field is set to "true"', function (done) {
// Create new picture model instance
picture.user = user;
var pictureObj = new Picture(picture);
// Save the picture
pictureObj.save(function () {
agent.post('/api/auth/signin')
.send(credentials)
.expect(200)
.end(function (signinErr, signinRes) {
// Handle signin error
if (signinErr) {
return done(signinErr);
}
// Get the userId
var userId = user.id;
// Save a new picture
agent.post('/api/pictures')
.send(picture)
.expect(200)
.end(function (pictureSaveErr, pictureSaveRes) {
// Handle picture save error
if (pictureSaveErr) {
return done(pictureSaveErr);
}
// Get the picture
agent.get('/api/pictures/' + pictureSaveRes.body._id)
.expect(200)
.end(function (pictureInfoErr, pictureInfoRes) {
// Handle picture error
if (pictureInfoErr) {
return done(pictureInfoErr);
}
// Set assertions
(pictureInfoRes.body._id).should.equal(pictureSaveRes.body._id);
(pictureInfoRes.body.title).should.equal(picture.title);
// Assert that the "isCurrentUserOwner" field is set to true since the current User created it
(pictureInfoRes.body.isCurrentUserOwner).should.equal(true);
// Call the assertion callback
done();
});
});
});
});
});
it('should be able to get a single picture if not signed in and verify the custom "isCurrentUserOwner" field is set to "false"', function (done) {
// Create new picture model instance
var pictureObj = new Picture(picture);
// Save the picture
pictureObj.save(function () {
request(app).get('/api/pictures/' + pictureObj._id)
.end(function (req, res) {
// Set assertion
res.body.should.be.instanceof(Object).and.have.property('title', picture.title);
// Assert the custom field "isCurrentUserOwner" is set to false for the un-authenticated User
res.body.should.be.instanceof(Object).and.have.property('isCurrentUserOwner', false);
// Call the assertion callback
done();
});
});
});
it('should be able to get single picture, that a different user created, if logged in & verify the "isCurrentUserOwner" field is set to "false"', function (done) {
// Create temporary user creds
var _creds = {
username: 'temp',
password: '[email protected]$Aw3$0m3'
};
// Create temporary user
var _user = new User({
firstName: 'Full',
lastName: 'Name',
displayName: 'Full Name',
email: '[email protected]',
username: _creds.username,
password: _creds.password,
provider: 'local'
});
_user.save(function (err, _user) {
// Handle save error
if (err) {
return done(err);
}
// Sign in with the user that will create the Picture
agent.post('/api/auth/signin')
.send(credentials)
.expect(200)
.end(function (signinErr, signinRes) {
// Handle signin error
if (signinErr) {
return done(signinErr);
}
// Get the userId
var userId = user._id;
// Save a new picture
agent.post('/api/pictures')
.send(picture)
.expect(200)
.end(function (pictureSaveErr, pictureSaveRes) {
// Handle picture save error
if (pictureSaveErr) {
return done(pictureSaveErr);
}
// Set assertions on new picture
(pictureSaveRes.body.title).should.equal(picture.title);
should.exist(pictureSaveRes.body.user);
should.equal(pictureSaveRes.body.user._id, userId);
// now signin with the temporary user
agent.post('/api/auth/signin')
.send(_creds)
.expect(200)
.end(function (err, res) {
// Handle signin error
if (err) {
return done(err);
}
// Get the picture
agent.get('/api/pictures/' + pictureSaveRes.body._id)
.expect(200)
.end(function (pictureInfoErr, pictureInfoRes) {
// Handle picture error
if (pictureInfoErr) {
return done(pictureInfoErr);
}
// Set assertions
(pictureInfoRes.body._id).should.equal(pictureSaveRes.body._id);
(pictureInfoRes.body.title).should.equal(picture.title);
// Assert that the custom field "isCurrentUserOwner" is set to false since the current User didn't create it
(pictureInfoRes.body.isCurrentUserOwner).should.equal(false);
// Call the assertion callback
done();
});
});
});
});
});
});
afterEach(function (done) {
User.remove().exec(function () {
Picture.remove().exec(done);
});
});
});<|fim▁end|>
|
});
it('should be able to get a single picture that has an orphaned user reference', function (done) {
// Create orphan user creds
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
setup(
name='django-tastypie',
version='0.12.2-dev',
description='A flexible & capable API layer for Django.',
author='Daniel Lindsley',
author_email='[email protected]',
url='http://github.com/toastdriven/django-tastypie/',
long_description=open('README.rst', 'r').read(),
packages=[
'tastypie',
'tastypie.utils',
'tastypie.management',
'tastypie.management.commands',
'tastypie.south_migrations',
'tastypie.migrations',
'tastypie.contrib',
'tastypie.contrib.gis',
'tastypie.contrib.contenttypes',
],<|fim▁hole|> zip_safe=False,
requires=[
'python_mimeparse(>=0.1.4)',
'dateutil(>=1.5, !=2.0)',
],
install_requires=[
'python-mimeparse >= 0.1.4',
'python-dateutil >= 1.5, != 2.0',
],
tests_require=['mock', 'PyYAML', 'lxml', 'defusedxml'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Utilities'
],
)<|fim▁end|>
|
package_data={
'tastypie': ['templates/tastypie/*'],
},
|
<|file_name|>MultiCloudPricingService.java<|end_file_name|><|fim▁begin|>package de.uniulm.omi.cloudiator.sword.multicloud.service;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableSet;
import com.google.inject.Inject;
import de.uniulm.omi.cloudiator.sword.domain.Cloud;
import de.uniulm.omi.cloudiator.sword.domain.Pricing;
import de.uniulm.omi.cloudiator.sword.multicloud.pricing.PricingSupplierFactory;
import de.uniulm.omi.cloudiator.sword.service.PricingService;
import java.util.*;
import static com.google.common.base.Preconditions.checkNotNull;
public class MultiCloudPricingService implements PricingService {
private final CloudRegistry cloudRegistry;
@Inject
private PricingSupplierFactory pricingSupplierFactory;
@Inject
public MultiCloudPricingService(CloudRegistry cloudRegistry) {
this.cloudRegistry = checkNotNull(cloudRegistry, "cloudRegistry is null");
}
@Override
public Iterable<Pricing> listPricing() {
/*final ImmutableSet.Builder<Pricing> builder = ImmutableSet.builder();
Optional<Cloud> awsCloud = cloudRegistry.list().stream().filter(cloud -> cloud.api().providerName().equals("aws-ec2")).findFirst();
if(awsCloud.isPresent()) {
Supplier<Set<Pricing>> awsPricingSupplier = pricingSupplierFactory.createAWSPricingSupplier(awsCloud.get().credential());<|fim▁hole|> return builder.build();*/
final ImmutableSet.Builder<Pricing> builder = ImmutableSet.builder();
cloudRegistry
.list()
.stream()
.filter(cloud -> cloud.api().providerName().equals("aws-ec2"))
.findFirst()
.ifPresent(cloud -> builder.addAll(pricingSupplierFactory.createAWSPricingSupplier(cloud.credential()).get()));
return builder.build();
}
}<|fim▁end|>
|
builder.addAll(awsPricingSupplier.get());
}
|
<|file_name|>ConfigurationConverterTest.java<|end_file_name|><|fim▁begin|>package com.github.aureliano.evtbridge.converter;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
import com.github.aureliano.evtbridge.core.config.EventCollectorConfiguration;
import com.github.aureliano.evtbridge.core.helper.DataHelper;
<|fim▁hole|> Map<String, Object> map = this.createConfigurationMap();
EventCollectorConfiguration configuration = new ConfigurationConverter().convert(map);
assertEquals("xpto-collector", configuration.getCollectorId());
assertFalse(configuration.isPersistExecutionLog());
assertTrue(configuration.isMultiThreadingEnabled());
assertEquals(DataHelper.mapToProperties(this.createMetadata()), configuration.getMetadata());
}
private Map<String, Object> createConfigurationMap() {
Map<String, Object> map = new HashMap<String, Object>();
map.put("collectorId", "xpto-collector");
map.put("persistExecutionLog", false);
map.put("multiThreadingEnabled", "true");
map.put("metadata", this.createMetadata());
return map;
}
private Map<String, Object> createMetadata() {
Map<String, Object> map = new HashMap<String, Object>();
map.put("test", true);
map.put("host", "127.0.0.1");
return map;
}
}<|fim▁end|>
|
public class ConfigurationConverterTest {
@Test
public void testConvert() {
|
<|file_name|>disk.py<|end_file_name|><|fim▁begin|>import os
from pi3bar.plugins.base import Plugin
from pi3bar.utils import humanize_size_bytes
class Disk(Plugin):
"""<|fim▁hole|>
Available format replacements (``*_p`` = percentage):
* ``%(size)s`` E.g. '100GB'
* ``%(free)s`` E.g. '70GB'
* ``%(free_p)f`` E.g. 70.0
* ``%(available)s`` E.g. '65GB'
* ``%(available_p)f`` E.g. 65.0
* ``%(usage)s`` E.g. '30GB'
* ``%(usage_p)f`` E.g. 30.0
:param full_format: :class:`str` - Format string (default: '%(usage_p).2f%% (%(size)s)')
:param short_format: :class:`str` - Short format string (default: '%(usage_p).2f%%')
:param warning_usage: :class:`int` - Warning breakpoint (default: 90)
:param warning_color: :class:`str` - Warning color (default: '#ffff00')
:param warning_background: :class:`str` - Warning background color (default: None)
:param critical_usage: :class:`int` - Critical breakpoint (default: 95)
:param critical_color: :class:`str` - Critical color (default: None)
:param critical_background: :class:`str` - Critical background color (default: '#ff0000')
Examples:
.. code-block:: python
# root
Disk('/')
# custom format (escape '%' with '%')
Disk('/', full_format='%(usage)s / %(size)s', short_format='%(free_p)f%%')
# adjust warning/critical switches
Disk('/mnt', warning_usage=80, critical_usage=90)
"""
def __init__(self, mount_path, **kwargs):
self.instance = mount_path
self.mount_path = mount_path
self.full_format = kwargs.pop('full_format', '%(usage_p).2f%% (%(size)s)')
self.short_format = kwargs.pop('short_format', '%(usage_p).2f%%')
self.warning_usage = kwargs.pop('warning_usage', 90)
self.warning_color = kwargs.pop('warning_color', '#ffff00')
self.warning_background = kwargs.pop('warning_background', None)
self.critical_usage = kwargs.pop('critical_usage', 95)
self.critical_color = kwargs.pop('critical_color', None)
self.critical_background = kwargs.pop('critical_background', '#ff0000')
super(Disk, self).__init__(**kwargs)
def get_stats(self):
statvfs = os.statvfs(self.mount_path)
size_bytes = statvfs.f_frsize * statvfs.f_blocks
free_bytes = statvfs.f_frsize * statvfs.f_bfree # with reserved space
free_percent = 100.0 / size_bytes * free_bytes
available_bytes = statvfs.f_frsize * statvfs.f_bavail # without reserved space
available_percent = 100.0 / size_bytes * available_bytes
usage_bytes = size_bytes - free_bytes
usage_percent = 100.0 / size_bytes * usage_bytes
return {
'size': humanize_size_bytes(size_bytes), # 100GB
'free': humanize_size_bytes(free_bytes), # 70GB
'available': humanize_size_bytes(available_bytes), # 65GB
'usage': humanize_size_bytes(usage_bytes), # 30GB
'free_p': free_percent, # 70.0
'available_p': available_percent, # 65.0
'usage_p': usage_percent, # 30.0
}
def cycle(self):
stats = self.get_stats()
prefix = '%s ' % self.mount_path
self.full_text = prefix + self.full_format % stats
self.short_text = prefix + self.short_format % stats
if float(stats['usage_p']) > self.critical_usage:
self.color = self.critical_color
self.background = self.critical_background
elif float(stats['usage_p']) > self.warning_usage:
self.color = self.warning_color
self.background = self.warning_background
else:
self.color = None
self.background = None<|fim▁end|>
|
:class:`pi3bar.app.Pi3Bar` plugin to disk usage.
|
<|file_name|>Tracker.ts<|end_file_name|><|fim▁begin|>import { Queue } from "../Queue/Queue";
import { EventHandler } from "../Event/Handler/EventHandler";
import { ObjectMerger } from "../Common/ObjectMerger";
export class Tracker {
public static get SEND_COMMAND(): string { return "send"; }
constructor(private global: any, private eventHandler: EventHandler, private objectMerger: ObjectMerger) {}
public run() {
if (this.global !== undefined) {
let dataLayerCallback = this.global;
let queue = dataLayerCallback.q;
let push = dataLayerCallback.q.push;
this.process(new Queue(queue));
queue.push = (...dataLayerElements) => {
push.apply(queue, Array.prototype.slice.call(dataLayerElements, 0));
this.process(new Queue(queue));
};
}
}
private process(queue: Queue) {
let dataLayerElementPayload: any = {};
<|fim▁hole|> dataLayerElementPayload = this.objectMerger.merge(dataLayerElementPayload, dataLayerElement[1]);
if (dataLayerElement[0] === Tracker.SEND_COMMAND) {
this.global.q.splice(index, 1);
this.eventHandler.handle(dataLayerElementPayload);
}
}
});
}
}<|fim▁end|>
|
queue.consume((index, dataLayerElement) => {
if (dataLayerElement.length >= 2) {
|
<|file_name|>remark-toc-tests.ts<|end_file_name|><|fim▁begin|>import remark = require('remark')
import toc = require('remark-toc')
remark().use(toc)
remark().use(toc, {})
remark().use(toc, {
heading: 'heading'
})
remark().use(toc, {
maxDepth: 2
})
remark().use(toc, {
tight: true
})
<|fim▁hole|>})
remark().use(toc, {
prefix: 'prefix-'
})
remark().use(toc, {
parents: ['root', 'blockquote']
})<|fim▁end|>
|
remark().use(toc, {
skip: 'skip-heading'
|
<|file_name|>rot.rs<|end_file_name|><|fim▁begin|>use super::Vec2;
/// Rotation
#[derive(Copy, Clone)]
pub struct Rot {
pub s: f32,
pub c: f32
}
impl Rot {
pub fn new() -> Rot {
Rot {
s: 0.0,
c: 1.0
}
}
/// Initialize from an angle in radians
pub fn new_angle(angle: f32) -> Rot {
Rot {<|fim▁hole|> }
pub fn set(&mut self, angle: f32) {
self.s = angle.sin();
self.c = angle.cos();
}
/// Set to the identity rotation
pub fn set_identity(&mut self) {
self.s = 0.0;
self.c = 1.0;
}
/// Get the angle in radians
pub fn get_angle(&mut self) -> f32 {
self.s.atan2(self.c)
}
/// Get the x-axis
pub fn get_x_axis(&mut self) -> Vec2 {
Vec2::new(self.c, self.s)
}
/// Get the u axis
pub fn get_y_axis(&mut self) -> Vec2 {
Vec2::new(-self.s, self.c)
}
}<|fim▁end|>
|
s: angle.sin(),
c: angle.cos()
}
|
<|file_name|>make_waterfalls.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
import argparse
import pathlib
import numpy as np
def waterfall(input_filename, output_filename):
fs = 200
nfft = 8192
w = np.blackman(nfft)
x = np.fromfile(input_filename, 'int16')
x = (x[::2] + 1j*x[1::2])/2**15
freq_span = 5
nbins = round(freq_span / fs * nfft)
# In these recordings the internal reference was used, so there
# is a frequency offset
freq_offset = 11.6 if '2021-12-08T12:57:25' in input_filename.name else 0
band = int(input_filename.name.split('_')[-2].replace('kHz', ''))
# 1.6 Hz offset is at 10 MHz
freq_offset *= band / 10000
bin_offset = round(freq_offset / fs * nfft)
freq_sel = slice(nfft//2-nbins+bin_offset, nfft//2+nbins+1+bin_offset)
x = x[:x.size//nfft*nfft]
f = np.fft.fftshift(
np.fft.fft(w * x.reshape(-1, nfft)),
axes=1)
f = np.abs(f[:, freq_sel])**2
np.save(output_filename, f.astype('float32'))
def parse_args():
parser = argparse.ArgumentParser(
description='Make waterfalls from the December 2021 eclipse IQ data')
parser.add_argument('input_folder',
help='Input folder')<|fim▁hole|> parser.add_argument('output_folder',
help='Output folder')
return parser.parse_args()
def main():
args = parse_args()
input_files = pathlib.Path(args.input_folder).glob('*.sigmf-data')
output_path = pathlib.Path(args.output_folder)
for f_in in input_files:
f_out_name = f_in.name.replace('.sigmf-data', '_waterfall.npy')
f_out = output_path / f_out_name
waterfall(f_in, f_out)
if __name__ == '__main__':
main()<|fim▁end|>
| |
<|file_name|>gyptest-dirname.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple rules when using an explicit build target of 'all'.
"""
import TestGyp
import os
import sys
test = TestGyp.TestGyp(formats=['make', 'ninja', 'android', 'xcode', 'msvs'])
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('actions.gyp', chdir='relocate/src')
expect = """\
no dir here
hi c
hello baz
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir'
else:
chdir = 'relocate/src'
test.run_built_executable('gencc_int_output', chdir=chdir, stdout=expect)
if test.format == 'msvs':
test.run_built_executable('gencc_int_output_external', chdir=chdir,
stdout=expect)
test.must_match('relocate/src/subdir/foo/bar/baz.dirname',
os.path.join('foo', 'bar'))
test.must_match('relocate/src/subdir/a/b/c.dirname',
os.path.join('a', 'b'))
# FIXME the xcode and make generators incorrectly convert RULE_INPUT_PATH<|fim▁hole|> test.must_match('relocate/src/subdir/a/b/c.path',
os.path.join('a', 'b', 'c.printvars'))
test.pass_test()<|fim▁end|>
|
# to an absolute path, making the tests below fail!
if test.format != 'xcode' and test.format != 'make':
test.must_match('relocate/src/subdir/foo/bar/baz.path',
os.path.join('foo', 'bar', 'baz.printvars'))
|
<|file_name|>inventorypage.cpp<|end_file_name|><|fim▁begin|>#include "inventorypage.h"
#include "ui_inventorypage.h"
#include "fak.h"
#include "inventorypage.h"
#include <QtDebug>
#include "QtDebug"
#include <QSqlQuery>
#include <QSqlError>
#include <QSqlRecord>
static const QString path = "C:/Sr.GUI/FAKKIT/db/fakdb4.db";
InventoryPage::InventoryPage(QWidget *parent) :
QDialog(parent),
ui(new Ui::InventoryPage)
{
ui->setupUi(this);
this->setStyleSheet("background-color:#626065;");
<|fim▁hole|> qDebug() << "Stuff in db:";
QSqlQuery query;
query.exec("SELECT * FROM codes");
int idName = query.record().indexOf("name");
while (query.next())
{
QString name = query.value(idName).toString();
qDebug() << "===" << name;
//ui->dbOutput->setPlainText(name);
ui->dbOutput->append(name);
}
}
DbManager::DbManager(const QString &path)
{
m_db = QSqlDatabase::addDatabase("QSQLITE");
m_db.setDatabaseName(path);
if (!m_db.open())
{
qDebug() << "Error: connection with database fail";
}
else
{
qDebug() << "Database: connection ok";
}
}
InventoryPage::~InventoryPage()
{
delete ui;
}
void InventoryPage::on_HomeButton_clicked()
{
}<|fim▁end|>
|
DbManager db(path);
|
<|file_name|>CombatLogParser.ts<|end_file_name|><|fim▁begin|>import {
ArcaneIntellect,
CancelledCasts,
DivertedEnergy,
ElementalBarrier,
FocusMagic,
GroundingSurge,
IreOfTheAscended,
MirrorImage,
RuneOfPower,
ShiftingPower,
SiphonedMalice,
TempestBarrier,
} from '@wowanalyzer/mage';
import CoreCombatLogParser from 'parser/core/CombatLogParser';
import ArcaneTorrent from 'parser/shared/modules/racials/bloodelf/ArcaneTorrent';
import Checklist from './modules/checklist/Module';
import Buffs from './modules/features/Buffs';
//Normalizers
//Features
import Abilities from './modules/features/Abilities';
import AlwaysBeCasting from './modules/features/AlwaysBeCasting';
import CooldownThroughputTracker from './modules/features/CooldownThroughputTracker';
import WintersChill from './modules/features/WintersChill';
import BrainFreeze from './modules/features/BrainFreeze';
import IceLance from './modules/features/IceLance';
import IcyVeins from './modules/features/IcyVeins';
import FrozenOrb from './modules/cooldowns/FrozenOrb';
import ColdSnap from './modules/cooldowns/ColdSnap';
//Talents
import WaterElemental from './modules/features/WaterElemental';
import LonelyWinter from './modules/talents/LonelyWinter';
import SplittingIce from './modules/talents/SplittingIce';
import ThermalVoid from './modules/talents/ThermalVoid';
import GlacialSpike from './modules/talents/GlacialSpike';
import BoneChilling from './modules/talents/BoneChilling';
//Legendaries
import ColdFront from './modules/items/ColdFront';
import GlacialFragments from './modules/items/GlacialFragments';
//Conduits
import IceBite from './modules/items/IceBite';
import IcyPropulsion from './modules/items/IcyPropulsion';
import ShiveringCore from './modules/items/ShiveringCore';
import UnrelentingCold from './modules/items/UnrelentingCold';
class CombatLogParser extends CoreCombatLogParser {
static specModules = {
checklist: Checklist,
buffs: Buffs,
//Normalizers
// Features
abilities: Abilities,
alwaysBeCasting: AlwaysBeCasting,
cancelledCasts: CancelledCasts,
cooldownThroughputTracker: CooldownThroughputTracker,
wintersChill: WintersChill,
brainFreeze: BrainFreeze,
iceLance: IceLance,
icyVeins: IcyVeins,
arcaneIntellect: ArcaneIntellect,
mirrorImage: MirrorImage,
elementalBarrier: ElementalBarrier,
waterElemental: WaterElemental,
// Talents
boneChilling: BoneChilling,
lonelyWinter: LonelyWinter,
focusMagic: FocusMagic,
runeOfPower: RuneOfPower,
splittingIce: SplittingIce,
thermalVoid: ThermalVoid,
glacialSpike: GlacialSpike,
// Cooldowns
frozenOrb: FrozenOrb,
coldSnap: ColdSnap,
//Legendaries
coldFront: ColdFront,
glacialFragments: GlacialFragments,
<|fim▁hole|> //Covenants
shiftingPower: ShiftingPower,
//Conduits
iceBite: IceBite,
icyPropulsion: IcyPropulsion,
shiveringCore: ShiveringCore,
unrelentingCold: UnrelentingCold,
divertedEnergy: DivertedEnergy,
groundingSurge: GroundingSurge,
ireOfTheAscended: IreOfTheAscended,
tempestBarrier: TempestBarrier,
siphonedMalice: SiphonedMalice,
// There's no throughput benefit from casting Arcane Torrent on cooldown
arcaneTorrent: [ArcaneTorrent, { castEfficiency: null }] as const,
};
}
export default CombatLogParser;<|fim▁end|>
| |
<|file_name|>test_versioning.py<|end_file_name|><|fim▁begin|>from nose.tools import * # flake8: noqa
from api.base import settings
from tests.base import ApiTestCase
# The versions below are specifically for testing purposes and do not reflect the actual versioning of the API.
# If changes are made to this list, or to DEFAULT_VERSION, please reflect those changes in:
# api/base/settings/local-travis.py so that travis tests will pass.
TESTING_ALLOWED_VERSIONS = (
'2.0',
'2.0.1',
'2.1',
'2.2',
'3.0',
'3.0.1',
)
DEFAULT_VERSION = '2.0'
<|fim▁hole|> def setUp(self):
super(VersioningTestCase, self).setUp()
self.valid_url_path_version = '2.0'
self.valid_header_version = '2.0.1'
self.valid_query_parameter_version = '2.1'
self.invalid_url_path_version = '1.0'
self.invalid_header_version = '1.0.1'
self.invalid_query_parameter_version = '1.1'
self.valid_url_path_version_url = '/v2/'
self.invalid_url_path_version_url = '/v1/'
self.valid_query_parameter_version_url = '/v2/?version={}'.format(self.valid_query_parameter_version)
self.invalid_query_parameter_version_url = '/v2/?version={}'.format(self.invalid_query_parameter_version)
self._ALLOWED_VERSIONS = settings.REST_FRAMEWORK['ALLOWED_VERSIONS']
self._DEFAULT_VERSION = settings.REST_FRAMEWORK['DEFAULT_VERSION']
settings.REST_FRAMEWORK['ALLOWED_VERSIONS'] = TESTING_ALLOWED_VERSIONS
settings.REST_FRAMEWORK['DEFAULT_VERSION'] = DEFAULT_VERSION
def tearDown(self):
super(VersioningTestCase, self).tearDown()
settings.REST_FRAMEWORK['ALLOWED_VERSIONS'] = self._ALLOWED_VERSIONS
settings.REST_FRAMEWORK['DEFAULT_VERSION'] = self._DEFAULT_VERSION
class TestBaseVersioning(VersioningTestCase):
def setUp(self):
super(TestBaseVersioning, self).setUp()
def test_url_path_version(self):
res = self.app.get(self.valid_url_path_version_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['meta']['version'], self.valid_url_path_version)
def test_header_version(self):
headers = {'accept': 'application/vnd.api+json;version={}'.format(self.valid_header_version)}
res = self.app.get(self.valid_url_path_version_url, headers=headers)
assert_equal(res.status_code, 200)
assert_equal(res.json['meta']['version'], self.valid_header_version)
def test_query_param_version(self):
res = self.app.get(self.valid_query_parameter_version_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['meta']['version'], self.valid_query_parameter_version)
def test_url_path_version_not_in_allowed_versions(self):
res = self.app.get(self.invalid_url_path_version_url, expect_errors=True)
assert_equal(res.status_code, 404)
def test_header_version_not_in_allowed_versions(self):
headers = {'accept': 'application/vnd.api+json;version={}'.format(self.invalid_header_version)}
res = self.app.get(self.valid_url_path_version_url, headers=headers, expect_errors=True)
assert_equal(res.status_code, 406)
assert_equal(res.json['errors'][0]['detail'], 'Invalid version in "Accept" header.')
def test_query_param_version_not_in_allowed_versions(self):
res = self.app.get(self.invalid_query_parameter_version_url, expect_errors=True)
assert_equal(res.status_code, 404)
assert_equal(res.json['errors'][0]['detail'], 'Invalid version in query parameter.')
def test_query_parameter_version_not_within_url_path_major_version(self):
url = '/v2/?version=3.0.1'
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 409)
assert_equal(
res.json['errors'][0]['detail'],
'Version {} specified in query parameter does not fall within URL path version {}'.format(
'3.0.1',
self.valid_url_path_version
)
)
def test_header_version_not_within_url_path_major_version(self):
headers = {'accept': 'application/vnd.api+json;version=3.0.1'}
res = self.app.get(self.valid_url_path_version_url, headers=headers, expect_errors=True)
assert_equal(res.status_code, 409)
assert_equal(
res.json['errors'][0]['detail'],
'Version {} specified in "Accept" header does not fall within URL path version {}'.format(
'3.0.1',
self.valid_url_path_version
)
)
def test_header_version_and_query_parameter_version_match(self):
headers = {'accept': 'application/vnd.api+json;version={}'.format(self.valid_header_version)}
url = '/v2/?version={}'.format(self.valid_header_version)
res = self.app.get(url, headers=headers)
assert_equal(res.status_code, 200)
assert_equal(res.json['meta']['version'], self.valid_header_version)
def test_header_version_and_query_parameter_version_mismatch(self):
headers = {'accept': 'application/vnd.api+json;version={}'.format(self.valid_header_version)}
url = '/v2/?version={}'.format(self.valid_query_parameter_version)
res = self.app.get(url, headers=headers, expect_errors=True)
assert_equal(res.status_code, 409)
assert_equal(
res.json['errors'][0]['detail'],
'Version {} specified in "Accept" header does not match version {} specified in query parameter'.format(
self.valid_header_version,
self.valid_query_parameter_version
)
)
def test_header_version_bad_format(self):
headers = {'accept': 'application/vnd.api+json;version=not_at_all_a_version'}
res = self.app.get(self.valid_url_path_version_url, headers=headers, expect_errors=True)
assert_equal(res.status_code, 406)
assert_equal(res.json['errors'][0]['detail'], 'Invalid version in "Accept" header.')
def test_query_version_bad_format(self):
url = '/v2/?version=not_at_all_a_version'
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
assert_equal(res.json['errors'][0]['detail'], 'Invalid version in query parameter.')<|fim▁end|>
|
class VersioningTestCase(ApiTestCase):
|
<|file_name|>visEdge.js<|end_file_name|><|fim▁begin|>var _ = require('underscore');
var Backbone = require('backbone');
var GRAPHICS = require('../util/constants').GRAPHICS;
var VisBase = require('../visuals/visBase').VisBase;
var VisEdge = VisBase.extend({
defaults: {
tail: null,
head: null,
animationSpeed: GRAPHICS.defaultAnimationTime,
animationEasing: GRAPHICS.defaultEasing
},
validateAtInit: function() {
var required = ['tail', 'head'];
_.each(required, function(key) {
if (!this.get(key)) {
throw new Error(key + ' is required!');
}
}, this);
},
getID: function() {
return this.get('tail').get('id') + '.' + this.get('head').get('id');
},
initialize: function() {
this.validateAtInit();
// shorthand for the main objects
this.gitVisuals = this.get('gitVisuals');
this.gitEngine = this.get('gitEngine');
this.get('tail').get('outgoingEdges').push(this);
},
remove: function() {
this.removeKeys(['path']);
this.gitVisuals.removeVisEdge(this);
},
genSmoothBezierPathString: function(tail, head) {
var tailPos = tail.getScreenCoords();
var headPos = head.getScreenCoords();
return this.genSmoothBezierPathStringFromCoords(tailPos, headPos);
},
genSmoothBezierPathStringFromCoords: function(tailPos, headPos) {
// we need to generate the path and control points for the bezier. format
// is M(move abs) C (curve to) (control point 1) (control point 2) (final point)
// the control points have to be __below__ to get the curve starting off straight.
var coords = function(pos) {
return String(Math.round(pos.x)) + ',' + String(Math.round(pos.y));
};
var offset = function(pos, dir, delta) {
delta = delta || GRAPHICS.curveControlPointOffset;
return {
x: pos.x,
y: pos.y + delta * dir
};
};
var offset2d = function(pos, x, y) {
return {
x: pos.x + x,
y: pos.y + y
};
};
// first offset tail and head by radii
tailPos = offset(tailPos, -1, this.get('tail').getRadius());
headPos = offset(headPos, 1, this.get('head').getRadius());
var str = '';
// first move to bottom of tail
str += 'M' + coords(tailPos) + ' ';
// start bezier
str += 'C';
// then control points above tail and below head
str += coords(offset(tailPos, -1)) + ' ';
str += coords(offset(headPos, 1)) + ' ';
// now finish
str += coords(headPos);
// arrow head
var delta = GRAPHICS.arrowHeadSize || 10;
str += ' L' + coords(offset2d(headPos, -delta, delta));
str += ' L' + coords(offset2d(headPos, delta, delta));
str += ' L' + coords(headPos);
// then go back, so we can fill correctly
str += 'C';
str += coords(offset(headPos, 1)) + ' ';
str += coords(offset(tailPos, -1)) + ' ';
str += coords(tailPos);
return str;
},
getBezierCurve: function() {
return this.genSmoothBezierPathString(this.get('tail'), this.get('head'));<|fim▁hole|> },
getStrokeColor: function() {
return GRAPHICS.visBranchStrokeColorNone;
},
setOpacity: function(opacity) {
opacity = (opacity === undefined) ? 1 : opacity;
this.get('path').attr({opacity: opacity});
},
genGraphics: function(paper) {
var pathString = this.getBezierCurve();
var path = paper.path(pathString).attr({
'stroke-width': GRAPHICS.visBranchStrokeWidth,
'stroke': this.getStrokeColor(),
'stroke-linecap': 'round',
'stroke-linejoin': 'round',
'fill': this.getStrokeColor()
});
path.toBack();
this.set('path', path);
},
getOpacity: function() {
var stat = this.gitVisuals.getCommitUpstreamStatus(this.get('tail'));
var map = {
'branch': 1,
'head': GRAPHICS.edgeUpstreamHeadOpacity,
'none': GRAPHICS.edgeUpstreamNoneOpacity
};
if (map[stat] === undefined) { throw new Error('bad stat'); }
return map[stat];
},
getAttributes: function() {
var newPath = this.getBezierCurve();
var opacity = this.getOpacity();
return {
path: {
path: newPath,
opacity: opacity
}
};
},
animateUpdatedPath: function(speed, easing) {
var attr = this.getAttributes();
this.animateToAttr(attr, speed, easing);
},
animateFromAttrToAttr: function(fromAttr, toAttr, speed, easing) {
// an animation of 0 is essentially setting the attribute directly
this.animateToAttr(fromAttr, 0);
this.animateToAttr(toAttr, speed, easing);
},
animateToAttr: function(attr, speed, easing) {
if (speed === 0) {
this.get('path').attr(attr.path);
return;
}
this.get('path').toBack();
this.get('path').stop().animate(
attr.path,
speed !== undefined ? speed : this.get('animationSpeed'),
easing || this.get('animationEasing')
);
}
});
var VisEdgeCollection = Backbone.Collection.extend({
model: VisEdge
});
exports.VisEdgeCollection = VisEdgeCollection;
exports.VisEdge = VisEdge;<|fim▁end|>
| |
<|file_name|>AmahiModule.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2014 Amahi
*
* This file is part of Amahi.
*
* Amahi is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Amahi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Amahi. If not, see <http ://www.gnu.org/licenses/>.
*/
package org.amahi.anywhere;
import android.app.Application;
import android.content.Context;
import org.amahi.anywhere.activity.AuthenticationActivity;
import org.amahi.anywhere.activity.NavigationActivity;
import org.amahi.anywhere.activity.ServerAppActivity;
import org.amahi.anywhere.activity.ServerFileAudioActivity;
import org.amahi.anywhere.activity.ServerFileImageActivity;
import org.amahi.anywhere.activity.ServerFileVideoActivity;
import org.amahi.anywhere.activity.ServerFileWebActivity;
import org.amahi.anywhere.activity.ServerFilesActivity;<|fim▁hole|>import org.amahi.anywhere.fragment.ServerFilesFragment;
import org.amahi.anywhere.fragment.ServerSharesFragment;
import org.amahi.anywhere.fragment.SettingsFragment;
import org.amahi.anywhere.server.ApiModule;
import org.amahi.anywhere.service.AudioService;
import org.amahi.anywhere.service.VideoService;
import javax.inject.Singleton;
import dagger.Module;
import dagger.Provides;
/**
* Application dependency injection module. Includes {@link org.amahi.anywhere.server.ApiModule} and
* provides application's {@link android.content.Context} for possible consumers.
*/
@Module(
includes = {
ApiModule.class
},
injects = {
AuthenticationActivity.class,
NavigationActivity.class,
ServerAppActivity.class,
ServerFilesActivity.class,
ServerFileAudioActivity.class,
ServerFileImageActivity.class,
ServerFileVideoActivity.class,
ServerFileWebActivity.class,
NavigationFragment.class,
ServerSharesFragment.class,
ServerAppsFragment.class,
ServerFilesFragment.class,
ServerFileImageFragment.class,
ServerFileDownloadingFragment.class,
SettingsFragment.class,
AudioService.class,
VideoService.class
}
)
class AmahiModule
{
private final Application application;
public AmahiModule(Application application) {
this.application = application;
}
@Provides
@Singleton
Context provideContext() {
return application;
}
}<|fim▁end|>
|
import org.amahi.anywhere.fragment.ServerFileDownloadingFragment;
import org.amahi.anywhere.fragment.NavigationFragment;
import org.amahi.anywhere.fragment.ServerAppsFragment;
import org.amahi.anywhere.fragment.ServerFileImageFragment;
|
<|file_name|>main.js<|end_file_name|><|fim▁begin|>;
(function() {
var app = angular.module('dashboardApp', [
'ngRoute',
'dashboard'<|fim▁hole|>
var dashboard = angular.module('dashboard', []);
dashboard.run(function($rootScope, invocationUtils, stringUtils, api, urls) {
$rootScope.invocationUtils = invocationUtils;
$rootScope.stringUtils = stringUtils;
$rootScope._api = api;
$rootScope._urls = urls;
});
// this is a basis for some perf improvements
// for things that only needs to bind, well, once.
app.directive('bindOnce', function () {
return {
scope: true,
link: function($scope, $element) {
setTimeout(function () {
$scope.$destroy();
$element.removeClass('ng-binding ng-scope');
}, 0);
}
};
});
dashboard.factory('$exceptionHandler', function() {
return function(exception, cause) {
exception.message += ' (caused by "' + cause + '")';
console.log(["CATCH", exception, cause]);
throw exception;
};
});
app.config(['$routeProvider',
function ($routeProvider) {
var defaultHomePage = '/jobs'; //or /functions if not in Azure Web Sites
$routeProvider.
when('/', {
redirectTo: defaultHomePage
}).
when('/jobs', {
templateUrl: 'app/views/JobsList.html',
controller: 'JobsListController'
}).
when('/jobs/triggered/:jobName', {
templateUrl: 'app/views/TriggeredJob.html',
controller: 'TriggeredJobController'
}).
when('/jobs/continuous/:jobName', {
templateUrl: 'app/views/ContinuousJob.html',
controller: 'ContinuousJobController'
}).
when('/jobs/triggered/:jobName/runs/:runId', {
templateUrl: 'app/views/TriggeredJobRun.html',
controller: 'TriggeredJobRunController'
}).
when('/functions', {
templateUrl: 'app/views/FunctionsHome.html',
controller: 'FunctionsHomeController'
}).
when('/functions/definitions/:functionId', {
templateUrl: 'app/views/Function.html',
controller: 'FunctionController'
}).
when('/functions/invocations/:invocationId', {
templateUrl: 'app/views/FunctionInvocation.html',
controller: 'FunctionInvocationController'
}).
when('/about', {
templateUrl: 'app/views/AboutHome.html',
controller: 'AboutController'
}).
when('/diagnostics/indexerLogEntry/:entryId', {
templateUrl: 'app/views/IndexerLogEntry.html',
controller: 'IndexerLogEntryController'
}).
otherwise({
redirectTo: '/'
});
}]);
// simple paging support
app.filter('startFrom', function() {
return function(input, start) {
start = +start; // ensure int
return input.slice(start);
};
});
app.run(function ($rootScope) {
// Initialize errors / warnings
$rootScope.errors = [];
$rootScope.warnings = [];
});
})();<|fim▁end|>
|
]);
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
|
__author__ = 'matjaz'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.