| prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k) |
|---|---|
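Each row below pairs a fill-in-the-middle prompt (a source file wrapped in `<|file_name|>…<|end_file_name|>` and `<|fim▁begin|>…<|fim▁hole|>…<|fim▁end|>` markers) with the completion that fills the hole. A minimal sketch of how one row could be reassembled into the full file is shown here; the marker strings are copied from the rows themselves, but the splitting logic is an assumption about the layout, not an official loader.

```python
# Sketch: rebuild the original file from one (prompt, completion) row.
# Marker strings are taken from the rows below; this is an illustration of
# the layout, not a reference parser for the dataset.
FILE_NAME = "<|file_name|>"
END_FILE_NAME = "<|end_file_name|>"
FIM_BEGIN = "<|fim▁begin|>"
FIM_HOLE = "<|fim▁hole|>"
FIM_END = "<|fim▁end|>"

def reassemble(prompt: str, completion: str) -> tuple[str, str]:
    """Return (file_name, full_source) for a single row."""
    name = prompt.split(FILE_NAME, 1)[1].split(END_FILE_NAME, 1)[0]
    body = prompt.split(FIM_BEGIN, 1)[1].rsplit(FIM_END, 1)[0]
    prefix, suffix = body.split(FIM_HOLE, 1)
    return name, prefix + completion + suffix
```

For the first row below, `prefix + completion + suffix` yields `LexException.java` with its constructor restored.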
<|file_name|>LexException.java<|end_file_name|><|fim▁begin|>package br.com.ceducarneiro.analisadorsintatico;
public class LexException extends Exception {
private char badCh;
private int line, column;
<|fim▁hole|> this.line = line;
this.column = column;
}
@Override
public String toString() {
return String.format("Caractere inesperado: \"%s\" na linha %d coluna %d", badCh != 0 ? badCh : "EOF", line, column);
}
}<|fim▁end|> | public LexException(int line, int column, char ch) {
badCh = ch; |
<|file_name|>test_with_fixture.py<|end_file_name|><|fim▁begin|>"""Tests for `fix.with_fixture`."""
from __future__ import with_statement
import os
import shutil
import tempfile
from types import FunctionType
from fix import with_fixture
def test_exists():
"""`fix.with_fixture` function exists"""
assert isinstance(with_fixture, FunctionType)
def test_setup_only():
"""`setup_only` fixture works as expected"""
def setup_only(context):
"""A fixture with no `teardown()`."""
def setup():
"""Add something to the context."""
assert context == {}
context.squee = "kapow"
return setup
@with_fixture(setup_only)
def case(context):
"""Check that the context has been set up."""
assert context == {"squee": "kapow"}
case() # pylint: disable=E1120
def test_setup_teardown():
"""`setup_teardown` fixture works as expected"""
def setup_teardown(context):
"""A fixture with both `setup()` and `teardown()`."""
def setup():
"""Add something to the context."""
assert context == {}
context.squee = "kapow"
def teardown():
"""Check that `context.squee` has changed."""
assert context == {"squee": "boing"}
return setup, teardown
@with_fixture(setup_teardown)
def case(context):
"""Alter the context."""
assert context == {"squee": "kapow"}
context.squee = "boing"
case() # pylint: disable=E1120
def test_multiple_invocation():
"""`multiple` fixture creates a fresh context each invocation"""
<|fim▁hole|> """A fixture to be invoked multiple times."""
def setup():
"""Add something to the context."""
assert context == {}
context.squee = "kapow"
def teardown():
"""Check that `context.squee` has changed."""
assert context == {"squee": "kapow", "boing": "thunk"}
return setup, teardown
@with_fixture(multiple)
def case(context):
"""Add to the context."""
assert context == {"squee": "kapow"}
context.boing = "thunk"
for _ in range(3):
case() # pylint: disable=E1120
def test_external():
"""`external` fixture interacts as expected with the 'real world'."""
def external(context, files=3):
"""A fixture to manipulate temporary files and directories."""
def setup():
"""Create some temporary files."""
context.temp_dir = tempfile.mkdtemp()
context.filenames = ["file_%03d" % i for i in range(files)]
for filename in context.filenames:
with open(os.path.join(context.temp_dir, filename), "w") as f:
f.write("This is the file %r.\n" % filename)
def teardown():
"""Delete the temporary files created in `setup()`."""
shutil.rmtree(context.temp_dir)
return setup, teardown
@with_fixture(external, files=5)
def check_files(context):
"""Return the number of present and absent files."""
present = 0
absent = 0
for filename in context.filenames:
if os.path.exists(os.path.join(context.temp_dir, filename)):
present += 1
else:
absent += 1
return context.temp_dir, present, absent
temp_dir, present, absent = check_files() # pylint: disable=E1120
assert not os.path.exists(temp_dir)
assert present == 5
assert absent == 0<|fim▁end|> | def multiple(context): |
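The tests above pin down the observable behaviour of `fix.with_fixture`: each invocation of the decorated case gets a fresh context that supports both attribute access and dict comparison, the fixture may return either `setup` or `(setup, teardown)`, extra keyword arguments are forwarded to the fixture, and the case's return value is passed through. A minimal sketch of a decorator with that behaviour follows; it is inferred from these tests only and is not the actual `fix` implementation.

```python
# Sketch of a with_fixture decorator satisfying the tests above
# (inferred behaviour only; not the real `fix` library).
import functools


class _Context(dict):
    """Dict whose keys can also be read and written as attributes."""

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value


def with_fixture(fixture, **fixture_kwargs):
    """Run the decorated case inside `fixture`, with a fresh context per call."""
    def decorator(case):
        @functools.wraps(case)
        def wrapper(*args, **kwargs):
            context = _Context()
            returned = fixture(context, **fixture_kwargs)
            setup, teardown = (returned if isinstance(returned, tuple)
                               else (returned, None))
            setup()
            try:
                return case(context, *args, **kwargs)
            finally:
                if teardown is not None:
                    teardown()
        return wrapper
    return decorator
```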
<|file_name|>iobuf.rs<|end_file_name|><|fim▁begin|>// Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::ops::{Deref, DerefMut};
use data_model::IoBufMut;
/// A trait for describing regions of memory to be used for IO.
///
/// # Safety
///
/// Types that implement this trait _must_ guarantee that the memory regions described by
/// `as_iobufs()` are valid for the lifetime of the borrow.
pub unsafe trait AsIoBufs {
/// Returns a slice describing regions of memory on which to perform some IO. The returned
/// memory region descriptions may be passed on to the OS kernel so implementations must
/// guarantee that the memory regions are valid for the lifetime of the borrow.
fn as_iobufs(&mut self) -> &[IoBufMut];
}
/// An owned buffer for IO.
///
/// This type will be most commonly used to wrap a `Vec<u8>`.
///
/// # Examples
///
/// ```
/// # async fn do_it() -> anyhow::Result<()> {
/// use cros_async::{Executor, File, OwnedIoBuf};
/// use std::{convert::TryFrom, fs::OpenOptions};
///
/// let urandom = File::open("/dev/urandom")?;
/// let buf = OwnedIoBuf::new(vec![0; 256]);
///
/// let (res, mut buf) = urandom.read_iobuf(buf, None).await;
/// let count = res?;
/// buf.truncate(count);
///
/// let null = OpenOptions::new()
/// .write(true)
/// .open("/dev/null")
/// .map_err(From::from)
/// .and_then(File::try_from)?;
/// while buf.len() > 0 {
/// let (res, data) = null.write_iobuf(buf, None).await;
/// let bytes_written = res?;
/// buf = data;
/// buf.advance(bytes_written);
/// }
/// # Ok(())
/// # }
/// # cros_async::Executor::new().run_until(do_it()).unwrap().unwrap();
/// ```
pub struct OwnedIoBuf {
buf: Box<[u8]>,
// This IoBufMut has a static lifetime because it points to the data owned by `buf` so the
// pointer is always valid for the lifetime of this struct.
iobuf: [IoBufMut<'static>; 1],
}
impl OwnedIoBuf {
/// Create a new owned IO buffer.
pub fn new<B: Into<Box<[u8]>>>(buf: B) -> OwnedIoBuf {
let mut buf = buf.into();
let iobuf = unsafe { IoBufMut::from_raw_parts(buf.as_mut_ptr(), buf.len()) };
OwnedIoBuf {
buf,
iobuf: [iobuf],
}
}
/// Returns the length of the IO buffer.
pub fn len(&self) -> usize {
self.iobuf[0].len()
}
/// Returns true if the length of the buffer is 0.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Advances the beginning of the buffer by `count` bytes.
///
/// Panics if `count > self.len()`.
pub fn advance(&mut self, count: usize) {
self.iobuf[0].advance(count)
}
/// Change the size of the buffer used for IO.
///
/// Has no effect if `len > self.len()`.
pub fn truncate(&mut self, len: usize) {
self.iobuf[0].truncate(len)
}
/// Reset the buffer to its full length.
pub fn reset(&mut self) {
self.iobuf[0] = unsafe { IoBufMut::from_raw_parts(self.buf.as_mut_ptr(), self.buf.len()) };<|fim▁hole|> self.buf.into()
}
}
impl Deref for OwnedIoBuf {
type Target = [u8];
fn deref(&self) -> &Self::Target {
&self.buf
}
}
impl DerefMut for OwnedIoBuf {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.buf
}
}
// Safe because the pointer and length in the returned `&[IoBufMut]` are valid for the lifetime of
// the OwnedIoBuf.
unsafe impl AsIoBufs for OwnedIoBuf {
fn as_iobufs(&mut self) -> &[IoBufMut] {
&self.iobuf
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn len() {
let buf = OwnedIoBuf::new(vec![0xcc; 64]);
assert_eq!(buf.len(), 64);
}
#[test]
fn advance() {
let mut buf = OwnedIoBuf::new(vec![0xcc; 64]);
buf.advance(17);
assert_eq!(buf.len(), 47);
assert_eq!(buf.iobuf[0].as_ptr(), unsafe { buf.buf.as_ptr().add(17) });
buf.advance(9);
assert_eq!(buf.len(), 38);
assert_eq!(buf.iobuf[0].as_ptr(), unsafe { buf.buf.as_ptr().add(26) });
buf.advance(38);
assert_eq!(buf.len(), 0);
assert_eq!(buf.iobuf[0].as_ptr(), unsafe { buf.buf.as_ptr().add(64) });
}
#[test]
fn truncate() {
let mut buf = OwnedIoBuf::new(vec![0xcc; 64]);
buf.truncate(99);
assert_eq!(buf.len(), 64);
buf.truncate(64);
assert_eq!(buf.len(), 64);
buf.truncate(22);
assert_eq!(buf.len(), 22);
buf.truncate(0);
assert_eq!(buf.len(), 0);
}
#[test]
fn reset() {
let mut buf = OwnedIoBuf::new(vec![0xcc; 64]);
buf.truncate(22);
assert_eq!(buf.len(), 22);
assert_eq!(buf.iobuf[0].as_ptr(), buf.buf.as_ptr());
buf.advance(17);
assert_eq!(buf.len(), 5);
assert_eq!(buf.iobuf[0].as_ptr(), unsafe { buf.buf.as_ptr().add(17) });
buf.reset();
assert_eq!(buf.len(), 64);
assert_eq!(buf.iobuf[0].as_ptr(), buf.buf.as_ptr());
}
}<|fim▁end|> | }
/// Convert this `OwnedIoBuf` back into the inner type.
pub fn into_inner<C: From<Box<[u8]>>>(self) -> C { |
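The `advance`/`truncate`/`reset` doc comments and tests above fully determine the window bookkeeping: `advance` moves the start of the window forward and shrinks its length, `truncate` only ever shrinks the length, and `reset` restores the full window. A small Python model of just that bookkeeping follows, checked against the values in the `advance` test; it illustrates the semantics and is unrelated to the actual `IoBufMut` type.

```python
# Toy model of the buffer-window semantics tested above (illustration only).
class BufWindow:
    def __init__(self, size):
        self.size = size       # full capacity of the underlying buffer
        self.start = 0         # offset of the window into the buffer
        self.length = size     # current window length

    def advance(self, count):
        """Move the start of the window forward by `count` bytes."""
        assert count <= self.length
        self.start += count
        self.length -= count

    def truncate(self, length):
        """Shrink the window; no effect if `length` exceeds the current length."""
        if length <= self.length:
            self.length = length

    def reset(self):
        """Restore the window to cover the whole buffer."""
        self.start = 0
        self.length = self.size


# Mirrors the `advance` test: lengths 64 -> 47 -> 38 -> 0, offsets 0 -> 17 -> 26 -> 64.
w = BufWindow(64)
w.advance(17); assert (w.length, w.start) == (47, 17)
w.advance(9);  assert (w.length, w.start) == (38, 26)
w.advance(38); assert (w.length, w.start) == (0, 64)
```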
<|file_name|>macros.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2015, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
#![macro_use]
macro_rules! check_pointer(
($tmp_pointer:ident, $gtk_struct:ident) => ( check_pointer!($tmp_pointer, $gtk_struct, G_OBJECT) );
($tmp_pointer:ident, $gtk_struct:ident, $cast_fn:ident) => (
if $tmp_pointer == ::std::ptr::null_mut() {
::std::option::Option::None
} else {
unsafe {
::gobject_ffi::g_object_ref(
::cast::$cast_fn($tmp_pointer as *mut _) as *mut ::libc::c_void);
}
::std::option::Option::Some($gtk_struct {
pointer: $tmp_pointer as *mut _
})
}
);
);
macro_rules! struct_Widget(
($gtk_struct:ident) => (
pub struct $gtk_struct {
pub pointer: *mut ffi::GtkWidget
}
);
);
macro_rules! impl_TraitObject(
($gtk_struct:ident, $ffi_type:ident) => (
impl ::glib::traits::FFIGObject for $gtk_struct {
fn unwrap_gobject(&self) -> *mut ::gobject_ffi::GObject {
self.pointer as *mut ::gobject_ffi::GObject
}
fn wrap_object(object: *mut ::gobject_ffi::GObject) -> $gtk_struct {
unsafe{
::gobject_ffi::g_object_ref(object as *mut ::libc::c_void);
}
$gtk_struct {
pointer: object as *mut ffi::$ffi_type
}
}
}
impl ::GObjectTrait for $gtk_struct {}
);
);
macro_rules! impl_TraitWidget(
($gtk_struct:ident) => (
impl ::FFIWidget for $gtk_struct {
fn unwrap_widget(&self) -> *mut ffi::GtkWidget {
self.pointer
}
fn wrap_widget(widget: *mut ffi::GtkWidget) -> $gtk_struct {
unsafe{
::gobject_ffi::g_object_ref(::ffi::cast_GtkObject(widget) as *mut ::libc::c_void);
}
$gtk_struct {
pointer: widget
}
}
}
impl ::WidgetTrait for $gtk_struct {}
impl ::glib::traits::FFIGObject for $gtk_struct {
fn unwrap_gobject(&self) -> *mut ::gobject_ffi::GObject {
use ::FFIWidget;
::cast::G_OBJECT(self.unwrap_widget())
}
fn wrap_object(object: *mut ::gobject_ffi::GObject) -> $gtk_struct {
unsafe{
::gobject_ffi::g_object_ref(object as *mut ::libc::c_void);
}
$gtk_struct {
pointer: object as *mut ffi::GtkWidget
}
}
}
impl ::GObjectTrait for $gtk_struct {}
);
);
macro_rules! impl_GObjectFunctions(
($gtk_struct:ident, $ffi_type:ident) => (
impl $gtk_struct {
pub fn unwrap_pointer(&self) -> *mut ffi::$ffi_type {
self.pointer<|fim▁hole|>
pub fn wrap_pointer(pointer: *mut ffi::$ffi_type) -> $gtk_struct {
$gtk_struct {
pointer: pointer
}
}
}
)
);
macro_rules! impl_drop(
($gtk_struct:ident) => ( impl_drop!($gtk_struct, GTK_WIDGET); );
($gtk_struct:ident, $cast_func:ident) => (
impl Drop for $gtk_struct {
fn drop(&mut self) {
unsafe {
::gobject_ffi::g_object_unref(self.pointer as *mut ::libc::c_void);
}
}
}
impl Clone for $gtk_struct {
fn clone(&self) -> $gtk_struct {
let pointer = unsafe {
::gobject_ffi::g_object_ref(self.pointer as *mut ::libc::c_void)
};
unsafe {
$gtk_struct {
pointer: ::cast::$cast_func(::std::mem::transmute(pointer))
}
}
}
}
);
);
// Useful for functions which take a valid widget or NULL for a default widget
// takes an Option<&trait::Widget> and returns the C widget pointer or ptr::null()
macro_rules! unwrap_widget(
($w:ident) => (
match $w {
Some(ref _w) => _w.unwrap_widget(),
None => ::std::ptr::null_mut()
};
);
);<|fim▁end|> | } |
<|file_name|>classglobal.js<|end_file_name|><|fim▁begin|>var classglobal =
[
[ "Buffer", "df/d84/classglobal.html#a77b5ab3a88955255ac7abc7251919fbc", null ],<|fim▁hole|> [ "run", "df/d84/classglobal.html#a917c15ea7d2eafaa6df0aea5868a86e7", null ],
[ "require", "df/d84/classglobal.html#a9b11defd1000737a5b70b50edfcc8156", null ],
[ "GC", "df/d84/classglobal.html#a02a28758a633a7b1493471415c8949ba", null ],
[ "console", "df/d84/classglobal.html#a69cd8fa430cb4536156e8052a37c9303", null ]
];<|fim▁end|> | |
<|file_name|>graph.js<|end_file_name|><|fim▁begin|>/**
* Created by faspert on 28.04.2015.
*/
'use strict';
angular.module('gardenApp')
.directive('gvDygraph', function () {
<|fim▁hole|> //replace: 'true', // replace directive by template in html
template: '<div id="graphdiv"></div>',
link: function (scope, element, attrs) { //DOM manipulation
console.log('testing the stuff');
var g = new Dygraph(element.children()[0], [[0,0]], {
title: 'Temperature / Humidite',
});
scope.$watch("data", function () {
console.log('scope changes');
var options = scope.options;
if (options === undefined) {
options = {};
}
//do not update if data is empty
if (scope.data.length != 0)
options.file = scope.data;
console.log(scope.data)
g.updateOptions(options);
g.resetZoom();
g.resize();
}, true);
}
}
});<|fim▁end|> | return {
restrict: 'EAC', //E = element, A = attribute, C = class, M = comment
scope: true , //use parent's scope
|
<|file_name|>authenticator_data.cc<|end_file_name|><|fim▁begin|>// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "device/fido/authenticator_data.h"
#include <utility>
<|fim▁hole|>#include "components/device_event_log/device_event_log.h"
#include "device/fido/attested_credential_data.h"
#include "device/fido/fido_parsing_utils.h"
namespace device {
namespace {
constexpr size_t kAttestedCredentialDataOffset =
kRpIdHashLength + kFlagsLength + kSignCounterLength;
} // namespace
// static
base::Optional<AuthenticatorData> AuthenticatorData::DecodeAuthenticatorData(
base::span<const uint8_t> auth_data) {
if (auth_data.size() < kAttestedCredentialDataOffset)
return base::nullopt;
auto application_parameter = auth_data.first<kRpIdHashLength>();
uint8_t flag_byte = auth_data[kRpIdHashLength];
auto counter =
auth_data.subspan<kRpIdHashLength + kFlagsLength, kSignCounterLength>();
auth_data = auth_data.subspan(kAttestedCredentialDataOffset);
base::Optional<AttestedCredentialData> attested_credential_data;
if (flag_byte & static_cast<uint8_t>(Flag::kAttestation)) {
auto maybe_result =
AttestedCredentialData::ConsumeFromCtapResponse(auth_data);
if (!maybe_result) {
return base::nullopt;
}
std::tie(attested_credential_data, auth_data) = std::move(*maybe_result);
}
base::Optional<cbor::Value> extensions;
if (flag_byte & static_cast<uint8_t>(Flag::kExtensionDataIncluded)) {
cbor::Reader::DecoderError error;
extensions = cbor::Reader::Read(auth_data, &error);
if (!extensions) {
FIDO_LOG(ERROR)
<< "CBOR decoding of authenticator data extensions failed ("
<< cbor::Reader::ErrorCodeToString(error) << ") from "
<< base::HexEncode(auth_data.data(), auth_data.size());
return base::nullopt;
}
if (!extensions->is_map()) {
FIDO_LOG(ERROR)
<< "Incorrect CBOR structure of authenticator data extensions: "
<< cbor::DiagnosticWriter::Write(*extensions);
return base::nullopt;
}
} else if (!auth_data.empty()) {
return base::nullopt;
}
return AuthenticatorData(application_parameter, flag_byte, counter,
std::move(attested_credential_data),
std::move(extensions));
}
AuthenticatorData::AuthenticatorData(
base::span<const uint8_t, kRpIdHashLength> application_parameter,
uint8_t flags,
base::span<const uint8_t, kSignCounterLength> counter,
base::Optional<AttestedCredentialData> data,
base::Optional<cbor::Value> extensions)
: application_parameter_(
fido_parsing_utils::Materialize(application_parameter)),
flags_(flags),
counter_(fido_parsing_utils::Materialize(counter)),
attested_data_(std::move(data)),
extensions_(std::move(extensions)) {
DCHECK(!extensions_ || extensions_->is_map());
DCHECK_EQ((flags_ & static_cast<uint8_t>(Flag::kExtensionDataIncluded)) != 0,
!!extensions_);
DCHECK_EQ(((flags_ & static_cast<uint8_t>(Flag::kAttestation)) != 0),
!!attested_data_);
}
AuthenticatorData::AuthenticatorData(AuthenticatorData&& other) = default;
AuthenticatorData& AuthenticatorData::operator=(AuthenticatorData&& other) =
default;
AuthenticatorData::~AuthenticatorData() = default;
void AuthenticatorData::DeleteDeviceAaguid() {
if (!attested_data_)
return;
attested_data_->DeleteAaguid();
}
std::vector<uint8_t> AuthenticatorData::SerializeToByteArray() const {
std::vector<uint8_t> authenticator_data;
fido_parsing_utils::Append(&authenticator_data, application_parameter_);
authenticator_data.insert(authenticator_data.end(), flags_);
fido_parsing_utils::Append(&authenticator_data, counter_);
if (attested_data_) {
// Attestations are returned in registration responses but not in assertion
// responses.
fido_parsing_utils::Append(&authenticator_data,
attested_data_->SerializeAsBytes());
}
if (extensions_) {
const auto maybe_extensions = cbor::Writer::Write(*extensions_);
if (maybe_extensions) {
fido_parsing_utils::Append(&authenticator_data, *maybe_extensions);
}
}
return authenticator_data;
}
std::vector<uint8_t> AuthenticatorData::GetCredentialId() const {
if (!attested_data_)
return std::vector<uint8_t>();
return attested_data_->credential_id();
}
} // namespace device<|fim▁end|> | #include "base/strings/string_number_conversions.h"
#include "components/cbor/diagnostic_writer.h"
#include "components/cbor/reader.h"
#include "components/cbor/writer.h" |
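`DecodeAuthenticatorData` above reads a fixed-size prefix (RP ID hash, flags byte, big-endian signature counter) before the optional attested-credential data and CBOR extensions, and branches on the AT and ED flag bits. A short Python sketch of that fixed-prefix layout follows, using the standard WebAuthn/CTAP2 sizes and flag values; the constant names are ours, not the ones declared in `device/fido`.

```python
# Sketch of the fixed-size authenticator-data prefix parsed above.
# Sizes and flag bits follow the WebAuthn/CTAP2 layout; names are illustrative.
RP_ID_HASH_LEN = 32
FLAGS_LEN = 1
SIGN_COUNTER_LEN = 4
FLAG_USER_PRESENT = 0x01    # UP
FLAG_USER_VERIFIED = 0x04   # UV
FLAG_ATTESTATION = 0x40     # AT: attested credential data included
FLAG_EXTENSION_DATA = 0x80  # ED: extension data included


def decode_fixed_prefix(auth_data: bytes):
    """Return (rp_id_hash, flags, counter, remainder), or None if too short."""
    offset = RP_ID_HASH_LEN + FLAGS_LEN + SIGN_COUNTER_LEN
    if len(auth_data) < offset:
        return None
    rp_id_hash = auth_data[:RP_ID_HASH_LEN]
    flags = auth_data[RP_ID_HASH_LEN]
    counter = int.from_bytes(auth_data[RP_ID_HASH_LEN + FLAGS_LEN:offset], "big")
    return rp_id_hash, flags, counter, auth_data[offset:]
```

The remainder then holds the attested credential data (if AT is set) followed by the CBOR-encoded extensions (if ED is set), which are exactly the two branches the C++ decoder takes.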
<|file_name|>emu.config.js<|end_file_name|><|fim▁begin|><|fim▁hole|> $routeProvider
.when('/emu',{
templateUrl: 'views/emu.html',
controller: 'emuController',
controllerAs: 'emu'
});
});
})();<|fim▁end|> | ;(function(){
'use strict';
angular.module('TTT')
.config(function($routeProvider){ |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import array
import ctypes
import py4j
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
_have_pandas = False
try:
import pandas
_have_pandas = True
except:
# No Pandas, but that's okay, we'll skip those tests
pass
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type, _make_type_verifier
from pyspark.sql.types import _array_signed_int_typecode_ctype_mappings, _array_type_mappings
from pyspark.sql.types import _array_unsigned_int_typecode_ctype_mappings
from pyspark.tests import QuietTest, ReusedPySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2, lit
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
_have_arrow = False
try:
import pyarrow
_have_arrow = True
except:
# No Arrow, but that's okay, we'll skip those tests
pass
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
<|fim▁hole|>
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate UDT in only Python
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
# regression test for SPARK-17035
def test_timestamp_microsecond(self):
tst = TimestampType()
self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
class SQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.spark = SparkSession(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
def tearDown(self):
super(SQLTests, self).tearDown()
# tear down test_bucketed_write state
self.spark.sql("DROP TABLE IF EXISTS pyspark_bucket")
def test_row_should_be_read_only(self):
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.spark.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
self.assertEqual(columns, ['name', 'age'])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_explode(self):
from pyspark.sql.functions import explode, explode_outer, posexplode_outer
d = [
Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}),
Row(a=1, intlist=[], mapfield={}),
Row(a=1, intlist=None, mapfield=None),
]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()]
self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])
result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()]
self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])
result = [x[0] for x in data.select(explode_outer("intlist")).collect()]
self.assertEqual(result, [1, 2, 3, None, None])
result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()]
self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
.createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_chained_udf(self):
self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
[row] = self.spark.sql("SELECT double(1)").collect()
self.assertEqual(row[0], 2)
[row] = self.spark.sql("SELECT double(double(1))").collect()
self.assertEqual(row[0], 4)
[row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
self.assertEqual(row[0], 6)
def test_single_udf_with_repeated_argument(self):
# regression test for SPARK-20685
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
row = self.spark.sql("SELECT add(1, 1)").first()
self.assertEqual(tuple(row), (2, ))
def test_multiple_udfs(self):
self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
[row] = self.spark.sql("SELECT double(1), double(2)").collect()
self.assertEqual(tuple(row), (2, 4))
[row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
self.assertEqual(tuple(row), (4, 12))
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
[row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(a=1)])
df = left.join(right, on='a', how='left_outer')
df = df.withColumn('b', udf(lambda x: 'x')(df.a))
self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
# regression test for SPARK-18589
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(b=1)])
f = udf(lambda a, b: a == b, BooleanType())
df = left.crossJoin(right).filter(f("a", "b"))
self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_without_arguments(self):
self.spark.catalog.registerFunction("foo", lambda: "bar")
[row] = self.spark.sql("SELECT foo()").collect()
self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.spark.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.spark.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a < 2, BooleanType())
sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col, sum
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a == 1, BooleanType())
sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
self.assertEqual(sel.collect(), [Row(key=1)])
my_copy = udf(lambda x: x, IntegerType())
my_add = udf(lambda a, b: int(a + b), IntegerType())
my_strlen = udf(lambda x: len(x), IntegerType())
sel = df.groupBy(my_copy(col("key")).alias("k"))\
.agg(sum(my_strlen(col("value"))).alias("s"))\
.select(my_add(col("k"), col("s")).alias("t"))
self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
def test_udf_in_generate(self):
from pyspark.sql.functions import udf, explode
df = self.spark.range(5)
f = udf(lambda x: list(range(x)), ArrayType(LongType()))
row = df.select(explode(f(*df))).groupBy().sum().first()
self.assertEqual(row[0], 10)
df = self.spark.range(3)
res = df.select("id", explode(f(df.id))).collect()
self.assertEqual(res[0][0], 1)
self.assertEqual(res[0][1], 0)
self.assertEqual(res[1][0], 2)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 2)
self.assertEqual(res[2][1], 1)
range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
res = df.select("id", explode(range_udf(df.id))).collect()
self.assertEqual(res[0][0], 0)
self.assertEqual(res[0][1], -1)
self.assertEqual(res[1][0], 0)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 1)
self.assertEqual(res[2][1], 0)
self.assertEqual(res[3][0], 1)
self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
from pyspark.sql.functions import udf
my_copy = udf(lambda x: x, IntegerType())
df = self.spark.range(10).orderBy("id")
res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
res.explain(True)
self.assertEqual(res.collect(), [Row(id=0, copy=0)])
def test_udf_registration_returns_udf(self):
df = self.spark.range(10)
add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())
self.assertListEqual(
df.selectExpr("add_three(id) AS plus_three").collect(),
df.select(add_three("id").alias("plus_three")).collect()
)
def test_non_existed_udf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: spark.udf.registerJavaFunction("udf1", "non_existed_udf"))
def test_non_existed_udaf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udaf",
lambda: spark.udf.registerJavaUDAF("udaf1", "non_existed_udaf"))
def test_multiLine_json(self):
people1 = self.spark.read.json("python/test_support/sql/people.json")
people_array = self.spark.read.json("python/test_support/sql/people_array.json",
multiLine=True)
self.assertEqual(people1.collect(), people_array.collect())
def test_multiline_csv(self):
ages_newlines = self.spark.read.csv(
"python/test_support/sql/ages_newlines.csv", multiLine=True)
expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
tmpPath,
ignoreLeadingWhiteSpace=False,
ignoreTrailingWhiteSpace=False)
expected = [Row(value=u' a,b , c ')]
readback = self.spark.read.text(tmpPath)
self.assertEqual(readback.collect(), expected)
shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
"python/test_support/sql/orc_partitioned/b=1/c=1"])
self.assertEqual(2, df.count())
def test_udf_with_input_file_name(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
sourceFile = udf(lambda path: path, StringType())
filePath = "python/test_support/sql/people1.json"
row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
def filename(path):
return path
sameText = udf(filename, StringType())
rdd = self.sc.textFile('python/test_support/sql/people.json')
df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
row = df.select(sameText(df['file'])).first()
self.assertTrue(row[0].find("people.json") != -1)
rdd2 = self.sc.newAPIHadoopFile(
'python/test_support/sql/people.json',
'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.Text')
df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
row2 = df2.select(sameText(df2['file'])).first()
self.assertTrue(row2[0].find("people.json") != -1)
def test_udf_defers_judf_initalization(self):
# This is separate from UDFInitializationTests
# to avoid context initialization
# when udf is called
from pyspark.sql.functions import UserDefinedFunction
f = UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
f._judf_placeholder,
"judf should not be initialized before the first call."
)
self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
self.assertIsNotNone(
f._judf_placeholder,
"judf should be initialized after UDF has been called."
)
def test_udf_with_string_return_type(self):
from pyspark.sql.functions import UserDefinedFunction
add_one = UserDefinedFunction(lambda x: x + 1, "integer")
make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
make_array = UserDefinedFunction(
lambda x: [float(x) for x in range(x, x + 3)], "array<double>")
expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
actual = (self.spark.range(1, 2).toDF("x")
.select(add_one("x"), make_pair("x"), make_array("x"))
.first())
self.assertTupleEqual(expected, actual)
def test_udf_shouldnt_accept_noncallable_object(self):
from pyspark.sql.functions import UserDefinedFunction
from pyspark.sql.types import StringType
non_callable = None
self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())
def test_udf_with_decorator(self):
from pyspark.sql.functions import lit, udf
from pyspark.sql.types import IntegerType, DoubleType
@udf(IntegerType())
def add_one(x):
if x is not None:
return x + 1
@udf(returnType=DoubleType())
def add_two(x):
if x is not None:
return float(x + 2)
@udf
def to_upper(x):
if x is not None:
return x.upper()
@udf()
def to_lower(x):
if x is not None:
return x.lower()
@udf
def substr(x, start, end):
if x is not None:
return x[start:end]
@udf("long")
def trunc(x):
return int(x)
@udf(returnType="double")
def as_double(x):
return float(x)
df = (
self.spark
.createDataFrame(
[(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
.select(
add_one("one"), add_two("one"),
to_upper("Foo"), to_lower("Foo"),
substr("foobar", lit(0), lit(3)),
trunc("float"), as_double("one")))
self.assertListEqual(
[tpe for _, tpe in df.dtypes],
["int", "double", "string", "string", "string", "bigint", "double"]
)
self.assertListEqual(
list(df.first()),
[2, 3.0, "FOO", "foo", "foo", 3, 1.0]
)
def test_udf_wrapper(self):
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType
def f(x):
"""Identity"""
return x
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
class F(object):
"""Identity"""
def __call__(self, x):
return x
f = F()
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
f = functools.partial(f, x=1)
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
schema = StructType().add("b", StringType()).add("a", IntegerType())
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
for verify in [False, True]:
df = self.spark.createDataFrame(input, schema, verifySchema=verify)
df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(10, df3.count())
input = [Row(a=x, b=str(x)) for x in range(10)]
df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
from pyspark.sql.types import _parse_schema_abstract, _infer_schema_type
rdd = self.sc.parallelize([(127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3])])
abstract = "byte1 short1 float1 time1 map1{} struct1(b) list1[]"
schema = _parse_schema_abstract(abstract)
typedSchema = _infer_schema_type(rdd.first(), schema)
df = self.spark.createDataFrame(rdd, typedSchema)
r = (127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1), {"a": 1}, Row(b=2), [1, 2, 3])
self.assertEqual(r, tuple(df.first()))
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _make_type_verifier
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_make_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _make_type_verifier(ExamplePointUDT())([1.0, 2.0]))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_make_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
self.assertRaises(
ValueError,
lambda: _make_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.show()
def test_nested_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
schema=schema)
df.collect()
schema = StructType().add("key", LongType()).add("val",
MapType(LongType(), PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
schema=schema)
df.collect()
def test_complex_nested_udt_in_df(self):
from pyspark.sql.functions import udf
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
gd = df.groupby("key").agg({"val": "collect_list"})
gd.collect()
udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
gd.select(udf(*gd)).collect()
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_infer_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
def test_column_operators(self):
ci = self.df.key
cs = self.df.value
c = ci == cs
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
self.assertRaisesRegexp(ValueError,
"Cannot apply 'in' operator against a column",
lambda: 1 in cs)
def test_column_getitem(self):
from pyspark.sql.functions import col
self.assertIsInstance(col("foo")[1:3], Column)
self.assertIsInstance(col("foo")[0], Column)
self.assertIsInstance(col("foo")["bar"], Column)
self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
aq = df.stat.approxQuantile("a", [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", "b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile(("a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr("a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov("a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab("a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
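# for i in 1..6, each value of a (0, 1, 2) pairs exactly once with each value of b (0, 1)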
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
self.assertTrue(row[1], 1)
self.assertTrue(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
return sum(diff) == len(a)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, 1).alias('b')).collect()
# The value argument can be implicitly cast to the element type of the array.
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_struct_type(self):
from pyspark.sql.types import StructType, StringType, StructField
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
self.assertRaises(ValueError, lambda: StructType().add("name"))
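# StructType should support iteration, len(), and lookup by field name, index, and slice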
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
self.assertRaises(KeyError, lambda: struct1["f9"])
self.assertRaises(IndexError, lambda: struct1[9])
self.assertRaises(TypeError, lambda: struct1[9.9])
def test_parse_datatype_string(self):
from pyspark.sql.types import _all_atomic_types, _parse_datatype_string
for k, t in _all_atomic_types.items():
if t != NullType:
self.assertEqual(t(), _parse_datatype_string(k))
self.assertEqual(IntegerType(), _parse_datatype_string("int"))
self.assertEqual(DecimalType(1, 1), _parse_datatype_string("decimal(1 ,1)"))
self.assertEqual(DecimalType(10, 1), _parse_datatype_string("decimal( 10,1 )"))
self.assertEqual(DecimalType(11, 1), _parse_datatype_string("decimal(11,1)"))
self.assertEqual(
ArrayType(IntegerType()),
_parse_datatype_string("array<int >"))
self.assertEqual(
MapType(IntegerType(), DoubleType()),
_parse_datatype_string("map< int, double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("struct<a:int, c:double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a:int, c:double"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a INT, c DOUBLE"))
def test_metadata_null(self):
from pyspark.sql.types import StructType, StringType, StructField
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_save_and_load(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.json(tmpPath, "overwrite")
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.save(format="json", mode="overwrite", path=tmpPath,
noUse="this options will not be used in save.")
actual = self.spark.read.load(format="json", path=tmpPath,
noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
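# temporarily switch the default data source to json so that load() without an explicit format reads the json output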
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
csvpath = os.path.join(tempfile.mkdtemp(), 'data')
df.write.option('quote', None).format('csv').save(csvpath)
shutil.rmtree(tmpPath)
def test_save_and_load_builder(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.mode("overwrite").json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.mode("overwrite").options(noUse="this options will not be used in save.")\
.option("noUse", "this option will not be used in save.")\
.format("json").save(path=tmpPath)
actual =\
self.spark.read.format("json")\
.load(path=tmpPath, noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_stream_trigger(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
# Should take at least one arg
try:
df.writeStream.trigger()
except ValueError:
pass
# Should not take multiple args
try:
df.writeStream.trigger(once=True, processingTime='5 seconds')
except ValueError:
pass
# Should take only keyword args
try:
df.writeStream.trigger('5 seconds')
self.fail("Should have thrown an exception")
except TypeError:
pass
def test_stream_read_options(self):
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream\
.format('text')\
.option('path', 'python/test_support/sql/streaming')\
.schema(schema)\
.load()
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
bad_schema = StructType([StructField("test", IntegerType(), False)])
schema = StructType([StructField("data", StringType(), False)])
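# the path, schema and format passed to load() should override the ones previously set on the reader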
df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \
.schema(bad_schema)\
.load(path='python/test_support/sql/streaming', schema=schema, format='text')
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
.withColumn('id', lit(1))
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
.format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
fake1 = os.path.join(tmpPath, 'fake1')
fake2 = os.path.join(tmpPath, 'fake2')
q = df.writeStream.option('checkpointLocation', fake1)\
.format('memory').option('path', fake2) \
.queryName('fake_query').outputMode('append') \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
self.assertFalse(os.path.isdir(fake1)) # should not have been created
self.assertFalse(os.path.isdir(fake2)) # should not have been created
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
def func(x):
time.sleep(1)
return x
from pyspark.sql.functions import col, udf
sleep_udf = udf(func)
# Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
# were no updates.
q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
# "lastProgress" will return None in most cases. However, as it may be flaky when
# Jenkins is very slow, we don't assert on it. If something is wrong, "lastProgress"
# is likely to throw an error and make this test flaky, so we should still be
# able to detect broken code.
q.lastProgress
q.processAllAvailable()
lastProgress = q.lastProgress
recentProgress = q.recentProgress
status = q.status
self.assertEqual(lastProgress['name'], q.name)
self.assertEqual(lastProgress['id'], q.id)
self.assertTrue(any(p == lastProgress for p in recentProgress))
self.assertTrue(
"message" in status and
"isDataAvailable" in status and
"isTriggerActive" in status)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
q.awaitTermination("hello")
self.fail("Expected a ValueError")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = q.awaitTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_exception(self):
sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
sq = sdf.writeStream.format('memory').queryName('query_explain').start()
try:
sq.processAllAvailable()
self.assertEqual(sq.exception(), None)
finally:
sq.stop()
from pyspark.sql.functions import col, udf
from pyspark.sql.utils import StreamingQueryException
bad_udf = udf(lambda x: 1 / 0)
sq = sdf.select(bad_udf(col("value")))\
.writeStream\
.format('memory')\
.queryName('this_query')\
.start()
try:
# Process some data to fail the query
sq.processAllAvailable()
self.fail("bad udf should fail the query")
except StreamingQueryException as e:
# This is expected
self.assertTrue("ZeroDivisionError" in e.desc)
finally:
sq.stop()
self.assertTrue(type(sq.exception()) is StreamingQueryException)
self.assertTrue("ZeroDivisionError" in sq.exception().desc)
def test_query_manager_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
self.spark._wrapped.streams.awaitAnyTermination("hello")
self.fail("Expected a ValueError")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df['key'], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
if sys.version >= '3':
columnName = "数量"
self.assertTrue(isinstance(columnName, str))
else:
columnName = unicode("数量", "utf-8")
self.assertTrue(isinstance(columnName, unicode))
schema = StructType([StructField(columnName, LongType(), True)])
df = self.spark.createDataFrame([(1,)], schema)
self.assertEqual(schema, df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", 'bigint')], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_infer_long_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
# saving this as Parquet caused issues as well.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.sql.tests import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year,month,day,hour,minute,second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
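# a = 170 = 0b10101010, b = 75 = 0b01001011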
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# replace list while value is not given (default to None)
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
self.assertTupleEqual(row, (None, 10, 80.0))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
# should fail if subset is not list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different length
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
# should fail when an unexpected type is received
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
def test_capture_parse_exception(self):
self.assertRaises(ParseException, lambda: self.spark.sql("abc"))
def test_capture_illegalargument_exception(self):
self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
try:
df.select(sha2(df.a, 1024)).collect()
except IllegalArgumentException as e:
self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
self.assertRegexpMatches(e.stackTrace,
"org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# a field type mismatch will cause an exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into a row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
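# an inner join without a join condition is a cartesian product, so it should fail unless spark.sql.crossJoin.enabled is true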
try:
self.spark.conf.set("spark.sql.crossJoin.enabled", "false")
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
self.spark.conf.set("spark.sql.crossJoin.enabled", "true")
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
finally:
# We should unset this. Otherwise, other tests are affected.
self.spark.conf.unset("spark.sql.crossJoin.enabled")
# Regression test for invalid join methods when on is None, SPARK-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_conf(self):
spark = self.spark
spark.conf.set("bogo", "sipeo")
self.assertEqual(spark.conf.get("bogo"), "sipeo")
spark.conf.set("bogo", "ta")
self.assertEqual(spark.conf.get("bogo"), "ta")
self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
spark.conf.unset("bogo")
self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
def test_current_database(self):
spark = self.spark
spark.catalog._reset()
self.assertEquals(spark.catalog.currentDatabase(), "default")
spark.sql("CREATE DATABASE some_db")
spark.catalog.setCurrentDatabase("some_db")
self.assertEquals(spark.catalog.currentDatabase(), "some_db")
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
spark = self.spark
spark.catalog._reset()
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(databases, ["default"])
spark.sql("CREATE DATABASE some_db")
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(sorted(databases), ["default", "some_db"])
def test_list_tables(self):
from pyspark.sql.catalog import Table
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
self.assertEquals(tables, tablesDefault)
self.assertEquals(len(tables), 2)
self.assertEquals(len(tablesSomeDb), 2)
self.assertEquals(tables[0], Table(
name="tab1",
database="default",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tables[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertEquals(tablesSomeDb[0], Table(
name="tab2",
database="some_db",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tablesSomeDb[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listTables("does_not_exist"))
def test_list_functions(self):
from pyspark.sql.catalog import Function
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
functions = dict((f.name, f) for f in spark.catalog.listFunctions())
functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
self.assertTrue(len(functions) > 200)
self.assertTrue("+" in functions)
self.assertTrue("like" in functions)
self.assertTrue("month" in functions)
self.assertTrue("to_date" in functions)
self.assertTrue("to_timestamp" in functions)
self.assertTrue("to_unix_timestamp" in functions)
self.assertTrue("current_database" in functions)
self.assertEquals(functions["+"], Function(
name="+",
description=None,
className="org.apache.spark.sql.catalyst.expressions.Add",
isTemporary=True))
self.assertEquals(functions, functionsDefault)
spark.catalog.registerFunction("temp_func", lambda x: str(x))
spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
self.assertTrue(set(functions).issubset(set(newFunctions)))
self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
self.assertTrue("temp_func" in newFunctions)
self.assertTrue("func1" in newFunctions)
self.assertTrue("func2" not in newFunctions)
self.assertTrue("temp_func" in newFunctionsSomeDb)
self.assertTrue("func1" not in newFunctionsSomeDb)
self.assertTrue("func2" in newFunctionsSomeDb)
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listFunctions("does_not_exist"))
def test_list_columns(self):
from pyspark.sql.catalog import Column
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
self.assertEquals(columns, columnsDefault)
self.assertEquals(len(columns), 2)
self.assertEquals(columns[0], Column(
name="age",
description=None,
dataType="int",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns[1], Column(
name="name",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
self.assertEquals(len(columns2), 2)
self.assertEquals(columns2[0], Column(
name="nickname",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns2[1], Column(
name="tolerance",
description=None,
dataType="float",
nullable=True,
isPartition=False,
isBucket=False))
self.assertRaisesRegexp(
AnalysisException,
"tab2",
lambda: spark.catalog.listColumns("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listColumns("does_not_exist"))
def test_cache(self):
spark = self.spark
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def test_read_text_file_list(self):
df = self.spark.read.text(['python/test_support/sql/text-test.txt',
'python/test_support/sql/text-test.txt'])
count = df.count()
self.assertEquals(count, 4)
def test_BinaryType_serialization(self):
# Pyrolite version <= 4.9 could not serialize BinaryType with Python3 SPARK-17808
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
# test for SPARK-16542
def test_array_types(self):
# This test need to make sure that the Scala type selected is at least
# as large as the python's types. This is necessary because python's
# array types depend on C implementation on the machine. Therefore there
# is no machine independent correspondence between python's array types
# and Scala types.
# See: https://docs.python.org/2/library/array.html
def assertCollectSuccess(typecode, value):
row = Row(myarray=array.array(typecode, [value]))
df = self.spark.createDataFrame([row])
self.assertEqual(df.first()["myarray"][0], value)
# supported string types
#
# String types in python's array are "u" for Py_UNICODE and "c" for char.
# "u" will be removed in python 4, and "c" is not supported in python 3.
supported_string_types = []
if sys.version_info[0] < 4:
supported_string_types += ['u']
# test unicode
assertCollectSuccess('u', u'a')
if sys.version_info[0] < 3:
supported_string_types += ['c']
# test string
assertCollectSuccess('c', 'a')
# supported float and double
#
# Test max, min, and precision for float and double, assuming IEEE 754
# floating-point format.
supported_fractional_types = ['f', 'd']
assertCollectSuccess('f', ctypes.c_float(1e+38).value)
assertCollectSuccess('f', ctypes.c_float(1e-38).value)
assertCollectSuccess('f', ctypes.c_float(1.123456).value)
assertCollectSuccess('d', sys.float_info.max)
assertCollectSuccess('d', sys.float_info.min)
assertCollectSuccess('d', sys.float_info.epsilon)
# supported signed int types
#
# The size of C types changes with the implementation, so we need to make sure
# that there is no overflow error on the platform running this test.
supported_signed_int_types = list(
set(_array_signed_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_signed_int_types:
ctype = _array_signed_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
assertCollectSuccess(t, max_val - 1)
assertCollectSuccess(t, -max_val)
# supported unsigned int types
#
# The JVM does not have unsigned types. We need to be very careful to make
# sure that there is no overflow error.
supported_unsigned_int_types = list(
set(_array_unsigned_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_unsigned_int_types:
ctype = _array_unsigned_int_typecode_ctype_mappings[t]
assertCollectSuccess(t, 2 ** (ctypes.sizeof(ctype) * 8) - 1)
# all supported types
#
# Make sure the types tested above:
# 1. are all supported types
# 2. cover all supported types
supported_types = (supported_string_types +
supported_fractional_types +
supported_signed_int_types +
supported_unsigned_int_types)
self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))
# all unsupported types
#
# The keys in _array_type_mappings are a complete list of all supported types,
# and types not in _array_type_mappings are considered unsupported.
# `array.typecodes` is not available in python 2.
if sys.version_info[0] < 3:
all_types = set(['c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'])
else:
all_types = set(array.typecodes)
unsupported_types = all_types - set(supported_types)
# test unsupported types
for t in unsupported_types:
with self.assertRaises(TypeError):
a = array.array(t)
self.spark.createDataFrame([Row(myarray=a)]).collect()
def test_bucketed_write(self):
data = [
(1, "foo", 3.0), (2, "foo", 5.0),
(3, "bar", -1.0), (4, "bar", 6.0),
]
df = self.spark.createDataFrame(data, ["x", "y", "z"])
def count_bucketed_cols(names, table="pyspark_bucket"):
"""Given a sequence of column names and a table name
query the catalog and return the number of columns which are
used for bucketing.
"""
cols = self.spark.catalog.listColumns(table)
num = len([c for c in cols if c.name in names and c.isBucket])
return num
# Test write with one bucketing column
df.write.bucketBy(3, "x").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write two bucketing columns
df.write.bucketBy(3, "x", "y").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort
df.write.bucketBy(2, "x").sortBy("z").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with a list of columns
df.write.bucketBy(3, ["x", "y"]).mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with a list of columns
(df.write.bucketBy(2, "x")
.sortBy(["y", "z"])
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with multiple columns
(df.write.bucketBy(2, "x")
.sortBy("y", "z")
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
@unittest.skipIf(not _have_pandas, "Pandas not installed")
def test_to_pandas(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())
data = [
(1, "foo", True, 3.0), (2, "foo", True, 5.0),
(3, "bar", False, -1.0), (4, "bar", False, 6.0),
]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
class HiveSparkSubmitTests(SparkSubmitTests):
def test_hivecontext(self):
# This test checks that HiveContext is using Hive metastore (SPARK-16224).
# It sets a metastore url and checks if there is a derby dir created by
# Hive metastore. If this derby dir exists, HiveContext is using
# Hive metastore.
metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
hive_site_dir = os.path.join(self.programDir, "conf")
hive_site_file = self.createTempFile("hive-site.xml", ("""
|<configuration>
| <property>
| <name>javax.jdo.option.ConnectionURL</name>
| <value>%s</value>
| </property>
|</configuration>
""" % metastore_URL).lstrip(), "conf")
script = self.createTempFile("test.py", """
|import os
|
|from pyspark.conf import SparkConf
|from pyspark.context import SparkContext
|from pyspark.sql import HiveContext
|
|conf = SparkConf()
|sc = SparkContext(conf=conf)
|hive_context = HiveContext(sc)
|print(hive_context.sql("show databases").collect())
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,1024]",
"--driver-class-path", hive_site_dir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("default", out.decode('utf-8'))
self.assertTrue(os.path.exists(metastore_path))
class SQLTests2(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
# We can't include this test in SQLTests because we will stop the class's SparkContext and cause
# other tests to fail.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext('local[4]', self.sc.appName)
spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
class UDFInitializationTests(unittest.TestCase):
def tearDown(self):
if SparkSession._instantiatedSession is not None:
SparkSession._instantiatedSession.stop()
if SparkContext._active_spark_context is not None:
SparkContext._active_spark_context.stop()
def test_udf_init_shouldnt_initalize_context(self):
from pyspark.sql.functions import UserDefinedFunction
UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
SparkContext._active_spark_context,
"SparkContext shouldn't be initialized when UserDefinedFunction is created."
)
self.assertIsNone(
SparkSession._instantiatedSession,
"SparkSession shouldn't be initialized when UserDefinedFunction is created."
)
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
except TypeError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
os.unlink(cls.tempdir.name)
cls.spark = HiveContext._createForTesting(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
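# each result tuple is (value, key, max, min, count, row_number, rank, dense_rank, ntile)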
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_limit_and_take(self):
df = self.spark.range(1, 1000, numPartitions=10)
def assert_runs_only_one_job_stage_and_task(job_group_name, f):
tracker = self.sc.statusTracker()
self.sc.setJobGroup(job_group_name, description="")
f()
jobs = tracker.getJobIdsForGroup(job_group_name)
self.assertEqual(1, len(jobs))
stages = tracker.getJobInfo(jobs[0]).stageIds
self.assertEqual(1, len(stages))
self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
# Regression test for SPARK-10731: take should delegate to Scala implementation
assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
# Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date, datetime
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEqual(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])
@unittest.skipIf(sys.version_info < (3, 3), "Unittest < 3.3 doesn't support mocking")
def test_unbounded_frames(self):
from unittest.mock import patch
from pyspark.sql import functions as F
from pyspark.sql import window
import importlib
df = self.spark.range(0, 3)
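# Window.rowsBetween/rangeBetween map -sys.maxsize and sys.maxsize to UNBOUNDED
# PRECEDING/FOLLOWING; patching sys.maxsize below checks that this holds
# regardless of the platform's actual maxsize value.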
def rows_frame_match():
return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
).columns[0]
def range_frame_match():
return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
).columns[0]
with patch("sys.maxsize", 2 ** 31 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 63 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 127 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
importlib.reload(window)
class DataTypeVerificationTests(unittest.TestCase):
def test_verify_type_exception_msg(self):
self.assertRaisesRegexp(
ValueError,
"test_name",
lambda: _make_type_verifier(StringType(), nullable=False, name="test_name")(None))
schema = StructType([StructField('a', StructType([StructField('b', IntegerType())]))])
self.assertRaisesRegexp(
TypeError,
"field b in field a",
lambda: _make_type_verifier(schema)([["data"]]))
def test_verify_type_ok_nullable(self):
obj = None
types = [IntegerType(), FloatType(), StringType(), StructType([])]
for data_type in types:
try:
_make_type_verifier(data_type, nullable=True)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
def test_verify_type_not_nullable(self):
import array
import datetime
import decimal
schema = StructType([
StructField('s', StringType(), nullable=False),
StructField('i', IntegerType(), nullable=True)])
class MyObj:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
# obj, data_type
success_spec = [
# String
("", StringType()),
(u"", StringType()),
(1, StringType()),
(1.0, StringType()),
([], StringType()),
({}, StringType()),
# UDT
(ExamplePoint(1.0, 2.0), ExamplePointUDT()),
# Boolean
(True, BooleanType()),
# Byte
(-(2**7), ByteType()),
(2**7 - 1, ByteType()),
# Short
(-(2**15), ShortType()),
(2**15 - 1, ShortType()),
# Integer
(-(2**31), IntegerType()),
(2**31 - 1, IntegerType()),
# Long
(2**64, LongType()),
# Float & Double
(1.0, FloatType()),
(1.0, DoubleType()),
# Decimal
(decimal.Decimal("1.0"), DecimalType()),
# Binary
(bytearray([1, 2]), BinaryType()),
# Date/Timestamp
(datetime.date(2000, 1, 2), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), TimestampType()),
# Array
([], ArrayType(IntegerType())),
(["1", None], ArrayType(StringType(), containsNull=True)),
([1, 2], ArrayType(IntegerType())),
((1, 2), ArrayType(IntegerType())),
(array.array('h', [1, 2]), ArrayType(IntegerType())),
# Map
({}, MapType(StringType(), IntegerType())),
({"a": 1}, MapType(StringType(), IntegerType())),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=True)),
# Struct
({"s": "a", "i": 1}, schema),
({"s": "a", "i": None}, schema),
({"s": "a"}, schema),
({"s": "a", "f": 1.0}, schema),
(Row(s="a", i=1), schema),
(Row(s="a", i=None), schema),
(Row(s="a", i=1, f=1.0), schema),
(["a", 1], schema),
(["a", None], schema),
(("a", 1), schema),
(MyObj(s="a", i=1), schema),
(MyObj(s="a", i=None), schema),
(MyObj(s="a"), schema),
]
# obj, data_type, exception class
failure_spec = [
# String (match anything but None)
(None, StringType(), ValueError),
# UDT
(ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
# Boolean
(1, BooleanType(), TypeError),
("True", BooleanType(), TypeError),
([1], BooleanType(), TypeError),
# Byte
(-(2**7) - 1, ByteType(), ValueError),
(2**7, ByteType(), ValueError),
("1", ByteType(), TypeError),
(1.0, ByteType(), TypeError),
# Short
(-(2**15) - 1, ShortType(), ValueError),
(2**15, ShortType(), ValueError),
# Integer
(-(2**31) - 1, IntegerType(), ValueError),
(2**31, IntegerType(), ValueError),
# Float & Double
(1, FloatType(), TypeError),
(1, DoubleType(), TypeError),
# Decimal
(1.0, DecimalType(), TypeError),
(1, DecimalType(), TypeError),
("1.0", DecimalType(), TypeError),
# Binary
(1, BinaryType(), TypeError),
# Date/Timestamp
("2000-01-02", DateType(), TypeError),
(946811040, TimestampType(), TypeError),
# Array
(["1", None], ArrayType(StringType(), containsNull=False), ValueError),
([1, "2"], ArrayType(IntegerType()), TypeError),
# Map
({"a": 1}, MapType(IntegerType(), IntegerType()), TypeError),
({"a": "1"}, MapType(StringType(), IntegerType()), TypeError),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=False),
ValueError),
# Struct
({"s": "a", "i": "1"}, schema, TypeError),
(Row(s="a"), schema, ValueError), # Row can't have missing field
(Row(s="a", i="1"), schema, TypeError),
(["a"], schema, ValueError),
(["a", "1"], schema, TypeError),
(MyObj(s="a", i="1"), schema, TypeError),
(MyObj(s=None, i="1"), schema, ValueError),
]
# Check success cases
for obj, data_type in success_spec:
try:
_make_type_verifier(data_type, nullable=False)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
# Check failure cases
for obj, data_type, exp in failure_spec:
msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
with self.assertRaises(exp, msg=msg):
_make_type_verifier(data_type, nullable=False)(obj)
@unittest.skipIf(not _have_arrow, "Arrow not installed")
class ArrowTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
cls.spark.conf.set("spark.sql.execution.arrow.enable", "true")
cls.schema = StructType([
StructField("1_str_t", StringType(), True),
StructField("2_int_t", IntegerType(), True),
StructField("3_long_t", LongType(), True),
StructField("4_float_t", FloatType(), True),
StructField("5_double_t", DoubleType(), True)])
cls.data = [("a", 1, 10, 0.2, 2.0),
("b", 2, 20, 0.4, 4.0),
("c", 3, 30, 0.8, 6.0)]
def assertFramesEqual(self, df_with_arrow, df_without):
msg = ("DataFrame from Arrow is not equal" +
("\n\nWith Arrow:\n%s\n%s" % (df_with_arrow, df_with_arrow.dtypes)) +
("\n\nWithout:\n%s\n%s" % (df_without, df_without.dtypes)))
self.assertTrue(df_without.equals(df_with_arrow), msg=msg)
def test_unsupported_datatype(self):
schema = StructType([StructField("dt", DateType(), True)])
df = self.spark.createDataFrame([(datetime.date(1970, 1, 1),)], schema=schema)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: df.toPandas())
def test_null_conversion(self):
df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
self.data)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
self.spark.conf.set("spark.sql.execution.arrow.enable", "false")
pdf = df.toPandas()
self.spark.conf.set("spark.sql.execution.arrow.enable", "true")
pdf_arrow = df.toPandas()
self.assertFramesEqual(pdf_arrow, pdf)
def test_pandas_round_trip(self):
import pandas as pd
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
pdf = pd.DataFrame(data=data_dict)
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
self.assertFramesEqual(pdf_arrow, pdf)
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
if __name__ == "__main__":
from pyspark.sql.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()<|fim▁end|> | def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1]) |
<|file_name|>NewDocumentDBConnectionCallout.tsx<|end_file_name|><|fim▁begin|>import React, { useState } from 'react';
import { NewConnectionCalloutProps } from './Callout.properties';
import { useTranslation } from 'react-i18next';
import CustomPivot from './customPivot/CustomPivot';
import RadioButtonNoFormik from '../../../../../components/form-controls/RadioButtonNoFormik';
import { paddingSidesStyle } from './Callout.styles';
import DocumentDBPivotDataLoader from './documentDBPivot/DocumentDBDataLoader';
import { CustomDropdownProps } from '../../../../../components/form-controls/DropDown';
import { FieldProps } from 'formik';
import { IDropdownProps } from 'office-ui-fabric-react';
enum RadioState {
documentAccount = 'documentAccount',
custom = 'custom',
}
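// Two ways to configure the connection: pick an existing Cosmos DB account
// (documentAccount) or point at a custom app setting (custom).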
const NewDocumentDBConnectionCallout: React.SFC<NewConnectionCalloutProps & CustomDropdownProps & FieldProps & IDropdownProps> = props => {
const { t } = useTranslation();
const [radioState, setRadioState] = useState<RadioState>(RadioState.documentAccount);
return (
<div style={paddingSidesStyle}>
<h4>{t('documentDBCallout_newDocumentDBConnection')}</h4>
<RadioButtonNoFormik
id="event-hub-connection-callout-options"
ariaLabelledBy={`event-hub-connection-callout-options-label`}
selectedKey={radioState}
options={[
{
key: RadioState.documentAccount,
text: t('documentDBCallout_azureCosmosDBAccount'),
},
{
key: RadioState.custom,
text: t('resourceCallout_customAppSetting'),
},
]}
onChange={(o, e) => e && setRadioState(e.key as RadioState)}<|fim▁hole|> {radioState === RadioState.custom && <CustomPivot {...props} />}
</div>
);
};
export default NewDocumentDBConnectionCallout;<|fim▁end|> | />
{radioState === RadioState.documentAccount && <DocumentDBPivotDataLoader {...props} />} |
<|file_name|>ionic-page.ts<|end_file_name|><|fim▁begin|>/**
* @hidden
* public link interface
*/
export interface IonicPageMetadata {
name?: string;
segment?: string;
defaultHistory?: string[];
priority?: string;
}
/**
* @name IonicPage
* @description
* The Ionic Page handles registering and displaying specific pages based on URLs. It's used
* underneath `NavController` so it will never have to be interacted with directly. When a new
* page is pushed with `NavController`, the URL is updated to match the path to this page.
*
* Unlike traditional web apps, URLs don't dictate navigation in Ionic apps.
* Instead, URLs help us link to specific pieces of content as a breadcrumb.
* The current URL gets updated as we navigate, but we use the `NavController`
* push and pop, or `NavPush` and `NavPop` to move around. This makes it much easier
* to handle complicated nested navigation.
*
* We refer to our URL system as a deep link system instead of a router to encourage
* Ionic developers to think of URLs as a breadcrumb rather than as the source of
* truth in navigation. This encourages flexible navigation design and happy apps all
* over the world.
*
*
* @usage
*
* The first step to setting up deep links is to add the page that should be
* a deep link in the `IonicPageModule.forChild` import of the page's module.
* For our examples, this will be `MyPage`:
*
* ```ts
* @NgModule({
* declarations: [
* MyPage
* ],
* imports: [
* IonicPageModule.forChild(MyPage)
* ],
* entryComponents: [
* MyPage
* ]
* })
* export class MyPageModule {}
* ```
*
* Then, add the `@IonicPage` decorator to the component. The most simple usage is adding an
* empty decorator:
*
* ```ts
* @IonicPage()
* @Component({
* templateUrl: 'main.html'
* })
* export class MyPage {}
* ```
*
* This will automatically create a link to the `MyPage` component using the same name as the class,
* `name`: `'MyPage'`. The page can now be navigated to by using this name. For example:
*
* ```ts
* @Component({
* templateUrl: 'another-page.html'
* })
* export class AnotherPage {
* constructor(public navCtrl: NavController) {}
*
* goToMyPage() {
* // go to the MyPage component
* this.navCtrl.push('MyPage');
* }
* }
* ```
*
* The `@IonicPage` decorator accepts a `DeepLinkMetadataType` object. This object accepts
* the following properties: `name`, `segment`, `defaultHistory`, and `priority`. All of them
* are optional but can be used to create complex navigation links.
*
*
* ### Changing Name
*
* As mentioned previously, the `name` property will be set to the class name if it isn't provided.
* Changing the name of the link is extremely simple. To change the name used to link to the
* component, simply pass it in the decorator like so:
*
* ```ts
* @IonicPage({
* name: 'my-page'
* })
* ```
*
* This will create a link to the `MyPage` component using the name `'my-page'`. Similar to the previous
* example, the page can be navigated to by using the name:
*
* ```ts
* goToMyPage() {
* // go to the MyPage component
* this.navCtrl.push('my-page');
* }
* ```
*
*
* ### Setting URL Path
*
* The `segment` property is used to set the URL to the page. If this property isn't provided, the
* `segment` will use the value of `name`. Since components can be loaded anywhere in the app, the
* `segment` doesn't require a full URL path. When a page becomes the active page, the `segment` is
* appended to the URL.
*
* The `segment` can be changed to anything and doesn't have to match the `name`. For example, passing
* a value for `name` and `segment`:
*
* ```ts
* @IonicPage({
* name: 'my-page',
* segment: 'some-path'
* })
* ```
*
* When navigating to this page as the first page in the app, the URL will look something like:
*
* ```
* http://localhost:8101/#/some-path
* ```
*
* However, navigating to the page will still use the `name` like the previous examples do.
*
*
* ### Dynamic Links
*
* The `segment` property is useful for creating dynamic links. Sometimes the URL isn't known ahead
* of time, so it can be passed as a variable.
*
* Since passing data around is common practice in an app, it can be reflected in the app's URL by
* using the `:param` syntax. For example, set the `segment` in the `@IonicPage` decorator:
*
* ```ts
* @IonicPage({
* name: 'detail-page',
* segment: 'detail/:id'
* })
* ```
*
* In this case, when we `push` to a new instance of `'detail-page'`, the value of `id`
* in the `detailInfo` data being passed to `push` will replace `:id` in the URL.
*
* Important: The property needs to be something that can be converted into a string; objects
* are not supported.
*
* For example, to push the `'detail-page'` in the `ListPage` component, the following code could
* be used:
*
* ```ts
* @IonicPage({
* name: 'list'
* })
* export class ListPage {
* constructor(public navCtrl: NavController) {}
*
* pushPage(detailInfo) {
* // Push an `id` to the `'detail-page'`
* this.navCtrl.push('detail-page', {
* 'id': detailInfo.id
* })
* }
* }
* ```
*
* If the value of `detailInfo.id` is `12`, for example, the URL would end up looking like this:
*
* ```
* http://localhost:8101/#/list/detail/12
* ```
*
* Since this `id` will be used to pull in the data of the specific detail page, it's important
* that the `id` is unique.
*<|fim▁hole|> *
*
* ### Default History
*
* Pages can be navigated to using deep links from anywhere in the app, but sometimes the app is
* launched from a URL and the page needs to have the same history as if it were navigated to from
* inside of the app.
*
* By default, the page would be navigated to as the first page in the stack with no prior history.
* A good example is the App Store on iOS. Clicking on a URL to an application in the App Store will
* load the details of the application with no back button, as if it were the first page ever viewed.
*
* The default history of any page can be set in the `defaultHistory` property. This history will only
* be used if the history doesn't already exist, meaning if you navigate to the page the history will
* be the pages that were navigated from.
*
* The `defaultHistory` property takes an array of strings. For example, setting the history of the
* detail page to the list page where the `name` is `list`:
*
* ```ts
* @IonicPage({
* name: 'detail-page',
* segment: 'detail/:id',
* defaultHistory: ['list']
* })
* ```
*
* In this example, if the app is launched at `http://localhost:8101/#/detail/my-detail` the displayed page
* will be the `'detail-page'` with an id of `my-detail` and it will show a back button that goes back to
* the `'list'` page.
*
* An example of an application with a set history stack is the Instagram application. Opening a link
* to an image on Instagram will show the details for that image with a back button to the user's profile
* page. There is no "right" way of setting the history for a page; it is up to the application.
*
* ### Priority
*
* The `priority` property is only used during preloading. By default, preloading is turned off so setting
* this property would do nothing. Preloading eagerly loads all deep links after the application boots
* instead of on demand as needed. To enable preloading, set `preloadModules` in the main application module
* config to `true`:
*
* ```ts
* @NgModule({
* declarations: [
* MyApp
* ],
* imports: [
* BrowserModule,
* IonicModule.forRoot(MyApp, {
* preloadModules: true
* })
* ],
* bootstrap: [IonicApp],
* entryComponents: [
* MyApp
* ]
* })
* export class AppModule { }
* ```
*
* If preloading is turned on, it will load the modules based on the value of `priority`. The following
* values are possible for `priority`: `"high"`, `"low"`, and `"off"`. When there is no `priority`, it
* will be set to `"low"`.
*
* All deep links with their priority set to `"high"` will be loaded first. Upon completion of loading the
* `"high"` priority modules, all deep links with a priority of `"low"` (or no priority) will be loaded. If
* the priority is set to `"off"` the link will not be preloaded. Setting the `priority` is as simple as
* passing it to the `@IonicPage` decorator:
*
* ```ts
* @IonicPage({
* name: 'my-page',
* priority: 'high'
* })
* ```
*
* We recommend setting the `priority` to `"high"` on the pages that will be viewed first when launching
* the application.
*
*/
export function IonicPage(_config?: IonicPageMetadata): ClassDecorator {
return function(clazz: any) {
return clazz;
};
}<|fim▁end|> | * Note: Even though the `name` is `detail-page`, the `segment` uses `detail/:id`, and the URL
* will use the `segment`. |
<|file_name|>event_emitter_spec.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {AsyncTestCompleter, beforeEach, describe, expect, inject, it} from '@angular/core/testing/src/testing_internal';
import {filter} from 'rxjs/operators';
import {EventEmitter} from '../src/event_emitter';
{
describe('EventEmitter', () => {
let emitter: EventEmitter<any>;
beforeEach(() => {
emitter = new EventEmitter();
});
it('should call the next callback',
inject([AsyncTestCompleter], (async: AsyncTestCompleter) => {
emitter.subscribe({
next: (value: any) => {
expect(value).toEqual(99);
async.done();
}
});
emitter.emit(99);
}));
it('should call the throw callback',
inject([AsyncTestCompleter], (async: AsyncTestCompleter) => {
emitter.subscribe({
next: () => {},
error: (error: any) => {<|fim▁hole|> async.done();
}
});
emitter.error('Boom');
}));
it('should work when no throw callback is provided',
inject([AsyncTestCompleter], (async: AsyncTestCompleter) => {
emitter.subscribe({
next: () => {},
error: (_: any) => {
async.done();
}
});
emitter.error('Boom');
}));
it('should call the return callback',
inject([AsyncTestCompleter], (async: AsyncTestCompleter) => {
emitter.subscribe({
next: () => {},
error: (_: any) => {},
complete: () => {
async.done();
}
});
emitter.complete();
}));
it('should subscribe to the wrapper synchronously', () => {
let called = false;
emitter.subscribe({
next: (value: any) => {
called = true;
}
});
emitter.emit(99);
expect(called).toBe(true);
});
it('delivers next and error events synchronously',
inject([AsyncTestCompleter], (async: AsyncTestCompleter) => {
const log: any[] /** TODO #9100 */ = [];
emitter.subscribe({
next: (x: any) => {
log.push(x);
expect(log).toEqual([1, 2]);
},
error: (err: any) => {
log.push(err);
expect(log).toEqual([1, 2, 3, 4]);
async.done();
}
});
log.push(1);
emitter.emit(2);
log.push(3);
emitter.error(4);
log.push(5);
}));
it('delivers next and complete events synchronously', () => {
const log: any[] /** TODO #9100 */ = [];
emitter.subscribe({
next: (x: any) => {
log.push(x);
expect(log).toEqual([1, 2]);
},
error: null,
complete: () => {
log.push(4);
expect(log).toEqual([1, 2, 3, 4]);
}
});
log.push(1);
emitter.emit(2);
log.push(3);
emitter.complete();
log.push(5);
expect(log).toEqual([1, 2, 3, 4, 5]);
});
it('delivers events asynchronously when forced to async mode',
inject([AsyncTestCompleter], (async: AsyncTestCompleter) => {
const e = new EventEmitter(true);
const log: any[] /** TODO #9100 */ = [];
e.subscribe((x: any) => {
log.push(x);
expect(log).toEqual([1, 3, 2]);
async.done();
});
log.push(1);
e.emit(2);
log.push(3);
}));
it('reports whether it has subscribers', () => {
const e = new EventEmitter(false);
expect(e.observers.length > 0).toBe(false);
e.subscribe({next: () => {}});
expect(e.observers.length > 0).toBe(true);
});
it('remove a subscriber subscribed directly to EventEmitter', () => {
const sub = emitter.subscribe();
expect(emitter.observers.length).toBe(1);
sub.unsubscribe();
expect(emitter.observers.length).toBe(0);
});
it('remove a subscriber subscribed after applying operators with pipe()', () => {
const sub = emitter.pipe(filter(() => true)).subscribe();
expect(emitter.observers.length).toBe(1);
sub.unsubscribe();
expect(emitter.observers.length).toBe(0);
});
it('unsubscribing a subscriber invokes the dispose method', () => {
inject([AsyncTestCompleter], (async: AsyncTestCompleter) => {
const sub = emitter.subscribe();
sub.add(() => async.done());
sub.unsubscribe();
});
});
it('unsubscribing a subscriber after applying operators with pipe() invokes the dispose method',
() => {
inject([AsyncTestCompleter], (async: AsyncTestCompleter) => {
const sub = emitter.pipe(filter(() => true)).subscribe();
sub.add(() => async.done());
sub.unsubscribe();
});
});
it('error thrown inside an Rx chain propagates to the error handler and disposes the chain',
() => {
let errorPropagated = false;
emitter
.pipe(
filter(() => {
throw new Error();
}),
)
.subscribe(
() => {},
err => errorPropagated = true,
);
emitter.next(1);
expect(errorPropagated).toBe(true);
expect(emitter.observers.length).toBe(0);
});
it('error sent by EventEmitter should dispose the Rx chain and remove subscribers', () => {
let errorPropagated = false;
emitter.pipe(filter(() => true))
.subscribe(
() => {},
err => errorPropagated = true,
);
emitter.error(1);
expect(errorPropagated).toBe(true);
expect(emitter.observers.length).toBe(0);
});
// TODO: vsavkin: add test cases
// should call dispose on the subscription if generator returns {done:true}
// should call dispose on the subscription on throw
// should call dispose on the subscription on return
});
}<|fim▁end|> | expect(error).toEqual('Boom'); |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Zinc, the bare metal stack for rust.
// Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Drivers for TFT LCDs.
use drivers::chario::CharIO;
pub mod c12332;
pub mod ili9341;
pub mod font_small_7;
pub mod hd44780u;
/// LCD provides a generic interface to a TFT LCD peripheral.
///
/// It provides generic methods for drawing primitives and bitmaps based on
/// `pixel` to set a pixel.
///
/// LCD does not flush buffers automatically, user must call `flush` after the
/// drawing sequence to actually display the data on screen.
pub trait LCD : CharIO {
/// Clears the screen.
fn clear(&self);
/// Flushes the internal buffer to screen, where applicable.
fn flush(&self);
/// Sets one pixel color. The actual color bits are driver-specific.
fn pixel(&self, x: u32, y: u32, color: u16);
/// Draws a line from xy0 to xy1.
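///
/// Implemented with integer-only, Bresenham-style error stepping, so no
/// floating-point support is required.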
fn line(&self, x0_b: u32, y0_b: u32, x1: u32, y1: u32, color: u16) {
let (mut x0, mut y0) = (x0_b as i32, y0_b as i32);
let (dx, dy) = ((x1 as i32) - (x0), (y1 as i32) - (y0));
let dx_sym: i32 =
if dx > 0 {
1
} else {
-1
};
let dy_sym: i32 =
if dy > 0 {
1
} else {
-1
};
let (dx, dy) = (dx_sym*dx, dy_sym*dy);
let (dx_x2, dy_x2) = (dx*2, dy*2);<|fim▁hole|> while x0 as u32 != x1 {
self.pixel(x0 as u32, y0 as u32, color);
x0 += dx_sym;
if di < 0 {
di += dy_x2;
} else {
di += dy_x2 - dx_x2;
y0 += dy_sym;
}
}
self.pixel(x0 as u32, y0 as u32, color);
} else {
let mut di = dx_x2 - dy;
while y0 as u32 != y1 {
self.pixel(x0 as u32, y0 as u32, color);
y0 += dy_sym;
if di < 0 {
di += dx_x2;
} else {
di += dx_x2 - dy_x2;
x0 += dx_sym;
}
}
self.pixel(x0 as u32, y0 as u32, color);
}
}
/// Draws a rectangle.
fn rect(&self, x0: u32, y0: u32, x1: u32, y1: u32, color: u16) {
if x1 > x0 {
self.line(x0,y0,x1,y0,color);
} else {
self.line(x1,y0,x0,y0,color);
}
if y1 > y0 {
self.line(x0,y0,x0,y1,color);
} else {
self.line(x0,y1,x0,y0,color);
}
if x1 > x0 {
self.line(x0,y1,x1,y1,color);
} else {
self.line(x1,y1,x0,y1,color);
}
if y1 > y0 {
self.line(x1,y0,x1,y1,color);
} else {
self.line(x1,y1,x1,y0,color);
}
}
/// Draws a filled rectangle.
fn fillrect(&self, x0_b: u32, y0_b: u32, x1_b: u32, y1_b: u32, color: u16) {
let (x0, x1) =
if x0_b > x1_b {
(x1_b, x0_b)
} else {
(x0_b, x1_b)
};
let (y0, y1) =
if y0_b > y1_b {
(y1_b, y0_b)
} else {
(y0_b, y1_b)
};
for l in x0..(x1 + 1) {
for c in y0..(y1 + 1) {
self.pixel(l as u32, c as u32, color);
}
}
}
/// Draws an image from a buffer.
fn image(&self, width: u32, height: u32, data: &[u16]) {
for x in 0..width {
for y in 0..height {
self.pixel(x, y, data[(x+y*width) as usize]);
}
}
}
}
#[cfg(test)]
mod test {
use core::mem::zeroed;
use core::cell::Cell;
use core::ops::Fn;
use core::ops::Range;
use drivers::chario::CharIO;
use drivers::lcd::LCD;
pub struct TestLCD {
pixbuf: [[Cell<u16>; 16]; 16],
}
impl CharIO for TestLCD {
fn putc(&self, _: char) { }
}
impl LCD for TestLCD {
fn flush(&self) { }
fn clear(&self) { self.set_fill(0); }
fn pixel(&self, x: u32, y: u32, color: u16) {
if x >= 16 || y >= 16 {
return
}
self.pixbuf[x as usize][y as usize].set(color);
}
}
impl TestLCD {
fn new() -> TestLCD {
TestLCD {
pixbuf: unsafe { zeroed() },
}
}
fn coords(&self, x: usize, y: usize) -> (u32, u32) { (x as u32, y as u32) }
fn axis(&self) -> Range<usize> { 0..16 }
fn for_each<F>(&self, block: F) where F: Fn((u32, u32), u16) {
for x in self.axis() {
for y in self.axis() {
block(self.coords(x, y), self.pixbuf[x][y].get());
}
}
}
fn map_each<F>(&self, block: F) where F: Fn((u32, u32), u16) -> u16 {
for x in self.axis() {
for y in self.axis() {
self.pixbuf[x][y].set(block(self.coords(x, y), self.pixbuf[x][y].get()));
}
}
}
fn set_fill(&self, color: u16) {
self.map_each(|_, _| { color });
}
}
/* keep this
let blank: [[u16; 16]; 16] = [
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
];
*/
#[test]
fn should_fill_and_clear() {
let io = TestLCD::new();
io.set_fill(128);
io.for_each(|_, x| assert!(x == 128));
io.clear();
io.for_each(|_, x| assert!(x == 0));
}
#[test]
fn should_set_pixels() {
let io = TestLCD::new();
io.map_each(|(x, y), _| { (x+y) as u16 });
io.for_each(|(x, y), v| assert!(v == (x+y) as u16));
}
#[test]
fn should_draw_line() {
let io = TestLCD::new();
let diagonal: [[u16; 16]; 16] = [
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
];
io.line(0, 0, 15, 15, 1);
io.line(15, 15, 0, 0, 1);
io.for_each(|(x, y), v| {
assert!(v == diagonal[y as usize][x as usize]);
assert!(v == diagonal[x as usize][y as usize]);
});
io.clear();
let non_symmetric: [[u16; 16]; 16] = [
[2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,3,3,3,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,5,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,5,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,4],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,4],
];
io.line(0, 0, 0, 1, 2);
io.line(4, 2, 4, 4, 3);
io.line(14, 14, 14, 15, 4);
io.line(15, 14, 15, 15, 4);
io.line(11, 11, 13, 13, 5);
io.for_each(|(x, y), v| {
assert!(v == non_symmetric[x as usize][y as usize]);
});
}
#[test]
fn should_draw_rect() {
let io = TestLCD::new();
let overlapping: [[u16; 16]; 16] = [
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,6,6,6,6,6,6,0,0,0,0,0,0,0,0],
[0,0,6,0,0,0,0,6,0,0,0,0,0,0,0,0],
[0,0,6,0,0,0,0,6,0,0,0,0,0,0,0,0],
[0,0,6,0,7,7,7,7,7,7,7,0,0,0,0,0],
[0,0,6,0,7,0,0,6,0,0,7,0,0,0,0,0],
[0,0,6,0,7,0,0,6,0,0,7,0,0,0,0,0],
[0,0,6,0,7,7,7,7,7,7,7,0,0,0,0,0],
[0,0,6,0,0,0,0,6,0,0,0,0,0,0,0,0],
[0,0,6,6,6,6,6,6,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
];
assert!(overlapping[4][2] == 6);
assert!(overlapping[12][7] == 6);
assert!(overlapping[7][4] == 7);
assert!(overlapping[10][10] == 7);
io.rect(4, 2, 12, 7, 6);
io.rect(10, 10, 7, 4, 7);
io.for_each(|(x, y), v| {
assert!(v == overlapping[x as usize][y as usize]);
});
}
#[test]
fn should_draw_fillrect() {
let io = TestLCD::new();
let eights: [[u16; 16]; 16] = [
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,8,8,8,8,8,8,8,8,0,0,0,0],
[0,0,0,0,8,8,8,8,8,8,8,8,0,0,0,0],
[0,0,0,0,8,8,8,8,8,8,8,8,0,0,0,0],
[0,0,0,0,8,8,8,8,8,8,8,8,0,0,0,0],
[0,0,0,0,8,8,8,8,8,8,8,8,0,0,0,0],
[0,0,0,0,8,8,8,8,8,8,8,8,0,0,0,0],
[0,0,0,0,8,8,8,8,8,8,8,8,0,0,0,0],
[0,0,0,0,8,8,8,8,8,8,8,8,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
];
io.fillrect(4, 4, 11, 11, 8);
io.for_each(|(x, y), v| {
assert!(v == eights[x as usize][y as usize]);
});
}
#[test]
fn should_draw_image() {
let io = TestLCD::new();
let i1 = &[
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff,
];
let i2: [[u16; 16]; 16] = [
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
[0xff, 0xff, 0xff, 0xad, 0xde, 0x10, 0x01, 0xde,
0xed, 0x10, 0x01, 0xed, 0xad, 0xff, 0xff, 0xff],
];
io.image(16, 16, i1);
io.for_each(|(y, x), v| {
assert!(v == i2[x as usize][y as usize]);
});
}
}<|fim▁end|> |
if dx >= dy {
let mut di = dy_x2 - dx; |
<|file_name|>application.js<|end_file_name|><|fim▁begin|>// This is a manifest file that'll be compiled into including all the files listed below.
// Add new JavaScript/Coffee code in separate files in this directory and they'll automatically
// be included in the compiled file accessible from http://example.com/assets/application.js
// It's not advisable to add code directly here, but if you do, it'll appear at the bottom of the
// the compiled file.
//
//= require jquery
//= require jquery_ujs
//= require twitter/bootstrap
//= require select2
//= require has_accounts_engine/accounting
//= require has_accounts_engine/accounting-jquery
//= require has_accounts_engine/bootstrap.datepicker
//= require_tree .
// Application specific behaviour
function addAlternateTableBehaviour() {
$("table.list tr:odd").addClass("odd");
}
// Dirty Form
function makeEditForm(form) {
var buttons = form.find("fieldset.buttons");
buttons.animate({opacity: 1}, 1000);
}
function addDirtyForm() {
$(".form-view form").dirty_form()
.dirty(function(event, data){
makeEditForm($(this));
})
$(".form-view").focusin(function() {makeEditForm($(this))});
}
function addNestedFormBehaviour() {
$('body').on('click', '.delete-nested-form-item', function(event) {
var item = $(this).parents('.nested-form-item');
// Hide item
item.hide();
// Mark as ready to delete
item.find("input[name$='[_destroy]']").val("1");
item.addClass('delete');
// Drop input fields to prevent browser validation problems
item.find(":input").not("[name$='[_destroy]'], [name$='[id]']").remove();
// TODO: should be callbacks
updatePositions($(this).parents('.nested-form-container'));
updateLineItems();
// Don't follow link
event.preventDefault();
});
}
// Currency helpers
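// currencyRound rounds to the nearest 0.05 and returns a string with two
// decimals, e.g. currencyRound(12.37) === "12.35" and currencyRound(12.38) === "12.40".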
function currencyRound(value) {
if (isNaN(value)) {
return 0.0;
};
var rounded = Math.round(value * 20) / 20;
return rounded.toFixed(2);
}
// Line Item calculation
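// For '%' and 'saldo_of' quantities the price is derived from the totals of the
// referenced line items (matched via reference_code); plain items keep their price.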
function updateLineItemPrice(lineItem) {
var list = lineItem.parent();
var reference_code = lineItem.find(":input[name$='[reference_code]']").val();
var quantity = lineItem.find(":input[name$='[quantity]']").val();
if (quantity == '%' || quantity == 'saldo_of') {
var included_items;
if (reference_code == '') {
included_items = lineItem.prevAll('.line_item');
} else {
// Should match using ~= but acts_as_taggable_adds_colons between tags
included_items = list.find(":input[name$='[code]'][value='" + reference_code + "']").parents('.line_item, .saldo_line_item');
if (included_items.length == 0) {
// Should match using ~= but acts_as_taggable_adds_colons between tags
included_items = list.find(":input[name$='[include_in_saldo_list]'][value*='" + reference_code + "']").parents('.line_item, .saldo_line_item');
}
}
var price_input = lineItem.find(":input[name$='[price]']");
price_input.val(calculateTotalAmount(included_items));
}
}
function updateAllLineItemPrices() {
$('.line_item, .saldo_line_item').each(function() {
updateLineItemPrice($(this));
});
}
function calculateLineItemTotalAmount(lineItem) {
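// Total = times * price * factor * direct_account_factor; '%' quantities divide
// times by 100, and 'saldo_of' items simply return the already aggregated price.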
var times_input = lineItem.find(":input[name$='[times]']");
var times = accounting.parse(times_input.val());
if (isNaN(times)) {
times = 1;
};
var quantity_input = lineItem.find(":input[name$='[quantity]']");
var price_input = lineItem.find(":input[name$='[price]']");
var price = accounting.parse(price_input.val());
// For 'saldo_of' items, we don't take accounts into account
if (quantity_input.val() == "saldo_of") {
return currencyRound(price);
};
var direct_account_id = $('#line_items').data('direct-account-id');
var direct_account_factor = $('#line_items').data('direct-account-factor');
var factor = 0;
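// factor: +1 if the direct account is credited, -1 if it is debited, 0 otherwise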
if (lineItem.find(":input[name$='[credit_account_id]']").val() == direct_account_id) {
factor = 1;
};
if (lineItem.find(":input[name$='[debit_account_id]']").val() == direct_account_id) {
factor = -1;
};
if (quantity_input.val() == '%') {
times = times / 100;
};
return currencyRound(times * price * factor * direct_account_factor);
}
function updateLineItemTotalAmount(lineItem) {
var total_amount_input = lineItem.find(".total_amount");
var total_amount = accounting.formatNumber(calculateLineItemTotalAmount(lineItem));
// Update Element
total_amount_input.text(total_amount);
}
function calculateTotalAmount(lineItems) {
var total_amount = 0;
$(lineItems).each(function() {
total_amount += accounting.parse($(this).find(".total_amount").text());
});
return currencyRound(total_amount);
}
function updateLineItems() {
if ($('#line_items').length > 0) {
$('.line_item, .saldo_line_item').each(function() {
updateLineItemPrice($(this));
updateLineItemTotalAmount($(this));
});
};
}
// Recalculate after every key stroke
function handleLineItemChange(event) {
// If character is <return>
if(event.keyCode == 13) {
// ...trigger form action
$(event.currentTarget).submit();
} else if(event.keyCode == 32) {
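// If character is <space>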
// ...trigger form action
$(event.currentTarget).submit();
} else {
updateLineItems();
}
}
function addCalculateTotalAmountBehaviour() {
$("#line_items").find(":input[name$='[times]'], :input[name$='[quantity]'], :input[name$='[price]'], input[name$='[reference_code]']").on('keyup', handleLineItemChange);
$("#line_items").bind("sortstop", handleLineItemChange);
}
// Sorting
function updatePositions(collection) {
var items = collection.find('.nested-form-item').not('.delete');
items.each(function(index, element) {
$(this).find("input[id$='_position']").val(index + 1)
});
}
function initAccounting() {
// accounting.js
// Settings object that controls default parameters for library methods:
accounting.settings = {
currency: {
symbol : "", // default currency symbol is '$'
format: "%v", // controls output: %s = symbol, %v = value/number (can be object: see below)
decimal : ".", // decimal point separator
thousand: "'", // thousands separator
precision : 2 // decimal places
},
number: {
precision : 2, // default precision on numbers is 0
thousand: "'",
decimal : "."
}
}
}
// Initialize behaviours
function initializeBehaviours() {
// Init settings
initAccounting();
// from cyt.js
addComboboxBehaviour();
addAutofocusBehaviour();
addDatePickerBehaviour();
addLinkifyContainersBehaviour();
addIconTooltipBehaviour();
addModalBehaviour();
// application
addAlternateTableBehaviour();
addNestedFormBehaviour();
addCalculateTotalAmountBehaviour();
updateLineItems();
// twitter bootstrap
$(function () {
$(".alert").alert();
$("*[rel=popover]").popover({
offset: 10
});
$('.small-tooltip').tooltip({
placement: 'right'
});
})
// select2
$('.select2').select2({
allowClear: true
});
$('.select2-tags').each(function(index, element) {
var tags = $(element).data('tags') || '';
$(element).select2({
tags: tags,
tokenSeparators: [","]
})<|fim▁hole|>
// Loads functions after DOM is ready
$(document).ready(initializeBehaviours);<|fim▁end|> | })
} |
<|file_name|>pt_pt.js<|end_file_name|><|fim▁begin|>/*!
* froala_editor v3.2.5 (https://www.froala.com/wysiwyg-editor)
* License https://froala.com/wysiwyg-editor/terms/
* Copyright 2014-2020 Froala Labs
*/
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(require('froala-editor')) :
typeof define === 'function' && define.amd ? define(['froala-editor'], factory) :
(factory(global.FroalaEditor));
}(this, (function (FE) { 'use strict';
FE = FE && FE.hasOwnProperty('default') ? FE['default'] : FE;
/**
* Portuguese spoken in Portugal
*/
FE.LANGUAGE['pt_pt'] = {
translation: {
// Place holder
'Type something': 'Digite algo',
// Basic formatting
'Bold': 'Negrito',
'Italic': "It\xE1lico",
'Underline': 'Sublinhado',
'Strikethrough': 'Rasurado',
// Main buttons
'Insert': 'Inserir',
'Delete': 'Apagar',
'Cancel': 'Cancelar',
'OK': 'Ok',
'Back': 'Voltar',
'Remove': 'Remover',
'More': 'Mais',
'Update': 'Atualizar',
'Style': 'Estilo',
// Font
'Font Family': 'Fonte',
'Font Size': 'Tamanho da fonte',
// Colors
'Colors': 'Cores',
'Background': 'Fundo',
'Text': 'Texto',
'HEX Color': 'Cor hexadecimal',
// Paragraphs
'Paragraph Format': 'Formatos',
'Normal': 'Normal',
'Code': "C\xF3digo",
'Heading 1': "Cabe\xE7alho 1",
'Heading 2': "Cabe\xE7alho 2",
'Heading 3': "Cabe\xE7alho 3",
'Heading 4': "Cabe\xE7alho 4",
// Style
'Paragraph Style': "Estilo de par\xE1grafo",
'Inline Style': 'Estilo embutido',
// Alignment
'Align': 'Alinhar',
'Align Left': "Alinhar \xE0 esquerda",
'Align Center': 'Alinhar ao centro',
'Align Right': "Alinhar \xE0 direita",
'Align Justify': 'Justificado',
'None': 'Nenhum',
// Lists
'Ordered List': 'Lista ordenada',
'Unordered List': "Lista n\xE3o ordenada",
// Indent
'Decrease Indent': "Diminuir avan\xE7o",
'Increase Indent': "Aumentar avan\xE7o",
// Links
'Insert Link': 'Inserir link',
'Open in new tab': 'Abrir em uma nova aba',
'Open Link': 'Abrir link',
'Edit Link': 'Editar link',
'Unlink': 'Remover link',
'Choose Link': 'Escolha o link',
// Images
'Insert Image': 'Inserir imagem',
'Upload Image': 'Carregar imagem',
'By URL': 'Por URL',
'Browse': 'Procurar',
'Drop image': 'Largue imagem',
'or click': 'ou clique em',
'Manage Images': 'Gerenciar as imagens',
'Loading': 'Carregando',
'Deleting': 'Excluindo',
'Tags': 'Etiquetas',
'Are you sure? Image will be deleted.': "Voc\xEA tem certeza? Imagem ser\xE1 apagada.",
'Replace': 'Substituir',
'Uploading': 'Carregando',
'Loading image': 'Carregando imagem',
'Display': 'Exibir',
'Inline': 'Em linha',
'Break Text': 'Texto de quebra',
'Alternative Text': 'Texto alternativo',
'Change Size': 'Alterar tamanho',
'Width': 'Largura',
'Height': 'Altura',
'Something went wrong. Please try again.': 'Algo deu errado. Por favor, tente novamente.',
'Image Caption': 'Legenda da imagem',
'Advanced Edit': 'Edição avançada',
// Video
'Insert Video': "Inserir v\xEDdeo",
'Embedded Code': "C\xF3digo embutido",
'Paste in a video URL': 'Colar em um URL de vídeo',
'Drop video': 'Solte o video',
'Your browser does not support HTML5 video.': 'Seu navegador não suporta o vídeo html5.',
'Upload Video': 'Envio vídeo',
// Tables
'Insert Table': 'Inserir tabela',
'Table Header': "Cabe\xE7alho da tabela",
'Remove Table': 'Remover tabela',
'Table Style': 'estilo de tabela',
'Horizontal Align': 'Alinhamento horizontal',
'Row': 'Linha',
'Insert row above': 'Inserir linha antes',
'Insert row below': 'Inserir linha depois',
'Delete row': 'Eliminar linha',
'Column': 'Coluna',
'Insert column before': 'Inserir coluna antes',
'Insert column after': 'Inserir coluna depois',
'Delete column': 'Eliminar coluna',
'Cell': "C\xE9lula",
'Merge cells': "Unir c\xE9lulas",
'Horizontal split': "Divis\xE3o horizontal",
'Vertical split': "Divis\xE3o vertical",
'Cell Background': "Fundo da c\xE9lula",
'Vertical Align': 'Alinhar vertical',
'Top': 'Topo',
'Middle': 'Meio',
'Bottom': 'Fundo',
'Align Top': 'Alinhar topo',
'Align Middle': 'Alinhar meio',
'Align Bottom': 'Alinhar fundo',
'Cell Style': "Estilo de c\xE9lula",
// Files
'Upload File': 'Upload de arquivo',
'Drop file': 'Largar arquivo',
// Emoticons
'Emoticons': 'Emoticons',
'Grinning face': 'Sorrindo a cara',
'Grinning face with smiling eyes': 'Sorrindo rosto com olhos sorridentes',
'Face with tears of joy': "Rosto com l\xE1grimas de alegria",
'Smiling face with open mouth': 'Rosto de sorriso com a boca aberta',
'Smiling face with open mouth and smiling eyes': 'Rosto de sorriso com a boca aberta e olhos sorridentes',
'Smiling face with open mouth and cold sweat': 'Rosto de sorriso com a boca aberta e suor frio',
'Smiling face with open mouth and tightly-closed eyes': 'Rosto de sorriso com a boca aberta e os olhos bem fechados',
'Smiling face with halo': 'Rosto de sorriso com halo',
'Smiling face with horns': 'Rosto de sorriso com chifres',
'Winking face': 'Pisc a rosto',
'Smiling face with smiling eyes': 'Rosto de sorriso com olhos sorridentes',
'Face savoring delicious food': 'Rosto saboreando uma deliciosa comida',
'Relieved face': 'Rosto aliviado',
'Smiling face with heart-shaped eyes': "Rosto de sorriso com os olhos em forma de cora\xE7\xE3o",
'Smiling face with sunglasses': "Rosto de sorriso com \xF3culos de sol",
'Smirking face': 'Rosto sorridente',
'Neutral face': 'Rosto neutra',
'Expressionless face': 'Rosto inexpressivo',
'Unamused face': "O rosto n\xE3o divertido",<|fim▁hole|> 'Face with cold sweat': 'Rosto com suor frio',
'Pensive face': 'O rosto pensativo',
'Confused face': 'Cara confusa',
'Confounded face': "Rosto at\xF4nito",
'Kissing face': 'Beijar Rosto',
'Face throwing a kiss': 'Rosto jogando um beijo',
'Kissing face with smiling eyes': 'Beijar rosto com olhos sorridentes',
'Kissing face with closed eyes': 'Beijando a cara com os olhos fechados',
'Face with stuck out tongue': "Preso de cara com a l\xEDngua para fora",
'Face with stuck out tongue and winking eye': "Rosto com estendeu a l\xEDngua e olho piscando",
'Face with stuck out tongue and tightly-closed eyes': 'Rosto com estendeu a língua e os olhos bem fechados',
'Disappointed face': 'Rosto decepcionado',
'Worried face': 'O rosto preocupado',
'Angry face': 'Rosto irritado',
'Pouting face': 'Beicinho Rosto',
'Crying face': 'Cara de choro',
'Persevering face': 'Perseverar Rosto',
'Face with look of triumph': 'Rosto com olhar de triunfo',
'Disappointed but relieved face': 'Fiquei Desapontado mas aliviado Rosto',
'Frowning face with open mouth': 'Sobrancelhas franzidas rosto com a boca aberta',
'Anguished face': 'O rosto angustiado',
'Fearful face': 'Cara com medo',
'Weary face': 'Rosto cansado',
'Sleepy face': 'Cara de sono',
'Tired face': 'Rosto cansado',
'Grimacing face': 'Fazendo caretas face',
'Loudly crying face': 'Alto chorando rosto',
'Face with open mouth': 'Enfrentar com a boca aberta',
'Hushed face': 'Flagrantes de rosto',
'Face with open mouth and cold sweat': 'Enfrentar com a boca aberta e suor frio',
'Face screaming in fear': 'Cara gritando de medo',
'Astonished face': 'Cara de surpresa',
'Flushed face': 'Rosto vermelho',
'Sleeping face': 'O rosto de sono',
'Dizzy face': 'Cara tonto',
'Face without mouth': 'Rosto sem boca',
'Face with medical mask': "Rosto com m\xE1scara m\xE9dica",
// Line breaker
'Break': 'Partir',
// Math
'Subscript': 'Subscrito',
'Superscript': 'Sobrescrito',
// Full screen
'Fullscreen': 'Tela cheia',
// Horizontal line
'Insert Horizontal Line': 'Inserir linha horizontal',
// Clear formatting
'Clear Formatting': "Remover formata\xE7\xE3o",
// Save
'Save': "Salve",
// Undo, redo
'Undo': 'Anular',
'Redo': 'Restaurar',
// Select all
'Select All': 'Seleccionar tudo',
// Code view
'Code View': "Exibi\xE7\xE3o de c\xF3digo",
// Quote
'Quote': "Cita\xE7\xE3o",
'Increase': 'Aumentar',
'Decrease': 'Diminuir',
// Quick Insert
'Quick Insert': "Inser\xE7\xE3o r\xE1pida",
// Special Characters
'Special Characters': 'Caracteres especiais',
'Latin': 'Latino',
'Greek': 'Grego',
'Cyrillic': 'Cirílico',
'Punctuation': 'Pontuação',
'Currency': 'Moeda',
'Arrows': 'Setas; flechas',
'Math': 'Matemática',
'Misc': 'Misc',
// Print.
'Print': 'Impressão',
// Spell Checker.
'Spell Checker': 'Verificador ortográfico',
// Help
'Help': 'Socorro',
'Shortcuts': 'Atalhos',
'Inline Editor': 'Editor em linha',
'Show the editor': 'Mostre o editor',
'Common actions': 'Ações comuns',
'Copy': 'Cópia de',
'Cut': 'Cortar',
'Paste': 'Colar',
'Basic Formatting': 'Formatação básica',
'Increase quote level': 'Aumentar o nível de cotação',
'Decrease quote level': 'Diminuir o nível de cotação',
'Image / Video': 'Imagem / video',
'Resize larger': 'Redimensionar maior',
'Resize smaller': 'Redimensionar menor',
'Table': 'Tabela',
'Select table cell': 'Selecione a célula da tabela',
'Extend selection one cell': 'Ampliar a seleção de uma célula',
'Extend selection one row': 'Ampliar a seleção uma linha',
'Navigation': 'Navegação',
'Focus popup / toolbar': 'Foco popup / barra de ferramentas',
'Return focus to previous position': 'Retornar o foco para a posição anterior',
// Embed.ly
'Embed URL': 'URL de inserção',
'Paste in a URL to embed': 'Colar em url para incorporar',
// Word Paste.
'The pasted content is coming from a Microsoft Word document. Do you want to keep the format or clean it up?': 'O conteúdo colado vem de um documento Microsoft Word. Você quer manter o formato ou limpá-lo?',
'Keep': 'Guarda',
'Clean': 'Limpar limpo',
'Word Paste Detected': 'Pasta de palavras detectada',
// Character Counter
'Characters': 'Caracteres',
// More Buttons
'More Text': 'Mais Texto',
'More Paragraph': 'Mais Parágrafo',
'More Rich': 'Mais Rico',
'More Misc': 'Mais Misc'
},
direction: 'ltr'
};
})));
//# sourceMappingURL=pt_pt.js.map<|fim▁end|> | |
<|file_name|>atm-outlined.js<|end_file_name|><|fim▁begin|><|fim▁hole|> d: "M8 9v1.5h2.25V15h1.5v-4.5H14V9H8zM6 9H3c-.55 0-1 .45-1 1v5h1.5v-1.5h2V15H7v-5c0-.55-.45-1-1-1zm-.5 3h-2v-1.5h2V12zM21 9h-4.5c-.55 0-1 .45-1 1v5H17v-4.5h1V14h1.5v-3.51h1V15H22v-5c0-.55-.45-1-1-1z"
}), 'AtmOutlined');<|fim▁end|> | import { h } from 'omi';
import createSvgIcon from './utils/createSvgIcon';
export default createSvgIcon(h("path", { |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls.defaults import url, patterns
<|fim▁hole|><|fim▁end|> | urlpatterns = patterns('tutorial.views',
url(r'^$', 'tutorial', name='tutorial'),
) |
<|file_name|>test_udtf.py<|end_file_name|><|fim▁begin|>################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import unittest
from pyflink.table import DataTypes
from pyflink.table.udf import TableFunction, udtf, ScalarFunction, udf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkOldStreamTableTestCase, \
PyFlinkBlinkStreamTableTestCase, PyFlinkOldBatchTableTestCase, PyFlinkBlinkBatchTableTestCase
class UserDefinedTableFunctionTests(object):
def test_table_function(self):
self._register_table_sink(
['a', 'b', 'c'],
[DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT()])
multi_emit = udtf(MultiEmit(), result_types=[DataTypes.BIGINT(), DataTypes.BIGINT()])
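# MultiEmit.eval(x, y) yields the rows (x, 0), (x, 1), ..., (x, y-1)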
multi_num = udf(MultiNum(), result_type=DataTypes.BIGINT())
t = self.t_env.from_elements([(1, 1, 3), (2, 1, 6), (3, 2, 9)], ['a', 'b', 'c'])
t = t.join_lateral(multi_emit(t.a, multi_num(t.b)).alias('x', 'y'))
t = t.left_outer_join_lateral(condition_multi_emit(t.x, t.y).alias('m')) \
.select("x, y, m")
t = t.left_outer_join_lateral(identity(t.m).alias('n')) \
.select("x, y, n")
actual = self._get_output(t)
self.assert_equals(actual,
["+I[1, 0, null]", "+I[1, 1, null]", "+I[2, 0, null]", "+I[2, 1, null]",
"+I[3, 0, 0]", "+I[3, 0, 1]", "+I[3, 0, 2]", "+I[3, 1, 1]",
"+I[3, 1, 2]", "+I[3, 2, 2]", "+I[3, 3, null]"])
def test_table_function_with_sql_query(self):
self._register_table_sink(
['a', 'b', 'c'],
[DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.create_temporary_system_function(
"multi_emit", udtf(MultiEmit(), result_types=[DataTypes.BIGINT(), DataTypes.BIGINT()]))<|fim▁hole|> t = self.t_env.from_elements([(1, 1, 3), (2, 1, 6), (3, 2, 9)], ['a', 'b', 'c'])
self.t_env.register_table("MyTable", t)
t = self.t_env.sql_query(
"SELECT a, x, y FROM MyTable LEFT JOIN LATERAL TABLE(multi_emit(a, b)) as T(x, y)"
" ON TRUE")
actual = self._get_output(t)
self.assert_equals(actual, ["+I[1, 1, 0]", "+I[2, 2, 0]", "+I[3, 3, 0]", "+I[3, 3, 1]"])
def _register_table_sink(self, field_names: list, field_types: list):
table_sink = source_sink_utils.TestAppendSink(field_names, field_types)
self.t_env.register_table_sink("Results", table_sink)
def _get_output(self, t):
t.execute_insert("Results").wait()
return source_sink_utils.results()
class PyFlinkStreamUserDefinedTableFunctionTests(UserDefinedTableFunctionTests,
PyFlinkOldStreamTableTestCase):
pass
class PyFlinkBlinkStreamUserDefinedFunctionTests(UserDefinedTableFunctionTests,
PyFlinkBlinkStreamTableTestCase):
pass
class PyFlinkBlinkBatchUserDefinedFunctionTests(UserDefinedTableFunctionTests,
PyFlinkBlinkBatchTableTestCase):
pass
class PyFlinkBatchUserDefinedTableFunctionTests(UserDefinedTableFunctionTests,
PyFlinkOldBatchTableTestCase):
def _register_table_sink(self, field_names: list, field_types: list):
pass
def _get_output(self, t):
return self.collect(t)
def test_row_type_as_input_types_and_result_types(self):
# test input_types and result_types are DataTypes.ROW
a = udtf(lambda i: i,
input_types=DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT())]),
result_types=DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT())]))
self.assertEqual(a._input_types,
[DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT())])])
self.assertEqual(a._result_types,
[DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT())])])
class MultiEmit(TableFunction, unittest.TestCase):
def open(self, function_context):
mg = function_context.get_metric_group()
self.counter = mg.add_group("key", "value").counter("my_counter")
self.counter_sum = 0
def eval(self, x, y):
self.counter.inc(y)
self.counter_sum += y
for i in range(y):
yield x, i
@udtf(result_types=[DataTypes.BIGINT()])
def identity(x):
if x is not None:
from pyflink.common import Row
return Row(x)
# test specify the input_types
@udtf(input_types=[DataTypes.BIGINT(), DataTypes.BIGINT()],
result_types=DataTypes.BIGINT())
def condition_multi_emit(x, y):
if x == 3:
return range(y, x)
class MultiNum(ScalarFunction):
def eval(self, x):
return x * 2
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)<|fim▁end|> | |
<|file_name|>PatchController.java<|end_file_name|><|fim▁begin|>package com.jy.controller.workflow.online.apply;
import com.jy.common.ajax.AjaxRes;
import com.jy.common.utils.DateUtils;
import com.jy.common.utils.base.Const;
import com.jy.common.utils.security.AccountShiroUtil;
import com.jy.controller.base.BaseController;
import com.jy.entity.attendance.WorkRecord;
import com.jy.entity.oa.overtime.Overtime;
import com.jy.entity.oa.patch.Patch;
import com.jy.entity.oa.task.TaskInfo;
import com.jy.service.oa.activiti.ActivitiDeployService;
import com.jy.service.oa.overtime.OvertimeService;
import com.jy.service.oa.patch.PatchService;
import com.jy.service.oa.task.TaskInfoService;
import org.activiti.engine.IdentityService;
import org.activiti.engine.RuntimeService;
import org.activiti.engine.TaskService;
import org.activiti.engine.impl.persistence.entity.ExecutionEntity;
import org.activiti.engine.runtime.ProcessInstance;
import org.activiti.engine.task.Task;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.ResponseBody;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Missed clock-in correction page
*/
@Controller
@RequestMapping(value = "/backstage/workflow/online/patch/")
public class PatchController extends BaseController<Object> {
private static final String SECURITY_URL = "/backstage/workflow/online/patch/index";
@Autowired
private RuntimeService runtimeService;
@Autowired
private TaskService taskService;
@Autowired
private TaskInfoService taskInfoService;
@Autowired
private IdentityService identityService;
@Autowired
private PatchService patchService;
@Autowired
private ActivitiDeployService activitiDeployService;
/**
     * Missed clock-in correction list
*/
@RequestMapping(value = "index")
public String index(org.springframework.ui.Model model) {
if (doSecurityIntercept(Const.RESOURCES_TYPE_MENU)) {
model.addAttribute("permitBtn", getPermitBtn(Const.RESOURCES_TYPE_FUNCTION));
return "/system/workflow/online/apply/patch";
}
return Const.NO_AUTHORIZED_URL;
}
/**
     * Start the workflow process
*/
@RequestMapping(value = "start", method = RequestMethod.POST)
@ResponseBody
public AjaxRes startWorkflow(Patch o) {
AjaxRes ar = getAjaxRes();
if (ar.setNoAuth(doSecurityIntercept(Const.RESOURCES_TYPE_MENU, SECURITY_URL))) {
try {
String currentUserId = AccountShiroUtil.getCurrentUser().getAccountId();
String[] approvers = o.getApprover().split(",");
Map<String, Object> variables = new HashMap<String, Object>();
for (int i = 0; i < approvers.length; i++) {
variables.put("approver" + (i + 1), approvers[i]);
}
String workflowKey = "patch";
identityService.setAuthenticatedUserId(currentUserId);
Date now = new Date();
ProcessInstance processInstance = runtimeService.startProcessInstanceByKeyAndTenantId(workflowKey, variables, getCompany());
String pId = processInstance.getId();
String leaveID = get32UUID();
o.setPid(pId);
o.setAccountId(currentUserId);
o.setCreatetime(now);
o.setIsvalid(0);
o.setName(AccountShiroUtil.getCurrentUser().getName());
o.setId(leaveID);
patchService.insert(o);
Task task = taskService.createTaskQuery().processInstanceId(pId).singleResult();
String processDefinitionName = ((ExecutionEntity) processInstance).getProcessInstance().getProcessDefinition().getName();
String subkect = processDefinitionName + "-"
+ AccountShiroUtil.getCurrentUser().getName() + "-" + DateUtils.formatDate(now, "yyyy-MM-dd HH:mm");
                // Start of the process
TaskInfo taskInfo = new TaskInfo();
taskInfo.setId(get32UUID());
taskInfo.setBusinesskey(leaveID);
taskInfo.setCode("start");
taskInfo.setName("发起申请");
taskInfo.setStatus(0);
taskInfo.setPresentationsubject(subkect);
taskInfo.setAttr1(processDefinitionName);
taskInfo.setCreatetime(DateUtils.addSeconds(now, -1));
taskInfo.setCompletetime(DateUtils.addSeconds(now, -1));
taskInfo.setCreator(currentUserId);
taskInfo.setAssignee(currentUserId);
taskInfo.setTaskid("0");
taskInfo.setPkey(workflowKey);
taskInfo.setExecutionid("0");
taskInfo.setProcessinstanceid(processInstance.getId());
taskInfo.setProcessdefinitionid(processInstance.getProcessDefinitionId());
taskInfoService.insert(taskInfo);
                // First-level approval step
taskInfo.setId(get32UUID());
taskInfo.setCode(processInstance.getActivityId());
taskInfo.setName(task.getName());<|fim▁hole|> taskInfo.setStatus(1);
taskInfo.setTaskid(task.getId());
taskInfo.setCreatetime(now);
taskInfo.setCompletetime(null);
taskInfo.setAssignee(approvers[0]);
taskInfoService.insert(taskInfo);
ar.setSucceedMsg("发起补卡申请成功!");
} catch (Exception e) {
logger.error(e.toString(), e);
ar.setFailMsg("启动流程失败");
} finally {
identityService.setAuthenticatedUserId(null);
}
}
return ar;
}
}<|fim▁end|> | |
<|file_name|>calc_delta_BC.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Calculate the change in the betweenness centrality of each residue over the
# course of an MD simulation
#
# Script distributed under GNU GPL 3.0
#<|fim▁hole|>import argparse, calc_delta
from lib.cli import CLI
from lib.utils import Logger
def main(args):
args.matrix_type = "BC"
calc_delta.main(args)
log = Logger()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--reference", help="The reference BC matrix (.dat)")
parser.add_argument("--alternatives", help="The alternative BC matrices (.dat)", nargs="*")
parser.add_argument("--normalize", help="Normalizes the values", action='store_true', default=False)
parser.add_argument('--normalization-mode', help="Method used to normalize - default: (Delta BC/(BC+1))", default=None)
parser.add_argument("--generate-plots", help="Plot results - without setting this flag, no graph will be generated", action='store_true', default=False)
CLI(parser, main, log)<|fim▁end|> | # Author: David Brown
# Date: 17-11-2016
|
<|file_name|>0054_realm_icon.py<|end_file_name|><|fim▁begin|># Generated by Django 1.10.5 on 2017-02-15 06:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0053_emailchangestatus'),
]
operations = [
migrations.AddField(<|fim▁hole|> choices=[('G', 'Hosted by Gravatar'), ('U', 'Uploaded by administrator')],
default='G', max_length=1),
),
migrations.AddField(
model_name='realm',
name='icon_version',
field=models.PositiveSmallIntegerField(default=1),
),
]<|fim▁end|> | model_name='realm',
name='icon_source',
field=models.CharField( |
<|file_name|>exterior.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
<|fim▁hole|>
struct Point {x: int, y: int, z: int}
fn f(p: Gc<Cell<Point>>) {
assert!((p.get().z == 12));
p.set(Point {x: 10, y: 11, z: 13});
assert!((p.get().z == 13));
}
pub fn main() {
let a: Point = Point {x: 10, y: 11, z: 12};
let b: Gc<Cell<Point>> = box(GC) Cell::new(a);
assert_eq!(b.get().z, 12);
f(b);
assert_eq!(a.z, 12);
assert_eq!(b.get().z, 13);
}<|fim▁end|> | #![feature(managed_boxes)]
use std::cell::Cell;
use std::gc::{Gc, GC}; |
<|file_name|>mockchain.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python -tt
# by [email protected]
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
# copyright 2012 Red Hat, Inc.
# SUMMARY
# mockchain
# take a mock config and a series of srpms
# rebuild them one at a time
# adding each to a local repo
# so they are available as build deps to next pkg being built
import sys
import subprocess
import os
import optparse
import tempfile
import shutil
from urlgrabber import grabber
import time
import mockbuild.util
# all of the variables below are substituted by the build system
__VERSION__ = "unreleased_version"
SYSCONFDIR = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "..", "etc")
PYTHONDIR = os.path.dirname(os.path.realpath(sys.argv[0]))
PKGPYTHONDIR = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "mockbuild")
MOCKCONFDIR = os.path.join(SYSCONFDIR, "mock")
# end build system subs
mockconfig_path='/etc/mock'
def createrepo(path):
if os.path.exists(path + '/repodata/repomd.xml'):
comm = ['/usr/bin/createrepo', '--update', path]
else:
comm = ['/usr/bin/createrepo', path]
cmd = subprocess.Popen(comm,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = cmd.communicate()
return out, err
def parse_args(args):
parser = optparse.OptionParser('\nmockchain -r mockcfg pkg1 [pkg2] [pkg3]')
parser.add_option('-r', '--root', default=None, dest='chroot',
help="chroot config name/base to use in the mock build")
parser.add_option('-l', '--localrepo', default=None,
help="local path for the local repo, defaults to making its own")
parser.add_option('-c', '--continue', default=False, action='store_true',
dest='cont',
help="if a pkg fails to build, continue to the next one")
parser.add_option('-a','--addrepo', default=[], action='append',
dest='repos',
help="add these repo baseurls to the chroot's yum config")
parser.add_option('--recurse', default=False, action='store_true',
help="if more than one pkg and it fails to build, try to build the rest and come back to it")
parser.add_option('--log', default=None, dest='logfile',
help="log to the file named by this option, defaults to not logging")
parser.add_option('--tmp_prefix', default=None, dest='tmp_prefix',
help="tmp dir prefix - will default to username-pid if not specified")
#FIXME?
# figure out how to pass other args to mock?
opts, args = parser.parse_args(args)
if opts.recurse:
opts.cont = True
if not opts.chroot:
print "You must provide an argument to -r for the mock chroot"
sys.exit(1)
if len(sys.argv) < 3:
        print "You must specify at least 1 package to build"
sys.exit(1)
return opts, args
def add_local_repo(infile, destfile, baseurl, repoid=None):
"""take a mock chroot config and add a repo to it's yum.conf
infile = mock chroot config file
destfile = where to save out the result
baseurl = baseurl of repo you wish to add"""
global config_opts
try:
execfile(infile)
if not repoid:
repoid=baseurl.split('//')[1].replace('/','_')
localyumrepo="""
[%s]
name=%s
baseurl=%s
enabled=1
skip_if_unavailable=1
metadata_expire=30
cost=1
""" % (repoid, baseurl, baseurl)
config_opts['yum.conf'] += localyumrepo
br_dest = open(destfile, 'w')
for k,v in config_opts.items():
br_dest.write("config_opts[%r] = %r\n" % (k, v))
br_dest.close()
return True, ''
except (IOError, OSError):
return False, "Could not write mock config to %s" % destfile
return True, ''
def do_build(opts, cfg, pkg):
# returns 0, cmd, out, err = failure
# returns 1, cmd, out, err = success
# returns 2, None, None, None = already built
s_pkg = os.path.basename(pkg)
pdn = s_pkg.replace('.src.rpm', '')
resdir = '%s/%s' % (opts.local_repo_dir, pdn)
resdir = os.path.normpath(resdir)
if not os.path.exists(resdir):
os.makedirs(resdir)
success_file = resdir + '/success'<|fim▁hole|>
# clean it up if we're starting over :)
if os.path.exists(fail_file):
os.unlink(fail_file)
mockcmd = ['/usr/bin/mock',
'--configdir', opts.config_path,
'--resultdir', resdir,
'--uniqueext', opts.uniqueext,
'-r', cfg, ]
print 'building %s' % s_pkg
mockcmd.append(pkg)
cmd = subprocess.Popen(mockcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE )
out, err = cmd.communicate()
if cmd.returncode == 0:
open(success_file, 'w').write('done\n')
ret = 1
else:
open(fail_file, 'w').write('undone\n')
ret = 0
return ret, cmd, out, err
def log(lf, msg):
if lf:
now = time.time()
try:
open(lf, 'a').write(str(now) + ':' + msg + '\n')
except (IOError, OSError), e:
print 'Could not write to logfile %s - %s' % (lf, str(e))
print msg
config_opts = {}
def main(args):
global config_opts
config_opts = mockbuild.util.setup_default_config_opts(os.getgid(), __VERSION__, PKGPYTHONDIR)
opts, args = parse_args(args)
# take mock config + list of pkgs
cfg=opts.chroot
pkgs=args[1:]
mockcfg = mockconfig_path + '/' + cfg + '.cfg'
if not os.path.exists(mockcfg):
print "could not find config: %s" % mockcfg
sys.exit(1)
if not opts.tmp_prefix:
try:
opts.tmp_prefix = os.getlogin()
except OSError, e:
print "Could not find login name for tmp dir prefix add --tmp_prefix"
sys.exit(1)
pid = os.getpid()
opts.uniqueext = '%s-%s' % (opts.tmp_prefix, pid)
# create a tempdir for our local info
if opts.localrepo:
local_tmp_dir = os.path.abspath(opts.localrepo)
if not os.path.exists(local_tmp_dir):
os.makedirs(local_tmp_dir)
else:
pre = 'mock-chain-%s-' % opts.uniqueext
local_tmp_dir = tempfile.mkdtemp(prefix=pre, dir='/var/tmp')
os.chmod(local_tmp_dir, 0755)
if opts.logfile:
opts.logfile = os.path.join(local_tmp_dir, opts.logfile)
if os.path.exists(opts.logfile):
os.unlink(opts.logfile)
log(opts.logfile, "starting logfile: %s" % opts.logfile)
opts.local_repo_dir = os.path.normpath(local_tmp_dir + '/results/' + cfg + '/')
if not os.path.exists(opts.local_repo_dir):
os.makedirs(opts.local_repo_dir, mode=0755)
local_baseurl="file://%s" % opts.local_repo_dir
log(opts.logfile, "results dir: %s" % opts.local_repo_dir)
opts.config_path = os.path.normpath(local_tmp_dir + '/configs/' + cfg + '/')
if not os.path.exists(opts.config_path):
os.makedirs(opts.config_path, mode=0755)
log(opts.logfile, "config dir: %s" % opts.config_path)
my_mock_config = opts.config_path + '/' + os.path.basename(mockcfg)
# modify with localrepo
res, msg = add_local_repo(mockcfg, my_mock_config, local_baseurl, 'local_build_repo')
if not res:
log(opts.logfile, "Error: Could not write out local config: %s" % msg)
sys.exit(1)
for baseurl in opts.repos:
res, msg = add_local_repo(my_mock_config, my_mock_config, baseurl)
if not res:
log(opts.logfile, "Error: Could not add: %s to yum config in mock chroot: %s" % (baseurl, msg))
sys.exit(1)
# these files needed from the mock.config dir to make mock run
for fn in ['site-defaults.cfg', 'logging.ini']:
pth = mockconfig_path + '/' + fn
shutil.copyfile(pth, opts.config_path + '/' + fn)
# createrepo on it
out, err = createrepo(opts.local_repo_dir)
if err.strip():
log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
log(opts.logfile, "Err: %s" % err)
sys.exit(1)
download_dir = tempfile.mkdtemp()
downloaded_pkgs = {}
built_pkgs = []
try_again = True
to_be_built = pkgs
while try_again:
failed = []
for pkg in to_be_built:
if not pkg.endswith('.rpm'):
log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg)
failed.append(pkg)
continue
elif pkg.startswith('http://') or pkg.startswith('https://'):
url = pkg
cwd = os.getcwd()
os.chdir(download_dir)
try:
log(opts.logfile, 'Fetching %s' % url)
ug = grabber.URLGrabber()
fn = ug.urlgrab(url)
pkg = download_dir + '/' + fn
except Exception, e:
log(opts.logfile, 'Error Downloading %s: %s' % (url, str(e)))
failed.append(url)
os.chdir(cwd)
continue
else:
os.chdir(cwd)
downloaded_pkgs[pkg] = url
log(opts.logfile, "Start build: %s" % pkg)
ret, cmd, out, err = do_build(opts, cfg, pkg)
log(opts.logfile, "End build: %s" % pkg)
if ret == 0:
if opts.recurse:
failed.append(pkg)
log(opts.logfile, "Error building %s, will try again" % os.path.basename(pkg))
else:
log(opts.logfile,"Error building %s" % os.path.basename(pkg))
log(opts.logfile,"See logs/results in %s" % opts.local_repo_dir)
if not opts.cont:
sys.exit(1)
elif ret == 1:
log(opts.logfile, "Success building %s" % os.path.basename(pkg))
built_pkgs.append(pkg)
# createrepo with the new pkgs
out, err = createrepo(opts.local_repo_dir)
if err.strip():
log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
log(opts.logfile, "Err: %s" % err)
elif ret == 2:
log(opts.logfile, "Skipping already built pkg %s" % os.path.basename(pkg))
if failed:
if len(failed) != len(to_be_built):
to_be_built = failed
try_again = True
log(opts.logfile, 'Trying to rebuild %s failed pkgs' % len(failed))
else:
log(opts.logfile, "Tried twice - following pkgs could not be successfully built:")
for pkg in failed:
msg = pkg
if pkg in downloaded_pkgs:
msg = downloaded_pkgs[pkg]
log(opts.logfile, msg)
try_again = False
else:
try_again = False
# cleaning up our download dir
shutil.rmtree(download_dir, ignore_errors=True)
log(opts.logfile, "Results out to: %s" % opts.local_repo_dir)
log(opts.logfile, "Pkgs built: %s" % len(built_pkgs))
log(opts.logfile, "Packages successfully built in this order:")
for pkg in built_pkgs:
log(opts.logfile, pkg)
if __name__ == "__main__":
main(sys.argv)
sys.exit(0)<|fim▁end|> | fail_file = resdir + '/fail'
if os.path.exists(success_file):
return 2, None, None, None |
<|file_name|>main_test.go<|end_file_name|><|fim▁begin|>package distribution
import (
"testing"
"github.com/ready-steady/assert"
)
func TestParse(t *testing.T) {
cases := []struct {
line string
success bool
}{
{"Beta(1, 1)", true},
{"beta(0.5, 1.5)", true},
{" Beta \t (1, 1)", true},
{"Gamma(1, 1)", false},
{"Beta(1, 1, 1)", false},
{"beta(-1, 1)", false},
{"beta(0, 1)", false},
{"beta(1, -1)", false},<|fim▁hole|> {"uniform( )", true},
}
for _, c := range cases {
if _, err := Parse(c.line); c.success {
assert.Success(err, t)
} else {
assert.Failure(err, t)
}
}
}<|fim▁end|> | {"beta(1, 0)", false},
{"beta(1, 0)", false},
{"uniform()", true}, |
<|file_name|>triggerbinding.go<|end_file_name|><|fim▁begin|>/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1beta1
import (
v1beta1 "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// TriggerBindingLister helps list TriggerBindings.
// All objects returned here must be treated as read-only.
type TriggerBindingLister interface {
// List lists all TriggerBindings in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1beta1.TriggerBinding, err error)
// TriggerBindings returns an object that can list and get TriggerBindings.
TriggerBindings(namespace string) TriggerBindingNamespaceLister
TriggerBindingListerExpansion
}
// triggerBindingLister implements the TriggerBindingLister interface.
type triggerBindingLister struct {
indexer cache.Indexer
}
// NewTriggerBindingLister returns a new TriggerBindingLister.
func NewTriggerBindingLister(indexer cache.Indexer) TriggerBindingLister {
return &triggerBindingLister{indexer: indexer}
}
// List lists all TriggerBindings in the indexer.
func (s *triggerBindingLister) List(selector labels.Selector) (ret []*v1beta1.TriggerBinding, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1beta1.TriggerBinding))
})
return ret, err
}
// TriggerBindings returns an object that can list and get TriggerBindings.
func (s *triggerBindingLister) TriggerBindings(namespace string) TriggerBindingNamespaceLister {
return triggerBindingNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// TriggerBindingNamespaceLister helps list and get TriggerBindings.
// All objects returned here must be treated as read-only.
type TriggerBindingNamespaceLister interface {
// List lists all TriggerBindings in the indexer for a given namespace.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1beta1.TriggerBinding, err error)
// Get retrieves the TriggerBinding from the indexer for a given namespace and name.
// Objects returned here must be treated as read-only.
Get(name string) (*v1beta1.TriggerBinding, error)
TriggerBindingNamespaceListerExpansion
}<|fim▁hole|> indexer cache.Indexer
namespace string
}
// List lists all TriggerBindings in the indexer for a given namespace.
func (s triggerBindingNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.TriggerBinding, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1beta1.TriggerBinding))
})
return ret, err
}
// Get retrieves the TriggerBinding from the indexer for a given namespace and name.
func (s triggerBindingNamespaceLister) Get(name string) (*v1beta1.TriggerBinding, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1beta1.Resource("triggerbinding"), name)
}
return obj.(*v1beta1.TriggerBinding), nil
}<|fim▁end|> |
// triggerBindingNamespaceLister implements the TriggerBindingNamespaceLister
// interface.
type triggerBindingNamespaceLister struct { |
<|file_name|>ledger.pb.go<|end_file_name|><|fim▁begin|>// Code generated by protoc-gen-go.
// source: common/ledger.proto
// DO NOT EDIT!
package common
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Contains information about the blockchain ledger such as height, current
// block hash, and previous block hash.
type BlockchainInfo struct {
Height uint64 `protobuf:"varint,1,opt,name=height" json:"height,omitempty"`
CurrentBlockHash []byte `protobuf:"bytes,2,opt,name=currentBlockHash,proto3" json:"currentBlockHash,omitempty"`
PreviousBlockHash []byte `protobuf:"bytes,3,opt,name=previousBlockHash,proto3" json:"previousBlockHash,omitempty"`
}
func (m *BlockchainInfo) Reset() { *m = BlockchainInfo{} }
func (m *BlockchainInfo) String() string { return proto.CompactTextString(m) }
func (*BlockchainInfo) ProtoMessage() {}
func (*BlockchainInfo) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} }
func init() {
proto.RegisterType((*BlockchainInfo)(nil), "common.BlockchainInfo")
}
func init() { proto.RegisterFile("common/ledger.proto", fileDescriptor3) }
<|fim▁hole|>var fileDescriptor3 = []byte{
// 173 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x4e, 0xce, 0xcf, 0xcd,
0xcd, 0xcf, 0xd3, 0xcf, 0x49, 0x4d, 0x49, 0x4f, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17,
0x62, 0x83, 0x08, 0x2a, 0x35, 0x31, 0x72, 0xf1, 0x39, 0xe5, 0xe4, 0x27, 0x67, 0x27, 0x67, 0x24,
0x66, 0xe6, 0x79, 0xe6, 0xa5, 0xe5, 0x0b, 0x89, 0x71, 0xb1, 0x65, 0xa4, 0x66, 0xa6, 0x67, 0x94,
0x48, 0x30, 0x2a, 0x30, 0x6a, 0xb0, 0x04, 0x41, 0x79, 0x42, 0x5a, 0x5c, 0x02, 0xc9, 0xa5, 0x45,
0x45, 0xa9, 0x79, 0x25, 0x60, 0x0d, 0x1e, 0x89, 0xc5, 0x19, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x3c,
0x41, 0x18, 0xe2, 0x42, 0x3a, 0x5c, 0x82, 0x05, 0x45, 0xa9, 0x65, 0x99, 0xf9, 0xa5, 0xc5, 0x08,
0xc5, 0xcc, 0x60, 0xc5, 0x98, 0x12, 0x4e, 0xba, 0x51, 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49,
0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x19, 0x95, 0x05, 0xa9, 0x45, 0x10, 0xc7, 0xea, 0xa7, 0x25, 0x26,
0x15, 0x65, 0x26, 0xeb, 0x83, 0xdd, 0x5c, 0xac, 0x0f, 0x71, 0x73, 0x12, 0x1b, 0x98, 0x6b, 0x0c,
0x08, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x93, 0x7a, 0x44, 0xd9, 0x00, 0x00, 0x00,
}<|fim▁end|> | |
<|file_name|>flashVars.js<|end_file_name|><|fim▁begin|>//>>built
define("dojox/embed/flashVars",["dojo"],function(_1){
_1.deprecated("dojox.embed.flashVars","Will be removed in 2.0","2.0");
var _2={serialize:function(n,o){
var _3=function(_4){
if(typeof _4=="string"){
_4=_4.replace(/;/g,"_sc_");
_4=_4.replace(/\./g,"_pr_");
_4=_4.replace(/\:/g,"_cl_");
}
return _4;
};
var df=dojox.embed.flashVars.serialize;
var _5="";
if(_1.isArray(o)){
for(var i=0;i<o.length;i++){
_5+=df(n+"."+i,_3(o[i]))+";";
}
return _5.replace(/;{2,}/g,";");
}else{
if(_1.isObject(o)){
for(var nm in o){
_5+=df(n+"."+nm,_3(o[nm]))+";";
}
return _5.replace(/;{2,}/g,";");
}
}
return n+":"+o;
}};
_1.setObject("dojox.embed.flashVars",_2);
return _2;
<|fim▁hole|><|fim▁end|> | }); |
<|file_name|>apps.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class LoCatrConfig(AppConfig):<|fim▁hole|><|fim▁end|> | name = 'LoCatr' |
<|file_name|>TestErasureCoderBase.java<|end_file_name|><|fim▁begin|>/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.erasurecode.coder;
import org.apache.hadoop.io.erasurecode.ECBlock;
import org.apache.hadoop.io.erasurecode.ECBlockGroup;
import org.apache.hadoop.io.erasurecode.ECChunk;
import org.apache.hadoop.io.erasurecode.TestCoderBase;
import java.lang.reflect.Constructor;
/**
* Erasure coder test base with utilities.<|fim▁hole|> */
public abstract class TestErasureCoderBase extends TestCoderBase {
protected Class<? extends ErasureCoder> encoderClass;
protected Class<? extends ErasureCoder> decoderClass;
private ErasureCoder encoder;
private ErasureCoder decoder;
protected int numChunksInBlock = 16;
/**
* It's just a block for this test purpose. We don't use HDFS block here
* at all for simple.
*/
protected static class TestBlock extends ECBlock {
private ECChunk[] chunks;
// For simple, just assume the block have the chunks already ready.
// In practice we need to read/write chunks from/to the block via file IO.
public TestBlock(ECChunk[] chunks) {
this.chunks = chunks;
}
}
/**
* Generating source data, encoding, recovering and then verifying.
* RawErasureCoder mainly uses ECChunk to pass input and output data buffers,
* it supports two kinds of ByteBuffers, one is array backed, the other is
* direct ByteBuffer. Have usingDirectBuffer to indicate which case to test.
* @param usingDirectBuffer
*/
protected void testCoding(boolean usingDirectBuffer) {
this.usingDirectBuffer = usingDirectBuffer;
prepareCoders();
/**
* The following runs will use 3 different chunkSize for inputs and outputs,
* to verify the same encoder/decoder can process variable width of data.
*/
performTestCoding(baseChunkSize, true);
performTestCoding(baseChunkSize - 17, false);
performTestCoding(baseChunkSize + 16, true);
}
private void performTestCoding(int chunkSize, boolean usingSlicedBuffer) {
setChunkSize(chunkSize);
prepareBufferAllocator(usingSlicedBuffer);
// Generate data and encode
ECBlockGroup blockGroup = prepareBlockGroupForEncoding();
// Backup all the source chunks for later recovering because some coders
// may affect the source data.
TestBlock[] clonedDataBlocks =
cloneBlocksWithData((TestBlock[]) blockGroup.getDataBlocks());
TestBlock[] parityBlocks = (TestBlock[]) blockGroup.getParityBlocks();
ErasureCodingStep codingStep;
codingStep = encoder.calculateCoding(blockGroup);
performCodingStep(codingStep);
// Erase specified sources but return copies of them for later comparing
TestBlock[] backupBlocks = backupAndEraseBlocks(clonedDataBlocks, parityBlocks);
// Decode
blockGroup = new ECBlockGroup(clonedDataBlocks, blockGroup.getParityBlocks());
codingStep = decoder.calculateCoding(blockGroup);
performCodingStep(codingStep);
// Compare
compareAndVerify(backupBlocks, codingStep.getOutputBlocks());
}
/**
* This is typically how a coding step should be performed.
* @param codingStep
*/
private void performCodingStep(ErasureCodingStep codingStep) {
// Pretend that we're opening these input blocks and output blocks.
ECBlock[] inputBlocks = codingStep.getInputBlocks();
ECBlock[] outputBlocks = codingStep.getOutputBlocks();
// We allocate input and output chunks accordingly.
ECChunk[] inputChunks = new ECChunk[inputBlocks.length];
ECChunk[] outputChunks = new ECChunk[outputBlocks.length];
for (int i = 0; i < numChunksInBlock; ++i) {
// Pretend that we're reading input chunks from input blocks.
for (int j = 0; j < inputBlocks.length; ++j) {
inputChunks[j] = ((TestBlock) inputBlocks[j]).chunks[i];
}
// Pretend that we allocate and will write output results to the blocks.
for (int j = 0; j < outputBlocks.length; ++j) {
outputChunks[j] = allocateOutputChunk();
((TestBlock) outputBlocks[j]).chunks[i] = outputChunks[j];
}
// Given the input chunks and output chunk buffers, just call it !
codingStep.performCoding(inputChunks, outputChunks);
}
codingStep.finish();
}
/**
* Compare and verify if recovered blocks data are the same with the erased
* blocks data.
* @param erasedBlocks
* @param recoveredBlocks
*/
protected void compareAndVerify(ECBlock[] erasedBlocks,
ECBlock[] recoveredBlocks) {
for (int i = 0; i < erasedBlocks.length; ++i) {
compareAndVerify(((TestBlock) erasedBlocks[i]).chunks, ((TestBlock) recoveredBlocks[i]).chunks);
}
}
private void prepareCoders() {
if (encoder == null) {
encoder = createEncoder();
}
if (decoder == null) {
decoder = createDecoder();
}
}
/**
* Create the raw erasure encoder to test
* @return
*/
protected ErasureCoder createEncoder() {
ErasureCoder encoder;
try {
Constructor<? extends ErasureCoder> constructor =
(Constructor<? extends ErasureCoder>)
encoderClass.getConstructor(int.class, int.class);
encoder = constructor.newInstance(numDataUnits, numParityUnits);
} catch (Exception e) {
throw new RuntimeException("Failed to create encoder", e);
}
encoder.setConf(getConf());
return encoder;
}
/**
* create the raw erasure decoder to test
* @return
*/
protected ErasureCoder createDecoder() {
ErasureCoder decoder;
try {
Constructor<? extends ErasureCoder> constructor =
(Constructor<? extends ErasureCoder>)
decoderClass.getConstructor(int.class, int.class);
decoder = constructor.newInstance(numDataUnits, numParityUnits);
} catch (Exception e) {
throw new RuntimeException("Failed to create decoder", e);
}
decoder.setConf(getConf());
return decoder;
}
/**
* Prepare a block group for encoding.
* @return
*/
protected ECBlockGroup prepareBlockGroupForEncoding() {
ECBlock[] dataBlocks = new TestBlock[numDataUnits];
ECBlock[] parityBlocks = new TestBlock[numParityUnits];
for (int i = 0; i < numDataUnits; i++) {
dataBlocks[i] = generateDataBlock();
}
for (int i = 0; i < numParityUnits; i++) {
parityBlocks[i] = allocateOutputBlock();
}
return new ECBlockGroup(dataBlocks, parityBlocks);
}
/**
* Generate random data and return a data block.
* @return
*/
protected ECBlock generateDataBlock() {
ECChunk[] chunks = new ECChunk[numChunksInBlock];
for (int i = 0; i < numChunksInBlock; ++i) {
chunks[i] = generateDataChunk();
}
return new TestBlock(chunks);
}
/**
* Erase blocks to test the recovering of them. Before erasure clone them
* first so could return themselves.
* @param dataBlocks
* @return clone of erased dataBlocks
*/
protected TestBlock[] backupAndEraseBlocks(TestBlock[] dataBlocks,
TestBlock[] parityBlocks) {
TestBlock[] toEraseBlocks = new TestBlock[erasedDataIndexes.length +
erasedParityIndexes.length];
int idx = 0;
TestBlock block;
for (int i = 0; i < erasedParityIndexes.length; i++) {
block = parityBlocks[erasedParityIndexes[i]];
toEraseBlocks[idx ++] = cloneBlockWithData(block);
eraseDataFromBlock(block);
}
for (int i = 0; i < erasedDataIndexes.length; i++) {
block = dataBlocks[erasedDataIndexes[i]];
toEraseBlocks[idx ++] = cloneBlockWithData(block);
eraseDataFromBlock(block);
}
return toEraseBlocks;
}
/**
* Allocate an output block. Note the chunk buffer will be allocated by the
* up caller when performing the coding step.
* @return
*/
protected TestBlock allocateOutputBlock() {
ECChunk[] chunks = new ECChunk[numChunksInBlock];
return new TestBlock(chunks);
}
/**
* Clone blocks with data copied along with, avoiding affecting the original
* blocks.
* @param blocks
* @return
*/
protected TestBlock[] cloneBlocksWithData(TestBlock[] blocks) {
TestBlock[] results = new TestBlock[blocks.length];
for (int i = 0; i < blocks.length; ++i) {
results[i] = cloneBlockWithData(blocks[i]);
}
return results;
}
/**
* Clone exactly a block, avoiding affecting the original block.
* @param block
* @return a new block
*/
protected TestBlock cloneBlockWithData(TestBlock block) {
ECChunk[] newChunks = cloneChunksWithData(block.chunks);
return new TestBlock(newChunks);
}
/**
* Erase data from a block.
*/
protected void eraseDataFromBlock(TestBlock theBlock) {
eraseDataFromChunks(theBlock.chunks);
theBlock.setErased(true);
}
}<|fim▁end|> | |
<|file_name|>en-au.js<|end_file_name|><|fim▁begin|>/*
Copyright (c) 2003-2019, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/
CKEDITOR.plugins.setLang( 'fakeobjects', 'en-au', {
anchor: 'Anchor',
flash: 'Flash Animation',
hiddenfield: 'Hidden Field',
iframe: 'IFrame',
<|fim▁hole|><|fim▁end|> | unknown: 'Unknown Object'
} ); |
<|file_name|>config.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
simulation = "L500_NR_tracers"
dirs = {"sim_root_dir" : "/home/fas/nagai/kln26/group_scratch/L500_NR_tracers",
"db_dir" : "..",
"halo_catalog_dir" : "HC.500",
"profiles_dir" : "profiles",
"logs_dir" : "logs" }
hc_radius = "500c"
halo_list_radii = ["200m", "500c", "200c", "vir"]
h_inverse = {"in_profiles": True,
"in_halo_catalog" : True,
"into_db" : True,
"h" : 0.7 }
initial_migration = True
set_main_halos = True
cluster_ids_file = "cluster_ids.dat"
do_write_halos = True
do_write_profiles = True
enabled_hydro = True<|fim▁hole|><|fim▁end|> | enabled_star_formation = False
enabled_epnoneq = False |
<|file_name|>documentlist.js<|end_file_name|><|fim▁begin|>if (NABUCCO === undefined || !NABUCCO)
{
var NABUCCO = {};
}
(function()
{
NABUCCO.component = NABUCCO.component || {};
NABUCCO.component.CMISDocumentList = function(htmlId)
{
// replace Bubbling.on with NO-OP, so the superclass can't register its event listeners (never-ever)
var on = YAHOO.Bubbling.on;
YAHOO.Bubbling.on = function()
{
// NO-OP
return;
};
try
{
NABUCCO.component.CMISDocumentList.superclass.constructor.call(this, htmlId);
// restore
YAHOO.Bubbling.on = on;
}
catch (e)
{
// restore
YAHOO.Bubbling.on = on;
throw e;
}
this.name = "NABUCCO.component.CMISDocumentList";
Alfresco.util.ComponentManager.reregister(this);
this.dataSourceUrl = Alfresco.constants.URL_SERVICECONTEXT + 'nabucco/components/cmis-documentlist/data?';
if (htmlId !== "null")
{
// we actually want to react to metadataRefresh
YAHOO.Bubbling.on("metadataRefresh", this.onDocListRefresh, this);
YAHOO.Bubbling.on("filterChanged", this.onFilterChanged, this);
YAHOO.Bubbling.on("changeFilter", this.onChangeFilter, this);
}
this.dragAndDropAllowed = false;
this.setOptions(<|fim▁hole|> {
preferencePrefix : "org.nabucco.cmis-documentlibrary"
});
};
YAHOO.extend(NABUCCO.component.CMISDocumentList, Alfresco.DocumentList,
{
onSortAscending : function()
{
NABUCCO.component.CMISDocumentList.withPreferencePrefixOverride.call(this,
NABUCCO.component.CMISDocumentList.superclass.onSortAscending, arguments);
},
onSortField : function()
{
NABUCCO.component.CMISDocumentList.withPreferencePrefixOverride.call(this,
NABUCCO.component.CMISDocumentList.superclass.onSortField, arguments);
},
onShowFolders : function()
{
NABUCCO.component.CMISDocumentList.withPreferencePrefixOverride.call(this,
NABUCCO.component.CMISDocumentList.superclass.onShowFolders, arguments);
},
onViewRendererSelect : function()
{
NABUCCO.component.CMISDocumentList.withPreferencePrefixOverride.call(this,
NABUCCO.component.CMISDocumentList.superclass.onViewRendererSelect, arguments);
},
onSimpleDetailed : function()
{
NABUCCO.component.CMISDocumentList.withPreferencePrefixOverride.call(this,
NABUCCO.component.CMISDocumentList.superclass.onSimpleDetailed, arguments);
},
_buildDocListParams : function(p_obj)
{
var params = "", obj =
{
path : this.currentPath
};
// Pagination in use?
if (this.options.usePagination)
{
obj.page = this.widgets.paginator.getCurrentPage() || this.currentPage;
obj.pageSize = this.widgets.paginator.getRowsPerPage();
}
// Passed-in overrides
if (typeof p_obj === "object")
{
obj = YAHOO.lang.merge(obj, p_obj);
}
params = "path=" + obj.path;
// Paging parameters
if (this.options.usePagination)
{
params += "&pageSize=" + obj.pageSize + "&pos=" + obj.page;
}
// Sort parameters
params += "&sortAsc=" + this.options.sortAscending + "&sortField=" + encodeURIComponent(this.options.sortField);
// View mode and No-cache
params += "&view=" + this.actionsView + "&noCache=" + new Date().getTime();
return params;
}
});
NABUCCO.component.CMISDocumentList.withPreferencePrefixOverride = function(callback, args)
{
var prefSet, result, scope = this;
if (YAHOO.lang.isString(this.options.preferencePrefix) && this.options.preferencePrefix !== "org.alfresco.share.documentList")
{
prefSet = this.services.preferences.set;
this.services.preferences.set = function(prefKey, value, responseConfig)
{
prefKey = prefKey.replace("org.alfresco.share.documentList.", scope.options.preferencePrefix + '.');
return prefSet.call(this, prefKey, value, responseConfig);
};
try
{
result = callback.apply(this, args);
this.services.preferences.set = prefSet;
}
catch (e)
{
this.services.preferences.set = prefSet;
throw e;
}
return result;
}
return callback.apply(this, args);
};
// necessary to fix default thumbnail icons for non-standard node types, especially non-file-folder types
NABUCCO.component.CMISDocumentList.withFileIconOverride = function(callback, args)
{
var getFileIcon = Alfresco.util.getFileIcon, node = args[1].getData().jsNode, result;
Alfresco.util.getFileIcon = function(p_fileName, p_fileType, p_iconSize, p_fileParentType)
{
if (p_fileType === undefined)
{
if (node.isLink && YAHOO.lang.isObject(node.linkedNode) && YAHOO.lang.isString(node.linkedNode.type))
{
p_fileType = node.linkedNode.type;
}
else
{
p_fileType = node.type;
}
}
return getFileIcon.call(Alfresco.util, p_fileName, p_fileType, p_iconSize, p_fileParentType);
};
Alfresco.util.getFileIcon.types = getFileIcon.types;
try
{
result = callback.apply(this, args);
Alfresco.util.getFileIcon = getFileIcon;
}
catch (e)
{
Alfresco.util.getFileIcon = getFileIcon;
throw e;
}
return result;
};
// necessary to fix thumbnail URL generation to avoid HTTP 400 responses for attempts on items without content
NABUCCO.component.CMISDocumentList.withThumbnailOverride = function(callback, args)
{
var generateThumbnailUrl = Alfresco.DocumentList.generateThumbnailUrl, result;
Alfresco.DocumentList.generateThumbnailUrl = function(record)
{
var node = record.jsNode;
if ((node.isContent || (node.isLink && node.linkedNode.isContent))
&& (YAHOO.lang.isString(node.contentURL) || (node.isLink && YAHOO.lang.isString(node.linkedNode.contentURL))))
{
return generateThumbnailUrl(record);
}
return Alfresco.constants.URL_RESCONTEXT + 'components/images/filetypes/' + Alfresco.util.getFileIcon(record.displayName);
};
try
{
result = callback.apply(this, args);
Alfresco.DocumentList.generateThumbnailUrl = generateThumbnailUrl;
}
catch (e)
{
Alfresco.DocumentList.generateThumbnailUrl = generateThumbnailUrl;
throw e;
}
return result;
};
// adapt the document list fnRenderCellThumbnail to remove preview when no preview can be generated (node without content) and use
// information available for file icon determination
Alfresco.DocumentList.prototype._nbc_fnRenderCellThumbnail = Alfresco.DocumentList.prototype.fnRenderCellThumbnail;
Alfresco.DocumentList.prototype.fnRenderCellThumbnail = function(renderChain)
{
var scope = this, realRenderer = this._nbc_fnRenderCellThumbnail(), renderCallback = renderChain;
return function(elCell, oRecord, oColumn, oData)
{
var id, node = oRecord.getData().jsNode;
NABUCCO.component.CMISDocumentList.withFileIconOverride.call(this, function()
{
NABUCCO.component.CMISDocumentList.withThumbnailOverride.call(this, function()
{
if (YAHOO.lang.isFunction(renderCallback))
{
renderCallback.call(this, realRenderer, arguments);
}
else
{
realRenderer.apply(this, arguments);
}
}, arguments);
}, [ elCell, oRecord, oColumn, oData ]);
// OOTB view renderer always prepare preview even if node has no content
if (!(node.isContainer || (node.isLink && node.linkedNode.isContainer))
&& !(YAHOO.lang.isString(node.contentURL) || (node.isLink && YAHOO.lang.isString(node.linkedNode.contentURL))))
{
// check for any thumbnails that are not supported due to node without content
id = scope.id + '-preview-' + oRecord.getId();
if (Alfresco.util.arrayContains(scope.previewTooltips, id))
{
scope.previewTooltips = Alfresco.util.arrayRemove(scope.previewTooltips, id);
}
}
};
};
// adapt size renderer for items without content as well as links
Alfresco.DocumentList.prototype._nbc_setupMetadataRenderers = Alfresco.DocumentList.prototype._setupMetadataRenderers;
Alfresco.DocumentList.prototype._setupMetadataRenderers = function()
{
this._nbc_setupMetadataRenderers();
/**
* File size
*/
this.registerRenderer("size", function(record, label)
{
var jsNode = record.jsNode, html = "";
if ((YAHOO.lang.isString(jsNode.contentURL) || YAHOO.lang.isNumber(jsNode.size)) || (jsNode.isLink && (YAHOO.lang.isString(jsNode.linkedNode.contentURL) || YAHOO.lang.isNumber(jsNode.linkedNode.size))))
{
html += '<span class="item">' + label
+ Alfresco.util.formatFileSize(YAHOO.lang.isString(jsNode.contentURL) || YAHOO.lang.isNumber(jsNode.size) ? jsNode.size : jsNode.linkedNode.size)
+ '</span>';
}
return html;
});
};
(function()
{
// additional properties for jsNode
var additionalJsNodeProps = [ "isContent" ];
// adapt Node to support our additional properties
Alfresco.util._nbc_Node = Alfresco.util.Node;
Alfresco.util.Node = function(p_node)
{
var jsNode = Alfresco.util._nbc_Node(p_node), idx, propName;
if (YAHOO.lang.isObject(jsNode))
{
for (idx = 0; idx < additionalJsNodeProps.length; idx++)
{
propName = additionalJsNodeProps[idx];
// override only if no such property has been defined yet
if (p_node.hasOwnProperty(propName) && !jsNode.hasOwnProperty(propName))
{
if (propName.indexOf("Node") !== -1 && propName.substr(propName.indexOf("Node")) === "Node"
&& YAHOO.lang.isString(p_node[propName]))
{
jsNode[propName] = new Alfresco.util.NodeRef(p_node[propName]);
}
else
{
jsNode[propName] = p_node[propName];
}
}
}
}
return jsNode;
};
}());
Alfresco.util.getFileIcon.types["D:cmiscustom:document"] = "file";
Alfresco.util.getFileIcon.types["cmis:document"] = "file";
Alfresco.util.getFileIcon.types["cmis:folder"] = "folder";
}());<|fim▁end|> | |
<|file_name|>blockcutter_test.go<|end_file_name|><|fim▁begin|>/*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package blockcutter
import (
"testing"
<|fim▁hole|>
logging "github.com/op/go-logging"
"github.com/stretchr/testify/assert"
)
func init() {
logging.SetLevel(logging.DEBUG, "")
}
var goodTx = &cb.Envelope{Payload: []byte("GOOD")}
var goodTxLarge = &cb.Envelope{Payload: []byte("GOOD"), Signature: make([]byte, 1000)}
func TestNormalBatch(t *testing.T) {
maxMessageCount := uint32(2)
absoluteMaxBytes := uint32(1000)
preferredMaxBytes := uint32(100)
r := NewReceiverImpl(&mockconfig.Orderer{BatchSizeVal: &ab.BatchSize{MaxMessageCount: maxMessageCount, AbsoluteMaxBytes: absoluteMaxBytes, PreferredMaxBytes: preferredMaxBytes}})
batches, ok := r.Ordered(goodTx)
assert.Nil(t, batches, "Should not have created batch")
assert.True(t, ok, "Should have enqueued message into batch")
batches, ok = r.Ordered(goodTx)
assert.NotNil(t, batches, "Should have created batch")
assert.True(t, ok, "Should have enqueued second message into batch")
}
func TestBatchSizePreferredMaxBytesOverflow(t *testing.T) {
goodTxBytes := messageSizeBytes(goodTx)
// set preferred max bytes such that 10 goodTx will not fit
preferredMaxBytes := goodTxBytes*10 - 1
// set message count > 9
maxMessageCount := uint32(20)
r := NewReceiverImpl(&mockconfig.Orderer{BatchSizeVal: &ab.BatchSize{MaxMessageCount: maxMessageCount, AbsoluteMaxBytes: preferredMaxBytes * 2, PreferredMaxBytes: preferredMaxBytes}})
// enqueue 9 messages
for i := 0; i < 9; i++ {
batches, ok := r.Ordered(goodTx)
assert.Nil(t, batches, "Should not have created batch")
assert.True(t, ok, "Should have enqueued message into batch")
}
// next message should create batch
batches, ok := r.Ordered(goodTx)
assert.NotNil(t, batches, "Should have created batch")
assert.True(t, ok, "Should have enqueued message into batch")
assert.Len(t, batches, 1, "Should have created one batch")
assert.Len(t, batches[0], 9, "Should have had nine normal tx in the batch")
// force a batch cut
messageBatch := r.Cut()
assert.NotNil(t, batches, "Should have created batch")
assert.Len(t, messageBatch, 1, "Should have had one tx in the batch")
}
func TestBatchSizePreferredMaxBytesOverflowNoPending(t *testing.T) {
goodTxLargeBytes := messageSizeBytes(goodTxLarge)
// set preferred max bytes such that 1 goodTxLarge will not fit
preferredMaxBytes := goodTxLargeBytes - 1
// set message count > 1
maxMessageCount := uint32(20)
r := NewReceiverImpl(&mockconfig.Orderer{BatchSizeVal: &ab.BatchSize{MaxMessageCount: maxMessageCount, AbsoluteMaxBytes: preferredMaxBytes * 3, PreferredMaxBytes: preferredMaxBytes}})
// submit large message
batches, ok := r.Ordered(goodTxLarge)
assert.NotNil(t, batches, "Should have created batch")
assert.True(t, ok, "Should have enqueued message into batch")
assert.Len(t, batches, 1, "Should have created one batch")
assert.Len(t, batches[0], 1, "Should have had one normal tx in the batch")
}<|fim▁end|> | mockconfig "github.com/hyperledger/fabric/common/mocks/config"
cb "github.com/hyperledger/fabric/protos/common"
ab "github.com/hyperledger/fabric/protos/orderer" |
<|file_name|>test_treeview.py<|end_file_name|><|fim▁begin|>import unittest
import random
import sys
import os
ETEPATH = os.path.abspath(os.path.split(os.path.realpath(__file__))[0]+'/../')
sys.path.insert(0, ETEPATH)
from ete2 import Tree, TreeStyle, NodeStyle, PhyloTree, faces, random_color
from ete2.treeview.faces import *
from ete2.treeview.main import _NODE_TYPE_CHECKER, FACE_POSITIONS
sys.path.insert(0, os.path.join(ETEPATH, "examples/treeview"))
import face_grid, bubble_map, item_faces, node_style, node_background, face_positions, face_rotation, seq_motif_faces, barchart_and_piechart_faces
sys.path.insert(0, os.path.join(ETEPATH, "examples/phylogenies"))
import phylotree_visualization
CONT = 0
class Test_Coretype_Treeview(unittest.TestCase):
""" Tests tree basics. """
def test_renderer(self):
main_tree = Tree()
main_tree.dist = 0
t, ts = face_grid.get_example_tree()
t_grid = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_grid, 0, "aligned")
t, ts = bubble_map.get_example_tree()
t_bubble = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_bubble, 0, "aligned")
t, ts = item_faces.get_example_tree()
t_items = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_items, 0, "aligned")
t, ts = node_style.get_example_tree()
t_nodest = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_nodest, 0, "aligned")
t, ts = node_background.get_example_tree()
t_bg = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_bg, 0, "aligned")
t, ts = face_positions.get_example_tree()
t_fpos = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_fpos, 0, "aligned")
t, ts = phylotree_visualization.get_example_tree()
t_phylo = TreeFace(t, ts)
n = main_tree.add_child()<|fim▁hole|>
t, ts = face_rotation.get_example_tree()
temp_facet = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_facet, 0, "aligned")
t, ts = seq_motif_faces.get_example_tree()
temp_facet = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_facet, 0, "aligned")
t, ts = barchart_and_piechart_faces.get_example_tree()
temp_facet = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_facet, 0, "aligned")
#Test orphan nodes and trees with 0 branch length
t, ts = Tree(), TreeStyle()
t.populate(5)
for n in t.traverse():
n.dist = 0
temp_tface = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
ts.optimal_scale_level = "full"
temp_tface = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
ts = TreeStyle()
t.populate(5)
ts.mode = "c"
temp_tface = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
ts.optimal_scale_level = "full"
temp_tface = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
t, ts = Tree(), TreeStyle()
temp_tface = TreeFace(Tree('node;'), ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
t, ts = Tree(), TreeStyle()
ts.mode = "c"
temp_tface = TreeFace(Tree('node;'), ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
t, ts = Tree(), TreeStyle()
ts.mode = "c"
temp_tface = TreeFace(Tree(), ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
t, ts = Tree(), TreeStyle()
temp_tface = TreeFace(Tree(), ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
# TEST TIGHT TEST WRAPPING
chars = ["." "p", "j", "jJ"]
def layout(node):
global CONT
if CONT >= len(chars):
CONT = 0
if node.is_leaf():
node.img_style["size"] = 0
F2= AttrFace("name", tight_text=True)
F= TextFace(chars[CONT], tight_text=True)
F.inner_border.width = 0
F2.inner_border.width = 0
#faces.add_face_to_node(F ,node, 0, position="branch-right")
faces.add_face_to_node(F2 ,node, 1, position="branch-right")
CONT += 1
t = Tree()
t.populate(20, random_branches=True)
ts = TreeStyle()
ts.layout_fn = layout
ts.mode = "c"
ts.show_leaf_name = False
temp_tface = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
# MAIN TREE
ms = TreeStyle()
ms.mode = "r"
ms.show_leaf_name = False
main_tree.render('test.png', tree_style=ms)
main_tree.render('test.svg', tree_style=ms)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | n.add_face(t_phylo, 0, "aligned") |
<|file_name|>snort-rule-tool.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2
import sys
import math
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore
from mainwindow import Ui_MainWindow
from scapy.all import *
""" dump any string, ascii or encoded, to formatted hex output """
def dumpString(src, length=16):
FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.' for x in range(256)])
result = []
for i in xrange(0, len(src), length):
chars = src[i:i+length]
hex = ' '.join(["%02x" % ord(x) for x in chars])
printable = ''.join(["%s" % ((ord(x) <= 127 and FILTER[ord(x)]) or '.') for x in chars])
result.append(["%-*s" % (length*3, hex), "%s" % (printable,)])
return result
class Snort(QtWidgets.QMainWindow):
def __init__(self):
super(Snort, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.show()
self.index = 0
self.comboBoxes = [self.ui.srcCombo, self.ui.srcPortCombo, self.ui.destCombo, self.ui.destPortCombo]
self.defaultFmt = self.ui.hexColumn.currentCharFormat()
#setup scrollbars to be synced
self.hexSlider = self.ui.hexColumn.verticalScrollBar()
self.textSlider = self.ui.textColumn.verticalScrollBar()
self.hexSlider.valueChanged.connect(self.syncScroll)
self.textSlider.valueChanged.connect(self.syncScroll)
self.ui.packetBox.valueChanged.connect(self.changePacket)
self.ui.actionOpen.triggered.connect(self.openPCAP)
self.ui.contentEdit.textChanged.connect(self.contentChanged)
self.ui.flowCheck.stateChanged.connect(self.flowChecked)
self.ui.streamButton.clicked.connect(self.assembleStream)
self.ui.flowCombo.currentTextChanged.connect(self.buildRule)
self.ui.actionCombo.currentTextChanged.connect(self.buildRule)
self.ui.protoCombo.currentTextChanged.connect(self.buildRule)
self.ui.srcCombo.currentTextChanged.connect(self.buildRule)
self.ui.srcPortCombo.currentTextChanged.connect(self.buildRule)
self.ui.dirCombo.currentTextChanged.connect(self.buildRule)
self.ui.destCombo.currentTextChanged.connect(self.buildRule)
self.ui.destPortCombo.currentTextChanged.connect(self.buildRule)
self.streams = []
def syncScroll(self, value):
self.textSlider.setValue(value)
self.hexSlider.setValue(value)
def changePacket(self):
self.index = self.ui.packetBox.value() - 1
self.readPacket()
def findStreams(self):
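        # Reassemble TCP streams: for each SYN, find the matching SYN/ACK and ACK of the
        # handshake, then follow sequence/acknowledgement numbers to collect the rest.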
tcp_streams = self.packets.filter(lambda p: p.haslayer(TCP))
self.streams = []
for syn in tcp_streams.filter(lambda p: p[TCP].flags & 0x02):
for synack in tcp_streams.filter(lambda p: p[TCP].flags & 0x12 and p[TCP].ack == syn.seq + 1):
ack = tcp_streams.filter(lambda p: p[TCP].flags & 0x10 and p[TCP].ack == synack.seq + 1)
if ack:
srcport = syn[TCP].sport
dstport = syn[TCP].dport
L3 = IP
try:
#try underlayer
foot = syn[TCP].underlayer
srcip = foot.src
dstip = foot.dst
if type(foot) == IPv6:
L3 = IPv6
except:
#try other, but upper layer
if IPv6 in syn:
srcip = syn[IPv6].src
dstip = syn[IPv6].dst
L3 = IPv6
                        elif IP in syn:
srcip = syn[IP].src
dstip = syn[IP].dst
else:
continue
ip_pair = (srcip,dstip)
port_pair = (srcport,dstport)
filtered_stream = tcp_streams.filter(lambda p: p[TCP].dport in port_pair and \
p[TCP].sport in port_pair and \
p[L3].src in ip_pair and \
p[L3].dst in ip_pair)
assembled_stream = [syn,synack,ack[0]]
while True:
client_next_seq = assembled_stream[-1][TCP].seq
server_next_seq = assembled_stream[-1][TCP].ack
next = filtered_stream.filter(lambda p: p.seq in (client_next_seq,server_next_seq) and \
not p in assembled_stream)
if not next:
break
for pkt in next:
assembled_stream.append(pkt)
self.streams.append(PacketList(assembled_stream))
def assembleStream(self):
pkt = self.packets[self.index]
self.ui.hexColumn.clear()
self.ui.textColumn.clear()
for stream in self.streams:
if pkt in stream:
thisStream = stream
break
streamText = "".join([str(packet) for packet in thisStream])
payload = dumpString(streamText)
for line in payload:
self.ui.hexColumn.appendPlainText(line[0])
self.ui.textColumn.appendPlainText(line[1])
def readPacket(self):
self.clearAll()
pkt = self.packets[self.index]
payload = dumpString(str(pkt))
for line in payload:
self.ui.hexColumn.appendPlainText(line[0])
self.ui.textColumn.appendPlainText(line[1])
if IP in pkt:
self.ui.protoCombo.setCurrentText("ip")
self.ui.srcCombo.insertItem(0, pkt[IP].src)
self.ui.destCombo.insertItem(0,pkt[IP].dst)
srcip = pkt[IP].src
if IPv6 in pkt:
self.ui.protoCombo.setCurrentText("ip")
self.ui.srcCombo.insertItem(0, pkt[IPv6].src)
self.ui.destCombo.insertItem(0,pkt[IPv6].dst)
srcip = pkt[IPv6].src
if TCP in pkt:
self.ui.protoCombo.setCurrentText("tcp")
self.ui.srcPortCombo.insertItem(0, str(pkt[TCP].sport))
self.ui.destPortCombo.insertItem(0, str(pkt[TCP].dport))
for stream in self.streams:
if pkt in stream:
self.ui.flowCheck.setChecked(True)
self.ui.streamButton.setEnabled(True)
client = stream[0]
if IP in client:
layer = IP
else:
layer = IPv6
if srcip == client[layer].src:
self.ui.flowCombo.setCurrentText("to_server")
elif srcip == client[layer].dst:
self.ui.flowCombo.setCurrentText("to_client")
if UDP in pkt:
self.ui.protoCombo.setCurrentText("udp")
self.ui.srcPortCombo.insertItem(0, str(pkt[UDP].sport))
self.ui.destPortCombo.insertItem(0, str(pkt[UDP].dport))
if ICMP in pkt:
self.ui.protoCombo.setCurrentText("icmp")
for combo in self.comboBoxes:
combo.setCurrentIndex(0)
self.buildRule()
self.textSlider.setValue(0)
def openPCAP(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open PCAP',filter='Packet Captures (*.cap *.pcap)')
if filename:
self.file = filename[0]
self.packets = rdpcap(self.file)
self.findStreams()
self.ui.packetBox.setRange(1, len(self.packets))
self.readPacket()
def contentChanged(self):
content = self.ui.contentEdit.text()
hexContent = self.ui.hexColumn.toPlainText().replace("\n", "")
textContent = self.ui.textColumn.toPlainText().replace("\n", "")
if self.ui.nocaseCheck.isChecked():
content = content.lower()
textContent = textContent.lower()
cursor = QtGui.QTextCursor(self.ui.hexColumn.document())
cursor.setPosition(0, QtGui.QTextCursor.MoveAnchor)
cursor.setPosition(self.ui.hexColumn.document().characterCount() - 1, QtGui.QTextCursor.KeepAnchor)
cursor.setCharFormat(self.defaultFmt)
cursor2 = QtGui.QTextCursor(self.ui.textColumn.document())
cursor2.setPosition(0, QtGui.QTextCursor.MoveAnchor)
cursor2.setPosition(self.ui.textColumn.document().characterCount() - 1, QtGui.QTextCursor.KeepAnchor)
cursor2.setCharFormat(self.defaultFmt)
matchPointer = 0
endPointer = 0
start = 0
end = 0
match = False
origContent = content
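        # The search string follows Snort's content syntax: segments wrapped in
        # |...| are hex byte values and are matched against the hex pane (three
        # characters per byte, "xx "), while plain segments are matched against
        # the ASCII pane; the offsets found here are mapped back to QTextCursor
        # positions further down, compensating for the newline ending each row.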
while content:
if content.startswith("|"):
if content.count("|") > 1:
content = content[1:]
index = content.index("|")
search = content[0:index]
content = content[index + 1:]
else:
search = content[1:]
content = None
if search and \
(not match and search in hexContent[endPointer:]) or \
(match and hexContent[endPointer:endPointer + len(search)] == search):
if not match:
end = hexContent[endPointer:].index(search) + len(search) + endPointer
start = hexContent[endPointer:].index(search) + endPointer
match = True
matchPointer = end
else:
end = end + len(search)
endPointer = end
elif match:
content = origContent
match = False
start = 0
end = 0
endPointer = matchPointer
matchPointer = 0
else:
break
else:
if "|" in content:
search = content[0:content.index("|")]
content = content[content.index("|"):]
else:
search = content
content = None
textPointer = int(math.ceil(endPointer / 3.0))
if search and \
(not match and search in textContent[textPointer:]) or \
(match and textContent[textPointer:len(search) + textPointer] == search):
if not match:
end = ((textContent[textPointer:].index(search) + len(search)) * 3) + endPointer
start = (textContent[textPointer:].index(search) * 3) + endPointer
match = True
matchPointer = end
else:
end = end + (len(search) * 3) + 1
endPointer = end
elif match:
content = origContent
match = False
start = 0
end = 0
endPointer = matchPointer
matchPointer = 0
else:
break
if match:
start = start + (start / 47)
end = end + (end / 47)
fmt = QtGui.QTextCharFormat()
fmt.setForeground(QtCore.Qt.red)
cursor.setPosition(start, QtGui.QTextCursor.MoveAnchor)
cursor.setPosition(end, QtGui.QTextCursor.KeepAnchor)
cursor.setCharFormat(fmt)
cursor2.setPosition(start / 3, QtGui.QTextCursor.MoveAnchor)
cursor2.setPosition(math.ceil(end / 3.0), QtGui.QTextCursor.KeepAnchor)
cursor2.setCharFormat(fmt)
self.ui.depthEdit.setText(str(int(math.ceil(end / 3.0))))
self.ui.offsetEdit.setText(str(start / 3))
def clearAll(self):
for combo in self.comboBoxes:
combo.clear()
combo.addItem("any")
self.ui.destPortCombo.addItem("any")
self.ui.hexColumn.clear()
self.ui.textColumn.clear()
self.ui.ruleText.clear()
self.ui.contentEdit.clear()
self.ui.flowCheck.setChecked(False)
self.ui.flowCombo.setCurrentText("established")
self.ui.flowCombo.setEnabled(False)
self.ui.streamButton.setEnabled(False)
self.ui.depthCheck.setChecked(False)
self.ui.depthEdit.clear()
self.ui.depthEdit.setEnabled(False)
self.ui.offsetCheck.setChecked(False)
self.ui.offsetEdit.clear()
self.ui.offsetEdit.setEnabled(False)
self.ui.distanceCheck.setChecked(False)
self.ui.distanceEdit.clear()
self.ui.distanceEdit.setEnabled(False)
self.ui.withinCheck.setChecked(False)
self.ui.withinEdit.clear()
self.ui.withinEdit.setEnabled(False)
self.ui.nocaseCheck.setChecked(False)
def flowChecked(self):
self.ui.flowCombo.setEnabled(self.ui.flowCheck.isChecked())
self.buildRule()
def buildRule(self):
self.ui.ruleText.clear()
options = ""
if self.ui.flowCheck.isChecked():
options += "flow: %s;" % (self.ui.flowCombo.currentText(), )
rule = "%s %s %s %s %s %s %s {%s}" % (
self.ui.actionCombo.currentText(),
self.ui.protoCombo.currentText(),
self.ui.srcCombo.currentText(),<|fim▁hole|> self.ui.destPortCombo.currentText(),
options)
self.ui.ruleText.appendPlainText(rule)
def main():
app = QtWidgets.QApplication(sys.argv)
snort = Snort()
sys.exit(app.exec_())
if __name__ == '__main__':
main()<|fim▁end|> | self.ui.srcPortCombo.currentText(),
self.ui.dirCombo.currentText(),
self.ui.destCombo.currentText(), |
<|file_name|>ciphersuite.rs<|end_file_name|><|fim▁begin|>struct Ciphersuite {
code: u16,
kex_algo: ~str,
sig_algo: ~str,
cipher: ~str,
cipher_keylen: u8,
mac: ~str
}
impl Ciphersuite {
static fn new(suite: u16, kex_algo: ~str, sig_algo: ~str,
cipher: ~str, cipher_keylen: u8, mac: ~str) -> Ciphersuite {
Ciphersuite {
code: suite,
kex_algo: kex_algo,
sig_algo: sig_algo,
cipher: cipher,
cipher_keylen: cipher_keylen,
mac: mac }
}
static fn from_code(suite: u16) -> Ciphersuite {
match suite {
// probably this should be a macro (or generated as in botan)
0x002f => { Ciphersuite::new(0x002f, ~"RSA", ~"RSA", ~"AES", 16, ~"SHA1") },
0x008A => { Ciphersuite::new(0x008A, ~"PSK", ~"PSK", ~"RC4", 16, ~"SHA1") },<|fim▁hole|> _ => { fail(~"No such ciphersuite") }
}
}
}
impl Ciphersuite: ToStr {
pure fn to_str() -> ~str {
let mut out: ~str = ~"TLS_";
out += if self.kex_algo != ~"RSA" { self.kex_algo + ~"_" } else { ~"" };
out += self.sig_algo + "_WITH_";
out += match (self.cipher, self.cipher_keylen) {
(~"AES", 16) => ~"AES_128",
(~"AES", 32) => ~"AES_256",
(~"RC4", 16) => ~"RC4_128",
_ => fail ~"Unknown cipher"
} + ~"_";
out += match self.mac {
~"SHA1" => ~"SHA",
~"SHA256" => ~"SHA256",
_ => fail ~"Unknown mac"
};
out
}
}
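// Illustrative trace (sketch): with the table above, Ciphersuite::from_code(0x002f).to_str()
// yields ~"TLS_RSA_WITH_AES_128_SHA" -- the kex algo is omitted when it is "RSA",
// the 16-byte key maps to the "AES_128" label, and "SHA1" is rendered as "SHA".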
#[cfg(test)]
mod tests {
#[test]
fn test() {
let psk = Ciphersuite::from_code(0x008A);
io::println(fmt!("%?", psk));
}
}<|fim▁end|> | |
<|file_name|>__manifest__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2019 Joan Marín <Github@JoanMarin>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Tax Group Types",
"category": "Financial",
"version": "10.0.1.0.0",<|fim▁hole|> "website": "https://github.com/odooloco/l10n-colombia",
"license": "AGPL-3",
"summary": "Types for Tax Groups",
"depends": [
"account_tax_group_menu",
],
"data": [
'security/ir.model.access.csv',
"views/account_tax_group_views.xml",
],
"installable": True,
}<|fim▁end|> | "author": "EXA Auto Parts Github@exaap, "
"Joan Marín Github@JoanMarin", |
<|file_name|>cmp.rs<|end_file_name|><|fim▁begin|>#![feature(core)]
extern crate core;
// macro_rules! e {
// ($e:expr) => { $e }
// }
// macro_rules! tuple_impls {
// ($(
// $Tuple:ident {<|fim▁hole|> // $(($idx:tt) -> $T:ident)+
// }
// )+) => {
// $(
// #[stable(feature = "rust1", since = "1.0.0")]
// impl<$($T:Clone),+> Clone for ($($T,)+) {
// fn clone(&self) -> ($($T,)+) {
// ($(e!(self.$idx.clone()),)+)
// }
// }
//
// #[stable(feature = "rust1", since = "1.0.0")]
// impl<$($T:PartialEq),+> PartialEq for ($($T,)+) {
// #[inline]
// fn eq(&self, other: &($($T,)+)) -> bool {
// e!($(self.$idx == other.$idx)&&+)
// }
// #[inline]
// fn ne(&self, other: &($($T,)+)) -> bool {
// e!($(self.$idx != other.$idx)||+)
// }
// }
//
// #[stable(feature = "rust1", since = "1.0.0")]
// impl<$($T:Eq),+> Eq for ($($T,)+) {}
//
// #[stable(feature = "rust1", since = "1.0.0")]
// impl<$($T:PartialOrd + PartialEq),+> PartialOrd for ($($T,)+) {
// #[inline]
// fn partial_cmp(&self, other: &($($T,)+)) -> Option<Ordering> {
// lexical_partial_cmp!($(self.$idx, other.$idx),+)
// }
// #[inline]
// fn lt(&self, other: &($($T,)+)) -> bool {
// lexical_ord!(lt, $(self.$idx, other.$idx),+)
// }
// #[inline]
// fn le(&self, other: &($($T,)+)) -> bool {
// lexical_ord!(le, $(self.$idx, other.$idx),+)
// }
// #[inline]
// fn ge(&self, other: &($($T,)+)) -> bool {
// lexical_ord!(ge, $(self.$idx, other.$idx),+)
// }
// #[inline]
// fn gt(&self, other: &($($T,)+)) -> bool {
// lexical_ord!(gt, $(self.$idx, other.$idx),+)
// }
// }
//
// #[stable(feature = "rust1", since = "1.0.0")]
// impl<$($T:Ord),+> Ord for ($($T,)+) {
// #[inline]
// fn cmp(&self, other: &($($T,)+)) -> Ordering {
// lexical_cmp!($(self.$idx, other.$idx),+)
// }
// }
//
// #[stable(feature = "rust1", since = "1.0.0")]
// impl<$($T:Default),+> Default for ($($T,)+) {
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// fn default() -> ($($T,)+) {
// ($({ let x: $T = Default::default(); x},)+)
// }
// }
// )+
// }
// }
// // Constructs an expression that performs a lexical ordering using method $rel.
// // The values are interleaved, so the macro invocation for
// // `(a1, a2, a3) < (b1, b2, b3)` would be `lexical_ord!(lt, a1, b1, a2, b2,
// // a3, b3)` (and similarly for `lexical_cmp`)
// macro_rules! lexical_ord {
// ($rel: ident, $a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
// if $a != $b { lexical_ord!($rel, $a, $b) }
// else { lexical_ord!($rel, $($rest_a, $rest_b),+) }
// };
// ($rel: ident, $a:expr, $b:expr) => { ($a) . $rel (& $b) };
// }
// macro_rules! lexical_partial_cmp {
// ($a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
// match ($a).partial_cmp(&$b) {
// Some(Equal) => lexical_partial_cmp!($($rest_a, $rest_b),+),
// ordering => ordering
// }
// };
// ($a:expr, $b:expr) => { ($a).partial_cmp(&$b) };
// }
// macro_rules! lexical_cmp {
// ($a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
// match ($a).cmp(&$b) {
// Equal => lexical_cmp!($($rest_a, $rest_b),+),
// ordering => ordering
// }
// };
// ($a:expr, $b:expr) => { ($a).cmp(&$b) };
// }
// tuple_impls! {
// Tuple1 {
// (0) -> A
// }
// Tuple2 {
// (0) -> A
// (1) -> B
// }
// Tuple3 {
// (0) -> A
// (1) -> B
// (2) -> C
// }
// Tuple4 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// }
// Tuple5 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// }
// Tuple6 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// (5) -> F
// }
// Tuple7 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// (5) -> F
// (6) -> G
// }
// Tuple8 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// (5) -> F
// (6) -> G
// (7) -> H
// }
// Tuple9 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// (5) -> F
// (6) -> G
// (7) -> H
// (8) -> I
// }
// Tuple10 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// (5) -> F
// (6) -> G
// (7) -> H
// (8) -> I
// (9) -> J
// }
// Tuple11 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// (5) -> F
// (6) -> G
// (7) -> H
// (8) -> I
// (9) -> J
// (10) -> K
// }
// Tuple12 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// (5) -> F
// (6) -> G
// (7) -> H
// (8) -> I
// (9) -> J
// (10) -> K
// (11) -> L
// }
// }
#[cfg(test)]
mod tests {
use core::cmp::Ordering::{self, Less, Equal, Greater};
macro_rules! cmp_test {
(
$($T:ident)+
) => (
{
let left: ($($T,)+) = ($($T::default(),)+);
let right: ($($T,)+) = ($($T::default() + 1 as $T,)+);
let result: Ordering = left.cmp(&right);
assert_eq!(result, Less);
}
{
let left: ($($T,)+) = ($($T::default(),)+);
let right: ($($T,)+) = ($($T::default(),)+);
let result: Ordering = left.cmp(&right);
assert_eq!(result, Equal);
}
{
let left: ($($T,)+) = ($($T::default() + 1 as $T,)+);
let right: ($($T,)+) = ($($T::default(),)+);
let result: Ordering = left.cmp(&right);
assert_eq!(result, Greater);
}
)
}
type A = u8;
type B = u16;
type C = u32;
type D = u64;
type E = usize;
type F = i8;
type G = i16;
type H = i32;
type I = i64;
#[test]
fn cmp_test1() {
cmp_test! { A B C D E F G H I };
}
}<|fim▁end|> | |
<|file_name|>test_dependencies.py<|end_file_name|><|fim▁begin|>import pytest
from cwltool.utils import onWindows
from .util import get_data, get_main_output, needs_docker
try:
from galaxy.tools import deps
except ImportError:
deps = None
@needs_docker
@pytest.mark.skipif(not deps, reason="galaxy-lib is not installed")
def test_biocontainers():
wflow = get_data("tests/seqtk_seq.cwl")
job = get_data("tests/seqtk_seq_job.json")
error_code, _, _ = get_main_output(
["--beta-use-biocontainers", wflow, job])
assert error_code == 0
@pytest.mark.skipif(onWindows(), reason="bioconda currently not working on MS Windows")
@pytest.mark.skipif(not deps, reason="galaxy-lib is not installed")
def test_bioconda():
wflow = get_data("tests/seqtk_seq.cwl")
job = get_data("tests/seqtk_seq_job.json")
error_code, _, stderr = get_main_output(
["--beta-conda-dependencies", "--debug", wflow, job])
assert error_code == 0, stderr
import os
from distutils import spawn
@pytest.mark.skipif(not spawn.find_executable("modulecmd"), reason="modulecmd not installed")
def test_modules():
wflow = get_data("tests/random_lines.cwl")
job = get_data("tests/random_lines_job.json")
os.environ["MODULEPATH"] = os.path.join(os.getcwd(), 'tests/test_deps_env/modulefiles')
error_code, _, stderr = get_main_output(
["--beta-dependency-resolvers-configuration",
"tests/test_deps_env_modules_resolvers_conf.yml", "--debug", wflow, job])<|fim▁hole|><|fim▁end|> |
assert error_code == 0, stderr |
<|file_name|>get_all_networks.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and<|fim▁hole|>"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
network_service = client.GetService('NetworkService', version='v201805')
networks = network_service.getAllNetworks()
# Print out some information for each network.
for network in networks:
print('Network with network code "%s" and display name "%s" was found.'
% (network['networkCode'], network['displayName']))
print '\nNumber of results found: %s' % len(networks)
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)<|fim▁end|> | # limitations under the License.
"""This example gets all networks. |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-<|fim▁hole|>
from patient_evaluation_report import *<|fim▁end|> | |
<|file_name|>test_tasks_helper.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Unit tests for LMS instructor-initiated background tasks helper functions.
Tests that CSV grade report generation works with unicode emails.
"""
import ddt
from mock import Mock, patch
import tempfile
from openedx.core.djangoapps.course_groups import cohorts
import unicodecsv
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from certificates.models import CertificateStatuses
from certificates.tests.factories import GeneratedCertificateFactory, CertificateWhitelistFactory
from course_modes.models import CourseMode
from courseware.tests.factories import InstructorFactory
from instructor_task.tests.test_base import InstructorTaskCourseTestCase, TestReportMixin, InstructorTaskModuleTestCase
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
import openedx.core.djangoapps.user_api.course_tag.api as course_tag_api
from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme
from shoppingcart.models import Order, PaidCourseRegistration, CourseRegistrationCode, Invoice, \
CourseRegistrationCodeInvoiceItem, InvoiceTransaction, Coupon
from student.tests.factories import UserFactory, CourseModeFactory
from student.models import CourseEnrollment, CourseEnrollmentAllowed, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED
from verify_student.tests.factories import SoftwareSecurePhotoVerificationFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
from instructor_task.models import ReportStore
from instructor_task.tasks_helper import (
cohort_students_and_upload,
upload_problem_responses_csv,
upload_grades_csv,
upload_problem_grade_report,
upload_students_csv,
upload_may_enroll_csv,
upload_enrollment_report,
upload_exec_summary_report,
generate_students_certificates,
)
from openedx.core.djangoapps.util.testing import ContentGroupTestCase, TestConditionalContent
@ddt.ddt
class TestInstructorGradeReport(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that CSV grade report generation works.
"""
def setUp(self):
super(TestInstructorGradeReport, self).setUp()
self.course = CourseFactory.create()
@ddt.data([u'[email protected]', u'ni\[email protected]'])
def test_unicode_emails(self, emails):
"""
        Test that students with unicode characters in their emails are handled.
"""
for i, email in enumerate(emails):
self.create_student('student{0}'.format(i), email)
self.current_task = Mock()
self.current_task.update_state = Mock()
with patch('instructor_task.tasks_helper._get_current_task') as mock_current_task:
mock_current_task.return_value = self.current_task
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
num_students = len(emails)
self.assertDictContainsSubset({'attempted': num_students, 'succeeded': num_students, 'failed': 0}, result)
@patch('instructor_task.tasks_helper._get_current_task')
@patch('instructor_task.tasks_helper.iterate_grades_for')
def test_grading_failure(self, mock_iterate_grades_for, _mock_current_task):
"""
Test that any grading errors are properly reported in the
progress dict and uploaded to the report store.
"""
# mock an error response from `iterate_grades_for`
mock_iterate_grades_for.return_value = [
(self.create_student('username', '[email protected]'), {}, 'Cannot grade student')
]
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 0, 'failed': 1}, result)
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
self.assertTrue(any('grade_report_err' in item[0] for item in report_store.links_for(self.course.id)))
def _verify_cell_data_for_user(self, username, course_id, column_header, expected_cell_content):
"""
Verify cell data in the grades CSV for a particular user.
"""
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_grades_csv(None, None, course_id, None, 'graded')
self.assertDictContainsSubset({'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
report_csv_filename = report_store.links_for(course_id)[0][0]
with open(report_store.path_to(course_id, report_csv_filename)) as csv_file:
for row in unicodecsv.DictReader(csv_file):
if row.get('username') == username:
self.assertEqual(row[column_header], expected_cell_content)
def test_cohort_data_in_grading(self):
"""
Test that cohort data is included in grades csv if cohort configuration is enabled for course.
"""
cohort_groups = ['cohort 1', 'cohort 2']
course = CourseFactory.create(cohort_config={'cohorted': True, 'auto_cohort': True,
'auto_cohort_groups': cohort_groups})
user_1 = 'user_1'
user_2 = 'user_2'
CourseEnrollment.enroll(UserFactory.create(username=user_1), course.id)
CourseEnrollment.enroll(UserFactory.create(username=user_2), course.id)
        # In auto cohorting, a group is assigned to a user only when the user visits a problem.
        # In the grading calculation we only add a group to the csv if the group is already
        # assigned to the user, rather than creating a group automatically at runtime.
self._verify_cell_data_for_user(user_1, course.id, 'Cohort Name', '')
self._verify_cell_data_for_user(user_2, course.id, 'Cohort Name', '')
def test_unicode_cohort_data_in_grading(self):
"""
Test that cohorts can contain unicode characters.
"""
course = CourseFactory.create(cohort_config={'cohorted': True})
# Create users and manually assign cohorts
user1 = UserFactory.create(username='user1')
user2 = UserFactory.create(username='user2')
CourseEnrollment.enroll(user1, course.id)
CourseEnrollment.enroll(user2, course.id)
professor_x = u'ÞrÖfessÖr X'
magneto = u'MàgnëtÖ'
cohort1 = CohortFactory(course_id=course.id, name=professor_x)
cohort2 = CohortFactory(course_id=course.id, name=magneto)
cohort1.users.add(user1)
cohort2.users.add(user2)
self._verify_cell_data_for_user(user1.username, course.id, 'Cohort Name', professor_x)
self._verify_cell_data_for_user(user2.username, course.id, 'Cohort Name', magneto)
def test_unicode_user_partitions(self):
"""
Test that user partition groups can contain unicode characters.
"""
user_groups = [u'ÞrÖfessÖr X', u'MàgnëtÖ']
user_partition = UserPartition(
0,
'x_man',
'X Man',
[
Group(0, user_groups[0]),
Group(1, user_groups[1])
]
)
# Create course with group configurations
self.initialize_course(
course_factory_kwargs={
'user_partitions': [user_partition]
}
)
_groups = [group.name for group in self.course.user_partitions[0].groups]
self.assertEqual(_groups, user_groups)
def test_cohort_scheme_partition(self):
"""
Test that cohort-schemed user partitions are ignored in the
grades export.
"""
# Set up a course with 'cohort' and 'random' user partitions.
cohort_scheme_partition = UserPartition(
0,
'Cohort-schemed Group Configuration',
'Group Configuration based on Cohorts',
[Group(0, 'Group A'), Group(1, 'Group B')],
scheme_id='cohort'
)
experiment_group_a = Group(2, u'Expériment Group A')
experiment_group_b = Group(3, u'Expériment Group B')
experiment_partition = UserPartition(
1,
u'Content Expériment Configuration',
u'Group Configuration for Content Expériments',
[experiment_group_a, experiment_group_b],
scheme_id='random'
)
course = CourseFactory.create(
cohort_config={'cohorted': True},
user_partitions=[cohort_scheme_partition, experiment_partition]
)
# Create user_a and user_b which are enrolled in the course
# and assigned to experiment_group_a and experiment_group_b,
# respectively.
user_a = UserFactory.create(username='user_a')
user_b = UserFactory.create(username='user_b')
CourseEnrollment.enroll(user_a, course.id)
CourseEnrollment.enroll(user_b, course.id)
course_tag_api.set_course_tag(
user_a,
course.id,
RandomUserPartitionScheme.key_for_partition(experiment_partition),
experiment_group_a.id
)
course_tag_api.set_course_tag(
user_b,
course.id,
RandomUserPartitionScheme.key_for_partition(experiment_partition),
experiment_group_b.id
)
# Assign user_a to a group in the 'cohort'-schemed user
# partition (by way of a cohort) to verify that the user
# partition group does not show up in the "Experiment Group"
# cell.
cohort_a = CohortFactory.create(course_id=course.id, name=u'Cohørt A', users=[user_a])
CourseUserGroupPartitionGroup(
course_user_group=cohort_a,
partition_id=cohort_scheme_partition.id,
group_id=cohort_scheme_partition.groups[0].id
).save()
# Verify that we see user_a and user_b in their respective
# content experiment groups, and that we do not see any
# content groups.
experiment_group_message = u'Experiment Group ({content_experiment})'
self._verify_cell_data_for_user(
user_a.username,
course.id,
experiment_group_message.format(
content_experiment=experiment_partition.name
),
experiment_group_a.name
)
self._verify_cell_data_for_user(
user_b.username,
course.id,
experiment_group_message.format(
content_experiment=experiment_partition.name
),
experiment_group_b.name
)
# Make sure cohort info is correct.
cohort_name_header = 'Cohort Name'
self._verify_cell_data_for_user(
user_a.username,
course.id,
cohort_name_header,
cohort_a.name
)
self._verify_cell_data_for_user(
user_b.username,
course.id,
cohort_name_header,
''
)
@patch('instructor_task.tasks_helper._get_current_task')
@patch('instructor_task.tasks_helper.iterate_grades_for')
def test_unicode_in_csv_header(self, mock_iterate_grades_for, _mock_current_task):
"""
Tests that CSV grade report works if unicode in headers.
"""
# mock a response from `iterate_grades_for`
mock_iterate_grades_for.return_value = [
(
self.create_student('username', '[email protected]'),
{'section_breakdown': [{'label': u'\u8282\u540e\u9898 01'}], 'percent': 0, 'grade': None},
'Cannot grade student'
)
]
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
class TestProblemResponsesReport(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that generation of CSV files listing student answers to a
given problem works.
"""
def setUp(self):
super(TestProblemResponsesReport, self).setUp()
self.course = CourseFactory.create()
def test_success(self):
task_input = {'problem_location': ''}
with patch('instructor_task.tasks_helper._get_current_task'):
with patch('instructor_task.tasks_helper.list_problem_responses') as patched_data_source:
patched_data_source.return_value = [
{'username': 'user0', 'state': u'state0'},
{'username': 'user1', 'state': u'state1'},
{'username': 'user2', 'state': u'state2'},
]
result = upload_problem_responses_csv(None, None, self.course.id, task_input, 'calculated')
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
links = report_store.links_for(self.course.id)
self.assertEquals(len(links), 1)
self.assertDictContainsSubset({'attempted': 3, 'succeeded': 3, 'failed': 0}, result)
@ddt.ddt
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class TestInstructorDetailedEnrollmentReport(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that CSV detailed enrollment generation works.
"""
def setUp(self):
super(TestInstructorDetailedEnrollmentReport, self).setUp()
self.course = CourseFactory.create()
# create testing invoice 1
self.instructor = InstructorFactory(course_key=self.course.id)
self.sale_invoice_1 = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName',
company_contact_email='[email protected]',
recipient_name='Testw', recipient_email='[email protected]', customer_reference_number='2Fwe23S',
internal_reference="A", course_id=self.course.id, is_valid=True
)
self.invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=self.sale_invoice_1,
qty=1,
unit_price=1234.32,
course_id=self.course.id
)
def test_success(self):
self.create_student('student', '[email protected]')
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
def test_student_paid_course_enrollment_report(self):
"""
test to check the paid user enrollment csv report status
and enrollment source.
"""
student = UserFactory()
student_cart = Order.get_cart_for_user(student)
PaidCourseRegistration.add_to_order(student_cart, self.course.id)
student_cart.purchase()
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Credit Card - Individual')
self._verify_cell_data_in_csv(student.username, 'Payment Status', 'purchased')
def test_student_manually_enrolled_in_detailed_enrollment_source(self):
"""
test to check the manually enrolled user enrollment report status
and enrollment source.
"""
student = UserFactory()
enrollment = CourseEnrollment.enroll(student, self.course.id)
ManualEnrollmentAudit.create_manual_enrollment_audit(
self.instructor, student.email, ALLOWEDTOENROLL_TO_ENROLLED,
'manually enrolling unenrolled user', enrollment
)
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
enrollment_source = u'manually enrolled by user_id {user_id}, enrollment state transition: {transition}'.format(
user_id=self.instructor.id, transition=ALLOWEDTOENROLL_TO_ENROLLED) # pylint: disable=no-member
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
self._verify_cell_data_in_csv(student.username, 'Enrollment Source', enrollment_source)
self._verify_cell_data_in_csv(student.username, 'Payment Status', 'TBD')
def test_student_used_enrollment_code_for_course_enrollment(self):
"""
test to check the user enrollment source and payment status in the
enrollment detailed report
"""
student = UserFactory()
self.client.login(username=student.username, password='test')
student_cart = Order.get_cart_for_user(student)
paid_course_reg_item = PaidCourseRegistration.add_to_order(student_cart, self.course.id)
# update the quantity of the cart item paid_course_reg_item
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'),
{'ItemId': paid_course_reg_item.id, 'qty': '4'})
self.assertEqual(resp.status_code, 200)
student_cart.purchase()
course_reg_codes = CourseRegistrationCode.objects.filter(order=student_cart)
redeem_url = reverse('register_code_redemption', args=[course_reg_codes[0].code])
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Used Registration Code')
self._verify_cell_data_in_csv(student.username, 'Payment Status', 'purchased')
def test_student_used_invoice_unpaid_enrollment_code_for_course_enrollment(self):
"""
test to check the user enrollment source and payment status in the
enrollment detailed report
"""
student = UserFactory()
self.client.login(username=student.username, password='test')
course_registration_code = CourseRegistrationCode(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
redeem_url = reverse('register_code_redemption', args=['abcde'])
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Used Registration Code')
self._verify_cell_data_in_csv(student.username, 'Payment Status', 'Invoice Outstanding')
def test_student_used_invoice_paid_enrollment_code_for_course_enrollment(self):
"""
test to check the user enrollment source and payment status in the
enrollment detailed report
"""
student = UserFactory()
self.client.login(username=student.username, password='test')
invoice_transaction = InvoiceTransaction(
invoice=self.sale_invoice_1,
amount=self.sale_invoice_1.total_amount,
status='completed',
created_by=self.instructor,
last_modified_by=self.instructor
)
invoice_transaction.save()
course_registration_code = CourseRegistrationCode(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
redeem_url = reverse('register_code_redemption', args=['abcde'])
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Used Registration Code')
self._verify_cell_data_in_csv(student.username, 'Payment Status', 'Invoice Paid')
def _verify_cell_data_in_csv(self, username, column_header, expected_cell_content):
"""
Verify that the last ReportStore CSV contains the expected content.
"""
report_store = ReportStore.from_config(config_name='FINANCIAL_REPORTS')
report_csv_filename = report_store.links_for(self.course.id)[0][0]
with open(report_store.path_to(self.course.id, report_csv_filename)) as csv_file:
            # Expand the dict reader generator so we don't lose its content
for row in unicodecsv.DictReader(csv_file):
if row.get('Username') == username:
self.assertEqual(row[column_header], expected_cell_content)
@ddt.ddt
class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
"""
Test that the problem CSV generation works.
"""
def setUp(self):
super(TestProblemGradeReport, self).setUp()
self.initialize_course()
# Add unicode data to CSV even though unicode usernames aren't
# technically possible in openedx.
self.student_1 = self.create_student(u'üser_1')
self.student_2 = self.create_student(u'üser_2')
self.csv_header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
@patch('instructor_task.tasks_helper._get_current_task')
def test_no_problems(self, _get_current_task):
"""
Verify that we see no grade information for a course with no graded
problems.
"""
result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv([
dict(zip(
self.csv_header_row,
[unicode(self.student_1.id), self.student_1.email, self.student_1.username, '0.0']
)),
dict(zip(
self.csv_header_row,
[unicode(self.student_2.id), self.student_2.email, self.student_2.username, '0.0']
))
])
@patch('instructor_task.tasks_helper._get_current_task')
def test_single_problem(self, _get_current_task):
vertical = ItemFactory.create(
parent_location=self.problem_section.location,
category='vertical',
metadata={'graded': True},
display_name='Problem Vertical'
)
self.define_option_problem(u'Pröblem1', parent=vertical)
self.submit_student_answer(self.student_1.username, u'Pröblem1', ['Option 1'])
result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
problem_name = u'Homework 1: Problem - Pröblem1'
header_row = self.csv_header_row + [problem_name + ' (Earned)', problem_name + ' (Possible)']
self.verify_rows_in_csv([
dict(zip(
header_row,
[
unicode(self.student_1.id),
self.student_1.email,
self.student_1.username,
'0.01', '1.0', '2.0']
)),
dict(zip(
header_row,
[
unicode(self.student_2.id),
self.student_2.email,
self.student_2.username,
'0.0', 'N/A', 'N/A'
]
))
])
@patch('instructor_task.tasks_helper._get_current_task')
@patch('instructor_task.tasks_helper.iterate_grades_for')
@ddt.data(u'Cannöt grade student', '')
def test_grading_failure(self, error_message, mock_iterate_grades_for, _mock_current_task):
"""
Test that any grading errors are properly reported in the progress
dict and uploaded to the report store.
"""
# mock an error response from `iterate_grades_for`
student = self.create_student(u'username', u'[email protected]')
mock_iterate_grades_for.return_value = [
(student, {}, error_message)
]
result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 0, 'failed': 1}, result)
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
self.assertTrue(any('grade_report_err' in item[0] for item in report_store.links_for(self.course.id)))
self.verify_rows_in_csv([
{
u'Student ID': unicode(student.id),
u'Email': student.email,
u'Username': student.username,
u'error_msg': error_message if error_message else "Unknown error"
}
])
class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent, InstructorTaskModuleTestCase):
"""
Test the problem report on a course that has split tests.
"""
OPTION_1 = 'Option 1'
OPTION_2 = 'Option 2'
def setUp(self):
super(TestProblemReportSplitTestContent, self).setUp()
self.problem_a_url = u'pröblem_a_url'
self.problem_b_url = u'pröblem_b_url'
self.define_option_problem(self.problem_a_url, parent=self.vertical_a)
self.define_option_problem(self.problem_b_url, parent=self.vertical_b)
def test_problem_grade_report(self):
"""
        Test that we generate the correct grade report when dealing with A/B tests.
In order to verify that the behavior of the grade report is correct, we submit answers for problems
that the student won't have access to. A/B tests won't restrict access to the problems, but it should
not show up in that student's course tree when generating the grade report, hence the N/A's in the grade report.
"""
# student A will get 100%, student B will get 50% because
# OPTION_1 is the correct option, and OPTION_2 is the
# incorrect option
self.submit_student_answer(self.student_a.username, self.problem_a_url, [self.OPTION_1, self.OPTION_1])
self.submit_student_answer(self.student_a.username, self.problem_b_url, [self.OPTION_1, self.OPTION_1])
self.submit_student_answer(self.student_b.username, self.problem_a_url, [self.OPTION_1, self.OPTION_2])
self.submit_student_answer(self.student_b.username, self.problem_b_url, [self.OPTION_1, self.OPTION_2])
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset(
{'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result
)
problem_names = [u'Homework 1: Problem - pröblem_a_url', u'Homework 1: Problem - pröblem_b_url']
header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
for problem in problem_names:
header_row += [problem + ' (Earned)', problem + ' (Possible)']
self.verify_rows_in_csv([
dict(zip(
header_row,
[
unicode(self.student_a.id),
self.student_a.email,
self.student_a.username,
u'1.0', u'2.0', u'2.0', u'N/A', u'N/A'
]
)),
dict(zip(
header_row,
[
unicode(self.student_b.id),
self.student_b.email,
self.student_b.username, u'0.5', u'N/A', u'N/A', u'1.0', u'2.0'
]
))
])
class TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, InstructorTaskModuleTestCase):
"""
Test the problem report on a course that has cohorted content.
"""
def setUp(self):
super(TestProblemReportCohortedContent, self).setUp()
# construct cohorted problems to work on.
self.add_course_content()
vertical = ItemFactory.create(
parent_location=self.problem_section.location,
category='vertical',
metadata={'graded': True},
display_name='Problem Vertical'
)
self.define_option_problem(
u"Pröblem0",
parent=vertical,
group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[0].id]}
)
self.define_option_problem(
u"Pröblem1",
parent=vertical,
group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[1].id]}
)
def _format_user_grade(self, header_row, user, grade):
"""
        Helper method that formats the user grade
Args:
header_row(list): header row of csv containing Student ID, Email, Username etc
user(object): Django user object
grade(list): Users' grade list
"""
return dict(zip(
header_row,
[
unicode(user.id),
user.email,
user.username,
] + grade
))
def test_cohort_content(self):
self.submit_student_answer(self.alpha_user.username, u'Pröblem0', ['Option 1', 'Option 1'])
resp = self.submit_student_answer(self.alpha_user.username, u'Pröblem1', ['Option 1', 'Option 1'])
self.assertEqual(resp.status_code, 404)
resp = self.submit_student_answer(self.beta_user.username, u'Pröblem0', ['Option 1', 'Option 2'])
self.assertEqual(resp.status_code, 404)
self.submit_student_answer(self.beta_user.username, u'Pröblem1', ['Option 1', 'Option 2'])
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset(
{'action_name': 'graded', 'attempted': 4, 'succeeded': 4, 'failed': 0}, result
)
problem_names = [u'Homework 1: Problem - Pröblem0', u'Homework 1: Problem - Pröblem1']
header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
for problem in problem_names:
header_row += [problem + ' (Earned)', problem + ' (Possible)']
user_grades = [
{'user': self.staff_user, 'grade': [u'0.0', u'N/A', u'N/A', u'N/A', u'N/A']},
{'user': self.alpha_user, 'grade': [u'1.0', u'2.0', u'2.0', u'N/A', u'N/A']},
{'user': self.beta_user, 'grade': [u'0.5', u'N/A', u'N/A', u'1.0', u'2.0']},
{'user': self.non_cohorted_user, 'grade': [u'0.0', u'N/A', u'N/A', u'N/A', u'N/A']},
]
# Verify generated grades and expected grades match
expected_grades = [self._format_user_grade(header_row, **user_grade) for user_grade in user_grades]
self.verify_rows_in_csv(expected_grades)
@patch('courseware.grades.MaxScoresCache.get', Mock(return_value=1))
def test_cohort_content_with_maxcache(self):
"""
        Tests cohorted course grading in the scenario in which `max_scores_cache` is set for the course
problems.
"""
# Course is cohorted
self.assertTrue(cohorts.is_course_cohorted(self.course.id))
# Verify user groups
self.assertEquals(
cohorts.get_cohort(self.alpha_user, self.course.id).id,
self.course.user_partitions[0].groups[0].id,
"alpha_user should be assigned to the correct cohort"
)
self.assertEquals(
cohorts.get_cohort(self.beta_user, self.course.id).id,
self.course.user_partitions[0].groups[1].id,
"beta_user should be assigned to the correct cohort"
)
# Verify user enrollment
for user in [self.alpha_user, self.beta_user, self.non_cohorted_user]:
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
self.submit_student_answer(self.alpha_user.username, u'Pröblem0', ['Option 1', 'Option 1'])
resp = self.submit_student_answer(self.alpha_user.username, u'Pröblem1', ['Option 1', 'Option 1'])
self.assertEqual(resp.status_code, 404)
resp = self.submit_student_answer(self.beta_user.username, u'Pröblem0', ['Option 1', 'Option 2'])
self.assertEqual(resp.status_code, 404)
self.submit_student_answer(self.beta_user.username, u'Pröblem1', ['Option 1', 'Option 2'])
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset(
{'action_name': 'graded', 'attempted': 4, 'succeeded': 4, 'failed': 0}, result
)
problem_names = [u'Homework 1: Problem - Pröblem0', u'Homework 1: Problem - Pröblem1']
header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
for problem in problem_names:
header_row += [problem + ' (Earned)', problem + ' (Possible)']
user_grades = [
{'user': self.staff_user, 'grade': [u'0.0', u'N/A', u'N/A', u'N/A', u'N/A']},
{'user': self.alpha_user, 'grade': [u'1.0', u'2.0', u'2.0', u'N/A', u'N/A']},
{'user': self.beta_user, 'grade': [u'0.5', u'N/A', u'N/A', u'1.0', u'2.0']},
{'user': self.non_cohorted_user, 'grade': [u'0.0', u'N/A', u'N/A', u'N/A', u'N/A']},
]
# Verify generated grades and expected grades match
expected_grades = [self._format_user_grade(header_row, **grade) for grade in user_grades]
self.verify_rows_in_csv(expected_grades)
@ddt.ddt
class TestExecutiveSummaryReport(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that Executive Summary report generation works.
"""
def setUp(self):
super(TestExecutiveSummaryReport, self).setUp()
self.course = CourseFactory.create()
CourseModeFactory.create(course_id=self.course.id, min_price=50)
self.instructor = InstructorFactory(course_key=self.course.id)
self.student1 = UserFactory()
self.student2 = UserFactory()
self.student1_cart = Order.get_cart_for_user(self.student1)
self.student2_cart = Order.get_cart_for_user(self.student2)
self.sale_invoice_1 = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName',
company_contact_email='[email protected]',
recipient_name='Testw', recipient_email='[email protected]', customer_reference_number='2Fwe23S',
internal_reference="A", course_id=self.course.id, is_valid=True
)
InvoiceTransaction.objects.create(
invoice=self.sale_invoice_1,
amount=self.sale_invoice_1.total_amount,
status='completed',
created_by=self.instructor,
last_modified_by=self.instructor
)
self.invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=self.sale_invoice_1,
qty=10,
unit_price=1234.32,
course_id=self.course.id
)
for i in range(5):
coupon = Coupon(
code='coupon{0}'.format(i), description='test_description', course_id=self.course.id,
percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True,
)
coupon.save()
def test_successfully_generate_executive_summary_report(self):
"""
Test that successfully generates the executive summary report.
"""
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_exec_summary_report(
None, None, self.course.id,
task_input, 'generating executive summary report'
)
ReportStore.from_config(config_name='FINANCIAL_REPORTS')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
def students_purchases(self):
"""
Students purchases the courses using enrollment
and coupon codes.
"""
self.client.login(username=self.student1.username, password='test')
paid_course_reg_item = PaidCourseRegistration.add_to_order(self.student1_cart, self.course.id)
# update the quantity of the cart item paid_course_reg_item
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {
'ItemId': paid_course_reg_item.id, 'qty': '4'
})
self.assertEqual(resp.status_code, 200)
# apply the coupon code to the item in the cart
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': 'coupon1'})
self.assertEqual(resp.status_code, 200)
self.student1_cart.purchase()
course_reg_codes = CourseRegistrationCode.objects.filter(order=self.student1_cart)
redeem_url = reverse('register_code_redemption', args=[course_reg_codes[0].code])
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
self.client.login(username=self.student2.username, password='test')
PaidCourseRegistration.add_to_order(self.student2_cart, self.course.id)
# apply the coupon code to the item in the cart
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': 'coupon1'})
self.assertEqual(resp.status_code, 200)
self.student2_cart.purchase()
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
def test_generate_executive_summary_report(self):
"""
test to generate executive summary report
and then test the report authenticity.
"""
self.students_purchases()
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_exec_summary_report(
None, None, self.course.id,
task_input, 'generating executive summary report'
)
report_store = ReportStore.from_config(config_name='FINANCIAL_REPORTS')
expected_data = [
'Gross Revenue Collected', '$1481.82',
'Gross Revenue Pending', '$0.00',
'Average Price per Seat', '$296.36',
'Number of seats purchased using coupon codes', '<td>2</td>'
]
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
self._verify_html_file_report(report_store, expected_data)
def _verify_html_file_report(self, report_store, expected_data):
"""
Verify grade report data.
"""
report_html_filename = report_store.links_for(self.course.id)[0][0]
with open(report_store.path_to(self.course.id, report_html_filename)) as html_file:
html_file_data = html_file.read()
for data in expected_data:
self.assertTrue(data in html_file_data)
@ddt.ddt
class TestStudentReport(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that CSV student profile report generation works.
"""
def setUp(self):
super(TestStudentReport, self).setUp()
self.course = CourseFactory.create()
def test_success(self):
self.create_student('student', '[email protected]')
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_students_csv(None, None, self.course.id, task_input, 'calculated')
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
links = report_store.links_for(self.course.id)
self.assertEquals(len(links), 1)
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
@ddt.data([u'student', u'student\xec'])
def test_unicode_usernames(self, students):
"""
Test that students with unicode characters in their usernames
are handled.
"""
for i, student in enumerate(students):
self.create_student(username=student, email='student{0}@example.com'.format(i))
self.current_task = Mock()
self.current_task.update_state = Mock()
task_input = {
'features': [
'id', 'username', 'name', 'email', 'language', 'location',
'year_of_birth', 'gender', 'level_of_education', 'mailing_address',
'goals'
]
}
with patch('instructor_task.tasks_helper._get_current_task') as mock_current_task:
mock_current_task.return_value = self.current_task
result = upload_students_csv(None, None, self.course.id, task_input, 'calculated')
# This assertion simply confirms that the generation completed with no errors
num_students = len(students)
self.assertDictContainsSubset({'attempted': num_students, 'succeeded': num_students, 'failed': 0}, result)
@ddt.ddt
class TestListMayEnroll(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that generation of CSV files containing information about
students who may enroll in a given course (but have not signed up
for it yet) works.
"""
def _create_enrollment(self, email):
"Factory method for creating CourseEnrollmentAllowed objects."
return CourseEnrollmentAllowed.objects.create(
email=email, course_id=self.course.id
)
def setUp(self):
super(TestListMayEnroll, self).setUp()
self.course = CourseFactory.create()
def test_success(self):
self._create_enrollment('[email protected]')
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_may_enroll_csv(None, None, self.course.id, task_input, 'calculated')
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
links = report_store.links_for(self.course.id)
self.assertEquals(len(links), 1)
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
def test_unicode_email_addresses(self):
"""
Test handling of unicode characters in email addresses of students
who may enroll in a course.
"""
enrollments = [u'[email protected]', u'ni\[email protected]']
for email in enrollments:
self._create_enrollment(email)
task_input = {'features': ['email']}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_may_enroll_csv(None, None, self.course.id, task_input, 'calculated')
# This assertion simply confirms that the generation completed with no errors
num_enrollments = len(enrollments)
self.assertDictContainsSubset({'attempted': num_enrollments, 'succeeded': num_enrollments, 'failed': 0}, result)
class MockDefaultStorage(object):
"""Mock django's DefaultStorage"""
def __init__(self):
pass
def open(self, file_name):
"""Mock out DefaultStorage.open with standard python open"""
return open(file_name)
@patch('instructor_task.tasks_helper.DefaultStorage', new=MockDefaultStorage)
class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that bulk student cohorting works.
"""
def setUp(self):
super(TestCohortStudents, self).setUp()
self.course = CourseFactory.create()
self.cohort_1 = CohortFactory(course_id=self.course.id, name='Cohort 1')
self.cohort_2 = CohortFactory(course_id=self.course.id, name='Cohort 2')
self.student_1 = self.create_student(username=u'student_1\xec', email='[email protected]')
self.student_2 = self.create_student(username='student_2', email='[email protected]')
self.csv_header_row = ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']
def _cohort_students_and_upload(self, csv_data):
"""
Call `cohort_students_and_upload` with a file generated from `csv_data`.
"""
with tempfile.NamedTemporaryFile() as temp_file:
temp_file.write(csv_data.encode('utf-8'))
temp_file.flush()
with patch('instructor_task.tasks_helper._get_current_task'):
return cohort_students_and_upload(None, None, self.course.id, {'file_name': temp_file.name}, 'cohorted')
def test_username(self):
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,,Cohort 1\n'
u'student_2,,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
],
verify_order=False
)
def test_email(self):
result = self._cohort_students_and_upload(
'username,email,cohort\n'
',[email protected],Cohort 1\n'
',[email protected],Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
],
verify_order=False
)
def test_username_and_email(self):
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,[email protected],Cohort 1\n'
u'student_2,[email protected],Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
],
verify_order=False
)
def test_prefer_email(self):
"""
Test that `cohort_students_and_upload` greedily prefers 'email' over
'username' when identifying the user. This means that if a correct
email is present, an incorrect or non-matching username will simply be
ignored.
"""
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,[email protected],Cohort 1\n' # valid username and email
u'Invalid,[email protected],Cohort 2' # invalid username, valid email
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
],
verify_order=False
)
def test_non_existent_user(self):
result = self._cohort_students_and_upload(
'username,email,cohort\n'
'Invalid,,Cohort 1\n'
'student_2,[email protected],Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 0, 'failed': 2}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '0', 'Invalid'])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '0', '[email protected]'])),
],
verify_order=False
)
def test_non_existent_cohort(self):
result = self._cohort_students_and_upload(
'username,email,cohort\n'
',[email protected],Does Not Exist\n'
'student_2,,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 1, 'failed': 1}, result)
self.verify_rows_in_csv(<|fim▁hole|> ],
verify_order=False
)
def test_too_few_commas(self):
"""
        A CSV file may be malformed and lack trailing commas at the end of a row.
        In this case, the CSV parser assigns those missing cells the value None.
        Make sure we handle None values appropriately.
i.e.:
header_1,header_2,header_3
val_1,val_2,val_3 <- good row
val_1,, <- good row
val_1 <- bad row; no trailing commas to indicate empty rows
"""
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,\n'
u'student_2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 0, 'failed': 2}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['', 'False', '0', ''])),
],
verify_order=False
)
def test_only_header_row(self):
result = self._cohort_students_and_upload(
u'username,email,cohort'
)
self.assertDictContainsSubset({'total': 0, 'attempted': 0, 'succeeded': 0, 'failed': 0}, result)
self.verify_rows_in_csv([])
def test_carriage_return(self):
"""
Test that we can handle carriage returns in our file.
"""
result = self._cohort_students_and_upload(
u'username,email,cohort\r'
u'student_1\xec,,Cohort 1\r'
u'student_2,,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
],
verify_order=False
)
def test_carriage_return_line_feed(self):
"""
Test that we can handle carriage returns and line feeds in our file.
"""
result = self._cohort_students_and_upload(
u'username,email,cohort\r\n'
u'student_1\xec,,Cohort 1\r\n'
u'student_2,,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
],
verify_order=False
)
def test_move_users_to_new_cohort(self):
self.cohort_1.users.add(self.student_1)
self.cohort_2.users.add(self.student_2)
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,,Cohort 2\n'
u'student_2,,Cohort 1'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
],
verify_order=False
)
def test_move_users_to_same_cohort(self):
self.cohort_1.users.add(self.student_1)
self.cohort_2.users.add(self.student_2)
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,,Cohort 1\n'
u'student_2,,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'skipped': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '0', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '0', ''])),
],
verify_order=False
)
@ddt.ddt
@patch('instructor_task.tasks_helper.DefaultStorage', new=MockDefaultStorage)
class TestGradeReportEnrollmentAndCertificateInfo(TestReportMixin, InstructorTaskModuleTestCase):
"""
    Test that the grade report has correct user enrollment, verification, and certificate information.
"""
def setUp(self):
super(TestGradeReportEnrollmentAndCertificateInfo, self).setUp()
self.initialize_course()
self.create_problem()
self.columns_to_check = [
'Enrollment Track',
'Verification Status',
'Certificate Eligible',
'Certificate Delivered',
'Certificate Type'
]
def create_problem(self, problem_display_name='test_problem', parent=None):
"""
Create a multiple choice response problem.
"""
if parent is None:
parent = self.problem_section
factory = MultipleChoiceResponseXMLFactory()
args = {'choices': [False, True, False]}
problem_xml = factory.build_xml(**args)
ItemFactory.create(
parent_location=parent.location,
parent=parent,
category="problem",
display_name=problem_display_name,
data=problem_xml
)
def user_is_embargoed(self, user, is_embargoed):
"""
        Set a user's embargo state.
"""
user_profile = UserFactory(username=user.username, email=user.email).profile
user_profile.allow_certificate = not is_embargoed
user_profile.save()
def _verify_csv_data(self, username, expected_data):
"""
Verify grade report data.
"""
with patch('instructor_task.tasks_helper._get_current_task'):
upload_grades_csv(None, None, self.course.id, None, 'graded')
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
report_csv_filename = report_store.links_for(self.course.id)[0][0]
with open(report_store.path_to(self.course.id, report_csv_filename)) as csv_file:
for row in unicodecsv.DictReader(csv_file):
if row.get('username') == username:
csv_row_data = [row[column] for column in self.columns_to_check]
self.assertEqual(csv_row_data, expected_data)
def _create_user_data(self,
user_enroll_mode,
has_passed,
whitelisted,
is_embargoed,
verification_status,
certificate_status,
certificate_mode):
"""
Create user data to be used during grade report generation.
"""
user = self.create_student('u1', mode=user_enroll_mode)
if has_passed:
self.submit_student_answer('u1', 'test_problem', ['choice_1'])
CertificateWhitelistFactory.create(user=user, course_id=self.course.id, whitelist=whitelisted)
self.user_is_embargoed(user, is_embargoed)
if user_enroll_mode in CourseMode.VERIFIED_MODES:
SoftwareSecurePhotoVerificationFactory.create(user=user, status=verification_status)
GeneratedCertificateFactory.create(
user=user,
course_id=self.course.id,
status=certificate_status,
mode=certificate_mode
)
return user
@ddt.data(
(
'verified', False, False, False, 'approved', 'notpassing', 'honor',
['verified', 'ID Verified', 'N', 'N', 'N/A']
),
(
'verified', False, True, False, 'approved', 'downloadable', 'verified',
['verified', 'ID Verified', 'Y', 'Y', 'verified']
),
(
'honor', True, True, True, 'approved', 'restricted', 'honor',
['honor', 'N/A', 'N', 'N', 'N/A']
),
(
'verified', True, True, False, 'must_retry', 'downloadable', 'honor',
['verified', 'Not ID Verified', 'Y', 'Y', 'honor']
),
)
@ddt.unpack
def test_grade_report_enrollment_and_certificate_info(
self,
user_enroll_mode,
has_passed,
whitelisted,
is_embargoed,
verification_status,
certificate_status,
certificate_mode,
expected_output
):
user = self._create_user_data(
user_enroll_mode,
has_passed,
whitelisted,
is_embargoed,
verification_status,
certificate_status,
certificate_mode
)
self._verify_csv_data(user.username, expected_output)
@override_settings(CERT_QUEUE='test-queue')
class TestCertificateGeneration(InstructorTaskModuleTestCase):
"""
Test certificate generation task works.
"""
def setUp(self):
super(TestCertificateGeneration, self).setUp()
self.initialize_course()
def test_certificate_generation_for_students(self):
"""
        Verify that certificates are generated for all eligible students enrolled in a course.
"""
# create 10 students
students = [self.create_student(username='student_{}'.format(i), email='student_{}@example.com'.format(i))
for i in xrange(1, 11)]
# mark 2 students to have certificates generated already
for student in students[:2]:
GeneratedCertificateFactory.create(
user=student,
course_id=self.course.id,
status=CertificateStatuses.downloadable,
mode='honor'
)
# white-list 5 students
for student in students[2:7]:
CertificateWhitelistFactory.create(user=student, course_id=self.course.id, whitelist=True)
current_task = Mock()
current_task.update_state = Mock()
with self.assertNumQueries(125):
with patch('instructor_task.tasks_helper._get_current_task') as mock_current_task:
mock_current_task.return_value = current_task
with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_queue:
mock_queue.return_value = (0, "Successfully queued")
result = generate_students_certificates(None, None, self.course.id, None, 'certificates generated')
self.assertDictContainsSubset(
{
'action_name': 'certificates generated',
'total': 10,
'attempted': 8,
'succeeded': 5,
'failed': 3,
'skipped': 2
},
result
)<|fim▁end|> | [
dict(zip(self.csv_header_row, ['Does Not Exist', 'False', '0', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])), |
<|file_name|>jft300m_vit_base16_finetune_cifar100.py<|end_file_name|><|fim▁begin|># coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""ViT-B/16 finetuning on CIFAR.
"""
# pylint: enable=line-too-long
import ml_collections
# TODO(dusenberrymw): Open-source remaining imports.
def get_sweep(hyper):
  # The example below shows how to sweep hyperparameters.
# lr_grid = [1e-4, 3e-4, 6e-4, 1e-3, 1.3e-3, 1.6e-3, 2e-3]
return hyper.product([
# hyper.sweep('config.lr.base', lr_grid),
])
def get_config():
"""Config for training a patch-transformer on JFT."""
config = ml_collections.ConfigDict()
# Fine-tuning dataset
config.dataset = 'cifar100'
config.val_split = 'train[98%:]'
config.train_split = 'train[:98%]'
config.num_classes = 100
BATCH_SIZE = 512 # pylint: disable=invalid-name
config.batch_size = BATCH_SIZE
config.total_steps = 10_000
INPUT_RES = 384 # pylint: disable=invalid-name
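  # The `pp_train`/`pp_eval` strings below are preprocessing pipelines: the ops
  # separated by `|` (decode, crop/resize, flip, value_range, onehot, keep) are
  # applied in order to each input example.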
pp_common = '|value_range(-1, 1)'
# pp_common += f'|onehot({config.num_classes})'
# To use ancestor 'smearing', use this line instead:
pp_common += f'|onehot({config.num_classes}, key="label", key_result="labels")' # pylint: disable=line-too-long
pp_common += '|keep(["image", "labels"])'
config.pp_train = f'decode|inception_crop({INPUT_RES})|flip_lr' + pp_common
config.pp_eval = f'decode|resize({INPUT_RES})' + pp_common
# OOD eval
# ood_split is the data split for both the ood_dataset and the dataset.
config.ood_datasets = ['cifar10', 'svhn_cropped']
config.ood_num_classes = [10, 10]
config.ood_split = 'test'
config.ood_methods = ['msp', 'entropy', 'maha', 'rmaha']
pp_eval_ood = []
for num_classes in config.ood_num_classes:
if num_classes > config.num_classes:
      # Note that evaluation_fn ignores entries whose labels are all zeros.
      # When num_classes > n_cls, we should use onehot(num_classes); otherwise
      # labels greater than n_cls would be encoded as all zeros and then be
      # ignored.
pp_eval_ood.append(
config.pp_eval.replace(f'onehot({config.num_classes}',
f'onehot({num_classes}'))
else:
pp_eval_ood.append(config.pp_eval)
config.pp_eval_ood = pp_eval_ood
config.shuffle_buffer_size = 50_000 # Per host, so small-ish is ok.
config.log_training_steps = 10
config.log_eval_steps = 100
# NOTE: eval is very fast O(seconds) so it's fine to run it often.
config.checkpoint_steps = 1000
config.checkpoint_timeout = 1
config.prefetch_to_device = 2
config.trial = 0
# Model section
# pre-trained model ckpt file
# !!! The below section should be modified per experiment
config.model_init = '/path/to/pretrained_model_ckpt.npz'
# Model definition to be copied from the pre-training config
config.model = ml_collections.ConfigDict()
config.model.patches = ml_collections.ConfigDict()
config.model.patches.size = [16, 16]
config.model.hidden_size = 768
config.model.transformer = ml_collections.ConfigDict()
config.model.transformer.attention_dropout_rate = 0.
config.model.transformer.dropout_rate = 0.
config.model.transformer.mlp_dim = 3072
config.model.transformer.num_heads = 12
config.model.transformer.num_layers = 12
config.model.classifier = 'token' # Or 'gap'<|fim▁hole|> # This is "no head" fine-tuning, which we use by default
config.model.representation_size = None
# Optimizer section
config.optim_name = 'Momentum'
config.optim = ml_collections.ConfigDict()
config.grad_clip_norm = 1.0
config.weight_decay = None # No explicit weight decay
config.loss = 'softmax_xent' # or 'sigmoid_xent'
config.lr = ml_collections.ConfigDict()
config.lr.base = 0.002
config.lr.warmup_steps = 500
config.lr.decay_type = 'cosine'
return config<|fim▁end|> | |
<|file_name|>archives.module.ts<|end_file_name|><|fim▁begin|>import { NgModule } from '@angular/core';
import { SharedModule } from '../shared/shared.module';
import { ArchivesRoutingModule } from './archives-routing.module';
import { ArchiveDetailResolve } from './archive-detail/archive-detail-resolve.service';
import { ArchiveDetailComponent } from './archive-detail/archive-detail.component';
import { ArchivesComponent } from './archives.component';
import { ArchiveService } from './shared/archive.service';<|fim▁hole|>import { ArchiveListComponent } from './archive-list/archive-list.component';
@NgModule({
imports: [
SharedModule,
ArchivesRoutingModule
],
declarations: [
ArchivesComponent,
ArchiveComponent,
ArchiveDetailComponent,
ArchiveListComponent
],
providers: [
ArchiveService,
ArchiveDetailResolve
]
})
export class ArchivesModule { }<|fim▁end|> | import { ArchiveComponent } from './archive/archive.component'; |
<|file_name|>backbonemixin.js<|end_file_name|><|fim▁begin|>/* Taken from a very informative blogpost by Eldar Djafarov:
* http://eldar.djafarov.com/2013/11/reactjs-mixing-with-backbone/
*/
(function() {
'use strict';
module.exports = {
/* Forces an update when the underlying Backbone model instance has
* changed. Users will have to implement getBackboneModels().
* Also requires that React is loaded with addons.
*/
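    /* A minimal usage sketch (component and collection names are hypothetical):
     *
     *   var TodoList = React.createClass({
     *     mixins: [BackboneMixin],
     *     getBackboneModels: function() { return [todosCollection]; },
     *     render: function() {
     *       return React.DOM.span(null, todosCollection.length + ' items');
     *     }
     *   });
     *
     * Any add/change/remove event on todosCollection then re-renders TodoList.
     */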
__syncedModels: [],
componentDidMount: function() {
// Whenever there may be a change in the Backbone data, trigger a reconcile.
this.getBackboneModels().forEach(this.injectModel, this);
},
componentWillUnmount: function() {
// Ensure that we clean up any dangling references when the component is
// destroyed.
this.__syncedModels.forEach(function(model) {
model.off(null, model.__updater, this);
}, this);
},
injectModel: function(model){
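      // `!~indexOf` is truthy only when the model has not been injected yet,
      // so each model ends up with exactly one bound updater.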
if(!~this.__syncedModels.indexOf(model)){
var updater = function() {
try {
this.forceUpdate();
} catch(e) {
// This means the component is already being updated somewhere
// else, so we just silently go on with our business.
// This is most likely due to some AJAX callback that already
// updated the model at the same time or slightly earlier.
}
}.bind(this, null);
model.__updater = updater;
model.on('add change remove', updater, this);
}
},
bindTo: function(model, key){
/* Allows for two-way databinding for Backbone models.
* Use by passing it as a 'valueLink' property, e.g.:
* valueLink={this.bindTo(model, attribute)} */
return {
value: model.get(key),
requestChange: function(value){
model.set(key, value);
}.bind(this)<|fim▁hole|> }
};
})();<|fim▁end|> | }; |
<|file_name|>FastBloomFilter.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
from FastBitSet import FastBitSet
import math
import mmh3
class FastBloomFilter(object):
mask32 = 0xffffffff
mask64 = 0xffffffffffffffff
mask128 = 0xffffffffffffffffffffffffffffffff
seeds = [2, 3, 5, 7, 11,
13, 17, 19, 23, 29,
31, 37, 41, 43, 47,
53, 59, 61, 67, 71,
73, 79, 83, 89, 97,
101, 103, 107, 109, 113,
127, 131, 137, 139, 149,
151, 157, 163, 167, 173,
179, 181, 191, 193, 197,
199, 211, 223, 227, 229,
233, 239, 241, 251, 257,
263, 269, 271, 277, 281,
283, 293, 307, 311, 313,
317, 331, 337, 347, 349,
353, 359, 367, 373, 379,
383, 389, 397, 401, 409,
419, 421, 431, 433, 439,
443, 449, 457, 461, 463,
467, 479, 487, 491, 499,
503, 509, 521, 523, 541,
547, 557, 563, 569, 571,
577, 587, 593, 599, 601,
607, 613, 617, 619, 631,
641, 643, 647, 653, 659,
661, 673, 677, 683, 691]
def __init__(self, n, fpr=0.00001):
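        # Standard Bloom filter sizing for n items and false-positive rate fpr:
        # bits m = -n*ln(fpr)/(ln 2)^2 and hash count k = (m/n)*ln 2.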
m = -1 * math.log(fpr, math.e) * n / math.pow(math.log(2, math.e), 2)
k = (m / n) * math.log(2, math.e)
<|fim▁hole|> self.m = int(math.ceil(m))
self.k = int(k)
self.bsUnitSize = 64
self.bsCap = int(math.ceil(self.m / 64))
self.bitSet = FastBitSet(self.bsCap, self.bsUnitSize)
self.bitSetLength = self.bitSet.length
def append(self, s):
self.bitSet.setList(self.hashs(s, self.k))
def exists(self, s):
bites = self.bitSet.getList(self.hashs(s, self.k))
return not (0 in bites)
def remove(self, s):
self.bitSet.setList(self.hashs(s, self.k), False)
def clear(self):
self.bitSet.clear()
def hashs(self, s, k):
bitSetLength = self.bitSetLength
#mask = self.mask32
mask = self.mask128
seeds = self.seeds
hashs = []
for i in range(k):
#print(mmh3.hash64(s, seeds[i]))
#hashs.append((mmh3.hash(s, seeds[i]) & mask) % bitSetLength)
hashs.append((mmh3.hash128(s, seeds[i]) & mask) % bitSetLength)
return hashs
def hashs2(self, s, k):
bitSetLength = self.bitSetLength
mask = self.mask32
hashs = []
        # mmh3.hash64 returns a pair of 64-bit values; use the first of each.
        hash1 = mmh3.hash64(s, 0)[0]
        hash2 = mmh3.hash64(s, hash1 & mask)[0]
        for i in range(k):
            hashs.append(((hash1 + i * hash2) % bitSetLength) & mask)
return hashs<|fim▁end|> | self.n = int(math.ceil(n))
self.fpr = fpr
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
'''
watdo.tests
~~~~~~~~~~~
This module contains tests for watdo.<|fim▁hole|> :license: MIT, see LICENSE for more details.
'''<|fim▁end|> | This particular file contains tools for testing.
:copyright: (c) 2013 Markus Unterwaditzer |
<|file_name|>unregister.js<|end_file_name|><|fim▁begin|>var expect = require('expect.js');
var helpers = require('../helpers');
var fakeRepositoryFactory = function () {
function FakeRepository() { }
FakeRepository.prototype.getRegistryClient = function () {
return {
unregister: function (name, cb) {
cb(null, { name: name });
}
};
};
return FakeRepository;
};
var unregister = helpers.command('unregister');
var unregisterFactory = function () {
return helpers.command('unregister', {
'../core/PackageRepository': fakeRepositoryFactory()
});
};
describe('bower unregister', function () {
it('correctly reads arguments', function () {
expect(unregister.readOptions(['jquery']))
.to.eql(['jquery']);
});
it('errors if name is not provided', function () {<|fim▁hole|> expect(reason.message).to.be('Usage: bower unregister <name> <url>');
expect(reason.code).to.be('EINVFORMAT');
});
});
it('should call registry client with name', function () {
var unregister = unregisterFactory();
return helpers.run(unregister, ['some-name'])
.spread(function (result) {
expect(result).to.eql({
// Result from register action on stub
name: 'some-name'
});
});
});
it('should confirm in interactive mode', function () {
var register = unregisterFactory();
var promise = helpers.run(register,
['some-name', {
interactive: true,
registry: { register: 'http://localhost' }
}]
);
return helpers.expectEvent(promise.logger, 'confirm')
.spread(function (e) {
expect(e.type).to.be('confirm');
expect(e.message).to.be('You are about to remove component "some-name" from the bower registry (http://localhost). It is generally considered bad behavior to remove versions of a library that others are depending on. Are you really sure?');
expect(e.default).to.be(false);
});
});
it('should skip confirming when forcing', function () {
var register = unregisterFactory();
return helpers.run(register,
['some-name',
{ interactive: true, force: true }
]
);
});
});<|fim▁end|> | return helpers.run(unregister).fail(function (reason) { |
<|file_name|>putProducer.go<|end_file_name|><|fim▁begin|>package helpers
import (
"fmt"
"github.com/SpectraLogic/ds3_go_sdk/ds3"
ds3Models "github.com/SpectraLogic/ds3_go_sdk/ds3/models"
helperModels "github.com/SpectraLogic/ds3_go_sdk/helpers/models"
"github.com/SpectraLogic/ds3_go_sdk/sdk_log"
"sync"
"time"
)
type putProducer struct {
JobMasterObjectList *ds3Models.MasterObjectList //MOL from put bulk job creation
WriteObjects *[]helperModels.PutObject
queue *chan TransferOperation
strategy *WriteTransferStrategy
client *ds3.Client
waitGroup *sync.WaitGroup
writeObjectMap map[string]helperModels.PutObject
processedBlobTracker blobTracker
deferredBlobQueue BlobDescriptionQueue // queue of blobs whose channels are not yet ready for transfer
sdk_log.Logger
// Conditional value that gets triggered when a blob has finished being transferred
doneNotifier NotifyBlobDone
}
func newPutProducer(
jobMasterObjectList *ds3Models.MasterObjectList,
putObjects *[]helperModels.PutObject,
queue *chan TransferOperation,
strategy *WriteTransferStrategy,
client *ds3.Client,
waitGroup *sync.WaitGroup,
doneNotifier NotifyBlobDone) *putProducer {
return &putProducer{
JobMasterObjectList: jobMasterObjectList,
WriteObjects: putObjects,
queue: queue,
strategy: strategy,
client: client,
waitGroup: waitGroup,
writeObjectMap: toWriteObjectMap(putObjects),
deferredBlobQueue: NewBlobDescriptionQueue(),
processedBlobTracker: newProcessedBlobTracker(),
Logger: client.Logger, // use the same logger as the client
doneNotifier: doneNotifier,
}
}
// Creates a map of object name to PutObject struct
func toWriteObjectMap(putObjects *[]helperModels.PutObject) map[string]helperModels.PutObject {
objectMap := make(map[string]helperModels.PutObject)
if putObjects == nil {
return objectMap
}
for _, obj := range *putObjects {
objectMap[obj.PutObject.Name] = obj
}
return objectMap
}
// Information required to put a single blob from its source channel builder to the BP destination
type putObjectInfo struct {
blob helperModels.BlobDescription
channelBuilder helperModels.ReadChannelBuilder
bucketName string
jobId string
}
// Creates the transfer operation that will perform the data upload of the specified blob to BP
func (producer *putProducer) transferOperationBuilder(info putObjectInfo) TransferOperation {
return func() {
// has this file fatally errored while transferring a different blob?
if info.channelBuilder.HasFatalError() {
// skip performing this blob transfer
producer.Warningf("fatal error occurred previously on this file, skipping sending blob name='%s' offset=%d length=%d", info.blob.Name(), info.blob.Offset(), info.blob.Length())
return
}
reader, err := info.channelBuilder.GetChannel(info.blob.Offset())
if err != nil {
producer.strategy.Listeners.Errored(info.blob.Name(), err)
info.channelBuilder.SetFatalError(err)
producer.Errorf("could not get reader for object with name='%s' offset=%d length=%d: %v", info.blob.Name(), info.blob.Offset(), info.blob.Length(), err)
return
}
defer info.channelBuilder.OnDone(reader)
sizedReader := NewIoReaderWithSizeDecorator(reader, info.blob.Length())
putObjRequest := ds3Models.NewPutObjectRequest(info.bucketName, info.blob.Name(), sizedReader).
WithJob(info.jobId).
WithOffset(info.blob.Offset())
producer.maybeAddMetadata(info, putObjRequest)
_, err = producer.client.PutObject(putObjRequest)
if err != nil {
producer.strategy.Listeners.Errored(info.blob.Name(), err)
info.channelBuilder.SetFatalError(err)
producer.Errorf("problem during transfer of %s: %s", info.blob.Name(), err.Error())
}
}
}
func (producer *putProducer) maybeAddMetadata(info putObjectInfo, putObjRequest *ds3Models.PutObjectRequest) {
metadataMap := producer.metadataFrom(info)
if len(metadataMap) == 0 {
return
}
for key, value := range metadataMap {
putObjRequest.WithMetaData(key, value)
}
}
func (producer *putProducer) metadataFrom(info putObjectInfo) map[string]string {
result := map[string]string{}
for _, objectToPut := range *producer.WriteObjects {
if objectToPut.PutObject.Name == info.blob.Name() {
result = objectToPut.Metadata
break
}
}
return result
}
// Processes all the blobs in a chunk and attempts to add them to the transfer queue.
// If a blob is not ready for transfer, then it is added to the waiting to be transferred queue.
// Returns the number of blobs added to queue.
func (producer *putProducer) processChunk(curChunk *ds3Models.Objects, bucketName string, jobId string) (int, error) {
processedCount := 0
producer.Debugf("begin chunk processing %s", curChunk.ChunkId)
// transfer blobs that are ready, and queue those that are waiting for channel
for _, curObj := range curChunk.Objects {
producer.Debugf("queuing object in waiting to be processed %s offset=%d length=%d", *curObj.Name, curObj.Offset, curObj.Length)
blob := helperModels.NewBlobDescription(*curObj.Name, curObj.Offset, curObj.Length)
blobQueued, err := producer.queueBlobForTransfer(&blob, bucketName, jobId)
if err != nil {
return 0, err
}
if blobQueued {
processedCount++
}
}
return processedCount, nil
}
// Iterates through blobs that are waiting to be transferred and attempts to transfer.
// If successful, blob is removed from queue. Else, it is re-queued.
// Returns the number of blobs added to queue.
func (producer *putProducer) processWaitingBlobs(bucketName string, jobId string) (int, error) {
processedCount := 0
	// attempt to process all blobs in the waiting-to-be-transferred queue
waitingBlobs := producer.deferredBlobQueue.Size()
for i := 0; i < waitingBlobs; i++ {
//attempt transfer
curBlob, err := producer.deferredBlobQueue.Pop()
if err != nil {
//should not be possible to get here
producer.Errorf("problem when getting next blob to be transferred: %s", err.Error())
break
}
producer.Debugf("attempting to process %s offset=%d length=%d", curBlob.Name(), curBlob.Offset(), curBlob.Length())
blobQueued, err := producer.queueBlobForTransfer(curBlob, bucketName, jobId)
if err != nil {
return 0, err
}
if blobQueued {
processedCount++
}
}
return processedCount, nil
}
// Attempts to transfer a single blob. If the blob is not ready for transfer,
// it is added to the waiting to transfer queue.
// Returns whether or not the blob was queued for transfer.
func (producer *putProducer) queueBlobForTransfer(blob *helperModels.BlobDescription, bucketName string, jobId string) (bool, error) {
if producer.processedBlobTracker.IsProcessed(*blob) {
return false, nil // this was already processed
}
curWriteObj, ok := producer.writeObjectMap[blob.Name()]
if !ok {
err := fmt.Errorf("failed to find object associated with blob in object map: %s offset=%d length=%d", blob.Name(), blob.Offset(), blob.Length())
producer.Errorf("unrecoverable error: %v", err)
producer.processedBlobTracker.MarkProcessed(*blob)
return false, err // fatal error occurred
}
if curWriteObj.ChannelBuilder == nil {
err := fmt.Errorf("failed to transfer object, it does not have a channel builder: %s", curWriteObj.PutObject.Name)
producer.Errorf("unrecoverable error: %v", err)
producer.processedBlobTracker.MarkProcessed(*blob)
return false, err // fatal error occurred
}
if curWriteObj.ChannelBuilder.HasFatalError() {
// a fatal error happened on a previous blob for this file, skip processing
producer.Warningf("fatal error occurred while transferring previous blob on this file, skipping blob %s offset=%d length=%d", blob.Name(), blob.Offset(), blob.Length())
producer.processedBlobTracker.MarkProcessed(*blob)
return false, nil // not actually transferring this blob
}
if !curWriteObj.ChannelBuilder.IsChannelAvailable(blob.Offset()) {
producer.Debugf("channel is not currently available for blob %s offset=%d length=%d", blob.Name(), blob.Offset(), blob.Length())
// Not ready to be transferred
producer.deferredBlobQueue.Push(blob)
return false, nil // not ready to be sent
}
producer.Debugf("channel is available for blob %s offset=%d length=%d", curWriteObj.PutObject.Name, blob.Offset(), blob.Length())
// Blob ready to be transferred
// Create transfer operation
objInfo := putObjectInfo{
blob: *blob,
channelBuilder: curWriteObj.ChannelBuilder,
bucketName: bucketName,
jobId: jobId,
}
transfer := producer.transferOperationBuilder(objInfo)
// Increment wait group, and enqueue transfer operation
producer.waitGroup.Add(1)
*producer.queue <- transfer
// Mark blob as processed
producer.processedBlobTracker.MarkProcessed(*blob)
return true, nil
}
// This initiates the production of the transfer operations which will be consumed by a consumer running in a separate go routine.
// Each transfer operation will put one blob of content to the BP.
// Once all blobs have been queued to be transferred, the producer will finish, even if all operations have not been consumed yet.
func (producer *putProducer) run() error {<|fim▁hole|> producer.Debugf("job status totalBlobs=%d processedBlobs=%d", totalBlobCount, producer.processedBlobTracker.NumberOfProcessedBlobs())
// process all chunks and make sure all blobs are queued for transfer
for producer.hasMoreToProcess(totalBlobCount) {
processedCount, err := producer.queueBlobsReadyForTransfer(totalBlobCount)
if err != nil {
return err
}
// If the last operation processed blobs, then wait for something to finish
if processedCount > 0 {
// wait for a done signal to be received
producer.doneNotifier.Wait()
} else if producer.hasMoreToProcess(totalBlobCount) {
// nothing could be processed, cache is probably full, wait a bit before trying again
time.Sleep(producer.strategy.BlobStrategy.delay())
}
}
return nil
}
func (producer *putProducer) hasMoreToProcess(totalBlobCount int64) bool {
return producer.processedBlobTracker.NumberOfProcessedBlobs() < totalBlobCount || producer.deferredBlobQueue.Size() > 0
}
// Returns the number of items queued for work.
func (producer *putProducer) queueBlobsReadyForTransfer(totalBlobCount int64) (int, error) {
// Attempt to transfer waiting blobs
processedCount, err := producer.processWaitingBlobs(*producer.JobMasterObjectList.BucketName, producer.JobMasterObjectList.JobId)
if err != nil {
return 0, err
}
// Check if we need to query the BP for allocated blobs, or if we already know everything is allocated.
if int64(producer.deferredBlobQueue.Size()) + producer.processedBlobTracker.NumberOfProcessedBlobs() >= totalBlobCount {
// Everything is already allocated, no need to query BP for allocated chunks
return processedCount, nil
}
// Get the list of available chunks that the server can receive. The server may
// not be able to receive everything, so not all chunks will necessarily be
// returned
chunksReady := ds3Models.NewGetJobChunksReadyForClientProcessingSpectraS3Request(producer.JobMasterObjectList.JobId)
chunksReadyResponse, err := producer.client.GetJobChunksReadyForClientProcessingSpectraS3(chunksReady)
if err != nil {
producer.Errorf("unrecoverable error: %v", err)
return processedCount, err
}
// Check to see if any chunks can be processed
numberOfChunks := len(chunksReadyResponse.MasterObjectList.Objects)
if numberOfChunks > 0 {
// Loop through all the chunks that are available for processing, and send
// the files that are contained within them.
for _, curChunk := range chunksReadyResponse.MasterObjectList.Objects {
justProcessedCount, err := producer.processChunk(&curChunk, *chunksReadyResponse.MasterObjectList.BucketName, chunksReadyResponse.MasterObjectList.JobId)
if err != nil {
return 0, err
}
processedCount += justProcessedCount
}
}
return processedCount, nil
}
// Determines the number of blobs to be transferred.
func (producer *putProducer) totalBlobCount() int64 {
if producer.JobMasterObjectList.Objects == nil || len(producer.JobMasterObjectList.Objects) == 0 {
return 0
}
var count int64 = 0
for _, chunk := range producer.JobMasterObjectList.Objects {
for range chunk.Objects {
count++
}
}
return count
}<|fim▁end|> | defer close(*producer.queue)
// determine number of blobs to be processed
totalBlobCount := producer.totalBlobCount() |
<|file_name|>recipeReport.js<|end_file_name|><|fim▁begin|>var RecipeReport = (function () {
"use strict";
var standartThaaliCount = 100;
var sum = function(array, prop) {
return array.reduce( function(a, b){
return a + b[prop];
}, 0);
};
var recipeReport = function (data, fromDate, toDate, recipe) {
this.data = data;
this.fromDate = fromDate;<|fim▁hole|> };
recipeReport.prototype.run = function() {
var d = {};
d.dateWiseReport = this.getDateWiseReport();
d.ingredientWiseAverage = this.getIngredientWiseAverage();
return d;
};
recipeReport.prototype.matchCriteria = function(meal) {
return meal.recipe === this.recipe
&& meal.date<=this.toDate
&& meal.date>=this.fromDate;
};
recipeReport.prototype.getDateWiseReport = function() {
var report = [];
for (var i = this.data.length - 1; i >= 0; i--) {
var meal = this.data[i];
if(this.matchCriteria(meal)) {
var cost = sum(meal.ingredients, 'amount');
report.push({
date: meal.date,
noOfThaalis: meal.noOfThaalis,
//total cost of recipe made on this date
perThaaliCost: (cost/meal.noOfThaalis) * standartThaaliCount,
totalCost: cost
});
}
}
return report;
};
recipeReport.prototype.getIngredientWiseAverage = function() {
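        // Aggregate each ingredient across all matching meals, then average its
        // quantity and amount over the number of days it appears.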
var report = {};
var faulty = {};
var isFaulty = false;
for (var i = this.data.length - 1; i >= 0; i--) {
var meal = this.data[i];
if(this.matchCriteria(meal)) {
for (var k = meal.ingredients.length - 1; k >= 0; k--) {
var ingredient = meal.ingredients[k];
var name = ingredient.item.toLowerCase();
report[name] = report[name] || {};
report[name].qty = report[name].qty || 0;
report[name].qty+= ingredient.qty;
if((report[name].unit !== undefined && report[name].unit !== ingredient.unit)) {
                        faulty[ingredient.item.toLowerCase() + '-' + meal.date] = faulty[ingredient.item.toLowerCase() + '-' + meal.date] || 0;
                        faulty[ingredient.item.toLowerCase() + '-' + meal.date] += 1;
isFaulty = true;
}
report[name].unit = ingredient.unit;
report[name].amount = report[name].amount || 0;
report[name].amount += ingredient.amount;
report[name].count = report[name].count || 0;
report[name].count++;
}
}
}
var finalReport = [];
for (var prop in report) {
finalReport.push({
"IngredientName": prop,
"Quantity": report[prop].qty/report[prop].count,
"Unit": report[prop].unit,
"PerUnitCost": report[prop].amount/report[prop].qty,
"Amount": report[prop].amount/report[prop].count
});
}
if(isFaulty) {
console.log(faulty);
//TODO add details for faulty items
alert('Different units specified for same ingredient on saperate days. Invalid data. Please see what\'s wrong. Or contact maker of this application');
}
return finalReport;
};
return recipeReport;
})();<|fim▁end|> | this.toDate = toDate;
this.recipe = recipe; |
<|file_name|>resource_aws_db_proxy_target.go<|end_file_name|><|fim▁begin|>package aws
import (
"fmt"
"log"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/rds"
"github.com/hashicorp/aws-sdk-go-base/tfawserr"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/terraform-providers/terraform-provider-aws/aws/internal/service/rds/finder"
)
func resourceAwsDbProxyTarget() *schema.Resource {
return &schema.Resource{
Create: resourceAwsDbProxyTargetCreate,
Read: resourceAwsDbProxyTargetRead,
Delete: resourceAwsDbProxyTargetDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"db_proxy_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateRdsIdentifier,
},
"target_group_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateRdsIdentifier,
},
"db_instance_identifier": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ExactlyOneOf: []string{
"db_instance_identifier",
"db_cluster_identifier",
},
ValidateFunc: validateRdsIdentifier,
},
"db_cluster_identifier": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ExactlyOneOf: []string{
"db_instance_identifier",
"db_cluster_identifier",
},
ValidateFunc: validateRdsIdentifier,
},
"endpoint": {
Type: schema.TypeString,
Computed: true,
},
"port": {
Type: schema.TypeInt,
Computed: true,
},
"rds_resource_id": {
Type: schema.TypeString,
Computed: true,
},
"target_arn": {
Type: schema.TypeString,
Computed: true,
},
"tracked_cluster_id": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func resourceAwsDbProxyTargetCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).rdsconn
dbProxyName := d.Get("db_proxy_name").(string)
targetGroupName := d.Get("target_group_name").(string)
params := rds.RegisterDBProxyTargetsInput{
DBProxyName: aws.String(dbProxyName),
TargetGroupName: aws.String(targetGroupName),
}
if v, ok := d.GetOk("db_instance_identifier"); ok {
params.DBInstanceIdentifiers = []*string{aws.String(v.(string))}
}
if v, ok := d.GetOk("db_cluster_identifier"); ok {
params.DBClusterIdentifiers = []*string{aws.String(v.(string))}
}
resp, err := conn.RegisterDBProxyTargets(¶ms)
if err != nil {
return fmt.Errorf("error registering RDS DB Proxy (%s/%s) Target: %w", dbProxyName, targetGroupName, err)
}
dbProxyTarget := resp.DBProxyTargets[0]
d.SetId(strings.Join([]string{dbProxyName, targetGroupName, aws.StringValue(dbProxyTarget.Type), aws.StringValue(dbProxyTarget.RdsResourceId)}, "/"))
return resourceAwsDbProxyTargetRead(d, meta)
}
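// resourceAwsDbProxyTargetParseID splits a resource ID of the form
// "db_proxy_name/target_group_name/type/rds_resource_id" into its parts.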
func resourceAwsDbProxyTargetParseID(id string) (string, string, string, string, error) {
idParts := strings.SplitN(id, "/", 4)
if len(idParts) != 4 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" {
return "", "", "", "", fmt.Errorf("unexpected format of ID (%s), expected db_proxy_name/target_group_name/type/id", id)
}
return idParts[0], idParts[1], idParts[2], idParts[3], nil
}
func resourceAwsDbProxyTargetRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).rdsconn
dbProxyName, targetGroupName, targetType, rdsResourceId, err := resourceAwsDbProxyTargetParseID(d.Id())
if err != nil {
return err
}
dbProxyTarget, err := finder.DBProxyTarget(conn, dbProxyName, targetGroupName, targetType, rdsResourceId)
if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBProxyNotFoundFault) {
log.Printf("[WARN] RDS DB Proxy Target (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBProxyTargetGroupNotFoundFault) {
log.Printf("[WARN] RDS DB Proxy Target (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
if err != nil {
return fmt.Errorf("error reading RDS DB Proxy Target (%s): %w", d.Id(), err)
}
if dbProxyTarget == nil {
log.Printf("[WARN] RDS DB Proxy Target (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
d.Set("db_proxy_name", dbProxyName)
d.Set("endpoint", dbProxyTarget.Endpoint)
d.Set("port", dbProxyTarget.Port)
d.Set("rds_resource_id", dbProxyTarget.RdsResourceId)
d.Set("target_arn", dbProxyTarget.TargetArn)<|fim▁hole|>
if aws.StringValue(dbProxyTarget.Type) == rds.TargetTypeRdsInstance {
d.Set("db_instance_identifier", dbProxyTarget.RdsResourceId)
} else {
d.Set("db_cluster_identifier", dbProxyTarget.RdsResourceId)
}
return nil
}
func resourceAwsDbProxyTargetDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).rdsconn
params := rds.DeregisterDBProxyTargetsInput{
DBProxyName: aws.String(d.Get("db_proxy_name").(string)),
TargetGroupName: aws.String(d.Get("target_group_name").(string)),
}
if v, ok := d.GetOk("db_instance_identifier"); ok {
params.DBInstanceIdentifiers = []*string{aws.String(v.(string))}
}
if v, ok := d.GetOk("db_cluster_identifier"); ok {
params.DBClusterIdentifiers = []*string{aws.String(v.(string))}
}
log.Printf("[DEBUG] Deregister DB Proxy target: %#v", params)
_, err := conn.DeregisterDBProxyTargets(¶ms)
if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBProxyNotFoundFault) {
return nil
}
if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBProxyTargetGroupNotFoundFault) {
return nil
}
if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBProxyTargetNotFoundFault) {
return nil
}
if err != nil {
return fmt.Errorf("Error deregistering DB Proxy target: %s", err)
}
return nil
}<|fim▁end|> | d.Set("target_group_name", targetGroupName)
d.Set("tracked_cluster_id", dbProxyTarget.TrackedClusterId)
d.Set("type", dbProxyTarget.Type) |
<|file_name|>tracklist.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-<|fim▁hole|>from __future__ import unicode_literals
from lxml import html
from tinydb import TinyDB, Query
import requests
from . import get_logger, CONFIG
class TracklistManager(object):
db = TinyDB(CONFIG["technical"]["tracklist-db"], indent=2, separators=(',', ': '))
get_logger().info("Starting tracklist manager with database at %s", CONFIG["technical"]["tracklist-db"])
@classmethod
def get_tracklist(cls, pid):
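        """Return the tracklist for `pid`, fetching it once and caching the result in TinyDB."""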
result = cls.db.get(Query().pid == pid)
if not result:
get_logger().debug("Getting tracklist for: %s", pid)
tracklist = Tracklist(pid).listing
cls.db.insert({"pid": pid, "tracklist": tracklist})
else:
tracklist = result["tracklist"]
return tracklist
class Tracklist(object):
def __init__(self, pid):
"""
See also https://github.com/StevenMaude/bbc_radio_tracklisting_downloader.
:param pid: the unique pid of the episode
"""
self.pid = pid
self.listing = []
url = "http://www.bbc.co.uk/programmes/{}/segments.inc".format(self.pid)
page = requests.get(url)
tree = html.fromstring(page.text)
for track in tree.xpath('//div[@class="segment__track"]'):
try:
artist_names = track.xpath('.//span[@property="byArtist"]//span[@class="artist"]/text()')
except ValueError:
artist_names = []
artist = ', '.join(artist_names)
try:
title, = track.xpath('.//p/span[@property="name"]/text()')
except ValueError:
title = ''
self.listing.append([artist, title])
def __repr__(self):
return "Tracklist[pid={self.pid}, len={amount}]".format(amount=len(self.listing), **locals())<|fim▁end|> | |
<|file_name|>get_defs.py<|end_file_name|><|fim▁begin|>import json
import sys<|fim▁hole|>for e in data.itervalues():
if e['senses'] and e['senses'][0]['definition']:
print u"{0}\t{1}".format(
e['hw'], e['senses'][0]['definition']['sen']).encode('utf-8')<|fim▁end|> |
data = json.load(sys.stdin) |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.contrib.auth import logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.http import *
from django.template import Template, Context
from django.shortcuts import render_to_response, redirect, render, RequestContext, HttpResponseRedirect
def login(request):
return render(request, 'login.html')
@login_required
def home(request):
u = request.user
return render_to_response("home.html", locals(), context_instance=RequestContext(request))
<|fim▁hole|><|fim▁end|> | def logout(request):
auth_logout(request)
return redirect('/') |
<|file_name|>PetRepositoryStubTest.java<|end_file_name|><|fim▁begin|>package com.softserve.app.repository;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.List;<|fim▁hole|>import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import com.softserve.app.model.Pet;
@RunWith(MockitoJUnitRunner.class)
public class PetRepositoryStubTest {
@Mock PetRepository stubM;
@Mock Pet petM;
@Test
public void testFindAllPetsWithMockStub() {
List<Pet> pets = stubM.findAllPets();
assertNotNull(pets);
}
@Test
public void testFindAllPetsWithRealStub() {
PetRepositoryStub stub = new PetRepositoryStub();
List<Pet> pets = (ArrayList<Pet>) stub.findAllPets();
assertNotNull(pets);
}
@Test
public void testFindPetWithRealStub() {
PetRepositoryStub stub = new PetRepositoryStub();
Pet pet = stub.findPet("101");
System.out.println(pet);
assertNotNull(pet);
}
@Test
public void testFindPetWithMockStub() {
when(stubM.findPet("10").getType()).thenReturn("dog");
assertEquals("dog", stubM.findPet("0").getType());
}
@Test(expected=RuntimeException.class)
public void testFindPetWithBadRequestWithRealStub() {
PetRepositoryStub stub = new PetRepositoryStub();
Pet pet = stub.findPet("0");
}
@Test
public void testCreate() {
PetRepositoryStub stub = new PetRepositoryStub();
Pet pet5 = new Pet();
pet5.setId("5");
pet5.setType("Dog");
System.out.println(pet5.getType());
stub.create(pet5);
System.out.println(stub.findPet("5").getType());
assertNotNull(stub.findPet("5").getType());
}
}<|fim▁end|> | |
<|file_name|>signed_url_builder.rs<|end_file_name|><|fim▁begin|>use crate::blob::generate_blob_uri;
use azure_sdk_core::{
BlobNameRequired, BlobNameSupport, ContainerNameRequired, ContainerNameSupport, No, ToAssign,
Yes,
};
use azure_sdk_storage_core::prelude::*;
use azure_sdk_storage_core::{
shared_access_signature::SharedAccessSignature, SharedAccessSignatureRequired,
SharedAccessSignatureSupport,
};
use std::marker::PhantomData;
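// Type-state builder: the `ContainerNameSet`, `BlobNameSet` and `SignatureSet`
// markers record at compile time which required values have been supplied, so
// `finalize()` is only available once all three are `Yes`.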
#[derive(Debug, Clone)]
pub struct SignedUrlBuilder<'a, C, ContainerNameSet, BlobNameSet, SignatureSet>
where
C: Client,
ContainerNameSet: ToAssign,
BlobNameSet: ToAssign,
{
client: &'a C,
p_container_name: PhantomData<ContainerNameSet>,
p_blob_name: PhantomData<BlobNameSet>,
p_signature: PhantomData<SignatureSet>,
container_name: Option<&'a str>,
blob_name: Option<&'a str>,
signature: Option<&'a SharedAccessSignature>,
}
impl<'a, C> SignedUrlBuilder<'a, C, No, No, No>
where
C: Client,
{
pub fn new(client: &'a C) -> Self {
Self {
client,
p_container_name: PhantomData {},
container_name: None,
p_blob_name: PhantomData {},
blob_name: None,
p_signature: PhantomData {},
signature: None,
}
}
}
impl<'a, C, ContainerNameSet, BlobNameSet, SignatureSet> ClientRequired<'a, C>
for SignedUrlBuilder<'a, C, ContainerNameSet, BlobNameSet, SignatureSet>
where
C: Client,
ContainerNameSet: ToAssign,
BlobNameSet: ToAssign,
SignatureSet: ToAssign,
{
#[inline]
fn client(&self) -> &'a C {
&self.client
}
}
impl<'a, C, BlobNameSet, SignatureSet> ContainerNameRequired<'a>
for SignedUrlBuilder<'a, C, Yes, BlobNameSet, SignatureSet>
where
C: Client,
BlobNameSet: ToAssign,
SignatureSet: ToAssign,
{
#[inline]
fn container_name(&self) -> &'a str {
self.container_name.unwrap()
}
}
impl<'a, C, ContainerNameSet, SignatureSet> BlobNameRequired<'a>
for SignedUrlBuilder<'a, C, ContainerNameSet, Yes, SignatureSet>
where
C: Client,
ContainerNameSet: ToAssign,
SignatureSet: ToAssign,
{
#[inline]
fn blob_name(&self) -> &'a str {
self.blob_name.unwrap()
}
}
impl<'a, C, ContainerNameSet, BlobNameSet> SharedAccessSignatureRequired<'a>
for SignedUrlBuilder<'a, C, ContainerNameSet, BlobNameSet, Yes>
where
C: Client,
ContainerNameSet: ToAssign,
BlobNameSet: ToAssign,
{
#[inline]
fn shared_access_signature(&self) -> &'a SharedAccessSignature {
self.signature.unwrap()
}
}
impl<'a, C, ContainerNameSet, BlobNameSet, SignatureSet> ContainerNameSupport<'a>
for SignedUrlBuilder<'a, C, ContainerNameSet, BlobNameSet, SignatureSet>
where
C: Client,
ContainerNameSet: ToAssign,
BlobNameSet: ToAssign,
SignatureSet: ToAssign,
{
type O = SignedUrlBuilder<'a, C, Yes, BlobNameSet, SignatureSet>;
#[inline]
fn with_container_name(self, container_name: &'a str) -> Self::O {
SignedUrlBuilder {
client: self.client,
p_container_name: PhantomData {},
p_blob_name: PhantomData {},
p_signature: PhantomData {},
container_name: Some(container_name),
blob_name: self.blob_name,
signature: self.signature,
}
}
}
impl<'a, C, ContainerNameSet, BlobNameSet, SignatureSet> BlobNameSupport<'a>
for SignedUrlBuilder<'a, C, ContainerNameSet, BlobNameSet, SignatureSet>
where
C: Client,
ContainerNameSet: ToAssign,
BlobNameSet: ToAssign,
SignatureSet: ToAssign,
{
type O = SignedUrlBuilder<'a, C, ContainerNameSet, Yes, SignatureSet>;
#[inline]
fn with_blob_name(self, blob_name: &'a str) -> Self::O {
SignedUrlBuilder {
client: self.client,
p_container_name: PhantomData {},
p_blob_name: PhantomData {},
p_signature: PhantomData {},
container_name: self.container_name,
blob_name: Some(blob_name),
signature: self.signature,
}
}
}
impl<'a, C, ContainerNameSet, BlobNameSet, SignatureSet> SharedAccessSignatureSupport<'a>
for SignedUrlBuilder<'a, C, ContainerNameSet, BlobNameSet, SignatureSet>
where
C: Client,
ContainerNameSet: ToAssign,
BlobNameSet: ToAssign,
SignatureSet: ToAssign,
{
type O = SignedUrlBuilder<'a, C, ContainerNameSet, BlobNameSet, Yes>;
#[inline]
fn with_shared_access_signature(self, signature: &'a SharedAccessSignature) -> Self::O {
SignedUrlBuilder {
client: self.client,
p_container_name: PhantomData {},<|fim▁hole|> signature: Some(signature),
}
}
}
impl<'a, C> SignedUrlBuilder<'a, C, Yes, Yes, Yes>
where
C: Client,
{
#[inline]
pub fn finalize(self) -> String {
generate_blob_uri(
self.client(),
self.container_name(),
self.blob_name(),
Some(&self.signature.unwrap().token()),
)
}
}<|fim▁end|> | p_blob_name: PhantomData {},
p_signature: PhantomData {},
container_name: self.container_name,
blob_name: self.blob_name, |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import
from django.utils.translation import ugettext_lazy as _
from common.utils import encapsulate
from documents.models import Document
from documents.permissions import (PERMISSION_DOCUMENT_NEW_VERSION,
PERMISSION_DOCUMENT_CREATE)
from navigation.api import register_links, register_model_list_columns
from project_setup.api import register_setup
from .staging import StagingFile
from .models import (WebForm, StagingFolder, SourceTransformation,
WatchFolder)
from .widgets import staging_file_thumbnail
from .permissions import (PERMISSION_SOURCES_SETUP_VIEW,
PERMISSION_SOURCES_SETUP_EDIT, PERMISSION_SOURCES_SETUP_DELETE,
PERMISSION_SOURCES_SETUP_CREATE)
document_create_multiple = {'text': _(u'upload new documents'), 'view': 'document_create_multiple', 'famfam': 'page_add', 'permissions': [PERMISSION_DOCUMENT_CREATE], 'children_view_regex': [r'upload_interactive']}
document_create_siblings = {'text': _(u'clone metadata'), 'view': 'document_create_siblings', 'args': 'object.id', 'famfam': 'page_copy', 'permissions': [PERMISSION_DOCUMENT_CREATE]}<|fim▁hole|>setup_sources = {'text': _(u'sources'), 'view': 'setup_web_form_list', 'famfam': 'application_form', 'icon': 'application_form.png', 'children_classes': [WebForm], 'permissions': [PERMISSION_SOURCES_SETUP_VIEW], 'children_view_regex': [r'setup_web_form', r'setup_staging_folder', r'setup_source_']}
setup_web_form_list = {'text': _(u'web forms'), 'view': 'setup_web_form_list', 'famfam': 'application_form', 'icon': 'application_form.png', 'children_classes': [WebForm], 'permissions': [PERMISSION_SOURCES_SETUP_VIEW]}
setup_staging_folder_list = {'text': _(u'staging folders'), 'view': 'setup_staging_folder_list', 'famfam': 'folder_camera', 'children_classes': [StagingFolder], 'permissions': [PERMISSION_SOURCES_SETUP_VIEW]}
setup_watch_folder_list = {'text': _(u'watch folders'), 'view': 'setup_watch_folder_list', 'famfam': 'folder_magnify', 'children_classes': [WatchFolder], 'permissions': [PERMISSION_SOURCES_SETUP_VIEW]}
setup_source_edit = {'text': _(u'edit'), 'view': 'setup_source_edit', 'args': ['source.source_type', 'source.pk'], 'famfam': 'application_form_edit', 'permissions': [PERMISSION_SOURCES_SETUP_EDIT]}
setup_source_delete = {'text': _(u'delete'), 'view': 'setup_source_delete', 'args': ['source.source_type', 'source.pk'], 'famfam': 'application_form_delete', 'permissions': [PERMISSION_SOURCES_SETUP_DELETE]}
setup_source_create = {'text': _(u'add new source'), 'view': 'setup_source_create', 'args': 'source_type', 'famfam': 'application_form_add', 'permissions': [PERMISSION_SOURCES_SETUP_CREATE]}
setup_source_transformation_list = {'text': _(u'transformations'), 'view': 'setup_source_transformation_list', 'args': ['source.source_type', 'source.pk'], 'famfam': 'shape_move_front', 'permissions': [PERMISSION_SOURCES_SETUP_EDIT]}
setup_source_transformation_create = {'text': _(u'add transformation'), 'view': 'setup_source_transformation_create', 'args': ['source.source_type', 'source.pk'], 'famfam': 'shape_square_add', 'permissions': [PERMISSION_SOURCES_SETUP_EDIT]}
setup_source_transformation_edit = {'text': _(u'edit'), 'view': 'setup_source_transformation_edit', 'args': 'transformation.pk', 'famfam': 'shape_square_edit', 'permissions': [PERMISSION_SOURCES_SETUP_EDIT]}
setup_source_transformation_delete = {'text': _(u'delete'), 'view': 'setup_source_transformation_delete', 'args': 'transformation.pk', 'famfam': 'shape_square_delete', 'permissions': [PERMISSION_SOURCES_SETUP_EDIT]}
source_list = {'text': _(u'Document sources'), 'view': 'setup_web_form_list', 'famfam': 'page_add', 'children_url_regex': [r'sources/setup'], 'permissions': [PERMISSION_SOURCES_SETUP_VIEW]}
upload_version = {'text': _(u'upload new version'), 'view': 'upload_version', 'args': 'object.pk', 'famfam': 'page_add', 'permissions': [PERMISSION_DOCUMENT_NEW_VERSION]}
register_links(StagingFile, [staging_file_delete])
register_links(SourceTransformation, [setup_source_transformation_edit, setup_source_transformation_delete])
#register_links(['setup_web_form_list', 'setup_staging_folder_list', 'setup_watch_folder_list', 'setup_source_create'], [setup_web_form_list, setup_staging_folder_list, setup_watch_folder_list], menu_name='form_header')
register_links(['setup_web_form_list', 'setup_staging_folder_list', 'setup_watch_folder_list', 'setup_source_create'], [setup_web_form_list, setup_staging_folder_list], menu_name='form_header')
#register_links(WebForm, [setup_web_form_list, setup_staging_folder_list, setup_watch_folder_list], menu_name='form_header')
register_links(WebForm, [setup_web_form_list, setup_staging_folder_list], menu_name='form_header')
register_links(WebForm, [setup_source_transformation_list, setup_source_edit, setup_source_delete])
register_links(['setup_web_form_list', 'setup_staging_folder_list', 'setup_watch_folder_list', 'setup_source_edit', 'setup_source_delete', 'setup_source_create'], [setup_sources, setup_source_create], menu_name='sidebar')
#register_links(StagingFolder, [setup_web_form_list, setup_staging_folder_list, setup_watch_folder_list], menu_name='form_header')
register_links(StagingFolder, [setup_web_form_list, setup_staging_folder_list], menu_name='form_header')
register_links(StagingFolder, [setup_source_transformation_list, setup_source_edit, setup_source_delete])
register_links(WatchFolder, [setup_web_form_list, setup_staging_folder_list, setup_watch_folder_list], menu_name='form_header')
register_links(WatchFolder, [setup_source_transformation_list, setup_source_edit, setup_source_delete])
# Document version
register_links(['document_version_list', 'upload_version', 'document_version_revert'], [upload_version], menu_name='sidebar')
register_links(['setup_source_transformation_create', 'setup_source_transformation_edit', 'setup_source_transformation_delete', 'setup_source_transformation_list'], [setup_source_transformation_create], menu_name='sidebar')
source_views = ['setup_web_form_list', 'setup_staging_folder_list', 'setup_watch_folder_list', 'setup_source_edit', 'setup_source_delete', 'setup_source_create', 'setup_source_transformation_list', 'setup_source_transformation_edit', 'setup_source_transformation_delete', 'setup_source_transformation_create']
register_model_list_columns(StagingFile, [
{'name':_(u'thumbnail'), 'attribute':
encapsulate(lambda x: staging_file_thumbnail(x))
},
])
register_setup(setup_sources)
register_links(['document_list_recent', 'document_list', 'document_create', 'document_create_multiple', 'upload_interactive', 'staging_file_delete'], [document_create_multiple], menu_name='secondary_menu')
register_links(Document, [document_create_siblings])<|fim▁end|> |
staging_file_preview = {'text': _(u'preview'), 'class': 'fancybox-noscaling', 'view': 'staging_file_preview', 'args': ['source.source_type', 'source.pk', 'object.id'], 'famfam': 'zoom', 'permissions': [PERMISSION_DOCUMENT_NEW_VERSION, PERMISSION_DOCUMENT_CREATE]}
staging_file_delete = {'text': _(u'delete'), 'view': 'staging_file_delete', 'args': ['source.source_type', 'source.pk', 'object.id'], 'famfam': 'delete', 'keep_query': True, 'permissions': [PERMISSION_DOCUMENT_NEW_VERSION, PERMISSION_DOCUMENT_CREATE]}
|
<|file_name|>person.ts<|end_file_name|><|fim▁begin|>export interface Person {
id: number;<|fim▁hole|> weight: number;
profession?: string;
}<|fim▁end|> | name: string;
height: number; |
<|file_name|>gdaldither.cpp<|end_file_name|><|fim▁begin|>/******************************************************************************
* $Id: gdaldither.cpp 33715 2016-03-13 08:52:06Z goatbar $
*
* Project: CIETMap Phase 2
* Purpose: Convert RGB (24bit) to a pseudo-colored approximation using
* Floyd-Steinberg dithering (error diffusion).
* Author: Frank Warmerdam, [email protected]
*
******************************************************************************
* Copyright (c) 2001, Frank Warmerdam
* Copyright (c) 2007, Even Rouault <even dot rouault at mines-paris dot org>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
******************************************************************************
*
* Notes:
*
* [1] Floyd-Steinberg dither:
* I should point out that the actual fractions we used were, assuming
* you are at X, moving left to right:
*
* X 7/16
* 3/16 5/16 1/16
*
* Note that the error goes to four neighbors, not three. I think this
* will probably do better (at least for black and white) than the
* 3/8-3/8-1/4 distribution, at the cost of greater processing. I have
* seen the 3/8-3/8-1/4 distribution described as "our" algorithm before,
* but I have no idea who the credit really belongs to.
* --
* Lou Steinberg
*
*/
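/*
 * Illustrative sketch (not part of the original GDAL source): the classic
 * 7/16, 3/16, 5/16, 1/16 weights described above would be applied around
 * the current pixel roughly as follows, assuming a per-channel error
 * buffer; the names here are illustrative only.
 *
 * void DistributeError( int nErr, int i, int *panThisRow, int *panNextRow )
 * {
 * panThisRow[i+1] += nErr * 7 / 16; // right
 * panNextRow[i-1] += nErr * 3 / 16; // below-left
 * panNextRow[i ] += nErr * 5 / 16; // below
 * panNextRow[i+1] += nErr * 1 / 16; // below-right
 * }
 *
 * The implementation below approximates these weights in sixths of the
 * error (see the nSixth terms in GDALDitherRGB2PCTInternal()).
 */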
#include "gdal_priv.h"
#include "gdal_alg.h"
#include "gdal_alg_priv.h"
#if defined(__x86_64) || defined(_M_X64)
#define USE_SSE2
#endif
#ifdef USE_SSE2
#include <emmintrin.h>
#define CAST_PCT(x) ((GByte*)x)
#define ALIGN_INT_ARRAY_ON_16_BYTE(x) ( (((GPtrDiff_t)(x) % 16) != 0 ) ? (int*)((GByte*)(x) + 16 - ((GPtrDiff_t)(x) % 16)) : (x) )
#else
#define CAST_PCT(x) x
#endif
#define MAKE_COLOR_CODE(r,g,b) ((r)|((g)<<8)|((b)<<16))
CPL_CVSID("$Id: gdaldither.cpp 33715 2016-03-13 08:52:06Z goatbar $");
static void FindNearestColor( int nColors, int *panPCT, GByte *pabyColorMap,
int nCLevels );
static int FindNearestColor( int nColors, int *panPCT,
int nRedValue, int nGreenValue, int nBlueValue );
/* Structure for a hashmap from a color code to a color index of the color table */
/* NOTE: if changing the size of this structure, edit */
/* MEDIAN_CUT_AND_DITHER_BUFFER_SIZE_65536 in gdal_alg_priv.h and take into account HashHistogram in gdalmediancut.cpp */
typedef struct
{
GUInt32 nColorCode;
GUInt32 nColorCode2;
GUInt32 nColorCode3;
GByte nIndex;
GByte nIndex2;
GByte nIndex3;
GByte nPadding;
} ColorIndex;
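/*
 * Illustrative note (not part of the original source): an RGB triplet is
 * packed into a 24 bit code by MAKE_COLOR_CODE, e.g.
 * MAKE_COLOR_CODE(10, 20, 30) == 10 | (20 << 8) | (30 << 16) == 0x1E140A.
 * That code is hashed into a table of ColorIndex slots with
 * nColorCode % PRIME_FOR_65536 (PRIME_FOR_65536 is defined in
 * gdal_alg_priv.h); on collision the lookup below probes forward in steps
 * of 257, wrapping modulo PRIME_FOR_65536. Each slot caches up to three
 * distinct color codes and their palette indices (nIndex .. nIndex3).
 */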
/************************************************************************/
/* GDALDitherRGB2PCT() */
/************************************************************************/
/**
* 24bit to 8bit conversion with dithering.
*
* This function utilizes Floyd-Steinberg dithering in the process of
* converting a 24bit RGB image into a pseudocolored 8bit image using a
* provided color table.
*
* The red, green and blue input bands do not necessarily need to come
* from the same file, but they must be the same width and height. They will
* be clipped to 8bit during reading, so non-eight bit bands are generally
* inappropriate. Likewise the hTarget band will be written with 8bit values
* and must match the width and height of the source bands.
*
* The color table cannot have more than 256 entries.
*
* @param hRed Red input band.
* @param hGreen Green input band.
* @param hBlue Blue input band.
* @param hTarget Output band.
* @param hColorTable the color table to use with the output band.
* @param pfnProgress callback for reporting algorithm progress matching the
* GDALProgressFunc() semantics. May be NULL.
* @param pProgressArg callback argument passed to pfnProgress.
*
* @return CE_None on success or CE_Failure if an error occurs.
*/
int CPL_STDCALL
GDALDitherRGB2PCT( GDALRasterBandH hRed,
GDALRasterBandH hGreen,
GDALRasterBandH hBlue,
GDALRasterBandH hTarget,
GDALColorTableH hColorTable,
GDALProgressFunc pfnProgress,
void * pProgressArg )
{
return GDALDitherRGB2PCTInternal( hRed, hGreen, hBlue, hTarget,
hColorTable, 5, NULL, TRUE,
pfnProgress, pProgressArg );
}
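/*
 * Minimal usage sketch for the function documented above (illustrative
 * only, not part of the original source): it assumes an already opened
 * RGB dataset handle hDS, a pre-created 8 bit target band hTargetBand and
 * a populated color table hCT; error checking is omitted.
 *
 * GDALRasterBandH hR = GDALGetRasterBand( hDS, 1 );
 * GDALRasterBandH hG = GDALGetRasterBand( hDS, 2 );
 * GDALRasterBandH hB = GDALGetRasterBand( hDS, 3 );
 * int nErr = GDALDitherRGB2PCT( hR, hG, hB, hTargetBand, hCT,
 * GDALDummyProgress, NULL );
 */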
int GDALDitherRGB2PCTInternal( GDALRasterBandH hRed,
GDALRasterBandH hGreen,
GDALRasterBandH hBlue,
GDALRasterBandH hTarget,
GDALColorTableH hColorTable,
int nBits,
GInt16* pasDynamicColorMap, /* NULL or at least 256 * 256 * 256 * sizeof(GInt16) bytes */
int bDither,
GDALProgressFunc pfnProgress,
void * pProgressArg )
{
VALIDATE_POINTER1( hRed, "GDALDitherRGB2PCT", CE_Failure );
VALIDATE_POINTER1( hGreen, "GDALDitherRGB2PCT", CE_Failure );
VALIDATE_POINTER1( hBlue, "GDALDitherRGB2PCT", CE_Failure );
VALIDATE_POINTER1( hTarget, "GDALDitherRGB2PCT", CE_Failure );
VALIDATE_POINTER1( hColorTable, "GDALDitherRGB2PCT", CE_Failure );
int nXSize, nYSize;
CPLErr err = CE_None;
/* -------------------------------------------------------------------- */
/* Validate parameters. */
/* -------------------------------------------------------------------- */
nXSize = GDALGetRasterBandXSize( hRed );
nYSize = GDALGetRasterBandYSize( hRed );
if( GDALGetRasterBandXSize( hGreen ) != nXSize
|| GDALGetRasterBandYSize( hGreen ) != nYSize
|| GDALGetRasterBandXSize( hBlue ) != nXSize
|| GDALGetRasterBandYSize( hBlue ) != nYSize )
{
CPLError( CE_Failure, CPLE_IllegalArg,
"Green or blue band doesn't match size of red band.\n" );
return CE_Failure;
}
if( GDALGetRasterBandXSize( hTarget ) != nXSize
|| GDALGetRasterBandYSize( hTarget ) != nYSize )
{
CPLError( CE_Failure, CPLE_IllegalArg,
"GDALDitherRGB2PCT(): "
"Target band doesn't match size of source bands.\n" );
return CE_Failure;
}
if( pfnProgress == NULL )
pfnProgress = GDALDummyProgress;
/* -------------------------------------------------------------------- */
/* Setup more direct colormap. */
/* -------------------------------------------------------------------- */
int nColors, iColor;
#ifdef USE_SSE2
int anPCTUnaligned[256+4]; /* 4 for alignment on 16-byte boundary */
int* anPCT = ALIGN_INT_ARRAY_ON_16_BYTE(anPCTUnaligned);
#else
int anPCT[256*4];
#endif
nColors = GDALGetColorEntryCount( hColorTable );
if (nColors == 0 )
{
CPLError( CE_Failure, CPLE_IllegalArg,
"GDALDitherRGB2PCT(): "
"Color table must not be empty.\n" );
return CE_Failure;
}
else if (nColors > 256)
{
CPLError( CE_Failure, CPLE_IllegalArg,
"GDALDitherRGB2PCT(): "
"Color table cannot have more than 256 entries.\n" );
return CE_Failure;
}
iColor = 0;
do
{
GDALColorEntry sEntry;
GDALGetColorEntryAsRGB( hColorTable, iColor, &sEntry );
CAST_PCT(anPCT)[4*iColor+0] = static_cast<GByte>(sEntry.c1);
CAST_PCT(anPCT)[4*iColor+1] = static_cast<GByte>(sEntry.c2);
CAST_PCT(anPCT)[4*iColor+2] = static_cast<GByte>(sEntry.c3);
CAST_PCT(anPCT)[4*iColor+3] = 0;
iColor ++;
} while( iColor < nColors );
#ifdef USE_SSE2
/* Pad to multiple of 8 colors */
int nColorsMod8 = nColors % 8;
if( nColorsMod8 )
{
for( iColor = 0; iColor < 8 - nColorsMod8; iColor ++)
{
anPCT[nColors+iColor] = anPCT[nColors-1];
}
}
#endif
/* -------------------------------------------------------------------- */
/* Setup various variables. */
/* -------------------------------------------------------------------- */
GByte *pabyRed, *pabyGreen, *pabyBlue, *pabyIndex;
GByte *pabyColorMap = NULL;
int *panError;
int nCLevels = 1 << nBits;
ColorIndex* psColorIndexMap = NULL;
pabyRed = (GByte *) VSI_MALLOC_VERBOSE(nXSize);
pabyGreen = (GByte *) VSI_MALLOC_VERBOSE(nXSize);
pabyBlue = (GByte *) VSI_MALLOC_VERBOSE(nXSize);
pabyIndex = (GByte *) VSI_MALLOC_VERBOSE(nXSize);
panError = (int *) VSI_CALLOC_VERBOSE(sizeof(int),(nXSize+2) * 3);
if (pabyRed == NULL ||
pabyGreen == NULL ||
pabyBlue == NULL ||
pabyIndex == NULL ||
panError == NULL)
{
err = CE_Failure;
goto end_and_cleanup;
}
if( pasDynamicColorMap == NULL )
{
/* -------------------------------------------------------------------- */
/* Build a 24bit to 8 bit color mapping. */
/* -------------------------------------------------------------------- */
pabyColorMap = (GByte *) VSI_MALLOC_VERBOSE(nCLevels * nCLevels * nCLevels
* sizeof(GByte));
if( pabyColorMap == NULL )
{
err = CE_Failure;
goto end_and_cleanup;
}
FindNearestColor( nColors, anPCT, pabyColorMap, nCLevels);
}
else
{
pabyColorMap = NULL;
if( nBits == 8 && (GIntBig)nXSize * nYSize <= 65536 )
{
/* If the image is small enough, then the number of colors */
/* will be limited and using a hashmap, rather than a full table */
/* will be more efficient */
psColorIndexMap = (ColorIndex*)pasDynamicColorMap;
memset(psColorIndexMap, 0xFF, sizeof(ColorIndex) * PRIME_FOR_65536);
}
else
{
memset(pasDynamicColorMap, 0xFF, 256 * 256 * 256 * sizeof(GInt16));
}
}
/* ==================================================================== */
/* Loop over all scanlines of data to process. */
/* ==================================================================== */
int iScanline;
for( iScanline = 0; iScanline < nYSize; iScanline++ )
{
int nLastRedError, nLastGreenError, nLastBlueError, i;
/* -------------------------------------------------------------------- */
/* Report progress */
/* -------------------------------------------------------------------- */
if( !pfnProgress( iScanline / (double) nYSize, NULL, pProgressArg ) )
{
CPLError( CE_Failure, CPLE_UserInterrupt, "User Terminated" );
err = CE_Failure;
goto end_and_cleanup;
}
/* -------------------------------------------------------------------- */
/* Read source data. */
/* -------------------------------------------------------------------- */
err = GDALRasterIO( hRed, GF_Read, 0, iScanline, nXSize, 1,
pabyRed, nXSize, 1, GDT_Byte, 0, 0 );
if( err == CE_None )
err = GDALRasterIO( hGreen, GF_Read, 0, iScanline, nXSize, 1,
pabyGreen, nXSize, 1, GDT_Byte, 0, 0 );
if( err == CE_None )
err = GDALRasterIO( hBlue, GF_Read, 0, iScanline, nXSize, 1,
pabyBlue, nXSize, 1, GDT_Byte, 0, 0 );
if( err != CE_None )
goto end_and_cleanup;
/* -------------------------------------------------------------------- */
/* Apply the error from the previous line to this one. */
/* -------------------------------------------------------------------- */
if( bDither )
{
for( i = 0; i < nXSize; i++ )
{
pabyRed[i] = (GByte)
MAX(0,MIN(255,(pabyRed[i] + panError[i*3+0+3])));
pabyGreen[i] = (GByte)
MAX(0,MIN(255,(pabyGreen[i] + panError[i*3+1+3])));
pabyBlue[i] = (GByte)
MAX(0,MIN(255,(pabyBlue[i] + panError[i*3+2+3])));
}
memset( panError, 0, sizeof(int) * (nXSize+2) * 3 );
}
/* -------------------------------------------------------------------- */
/* Figure out the nearest color to the RGB value. */
/* -------------------------------------------------------------------- */
nLastRedError = 0;
nLastGreenError = 0;
nLastBlueError = 0;
for( i = 0; i < nXSize; i++ )
{
int iIndex, nError, nSixth;
int nRedValue, nGreenValue, nBlueValue;
nRedValue = MAX(0,MIN(255, pabyRed[i] + nLastRedError));
nGreenValue = MAX(0,MIN(255, pabyGreen[i] + nLastGreenError));
nBlueValue = MAX(0,MIN(255, pabyBlue[i] + nLastBlueError));
if( psColorIndexMap )
{
GUInt32 nColorCode = MAKE_COLOR_CODE(nRedValue, nGreenValue, nBlueValue);
GUInt32 nIdx = nColorCode % PRIME_FOR_65536;
//int nCollisions = 0;
//static int nMaxCollisions = 0;
while( true )
{
if( psColorIndexMap[nIdx].nColorCode == nColorCode )
{
iIndex = psColorIndexMap[nIdx].nIndex;
break;
}
if( (int)psColorIndexMap[nIdx].nColorCode < 0 )
{
psColorIndexMap[nIdx].nColorCode = nColorCode;
iIndex = FindNearestColor( nColors, anPCT,
nRedValue, nGreenValue, nBlueValue );
psColorIndexMap[nIdx].nIndex = (GByte) iIndex;
break;
}
if( psColorIndexMap[nIdx].nColorCode2 == nColorCode )
{
iIndex = psColorIndexMap[nIdx].nIndex2;
break;
}
if( (int)psColorIndexMap[nIdx].nColorCode2 < 0 )
{
psColorIndexMap[nIdx].nColorCode2 = nColorCode;
iIndex = FindNearestColor( nColors, anPCT,
nRedValue, nGreenValue, nBlueValue );
psColorIndexMap[nIdx].nIndex2 = (GByte) iIndex;
break;
}
if( psColorIndexMap[nIdx].nColorCode3 == nColorCode )
{
iIndex = psColorIndexMap[nIdx].nIndex3;
break;
}
if( (int)psColorIndexMap[nIdx].nColorCode3 < 0 )
{
psColorIndexMap[nIdx].nColorCode3 = nColorCode;
iIndex = FindNearestColor( nColors, anPCT,
nRedValue, nGreenValue, nBlueValue );
psColorIndexMap[nIdx].nIndex3 = (GByte) iIndex;
break;
}
do
{
//nCollisions ++;
nIdx+=257;
if( nIdx >= PRIME_FOR_65536 )
nIdx -= PRIME_FOR_65536;
}
while( (int)psColorIndexMap[nIdx].nColorCode >= 0 &&
psColorIndexMap[nIdx].nColorCode != nColorCode &&
(int)psColorIndexMap[nIdx].nColorCode2 >= 0 &&
psColorIndexMap[nIdx].nColorCode2 != nColorCode&&
(int)psColorIndexMap[nIdx].nColorCode3 >= 0 &&
psColorIndexMap[nIdx].nColorCode3 != nColorCode );
/*if( nCollisions > nMaxCollisions )
{
nMaxCollisions = nCollisions;
printf("nCollisions = %d for R=%d,G=%d,B=%d\n",
nCollisions, nRedValue, nGreenValue, nBlueValue);
}*/
}
}
else if( pasDynamicColorMap == NULL )
{
int iRed = nRedValue * nCLevels / 256;
int iGreen = nGreenValue * nCLevels / 256;
int iBlue = nBlueValue * nCLevels / 256;
iIndex = pabyColorMap[iRed + iGreen * nCLevels
+ iBlue * nCLevels * nCLevels];
}
else
{
GUInt32 nColorCode = MAKE_COLOR_CODE(nRedValue, nGreenValue, nBlueValue);
GInt16* psIndex = &pasDynamicColorMap[nColorCode];
if( *psIndex < 0 )
iIndex = *psIndex = static_cast<GInt16>(FindNearestColor( nColors, anPCT,
nRedValue,
nGreenValue,
nBlueValue ));
else
iIndex = *psIndex;
}
pabyIndex[i] = (GByte) iIndex;
if( !bDither )
continue;
/* -------------------------------------------------------------------- */
/* Compute Red error, and carry it on to the next error line. */
/* -------------------------------------------------------------------- */
nError = nRedValue - CAST_PCT(anPCT)[4*iIndex+0];
nSixth = nError / 6;
panError[i*3 ] += nSixth;
panError[i*3+6 ] = nSixth;
panError[i*3+3 ] += nError - 5 * nSixth;
nLastRedError = 2 * nSixth;
/* -------------------------------------------------------------------- */
/* Compute Green error, and carry it on to the next error line. */
/* -------------------------------------------------------------------- */
nError = nGreenValue - CAST_PCT(anPCT)[4*iIndex+1];
nSixth = nError / 6;
panError[i*3 +1] += nSixth;
panError[i*3+6+1] = nSixth;
panError[i*3+3+1] += nError - 5 * nSixth;
nLastGreenError = 2 * nSixth;
/* -------------------------------------------------------------------- */
/* Compute Blue error, and carry it on to the next error line. */
/* -------------------------------------------------------------------- */
nError = nBlueValue - CAST_PCT(anPCT)[4*iIndex+2];
nSixth = nError / 6;
panError[i*3 +2] += nSixth;
panError[i*3+6+2] = nSixth;
panError[i*3+3+2] += nError - 5 * nSixth;
nLastBlueError = 2 * nSixth;
}
/* -------------------------------------------------------------------- */
/* Write results. */
/* -------------------------------------------------------------------- */
err = GDALRasterIO( hTarget, GF_Write, 0, iScanline, nXSize, 1,
pabyIndex, nXSize, 1, GDT_Byte, 0, 0 );
if( err != CE_None )
break;
}
pfnProgress( 1.0, NULL, pProgressArg );
/* -------------------------------------------------------------------- */
/* Cleanup */
/* -------------------------------------------------------------------- */
end_and_cleanup:
CPLFree( pabyRed );
CPLFree( pabyGreen );
CPLFree( pabyBlue );
CPLFree( pabyIndex );
CPLFree( panError );
CPLFree( pabyColorMap );
return err;
}
static int FindNearestColor( int nColors, int *panPCT,
int nRedValue, int nGreenValue, int nBlueValue )
{
#ifdef USE_SSE2
int iColor;
int nBestDist = 768, nBestIndex = 0;
int anDistanceUnaligned[16+4]; /* 4 for alignment on 16-byte boundary */
int* anDistance = ALIGN_INT_ARRAY_ON_16_BYTE(anDistanceUnaligned);
const __m128i ff = _mm_set1_epi32(0xFFFFFFFF);
const __m128i mask_low = _mm_srli_epi64(ff, 32);
const __m128i mask_high = _mm_slli_epi64(ff, 32);
unsigned int nColorVal = MAKE_COLOR_CODE(nRedValue, nGreenValue, nBlueValue);
const __m128i thisColor = _mm_set1_epi32(nColorVal);
const __m128i thisColor_low = _mm_srli_epi64(thisColor, 32);
const __m128i thisColor_high = _mm_slli_epi64(thisColor, 32);
for( iColor = 0; iColor < nColors; iColor+=8 )
{
__m128i pctColor = _mm_load_si128((__m128i*)&panPCT[iColor]);
__m128i pctColor2 = _mm_load_si128((__m128i*)&panPCT[iColor+4]);
_mm_store_si128((__m128i*)anDistance,
_mm_sad_epu8(_mm_and_si128(pctColor,mask_low),thisColor_low));
_mm_store_si128((__m128i*)(anDistance+4),
_mm_sad_epu8(_mm_and_si128(pctColor,mask_high),thisColor_high));
_mm_store_si128((__m128i*)(anDistance+8),
_mm_sad_epu8(_mm_and_si128(pctColor2,mask_low),thisColor_low));
_mm_store_si128((__m128i*)(anDistance+12),
_mm_sad_epu8(_mm_and_si128(pctColor2,mask_high),thisColor_high));
if( anDistance[0] < nBestDist )
{
nBestIndex = iColor;
nBestDist = anDistance[0];
}
if( anDistance[4] < nBestDist )
{
nBestIndex = iColor+1;
nBestDist = anDistance[4];
}
if( anDistance[2] < nBestDist )
{
nBestIndex = iColor+2;
nBestDist = anDistance[2];
}
if( anDistance[6] < nBestDist )
{
nBestIndex = iColor+3;
nBestDist = anDistance[6];
}
if( anDistance[8+0] < nBestDist )
{
nBestIndex = iColor+4;
nBestDist = anDistance[8+0];
}
if( anDistance[8+4] < nBestDist )
{
nBestIndex = iColor+4+1;
nBestDist = anDistance[8+4];
}
if( anDistance[8+2] < nBestDist )
{
nBestIndex = iColor+4+2;
nBestDist = anDistance[8+2];
}
if( anDistance[8+6] < nBestDist )
{
nBestIndex = iColor+4+3;<|fim▁hole|> }
return nBestIndex;
#else
int iColor;
int nBestDist = 768, nBestIndex = 0;
for( iColor = 0; iColor < nColors; iColor++ )
{
int nThisDist;
nThisDist = ABS(nRedValue - panPCT[4*iColor+0])
+ ABS(nGreenValue - panPCT[4*iColor+1])
+ ABS(nBlueValue - panPCT[4*iColor+2]);
if( nThisDist < nBestDist )
{
nBestIndex = iColor;
nBestDist = nThisDist;
}
}
return nBestIndex;
#endif
}
/************************************************************************/
/* FindNearestColor() */
/* */
/* Find nearest PCT color for any RGB color. */
/************************************************************************/
static void FindNearestColor( int nColors, int *panPCT, GByte *pabyColorMap,
int nCLevels )
{
int iBlue, iGreen, iRed;
/* -------------------------------------------------------------------- */
/* Loop over all the cells in the high density cube. */
/* -------------------------------------------------------------------- */
for( iBlue = 0; iBlue < nCLevels; iBlue++ )
{
for( iGreen = 0; iGreen < nCLevels; iGreen++ )
{
for( iRed = 0; iRed < nCLevels; iRed++ )
{
int nRedValue, nGreenValue, nBlueValue;
nRedValue = (iRed * 255) / (nCLevels-1);
nGreenValue = (iGreen * 255) / (nCLevels-1);
nBlueValue = (iBlue * 255) / (nCLevels-1);
int nBestIndex = FindNearestColor( nColors, panPCT,
nRedValue, nGreenValue, nBlueValue );
pabyColorMap[iRed + iGreen*nCLevels
+ iBlue*nCLevels*nCLevels] = (GByte)nBestIndex;
}
}
}
}<|fim▁end|> | nBestDist = anDistance[8+6];
} |
<|file_name|>jmx.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.cluster.remoteaccount import RemoteCommandError
from ducktape.utils.util import wait_until
class JmxMixin(object):
"""This mixin helps existing service subclasses start JmxTool on their worker nodes and collect jmx stats.
A couple things worth noting:
- this is not a service in its own right.<|fim▁hole|> self.jmx_object_names = jmx_object_names
self.jmx_attributes = jmx_attributes or []
self.jmx_port = 9192
self.started = [False] * num_nodes
self.jmx_stats = [{} for x in range(num_nodes)]
self.maximum_jmx_value = {} # map from object_attribute_name to maximum value observed over time
self.average_jmx_value = {} # map from object_attribute_name to average value observed over time
self.jmx_tool_log = "/mnt/jmx_tool.log"
self.jmx_tool_err_log = "/mnt/jmx_tool.err.log"
def clean_node(self, node):
node.account.kill_process("jmx", clean_shutdown=False, allow_fail=True)
node.account.ssh("rm -rf %s" % self.jmx_tool_log, allow_fail=False)
def start_jmx_tool(self, idx, node):
if self.jmx_object_names is None:
self.logger.debug("%s: Not starting jmx tool because no jmx objects are defined" % node.account)
return
if self.started[idx-1]:
self.logger.debug("%s: jmx tool has been started already on this node" % node.account)
return
cmd = "%s kafka.tools.JmxTool " % self.path.script("kafka-run-class.sh", node)
cmd += "--reporting-interval 1000 --jmx-url service:jmx:rmi:///jndi/rmi://127.0.0.1:%d/jmxrmi" % self.jmx_port
for jmx_object_name in self.jmx_object_names:
cmd += " --object-name %s" % jmx_object_name
for jmx_attribute in self.jmx_attributes:
cmd += " --attributes %s" % jmx_attribute
cmd += " 1>> %s" % self.jmx_tool_log
cmd += " 2>> %s &" % self.jmx_tool_err_log
self.logger.debug("%s: Start JmxTool %d command: %s" % (node.account, idx, cmd))
node.account.ssh(cmd, allow_fail=False)
wait_until(lambda: self._jmx_has_output(node), timeout_sec=10, backoff_sec=.5, err_msg="%s: Jmx tool took too long to start" % node.account)
self.started[idx-1] = True
def _jmx_has_output(self, node):
"""Helper used as a proxy to determine whether jmx is running by that jmx_tool_log contains output."""
try:
node.account.ssh("test -z \"$(cat %s)\"" % self.jmx_tool_log, allow_fail=False)
return False
except RemoteCommandError:
return True
def read_jmx_output(self, idx, node):
if not self.started[idx-1]:
return
object_attribute_names = []
cmd = "cat %s" % self.jmx_tool_log
self.logger.debug("Read jmx output %d command: %s", idx, cmd)
lines = [line for line in node.account.ssh_capture(cmd, allow_fail=False)]
assert len(lines) > 1, "There don't appear to be any samples in the jmx tool log: %s" % lines
for line in lines:
if "time" in line:
object_attribute_names = line.strip()[1:-1].split("\",\"")[1:]
continue
stats = [float(field) for field in line.split(',')]
time_sec = int(stats[0]/1000)
self.jmx_stats[idx-1][time_sec] = {name: stats[i+1] for i, name in enumerate(object_attribute_names)}
# do not calculate average and maximum of jmx stats until we have read output from all nodes
# If the service is multithreaded, this means that the results will be aggregated only when the last
# service finishes
if any(len(time_to_stats) == 0 for time_to_stats in self.jmx_stats):
return
start_time_sec = min([min(time_to_stats.keys()) for time_to_stats in self.jmx_stats])
end_time_sec = max([max(time_to_stats.keys()) for time_to_stats in self.jmx_stats])
for name in object_attribute_names:
aggregates_per_time = []
for time_sec in xrange(start_time_sec, end_time_sec + 1):
# assume that value is 0 if it is not read by jmx tool at the given time. This is appropriate for metrics such as bandwidth
values_per_node = [time_to_stats.get(time_sec, {}).get(name, 0) for time_to_stats in self.jmx_stats]
# assume that value is aggregated across nodes by sum. This is appropriate for metrics such as bandwidth
aggregates_per_time.append(sum(values_per_node))
self.average_jmx_value[name] = sum(aggregates_per_time) / len(aggregates_per_time)
self.maximum_jmx_value[name] = max(aggregates_per_time)
def read_jmx_output_all_nodes(self):
for node in self.nodes:
self.read_jmx_output(self.idx(node), node)<|fim▁end|> | - we assume the service using JmxMixin also uses KafkaPathResolverMixin
"""
def __init__(self, num_nodes, jmx_object_names=None, jmx_attributes=None): |
<|file_name|>augment.py<|end_file_name|><|fim▁begin|># -*- test-case-name: txdav.who.test.test_augment -*-
##
# Copyright (c) 2013-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Augmenting Directory Service
"""
__all__ = [
"AugmentedDirectoryService",
]
import time
from zope.interface import implementer
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twistedcaldav.directory.augment import AugmentRecord
from twext.python.log import Logger
from twext.who.directory import DirectoryRecord
from twext.who.directory import DirectoryService as BaseDirectoryService
from twext.who.idirectory import (
IDirectoryService, RecordType, FieldName as BaseFieldName, NotAllowedError
)
from twext.who.util import ConstantsContainer
from txdav.common.idirectoryservice import IStoreDirectoryService
from txdav.who.directory import (
CalendarDirectoryRecordMixin, CalendarDirectoryServiceMixin,
)
from txdav.who.idirectory import (
AutoScheduleMode, FieldName, RecordType as CalRecordType
)
log = Logger()
def timed(f):
"""
A decorator which keeps track of the wrapped function's call count and
total duration
"""
def recordTiming(result, key, startTime):
"""
Figures out how much time to add to the total time spent within the
method identified by key and stores that in the timings dict.
@param result: the result of the wrapped method
@param key: the method name
@type key: C{str}
@param startTime: the start time of the call in seconds
@type startTime: C{float}
"""
AugmentedDirectoryService._addTiming(key, time.time() - startTime)
return result
def timingWrapper(self, *args, **kwds):
"""
Records the start time of the call and the method's name
"""
startTime = time.time()
d = f(self, *args, **kwds)
d.addBoth(recordTiming, f.func_name, startTime)
return d
return timingWrapper
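# Illustrative note (an assumption for this sketch, not part of the original
# module): after a decorated method such as recordWithUID has run twice,
# taking 0.1s and 0.3s, AugmentedDirectoryService._timings would hold
# roughly {"recordWithUID": (2, 0.4)}, i.e. (call count, total seconds),
# which is what stats() later exposes.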
@implementer(IDirectoryService, IStoreDirectoryService)
class AugmentedDirectoryService(
BaseDirectoryService, CalendarDirectoryServiceMixin
):
"""
Augmented directory service.
This is a directory service that wraps an L{IDirectoryService} and augments
directory records with additional or modified fields.
"""
fieldName = ConstantsContainer((
BaseFieldName,
FieldName,
))
_timings = {}
def __init__(self, directory, store, augmentDB):
BaseDirectoryService.__init__(self, directory.realmName)
self._directory = directory
self._store = store
self._augmentDB = augmentDB
# An LDAP DS has extra info to expose via the dashboard
# This is assigned in buildDirectory()
self._ldapDS = None
@classmethod
def _addTiming(cls, key, duration):
if key not in cls._timings:
cls._timings[key] = (0, 0.0)
count, timeSpent = cls._timings[key]
count += 1
timeSpent += duration
cls._timings[key] = (count, timeSpent)
def flush(self):
return self._directory.flush()
def stats(self):
results = {}
results.update(self._timings)
# An LDAP DS has extra info to expose via the dashboard
if self._ldapDS is not None:
results.update(self._ldapDS.poolStats)
return succeed(results)
@property
def recordType(self):
# Defer to the directory service we're augmenting
return self._directory.recordType
def recordTypes(self):
# Defer to the directory service we're augmenting
return self._directory.recordTypes()
@inlineCallbacks
def recordsFromExpression(
self, expression, recordTypes=None,
limitResults=None, timeoutSeconds=None
):
records = yield self._directory.recordsFromExpression(
expression, recordTypes=recordTypes,
limitResults=limitResults, timeoutSeconds=timeoutSeconds
)
augmented = []
for record in records:
record = yield self._augment(record)
augmented.append(record)
returnValue(augmented)
@inlineCallbacks
def recordsWithFieldValue(
self, fieldName, value, limitResults=None, timeoutSeconds=None
):
records = yield self._directory.recordsWithFieldValue(
fieldName, value,
limitResults=limitResults, timeoutSeconds=timeoutSeconds
)
augmented = []
for record in records:
record = yield self._augment(record)
augmented.append(record)
returnValue(augmented)
@timed
@inlineCallbacks
def recordWithUID(self, uid, timeoutSeconds=None):
# MOVE2WHO, REMOVE THIS:
if not isinstance(uid, unicode):
# log.warn("Need to change uid to unicode")
uid = uid.decode("utf-8")
record = yield self._directory.recordWithUID(
uid, timeoutSeconds=timeoutSeconds
)
record = yield self._augment(record)
returnValue(record)
@timed
@inlineCallbacks
def recordWithGUID(self, guid, timeoutSeconds=None):
record = yield self._directory.recordWithGUID(
guid, timeoutSeconds=timeoutSeconds
)
record = yield self._augment(record)
returnValue(record)
@timed
@inlineCallbacks
def recordsWithRecordType(
self, recordType, limitResults=None, timeoutSeconds=None
):
records = yield self._directory.recordsWithRecordType(
recordType, limitResults=limitResults, timeoutSeconds=timeoutSeconds
)
augmented = []
for record in records:
record = yield self._augment(record)
augmented.append(record)
returnValue(augmented)
@timed
@inlineCallbacks
def recordWithShortName(self, recordType, shortName, timeoutSeconds=None):
# MOVE2WHO, REMOVE THIS:
if not isinstance(shortName, unicode):
# log.warn("Need to change shortName to unicode")
shortName = shortName.decode("utf-8")
record = yield self._directory.recordWithShortName(
recordType, shortName, timeoutSeconds=timeoutSeconds
)
record = yield self._augment(record)
returnValue(record)
@timed
@inlineCallbacks
def recordsWithEmailAddress(
self, emailAddress, limitResults=None, timeoutSeconds=None
):
# MOVE2WHO, REMOVE THIS:
if not isinstance(emailAddress, unicode):
# log.warn("Need to change emailAddress to unicode")
emailAddress = emailAddress.decode("utf-8")
records = yield self._directory.recordsWithEmailAddress(
emailAddress,
limitResults=limitResults, timeoutSeconds=timeoutSeconds
)
augmented = []
for record in records:
record = yield self._augment(record)
augmented.append(record)
returnValue(augmented)
@timed
def recordWithCalendarUserAddress(self, *args, **kwds):
return CalendarDirectoryServiceMixin.recordWithCalendarUserAddress(<|fim▁hole|>
@timed
def recordsMatchingTokens(self, *args, **kwds):
return CalendarDirectoryServiceMixin.recordsMatchingTokens(
self, *args, **kwds
)
@timed
def recordsMatchingFields(self, *args, **kwds):
return CalendarDirectoryServiceMixin.recordsMatchingFields(
self, *args, **kwds
)
@timed
@inlineCallbacks
def updateRecords(self, records, create=False):
"""
Pull out the augmented fields from each record, apply those to the
augments database, then update the base records.
"""
baseRecords = []
augmentRecords = []
for record in records:
# Split out the base fields from the augment fields
baseFields, augmentFields = self._splitFields(record)
# Ignore groups for now
if augmentFields and record.recordType != RecordType.group:
# Create an AugmentRecord
autoScheduleMode = {
AutoScheduleMode.none: "none",
AutoScheduleMode.accept: "accept-always",
AutoScheduleMode.decline: "decline-always",
AutoScheduleMode.acceptIfFree: "accept-if-free",
AutoScheduleMode.declineIfBusy: "decline-if-busy",
AutoScheduleMode.acceptIfFreeDeclineIfBusy: "automatic",
}.get(augmentFields.get(FieldName.autoScheduleMode, None), None)
kwargs = {
"uid": record.uid,
"autoScheduleMode": autoScheduleMode,
}
if FieldName.hasCalendars in augmentFields:
kwargs["enabledForCalendaring"] = augmentFields[FieldName.hasCalendars]
if FieldName.hasContacts in augmentFields:
kwargs["enabledForAddressBooks"] = augmentFields[FieldName.hasContacts]
if FieldName.loginAllowed in augmentFields:
kwargs["enabledForLogin"] = augmentFields[FieldName.loginAllowed]
if FieldName.autoAcceptGroup in augmentFields:
kwargs["autoAcceptGroup"] = augmentFields[FieldName.autoAcceptGroup]
if FieldName.serviceNodeUID in augmentFields:
kwargs["serverID"] = augmentFields[FieldName.serviceNodeUID]
augmentRecord = AugmentRecord(**kwargs)
augmentRecords.append(augmentRecord)
# Create new base records:
baseRecords.append(DirectoryRecord(self._directory, record._baseRecord.fields if hasattr(record, "_baseRecord") else baseFields))
# Apply the augment records
if augmentRecords:
yield self._augmentDB.addAugmentRecords(augmentRecords)
# Apply the base records
if baseRecords:
try:
yield self._directory.updateRecords(baseRecords, create=create)
except NotAllowedError:
pass
def _splitFields(self, record):
"""
Returns a tuple of two dictionaries; the first contains all the non
augment fields, and the second contains all the augment fields.
"""
if record is None:
return None
augmentFields = {}
baseFields = record.fields.copy()
for field in (
FieldName.loginAllowed,
FieldName.hasCalendars, FieldName.hasContacts,
FieldName.autoScheduleMode, FieldName.autoAcceptGroup,
FieldName.serviceNodeUID
):
if field in baseFields:
augmentFields[field] = baseFields[field]
del baseFields[field]
return (baseFields, augmentFields)
@inlineCallbacks
def removeRecords(self, uids):
yield self._augmentDB.removeAugmentRecords(uids)
yield self._directory.removeRecords(uids)
def _assignToField(self, fields, name, value):
"""
Assign a value to a field only if not already present in fields.
"""
field = self.fieldName.lookupByName(name)
if field not in fields:
fields[field] = value
@inlineCallbacks
def _augment(self, record):
if record is None:
returnValue(None)
augmentRecord = yield self._augmentDB.getAugmentRecord(
record.uid,
self.recordTypeToOldName(record.recordType)
)
if augmentRecord is None:
# Augments does not know about this record type, so return
# the original record
returnValue(record)
fields = record.fields.copy()
if augmentRecord:
if record.recordType == RecordType.group:
self._assignToField(fields, "hasCalendars", False)
self._assignToField(fields, "hasContacts", False)
else:
self._assignToField(
fields, "hasCalendars",
augmentRecord.enabledForCalendaring
)
self._assignToField(
fields, "hasContacts",
augmentRecord.enabledForAddressBooks
)
# In the case of XML augments, a missing auto-schedule-mode
# element has the same meaning as an element with a value of "default"
# in which case augmentRecord.autoScheduleMode = "default". On
# the record we're augmenting, "default" mode means autoScheduleMode
# gets set to None (distinct from AutoScheduleMode.none!),
# which gets swapped for config.Scheduling.Options.AutoSchedule.DefaultMode
# in checkAttendeeAutoReply().
# ...Except for locations/resources which will default to automatic
autoScheduleMode = {
"none": AutoScheduleMode.none,
"accept-always": AutoScheduleMode.accept,
"decline-always": AutoScheduleMode.decline,
"accept-if-free": AutoScheduleMode.acceptIfFree,
"decline-if-busy": AutoScheduleMode.declineIfBusy,
"automatic": AutoScheduleMode.acceptIfFreeDeclineIfBusy,
}.get(augmentRecord.autoScheduleMode, None)
# Resources/Locations default to automatic
if record.recordType in (
CalRecordType.location,
CalRecordType.resource
):
if autoScheduleMode is None:
autoScheduleMode = AutoScheduleMode.acceptIfFreeDeclineIfBusy
self._assignToField(
fields, "autoScheduleMode",
autoScheduleMode
)
if augmentRecord.autoAcceptGroup is not None:
self._assignToField(
fields, "autoAcceptGroup",
augmentRecord.autoAcceptGroup.decode("utf-8")
)
self._assignToField(
fields, "loginAllowed",
augmentRecord.enabledForLogin
)
self._assignToField(
fields, "serviceNodeUID",
augmentRecord.serverID.decode("utf-8")
)
else:
self._assignToField(fields, "hasCalendars", False)
self._assignToField(fields, "hasContacts", False)
self._assignToField(fields, "loginAllowed", False)
# print("Augmented fields", fields)
# Clone to a new record with the augmented fields
augmentedRecord = AugmentedDirectoryRecord(self, record, fields)
returnValue(augmentedRecord)
@inlineCallbacks
def setAutoScheduleMode(self, record, autoScheduleMode):
augmentRecord = yield self._augmentDB.getAugmentRecord(
record.uid,
self.recordTypeToOldName(record.recordType)
)
if augmentRecord is not None:
autoScheduleMode = {
AutoScheduleMode.none: "none",
AutoScheduleMode.accept: "accept-always",
AutoScheduleMode.decline: "decline-always",
AutoScheduleMode.acceptIfFree: "accept-if-free",
AutoScheduleMode.declineIfBusy: "decline-if-busy",
AutoScheduleMode.acceptIfFreeDeclineIfBusy: "automatic",
}.get(autoScheduleMode)
augmentRecord.autoScheduleMode = autoScheduleMode
yield self._augmentDB.addAugmentRecords([augmentRecord])
class AugmentedDirectoryRecord(DirectoryRecord, CalendarDirectoryRecordMixin):
"""
Augmented directory record.
"""
def __init__(self, service, baseRecord, augmentedFields):
DirectoryRecord.__init__(self, service, augmentedFields)
CalendarDirectoryRecordMixin.__init__(self)
self._baseRecord = baseRecord
@timed
@inlineCallbacks
def members(self):
augmented = []
records = yield self._baseRecord.members()
for record in records:
augmented.append((yield self.service._augment(record)))
returnValue(augmented)
def addMembers(self, memberRecords):
return self._baseRecord.addMembers(memberRecords)
def removeMembers(self, memberRecords):
return self._baseRecord.removeMembers(memberRecords)
def setMembers(self, memberRecords):
return self._baseRecord.setMembers(memberRecords)
@timed
@inlineCallbacks
def groups(self):
augmented = []
def _groupUIDsFor(txn):
return txn.groupUIDsFor(self.uid)
groupUIDs = yield self.service._store.inTransaction(
"AugmentedDirectoryRecord.groups",
_groupUIDsFor
)
for groupUID in groupUIDs:
groupRecord = yield self.service.recordWithUID(
groupUID
)
if groupRecord:
augmented.append((yield self.service._augment(groupRecord)))
returnValue(augmented)
@timed
def verifyPlaintextPassword(self, password):
return self._baseRecord.verifyPlaintextPassword(password)
@timed
def verifyHTTPDigest(self, *args):
return self._baseRecord.verifyHTTPDigest(*args)
@timed
def accessForRecord(self, record):
return self._baseRecord.accessForRecord(record)<|fim▁end|> | self, *args, **kwds
) |
<|file_name|>args1.rs<|end_file_name|><|fim▁begin|>// args1.rs
use std::env;
<|fim▁hole|>}<|fim▁end|> | fn main() {
let first = env::args().nth(1).expect("please supply an argument");
let n: i32 = first.parse().expect("not an integer!");
// do your magic |
<|file_name|>packing_test.go<|end_file_name|><|fim▁begin|>// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"math/big"
"github.com/matthieu/go-ethereum/common"
)
type packUnpackTest struct {
def string
unpacked interface{}
packed string
}
var packUnpackTests = []packUnpackTest{
// Booleans
{
def: `[{ "type": "bool" }]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001",
unpacked: true,
},
{
def: `[{ "type": "bool" }]`,
packed: "0000000000000000000000000000000000000000000000000000000000000000",
unpacked: false,
},
// Integers
{
def: `[{ "type": "uint8" }]`,
unpacked: uint8(2),
packed: "0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{ "type": "uint8[]" }]`,
unpacked: []uint8{1, 2},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{ "type": "uint16" }]`,
unpacked: uint16(2),
packed: "0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{ "type": "uint16[]" }]`,
unpacked: []uint16{1, 2},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{"type": "uint17"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001",
unpacked: big.NewInt(1),
},
{
def: `[{"type": "uint32"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001",
unpacked: uint32(1),
},
{
def: `[{"type": "uint32[]"}]`,
unpacked: []uint32{1, 2},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{"type": "uint64"}]`,
unpacked: uint64(2),
packed: "0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{"type": "uint64[]"}]`,
unpacked: []uint64{1, 2},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{"type": "uint256"}]`,
unpacked: big.NewInt(2),
packed: "0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{"type": "uint256[]"}]`,
unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{"type": "int8"}]`,
unpacked: int8(2),
packed: "0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{"type": "int8[]"}]`,
unpacked: []int8{1, 2},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{"type": "int16"}]`,
unpacked: int16(2),
packed: "0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{"type": "int16[]"}]`,
unpacked: []int16{1, 2},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{"type": "int17"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001",
unpacked: big.NewInt(1),
},
{
def: `[{"type": "int32"}]`,
unpacked: int32(2),
packed: "0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{"type": "int32"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001",
unpacked: int32(1),
},
{
def: `[{"type": "int32[]"}]`,
unpacked: []int32{1, 2},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{"type": "int64"}]`,
unpacked: int64(2),
packed: "0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{"type": "int64[]"}]`,
unpacked: []int64{1, 2},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{"type": "int256"}]`,
unpacked: big.NewInt(2),
packed: "0000000000000000000000000000000000000000000000000000000000000002",
},
{
def: `[{"type": "int256"}]`,
packed: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
unpacked: big.NewInt(-1),
},
{
def: `[{"type": "int256[]"}]`,
unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
},
// Address
{
def: `[{"type": "address"}]`,
packed: "0000000000000000000000000100000000000000000000000000000000000000",
unpacked: common.Address{1},
},
{
def: `[{"type": "address[]"}]`,
unpacked: []common.Address{{1}, {2}},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000100000000000000000000000000000000000000" +
"0000000000000000000000000200000000000000000000000000000000000000",
},
// Bytes
{
def: `[{"type": "bytes1"}]`,
unpacked: [1]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes2"}]`,
unpacked: [2]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes3"}]`,
unpacked: [3]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes4"}]`,
unpacked: [4]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes5"}]`,
unpacked: [5]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes6"}]`,
unpacked: [6]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes7"}]`,
unpacked: [7]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes8"}]`,
unpacked: [8]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes9"}]`,
unpacked: [9]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes10"}]`,
unpacked: [10]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes11"}]`,
unpacked: [11]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes12"}]`,
unpacked: [12]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes13"}]`,
unpacked: [13]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes14"}]`,
unpacked: [14]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes15"}]`,
unpacked: [15]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes16"}]`,
unpacked: [16]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes17"}]`,
unpacked: [17]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes18"}]`,
unpacked: [18]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes19"}]`,
unpacked: [19]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes20"}]`,
unpacked: [20]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes21"}]`,
unpacked: [21]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes22"}]`,
unpacked: [22]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes23"}]`,
unpacked: [23]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes24"}]`,
unpacked: [24]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes25"}]`,
unpacked: [25]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes26"}]`,
unpacked: [26]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes27"}]`,
unpacked: [27]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes28"}]`,
unpacked: [28]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes29"}]`,
unpacked: [29]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes30"}]`,
unpacked: [30]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes31"}]`,
unpacked: [31]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes32"}]`,
unpacked: [32]byte{1},
packed: "0100000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "bytes32"}]`,
packed: "0100000000000000000000000000000000000000000000000000000000000000",
unpacked: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
},
{
def: `[{"type": "bytes"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000020" +
"0100000000000000000000000000000000000000000000000000000000000000",
unpacked: common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
def: `[{"type": "bytes32"}]`,
packed: "0100000000000000000000000000000000000000000000000000000000000000",
unpacked: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
},
// Functions
{
def: `[{"type": "function"}]`,
packed: "0100000000000000000000000000000000000000000000000000000000000000",
unpacked: [24]byte{1},
},
// Slice and Array
{
def: `[{"type": "uint8[]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: []uint8{1, 2},
},
{
def: `[{"type": "uint8[]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000000",
unpacked: []uint8{},
},
{
def: `[{"type": "uint256[]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000000",
unpacked: []*big.Int{},
},
{
def: `[{"type": "uint8[2]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: [2]uint8{1, 2},
},
{
def: `[{"type": "int8[2]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: [2]int8{1, 2},
},
{
def: `[{"type": "int16[]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: []int16{1, 2},
},
{
def: `[{"type": "int16[2]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: [2]int16{1, 2},
},
{
def: `[{"type": "int32[]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: []int32{1, 2},
},
{
def: `[{"type": "int32[2]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: [2]int32{1, 2},
},
{
def: `[{"type": "int64[]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: []int64{1, 2},
},
{
def: `[{"type": "int64[2]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: [2]int64{1, 2},
},
{
def: `[{"type": "int256[]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)},
},
{
def: `[{"type": "int256[3]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000003",
unpacked: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
},
// multi dimensional, if these pass, all types that don't require length prefix should pass
{
def: `[{"type": "uint8[][]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000000",
unpacked: [][]uint8{},
},
{
def: `[{"type": "uint8[][]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000040" +
"00000000000000000000000000000000000000000000000000000000000000a0" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: [][]uint8{{1, 2}, {1, 2}},
},
{
def: `[{"type": "uint8[][]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000040" +
"00000000000000000000000000000000000000000000000000000000000000a0" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000003" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000003",
unpacked: [][]uint8{{1, 2}, {1, 2, 3}},
},
{
def: `[{"type": "uint8[2][2]"}]`,<|fim▁hole|> unpacked: [2][2]uint8{{1, 2}, {1, 2}},
},
{
def: `[{"type": "uint8[][2]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000040" +
"0000000000000000000000000000000000000000000000000000000000000060" +
"0000000000000000000000000000000000000000000000000000000000000000" +
"0000000000000000000000000000000000000000000000000000000000000000",
unpacked: [2][]uint8{{}, {}},
},
{
def: `[{"type": "uint8[][2]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000040" +
"0000000000000000000000000000000000000000000000000000000000000080" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000001",
unpacked: [2][]uint8{{1}, {1}},
},
{
def: `[{"type": "uint8[2][]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000000",
unpacked: [][2]uint8{},
},
{
def: `[{"type": "uint8[2][]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: [][2]uint8{{1, 2}},
},
{
def: `[{"type": "uint8[2][]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: [][2]uint8{{1, 2}, {1, 2}},
},
{
def: `[{"type": "uint16[]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: []uint16{1, 2},
},
{
def: `[{"type": "uint16[2]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: [2]uint16{1, 2},
},
{
def: `[{"type": "uint32[]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: []uint32{1, 2},
},
{
def: `[{"type": "uint32[2][3][4]"}]`,
unpacked: [4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000003" +
"0000000000000000000000000000000000000000000000000000000000000004" +
"0000000000000000000000000000000000000000000000000000000000000005" +
"0000000000000000000000000000000000000000000000000000000000000006" +
"0000000000000000000000000000000000000000000000000000000000000007" +
"0000000000000000000000000000000000000000000000000000000000000008" +
"0000000000000000000000000000000000000000000000000000000000000009" +
"000000000000000000000000000000000000000000000000000000000000000a" +
"000000000000000000000000000000000000000000000000000000000000000b" +
"000000000000000000000000000000000000000000000000000000000000000c" +
"000000000000000000000000000000000000000000000000000000000000000d" +
"000000000000000000000000000000000000000000000000000000000000000e" +
"000000000000000000000000000000000000000000000000000000000000000f" +
"0000000000000000000000000000000000000000000000000000000000000010" +
"0000000000000000000000000000000000000000000000000000000000000011" +
"0000000000000000000000000000000000000000000000000000000000000012" +
"0000000000000000000000000000000000000000000000000000000000000013" +
"0000000000000000000000000000000000000000000000000000000000000014" +
"0000000000000000000000000000000000000000000000000000000000000015" +
"0000000000000000000000000000000000000000000000000000000000000016" +
"0000000000000000000000000000000000000000000000000000000000000017" +
"0000000000000000000000000000000000000000000000000000000000000018",
},
{
def: `[{"type": "bytes32[]"}]`,
unpacked: []common.Hash{{1}, {2}},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0100000000000000000000000000000000000000000000000000000000000000" +
"0200000000000000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "uint32[2]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: [2]uint32{1, 2},
},
{
def: `[{"type": "uint64[]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: []uint64{1, 2},
},
{
def: `[{"type": "uint64[2]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: [2]uint64{1, 2},
},
{
def: `[{"type": "uint256[]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)},
},
{
def: `[{"type": "uint256[3]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000003",
unpacked: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
},
{
def: `[{"type": "string[4]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000080" +
"00000000000000000000000000000000000000000000000000000000000000c0" +
"0000000000000000000000000000000000000000000000000000000000000100" +
"0000000000000000000000000000000000000000000000000000000000000140" +
"0000000000000000000000000000000000000000000000000000000000000005" +
"48656c6c6f000000000000000000000000000000000000000000000000000000" +
"0000000000000000000000000000000000000000000000000000000000000005" +
"576f726c64000000000000000000000000000000000000000000000000000000" +
"000000000000000000000000000000000000000000000000000000000000000b" +
"476f2d657468657265756d000000000000000000000000000000000000000000" +
"0000000000000000000000000000000000000000000000000000000000000008" +
"457468657265756d000000000000000000000000000000000000000000000000",
unpacked: [4]string{"Hello", "World", "Go-ethereum", "Ethereum"},
},
{
def: `[{"type": "string[]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000040" +
"0000000000000000000000000000000000000000000000000000000000000080" +
"0000000000000000000000000000000000000000000000000000000000000008" +
"457468657265756d000000000000000000000000000000000000000000000000" +
"000000000000000000000000000000000000000000000000000000000000000b" +
"676f2d657468657265756d000000000000000000000000000000000000000000",
unpacked: []string{"Ethereum", "go-ethereum"},
},
{
def: `[{"type": "bytes[]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000040" +
"0000000000000000000000000000000000000000000000000000000000000080" +
"0000000000000000000000000000000000000000000000000000000000000003" +
"f0f0f00000000000000000000000000000000000000000000000000000000000" +
"0000000000000000000000000000000000000000000000000000000000000003" +
"f0f0f00000000000000000000000000000000000000000000000000000000000",
unpacked: [][]byte{{0xf0, 0xf0, 0xf0}, {0xf0, 0xf0, 0xf0}},
},
{
def: `[{"type": "uint256[2][][]"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000040" +
"00000000000000000000000000000000000000000000000000000000000000e0" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"00000000000000000000000000000000000000000000000000000000000000c8" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"00000000000000000000000000000000000000000000000000000000000003e8" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"00000000000000000000000000000000000000000000000000000000000000c8" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"00000000000000000000000000000000000000000000000000000000000003e8",
unpacked: [][][2]*big.Int{{{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}, {{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}},
},
// struct outputs
{
def: `[{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: struct {
Int1 *big.Int
Int2 *big.Int
}{big.NewInt(1), big.NewInt(2)},
},
{
def: `[{"name":"int_one","type":"int256"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001",
unpacked: struct {
IntOne *big.Int
}{big.NewInt(1)},
},
{
def: `[{"name":"int__one","type":"int256"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001",
unpacked: struct {
IntOne *big.Int
}{big.NewInt(1)},
},
{
def: `[{"name":"int_one_","type":"int256"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001",
unpacked: struct {
IntOne *big.Int
}{big.NewInt(1)},
},
{
def: `[{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}]`,
packed: "0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002",
unpacked: struct {
IntOne *big.Int
Intone *big.Int
}{big.NewInt(1), big.NewInt(2)},
},
{
def: `[{"type": "string"}]`,
unpacked: "foobar",
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000006" +
"666f6f6261720000000000000000000000000000000000000000000000000000",
},
{
def: `[{"type": "string[]"}]`,
unpacked: []string{"hello", "foobar"},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
"0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1
"0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
"68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
"0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
"666f6f6261720000000000000000000000000000000000000000000000000000", // str[1]
},
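// A short worked reading of the annotated string[] case above (illustrative note,
// not an additional test vector): dynamic types are head/tail encoded. The first
// 32-byte word (0x20) is the offset to the array body; the body starts with the
// element count (2), followed by one offset per element measured from the start of
// the body (0x40 and 0x80 here), and finally each string as a length word plus the
// right-padded bytes ("hello" = 5 bytes, "foobar" = 6 bytes). Fixed-size arrays of
// static types, by contrast, are packed inline with no offsets at all.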
{
def: `[{"type": "string[2]"}]`,
unpacked: [2]string{"hello", "foobar"},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0
"0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1
"0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
"68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
"0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
"666f6f6261720000000000000000000000000000000000000000000000000000", // str[1]
},
{
def: `[{"type": "bytes32[][]"}]`,
unpacked: [][][32]byte{{{1}, {2}}, {{3}, {4}, {5}}},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
"0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
"0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
"0500000000000000000000000000000000000000000000000000000000000000", // array[1][2]
},
{
def: `[{"type": "bytes32[][2]"}]`,
unpacked: [2][][32]byte{{{1}, {2}}, {{3}, {4}, {5}}},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
"0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
"0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
"0500000000000000000000000000000000000000000000000000000000000000", // array[1][2]
},
{
def: `[{"type": "bytes32[3][2]"}]`,
unpacked: [2][3][32]byte{{{1}, {2}, {3}}, {{3}, {4}, {5}}},
packed: "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
"0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2]
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
"0500000000000000000000000000000000000000000000000000000000000000", // array[1][2]
},
{
// static tuple
def: `[{"name":"a","type":"int64"},
{"name":"b","type":"int256"},
{"name":"c","type":"int256"},
{"name":"d","type":"bool"},
{"name":"e","type":"bytes32[3][2]"}]`,
unpacked: struct {
A int64
B *big.Int
C *big.Int
D bool
E [2][3][32]byte
}{1, big.NewInt(1), big.NewInt(-1), true, [2][3][32]byte{{{1}, {2}, {3}}, {{3}, {4}, {5}}}},
packed: "0000000000000000000000000000000000000000000000000000000000000001" + // struct[a]
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c]
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[d]
"0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0]
"0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1]
"0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][2]
"0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][0]
"0400000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][1]
"0500000000000000000000000000000000000000000000000000000000000000", // struct[e] array[1][2]
},
{
def: `[{"name":"a","type":"string"},
{"name":"b","type":"int64"},
{"name":"c","type":"bytes"},
{"name":"d","type":"string[]"},
{"name":"e","type":"int256[]"},
{"name":"f","type":"address[]"}]`,
unpacked: struct {
FieldA string `abi:"a"` // Test whether abi tag works
FieldB int64 `abi:"b"`
C []byte
D []string
E []*big.Int
F []common.Address
}{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}},
packed: "00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
"0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset
"0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset
"0000000000000000000000000000000000000000000000000000000000000220" + // struct[e] offset
"0000000000000000000000000000000000000000000000000000000000000280" + // struct[f] offset
"0000000000000000000000000000000000000000000000000000000000000006" + // struct[a] length
"666f6f6261720000000000000000000000000000000000000000000000000000" + // struct[a] "foobar"
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[c] length
"0100000000000000000000000000000000000000000000000000000000000000" + // []byte{1}
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[d] length
"0000000000000000000000000000000000000000000000000000000000000040" + // foo offset
"0000000000000000000000000000000000000000000000000000000000000080" + // bar offset
"0000000000000000000000000000000000000000000000000000000000000003" + // foo length
"666f6f0000000000000000000000000000000000000000000000000000000000" + // foo
"0000000000000000000000000000000000000000000000000000000000000003" + // bar offset
"6261720000000000000000000000000000000000000000000000000000000000" + // bar
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[e] length
"0000000000000000000000000000000000000000000000000000000000000001" + // 1
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // -1
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[f] length
"0000000000000000000000000100000000000000000000000000000000000000" + // common.Address{1}
"0000000000000000000000000200000000000000000000000000000000000000", // common.Address{2}
},
{
def: `[{"components": [{"name": "a","type": "uint256"},
{"name": "b","type": "uint256[]"}],
"name": "a","type": "tuple"},
{"name": "b","type": "uint256[]"}]`,
unpacked: struct {
A struct {
FieldA *big.Int `abi:"a"`
B []*big.Int
}
B []*big.Int
}{
A: struct {
FieldA *big.Int `abi:"a"` // Test whether abi tag works for nested tuple
B []*big.Int
}{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(2)}},
B: []*big.Int{big.NewInt(1), big.NewInt(2)}},
packed: "0000000000000000000000000000000000000000000000000000000000000040" + // a offset
"00000000000000000000000000000000000000000000000000000000000000e0" + // b offset
"0000000000000000000000000000000000000000000000000000000000000001" + // a.a value
"0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset
"0000000000000000000000000000000000000000000000000000000000000002" + // a.b length
"0000000000000000000000000000000000000000000000000000000000000001" + // a.b[0] value
"0000000000000000000000000000000000000000000000000000000000000002" + // a.b[1] value
"0000000000000000000000000000000000000000000000000000000000000002" + // b length
"0000000000000000000000000000000000000000000000000000000000000001" + // b[0] value
"0000000000000000000000000000000000000000000000000000000000000002", // b[1] value
},
{
def: `[{"components": [{"name": "a","type": "int256"},
{"name": "b","type": "int256[]"}],
"name": "a","type": "tuple[]"}]`,
unpacked: []struct {
A *big.Int
B []*big.Int
}{
{big.NewInt(-1), []*big.Int{big.NewInt(1), big.NewInt(3)}},
{big.NewInt(1), []*big.Int{big.NewInt(2), big.NewInt(-1)}},
},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple length
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
"00000000000000000000000000000000000000000000000000000000000000e0" + // tuple[1] offset
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0].B offset
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].B length
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].B[0] value
"0000000000000000000000000000000000000000000000000000000000000003" + // tuple[0].B[1] value
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[1].B offset
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B length
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B[0] value
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // tuple[1].B[1] value
},
{
def: `[{"components": [{"name": "a","type": "int256"},
{"name": "b","type": "int256"}],
"name": "a","type": "tuple[2]"}]`,
unpacked: [2]struct {
A *big.Int
B *big.Int
}{
{big.NewInt(-1), big.NewInt(1)},
{big.NewInt(1), big.NewInt(-1)},
},
packed: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].a
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].b
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].a
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // tuple[1].b
},
{
def: `[{"components": [{"name": "a","type": "int256[]"}],
"name": "a","type": "tuple[2]"}]`,
unpacked: [2]struct {
A []*big.Int
}{
{[]*big.Int{big.NewInt(-1), big.NewInt(1)}},
{[]*big.Int{big.NewInt(1), big.NewInt(-1)}},
},
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
"00000000000000000000000000000000000000000000000000000000000000c0" + // tuple[1] offset
"0000000000000000000000000000000000000000000000000000000000000020" + // tuple[0].A offset
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].A length
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A[0]
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].A[1]
"0000000000000000000000000000000000000000000000000000000000000020" + // tuple[1].A offset
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].A length
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A[0]
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // tuple[1].A[1]
},
}<|fim▁end|> | packed: "0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000001" +
"0000000000000000000000000000000000000000000000000000000000000002", |
<|file_name|>sub_dataset.py<|end_file_name|><|fim▁begin|>import numpy
import six
from chainer.dataset import dataset_mixin
class SubDataset(dataset_mixin.DatasetMixin):
"""Subset of a base dataset.
SubDataset defines a subset of a given base dataset. The subset is defined
as an interval of indexes, optionally with a given permutation.
If ``order`` is given, then the ``i``-th example of this dataset is the
``order[start + i]``-th example of the base dataset, where ``i`` is a
non-negative integer. If ``order`` is not given, then the ``i``-th example
of this dataset is the ``start + i``-th example of the base dataset.
Negative indexing is also allowed: in this case, the term ``start + i`` is
replaced by ``finish + i``.
SubDataset is often used to split a dataset into training and validation
subsets. The training set is used for training, while the validation set is
used to track the generalization performance, i.e. how well the learned model
works on unseen data. We can tune hyperparameters (e.g. number of
hidden units, weight initializers, learning rate, etc.) by comparing the
validation performance. Note that we often use another set called test set
to measure the quality of the tuned hyperparameter, which can be made by
nesting multiple SubDatasets.
There are two ways to make training-validation splits. One is a single
split, where the dataset is split just into two subsets. It can be done by
:func:`split_dataset` or :func:`split_dataset_random`. The other one is a
:math:`k`-fold cross validation, in which the dataset is divided into
:math:`k` subsets, and :math:`k` different splits are generated using each
of the :math:`k` subsets as a validation set and the rest as a training
set. It can be done by :func:`get_cross_validation_datasets`.
Args:
dataset: Base dataset.
start (int): The first index in the interval.
finish (int): The next-to-the-last index in the interval.
order (sequence of ints): Permutation of indexes in the base dataset.
If this is ``None``, then the ascending order of indexes is used.
"""
def __init__(self, dataset, start, finish, order=None):
if start < 0 or finish > len(dataset):
raise ValueError('subset overruns the base dataset.')
self._dataset = dataset
self._start = start
self._finish = finish
self._size = finish - start
if order is not None and len(order) != len(dataset):
msg = ('order option must have the same length as the base '
'dataset: len(order) = {} while len(dataset) = {}'.format(
len(order), len(dataset)))
raise ValueError(msg)
self._order = order
def __len__(self):
return self._size
def get_example(self, i):
if i >= 0:
if i >= self._size:
raise IndexError('dataset index out of range')
index = self._start + i
else:
if i < -self._size:
raise IndexError('dataset index out of range')
index = self._finish + i
if self._order is not None:
index = self._order[index]
return self._dataset[index]
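# Illustrative sketch (not part of the original module): how SubDataset resolves
# indexes. With ``base = [10, 11, 12, 13, 14]`` and
# ``sub = SubDataset(base, 1, 4, order=[4, 3, 2, 1, 0])``:
#
#     sub[0]  -> base[order[1 + 0]] == base[3] == 13
#     sub[-1] -> base[order[4 - 1]] == base[1] == 11
#     len(sub) == 3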
def split_dataset(dataset, split_at, order=None):
"""Splits a dataset into two subsets.
This function creates two instances of :class:`SubDataset`. These instances
do not share any examples, and they together cover all examples of the
original dataset.
Args:
dataset: Dataset to split.
split_at (int): Position at which the base dataset is split.
order (sequence of ints): Permutation of indexes in the base dataset.
See the document of :class:`SubDataset` for details.
Returns:
tuple: Two :class:`SubDataset` objects. The first subset represents the
examples of indexes ``order[:split_at]`` while the second subset
represents the examples of indexes ``order[split_at:]``.
"""
n_examples = len(dataset)
if split_at < 0:
raise ValueError('split_at must be non-negative')
if split_at >= n_examples:
raise ValueError('split_at exceeds the dataset size')
subset1 = SubDataset(dataset, 0, split_at, order)
subset2 = SubDataset(dataset, split_at, n_examples, order)
return subset1, subset2
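# Illustrative sketch (hypothetical sizes): splitting a 10-example dataset at
# index 7 yields a 7-example first subset and a 3-example second subset; an
# optional ``order`` permutation controls which examples land in each half.
#
#     train, valid = split_dataset(base_dataset, 7)
#     # len(train) == 7, len(valid) == 3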
def split_dataset_random(dataset, first_size):
"""Splits a dataset into two subsets randomly.
This function creates two instances of :class:`SubDataset`. These instances
do not share any examples, and they together cover all examples of the
original dataset. The split is automatically done randomly.
Args:
dataset: Dataset to split.
first_size (int): Size of the first subset.
Returns:
tuple: Two :class:`SubDataset` objects. The first subset contains
``first_size`` examples randomly chosen from the dataset without
replacement, and the second subset contains the rest of the
dataset.
"""
order = numpy.random.permutation(len(dataset))
return split_dataset(dataset, first_size, order)
def get_cross_validation_datasets(dataset, n_fold, order=None):
"""Creates a set of training/test splits for cross validation.
This function generates ``n_fold`` splits of the given dataset. The first
part of each split corresponds to the training dataset, while the second
part to the test dataset. No pairs of test datasets share any examples, and
all test datasets together cover the whole base dataset. Each test dataset
contains almost the same number of examples (the numbers may differ by up to 1).
Args:
dataset: Dataset to split.
n_fold (int): Number of splits for cross validation.
order (sequence of ints): Order of indexes with which each split is
determined. If it is ``None``, then no permutation is used.
Returns:
list of tuples: List of dataset splits.
"""
if order is None:
order = numpy.arange(len(dataset))
else:
order = numpy.array(order) # copy
whole_size = len(dataset)
borders = [whole_size * i // n_fold for i in six.moves.range(n_fold + 1)]
test_sizes = [borders[i + 1] - borders[i] for i in six.moves.range(n_fold)]
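# Worked example (illustrative): with whole_size = 10 and n_fold = 3, borders is
# [0, 3, 6, 10] and test_sizes is [3, 3, 4] -- every example falls in exactly one
# test split, and the split sizes differ by at most one.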
splits = []
for test_size in reversed(test_sizes):
size = whole_size - test_size
splits.append(split_dataset(dataset, size, order))
new_order = numpy.empty_like(order)
new_order[:test_size] = order[-test_size:]
new_order[test_size:] = order[:-test_size]
order = new_order<|fim▁hole|>
def get_cross_validation_datasets_random(dataset, n_fold):
"""Creates a set of training/test splits for cross validation randomly.
This function acts almost the same as :func:`get_cross_validation_datasets`,
except that it automatically generates a random permutation.
Args:
dataset: Dataset to split.
n_fold (int): Number of splits for cross validation.
Returns:
list of tuples: List of dataset splits.
"""
order = numpy.random.permutation(len(dataset))
return get_cross_validation_datasets(dataset, n_fold, order)<|fim▁end|> |
return splits |
<|file_name|>connector_test.go<|end_file_name|><|fim▁begin|>/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package siesta
import (
"fmt"
"math/rand"
"net"
"os"
"testing"
"time"
)
var ci bool = os.Getenv("TRAVIS_CI") != ""
var brokerUp bool = false
var brokerAddr string = "localhost:9092"
func init() {
conn, err := net.Dial("tcp", brokerAddr)
if err == nil {
brokerUp = true
conn.Close()
}
}
func TestDefaultConnectorFunctional(t *testing.T) {
if !brokerUp && !ci {
t.Skip("Broker is not running. Please spin up the broker at localhost:9092 for this test to work.")
}
numMessages := 1000
topicName := fmt.Sprintf("siesta-%d", time.Now().Unix())
connector := testConnector(t)
testTopicMetadata(t, topicName, connector)
testOffsetStorage(t, topicName, connector)
testProduce(t, topicName, numMessages, connector)
testConsume(t, topicName, numMessages, connector)
closeWithin(t, time.Second, connector)
//check whether closing multiple times hangs
closeWithin(t, time.Second, connector)
anotherConnector := testConnector(t)
//should also work fine - must get topic metadata before consuming
testConsume(t, topicName, numMessages, anotherConnector)
closeWithin(t, time.Second, anotherConnector)
}
func testTopicMetadata(t *testing.T, topicName string, connector *DefaultConnector) {
metadata, err := connector.GetTopicMetadata([]string{topicName})
assertFatal(t, err, nil)
assertNot(t, len(metadata.Brokers), 0)<|fim▁hole|>
broker := metadata.Brokers[0]
assert(t, broker.NodeId, int32(0))
if ci {
// this can be asserted on Travis only as we are guaranteed to advertise the broker as localhost
assert(t, broker.Host, "localhost")
}
assert(t, broker.Port, int32(9092))
topicMetadata := findTopicMetadata(t, metadata.TopicMetadata, topicName)
assert(t, topicMetadata.Error, NoError)
assert(t, topicMetadata.TopicName, topicName)
assertFatal(t, len(topicMetadata.PartitionMetadata), 1)
partitionMetadata := topicMetadata.PartitionMetadata[0]
assert(t, partitionMetadata.Error, NoError)
assert(t, partitionMetadata.Isr, []int32{0})
assert(t, partitionMetadata.Leader, int32(0))
assert(t, partitionMetadata.PartitionId, int32(0))
assert(t, partitionMetadata.Replicas, []int32{0})
}
func testOffsetStorage(t *testing.T, topicName string, connector *DefaultConnector) {
group := fmt.Sprintf("test-%d", time.Now().Unix())
targetOffset := rand.Int63()
offset, err := connector.GetOffset(group, topicName, 0)
assertFatal(t, err, UnknownTopicOrPartition)
assert(t, offset, int64(-1))
err = connector.CommitOffset(group, topicName, 0, targetOffset)
assertFatal(t, err, nil)
offset, err = connector.GetOffset(group, topicName, 0)
assertFatal(t, err, nil)
assert(t, offset, targetOffset)
}
func testProduce(t *testing.T, topicName string, numMessages int, connector *DefaultConnector) {
produceRequest := new(ProduceRequest)
produceRequest.Timeout = 1000
produceRequest.RequiredAcks = 1
for i := 0; i < numMessages; i++ {
produceRequest.AddMessage(topicName, 0, &MessageData{
Key: []byte(fmt.Sprintf("%d", numMessages-i)),
Value: []byte(fmt.Sprintf("%d", i)),
})
}
leader, err := connector.tryGetLeader(topicName, 0, 3)
assert(t, err, nil)
assertNot(t, leader, (*brokerLink)(nil))
bytes, err := connector.syncSendAndReceive(leader, produceRequest)
assertFatal(t, err, nil)
produceResponse := new(ProduceResponse)
decodingErr := connector.decode(bytes, produceResponse)
assertFatal(t, decodingErr, (*DecodingError)(nil))
topicBlock, exists := produceResponse.Blocks[topicName]
assertFatal(t, exists, true)
partitionBlock, exists := topicBlock[int32(0)]
assertFatal(t, exists, true)
assert(t, partitionBlock.Error, NoError)
assert(t, partitionBlock.Offset, int64(0))
}
func testConsume(t *testing.T, topicName string, numMessages int, connector *DefaultConnector) {
response, err := connector.Fetch(topicName, 0, 0)
assertFatal(t, err, nil)
messages, err := response.GetMessages()
assertFatal(t, err, nil)
assertFatal(t, len(messages), numMessages)
for i := 0; i < numMessages; i++ {
message := messages[i]
assert(t, message.Topic, topicName)
assert(t, message.Partition, int32(0))
assert(t, message.Offset, int64(i))
assert(t, message.Key, []byte(fmt.Sprintf("%d", numMessages-i)))
assert(t, message.Value, []byte(fmt.Sprintf("%d", i)))
}
}
func findTopicMetadata(t *testing.T, metadata []*TopicMetadata, topic string) *TopicMetadata {
for _, topicMetadata := range metadata {
if topicMetadata.TopicName == topic {
return topicMetadata
}
}
t.Fatalf("TopicMetadata for topic %s not found", topic)
return nil
}<|fim▁end|> | assertNot(t, len(metadata.TopicMetadata), 0)
if len(metadata.Brokers) > 1 {
t.Skip("Cluster should consist only of one broker for this test to run.")
} |
<|file_name|>clamav.py<|end_file_name|><|fim▁begin|># This file is part of Viper - https://github.com/botherder/viper
# See the file 'LICENSE' for copying permission.
import getopt
try:
import pyclamd
HAVE_CLAMD = True
except ImportError:
HAVE_CLAMD = False
from viper.common.out import *
from viper.common.abstracts import Module
from viper.core.session import __sessions__
class ClamAV(Module):
cmd = 'clamav'
description = 'Scan file from local ClamAV daemon'
authors = ['neriberto']
def run(self):
def usage():
self.log('', "usage: clamav [-h] [-s]")
def help():
usage()
self.log('', "")
self.log('', "Options:")
self.log('', "\t--help (-h)\tShow this help message")
self.log('', "\t--socket(-s)\tSpecify an unix socket (default: Clamd Unix Socket)")
self.log('', "")
if not HAVE_CLAMD:
self.log('error', "Missing dependency, install requests (`pip install pyclamd`)")
return
try:
opts, argv = getopt.getopt(self.args, 'hs:', ['help', 'socket='])
except getopt.GetoptError as e:
self.log('', e)
usage()
return
daemon = None
result = None
socket = None
for opt, value in opts:
if opt in ('-h', '--help'):
help()
return
elif opt in ('-s', '--socket'):
self.log('info', "Using socket {0} to connect to ClamAV daemon".format(value))
socket = value
try:
daemon = pyclamd.ClamdUnixSocket(socket)
except Exception as e:
self.log('error', "Daemon connection failure, {0}".format(e))
return
if not __sessions__.is_set():
self.log('error', "No session opened")
return
try:
if not daemon:
daemon = pyclamd.ClamdUnixSocket()
socket = 'Clamav'
except Exception as e:
self.log('error', "Daemon connection failure, {0}".format(e))
return
try:
if daemon.ping():
results = daemon.scan_file(__sessions__.current.file.path)
else:
self.log('error', "Unable to connect to the daemon")
except Exception as e:
self.log('error', "Unable to scan with antivirus daemon, {0}".format(e))<|fim▁hole|>
if results:
for item in results:
found = results[item][0]
name = results[item][1]
if found == 'ERROR':
self.log('error', "Check permissions of the binary folder, {0}".format(name))
else:
self.log('info', "Daemon {0} returns: {1}".format(socket, name))<|fim▁end|> | return
found = None
name = 'not found' |
<|file_name|>transfer.rs<|end_file_name|><|fim▁begin|>use params::{List, Metadata, Timestamp};
use resources::Currency;
/// The resource representing a Stripe transfer reversal.
///
/// For more details see https://stripe.com/docs/api#transfer_reversal_object.
#[derive(Debug, Deserialize, Serialize)]
pub struct TransferReversal {
pub id: String,
pub object: String,
pub amount: u64,
pub balance_transaction: String,
pub created: Timestamp,
pub currency: Currency,
pub metadata: Metadata,
pub transfer: String,
}
/// The resource representing a Stripe transfer.
///<|fim▁hole|> pub object: String,
pub amount: u64,
pub amount_reversed: u64,
pub balance_transaction: String,
pub created: Timestamp,
pub currency: Currency,
pub description: Option<String>,
pub destination: String,
pub destination_payment: String,
pub livemode: bool,
pub metadata: Metadata,
pub reversals: List<TransferReversal>,
pub reversed: bool,
pub source_transaction: String,
pub source_type: String,
pub transfer_group: String,
}<|fim▁end|> | /// For more details see https://stripe.com/docs/api#transfer_object.
#[derive(Debug, Deserialize, Serialize)]
pub struct Transfer {
pub id: String, |
<|file_name|>token_store_test.go<|end_file_name|><|fim▁begin|>package vault
import (
"context"
"encoding/json"
"fmt"
"path"
"reflect"
"sort"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/locksutil"
"github.com/hashicorp/vault/logical"
)
type TokenEntryOld struct {
ID string
Accessor string
Parent string
Policies []string
Path string
Meta map[string]string
DisplayName string
NumUses int
CreationTime int64
TTL time.Duration
ExplicitMaxTTL time.Duration
Role string
Period time.Duration
}
func TestTokenStore_TokenEntryUpgrade(t *testing.T) {
var err error
_, ts, _, _ := TestCoreWithTokenStore(t)
// Use a struct that does not have struct tags to store the items and
// check if the lookup code handles them properly while reading back
entry := &TokenEntryOld{
DisplayName: "test-display-name",
Path: "test",
Policies: []string{"dev", "ops"},
CreationTime: time.Now().Unix(),
ExplicitMaxTTL: 100,
NumUses: 10,
}
entry.ID, err = uuid.GenerateUUID()
if err != nil {
t.Fatal(err)
}
enc, err := json.Marshal(entry)
if err != nil {
t.Fatal(err)
}
saltedID, err := ts.SaltID(context.Background(), entry.ID)
if err != nil {
t.Fatal(err)
}
path := lookupPrefix + saltedID
le := &logical.StorageEntry{
Key: path,
Value: enc,
}
if err := ts.view.Put(context.Background(), le); err != nil {
t.Fatal(err)
}
out, err := ts.Lookup(context.Background(), entry.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
if out.DisplayName != "test-display-name" {
t.Fatalf("bad: display_name: expected: test-display-name, actual: %s", out.DisplayName)
}
if out.CreationTime == 0 {
t.Fatal("bad: expected a non-zero creation time")
}
if out.ExplicitMaxTTL != 100 {
t.Fatalf("bad: explicit_max_ttl: expected: 100, actual: %d", out.ExplicitMaxTTL)
}
if out.NumUses != 10 {
t.Fatalf("bad: num_uses: expected: 10, actual: %d", out.NumUses)
}
// Test the default case to ensure there are no regressions
ent := &TokenEntry{
DisplayName: "test-display-name",
Path: "test",
Policies: []string{"dev", "ops"},
CreationTime: time.Now().Unix(),
ExplicitMaxTTL: 100,
NumUses: 10,
}
if err := ts.create(context.Background(), ent); err != nil {
t.Fatalf("err: %s", err)
}
out, err = ts.Lookup(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
if out.DisplayName != "test-display-name" {
t.Fatalf("bad: display_name: expected: test-display-name, actual: %s", out.DisplayName)
}
if out.CreationTime == 0 {
t.Fatal("bad: expected a non-zero creation time")
}
if out.ExplicitMaxTTL != 100 {
t.Fatalf("bad: explicit_max_ttl: expected: 100, actual: %d", out.ExplicitMaxTTL)
}
if out.NumUses != 10 {
t.Fatalf("bad: num_uses: expected: 10, actual: %d", out.NumUses)
}
// Fill in the deprecated fields and read out from proper fields
ent = &TokenEntry{
Path: "test",
Policies: []string{"dev", "ops"},
DisplayNameDeprecated: "test-display-name",
CreationTimeDeprecated: time.Now().Unix(),
ExplicitMaxTTLDeprecated: 100,
NumUsesDeprecated: 10,
}
if err := ts.create(context.Background(), ent); err != nil {
t.Fatalf("err: %s", err)
}
out, err = ts.Lookup(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
if out.DisplayName != "test-display-name" {
t.Fatalf("bad: display_name: expected: test-display-name, actual: %s", out.DisplayName)
}
if out.CreationTime == 0 {
t.Fatal("bad: expected a non-zero creation time")
}
if out.ExplicitMaxTTL != 100 {
t.Fatalf("bad: explicit_max_ttl: expected: 100, actual: %d", out.ExplicitMaxTTL)
}
if out.NumUses != 10 {
t.Fatalf("bad: num_uses: expected: 10, actual: %d", out.NumUses)
}
// Check if NumUses picks up a lower value
ent = &TokenEntry{
Path: "test",
NumUses: 5,
NumUsesDeprecated: 10,
}
if err := ts.create(context.Background(), ent); err != nil {
t.Fatalf("err: %s", err)
}
out, err = ts.Lookup(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
if out.NumUses != 5 {
t.Fatalf("bad: num_uses: expected: 5, actual: %d", out.NumUses)
}
// Swap the values between the deprecated and proper fields and check that the
// lower value is still picked up
ent = &TokenEntry{
Path: "test",
NumUses: 10,
NumUsesDeprecated: 5,
}
if err := ts.create(context.Background(), ent); err != nil {
t.Fatalf("err: %s", err)
}
out, err = ts.Lookup(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
if out.NumUses != 5 {
t.Fatalf("bad: num_uses: expected: 5, actual: %d", out.NumUses)
}
}
func getBackendConfig(c *Core) *logical.BackendConfig {
return &logical.BackendConfig{
Logger: c.logger,
System: logical.StaticSystemView{
DefaultLeaseTTLVal: time.Hour * 24,
MaxLeaseTTLVal: time.Hour * 24 * 32,
},
}
}
func testMakeToken(t *testing.T, ts *TokenStore, root, client, ttl string, policy []string) {
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
req.Data["id"] = client
req.Data["policies"] = policy
req.Data["ttl"] = ttl
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken != client {
t.Fatalf("bad: %#v", resp)
}
}
func testCoreMakeToken(t *testing.T, c *Core, root, client, ttl string, policy []string) {
req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/create")
req.ClientToken = root
req.Data["id"] = client
req.Data["policies"] = policy
req.Data["ttl"] = ttl
resp, err := c.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken != client {
t.Fatalf("bad: %#v", *resp)
}
}
func TestTokenStore_AccessorIndex(t *testing.T) {
_, ts, _, _ := TestCoreWithTokenStore(t)
ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}}
if err := ts.create(context.Background(), ent); err != nil {
t.Fatalf("err: %s", err)
}
out, err := ts.Lookup(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
// Ensure that accessor is created
if out == nil || out.Accessor == "" {
t.Fatalf("bad: %#v", out)
}
aEntry, err := ts.lookupByAccessor(context.Background(), out.Accessor, false)
if err != nil {
t.Fatalf("err: %s", err)
}
// Verify that the value returned from the index matches the token ID
if aEntry.TokenID != ent.ID {
t.Fatalf("bad: got\n%s\nexpected\n%s\n", aEntry.TokenID, ent.ID)
}
}
func TestTokenStore_HandleRequest_LookupAccessor(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
testMakeToken(t, ts, root, "tokenid", "", []string{"foo"})
out, err := ts.Lookup(context.Background(), "tokenid")
if err != nil {
t.Fatalf("err: %s", err)
}
if out == nil {
t.Fatalf("err: %s", err)
}
req := logical.TestRequest(t, logical.UpdateOperation, "lookup-accessor")
req.Data = map[string]interface{}{
"accessor": out.Accessor,
}
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil {
t.Fatalf("err: %s", err)
}
if resp.Data == nil {
t.Fatalf("response should contain data")
}
if resp.Data["accessor"].(string) == "" {
t.Fatalf("accessor should not be empty")
}
// Verify that the lookup-accessor operation does not return the token ID
if resp.Data["id"].(string) != "" {
t.Fatalf("token ID should not be returned")
}
}
func TestTokenStore_HandleRequest_ListAccessors(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
testKeys := []string{"token1", "token2", "token3", "token4"}
for _, key := range testKeys {
testMakeToken(t, ts, root, key, "", []string{"foo"})
}
// Revoke root to make the number of accessors match
salted, err := ts.SaltID(context.Background(), root)
if err != nil {
t.Fatal(err)
}
ts.revokeSalted(context.Background(), salted)
req := logical.TestRequest(t, logical.ListOperation, "accessors/")
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil {
t.Fatalf("err: %s", err)
}
if resp.Data == nil {
t.Fatalf("response should contain data")
}
if resp.Data["keys"] == nil {
t.Fatalf("keys should not be empty")
}
keys := resp.Data["keys"].([]string)
if len(keys) != len(testKeys) {
t.Fatalf("wrong number of accessors found")
}
if len(resp.Warnings) != 0 {
t.Fatalf("got warnings:\n%#v", resp.Warnings)
}
// Test upgrade from old struct method of accessor storage (of token id)
for _, accessor := range keys {
aEntry, err := ts.lookupByAccessor(context.Background(), accessor, false)
if err != nil {
t.Fatal(err)
}
if aEntry.TokenID == "" || aEntry.AccessorID == "" {
t.Fatalf("error, accessor entry looked up is empty, but no error thrown")
}
salted, err := ts.SaltID(context.Background(), accessor)
if err != nil {
t.Fatal(err)
}
path := accessorPrefix + salted
le := &logical.StorageEntry{Key: path, Value: []byte(aEntry.TokenID)}
if err := ts.view.Put(context.Background(), le); err != nil {
t.Fatalf("failed to persist accessor index entry: %v", err)
}
}
// Do the lookup again, should get same result
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil {
t.Fatalf("err: %s", err)
}
if resp.Data == nil {
t.Fatalf("response should contain data")
}
if resp.Data["keys"] == nil {
t.Fatalf("keys should not be empty")
}
keys2 := resp.Data["keys"].([]string)
if len(keys2) != len(testKeys) {
t.Fatalf("wrong number of accessors found")
}
if len(resp.Warnings) != 0 {
t.Fatalf("got warnings:\n%#v", resp.Warnings)
}
for _, accessor := range keys2 {
aEntry, err := ts.lookupByAccessor(context.Background(), accessor, false)
if err != nil {
t.Fatal(err)
}
if aEntry.TokenID == "" || aEntry.AccessorID == "" {
t.Fatalf("error, accessor entry looked up is empty, but no error thrown")
}
}
}
func TestTokenStore_HandleRequest_RevokeAccessor(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
testMakeToken(t, ts, root, "tokenid", "", []string{"foo"})
out, err := ts.Lookup(context.Background(), "tokenid")
if err != nil {
t.Fatalf("err: %s", err)
}
if out == nil {
t.Fatalf("err: %s", err)
}
req := logical.TestRequest(t, logical.UpdateOperation, "revoke-accessor")
req.Data = map[string]interface{}{
"accessor": out.Accessor,
}
_, err = ts.HandleRequest(context.Background(), req)
if err != nil {
t.Fatalf("err: %s", err)
}
out, err = ts.Lookup(context.Background(), "tokenid")
if err != nil {
t.Fatalf("err: %s", err)
}
if out != nil {
t.Fatalf("bad:\ngot %#v\nexpected: nil\n", out)
}
}
func TestTokenStore_RootToken(t *testing.T) {
_, ts, _, _ := TestCoreWithTokenStore(t)
te, err := ts.rootToken(context.Background())
if err != nil {
t.Fatalf("err: %v", err)
}
if te.ID == "" {
t.Fatalf("missing ID")
}
out, err := ts.Lookup(context.Background(), te.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if !reflect.DeepEqual(out, te) {
t.Fatalf("bad: expected:%#v\nactual:%#v", te, out)
}
}
func TestTokenStore_CreateLookup(t *testing.T) {
c, ts, _, _ := TestCoreWithTokenStore(t)
ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}}
if err := ts.create(context.Background(), ent); err != nil {
t.Fatalf("err: %v", err)
}
if ent.ID == "" {
t.Fatalf("missing ID")
}
out, err := ts.Lookup(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if !reflect.DeepEqual(out, ent) {
t.Fatalf("bad: expected:%#v\nactual:%#v", ent, out)
}
// New store should share the salt
ts2, err := NewTokenStore(context.Background(), hclog.New(&hclog.LoggerOptions{}), c, getBackendConfig(c))
if err != nil {
t.Fatalf("err: %v", err)
}
ts2.SetExpirationManager(c.expiration)
// Should still match
out, err = ts2.Lookup(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if !reflect.DeepEqual(out, ent) {
t.Fatalf("bad: expected:%#v\nactual:%#v", ent, out)
}
}
func TestTokenStore_CreateLookup_ProvidedID(t *testing.T) {
c, ts, _, _ := TestCoreWithTokenStore(t)
ent := &TokenEntry{
ID: "foobarbaz",
Path: "test",
Policies: []string{"dev", "ops"},
}
if err := ts.create(context.Background(), ent); err != nil {
t.Fatalf("err: %v", err)
}
if ent.ID != "foobarbaz" {
t.Fatalf("bad: ent.ID: expected:\"foobarbaz\"\n actual:%s", ent.ID)
}
if err := ts.create(context.Background(), ent); err == nil {
t.Fatal("expected error creating token with the same ID")
}
out, err := ts.Lookup(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if !reflect.DeepEqual(out, ent) {
t.Fatalf("bad: expected:%#v\nactual:%#v", ent, out)
}
// New store should share the salt
ts2, err := NewTokenStore(context.Background(), hclog.New(&hclog.LoggerOptions{}), c, getBackendConfig(c))
if err != nil {
t.Fatalf("err: %v", err)
}
ts2.SetExpirationManager(c.expiration)
// Should still match
out, err = ts2.Lookup(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if !reflect.DeepEqual(out, ent) {
t.Fatalf("bad: expected:%#v\nactual:%#v", ent, out)
}
}
func TestTokenStore_CreateLookup_ExpirationInRestoreMode(t *testing.T) {
_, ts, _, _ := TestCoreWithTokenStore(t)
ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}}
if err := ts.create(context.Background(), ent); err != nil {
t.Fatalf("err: %v", err)
}
if ent.ID == "" {
t.Fatalf("missing ID")
}
// Replace the lease with a lease with an expire time in the past
saltedID, err := ts.SaltID(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
// Create a lease entry
leaseID := path.Join(ent.Path, saltedID)
le := &leaseEntry{
LeaseID: leaseID,
ClientToken: ent.ID,
Path: ent.Path,
IssueTime: time.Now(),
ExpireTime: time.Now().Add(1 * time.Hour),
}
if err := ts.expiration.persistEntry(le); err != nil {
t.Fatalf("err: %v", err)
}
out, err := ts.Lookup(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if !reflect.DeepEqual(out, ent) {
t.Fatalf("bad: expected:%#v\nactual:%#v", ent, out)
}
// Set to expired lease time
le.ExpireTime = time.Now().Add(-1 * time.Hour)
if err := ts.expiration.persistEntry(le); err != nil {
t.Fatalf("err: %v", err)
}
err = ts.expiration.Stop()
if err != nil {
t.Fatal(err)
}
// Reset expiration manager to restore mode
ts.expiration.restoreModeLock.Lock()
atomic.StoreInt32(&ts.expiration.restoreMode, 1)
ts.expiration.restoreLocks = locksutil.CreateLocks()
ts.expiration.restoreModeLock.Unlock()
// Test that the token lookup does not return the token entry due to the
// expired lease
out, err = ts.Lookup(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("lease expired, no token expected: %#v", out)
}
}
func TestTokenStore_UseToken(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
// Lookup the root token
ent, err := ts.Lookup(context.Background(), root)
if err != nil {
t.Fatalf("err: %v", err)
}
// Root is an unlimited use token, should be a no-op
te, err := ts.UseToken(context.Background(), ent)
if err != nil {
t.Fatalf("err: %v", err)
}
if te == nil {
t.Fatalf("token entry after use was nil")
}
// Lookup the root token again
ent2, err := ts.Lookup(context.Background(), root)
if err != nil {
t.Fatalf("err: %v", err)
}
if !reflect.DeepEqual(ent, ent2) {
t.Fatalf("bad: ent:%#v ent2:%#v", ent, ent2)
}
// Create a restricted token
ent = &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}, NumUses: 2}
if err := ts.create(context.Background(), ent); err != nil {
t.Fatalf("err: %v", err)
}
// Use the token
te, err = ts.UseToken(context.Background(), ent)
if err != nil {
t.Fatalf("err: %v", err)
}
if te == nil {
t.Fatalf("token entry for use #1 was nil")
}
// Lookup the token
ent2, err = ts.Lookup(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should be reduced
if ent2.NumUses != 1 {
t.Fatalf("bad: %#v", ent2)
}
// Use the token
te, err = ts.UseToken(context.Background(), ent)
if err != nil {
t.Fatalf("err: %v", err)
}
if te == nil {
t.Fatalf("token entry for use #2 was nil")
}
if te.NumUses != tokenRevocationDeferred {
t.Fatalf("token entry after use #2 did not have revoke flag")
}
ts.Revoke(context.Background(), te.ID)
// Lookup the token
ent2, err = ts.Lookup(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
// Should be revoked
if ent2 != nil {
t.Fatalf("bad: %#v", ent2)
}
}
func TestTokenStore_Revoke(t *testing.T) {
_, ts, _, _ := TestCoreWithTokenStore(t)
ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}}
if err := ts.create(context.Background(), ent); err != nil {
t.Fatalf("err: %v", err)
}
err := ts.Revoke(context.Background(), "")
if err.Error() != "cannot revoke blank token" {
t.Fatalf("err: %v", err)
}
err = ts.Revoke(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
out, err := ts.Lookup(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %#v", out)
}
}
func TestTokenStore_Revoke_Leases(t *testing.T) {
c, ts, _, _ := TestCoreWithTokenStore(t)
view := NewBarrierView(c.barrier, "noop/")
// Mount a noop backend
noop := &NoopBackend{}
err := ts.expiration.router.Mount(noop, "noop/", &MountEntry{UUID: "noopuuid", Accessor: "noopaccessor"}, view)
if err != nil {
t.Fatal(err)
}
ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}}
if err := ts.create(context.Background(), ent); err != nil {
t.Fatalf("err: %v", err)
}
// Register a lease
req := &logical.Request{
Operation: logical.ReadOperation,
Path: "noop/foo",
ClientToken: ent.ID,
}
resp := &logical.Response{
Secret: &logical.Secret{
LeaseOptions: logical.LeaseOptions{
TTL: 20 * time.Millisecond,
},
},
Data: map[string]interface{}{
"access_key": "xyz",
"secret_key": "abcd",
},
}
leaseID, err := ts.expiration.Register(req, resp)
if err != nil {
t.Fatalf("err: %v", err)
}
// Revoke the token
err = ts.Revoke(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
// Verify the lease is gone
out, err := ts.expiration.loadEntry(leaseID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %#v", out)
}
}
func TestTokenStore_Revoke_Orphan(t *testing.T) {
_, ts, _, _ := TestCoreWithTokenStore(t)
ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}}
if err := ts.create(context.Background(), ent); err != nil {
t.Fatalf("err: %v", err)
}
ent2 := &TokenEntry{Parent: ent.ID}
if err := ts.create(context.Background(), ent2); err != nil {
t.Fatalf("err: %v", err)
}
err := ts.Revoke(context.Background(), ent.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
out, err := ts.Lookup(context.Background(), ent2.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
// Unset the expected token parent's ID
ent2.Parent = ""
if !reflect.DeepEqual(out, ent2) {
t.Fatalf("bad: expected:%#v\nactual:%#v", ent2, out)
}
}
// This was the original function name; it now just calls
// the non-recursive version for a variety of depths.
func TestTokenStore_RevokeTree(t *testing.T) {
testTokenStore_RevokeTree_NonRecursive(t, 1)
testTokenStore_RevokeTree_NonRecursive(t, 2)
testTokenStore_RevokeTree_NonRecursive(t, 10)
}
// Revokes a given token store tree non-recursively.
// The second parameter is the depth of the tree.
func testTokenStore_RevokeTree_NonRecursive(t testing.TB, depth uint64) {
_, ts, _, _ := TestCoreWithTokenStore(t)
root, children := buildTokenTree(t, ts, depth)
err := ts.RevokeTree(context.Background(), "")
if err.Error() != "cannot tree-revoke blank token" {
t.Fatalf("err: %v", err)
}
// Nuke the tree non-recursively.
err = ts.RevokeTree(context.Background(), root.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
// Append the root to ensure it was successfully
// deleted.
children = append(children, root)
for _, entry := range children {
out, err := ts.Lookup(context.Background(), entry.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %#v", out)
}
}
}
// A benchmark that exercises testTokenStore_RevokeTree_NonRecursive
// at a variety of tree depths.
func BenchmarkTokenStore_RevokeTree(b *testing.B) {
benchmarks := []uint64{0, 1, 2, 4, 8, 16, 20}
for _, depth := range benchmarks {
b.Run(fmt.Sprintf("Tree of Depth %d", depth), func(b *testing.B) {
for i := 0; i < b.N; i++ {
testTokenStore_RevokeTree_NonRecursive(b, depth)
}
})
}
}
// Builds a token tree of the specified depth so that
// we can run revocation tests against it.
func buildTokenTree(t testing.TB, ts *TokenStore, depth uint64) (root *TokenEntry, children []*TokenEntry) {
root = &TokenEntry{}
if err := ts.create(context.Background(), root); err != nil {
t.Fatalf("err: %v", err)
}
frontier := []*TokenEntry{root}
current := uint64(0)
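// Each iteration doubles the frontier: every node in the current frontier
// gets two children, producing a complete binary tree of the requested depth.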
for current < depth {
next := make([]*TokenEntry, 0, 2*len(frontier))
for _, node := range frontier {
left := &TokenEntry{Parent: node.ID}
if err := ts.create(context.Background(), left); err != nil {
t.Fatalf("err: %v", err)
}
right := &TokenEntry{Parent: node.ID}
if err := ts.create(context.Background(), right); err != nil {
t.Fatalf("err: %v", err)
}
children = append(children, left, right)
next = append(next, left, right)
}
frontier = next
current++
}
return root, children
}
func TestTokenStore_RevokeSelf(t *testing.T) {
_, ts, _, _ := TestCoreWithTokenStore(t)
ent1 := &TokenEntry{}
if err := ts.create(context.Background(), ent1); err != nil {
t.Fatalf("err: %v", err)
}
ent2 := &TokenEntry{Parent: ent1.ID}
if err := ts.create(context.Background(), ent2); err != nil {
t.Fatalf("err: %v", err)
}
ent3 := &TokenEntry{Parent: ent2.ID}
if err := ts.create(context.Background(), ent3); err != nil {
t.Fatalf("err: %v", err)
}
ent4 := &TokenEntry{Parent: ent2.ID}
if err := ts.create(context.Background(), ent4); err != nil {
t.Fatalf("err: %v", err)
}
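// Revoking ent1 via revoke-self should cascade to its entire descendant tree.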
req := logical.TestRequest(t, logical.UpdateOperation, "revoke-self")
req.ClientToken = ent1.ID
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
lookup := []string{ent1.ID, ent2.ID, ent3.ID, ent4.ID}
for _, id := range lookup {
out, err := ts.Lookup(context.Background(), id)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %#v", out)
}
}
}
func TestTokenStore_HandleRequest_NonAssignable(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
req.Data["policies"] = []string{"default", "foo"}
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
req.Data["policies"] = []string{"default", "foo", responseWrappingPolicyName}
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("got a nil response")
}
if !resp.IsError() {
t.Fatalf("expected error; response is %#v", *resp)
}
}
func TestTokenStore_HandleRequest_CreateToken_DisplayName(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
req.Data["display_name"] = "foo_bar.baz!"
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
expected := &TokenEntry{
ID: resp.Auth.ClientToken,
Accessor: resp.Auth.Accessor,
Parent: root,
Policies: []string{"root"},
Path: "auth/token/create",
DisplayName: "token-foo-bar-baz",
TTL: 0,
}
out, err := ts.Lookup(context.Background(), resp.Auth.ClientToken)
if err != nil {
t.Fatalf("err: %v", err)
}
expected.CreationTime = out.CreationTime
if !reflect.DeepEqual(out, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, out)
}
}
func TestTokenStore_HandleRequest_CreateToken_NumUses(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
req.Data["num_uses"] = "1"
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
expected := &TokenEntry{
ID: resp.Auth.ClientToken,
Accessor: resp.Auth.Accessor,
Parent: root,
Policies: []string{"root"},
Path: "auth/token/create",
DisplayName: "token",
NumUses: 1,
TTL: 0,
}
out, err := ts.Lookup(context.Background(), resp.Auth.ClientToken)
if err != nil {
t.Fatalf("err: %v", err)
}
expected.CreationTime = out.CreationTime
if !reflect.DeepEqual(out, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, out)
}
}
func TestTokenStore_HandleRequest_CreateToken_NumUses_Invalid(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
req.Data["num_uses"] = "-1"
resp, err := ts.HandleRequest(context.Background(), req)
if err != logical.ErrInvalidRequest {
t.Fatalf("err: %v resp: %#v", err, resp)
}
}
func TestTokenStore_HandleRequest_CreateToken_NumUses_Restricted(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
req.Data["num_uses"] = "1"
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
// We should NOT be able to use the restricted token to create a new token
req.ClientToken = resp.Auth.ClientToken
_, err = ts.HandleRequest(context.Background(), req)
if err != logical.ErrInvalidRequest {
t.Fatalf("err: %v resp: %#v", err, resp)
}
}
func TestTokenStore_HandleRequest_CreateToken_NoPolicy(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
expected := &TokenEntry{
ID: resp.Auth.ClientToken,
Accessor: resp.Auth.Accessor,
Parent: root,
Policies: []string{"root"},
Path: "auth/token/create",
DisplayName: "token",
TTL: 0,
}
out, err := ts.Lookup(context.Background(), resp.Auth.ClientToken)
if err != nil {
t.Fatalf("err: %v", err)
}
expected.CreationTime = out.CreationTime
if !reflect.DeepEqual(out, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, out)
}
}
func TestTokenStore_HandleRequest_CreateToken_BadParent(t *testing.T) {
_, ts, _, _ := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = "random"
resp, err := ts.HandleRequest(context.Background(), req)
if err != logical.ErrInvalidRequest {
t.Fatalf("err: %v resp: %#v", err, resp)
}
if resp.Data["error"] != "parent token lookup failed" {
t.Fatalf("bad: %#v", resp)
}
}
func TestTokenStore_HandleRequest_CreateToken(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
req.Data["policies"] = []string{"foo"}
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
}
func TestTokenStore_HandleRequest_CreateToken_RootID(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
req.Data["id"] = "foobar"
req.Data["policies"] = []string{"foo"}
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken != "foobar" {
t.Fatalf("bad: %#v", resp)
}
}
func TestTokenStore_HandleRequest_CreateToken_NonRootID(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
testMakeToken(t, ts, root, "client", "", []string{"foo"})
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = "client"
req.Data["id"] = "foobar"
req.Data["policies"] = []string{"foo"}
resp, err := ts.HandleRequest(context.Background(), req)
if err != logical.ErrInvalidRequest {
t.Fatalf("err: %v resp: %#v", err, resp)
}
if resp.Data["error"] != "root or sudo privileges required to specify token id" {
t.Fatalf("bad: %#v", resp)
}
}
func TestTokenStore_HandleRequest_CreateToken_NonRoot_Subset(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
testMakeToken(t, ts, root, "client", "", []string{"foo", "bar"})
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = "client"
req.Data["policies"] = []string{"foo"}
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
}
func TestTokenStore_HandleRequest_CreateToken_NonRoot_InvalidSubset(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
testMakeToken(t, ts, root, "client", "", []string{"foo", "bar"})
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = "client"
req.Data["policies"] = []string{"foo", "bar", "baz"}
resp, err := ts.HandleRequest(context.Background(), req)
if err != logical.ErrInvalidRequest {
t.Fatalf("err: %v resp: %#v", err, resp)
}
if resp.Data["error"] != "child policies must be subset of parent" {
t.Fatalf("bad: %#v", resp)
}
}
func TestTokenStore_HandleRequest_CreateToken_NonRoot_RootChild(t *testing.T) {
core, ts, _, root := TestCoreWithTokenStore(t)
ps := core.policyStore
policy, _ := ParseACLPolicy(tokenCreationPolicy)
policy.Name = "test1"
if err := ps.SetPolicy(context.Background(), policy); err != nil {
t.Fatal(err)
}
testMakeToken(t, ts, root, "sudoClient", "", []string{"test1"})
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = "sudoClient"
req.MountPoint = "auth/token/"
req.Data["policies"] = []string{"root"}
resp, err := ts.HandleRequest(context.Background(), req)
if err != logical.ErrInvalidRequest {
t.Fatalf("err: %v; resp: %#v", err, resp)
}
if resp == nil || resp.Data == nil {
t.Fatalf("expected a response")
}
if resp.Data["error"].(string) != "root tokens may not be created without parent token being root" {
t.Fatalf("bad: %#v", resp)
}
}
func TestTokenStore_HandleRequest_CreateToken_Root_RootChild_NoExpiry_Expiry(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
req.Data = map[string]interface{}{
"ttl": "5m",
}
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil {
t.Fatalf("err: %v; resp: %#v", err, resp)
}
if resp == nil || resp.Auth == nil {
t.Fatalf("failed to create a root token using another root token")
}
if !reflect.DeepEqual(resp.Auth.Policies, []string{"root"}) {
t.Fatalf("bad: policies: expected: root; actual: %s", resp.Auth.Policies)
}
if resp.Auth.TTL.Seconds() != 300 {
t.Fatalf("bad: expected 300 second ttl, got %v", resp.Auth.TTL.Seconds())
}
req.ClientToken = resp.Auth.ClientToken
req.Data = map[string]interface{}{
"ttl": "0",
}
resp, err = ts.HandleRequest(context.Background(), req)
if err == nil {
t.Fatalf("expected error")
}
}
func TestTokenStore_HandleRequest_CreateToken_Root_RootChild(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil {
t.Fatalf("err: %v; resp: %#v", err, resp)
}
if resp == nil || resp.Auth == nil {
t.Fatalf("failed to create a root token using another root token")
}
if !reflect.DeepEqual(resp.Auth.Policies, []string{"root"}) {
t.Fatalf("bad: policies: expected: root; actual: %s", resp.Auth.Policies)
}
}
func TestTokenStore_HandleRequest_CreateToken_NonRoot_NoParent(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
testMakeToken(t, ts, root, "client", "", []string{"foo"})
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = "client"
req.Data["no_parent"] = true
req.Data["policies"] = []string{"foo"}
resp, err := ts.HandleRequest(context.Background(), req)
if err != logical.ErrInvalidRequest {
t.Fatalf("err: %v resp: %#v", err, resp)
}
if resp.Data["error"] != "root or sudo privileges required to create orphan token" {
t.Fatalf("bad: %#v", resp)
}
}
func TestTokenStore_HandleRequest_CreateToken_Root_NoParent(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
req.Data["no_parent"] = true
req.Data["policies"] = []string{"foo"}
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
out, _ := ts.Lookup(context.Background(), resp.Auth.ClientToken)
if out.Parent != "" {
t.Fatalf("bad: %#v", out)
}
}
func TestTokenStore_HandleRequest_CreateToken_PathBased_NoParent(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create-orphan")
req.ClientToken = root
req.Data["policies"] = []string{"foo"}
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
out, _ := ts.Lookup(context.Background(), resp.Auth.ClientToken)
if out.Parent != "" {
t.Fatalf("bad: %#v", out)
}
}
func TestTokenStore_HandleRequest_CreateToken_Metadata(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
req.Data["policies"] = []string{"foo"}
meta := map[string]string{
"user": "armon",
"source": "github",
}
req.Data["meta"] = meta
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
out, _ := ts.Lookup(context.Background(), resp.Auth.ClientToken)
if !reflect.DeepEqual(out.Meta, meta) {
t.Fatalf("bad: expected:%#v\nactual:%#v", meta, out.Meta)
}
}
func TestTokenStore_HandleRequest_CreateToken_Lease(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
req.Data["policies"] = []string{"foo"}
req.Data["lease"] = "1h"
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
if resp.Auth.TTL != time.Hour {
t.Fatalf("bad: %#v", resp)
}
if !resp.Auth.Renewable {
t.Fatalf("bad: %#v", resp)
}
}
func TestTokenStore_HandleRequest_CreateToken_TTL(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
req.Data["policies"] = []string{"foo"}
req.Data["ttl"] = "1h"
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
if resp.Auth.TTL != time.Hour {
t.Fatalf("bad: %#v", resp)
}
if !resp.Auth.Renewable {
t.Fatalf("bad: %#v", resp)
}
}
func TestTokenStore_HandleRequest_Revoke(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
testMakeToken(t, ts, root, "child", "", []string{"root", "foo"})
testMakeToken(t, ts, "child", "sub-child", "", []string{"foo"})
req := logical.TestRequest(t, logical.UpdateOperation, "revoke")
req.Data = map[string]interface{}{
"token": "child",
}
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("bad: %#v", resp)
}
out, err := ts.Lookup(context.Background(), "child")
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %v", out)
}
// Sub-child should not exist
out, err = ts.Lookup(context.Background(), "sub-child")
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %v", out)
}
}
func TestTokenStore_HandleRequest_RevokeOrphan(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
testMakeToken(t, ts, root, "child", "", []string{"root", "foo"})
testMakeToken(t, ts, "child", "sub-child", "", []string{"foo"})
req := logical.TestRequest(t, logical.UpdateOperation, "revoke-orphan")
req.Data = map[string]interface{}{
"token": "child",
}
req.ClientToken = root
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("bad: %#v", resp)
}
out, err := ts.Lookup(context.Background(), "child")
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("bad: %v", out)
}
// Check that the parent entry is properly cleaned up
saltedID, err := ts.SaltID(context.Background(), "child")
if err != nil {
t.Fatalf("err: %v", err)
children, err := ts.view.List(context.Background(), parentPrefix+saltedID+"/")
if err != nil {
t.Fatalf("err: %v", err)
}
if len(children) != 0 {
t.Fatalf("bad: %v", children)
}
// Sub-child should exist!
out, err = ts.Lookup(context.Background(), "sub-child")
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("bad: %v", out)
}
}
func TestTokenStore_HandleRequest_RevokeOrphan_NonRoot(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
testMakeToken(t, ts, root, "child", "", []string{"foo"})
out, err := ts.Lookup(context.Background(), "child")
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("bad: %v", out)
}
req := logical.TestRequest(t, logical.UpdateOperation, "revoke-orphan")
req.Data = map[string]interface{}{
"token": "child",
}
req.ClientToken = "child"
resp, err := ts.HandleRequest(context.Background(), req)
if err != logical.ErrInvalidRequest {
t.Fatalf("did not get error when non-root revoking itself with orphan flag; resp is %#v", resp)
}
// Should still exist
out, err = ts.Lookup(context.Background(), "child")
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("bad: %v", out)
}
}
func TestTokenStore_HandleRequest_Lookup(t *testing.T) {
c, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "lookup")
req.Data = map[string]interface{}{
"token": root,
}
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp == nil {
t.Fatalf("bad: %#v", resp)
}
exp := map[string]interface{}{
"id": root,
"accessor": resp.Data["accessor"].(string),
"policies": []string{"root"},
"path": "auth/token/root",
"meta": map[string]string(nil),
"display_name": "root",
"orphan": true,
"num_uses": 0,
"creation_ttl": int64(0),
"ttl": int64(0),
"explicit_max_ttl": int64(0),
"expire_time": nil,
"entity_id": "",
}
if resp.Data["creation_time"].(int64) == 0 {
t.Fatalf("creation time was zero")
}
delete(resp.Data, "creation_time")
if !reflect.DeepEqual(resp.Data, exp) {
t.Fatalf("bad: expected:%#v\nactual:%#v", exp, resp.Data)
}
testCoreMakeToken(t, c, root, "client", "3600s", []string{"foo"})
// Look up the client token
req = logical.TestRequest(t, logical.UpdateOperation, "lookup")
req.Data = map[string]interface{}{
"token": "client",
}
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp == nil {
t.Fatalf("bad: %#v", resp)
}
exp = map[string]interface{}{
"id": "client",
"accessor": resp.Data["accessor"],
"policies": []string{"default", "foo"},
"path": "auth/token/create",
"meta": map[string]string(nil),
"display_name": "token",
"orphan": false,
"num_uses": 0,
"creation_ttl": int64(3600),
"ttl": int64(3600),
"explicit_max_ttl": int64(0),
"renewable": true,
"entity_id": "",
}
if resp.Data["creation_time"].(int64) == 0 {
t.Fatalf("creation time was zero")
}
delete(resp.Data, "creation_time")
if resp.Data["issue_time"].(time.Time).IsZero() {
t.Fatal("issue time is default time")
}
delete(resp.Data, "issue_time")
if resp.Data["expire_time"].(time.Time).IsZero() {
t.Fatal("expire time is default time")
}
delete(resp.Data, "expire_time")
// Depending on timing of the test this may have ticked down, so accept 3599
if resp.Data["ttl"].(int64) == 3599 {
resp.Data["ttl"] = int64(3600)
}
if !reflect.DeepEqual(resp.Data, exp) {
t.Fatalf("bad: expected:%#v\nactual:%#v", exp, resp.Data)
}
// Look up the client token a second time; the result should be identical
req = logical.TestRequest(t, logical.UpdateOperation, "lookup")
req.Data = map[string]interface{}{
"token": "client",
}
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp == nil {
t.Fatalf("bad: %#v", resp)
}
exp = map[string]interface{}{
"id": "client",
"accessor": resp.Data["accessor"],
"policies": []string{"default", "foo"},
"path": "auth/token/create",
"meta": map[string]string(nil),
"display_name": "token",
"orphan": false,
"num_uses": 0,
"creation_ttl": int64(3600),
"ttl": int64(3600),
"explicit_max_ttl": int64(0),
"renewable": true,
"entity_id": "",
}
if resp.Data["creation_time"].(int64) == 0 {
t.Fatalf("creation time was zero")
}
delete(resp.Data, "creation_time")
if resp.Data["issue_time"].(time.Time).IsZero() {
t.Fatal("issue time is default time")
}
delete(resp.Data, "issue_time")
if resp.Data["expire_time"].(time.Time).IsZero() {
t.Fatal("expire time is default time")
}
delete(resp.Data, "expire_time")
// Depending on timing of the test this may have ticked down, so accept 3599
if resp.Data["ttl"].(int64) == 3599 {
resp.Data["ttl"] = int64(3600)
}
if !reflect.DeepEqual(resp.Data, exp) {
t.Fatalf("bad: expected:%#v\nactual:%#v", exp, resp.Data)
}
// Test last_renewal_time functionality
req = logical.TestRequest(t, logical.UpdateOperation, "renew")
req.Data = map[string]interface{}{
"token": "client",
}
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp == nil {
t.Fatalf("bad: %#v", resp)
}
req = logical.TestRequest(t, logical.UpdateOperation, "lookup")
req.Data = map[string]interface{}{
"token": "client",
}
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp == nil {
t.Fatalf("bad: %#v", resp)
}
if resp.Data["last_renewal_time"].(int64) == 0 {
t.Fatalf("last_renewal_time was zero")
}
}
func TestTokenStore_HandleRequest_LookupSelf(t *testing.T) {
c, ts, _, root := TestCoreWithTokenStore(t)
testCoreMakeToken(t, c, root, "client", "3600s", []string{"foo"})
req := logical.TestRequest(t, logical.ReadOperation, "lookup-self")
req.ClientToken = "client"
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp == nil {
t.Fatalf("bad: %#v", resp)
}
exp := map[string]interface{}{
"id": "client",
"accessor": resp.Data["accessor"],
"policies": []string{"default", "foo"},
"path": "auth/token/create",
"meta": map[string]string(nil),
"display_name": "token",
"orphan": false,
"renewable": true,
"num_uses": 0,
"creation_ttl": int64(3600),
"ttl": int64(3600),
"explicit_max_ttl": int64(0),
"entity_id": "",
}
if resp.Data["creation_time"].(int64) == 0 {
t.Fatalf("creation time was zero")
}
delete(resp.Data, "creation_time")
if resp.Data["issue_time"].(time.Time).IsZero() {
t.Fatalf("creation time was zero")
}
delete(resp.Data, "issue_time")
if resp.Data["expire_time"].(time.Time).IsZero() {
t.Fatalf("expire time was zero")
}
delete(resp.Data, "expire_time")
// Depending on timing of the test this may have ticked down, so accept 3599
if resp.Data["ttl"].(int64) == 3599 {
resp.Data["ttl"] = int64(3600)
}
if !reflect.DeepEqual(resp.Data, exp) {
t.Fatalf("bad: expected:%#v\nactual:%#v", exp, resp.Data)
}
}
func TestTokenStore_HandleRequest_Renew(t *testing.T) {
exp := mockExpiration(t)
ts := exp.tokenStore
// Create a root token
root, err := ts.rootToken(context.Background())
if err != nil {
t.Fatalf("err: %v", err)
}
// Build an auth entry and register it with the expiration manager
auth := &logical.Auth{
ClientToken: root.ID,
LeaseOptions: logical.LeaseOptions{
TTL: time.Hour,
Renewable: true,
},
}
err = exp.RegisterAuth("auth/token/root", auth)
if err != nil {
t.Fatalf("err: %v", err)
}
// Get the original expire time to compare
originalExpire := auth.ExpirationTime()
beforeRenew := time.Now()
req := logical.TestRequest(t, logical.UpdateOperation, "renew")
req.Data = map[string]interface{}{
"token": root.ID,
"increment": "3600s",
}
req.Data["increment"] = "3600s"
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
// Get the new expire time
newExpire := resp.Auth.ExpirationTime()
if newExpire.Before(originalExpire) {
t.Fatalf("should expire later: %s %s", newExpire, originalExpire)
}
if newExpire.Before(beforeRenew.Add(time.Hour)) {
t.Fatalf("should have at least an hour: %s %s", newExpire, beforeRenew)
}
}
func TestTokenStore_HandleRequest_RenewSelf(t *testing.T) {
exp := mockExpiration(t)
ts := exp.tokenStore
// Create a root token
root, err := ts.rootToken(context.Background())
if err != nil {
t.Fatalf("err: %v", err)
}
// Build an auth entry and register it with the expiration manager
auth := &logical.Auth{
ClientToken: root.ID,
LeaseOptions: logical.LeaseOptions{
TTL: time.Hour,
Renewable: true,
},
}
err = exp.RegisterAuth("auth/token/root", auth)
if err != nil {
t.Fatalf("err: %v", err)
}
// Get the original expire time to compare
originalExpire := auth.ExpirationTime()
beforeRenew := time.Now()
req := logical.TestRequest(t, logical.UpdateOperation, "renew-self")
req.ClientToken = auth.ClientToken
req.Data["increment"] = "3600s"
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
// Get the new expire time
newExpire := resp.Auth.ExpirationTime()
if newExpire.Before(originalExpire) {
t.Fatalf("should expire later: %s %s", newExpire, originalExpire)
}
if newExpire.Before(beforeRenew.Add(time.Hour)) {
t.Fatalf("should have at least an hour: %s %s", newExpire, beforeRenew)
}
}
func TestTokenStore_RoleCRUD(t *testing.T) {
core, _, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.ReadOperation, "auth/token/roles/test")
req.ClientToken = root
resp, err := core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("should not see a role")
}
// First test creation
req.Operation = logical.CreateOperation
req.Data = map[string]interface{}{
"orphan": true,
"period": "72h",
"allowed_policies": "test1,test2",
"path_suffix": "happenin",
}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("expected a nil response")
}
req.Operation = logical.ReadOperation
req.Data = map[string]interface{}{}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp == nil {
t.Fatalf("got a nil response")
}
expected := map[string]interface{}{
"name": "test",
"orphan": true,
"period": int64(259200),
"allowed_policies": []string{"test1", "test2"},
"disallowed_policies": []string{},
"path_suffix": "happenin",
"explicit_max_ttl": int64(0),
"renewable": true,
}
if !reflect.DeepEqual(expected, resp.Data) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, resp.Data)
}
// Now test updating; this should be set to an UpdateOperation
// automatically due to the existence check
req.Operation = logical.CreateOperation
req.Data = map[string]interface{}{
"period": "79h",
"allowed_policies": "test3",
"path_suffix": "happenin",
"renewable": false,
}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("expected a nil response")
}
req.Operation = logical.ReadOperation
req.Data = map[string]interface{}{}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp == nil {
t.Fatalf("got a nil response")
}
expected = map[string]interface{}{
"name": "test",
"orphan": true,
"period": int64(284400),
"allowed_policies": []string{"test3"},
"disallowed_policies": []string{},
"path_suffix": "happenin",
"explicit_max_ttl": int64(0),
"renewable": false,
}
if !reflect.DeepEqual(expected, resp.Data) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, resp.Data)
}
// Now set explicit max ttl and clear the period
req.Operation = logical.CreateOperation
req.Data = map[string]interface{}{
"explicit_max_ttl": "5",
"period": "0s",
}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
req.Operation = logical.ReadOperation
req.Data = map[string]interface{}{}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp == nil {
t.Fatalf("got a nil response")
}
expected = map[string]interface{}{
"name": "test",
"orphan": true,
"explicit_max_ttl": int64(5),
"allowed_policies": []string{"test3"},
"disallowed_policies": []string{},
"path_suffix": "happenin",
"period": int64(0),
"renewable": false,
}
if !reflect.DeepEqual(expected, resp.Data) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, resp.Data)
}
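// List the roles; only the "test" role should be present.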
req.Operation = logical.ListOperation
req.Path = "auth/token/roles"
req.Data = map[string]interface{}{}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp == nil {
t.Fatalf("got a nil response")
}
keysInt, ok := resp.Data["keys"]
if !ok {
t.Fatalf("did not find keys in response")
}
keys, ok := keysInt.([]string)
if !ok {
t.Fatalf("could not convert keys interface to key list")
}
if len(keys) != 1 {
t.Fatalf("unexpected number of keys: %d", len(keys))
}
if keys[0] != "test" {
t.Fatalf("expected \"test\", got \"%s\"", keys[0])
}
req.Operation = logical.DeleteOperation
req.Path = "auth/token/roles/test"
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("expected a nil response")
}
req.Operation = logical.ReadOperation
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("expected a nil response")
}
}
func TestTokenStore_RoleDisallowedPoliciesWithRoot(t *testing.T) {
var resp *logical.Response
var err error
_, ts, _, root := TestCoreWithTokenStore(t)
// Set disallowed_policies to include "root" and verify that a read on the
// role returns the list as configured.
roleReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "roles/role1",
Data: map[string]interface{}{
"disallowed_policies": "root,testpolicy",
},
ClientToken: root,
}
resp, err = ts.HandleRequest(context.Background(), roleReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
roleReq.Operation = logical.ReadOperation
resp, err = ts.HandleRequest(context.Background(), roleReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
expected := []string{"root", "testpolicy"}
if !reflect.DeepEqual(resp.Data["disallowed_policies"], expected) {
t.Fatalf("bad: expected: %#v, actual: %#v", expected, resp.Data["disallowed_policies"])
}
}
func TestTokenStore_RoleDisallowedPolicies(t *testing.T) {
var req *logical.Request
var resp *logical.Response
var err error
core, ts, _, root := TestCoreWithTokenStore(t)
ps := core.policyStore
// Create 3 different policies
policy, _ := ParseACLPolicy(tokenCreationPolicy)
policy.Name = "test1"
if err := ps.SetPolicy(context.Background(), policy); err != nil {
t.Fatal(err)
}
policy, _ = ParseACLPolicy(tokenCreationPolicy)
policy.Name = "test2"
if err := ps.SetPolicy(context.Background(), policy); err != nil {
t.Fatal(err)
}
policy, _ = ParseACLPolicy(tokenCreationPolicy)
policy.Name = "test3"
if err := ps.SetPolicy(context.Background(), policy); err != nil {
t.Fatal(err)
}
// Create roles with different disallowed_policies configuration
req = logical.TestRequest(t, logical.UpdateOperation, "roles/test1")
req.ClientToken = root
req.Data = map[string]interface{}{
"disallowed_policies": "test1",
}
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
req = logical.TestRequest(t, logical.UpdateOperation, "roles/test23")
req.ClientToken = root
req.Data = map[string]interface{}{
"disallowed_policies": "test2,test3",
}
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
req = logical.TestRequest(t, logical.UpdateOperation, "roles/test123")
req.ClientToken = root
req.Data = map[string]interface{}{
"disallowed_policies": "test1,test2,test3",
}
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
// Create a token that has all the policies defined above
req = logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
req.Data["policies"] = []string{"test1", "test2", "test3"}
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
if resp == nil || resp.Auth == nil {
t.Fatal("got nil response")
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: ClientToken; resp:%#v", resp)
}
parentToken := resp.Auth.ClientToken
req = logical.TestRequest(t, logical.UpdateOperation, "create/test1")
req.ClientToken = parentToken
resp, err = ts.HandleRequest(context.Background(), req)
if err == nil || resp != nil && !resp.IsError() {
t.Fatalf("expected an error response, got %#v", resp)
}
req = logical.TestRequest(t, logical.UpdateOperation, "create/test23")
req.ClientToken = parentToken
resp, err = ts.HandleRequest(context.Background(), req)
if err == nil || resp != nil && !resp.IsError() {
t.Fatal("expected an error response")
}
req = logical.TestRequest(t, logical.UpdateOperation, "create/test123")
req.ClientToken = parentToken
resp, err = ts.HandleRequest(context.Background(), req)
if err == nil || resp != nil && !resp.IsError() {
t.Fatal("expected an error response")
}
// Disallowed policies act as a blacklist, so make sure we can still create
// a token with other policies in the request
req = logical.TestRequest(t, logical.UpdateOperation, "create/test123")
req.Data["policies"] = []string{"foo", "bar"}
req.ClientToken = parentToken
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || resp == nil || resp.IsError() {
t.Fatalf("err:%v resp:%v", err, resp)
}
// Create a role to have 'default' policy disallowed
req = logical.TestRequest(t, logical.UpdateOperation, "roles/default")
req.ClientToken = root
req.Data = map[string]interface{}{
"disallowed_policies": "default",
}
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
req = logical.TestRequest(t, logical.UpdateOperation, "create/default")
req.ClientToken = parentToken
resp, err = ts.HandleRequest(context.Background(), req)
if err == nil || resp != nil && !resp.IsError() {
t.Fatal("expected an error response")
}
}
func TestTokenStore_RoleAllowedPolicies(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "roles/test")
req.ClientToken = root
req.Data = map[string]interface{}{
"allowed_policies": "test1,test2",
}
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("expected a nil response")
}
req.Data = map[string]interface{}{}
req.Path = "create/test"
req.Data["policies"] = []string{"foo"}
resp, err = ts.HandleRequest(context.Background(), req)
if err == nil {
t.Fatalf("expected error")
}
req.Data["policies"] = []string{"test2"}
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
// When allowed_policies is blank, should fall back to a subset of the parent policies
req = logical.TestRequest(t, logical.UpdateOperation, "roles/test")
req.ClientToken = root
req.Data = map[string]interface{}{
"allowed_policies": "",
}
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("expected a nil response")
}
req = logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
req.Data["policies"] = []string{"test1", "test2", "test3"}
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
if resp == nil || resp.Auth == nil {
t.Fatal("got nil response")
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: ClientToken; resp:%#v", resp)
}
if !reflect.DeepEqual(resp.Auth.Policies, []string{"default", "test1", "test2", "test3"}) {
t.Fatalf("bad: %#v", resp.Auth.Policies)
}
parentToken := resp.Auth.ClientToken
req.Data = map[string]interface{}{}
req.ClientToken = parentToken
req.Path = "create/test"
req.Data["policies"] = []string{"foo"}
resp, err = ts.HandleRequest(context.Background(), req)
if err == nil {
t.Fatalf("expected error")
}
req.Data["policies"] = []string{"test2"}
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
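// With no policies specified, the token should inherit the parent's policies
// (plus default).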
delete(req.Data, "policies")
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
if !reflect.DeepEqual(resp.Auth.Policies, []string{"default", "test1", "test2", "test3"}) {
t.Fatalf("bad: %#v", resp.Auth.Policies)
}
}
func TestTokenStore_RoleOrphan(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "roles/test")
req.ClientToken = root
req.Data = map[string]interface{}{
"orphan": true,
}
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("expected a nil response")
}
req.Path = "create/test"
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
out, err := ts.Lookup(context.Background(), resp.Auth.ClientToken)
if err != nil {
t.Fatalf("err: %v", err)
}
if out.Parent != "" {
t.Fatalf("expected orphan token, but found a parent")
}
if !strings.HasPrefix(out.Path, "auth/token/create/test") {
t.Fatalf("expected role in path but did not find it")
}
}
func TestTokenStore_RolePathSuffix(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
req := logical.TestRequest(t, logical.UpdateOperation, "roles/test")
req.ClientToken = root
req.Data = map[string]interface{}{
"path_suffix": "happenin",
}
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("expected a nil response")
}
req.Path = "create/test"
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
out, err := ts.Lookup(context.Background(), resp.Auth.ClientToken)
if err != nil {
t.Fatalf("err: %v", err)
}
if out.Path != "auth/token/create/test/happenin" {
t.Fatalf("expected role in path but did not find it")
}
}
func TestTokenStore_RolePeriod(t *testing.T) {
core, _, _, root := TestCoreWithTokenStore(t)
core.defaultLeaseTTL = 10 * time.Second
core.maxLeaseTTL = 10 * time.Second
// Note: these requests are sent to Core since Core handles registration
// with the expiration manager and we need the storage to be consistent
req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/roles/test")
req.ClientToken = root
req.Data = map[string]interface{}{
"period": 5,
}
resp, err := core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("expected a nil response")
}
// This first set of logic is to verify that a normal non-root token will
// be given a TTL of 10 seconds, and that renewing will not cause the TTL to
// increase since that's the configured backend max. Then we verify that
// increment works.
{
req.Path = "auth/token/create"
req.Data = map[string]interface{}{
"policies": []string{"default"},
}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
req.ClientToken = resp.Auth.ClientToken
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl := resp.Data["ttl"].(int64)
if ttl > 10 {
t.Fatalf("TTL too large")
}
// Let the TTL go down a bit to 8 seconds
time.Sleep(2 * time.Second)
req.Operation = logical.UpdateOperation
req.Path = "auth/token/renew-self"
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl = resp.Data["ttl"].(int64)
if ttl > 8 {
t.Fatalf("TTL too large: %d", ttl)
}
// Renewing should not have the increment increase since we've hit the
// max
req.Operation = logical.UpdateOperation
req.Path = "auth/token/renew-self"
req.Data = map[string]interface{}{
"increment": 1,
}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl = resp.Data["ttl"].(int64)
if ttl > 8 {
t.Fatalf("TTL too large")
}
}
// Now we create a token against the role. We should be able to renew;
// increment should be ignored as well.
{
req.ClientToken = root
req.Operation = logical.UpdateOperation
req.Path = "auth/token/create/test"
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp == nil {
t.Fatal("response was nil")
}
if resp.Auth == nil {
t.Fatalf("response auth was nil, resp is %#v", *resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
req.ClientToken = resp.Auth.ClientToken
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl := resp.Data["ttl"].(int64)
if ttl > 5 {
t.Fatalf("TTL too large (expected %d, got %d", 5, ttl)
}
// Let the TTL go down a bit to 3 seconds
time.Sleep(3 * time.Second)
req.Operation = logical.UpdateOperation
req.Path = "auth/token/renew-self"
req.Data = map[string]interface{}{
"increment": 1,
}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl = resp.Data["ttl"].(int64)
if ttl > 5 {
t.Fatalf("TTL too large (expected %d, got %d", 5, ttl)
}
}
}
func TestTokenStore_RoleExplicitMaxTTL(t *testing.T) {
core, _, _, root := TestCoreWithTokenStore(t)
core.defaultLeaseTTL = 5 * time.Second
core.maxLeaseTTL = 5 * time.Hour
// Note: these requests are sent to Core since Core handles registration
// with the expiration manager and we need the storage to be consistent
// Make sure we can't make it larger than the system/mount max; we should get a warning on the role write and on token creation, where the TTL gets capped
req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/roles/test")
req.ClientToken = root
req.Data = map[string]interface{}{
"explicit_max_ttl": "100h",
}
resp, err := core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp == nil {
t.Fatalf("expected a warning")
}
req.Operation = logical.UpdateOperation
req.Path = "auth/token/create/test"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("expected an error")
}
if len(resp.Warnings) == 0 {
t.Fatalf("expected a warning")
}
// Reset to a good explicit max
req = logical.TestRequest(t, logical.UpdateOperation, "auth/token/roles/test")
req.ClientToken = root
req.Data = map[string]interface{}{
"explicit_max_ttl": "10s",
}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("expected a nil response")
}
// This first set of logic is to verify that a normal non-root token will
// be given a TTL of 5 seconds, and that renewing will cause the TTL to
// increase
{
req.Path = "auth/token/create"
req.Data = map[string]interface{}{
"policies": []string{"default"},
}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
req.ClientToken = resp.Auth.ClientToken
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl := resp.Data["ttl"].(int64)
if ttl > 5 {
t.Fatalf("TTL too large")
}
// Let the TTL go down a bit to 3 seconds
time.Sleep(2 * time.Second)
req.Operation = logical.UpdateOperation
req.Path = "auth/token/renew-self"
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl = resp.Data["ttl"].(int64)
if ttl < 4 {
t.Fatalf("TTL too small after renewal")
}
}
// Now we create a token against the role. After renew our max should still
// be the same.
{
req.ClientToken = root
req.Operation = logical.UpdateOperation
req.Path = "auth/token/create/test"
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp == nil {
t.Fatal("response was nil")
}
if resp.Auth == nil {
t.Fatalf("response auth was nil, resp is %#v", *resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
req.ClientToken = resp.Auth.ClientToken
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl := resp.Data["ttl"].(int64)
if ttl > 10 {
t.Fatalf("TTL too big")
}
// explicit max ttl is stored in the role so not returned here
maxTTL := resp.Data["explicit_max_ttl"].(int64)
if maxTTL != 0 {
t.Fatalf("expected 0 for explicit max TTL, got %d", maxTTL)
}
// Let the TTL go down a bit to ~7 seconds (8 against explicit max)
time.Sleep(2 * time.Second)
req.Operation = logical.UpdateOperation
req.Path = "auth/token/renew-self"
req.Data = map[string]interface{}{
"increment": 300,
}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl = resp.Data["ttl"].(int64)
if ttl > 8 {
t.Fatalf("TTL too big: %d", ttl)
}
// Let the TTL go down a bit more to ~5 seconds (6 against explicit max)
time.Sleep(2 * time.Second)
req.Operation = logical.UpdateOperation
req.Path = "auth/token/renew-self"
req.Data = map[string]interface{}{
"increment": 300,
}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl = resp.Data["ttl"].(int64)
if ttl > 6 {
t.Fatalf("TTL too big")
}
// It should expire
time.Sleep(8 * time.Second)
req.Operation = logical.UpdateOperation
req.Path = "auth/token/renew-self"
req.Data = map[string]interface{}{
"increment": 300,
}
resp, err = core.HandleRequest(req)
if err == nil {
t.Fatalf("expected error")
}
time.Sleep(2 * time.Second)
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if resp != nil && err == nil {
t.Fatalf("expected error, response is %#v", *resp)
}
if err == nil {
t.Fatalf("expected error")
}
}
}
func TestTokenStore_Periodic(t *testing.T) {
core, _, _, root := TestCoreWithTokenStore(t)
core.defaultLeaseTTL = 10 * time.Second
core.maxLeaseTTL = 10 * time.Second
// Note: these requests are sent to Core since Core handles registration
// with the expiration manager and we need the storage to be consistent
req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/roles/test")
req.ClientToken = root
req.Data = map[string]interface{}{
"period": 5,
}
resp, err := core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("expected a nil response")
}
// First make one directly and verify on renew it uses the period.
{
req.ClientToken = root
req.Operation = logical.UpdateOperation
req.Path = "auth/token/create"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("response was nil")
}
if resp.Auth == nil {
t.Fatalf("response auth was nil, resp is %#v", *resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
req.ClientToken = resp.Auth.ClientToken
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl := resp.Data["ttl"].(int64)
if ttl > 5 {
t.Fatalf("TTL too large (expected %d, got %d)", 5, ttl)
}
// Let the TTL go down a bit
time.Sleep(2 * time.Second)
req.Operation = logical.UpdateOperation
req.Path = "auth/token/renew-self"
req.Data = map[string]interface{}{
"increment": 1,
}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl = resp.Data["ttl"].(int64)
if ttl > 5 {
t.Fatalf("TTL too large (expected %d, got %d)", 5, ttl)
}
}
// Now we create a token against the role and also set the te value
// directly. We should use the smaller of the two and be able to renew;
// increment should be ignored as well.
{
req.ClientToken = root
req.Operation = logical.UpdateOperation
req.Path = "auth/token/create/test"
req.Data = map[string]interface{}{
"period": 5,
}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp == nil {
t.Fatal("response was nil")
}
if resp.Auth == nil {
t.Fatalf("response auth was nil, resp is %#v", *resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
req.ClientToken = resp.Auth.ClientToken
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl := resp.Data["ttl"].(int64)
if ttl < 4 || ttl > 5 {
t.Fatalf("TTL bad (expected %d, got %d)", 4, ttl)
}
// Let the TTL go down a bit
time.Sleep(2 * time.Second)
req.Operation = logical.UpdateOperation
req.Path = "auth/token/renew-self"
req.Data = map[string]interface{}{
"increment": 1,
}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl = resp.Data["ttl"].(int64)
if ttl > 5 {
t.Fatalf("TTL bad (expected less than %d, got %d)", 5, ttl)
}
}
}
func TestTokenStore_Periodic_ExplicitMax(t *testing.T) {
core, _, _, root := TestCoreWithTokenStore(t)
core.defaultLeaseTTL = 10 * time.Second
core.maxLeaseTTL = 10 * time.Second
// Note: these requests are sent to Core since Core handles registration
// with the expiration manager and we need the storage to be consistent
req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/roles/test")
req.ClientToken = root
req.Data = map[string]interface{}{
"period": 5,
}
resp, err := core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("expected a nil response")
}
// First make one directly and verify on renew it uses the period.
{
req.ClientToken = root
req.Operation = logical.UpdateOperation
req.Path = "auth/token/create"
req.Data = map[string]interface{}{
"period": 5,
"explicit_max_ttl": 4,
}
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("response was nil")
}
if resp.Auth == nil {
			t.Fatalf("response auth was nil, resp is %#v", *resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
req.ClientToken = resp.Auth.ClientToken
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl := resp.Data["ttl"].(int64)
if ttl < 3 || ttl > 4 {
t.Fatalf("TTL bad (expected %d, got %d)", 3, ttl)
}
// Let the TTL go down a bit
time.Sleep(2 * time.Second)
req.Operation = logical.UpdateOperation
req.Path = "auth/token/renew-self"
req.Data = map[string]interface{}{
"increment": 76,
}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl = resp.Data["ttl"].(int64)
if ttl > 2 {
t.Fatalf("TTL bad (expected less than %d, got %d)", 2, ttl)
}
}
// Now we create a token against the role and also set the te value
// directly. We should use the smaller of the two and be able to renew;
// increment should be ignored as well.
{
req.Path = "auth/token/roles/test"
req.ClientToken = root
req.Operation = logical.UpdateOperation
req.Data = map[string]interface{}{
"period": 5,
"explicit_max_ttl": 4,
}
resp, err := core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp != nil {
t.Fatalf("expected a nil response")
}
req.ClientToken = root
req.Operation = logical.UpdateOperation
req.Path = "auth/token/create/test"
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
if resp == nil {
t.Fatal("response was nil")
}
if resp.Auth == nil {
			t.Fatalf("response auth was nil, resp is %#v", *resp)
}
if resp.Auth.ClientToken == "" {
t.Fatalf("bad: %#v", resp)
}
req.ClientToken = resp.Auth.ClientToken
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl := resp.Data["ttl"].(int64)
if ttl < 3 || ttl > 4 {
t.Fatalf("TTL bad (expected %d, got %d)", 3, ttl)
}
// Let the TTL go down a bit
time.Sleep(2 * time.Second)
req.Operation = logical.UpdateOperation
req.Path = "auth/token/renew-self"
req.Data = map[string]interface{}{
"increment": 1,
}
resp, err = core.HandleRequest(req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v", err)
}
ttl = resp.Data["ttl"].(int64)
if ttl > 2 {
t.Fatalf("TTL bad (expected less than %d, got %d)", 2, ttl)
}
}
}
func TestTokenStore_NoDefaultPolicy(t *testing.T) {
var resp *logical.Response
var err error
core, ts, _, root := TestCoreWithTokenStore(t)
ps := core.policyStore
policy, _ := ParseACLPolicy(tokenCreationPolicy)
policy.Name = "policy1"
if err := ps.SetPolicy(context.Background(), policy); err != nil {
t.Fatal(err)
}
// Root token creates a token with desired policy. The created token
// should also have 'default' attached to it.
tokenData := map[string]interface{}{
"policies": []string{"policy1"},
}
tokenReq := &logical.Request{
Path: "create",
ClientToken: root,
Operation: logical.UpdateOperation,
Data: tokenData,
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
if !reflect.DeepEqual(resp.Auth.Policies, []string{"default", "policy1"}) {
t.Fatalf("bad: policies: expected: [policy, default]; actual: %s", resp.Auth.Policies)
}
newToken := resp.Auth.ClientToken
// Root token creates a token with desired policy, but also requests
// that the token to not have 'default' policy. The resulting token
// should not have 'default' policy on it.
tokenData["no_default_policy"] = true
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
if !reflect.DeepEqual(resp.Auth.Policies, []string{"policy1"}) {
t.Fatalf("bad: policies: expected: [policy1]; actual: %s", resp.Auth.Policies)
}
// A non-root token which has 'default' policy attached requests for a
// child token. Child token should also have 'default' policy attached.
tokenReq.ClientToken = newToken
tokenReq.Data = nil
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
if !reflect.DeepEqual(resp.Auth.Policies, []string{"default", "policy1"}) {
t.Fatalf("bad: policies: expected: [default policy1]; actual: %s", resp.Auth.Policies)
}
// A non-root token which has 'default' policy attached and period explicitly
// set to its zero value requests for a child token. Child token should be
// successfully created and have 'default' policy attached.
tokenReq.Data = map[string]interface{}{
"period": "0s",
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
if !reflect.DeepEqual(resp.Auth.Policies, []string{"default", "policy1"}) {
t.Fatalf("bad: policies: expected: [default policy1]; actual: %s", resp.Auth.Policies)
}
// A non-root token which has 'default' policy attached, request for a
// child token to not have 'default' policy while not sending a list
tokenReq.Data = map[string]interface{}{
"no_default_policy": true,
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
if !reflect.DeepEqual(resp.Auth.Policies, []string{"policy1"}) {
t.Fatalf("bad: policies: expected: [policy1]; actual: %s", resp.Auth.Policies)
}
// In this case "default" shouldn't exist because we are not inheriting
// parent policies
tokenReq.Data = map[string]interface{}{
"policies": []string{"policy1"},
"no_default_policy": true,
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
if !reflect.DeepEqual(resp.Auth.Policies, []string{"policy1"}) {
t.Fatalf("bad: policies: expected: [policy1]; actual: %s", resp.Auth.Policies)
}
// This is a non-root token which does not have 'default' policy
// attached
newToken = resp.Auth.ClientToken
tokenReq.Data = nil
tokenReq.ClientToken = newToken
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
if !reflect.DeepEqual(resp.Auth.Policies, []string{"policy1"}) {
t.Fatalf("bad: policies: expected: [policy1]; actual: %s", resp.Auth.Policies)
}
roleReq := &logical.Request{
ClientToken: root,
Path: "roles/role1",
Operation: logical.CreateOperation,
}
resp, err = ts.HandleRequest(context.Background(), roleReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
tokenReq.Path = "create/role1"
tokenReq.Data = map[string]interface{}{
"policies": []string{"policy1"},
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
if !reflect.DeepEqual(resp.Auth.Policies, []string{"policy1"}) {
t.Fatalf("bad: policies: expected: [policy1]; actual: %s", resp.Auth.Policies)
}
// If 'allowed_policies' in role does not have 'default' in it, the
// tokens generated using that role should still have the 'default' policy
// attached to them.
roleReq.Operation = logical.UpdateOperation
roleReq.Data = map[string]interface{}{
"allowed_policies": "policy1",
}
resp, err = ts.HandleRequest(context.Background(), roleReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
if !reflect.DeepEqual(resp.Auth.Policies, []string{"default", "policy1"}) {
t.Fatalf("bad: policies: expected: [default policy1]; actual: %s", resp.Auth.Policies)
}
// If 'allowed_policies' in role does not have 'default' in it, the
// tokens generated using that role should not have 'default' policy
// attached to them if disallowed_policies contains "default"
roleReq.Operation = logical.UpdateOperation
roleReq.Data = map[string]interface{}{
"allowed_policies": "policy1",
"disallowed_policies": "default",
}
resp, err = ts.HandleRequest(context.Background(), roleReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
if !reflect.DeepEqual(resp.Auth.Policies, []string{"policy1"}) {
t.Fatalf("bad: policies: expected: [policy1]; actual: %s", resp.Auth.Policies)
}
roleReq.Data = map[string]interface{}{
"allowed_policies": "",
"disallowed_policies": "default",
}
resp, err = ts.HandleRequest(context.Background(), roleReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
if !reflect.DeepEqual(resp.Auth.Policies, []string{"policy1"}) {
t.Fatalf("bad: policies: expected: [policy1]; actual: %s", resp.Auth.Policies)
}
// Ensure that if default is in both allowed and disallowed, disallowed wins
roleReq.Data = map[string]interface{}{
"allowed_policies": "default",
"disallowed_policies": "default",
}
resp, err = ts.HandleRequest(context.Background(), roleReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
delete(tokenReq.Data, "policies")
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err == nil || (resp != nil && !resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
}
func TestTokenStore_AllowedDisallowedPolicies(t *testing.T) {
var resp *logical.Response
var err error
_, ts, _, root := TestCoreWithTokenStore(t)
roleReq := &logical.Request{
ClientToken: root,
Path: "roles/role1",
Operation: logical.CreateOperation,
Data: map[string]interface{}{
"allowed_policies": "allowed1,allowed2",
"disallowed_policies": "disallowed1,disallowed2",
},
}
resp, err = ts.HandleRequest(context.Background(), roleReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
tokenReq := &logical.Request{
Path: "create/role1",
ClientToken: root,
Operation: logical.UpdateOperation,
Data: map[string]interface{}{
"policies": []string{"allowed1"},
},
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
expected := []string{"allowed1", "default"}
if !reflect.DeepEqual(resp.Auth.Policies, expected) {
t.Fatalf("bad: expected:%#v actual:%#v", expected, resp.Auth.Policies)
}
// Try again with automatic default adding turned off
tokenReq = &logical.Request{
Path: "create/role1",
ClientToken: root,
Operation: logical.UpdateOperation,
Data: map[string]interface{}{
"policies": []string{"allowed1"},
"no_default_policy": true,
},
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
expected = []string{"allowed1"}
if !reflect.DeepEqual(resp.Auth.Policies, expected) {
t.Fatalf("bad: expected:%#v actual:%#v", expected, resp.Auth.Policies)
}
tokenReq.Data = map[string]interface{}{
"policies": []string{"disallowed1"},
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err == nil {
t.Fatalf("expected an error")
}
roleReq.Operation = logical.UpdateOperation
roleReq.Data = map[string]interface{}{
"allowed_policies": "allowed1,common",
"disallowed_policies": "disallowed1,common",
}
resp, err = ts.HandleRequest(context.Background(), roleReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v, resp: %v", err, resp)
}
tokenReq.Data = map[string]interface{}{
"policies": []string{"allowed1", "common"},
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err == nil {
t.Fatalf("expected an error")
}
}
// Issue 2189
func TestTokenStore_RevokeUseCountToken(t *testing.T) {
var resp *logical.Response
var err error
cubbyFuncLock := &sync.RWMutex{}
cubbyFuncLock.Lock()
exp := mockExpiration(t)
ts := exp.tokenStore
root, _ := exp.tokenStore.rootToken(context.Background())
tokenReq := &logical.Request{
Path: "create",
ClientToken: root.ID,
Operation: logical.UpdateOperation,
Data: map[string]interface{}{
"num_uses": 1,
},
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
tut := resp.Auth.ClientToken
saltTut, err := ts.SaltID(context.Background(), tut)
if err != nil {
t.Fatal(err)
}
te, err := ts.lookupSalted(context.Background(), saltTut, false)
if err != nil {
t.Fatal(err)
}
if te == nil {
t.Fatal("nil entry")
}
if te.NumUses != 1 {
t.Fatalf("bad: %d", te.NumUses)
}
te, err = ts.UseToken(context.Background(), te)
if err != nil {
t.Fatal(err)
}
if te == nil {
t.Fatal("nil entry")
}
if te.NumUses != tokenRevocationDeferred {
t.Fatalf("bad: %d", te.NumUses)
}
// Should return no entry because it's tainted
te, err = ts.lookupSalted(context.Background(), saltTut, false)
if err != nil {
t.Fatal(err)
}
if te != nil {
t.Fatalf("%#v", te)
}
// But it should show up in an API lookup call
req := &logical.Request{
Path: "lookup-self",
ClientToken: tut,
Operation: logical.UpdateOperation,
}
resp, err = ts.HandleRequest(context.Background(), req)
if err != nil {
t.Fatal(err)
}
if resp == nil || resp.Data == nil || resp.Data["num_uses"] == nil {
t.Fatal("nil resp or data")
}
if resp.Data["num_uses"].(int) != -1 {
t.Fatalf("bad: %v", resp.Data["num_uses"])
}
// Should return tainted entries
te, err = ts.lookupSalted(context.Background(), saltTut, true)
if err != nil {
t.Fatal(err)
}
if te == nil {
t.Fatal("nil entry")
}
if te.NumUses != tokenRevocationDeferred {
t.Fatalf("bad: %d", te.NumUses)
}
origDestroyCubbyhole := ts.cubbyholeDestroyer
ts.cubbyholeDestroyer = func(context.Context, *TokenStore, string) error {
return fmt.Errorf("keep it frosty")
}
err = ts.revokeSalted(context.Background(), saltTut)
if err == nil {
t.Fatalf("expected err")
}
// Since revocation failed we should see the tokenRevocationFailed canary value
te, err = ts.lookupSalted(context.Background(), saltTut, true)
if err != nil {
t.Fatal(err)
}
if te == nil {
t.Fatal("nil entry")
}
if te.NumUses != tokenRevocationFailed {
t.Fatalf("bad: %d", te.NumUses)
}
// Check the race condition situation by making the process sleep
ts.cubbyholeDestroyer = func(context.Context, *TokenStore, string) error {
time.Sleep(1 * time.Second)
return fmt.Errorf("keep it frosty")
}
cubbyFuncLock.Unlock()
go func() {
cubbyFuncLock.RLock()
err := ts.revokeSalted(context.Background(), saltTut)
cubbyFuncLock.RUnlock()
if err == nil {
t.Fatalf("expected error")
}
}()
// Give time for the function to start and grab locks
time.Sleep(200 * time.Millisecond)
te, err = ts.lookupSalted(context.Background(), saltTut, true)
if err != nil {
t.Fatal(err)
}
if te == nil {
t.Fatal("nil entry")
}
if te.NumUses != tokenRevocationInProgress {
t.Fatalf("bad: %d", te.NumUses)
}
// Let things catch up
time.Sleep(2 * time.Second)
// Put back to normal
cubbyFuncLock.Lock()
defer cubbyFuncLock.Unlock()
ts.cubbyholeDestroyer = origDestroyCubbyhole
err = ts.revokeSalted(context.Background(), saltTut)
if err != nil {
t.Fatal(err)
}
te, err = ts.lookupSalted(context.Background(), saltTut, true)
if err != nil {
t.Fatal(err)
}
if te != nil {
t.Fatal("found entry")
}
}
// Create a token, delete the token entry while leaking accessors, invoke tidy
// and check if the dangling accessor entry is getting removed
func TestTokenStore_HandleTidyCase1(t *testing.T) {
var resp *logical.Response
var err error
_, ts, _, root := TestCoreWithTokenStore(t)
// List the number of accessors. Since there is only root token
// present, the list operation should return only one key.
accessorListReq := &logical.Request{
Operation: logical.ListOperation,
Path: "accessors/",
ClientToken: root,
}
resp, err = ts.HandleRequest(context.Background(), accessorListReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
numberOfAccessors := len(resp.Data["keys"].([]string))
if numberOfAccessors != 1 {
t.Fatalf("bad: number of accessors. Expected: 1, Actual: %d", numberOfAccessors)
}
for i := 1; i <= 100; i++ {
// Create a regular token
tokenReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "create",
ClientToken: root,
Data: map[string]interface{}{
"policies": []string{"policy1"},
},
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
tut := resp.Auth.ClientToken
// Creation of another token should end up with incrementing
		// the number of accessors in the storage
resp, err = ts.HandleRequest(context.Background(), accessorListReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
numberOfAccessors = len(resp.Data["keys"].([]string))
if numberOfAccessors != i+1 {
t.Fatalf("bad: number of accessors. Expected: %d, Actual: %d", i+1, numberOfAccessors)
}
// Revoke the token while leaking other items associated with the
// token. Do this by doing what revokeSalted used to do before it was
// fixed, i.e., by deleting the storage entry for token and its
// cubbyhole and by not deleting its secondary index, its accessor and
// associated leases.
saltedTut, err := ts.SaltID(context.Background(), tut)
if err != nil {
t.Fatal(err)
}
_, err = ts.lookupSalted(context.Background(), saltedTut, true)
if err != nil {
t.Fatalf("failed to lookup token: %v", err)
}
// Destroy the token index
path := lookupPrefix + saltedTut
		if err := ts.view.Delete(context.Background(), path); err != nil {
t.Fatalf("failed to delete token entry: %v", err)
}
// Destroy the cubby space
err = ts.destroyCubbyhole(context.Background(), saltedTut)
if err != nil {
t.Fatalf("failed to destroyCubbyhole: %v", err)
}
// Leaking of accessor should have resulted in no change to the number
// of accessors
resp, err = ts.HandleRequest(context.Background(), accessorListReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
numberOfAccessors = len(resp.Data["keys"].([]string))
if numberOfAccessors != i+1 {
t.Fatalf("bad: number of accessors. Expected: %d, Actual: %d", i+1, numberOfAccessors)
}
}
tidyReq := &logical.Request{
Path: "tidy",
Operation: logical.UpdateOperation,
ClientToken: root,
}
resp, err = ts.HandleRequest(context.Background(), tidyReq)
if err != nil {
t.Fatal(err)
}
if resp != nil && resp.IsError() {
t.Fatalf("resp: %#v", resp)
}
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
// Tidy should have removed all the dangling accessor entries
resp, err = ts.HandleRequest(context.Background(), accessorListReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
numberOfAccessors = len(resp.Data["keys"].([]string))
if numberOfAccessors != 1 {
t.Fatalf("bad: number of accessors. Expected: 1, Actual: %d", numberOfAccessors)
}
}
// Create a set of tokens along with a child token for each of them, delete the
// token entry while leaking accessors, invoke tidy and check if the dangling
// accessor entry is getting removed and check if child tokens are still present
// and turned into orphan tokens.
func TestTokenStore_HandleTidy_parentCleanup(t *testing.T) {
var resp *logical.Response
var err error
_, ts, _, root := TestCoreWithTokenStore(t)
// List the number of accessors. Since there is only root token
// present, the list operation should return only one key.
accessorListReq := &logical.Request{
Operation: logical.ListOperation,
Path: "accessors/",
ClientToken: root,
}
resp, err = ts.HandleRequest(context.Background(), accessorListReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
numberOfAccessors := len(resp.Data["keys"].([]string))
if numberOfAccessors != 1 {
t.Fatalf("bad: number of accessors. Expected: 1, Actual: %d", numberOfAccessors)
}
for i := 1; i <= 100; i++ {
// Create a token
tokenReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "create",
ClientToken: root,
Data: map[string]interface{}{
"policies": []string{"policy1"},
},
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
tut := resp.Auth.ClientToken
// Create a child token
tokenReq = &logical.Request{
Operation: logical.UpdateOperation,
Path: "create",
ClientToken: tut,
Data: map[string]interface{}{
"policies": []string{"policy1"},
},
}
resp, err = ts.HandleRequest(context.Background(), tokenReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
// Creation of another token should end up with incrementing the number of
		// accessors in the storage
resp, err = ts.HandleRequest(context.Background(), accessorListReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
numberOfAccessors = len(resp.Data["keys"].([]string))
if numberOfAccessors != (i*2)+1 {
t.Fatalf("bad: number of accessors. Expected: %d, Actual: %d", i+1, numberOfAccessors)
}
// Revoke the token while leaking other items associated with the
// token. Do this by doing what revokeSalted used to do before it was
// fixed, i.e., by deleting the storage entry for token and its
// cubbyhole and by not deleting its secondary index, its accessor and
// associated leases.
saltedTut, err := ts.SaltID(context.Background(), tut)
if err != nil {
t.Fatal(err)
}
_, err = ts.lookupSalted(context.Background(), saltedTut, true)
if err != nil {
t.Fatalf("failed to lookup token: %v", err)
}
// Destroy the token index
path := lookupPrefix + saltedTut
		if err := ts.view.Delete(context.Background(), path); err != nil {
t.Fatalf("failed to delete token entry: %v", err)
}
// Destroy the cubby space
err = ts.destroyCubbyhole(context.Background(), saltedTut)
if err != nil {
t.Fatalf("failed to destroyCubbyhole: %v", err)
}
// Leaking of accessor should have resulted in no change to the number
// of accessors
resp, err = ts.HandleRequest(context.Background(), accessorListReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
numberOfAccessors = len(resp.Data["keys"].([]string))
if numberOfAccessors != (i*2)+1 {
t.Fatalf("bad: number of accessors. Expected: %d, Actual: %d", (i*2)+1, numberOfAccessors)
}
}
tidyReq := &logical.Request{
Path: "tidy",
Operation: logical.UpdateOperation,
ClientToken: root,
}
resp, err = ts.HandleRequest(context.Background(), tidyReq)
if err != nil {
t.Fatal(err)
}
if resp != nil && resp.IsError() {
t.Fatalf("resp: %#v", resp)
}
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
// Tidy should have removed all the dangling accessor entries
resp, err = ts.HandleRequest(context.Background(), accessorListReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%v", err, resp)
}
// The number of accessors should be equal to number of valid child tokens
// (100) + the root token (1)
keys := resp.Data["keys"].([]string)
numberOfAccessors = len(keys)
if numberOfAccessors != 101 {
t.Fatalf("bad: number of accessors. Expected: 1, Actual: %d", numberOfAccessors)
}
req := logical.TestRequest(t, logical.UpdateOperation, "lookup-accessor")
for _, accessor := range keys {
req.Data = map[string]interface{}{
"accessor": accessor,
}
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil {
t.Fatalf("err: %s", err)
}
if resp.Data == nil {
t.Fatalf("response should contain data")
}
// These tokens should now be orphaned
if resp.Data["orphan"] != true {
t.Fatalf("token should be orphan")
}
}
}
func TestTokenStore_TidyLeaseRevocation(t *testing.T) {
exp := mockExpiration(t)
ts := exp.tokenStore
noop := &NoopBackend{}
_, barrier, _ := mockBarrier(t)
view := NewBarrierView(barrier, "logical/")
meUUID, err := uuid.GenerateUUID()
if err != nil {
t.Fatal(err)
}
err = exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID, Accessor: "awsaccessor"}, view)
if err != nil {
t.Fatal(err)
}
// Create new token
root, err := ts.rootToken(context.Background())
if err != nil {
t.Fatalf("err: %v", err)
}
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root.ID
req.Data["policies"] = []string{"default"}
resp, err := ts.HandleRequest(context.Background(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err: %v\nresp: %#v", err, resp)
}
// Create a new token
auth := &logical.Auth{
ClientToken: resp.Auth.ClientToken,
LeaseOptions: logical.LeaseOptions{
TTL: time.Hour,
Renewable: true,
},
}
err = exp.RegisterAuth("auth/token/create", auth)
if err != nil {
t.Fatalf("err: %v", err)
}
tut := resp.Auth.ClientToken
req = &logical.Request{
Path: "prod/aws/foo",
ClientToken: tut,
}
resp = &logical.Response{
Secret: &logical.Secret{
LeaseOptions: logical.LeaseOptions{
TTL: time.Hour,
},
},
}
leases := []string{}
for i := 0; i < 10; i++ {
leaseId, err := exp.Register(req, resp)
if err != nil {
t.Fatal(err)
}
leases = append(leases, leaseId)
}
sort.Strings(leases)
storedLeases, err := exp.lookupByToken(tut)
if err != nil {
t.Fatal(err)
}
sort.Strings(storedLeases)
if !reflect.DeepEqual(leases, storedLeases) {
t.Fatalf("bad: %#v vs %#v", leases, storedLeases)
}
// Now, delete the token entry. The leases should still exist.
saltedTut, err := ts.SaltID(context.Background(), tut)
if err != nil {
t.Fatal(err)
}
te, err := ts.lookupSalted(context.Background(), saltedTut, true)
if err != nil {
t.Fatalf("failed to lookup token: %v", err)
}
if te == nil {
t.Fatal("got nil token entry")
}
// Destroy the token index
path := lookupPrefix + saltedTut
	if err := ts.view.Delete(context.Background(), path); err != nil {
t.Fatalf("failed to delete token entry: %v", err)
}
te, err = ts.lookupSalted(context.Background(), saltedTut, true)
if err != nil {
t.Fatalf("failed to lookup token: %v", err)
}
if te != nil {
t.Fatal("got token entry")
}
// Verify leases still exist
storedLeases, err = exp.lookupByToken(tut)
if err != nil {
t.Fatal(err)
}
sort.Strings(storedLeases)
if !reflect.DeepEqual(leases, storedLeases) {
t.Fatalf("bad: %#v vs %#v", leases, storedLeases)
}
// Call tidy
ts.handleTidy(context.Background(), nil, nil)
// Verify leases are gone
storedLeases, err = exp.lookupByToken(tut)
if err != nil {
t.Fatal(err)
}
if len(storedLeases) > 0 {
t.Fatal("found leases")
}
}<|fim▁end|> | t.Fatal(err) |
<|file_name|>Pali.java<|end_file_name|><|fim▁begin|>import java.util.*;
public class Pali {
public static void main(String args[])
{
Scanner sc=new Scanner(System.in);
String str=sc.next();
StringBuffer buff=new StringBuffer(str).reverse();<|fim▁hole|> }
else
{
System.out.println("Not a Palindrome");
}
}<|fim▁end|> | String str1=buff.toString();
if(str.equals(str1))
{
System.out.println("Palindrome"); |
<|file_name|>flexberry-wfs-filter.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | export { default } from 'ember-flexberry-gis/components/flexberry-wfs-filter'; |
<|file_name|>postgresql.py<|end_file_name|><|fim▁begin|>import logging
import traceback
from functools import wraps
import os
import re
from django.conf import settings
from django.db import connection
from django.db.models import ManyToManyField
logger = logging.getLogger(__name__)
def debug_pg_notices(f):
@wraps(f)
def wrapped(*args, **kwargs):
r = None
if connection.connection:
del connection.connection.notices[:]
try:
r = f(*args, **kwargs)
finally:
# Show triggers output
allnotices = []
current = ''
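            # Group the raw psycopg2 notices by their CONTEXT line so each
            # trigger's output is logged under the context it was raised from.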
if connection.connection:
notices = []
for notice in connection.connection.notices:
try:
notice, context = notice.split('CONTEXT:', 1)
context = re.sub(r"\s+", " ", context)<|fim▁hole|> notices.append((context, notice))
if context != current:
allnotices.append(notices)
notices = []
current = context
allnotices.append(notices)
current = ''
for notices in allnotices:
for context, notice in notices:
if context != current:
if context != '':
logger.debug('Context %s...:' % context.strip()[:80])
current = context
notice = notice.replace('NOTICE: ', '')
prefix = ''
logger.debug('%s%s' % (prefix, notice.strip()))
return r
return wrapped
def load_sql_files(app, stage):
"""
Look for SQL files in Django app, and load them into database.
We remove RAISE NOTICE instructions from SQL outside unit testing
since they lead to interpolation errors of '%' character in python.
"""
app_dir = app.path
sql_dir = os.path.normpath(os.path.join(app_dir, 'sql'))
custom_sql_dir = os.path.join(settings.VAR_DIR, 'conf/extra_sql', app.label)
sql_files = []
r = re.compile(r'^{}_.*\.sql$'.format(stage))
if os.path.exists(sql_dir):
sql_files += [
os.path.join(sql_dir, f) for f in os.listdir(sql_dir) if r.match(f) is not None
]
if os.path.exists(custom_sql_dir):
sql_files += [
os.path.join(custom_sql_dir, f) for f in os.listdir(custom_sql_dir) if r.match(f) is not None
]
sql_files.sort()
cursor = connection.cursor()
for sql_file in sql_files:
try:
logger.info("Loading initial SQL data from '%s'" % sql_file)
f = open(sql_file)
sql = f.read()
f.close()
if not settings.TEST and not settings.DEBUG:
# Remove RAISE NOTICE (/!\ only one-liners)
sql = re.sub(r"\n.*RAISE NOTICE.*\n", "\n", sql)
# TODO: this is the ugliest driver hack ever
sql = sql.replace('%', '%%')
# Replace curly braces with settings values
pattern = re.compile(r'{{\s*([^\s]*)\s*}}')
for m in pattern.finditer(sql):
value = getattr(settings, m.group(1))
sql = sql.replace(m.group(0), str(value))
# Replace sharp braces with schemas
pattern = re.compile(r'{#\s*([^\s]*)\s*#}')
for m in pattern.finditer(sql):
try:
value = settings.DATABASE_SCHEMAS[m.group(1)]
except KeyError:
value = settings.DATABASE_SCHEMAS.get('default', 'public')
sql = sql.replace(m.group(0), str(value))
cursor.execute(sql)
except Exception as e:
logger.critical("Failed to install custom SQL file '%s': %s\n" %
(sql_file, e))
traceback.print_exc()
raise
def set_search_path():
# Set search path with all existing schema + new ones
cursor = connection.cursor()
cursor.execute('SELECT schema_name FROM information_schema.schemata')
search_path = set([s[0] for s in cursor.fetchall() if not s[0].startswith('pg_')])
search_path |= set(settings.DATABASE_SCHEMAS.values())
search_path.discard('public')
search_path.discard('information_schema')
search_path = ('public', ) + tuple(search_path)
cursor.execute('SET search_path TO {}'.format(', '.join(search_path)))
def move_models_to_schemas(app):
"""
Move models tables to PostgreSQL schemas.
Views, functions and triggers will be moved in Geotrek app SQL files.
"""
default_schema = settings.DATABASE_SCHEMAS.get('default', 'public')
app_schema = settings.DATABASE_SCHEMAS.get(app.name, default_schema)
table_schemas = {}
for model in app.get_models():
model_name = model._meta.model_name
table_name = model._meta.db_table
model_schema = settings.DATABASE_SCHEMAS.get(model_name, app_schema)
table_schemas.setdefault(model_schema, []).append(table_name)
for field in model._meta.get_fields():
if isinstance(field, ManyToManyField):
table_schemas[model_schema].append(field.m2m_db_table())
cursor = connection.cursor()
for schema_name in table_schemas.keys():
sql = "CREATE SCHEMA IF NOT EXISTS %s;" % model_schema
cursor.execute(sql)
logger.info("Created schema %s" % model_schema)
for schema_name, tables in table_schemas.items():
for table_name in tables:
sql = "SELECT 1 FROM information_schema.tables WHERE table_name=%s AND table_schema!=%s"
cursor.execute(sql, [table_name, schema_name])
if cursor.fetchone():
sql = "ALTER TABLE %s SET SCHEMA %s;" % (table_name, schema_name)
cursor.execute(sql)
logger.info("Moved %s to schema %s" % (table_name, schema_name))
# For Django, search_path is set in connection options.
# But when accessing the database using QGis or ETL, search_path must be
# set database level (for all users, and for this database only).
if app.name == 'geotrek.common':
dbname = settings.DATABASES['default']['NAME']
dbuser = settings.DATABASES['default']['USER']
search_path = ', '.join(('public', ) + tuple(set(settings.DATABASE_SCHEMAS.values())))
sql = "ALTER ROLE %s IN DATABASE %s SET search_path=%s;" % (dbuser, dbname, search_path)
cursor.execute(sql)<|fim▁end|> | except ValueError:
context = '' |
<|file_name|>logging.cpp<|end_file_name|><|fim▁begin|>#include "logging.hpp"
#include <fstream>
#include <boost/log/sinks/text_file_backend.hpp>
#include <boost/log/utility/setup/file.hpp>
#include <boost/log/utility/setup/common_attributes.hpp>
#include <boost/log/sinks.hpp>
namespace p2l { namespace common {
//=====================================================================
void _init_logging()
{
boost::log::add_common_attributes();
boost::log::core::get()->set_filter
(
boost::log::trivial::severity >= boost::log::trivial::trace
);
typedef boost::log::sinks::synchronous_sink< boost::log::sinks::text_ostream_backend > text_sink;
  // the file sink for the default logger
boost::shared_ptr< text_sink > default_sink = boost::make_shared< text_sink >();
default_sink->locked_backend()->add_stream<|fim▁hole|> // the file sink for hte stat logger
boost::shared_ptr< text_sink > stat_sink = boost::make_shared< text_sink >();
stat_sink->locked_backend()->add_stream
( boost::make_shared< std::ofstream >( "stat_log.log" ) );
boost::log::core::get()->add_sink( stat_sink );
}
//=====================================================================
//=====================================================================
//=====================================================================
//=====================================================================
//=====================================================================
//=====================================================================
//=====================================================================
//=====================================================================
//=====================================================================
//=====================================================================
//=====================================================================
}
}<|fim▁end|> | ( boost::make_shared< std::ofstream >( "default_log.log" ) );
boost::log::core::get()->add_sink( default_sink );
|
<|file_name|>interner.rs<|end_file_name|><|fim▁begin|>use std::cmp::Ordering;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::ops::Deref;
use Result;
use base::fnv::FnvMap;
use gc::{GcPtr, Gc, Traverseable};
use array::Str;
/// Interned strings which allow for fast equality checks and hashing
#[derive(Copy, Clone, Eq)]
pub struct InternedStr(GcPtr<Str>);
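// Equality and hashing compare the underlying GC pointer: the interner allocates
// each distinct string only once, so pointer identity implies string equality.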
impl PartialEq<InternedStr> for InternedStr {
fn eq(&self, other: &InternedStr) -> bool {
self.as_ptr() == other.as_ptr()
}
}
impl<'a> PartialEq<&'a str> for InternedStr {
fn eq(&self, other: &&'a str) -> bool {
**self == **other
}
}
impl PartialOrd for InternedStr {
fn partial_cmp(&self, other: &InternedStr) -> Option<Ordering> {
self.as_ptr().partial_cmp(&other.as_ptr())
}
}
impl Ord for InternedStr {
fn cmp(&self, other: &InternedStr) -> Ordering {
self.as_ptr().cmp(&other.as_ptr())
}
}
impl Hash for InternedStr {
fn hash<H>(&self, hasher: &mut H)
where H: Hasher,
{
self.as_ptr().hash(hasher)
}
}
unsafe impl Sync for InternedStr {}
impl Deref for InternedStr {
type Target = str;
fn deref(&self) -> &str {
&self.0
}
}
impl AsRef<str> for InternedStr {
fn as_ref(&self) -> &str {
&self.0
}
}
impl InternedStr {
pub fn inner(&self) -> GcPtr<Str> {
self.0
}
}
pub struct Interner {
// For this map and this map only we can't use InternedStr as keys since the hash should
// not be expected to be the same as ordinary strings, we use a transmute to &'static str to
// have the keys as strings without any unsafety as the keys do not escape the interner and they
// live as long as their values
indexes: FnvMap<&'static str, InternedStr>,
}
impl Traverseable for Interner {
fn traverse(&self, gc: &mut Gc) {
for (_, v) in self.indexes.iter() {
v.0.traverse(gc);
}
}
}
impl Interner {
pub fn new() -> Interner {
Interner { indexes: FnvMap::default() }
}
pub fn intern(&mut self, gc: &mut Gc, s: &str) -> Result<InternedStr> {
match self.indexes.get(s) {
Some(interned_str) => return Ok(*interned_str),
None => (),
}
let gc_str = InternedStr(try!(gc.alloc(s)));
// The key will live as long as the value it refers to and the static str never escapes
// outside interner so this is safe
let key: &'static str = unsafe { ::std::mem::transmute::<&str, &'static str>(&gc_str) };
self.indexes.insert(key, gc_str);
Ok(gc_str)
}
}
impl fmt::Debug for InternedStr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "InternedStr({:?})", self.0)
}
}<|fim▁hole|> fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", &self[..])
}
}<|fim▁end|> | impl fmt::Display for InternedStr { |
<|file_name|>335. Self Crossing.py<|end_file_name|><|fim▁begin|>class Solution(object):
def isSelfCrossing(self, x):<|fim▁hole|> """
:type x: List[int]
:rtype: bool
"""
inf = float('inf')
n = len(x)
if n < 3:
return False
ruld = [0, 0, 0, 0] # right, up, left, down
next_max = inf
current = [-x[1], x[0]]
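        # Invariant: `current` is the end point of the segment just drawn, `ruld`
        # tracks the bounding coordinate of the most recent segment in each
        # direction, and `next_max` is the smallest length at which the next
        # segment would cross an earlier one (infinite while spiralling outwards).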
for i, elem in enumerate(x[2:], 2):
i %= 4
if elem >= next_max:
return True
xy = 1 if i in {0, 2} else 0
pn = 1 if i in {0, 3} else -1
new = current[xy] + pn * elem
if pn * new > pn * ruld[i - 3]:
next_max = inf
else:
if next_max is inf and pn * new >= pn * ruld[i - 1]:
ruld[i - 2] = ruld[i]
next_max = abs(ruld[i - 2] - current[xy ^ 1])
ruld[i - 1], current[xy] = current[xy], new
return False
assert Solution().isSelfCrossing([2, 1, 1, 2])
assert not Solution().isSelfCrossing([1, 2, 3, 4])
assert Solution().isSelfCrossing([1, 1, 1, 1])
assert not Solution().isSelfCrossing([3,3,4,2,2])
assert Solution().isSelfCrossing([1,1,2,1,1])
assert not Solution().isSelfCrossing([3,3,3,2,1,1])<|fim▁end|> | |
<|file_name|>save_results.py<|end_file_name|><|fim▁begin|># argv[1] - file path to main folder (like $HOME/dsge-models)
# argv[2] - name of model (e.g. 'dsf' or 'nk' or 'ca')
from scipy.io import loadmat
from sys import argv
from json import load
TT = 30 # how many periods of results to send
model = argv[2]
fpath = argv[1] + '/' + model + '_mfiles/'
json = ''
#### 1 - load model results
# load results from mat file and convert to numpy lists
#mat = loadmat(fpath + model + '_results.mat')
#endo_names = mat['M_']['endo_names'].tolist()[0][0]
#endo_simul = mat['oo_']['endo_simul'].tolist()[0][0]
# make string of JSON-looking data out of numpy lists
#for name, simul in zip(endo_names, endo_simul):
# json += '"' + name.strip() + '":'
# json += '[' + ','.join(['%2f' % jj for jj in simul[0:TT]]) + '],'
#### 2 - load extra plot vars
# load results from mat file and convert to numpy lists (new format though)
mat = loadmat(fpath + 'plot_vars.mat')
plot_names = mat['plot_vars'].dtype.names
plot_simul = mat['plot_vars'][0][0]
for name, simul in zip(plot_names, plot_simul):
print 'name: ' + name
json += '"' + name.strip() + '":'
json += '[' + ','.join(['%2f' % jj for jj in simul[0:TT]]) + '],'
# write JSON-looking string to file
f = open(model + '_mfiles/' + model + '_results.json', 'w')
f.write('{' + json[:-1] + '}')
f.close()
# pull JSON data into python dict
json_data = open(fpath + model + '_results.json')
data = load(json_data)
json_data.close()
# pull JSON of short+long var names into python dict<|fim▁hole|>json_names = open(fpath + 'json/var_list.json')
names = load(json_names)
json_names.close()
# make string of public directory
pub_fpath = fpath[:fpath[:-1].rfind('/')] + '/public/'
# create csv file to write to
f = open(pub_fpath + model + '_results.csv','w')
for key in data.keys():
#f.write(str(key) + ', ' + str(data[key])[1:-1] + '\n')
f.write(str(names[key]) + ', ' + str(data[key])[1:-1] + '\n')
f.close()<|fim▁end|> | |
<|file_name|>no-tabs.js<|end_file_name|><|fim▁begin|>/**
* @fileoverview Rule to check for tabs inside a file
* @author Gyandeep Singh
*/
"use strict";
//------------------------------------------------------------------------------
// Helpers
//------------------------------------------------------------------------------
const regex = /\t/;
//------------------------------------------------------------------------------
// Public Interface
//------------------------------------------------------------------------------
module.exports = {
meta: {
docs: {
description: "disallow all tabs",
category: "Stylistic Issues",
recommended: false
},
schema: []
},
create(context) {
return {
Program(node) {
context.getSourceLines().forEach((line, index) => {
const match = regex.exec(line);
if (match) {
context.report({
node,
loc: {
line: index + 1,
column: match.index + 1
},
message: "Unexpected tab character."
});
}
});
}
};<|fim▁hole|><|fim▁end|> | }
}; |
<|file_name|>resource_aws_cloudformation_stack.go<|end_file_name|><|fim▁begin|>package aws
import (
"fmt"
"log"
"regexp"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsCloudFormationStack() *schema.Resource {
return &schema.Resource{
Create: resourceAwsCloudFormationStackCreate,
Read: resourceAwsCloudFormationStackRead,
Update: resourceAwsCloudFormationStackUpdate,
Delete: resourceAwsCloudFormationStackDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"template_body": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validateCloudFormationTemplate,
StateFunc: func(v interface{}) string {
template, _ := normalizeCloudFormationTemplate(v)
return template
},
},
"template_url": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"capabilities": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"disable_rollback": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
},
"notification_arns": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"on_failure": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"parameters": &schema.Schema{
Type: schema.TypeMap,
Optional: true,
Computed: true,
},
"outputs": &schema.Schema{
Type: schema.TypeMap,
Computed: true,
},
"policy_body": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validateJsonString,
StateFunc: func(v interface{}) string {
json, _ := normalizeJsonString(v)
return json
},
},
"policy_url": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"timeout_in_minutes": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
},
"tags": &schema.Schema{
Type: schema.TypeMap,
Optional: true,
ForceNew: true,
},
},
}
}
func resourceAwsCloudFormationStackCreate(d *schema.ResourceData, meta interface{}) error {
retryTimeout := int64(30)
conn := meta.(*AWSClient).cfconn
input := cloudformation.CreateStackInput{
StackName: aws.String(d.Get("name").(string)),
}
if v, ok := d.GetOk("template_body"); ok {
template, err := normalizeCloudFormationTemplate(v)
if err != nil {
return errwrap.Wrapf("template body contains an invalid JSON or YAML: {{err}}", err)
}
input.TemplateBody = aws.String(template)
}
if v, ok := d.GetOk("template_url"); ok {
input.TemplateURL = aws.String(v.(string))
}
if v, ok := d.GetOk("capabilities"); ok {
input.Capabilities = expandStringList(v.(*schema.Set).List())
}
if v, ok := d.GetOk("disable_rollback"); ok {
input.DisableRollback = aws.Bool(v.(bool))
}
if v, ok := d.GetOk("notification_arns"); ok {
input.NotificationARNs = expandStringList(v.(*schema.Set).List())
}
if v, ok := d.GetOk("on_failure"); ok {
input.OnFailure = aws.String(v.(string))
}
if v, ok := d.GetOk("parameters"); ok {
input.Parameters = expandCloudFormationParameters(v.(map[string]interface{}))
}
if v, ok := d.GetOk("policy_body"); ok {
policy, err := normalizeJsonString(v)
if err != nil {
return errwrap.Wrapf("policy body contains an invalid JSON: {{err}}", err)
}
input.StackPolicyBody = aws.String(policy)
}
if v, ok := d.GetOk("policy_url"); ok {
input.StackPolicyURL = aws.String(v.(string))
}
if v, ok := d.GetOk("tags"); ok {
input.Tags = expandCloudFormationTags(v.(map[string]interface{}))
}
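	// If the stack's own timeout is longer than the default 30 minute wait,
	// stretch the state-change wait slightly past it so we don't give up early.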
if v, ok := d.GetOk("timeout_in_minutes"); ok {
m := int64(v.(int))
input.TimeoutInMinutes = aws.Int64(m)
if m > retryTimeout {
retryTimeout = m + 5
log.Printf("[DEBUG] CloudFormation timeout: %d", retryTimeout)
}
}
log.Printf("[DEBUG] Creating CloudFormation Stack: %s", input)
resp, err := conn.CreateStack(&input)
if err != nil {
return fmt.Errorf("Creating CloudFormation stack failed: %s", err.Error())
}
d.SetId(*resp.StackId)
var lastStatus string
wait := resource.StateChangeConf{
Pending: []string{
"CREATE_IN_PROGRESS",
"DELETE_IN_PROGRESS",
"ROLLBACK_IN_PROGRESS",
},
Target: []string{
"CREATE_COMPLETE",
"CREATE_FAILED",
"DELETE_COMPLETE",<|fim▁hole|> "ROLLBACK_FAILED",
},
Timeout: time.Duration(retryTimeout) * time.Minute,
MinTimeout: 1 * time.Second,
Refresh: func() (interface{}, string, error) {
resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{
StackName: aws.String(d.Id()),
})
if err != nil {
log.Printf("[ERROR] Failed to describe stacks: %s", err)
return nil, "", err
}
if len(resp.Stacks) == 0 {
// This shouldn't happen unless CloudFormation is inconsistent
// See https://github.com/hashicorp/terraform/issues/5487
log.Printf("[WARN] CloudFormation stack %q not found.\nresponse: %q",
d.Id(), resp)
return resp, "", fmt.Errorf(
"CloudFormation stack %q vanished unexpectedly during creation.\n"+
"Unless you knowingly manually deleted the stack "+
"please report this as bug at https://github.com/hashicorp/terraform/issues\n"+
"along with the config & Terraform version & the details below:\n"+
"Full API response: %s\n",
d.Id(), resp)
}
status := *resp.Stacks[0].StackStatus
lastStatus = status
log.Printf("[DEBUG] Current CloudFormation stack status: %q", status)
return resp, status, err
},
}
_, err = wait.WaitForState()
if err != nil {
return err
}
if lastStatus == "ROLLBACK_COMPLETE" || lastStatus == "ROLLBACK_FAILED" {
reasons, err := getCloudFormationRollbackReasons(d.Id(), nil, conn)
if err != nil {
return fmt.Errorf("Failed getting rollback reasons: %q", err.Error())
}
return fmt.Errorf("%s: %q", lastStatus, reasons)
}
if lastStatus == "DELETE_COMPLETE" || lastStatus == "DELETE_FAILED" {
reasons, err := getCloudFormationDeletionReasons(d.Id(), conn)
if err != nil {
return fmt.Errorf("Failed getting deletion reasons: %q", err.Error())
}
d.SetId("")
return fmt.Errorf("%s: %q", lastStatus, reasons)
}
if lastStatus == "CREATE_FAILED" {
reasons, err := getCloudFormationFailures(d.Id(), conn)
if err != nil {
return fmt.Errorf("Failed getting failure reasons: %q", err.Error())
}
return fmt.Errorf("%s: %q", lastStatus, reasons)
}
log.Printf("[INFO] CloudFormation Stack %q created", d.Id())
return resourceAwsCloudFormationStackRead(d, meta)
}
func resourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).cfconn
input := &cloudformation.DescribeStacksInput{
StackName: aws.String(d.Id()),
}
resp, err := conn.DescribeStacks(input)
if err != nil {
awsErr, ok := err.(awserr.Error)
// ValidationError: Stack with id % does not exist
if ok && awsErr.Code() == "ValidationError" {
log.Printf("[WARN] Removing CloudFormation stack %s as it's already gone", d.Id())
d.SetId("")
return nil
}
return err
}
stacks := resp.Stacks
if len(stacks) < 1 {
log.Printf("[WARN] Removing CloudFormation stack %s as it's already gone", d.Id())
d.SetId("")
return nil
}
for _, s := range stacks {
if *s.StackId == d.Id() && *s.StackStatus == "DELETE_COMPLETE" {
log.Printf("[DEBUG] Removing CloudFormation stack %s"+
" as it has been already deleted", d.Id())
d.SetId("")
return nil
}
}
tInput := cloudformation.GetTemplateInput{
StackName: aws.String(d.Id()),
}
out, err := conn.GetTemplate(&tInput)
if err != nil {
return err
}
template, err := normalizeCloudFormationTemplate(*out.TemplateBody)
if err != nil {
return errwrap.Wrapf("template body contains an invalid JSON or YAML: {{err}}", err)
}
d.Set("template_body", template)
stack := stacks[0]
log.Printf("[DEBUG] Received CloudFormation stack: %s", stack)
d.Set("name", stack.StackName)
d.Set("arn", stack.StackId)
if stack.TimeoutInMinutes != nil {
d.Set("timeout_in_minutes", int(*stack.TimeoutInMinutes))
}
if stack.Description != nil {
d.Set("description", stack.Description)
}
if stack.DisableRollback != nil {
d.Set("disable_rollback", stack.DisableRollback)
}
if len(stack.NotificationARNs) > 0 {
err = d.Set("notification_arns", schema.NewSet(schema.HashString, flattenStringList(stack.NotificationARNs)))
if err != nil {
return err
}
}
originalParams := d.Get("parameters").(map[string]interface{})
err = d.Set("parameters", flattenCloudFormationParameters(stack.Parameters, originalParams))
if err != nil {
return err
}
err = d.Set("tags", flattenCloudFormationTags(stack.Tags))
if err != nil {
return err
}
err = d.Set("outputs", flattenCloudFormationOutputs(stack.Outputs))
if err != nil {
return err
}
if len(stack.Capabilities) > 0 {
err = d.Set("capabilities", schema.NewSet(schema.HashString, flattenStringList(stack.Capabilities)))
if err != nil {
return err
}
}
return nil
}
func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface{}) error {
retryTimeout := int64(30)
conn := meta.(*AWSClient).cfconn
input := &cloudformation.UpdateStackInput{
StackName: aws.String(d.Id()),
}
// Either TemplateBody, TemplateURL or UsePreviousTemplate are required
if v, ok := d.GetOk("template_url"); ok {
input.TemplateURL = aws.String(v.(string))
}
if v, ok := d.GetOk("template_body"); ok && input.TemplateURL == nil {
template, err := normalizeCloudFormationTemplate(v)
if err != nil {
return errwrap.Wrapf("template body contains an invalid JSON or YAML: {{err}}", err)
}
input.TemplateBody = aws.String(template)
}
// Capabilities must be present whether they are changed or not
if v, ok := d.GetOk("capabilities"); ok {
input.Capabilities = expandStringList(v.(*schema.Set).List())
}
if d.HasChange("notification_arns") {
input.NotificationARNs = expandStringList(d.Get("notification_arns").(*schema.Set).List())
}
// Parameters must be present whether they are changed or not
if v, ok := d.GetOk("parameters"); ok {
input.Parameters = expandCloudFormationParameters(v.(map[string]interface{}))
}
if d.HasChange("policy_body") {
policy, err := normalizeJsonString(d.Get("policy_body"))
if err != nil {
return errwrap.Wrapf("policy body contains an invalid JSON: {{err}}", err)
}
input.StackPolicyBody = aws.String(policy)
}
if d.HasChange("policy_url") {
input.StackPolicyURL = aws.String(d.Get("policy_url").(string))
}
log.Printf("[DEBUG] Updating CloudFormation stack: %s", input)
stack, err := conn.UpdateStack(input)
if err != nil {
return err
}
lastUpdatedTime, err := getLastCfEventTimestamp(d.Id(), conn)
if err != nil {
return err
}
if v, ok := d.GetOk("timeout_in_minutes"); ok {
m := int64(v.(int))
if m > retryTimeout {
retryTimeout = m + 5
log.Printf("[DEBUG] CloudFormation timeout: %d", retryTimeout)
}
}
var lastStatus string
wait := resource.StateChangeConf{
Pending: []string{
"UPDATE_COMPLETE_CLEANUP_IN_PROGRESS",
"UPDATE_IN_PROGRESS",
"UPDATE_ROLLBACK_IN_PROGRESS",
"UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
},
Target: []string{
"UPDATE_COMPLETE",
"UPDATE_ROLLBACK_COMPLETE",
"UPDATE_ROLLBACK_FAILED",
},
Timeout: time.Duration(retryTimeout) * time.Minute,
MinTimeout: 5 * time.Second,
Refresh: func() (interface{}, string, error) {
resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{
StackName: aws.String(d.Id()),
})
if err != nil {
log.Printf("[ERROR] Failed to describe stacks: %s", err)
return nil, "", err
}
status := *resp.Stacks[0].StackStatus
lastStatus = status
log.Printf("[DEBUG] Current CloudFormation stack status: %q", status)
return resp, status, err
},
}
_, err = wait.WaitForState()
if err != nil {
return err
}
if lastStatus == "UPDATE_ROLLBACK_COMPLETE" || lastStatus == "UPDATE_ROLLBACK_FAILED" {
reasons, err := getCloudFormationRollbackReasons(*stack.StackId, lastUpdatedTime, conn)
if err != nil {
return fmt.Errorf("Failed getting details about rollback: %q", err.Error())
}
return fmt.Errorf("%s: %q", lastStatus, reasons)
}
log.Printf("[DEBUG] CloudFormation stack %q has been updated", *stack.StackId)
return resourceAwsCloudFormationStackRead(d, meta)
}
func resourceAwsCloudFormationStackDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).cfconn
input := &cloudformation.DeleteStackInput{
StackName: aws.String(d.Id()),
}
log.Printf("[DEBUG] Deleting CloudFormation stack %s", input)
_, err := conn.DeleteStack(input)
if err != nil {
awsErr, ok := err.(awserr.Error)
if !ok {
return err
}
if awsErr.Code() == "ValidationError" {
// Ignore stack which has been already deleted
return nil
}
return err
}
var lastStatus string
wait := resource.StateChangeConf{
Pending: []string{
"DELETE_IN_PROGRESS",
"ROLLBACK_IN_PROGRESS",
},
Target: []string{
"DELETE_COMPLETE",
"DELETE_FAILED",
},
Timeout: 30 * time.Minute,
MinTimeout: 5 * time.Second,
Refresh: func() (interface{}, string, error) {
resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{
StackName: aws.String(d.Id()),
})
if err != nil {
awsErr, ok := err.(awserr.Error)
if !ok {
return nil, "", err
}
log.Printf("[DEBUG] Error when deleting CloudFormation stack: %s: %s",
awsErr.Code(), awsErr.Message())
// ValidationError: Stack with id % does not exist
if awsErr.Code() == "ValidationError" {
return resp, "DELETE_COMPLETE", nil
}
return nil, "", err
}
if len(resp.Stacks) == 0 {
log.Printf("[DEBUG] CloudFormation stack %q is already gone", d.Id())
return resp, "DELETE_COMPLETE", nil
}
status := *resp.Stacks[0].StackStatus
lastStatus = status
log.Printf("[DEBUG] Current CloudFormation stack status: %q", status)
return resp, status, err
},
}
_, err = wait.WaitForState()
if err != nil {
return err
}
if lastStatus == "DELETE_FAILED" {
reasons, err := getCloudFormationFailures(d.Id(), conn)
if err != nil {
return fmt.Errorf("Failed getting reasons of failure: %q", err.Error())
}
return fmt.Errorf("%s: %q", lastStatus, reasons)
}
log.Printf("[DEBUG] CloudFormation stack %q has been deleted", d.Id())
d.SetId("")
return nil
}
// getLastCfEventTimestamp takes the first event in a list
// of events ordered from the newest to the oldest
// and extracts timestamp from it
// LastUpdatedTime only provides last >successful< updated time
func getLastCfEventTimestamp(stackName string, conn *cloudformation.CloudFormation) (
*time.Time, error) {
output, err := conn.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{
StackName: aws.String(stackName),
})
if err != nil {
return nil, err
}
return output.StackEvents[0].Timestamp, nil
}
func getCloudFormationRollbackReasons(stackId string, afterTime *time.Time, conn *cloudformation.CloudFormation) ([]string, error) {
var failures []string
err := conn.DescribeStackEventsPages(&cloudformation.DescribeStackEventsInput{
StackName: aws.String(stackId),
}, func(page *cloudformation.DescribeStackEventsOutput, lastPage bool) bool {
for _, e := range page.StackEvents {
if afterTime != nil && !e.Timestamp.After(*afterTime) {
continue
}
if cfStackEventIsFailure(e) || cfStackEventIsRollback(e) {
failures = append(failures, *e.ResourceStatusReason)
}
}
return !lastPage
})
return failures, err
}
func getCloudFormationDeletionReasons(stackId string, conn *cloudformation.CloudFormation) ([]string, error) {
var failures []string
err := conn.DescribeStackEventsPages(&cloudformation.DescribeStackEventsInput{
StackName: aws.String(stackId),
}, func(page *cloudformation.DescribeStackEventsOutput, lastPage bool) bool {
for _, e := range page.StackEvents {
if cfStackEventIsFailure(e) || cfStackEventIsStackDeletion(e) {
failures = append(failures, *e.ResourceStatusReason)
}
}
return !lastPage
})
return failures, err
}
func getCloudFormationFailures(stackId string, conn *cloudformation.CloudFormation) ([]string, error) {
var failures []string
err := conn.DescribeStackEventsPages(&cloudformation.DescribeStackEventsInput{
StackName: aws.String(stackId),
}, func(page *cloudformation.DescribeStackEventsOutput, lastPage bool) bool {
for _, e := range page.StackEvents {
if cfStackEventIsFailure(e) {
failures = append(failures, *e.ResourceStatusReason)
}
}
return !lastPage
})
return failures, err
}
func cfStackEventIsFailure(event *cloudformation.StackEvent) bool {
failRe := regexp.MustCompile("_FAILED$")
return failRe.MatchString(*event.ResourceStatus) && event.ResourceStatusReason != nil
}
func cfStackEventIsRollback(event *cloudformation.StackEvent) bool {
rollbackRe := regexp.MustCompile("^ROLLBACK_")
return rollbackRe.MatchString(*event.ResourceStatus) && event.ResourceStatusReason != nil
}
func cfStackEventIsStackDeletion(event *cloudformation.StackEvent) bool {
return *event.ResourceStatus == "DELETE_IN_PROGRESS" &&
*event.ResourceType == "AWS::CloudFormation::Stack" &&
event.ResourceStatusReason != nil
}<|fim▁end|> | "DELETE_FAILED",
"ROLLBACK_COMPLETE", |
<|file_name|>inventory.cpp<|end_file_name|><|fim▁begin|>/*
Minetest
Copyright (C) 2010-2013 celeron55, Perttu Ahola <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "inventory.h"
#include "serialization.h"
#include "debug.h"
#include <sstream>
#include "log.h"
#include "itemdef.h"
#include "strfnd.h"
#include "content_mapnode.h" // For loading legacy MaterialItems
#include "nameidmapping.h" // For loading legacy MaterialItems
#include "util/serialize.h"
#include "util/string.h"
/*
ItemStack
*/
static content_t content_translate_from_19_to_internal(content_t c_from)
{
for(u32 i=0; i<sizeof(trans_table_19)/sizeof(trans_table_19[0]); i++)
{
if(trans_table_19[i][1] == c_from)
{
return trans_table_19[i][0];
}
}
return c_from;
}
// If the string contains spaces, quotes or control characters, encodes as JSON.
// Else returns the string unmodified.
static std::string serializeJsonStringIfNeeded(const std::string &s)
{
for(size_t i = 0; i < s.size(); ++i)
{
if(s[i] <= 0x1f || s[i] >= 0x7f || s[i] == ' ' || s[i] == '\"')
return serializeJsonString(s);
}
return s;
}
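// Illustrative behaviour of the two helpers above (added note, not from the
// original source; exact quoted output depends on serializeJsonString()):
//
//   serializeJsonStringIfNeeded("default:dirt")  -> default:dirt  (unchanged: no space, quote or control char)
//   serializeJsonStringIfNeeded("two words")     -> JSON-quoted, because of the space
//
// deSerializeJsonStringIfNeeded() reverses this: a token starting with '"' is
// parsed as a JSON string, otherwise the plain word up to the next space is returned.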
// Parses a string serialized by serializeJsonStringIfNeeded.
static std::string deSerializeJsonStringIfNeeded(std::istream &is)
{
std::ostringstream tmp_os;
bool expect_initial_quote = true;
bool is_json = false;
bool was_backslash = false;
for(;;)
{
char c = is.get();
if(is.eof())
break;
if(expect_initial_quote && c == '"')
{
tmp_os << c;
is_json = true;
}
else if(is_json)
{
tmp_os << c;
if(was_backslash)
was_backslash = false;
else if(c == '\\')
was_backslash = true;
else if(c == '"')
break; // Found end of string
}
else
{
if(c == ' ')
{
// Found end of word
is.unget();
break;
}
else
{
tmp_os << c;
}
}
expect_initial_quote = false;
}
if(is_json)
{
std::istringstream tmp_is(tmp_os.str(), std::ios::binary);
return deSerializeJsonString(tmp_is);
}
else
return tmp_os.str();
}
ItemStack::ItemStack(std::string name_, u16 count_,
u16 wear_, std::string metadata_,
IItemDefManager *itemdef)
{
name = itemdef->getAlias(name_);
count = count_;
wear = wear_;
metadata = metadata_;
if(name.empty() || count == 0)
clear();
else if(itemdef->get(name).type == ITEM_TOOL)
count = 1;
}
void ItemStack::serialize(std::ostream &os) const
{
DSTACK(__FUNCTION_NAME);
if(empty())
return;
// Check how many parts of the itemstring are needed
int parts = 1;
if(count != 1)
parts = 2;
if(wear != 0)
parts = 3;
if(metadata != "")
parts = 4;
os<<serializeJsonStringIfNeeded(name);
if(parts >= 2)
os<<" "<<count;
if(parts >= 3)
os<<" "<<wear;
if(parts >= 4)
os<<" "<<serializeJsonStringIfNeeded(metadata);
}
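// Illustrative item strings produced by serialize() above (examples added for
// clarity; the item names are hypothetical):
//
//   default:dirt                      name only      (count == 1, no wear, no metadata)
//   default:dirt 99                   name + count
//   default:pick_stone 1 21323        name + count + wear
//   default:pick_stone 1 21323 "..."  name + count + wear + JSON-quoted metadata
//
// deSerialize() below accepts these forms as well as several legacy formats.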
void ItemStack::deSerialize(std::istream &is, IItemDefManager *itemdef)
{
DSTACK(__FUNCTION_NAME);
clear();
// Read name
name = deSerializeJsonStringIfNeeded(is);
// Skip space
std::string tmp;
std::getline(is, tmp, ' ');
if(!tmp.empty())
throw SerializationError("Unexpected text after item name");
if(name == "MaterialItem")
{
// Obsoleted on 2011-07-30
u16 material;
is>>material;
u16 materialcount;
is>>materialcount;
// Convert old materials
if(material <= 0xff)
material = content_translate_from_19_to_internal(material);
if(material > 0xfff)
throw SerializationError("Too large material number");
// Convert old id to name
NameIdMapping legacy_nimap;
content_mapnode_get_name_id_mapping(&legacy_nimap);
legacy_nimap.getName(material, name);
if(name == "")
name = "unknown_block";
name = itemdef->getAlias(name);
count = materialcount;
}
else if(name == "MaterialItem2")
{
// Obsoleted on 2011-11-16
u16 material;
is>>material;
u16 materialcount;
is>>materialcount;
if(material > 0xfff)
throw SerializationError("Too large material number");
// Convert old id to name
NameIdMapping legacy_nimap;
content_mapnode_get_name_id_mapping(&legacy_nimap);
legacy_nimap.getName(material, name);
if(name == "")
name = "unknown_block";
name = itemdef->getAlias(name);
count = materialcount;
}
else if(name == "node" || name == "NodeItem" || name == "MaterialItem3"
|| name == "craft" || name == "CraftItem")
{
// Obsoleted on 2012-01-07
std::string all;
std::getline(is, all, '\n');
// First attempt to read inside ""
Strfnd fnd(all);
fnd.next("\"");
// If didn't skip to end, we have ""s
if(!fnd.atend()){
name = fnd.next("\"");
} else { // No luck, just read a word then
fnd.start(all);
name = fnd.next(" ");
}
fnd.skip_over(" ");
name = itemdef->getAlias(name);
count = stoi(trim(fnd.next("")));
if(count == 0)
count = 1;
}
else if(name == "MBOItem")
{
// Obsoleted on 2011-10-14
throw SerializationError("MBOItem not supported anymore");
}
else if(name == "tool" || name == "ToolItem")
{
// Obsoleted on 2012-01-07
std::string all;
std::getline(is, all, '\n');
// First attempt to read inside ""
Strfnd fnd(all);
fnd.next("\"");
// If didn't skip to end, we have ""s
if(!fnd.atend()){<|fim▁hole|> fnd.start(all);
name = fnd.next(" ");
}
count = 1;
// Then read wear
fnd.skip_over(" ");
name = itemdef->getAlias(name);
wear = stoi(trim(fnd.next("")));
}
else
{
do // This loop is just to allow "break;"
{
// The real thing
// Apply item aliases
name = itemdef->getAlias(name);
// Read the count
std::string count_str;
std::getline(is, count_str, ' ');
if(count_str.empty())
{
count = 1;
break;
}
else
count = stoi(count_str);
// Read the wear
std::string wear_str;
std::getline(is, wear_str, ' ');
if(wear_str.empty())
break;
else
wear = stoi(wear_str);
// Read metadata
metadata = deSerializeJsonStringIfNeeded(is);
// In case fields are added after metadata, skip space here:
//std::getline(is, tmp, ' ');
//if(!tmp.empty())
// throw SerializationError("Unexpected text after metadata");
} while(false);
}
if(name.empty() || count == 0)
clear();
else if(itemdef->get(name).type == ITEM_TOOL)
count = 1;
}
void ItemStack::deSerialize(const std::string &str, IItemDefManager *itemdef)
{
std::istringstream is(str, std::ios::binary);
deSerialize(is, itemdef);
}
std::string ItemStack::getItemString() const
{
// Get item string
std::ostringstream os(std::ios::binary);
serialize(os);
return os.str();
}
ItemStack ItemStack::addItem(const ItemStack &newitem_,
IItemDefManager *itemdef)
{
ItemStack newitem = newitem_;
// If the item is empty or the position invalid, bail out
if(newitem.empty())
{
// nothing can be added trivially
}
// If this is an empty item, it's an easy job.
else if(empty())
{
*this = newitem;
newitem.clear();
}
// If item name differs, bail out
else if(name != newitem.name)
{
// cannot be added
}
// If the item fits fully, add counter and delete it
else if(newitem.count <= freeSpace(itemdef))
{
add(newitem.count);
newitem.clear();
}
// Else the item does not fit fully. Add all that fits and return
// the rest.
else
{
u16 freespace = freeSpace(itemdef);
add(freespace);
newitem.remove(freespace);
}
return newitem;
}
bool ItemStack::itemFits(const ItemStack &newitem_,
ItemStack *restitem,
IItemDefManager *itemdef) const
{
ItemStack newitem = newitem_;
// If the item is empty or the position invalid, bail out
if(newitem.empty())
{
// nothing can be added trivially
}
// If this is an empty item, it's an easy job.
else if(empty())
{
newitem.clear();
}
// If item name differs, bail out
else if(name != newitem.name)
{
// cannot be added
}
// If the item fits fully, delete it
else if(newitem.count <= freeSpace(itemdef))
{
newitem.clear();
}
// Else the item does not fit fully. Return the rest.
else
{
u16 freespace = freeSpace(itemdef);
newitem.remove(freespace);
}
if(restitem)
*restitem = newitem;
return newitem.empty();
}
ItemStack ItemStack::takeItem(u32 takecount)
{
if(takecount == 0 || count == 0)
return ItemStack();
ItemStack result = *this;
if(takecount >= count)
{
// Take all
clear();
}
else
{
// Take part
remove(takecount);
result.count = takecount;
}
return result;
}
ItemStack ItemStack::peekItem(u32 peekcount) const
{
if(peekcount == 0 || count == 0)
return ItemStack();
ItemStack result = *this;
if(peekcount < count)
result.count = peekcount;
return result;
}
/*
Inventory
*/
InventoryList::InventoryList(std::string name, u32 size, IItemDefManager *itemdef)
{
m_name = name;
m_size = size;
m_width = 0;
m_itemdef = itemdef;
clearItems();
//m_dirty = false;
}
InventoryList::~InventoryList()
{
}
void InventoryList::clearItems()
{
m_items.clear();
for(u32 i=0; i<m_size; i++)
{
m_items.push_back(ItemStack());
}
//setDirty(true);
}
void InventoryList::setSize(u32 newsize)
{
if(newsize != m_items.size())
m_items.resize(newsize);
m_size = newsize;
}
void InventoryList::setWidth(u32 newwidth)
{
m_width = newwidth;
}
void InventoryList::setName(const std::string &name)
{
m_name = name;
}
void InventoryList::serialize(std::ostream &os) const
{
//os.imbue(std::locale("C"));
os<<"Width "<<m_width<<"\n";
for(u32 i=0; i<m_items.size(); i++)
{
const ItemStack &item = m_items[i];
if(item.empty())
{
os<<"Empty";
}
else
{
os<<"Item ";
item.serialize(os);
}
os<<"\n";
}
os<<"EndInventoryList\n";
}
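// Example output of serialize() for a four-slot list (illustrative only):
//
//   Width 0
//   Item default:dirt 99
//   Item default:pick_stone 1 21323
//   Empty
//   Empty
//   EndInventoryList
//
// deSerialize() below parses exactly this line-oriented layout.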
void InventoryList::deSerialize(std::istream &is)
{
//is.imbue(std::locale("C"));
clearItems();
u32 item_i = 0;
m_width = 0;
for(;;)
{
std::string line;
std::getline(is, line, '\n');
std::istringstream iss(line);
//iss.imbue(std::locale("C"));
std::string name;
std::getline(iss, name, ' ');
if(name == "EndInventoryList")
{
break;
}
// This is a temporary backwards compatibility fix
else if(name == "end")
{
break;
}
else if(name == "Width")
{
iss >> m_width;
if (iss.fail())
throw SerializationError("incorrect width property");
}
else if(name == "Item")
{
if(item_i > getSize() - 1)
throw SerializationError("too many items");
ItemStack item;
item.deSerialize(iss, m_itemdef);
m_items[item_i++] = item;
}
else if(name == "Empty")
{
if(item_i > getSize() - 1)
throw SerializationError("too many items");
m_items[item_i++].clear();
}
}
}
InventoryList::InventoryList(const InventoryList &other)
{
*this = other;
}
InventoryList & InventoryList::operator = (const InventoryList &other)
{
m_items = other.m_items;
m_size = other.m_size;
m_width = other.m_width;
m_name = other.m_name;
m_itemdef = other.m_itemdef;
//setDirty(true);
return *this;
}
bool InventoryList::operator == (const InventoryList &other) const
{
if(m_size != other.m_size)
return false;
if(m_width != other.m_width)
return false;
if(m_name != other.m_name)
return false;
for(u32 i=0; i<m_items.size(); i++)
{
ItemStack s1 = m_items[i];
ItemStack s2 = other.m_items[i];
if(s1.name != s2.name || s1.wear != s2.wear || s1.count != s2.count ||
s1.metadata != s2.metadata)
return false;
}
return true;
}
const std::string &InventoryList::getName() const
{
return m_name;
}
u32 InventoryList::getSize() const
{
return m_items.size();
}
u32 InventoryList::getWidth() const
{
return m_width;
}
u32 InventoryList::getUsedSlots() const
{
u32 num = 0;
for(u32 i=0; i<m_items.size(); i++)
{
if(!m_items[i].empty())
num++;
}
return num;
}
u32 InventoryList::getFreeSlots() const
{
return getSize() - getUsedSlots();
}
const ItemStack& InventoryList::getItem(u32 i) const
{
assert(i < m_size);
return m_items[i];
}
ItemStack& InventoryList::getItem(u32 i)
{
assert(i < m_size);
return m_items[i];
}
ItemStack InventoryList::changeItem(u32 i, const ItemStack &newitem)
{
if(i >= m_items.size())
return newitem;
ItemStack olditem = m_items[i];
m_items[i] = newitem;
//setDirty(true);
return olditem;
}
void InventoryList::deleteItem(u32 i)
{
assert(i < m_items.size());
m_items[i].clear();
}
ItemStack InventoryList::addItem(const ItemStack &newitem_)
{
ItemStack newitem = newitem_;
if(newitem.empty())
return newitem;
/*
First try to find if it could be added to some existing items
*/
for(u32 i=0; i<m_items.size(); i++)
{
// Ignore empty slots
if(m_items[i].empty())
continue;
// Try adding
newitem = addItem(i, newitem);
if(newitem.empty())
return newitem; // All was eaten
}
/*
Then try to add it to empty slots
*/
for(u32 i=0; i<m_items.size(); i++)
{
// Ignore non-empty slots
if(!m_items[i].empty())
continue;
// Try adding
newitem = addItem(i, newitem);
if(newitem.empty())
return newitem; // All was eaten
}
// Return leftover
return newitem;
}
ItemStack InventoryList::addItem(u32 i, const ItemStack &newitem)
{
if(i >= m_items.size())
return newitem;
ItemStack leftover = m_items[i].addItem(newitem, m_itemdef);
//if(leftover != newitem)
// setDirty(true);
return leftover;
}
bool InventoryList::itemFits(const u32 i, const ItemStack &newitem,
ItemStack *restitem) const
{
if(i >= m_items.size())
{
if(restitem)
*restitem = newitem;
return false;
}
return m_items[i].itemFits(newitem, restitem, m_itemdef);
}
bool InventoryList::roomForItem(const ItemStack &item_) const
{
ItemStack item = item_;
ItemStack leftover;
for(u32 i=0; i<m_items.size(); i++)
{
if(itemFits(i, item, &leftover))
return true;
item = leftover;
}
return false;
}
bool InventoryList::containsItem(const ItemStack &item) const
{
u32 count = item.count;
if(count == 0)
return true;
for(std::vector<ItemStack>::const_reverse_iterator
i = m_items.rbegin();
i != m_items.rend(); i++)
{
if(count == 0)
break;
if(i->name == item.name)
{
if(i->count >= count)
return true;
else
count -= i->count;
}
}
return false;
}
ItemStack InventoryList::removeItem(const ItemStack &item)
{
ItemStack removed;
for(std::vector<ItemStack>::reverse_iterator
i = m_items.rbegin();
i != m_items.rend(); i++)
{
if(i->name == item.name)
{
u32 still_to_remove = item.count - removed.count;
removed.addItem(i->takeItem(still_to_remove), m_itemdef);
if(removed.count == item.count)
break;
}
}
return removed;
}
ItemStack InventoryList::takeItem(u32 i, u32 takecount)
{
if(i >= m_items.size())
return ItemStack();
ItemStack taken = m_items[i].takeItem(takecount);
//if(!taken.empty())
// setDirty(true);
return taken;
}
ItemStack InventoryList::peekItem(u32 i, u32 peekcount) const
{
if(i >= m_items.size())
return ItemStack();
return m_items[i].peekItem(peekcount);
}
void InventoryList::moveItem(u32 i, InventoryList *dest, u32 dest_i, u32 count)
{
if(this == dest && i == dest_i)
return;
// Take item from source list
ItemStack item1;
if(count == 0)
item1 = changeItem(i, ItemStack());
else
item1 = takeItem(i, count);
if(item1.empty())
return;
// Try to add the item to destination list
u32 oldcount = item1.count;
item1 = dest->addItem(dest_i, item1);
// If something is returned, the item was not fully added
if(!item1.empty())
{
// If olditem is returned, nothing was added.
bool nothing_added = (item1.count == oldcount);
// If something else is returned, part of the item was left unadded.
// Add the other part back to the source item
addItem(i, item1);
// If olditem is returned, nothing was added.
// Swap the items
if(nothing_added)
{
// Take item from source list
item1 = changeItem(i, ItemStack());
// Adding was not possible, swap the items.
ItemStack item2 = dest->changeItem(dest_i, item1);
// Put item from destination list to the source list
changeItem(i, item2);
}
}
}
/*
Inventory
*/
Inventory::~Inventory()
{
clear();
}
void Inventory::clear()
{
for(u32 i=0; i<m_lists.size(); i++)
{
delete m_lists[i];
}
m_lists.clear();
}
void Inventory::clearContents()
{
for(u32 i=0; i<m_lists.size(); i++)
{
InventoryList *list = m_lists[i];
for(u32 j=0; j<list->getSize(); j++)
{
list->deleteItem(j);
}
}
}
Inventory::Inventory(IItemDefManager *itemdef)
{
m_itemdef = itemdef;
}
Inventory::Inventory(const Inventory &other)
{
*this = other;
}
Inventory & Inventory::operator = (const Inventory &other)
{
// Gracefully handle self assignment
if(this != &other)
{
clear();
m_itemdef = other.m_itemdef;
for(u32 i=0; i<other.m_lists.size(); i++)
{
m_lists.push_back(new InventoryList(*other.m_lists[i]));
}
}
return *this;
}
bool Inventory::operator == (const Inventory &other) const
{
if(m_lists.size() != other.m_lists.size())
return false;
for(u32 i=0; i<m_lists.size(); i++)
{
if(*m_lists[i] != *other.m_lists[i])
return false;
}
return true;
}
void Inventory::serialize(std::ostream &os) const
{
for(u32 i=0; i<m_lists.size(); i++)
{
InventoryList *list = m_lists[i];
os<<"List "<<list->getName()<<" "<<list->getSize()<<"\n";
list->serialize(os);
}
os<<"EndInventory\n";
}
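// Example output of Inventory::serialize() with two lists named "main" and
// "craft" (illustrative only; each list body is written by InventoryList::serialize()):
//
//   List main 32
//   Width 0
//   Item default:dirt 99
//   Empty
//   ...
//   EndInventoryList
//   List craft 9
//   ...
//   EndInventoryList
//   EndInventory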
void Inventory::deSerialize(std::istream &is)
{
clear();
for(;;)
{
std::string line;
std::getline(is, line, '\n');
std::istringstream iss(line);
std::string name;
std::getline(iss, name, ' ');
if(name == "EndInventory")
{
break;
}
// This is a temporary backwards compatibility fix
else if(name == "end")
{
break;
}
else if(name == "List")
{
std::string listname;
u32 listsize;
std::getline(iss, listname, ' ');
iss>>listsize;
InventoryList *list = new InventoryList(listname, listsize, m_itemdef);
list->deSerialize(is);
m_lists.push_back(list);
}
else
{
throw SerializationError("invalid inventory specifier");
}
}
}
InventoryList * Inventory::addList(const std::string &name, u32 size)
{
s32 i = getListIndex(name);
if(i != -1)
{
if(m_lists[i]->getSize() != size)
{
delete m_lists[i];
m_lists[i] = new InventoryList(name, size, m_itemdef);
}
return m_lists[i];
}
else
{
//don't create list with invalid name
if (name.find(" ") != std::string::npos) return NULL;
InventoryList *list = new InventoryList(name, size, m_itemdef);
m_lists.push_back(list);
return list;
}
}
InventoryList * Inventory::getList(const std::string &name)
{
s32 i = getListIndex(name);
if(i == -1)
return NULL;
return m_lists[i];
}
std::vector<const InventoryList*> Inventory::getLists()
{
std::vector<const InventoryList*> lists;
for(u32 i=0; i<m_lists.size(); i++)
{
InventoryList *list = m_lists[i];
lists.push_back(list);
}
return lists;
}
bool Inventory::deleteList(const std::string &name)
{
s32 i = getListIndex(name);
if(i == -1)
return false;
delete m_lists[i];
m_lists.erase(m_lists.begin() + i);
return true;
}
const InventoryList * Inventory::getList(const std::string &name) const
{
s32 i = getListIndex(name);
if(i == -1)
return NULL;
return m_lists[i];
}
const s32 Inventory::getListIndex(const std::string &name) const
{
for(u32 i=0; i<m_lists.size(); i++)
{
if(m_lists[i]->getName() == name)
return i;
}
return -1;
}
//END
// You hacker<|fim▁end|> | name = fnd.next("\"");
} else { // No luck, just read a word then |
<|file_name|>CBLoopViewPager.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2013 Leszek Mzyk
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.convenientbanner;
import android.content.Context;
import android.support.v4.view.PagerAdapter;
import android.support.v4.view.ViewPager;
import android.util.AttributeSet;
/**
* A ViewPager subclass enabling infinite scrolling of the ViewPager elements
*
* When used for paginating views (as opposed to fragments), no code changes
* should be needed; only change the XML layouts from <android.support.v4.view.ViewPager>
* to <com.imbryk.viewPager.LoopViewPager>
*
* If "blinking" can be seen when paginating to first or last view, simply call
* setBoundaryCaching(true), or change DEFAULT_BOUNDARY_CASHING to true
*
* When using a FragmentPagerAdapter or FragmentStatePagerAdapter,
* additional changes must be made in the adapter.
* The adapter must be prepared to create 2 extra items e.g.:
*
* The original adapter creates 4 items: [0,1,2,3]
* The modified adapter will have to create 6 items [0,1,2,3,4,5]
* with mapping realPosition=(position-1)%count
* [0->3, 1->0, 2->1, 3->2, 4->3, 5->0]
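*
* Illustrative sketch (added; not part of the original comment): a
* FragmentStatePagerAdapter prepared for the two extra boundary items could
* look like this, where "realCount" and "createFragmentFor" are hypothetical:
*
*   public int getCount() {
*       return realCount == 0 ? 0 : realCount + 2; // two extra boundary pages
*   }
*
*   public Fragment getItem(int position) {
*       int realPosition = CBLoopViewPager.toRealPosition(position, realCount);
*       return createFragmentFor(realPosition);
*   }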
*/
public class CBLoopViewPager extends ViewPager {
private static final boolean DEFAULT_BOUNDARY_CASHING = false;
OnPageChangeListener mOuterPageChangeListener;
private CBLoopPagerAdapterWrapper mAdapter;
private boolean mBoundaryCaching = DEFAULT_BOUNDARY_CASHING;
/**
* helper function which may be used when implementing FragmentPagerAdapter
*
* @param position
* @param count
* @return (position-1)%count
*/
public static int toRealPosition( int position, int count ){
position = position-1;
if( position < 0 ){
position += count;
}else{
position = position%count;
}
return position;
}
/**
* If set to true, the boundary views (i.e. first and last) will never be destroyed
* This may help to prevent "blinking" of some views
*
* @param flag
*/
public void setBoundaryCaching(boolean flag) {
mBoundaryCaching = flag;
if (mAdapter != null) {
mAdapter.setBoundaryCaching(flag);
}
}
@Override
public void setAdapter(PagerAdapter adapter) {
mAdapter = new CBLoopPagerAdapterWrapper(adapter);
mAdapter.setBoundaryCaching(mBoundaryCaching);
super.setAdapter(mAdapter);
setCurrentItem(0, false);
}
@Override
public PagerAdapter getAdapter() {
return mAdapter != null ? mAdapter.getRealAdapter() : mAdapter;
}
@Override
public int getCurrentItem() {
return mAdapter != null ? mAdapter.toRealPosition(super.getCurrentItem()) : 0;
}
public void setCurrentItem(int item, boolean smoothScroll) {
int realItem = mAdapter.toInnerPosition(item);
super.setCurrentItem(realItem, smoothScroll);
}<|fim▁hole|> public void setCurrentItem(int item) {
if (getCurrentItem() != item) {
setCurrentItem(item, true);
}
}
@Override
public void setOnPageChangeListener(OnPageChangeListener listener) {
mOuterPageChangeListener = listener;
};
public CBLoopViewPager(Context context) {
super(context);
init();
}
public CBLoopViewPager(Context context, AttributeSet attrs) {
super(context, attrs);
init();
}
private void init() {
super.setOnPageChangeListener(onPageChangeListener);
}
private OnPageChangeListener onPageChangeListener = new OnPageChangeListener() {
private float mPreviousOffset = -1;
private float mPreviousPosition = -1;
@Override
public void onPageSelected(int position) {
int realPosition = mAdapter.toRealPosition(position);
if (mPreviousPosition != realPosition) {
mPreviousPosition = realPosition;
if (mOuterPageChangeListener != null) {
mOuterPageChangeListener.onPageSelected(realPosition);
}
}
}
@Override
public void onPageScrolled(int position, float positionOffset,
int positionOffsetPixels) {
int realPosition = position;
if (mAdapter != null) {
realPosition = mAdapter.toRealPosition(position);
if (positionOffset == 0
&& mPreviousOffset == 0
&& (position == 0 || position == mAdapter.getCount() - 1)) {
setCurrentItem(realPosition, false);
}
}
mPreviousOffset = positionOffset;
if (mOuterPageChangeListener != null) {
if (realPosition != mAdapter.getRealCount() - 1) {
mOuterPageChangeListener.onPageScrolled(realPosition,
positionOffset, positionOffsetPixels);
} else {
if (positionOffset > .5) {
mOuterPageChangeListener.onPageScrolled(0, 0, 0);
} else {
mOuterPageChangeListener.onPageScrolled(realPosition,
0, 0);
}
}
}
}
@Override
public void onPageScrollStateChanged(int state) {
if (mAdapter != null) {
int position = CBLoopViewPager.super.getCurrentItem();
int realPosition = mAdapter.toRealPosition(position);
if (state == ViewPager.SCROLL_STATE_IDLE
&& (position == 0 || position == mAdapter.getCount() - 1)) {
setCurrentItem(realPosition, false);
}
}
if (mOuterPageChangeListener != null) {
mOuterPageChangeListener.onPageScrollStateChanged(state);
}
}
};
}<|fim▁end|> |
@Override |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>'use strict';
var yeoman = require('yeoman-generator');
var yosay = require('yosay');
var chalk = require('chalk');
var _ = require('lodash');
module.exports = yeoman.generators.Base.extend({
/**
* Constructor
* @return {undefined}
*/
constructor: function () {
yeoman.generators.Base.apply(this, arguments);
this.option('skip-install', {
desc: 'Skips the installation of dependencies',
type: Boolean
});
},
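// Example invocation (illustrative; assumes the package is linked or installed
// as "generator-uniform" so yeoman exposes it under the "uniform" name):
//
//   yo uniform --skip-install
//
// which scaffolds the project and leaves npm/bower installation to the user.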
/**
* Prompt the user
* @return {undefined}
*/
prompting: function () {
var done = this.async();
this.log('Welcome to ' + chalk.red('uniform') + '. The fullstack form generator tool. Built with love. By developers, for developers.');
var prompts = [{
type: 'input',
name: 'configFile',
message: 'What\'s the name of your config file?',
default: 'uniform.json'
}];
this.prompt(prompts, function (answers) {
this.uniformConfig = require(this.destinationPath(answers.configFile));
done();
}.bind(this));
},
/**
* Writing
* @type {Object}
*/
writing: {
/**
* package.json
* @return {undefined}
*/
packageJSON: function() {
this.template('_package.json', 'package.json');
},
/**
* Git
* @return {undefined}
*/
git: function() {
this.copy('gitignore', '.gitignore');
},
/**
* Bower
* @return {undefined}
*/
bower: function() {
this.template('_bower.json', 'bower.json');
this.copy('bowerrc', '.bowerrc');
},
/**
* Main app
* @return {undefined}
*/
app: function() {
// root level folders
this.mkdir('bin');
this.mkdir('public');
this.mkdir('public/components');
this.mkdir('routes');
this.mkdir('views');
// Main route
this.template('routes/apiV1.js', 'routes/apiV1.js');
// Node error view
this.copy('views/error.jade', 'views/error.jade');
// app files
this.copy('bin/www', 'bin/www');
this.copy('app.js', 'app.js');
},
/**
* Images
* @return {undefined}
*/
img: function() {
this.mkdir('public/img');
},
/**
* Javascript
* @return {undefined}
*/
javascript: function() {
this.mkdir('public/js');
this.mkdir('public/js/forms');
this.mkdir('public/js/models');
this.mkdir('public/js/routers');
this.mkdir('public/js/views');
// Forms
this.copy('public/js/forms/entryform.js', 'public/js/forms/entryform.js');
// Models
this.template('public/js/models/_entry.js', 'public/js/models/entry.js');
// Routers
this.copy('public/js/routers/default.js', 'public/js/routers/default.js');
// Views
this.copy('public/js/views/landingView.js', 'public/js/views/landingView.js');
this.copy('public/js/views/successView.js', 'public/js/views/successView.js');
// Main js file
this.copy('public/js/main.js', 'public/js/main.js');
},
/**
* Styles
* @return {undefined}
*/
styles: function() {
this.mkdir('public/css');
this.copy('public/css/main.styl', 'public/css/main.styl');
},
/**
* Templates
* @return {undefined}
*/
templates: function() {
this.mkdir('public/templates');
this.template('public/templates/landing.hbs', 'public/templates/landing.hbs');
this.template('public/templates/form.hbs', 'public/templates/form.hbs');
this.copy('public/templates/success.hbs', 'public/templates/success.hbs');
},
/**
* Dist
* @return {undefined}
*/
dist: function() {
// Directories
this.mkdir('public/dist');
this.mkdir('public/dist/css');
this.mkdir('public/dist/js');
this.mkdir('public/dist/templates');
},
/**
* HTML
* @return {undefined}
*/
html: function() {
this.copy('public/index.html', 'public/index.html');
},
/**
* Misc files
* @return {undefined}
*/
miscFiles: function() {
this.copy('public/.editorconfig', 'public/.editorconfig');
this.copy('public/.gitattributes', 'public/.gitattributes');
this.copy('public/.gitignore', 'public/.gitignore');
this.copy('public/.htaccess', 'public/.htaccess');
this.copy('public/apple-touch-icon.png', 'public/apple-touch-icon.png');
this.copy('public/browserconfig.xml', 'public/browserconfig.xml');
this.copy('public/crossdomain.xml', 'public/crossdomain.xml');
this.copy('public/favicon.ico', 'public/favicon.ico');
this.copy('public/LICENSE.txt', 'public/LICENSE.txt');
this.copy('public/robots.txt', 'public/robots.txt');
this.copy('public/tile-wide.png', 'public/tile-wide.png');
this.copy('public/tile.png', 'public/tile.png');
},
/**
* Gulp
* @return {undefined}
*/<|fim▁hole|> }
},
/**
* Install Dependencies
* @return {undefined}
*/
install: function() {
this.installDependencies({
skipInstall: this.options['skip-install']
});
}
});<|fim▁end|> | gulp: function() {
this.copy('gulpfile.js', 'gulpfile.js'); |
<|file_name|>AtomicFieldUpdaterClass.java<|end_file_name|><|fim▁begin|>package uk.co.automatictester.concurrency.classes.atomic;
import org.testng.annotations.Test;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
public class AtomicFieldUpdaterClass {
public volatile int v;
public volatile int b;
// all updates to the volatile field made through the same updater are guaranteed to be atomic
private static final AtomicIntegerFieldUpdater<AtomicFieldUpdaterClass> updater =
AtomicIntegerFieldUpdater.newUpdater(AtomicFieldUpdaterClass.class, "v");
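// Added note: newUpdater() only works if the named field ("v") is a volatile
// int that is accessible to the caller; otherwise creation fails with an exception.
// The per-instance alternative would be an AtomicInteger field, e.g.
//
//   private final AtomicInteger v = new AtomicInteger();
//   ...
//   v.incrementAndGet();
//
// The field-updater form avoids one extra object per instance, which is why it
// is sometimes preferred for classes with very many instances.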
@Test
public void test() throws InterruptedException {
int loopCount = 10_000;
int threads = 16;
CountDownLatch latch = new CountDownLatch(threads);
Runnable r = () -> {
try {
latch.countDown();<|fim▁hole|> } catch (InterruptedException e) {
throw new RuntimeException(e);
}
for (int i = 0; i < loopCount; i++) {
updater.incrementAndGet(this);
b++;
}
};
ExecutorService service = Executors.newFixedThreadPool(threads);
for (int i = 0; i < threads; i++) {
service.submit(r);
}
service.shutdown();
service.awaitTermination(10, TimeUnit.SECONDS);
assertThat(v, equalTo(threads * loopCount));
assertThat(b, not(equalTo(threads * loopCount)));
}
}<|fim▁end|> | latch.await(); |
<|file_name|>r112.py<|end_file_name|><|fim▁begin|>""" Python's random module includes a function choice(data) that returns a
random element from a non-empty sequence. The random module includes
a more basic function randrange, with parametrization similar to
the built-in range function, that returns a random choice from the given
range. Using only the randrange function, implement your own version
of the choice function.
>>> data = [2,3,4,5,6,7,8,9,10,11,10,9,8,7,6,5,4,3,2,1]
>>> results = list()
>>> for x in range(len(data)*20):
... val = custom_choice(data)
... results.append(val in data)
>>> print(results)
[True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \<|fim▁hole|>True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True]
"""
def custom_choice(data):
import random
return data[random.randrange(0,len(data))]<|fim▁end|> | True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \ |
<|file_name|>ViewPagerAdapter.java<|end_file_name|><|fim▁begin|>package com.mbpr.gengjian.playface;
import java.util.List;
import android.content.Context;
import android.support.v4.view.PagerAdapter;
import android.support.v4.view.ViewPager;
import android.view.View;
/**
* Created by gengjian on 15/12/23.
*/
public class ViewPagerAdapter extends PagerAdapter {
private List<View> views;
private Context context;
public ViewPagerAdapter(List<View> views, Context context) {
this.views = views;
this.context = context;
}
@Override
public void destroyItem(View container, int position, Object object) {
//super.destroyItem(container, position, object);
((ViewPager)container).removeView(views.get(position));
}
@Override
public Object instantiateItem(View container, int position) {
((ViewPager)container).addView(views.get(position));
return views.get(position);
}
@Override<|fim▁hole|> }
@Override
public boolean isViewFromObject(View view, Object object) {
return (view == object);
}
}<|fim▁end|> | public int getCount() {
return views.size(); |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import DetectedLanguage
from ._models_py3 import DocumentEntities
from ._models_py3 import DocumentError
from ._models_py3 import DocumentKeyPhrases
from ._models_py3 import DocumentLanguage
from ._models_py3 import DocumentLinkedEntities
from ._models_py3 import DocumentSentiment
from ._models_py3 import DocumentStatistics
from ._models_py3 import EntitiesResult
from ._models_py3 import Entity
from ._models_py3 import EntityLinkingResult
from ._models_py3 import ErrorResponse
from ._models_py3 import InnerError
from ._models_py3 import KeyPhraseResult
from ._models_py3 import LanguageBatchInput
from ._models_py3 import LanguageInput
from ._models_py3 import LanguageResult
from ._models_py3 import LinkedEntity
from ._models_py3 import Match
from ._models_py3 import MultiLanguageBatchInput
from ._models_py3 import MultiLanguageInput
from ._models_py3 import RequestStatistics
from ._models_py3 import SentenceSentiment
from ._models_py3 import SentimentConfidenceScorePerLabel
from ._models_py3 import SentimentResponse
from ._models_py3 import TextAnalyticsError
from ._models_py3 import TextAnalyticsWarning
except (SyntaxError, ImportError):
from ._models import DetectedLanguage # type: ignore
from ._models import DocumentEntities # type: ignore
from ._models import DocumentError # type: ignore
from ._models import DocumentKeyPhrases # type: ignore
from ._models import DocumentLanguage # type: ignore
from ._models import DocumentLinkedEntities # type: ignore
from ._models import DocumentSentiment # type: ignore
from ._models import DocumentStatistics # type: ignore
from ._models import EntitiesResult # type: ignore
from ._models import Entity # type: ignore
from ._models import EntityLinkingResult # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import InnerError # type: ignore
from ._models import KeyPhraseResult # type: ignore
from ._models import LanguageBatchInput # type: ignore
from ._models import LanguageInput # type: ignore
from ._models import LanguageResult # type: ignore
from ._models import LinkedEntity # type: ignore
from ._models import Match # type: ignore
from ._models import MultiLanguageBatchInput # type: ignore<|fim▁hole|> from ._models import RequestStatistics # type: ignore
from ._models import SentenceSentiment # type: ignore
from ._models import SentimentConfidenceScorePerLabel # type: ignore
from ._models import SentimentResponse # type: ignore
from ._models import TextAnalyticsError # type: ignore
from ._models import TextAnalyticsWarning # type: ignore
from ._text_analytics_client_enums import (
DocumentSentimentValue,
ErrorCodeValue,
InnerErrorCodeValue,
SentenceSentimentValue,
WarningCodeValue,
)
__all__ = [
'DetectedLanguage',
'DocumentEntities',
'DocumentError',
'DocumentKeyPhrases',
'DocumentLanguage',
'DocumentLinkedEntities',
'DocumentSentiment',
'DocumentStatistics',
'EntitiesResult',
'Entity',
'EntityLinkingResult',
'ErrorResponse',
'InnerError',
'KeyPhraseResult',
'LanguageBatchInput',
'LanguageInput',
'LanguageResult',
'LinkedEntity',
'Match',
'MultiLanguageBatchInput',
'MultiLanguageInput',
'RequestStatistics',
'SentenceSentiment',
'SentimentConfidenceScorePerLabel',
'SentimentResponse',
'TextAnalyticsError',
'TextAnalyticsWarning',
'DocumentSentimentValue',
'ErrorCodeValue',
'InnerErrorCodeValue',
'SentenceSentimentValue',
'WarningCodeValue',
]<|fim▁end|> | from ._models import MultiLanguageInput # type: ignore |
<|file_name|>ConfigLocationLiteVoAssembler.java<|end_file_name|><|fim▁begin|>//#############################################################################
//# #
//# Copyright (C) <2015> <IMS MAXIMS> #
//# #
//# This program is free software: you can redistribute it and/or modify #
//# it under the terms of the GNU Affero General Public License as #
//# published by the Free Software Foundation, either version 3 of the #
//# License, or (at your option) any later version. #
//# #
//# This program is distributed in the hope that it will be useful, #
//# but WITHOUT ANY WARRANTY; without even the implied warranty of #
//# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
//# GNU Affero General Public License for more details. #
//# #
//# You should have received a copy of the GNU Affero General Public License #
//# along with this program. If not, see <http://www.gnu.org/licenses/>. #
//# #
//# IMS MAXIMS provides absolutely NO GUARANTEE OF THE CLINICAL SAFTEY of #
//# this program. Users of this software do so entirely at their own risk. #
//# IMS MAXIMS only ensures the Clinical Safety of unaltered run-time #
//# software that it builds, deploys and maintains. #
//# #
//#############################################################################
//#EOH
/*
* This code was generated
* Copyright (C) 1995-2004 IMS MAXIMS plc. All rights reserved.
* IMS Development Environment (version 1.80 build 5589.25814)
* WARNING: DO NOT MODIFY the content of this file
* Generated on 12/10/2015, 13:24
*
*/
package ims.admin.vo.domain;
import ims.vo.domain.DomainObjectMap;
import java.util.HashMap;
import org.hibernate.proxy.HibernateProxy;
/**
* @author Cornel Ventuneac
*/
public class ConfigLocationLiteVoAssembler
{
/**
* Copy one ValueObject to another
* @param valueObjectDest to be updated
* @param valueObjectSrc to copy values from
*/
public static ims.admin.vo.ConfigLocationLiteVo copy(ims.admin.vo.ConfigLocationLiteVo valueObjectDest, ims.admin.vo.ConfigLocationLiteVo valueObjectSrc)
{
if (null == valueObjectSrc)
{
return valueObjectSrc;
}
valueObjectDest.setID_Location(valueObjectSrc.getID_Location());
valueObjectDest.setIsRIE(valueObjectSrc.getIsRIE());
// Name
valueObjectDest.setName(valueObjectSrc.getName());
// Type
valueObjectDest.setType(valueObjectSrc.getType());
// isActive
valueObjectDest.setIsActive(valueObjectSrc.getIsActive());
// Address
valueObjectDest.setAddress(valueObjectSrc.getAddress());
// IsVirtual
valueObjectDest.setIsVirtual(valueObjectSrc.getIsVirtual());
return valueObjectDest;
}
/**
* Create the ValueObject collection to hold the set of DomainObjects.
* This is a convenience method only.
* It is intended to be used when only one call to an Assembler is made.
* If more than one call to an Assembler is made then #createConfigLocationLiteVoCollectionFromLocation(DomainObjectMap, Set) should be used.
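*
* Illustrative usage when two collections must share one map (hypothetical
* variable names, added for clarity):
*
*   DomainObjectMap map = new DomainObjectMap();
*   ims.admin.vo.ConfigLocationLiteVoCollection a =
*       ConfigLocationLiteVoAssembler.createConfigLocationLiteVoCollectionFromLocation(map, setA);
*   ims.admin.vo.ConfigLocationLiteVoCollection b =
*       ConfigLocationLiteVoAssembler.createConfigLocationLiteVoCollectionFromLocation(map, setB);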
* @param domainObjectSet - Set of ims.core.resource.place.domain.objects.Location objects.
*/
public static ims.admin.vo.ConfigLocationLiteVoCollection createConfigLocationLiteVoCollectionFromLocation(java.util.Set domainObjectSet)
{
return createConfigLocationLiteVoCollectionFromLocation(new DomainObjectMap(), domainObjectSet);
}
/**
* Create the ValueObject collection to hold the set of DomainObjects.
* @param map - maps DomainObjects to created ValueObjects
* @param domainObjectSet - Set of ims.core.resource.place.domain.objects.Location objects.
*/
public static ims.admin.vo.ConfigLocationLiteVoCollection createConfigLocationLiteVoCollectionFromLocation(DomainObjectMap map, java.util.Set domainObjectSet)
{
ims.admin.vo.ConfigLocationLiteVoCollection voList = new ims.admin.vo.ConfigLocationLiteVoCollection();
if ( null == domainObjectSet )
{
return voList;
}
int rieCount=0;
int activeCount=0;
java.util.Iterator iterator = domainObjectSet.iterator();
while( iterator.hasNext() )
{
<|fim▁hole|> ims.admin.vo.ConfigLocationLiteVo vo = create(map, domainObject);
if (vo != null)
voList.add(vo);
if (domainObject != null)
{
if (domainObject.getIsRIE() != null && domainObject.getIsRIE().booleanValue() == true)
rieCount++;
else
activeCount++;
}
}
voList.setRieCount(rieCount);
voList.setActiveCount(activeCount);
return voList;
}
/**
* Create the ValueObject collection to hold the list of DomainObjects.
* @param domainObjectList - List of ims.core.resource.place.domain.objects.Location objects.
*/
public static ims.admin.vo.ConfigLocationLiteVoCollection createConfigLocationLiteVoCollectionFromLocation(java.util.List domainObjectList)
{
return createConfigLocationLiteVoCollectionFromLocation(new DomainObjectMap(), domainObjectList);
}
/**
* Create the ValueObject collection to hold the list of DomainObjects.
* @param map - maps DomainObjects to created ValueObjects
* @param domainObjectList - List of ims.core.resource.place.domain.objects.Location objects.
*/
public static ims.admin.vo.ConfigLocationLiteVoCollection createConfigLocationLiteVoCollectionFromLocation(DomainObjectMap map, java.util.List domainObjectList)
{
ims.admin.vo.ConfigLocationLiteVoCollection voList = new ims.admin.vo.ConfigLocationLiteVoCollection();
if ( null == domainObjectList )
{
return voList;
}
int rieCount=0;
int activeCount=0;
for (int i = 0; i < domainObjectList.size(); i++)
{
ims.core.resource.place.domain.objects.Location domainObject = (ims.core.resource.place.domain.objects.Location) domainObjectList.get(i);
ims.admin.vo.ConfigLocationLiteVo vo = create(map, domainObject);
if (vo != null)
voList.add(vo);
if (domainObject != null)
{
if (domainObject.getIsRIE() != null && domainObject.getIsRIE().booleanValue() == true)
rieCount++;
else
activeCount++;
}
}
voList.setRieCount(rieCount);
voList.setActiveCount(activeCount);
return voList;
}
/**
* Create the ims.core.resource.place.domain.objects.Location set from the value object collection.
* @param domainFactory - used to create existing (persistent) domain objects.
* @param voCollection - the collection of value objects
*/
public static java.util.Set extractLocationSet(ims.domain.ILightweightDomainFactory domainFactory, ims.admin.vo.ConfigLocationLiteVoCollection voCollection)
{
return extractLocationSet(domainFactory, voCollection, null, new HashMap());
}
public static java.util.Set extractLocationSet(ims.domain.ILightweightDomainFactory domainFactory, ims.admin.vo.ConfigLocationLiteVoCollection voCollection, java.util.Set domainObjectSet, HashMap domMap)
{
int size = (null == voCollection) ? 0 : voCollection.size();
if (domainObjectSet == null)
{
domainObjectSet = new java.util.HashSet();
}
java.util.Set newSet = new java.util.HashSet();
for(int i=0; i<size; i++)
{
ims.admin.vo.ConfigLocationLiteVo vo = voCollection.get(i);
ims.core.resource.place.domain.objects.Location domainObject = ConfigLocationLiteVoAssembler.extractLocation(domainFactory, vo, domMap);
//TODO: This can only occur in the situation of a stale object exception. For now leave it to the Interceptor to handle it.
if (domainObject == null)
{
continue;
}
//Trying to avoid the hibernate collection being marked as dirty via its public interface methods. (like add)
if (!domainObjectSet.contains(domainObject)) domainObjectSet.add(domainObject);
newSet.add(domainObject);
}
java.util.Set removedSet = new java.util.HashSet();
java.util.Iterator iter = domainObjectSet.iterator();
//Find out which objects need to be removed
while (iter.hasNext())
{
ims.domain.DomainObject o = (ims.domain.DomainObject)iter.next();
if ((o == null || o.getIsRIE() == null || !o.getIsRIE().booleanValue()) && !newSet.contains(o))
{
removedSet.add(o);
}
}
iter = removedSet.iterator();
//Remove the unwanted objects
while (iter.hasNext())
{
domainObjectSet.remove(iter.next());
}
return domainObjectSet;
}
/**
* Create the ims.core.resource.place.domain.objects.Location list from the value object collection.
* @param domainFactory - used to create existing (persistent) domain objects.
* @param voCollection - the collection of value objects
*/
public static java.util.List extractLocationList(ims.domain.ILightweightDomainFactory domainFactory, ims.admin.vo.ConfigLocationLiteVoCollection voCollection)
{
return extractLocationList(domainFactory, voCollection, null, new HashMap());
}
public static java.util.List extractLocationList(ims.domain.ILightweightDomainFactory domainFactory, ims.admin.vo.ConfigLocationLiteVoCollection voCollection, java.util.List domainObjectList, HashMap domMap)
{
int size = (null == voCollection) ? 0 : voCollection.size();
if (domainObjectList == null)
{
domainObjectList = new java.util.ArrayList();
}
for(int i=0; i<size; i++)
{
ims.admin.vo.ConfigLocationLiteVo vo = voCollection.get(i);
ims.core.resource.place.domain.objects.Location domainObject = ConfigLocationLiteVoAssembler.extractLocation(domainFactory, vo, domMap);
//TODO: This can only occur in the situation of a stale object exception. For now leave it to the Interceptor to handle it.
if (domainObject == null)
{
continue;
}
int domIdx = domainObjectList.indexOf(domainObject);
if (domIdx == -1)
{
domainObjectList.add(i, domainObject);
}
else if (i != domIdx && i < domainObjectList.size())
{
Object tmp = domainObjectList.get(i);
domainObjectList.set(i, domainObjectList.get(domIdx));
domainObjectList.set(domIdx, tmp);
}
}
//Remove all ones in domList where index > voCollection.size() as these should
//now represent the ones removed from the VO collection. No longer referenced.
int i1=domainObjectList.size();
while (i1 > size)
{
domainObjectList.remove(i1-1);
i1=domainObjectList.size();
}
return domainObjectList;
}
/**
* Create the ValueObject from the ims.core.resource.place.domain.objects.Location object.
* @param domainObject ims.core.resource.place.domain.objects.Location
*/
public static ims.admin.vo.ConfigLocationLiteVo create(ims.core.resource.place.domain.objects.Location domainObject)
{
if (null == domainObject)
{
return null;
}
DomainObjectMap map = new DomainObjectMap();
return create(map, domainObject);
}
/**
* Create the ValueObject from the ims.core.resource.place.domain.objects.Location object.
* @param map DomainObjectMap of DomainObjects to already created ValueObjects.
* @param domainObject
*/
public static ims.admin.vo.ConfigLocationLiteVo create(DomainObjectMap map, ims.core.resource.place.domain.objects.Location domainObject)
{
if (null == domainObject)
{
return null;
}
// check if the domainObject already has a valueObject created for it
ims.admin.vo.ConfigLocationLiteVo valueObject = (ims.admin.vo.ConfigLocationLiteVo) map.getValueObject(domainObject, ims.admin.vo.ConfigLocationLiteVo.class);
if ( null == valueObject )
{
valueObject = new ims.admin.vo.ConfigLocationLiteVo(domainObject.getId(), domainObject.getVersion());
map.addValueObject(domainObject, valueObject);
valueObject = insert(map, valueObject, domainObject);
}
return valueObject;
}
/**
* Update the ValueObject with the Domain Object.
* @param valueObject to be updated
* @param domainObject ims.core.resource.place.domain.objects.Location
*/
public static ims.admin.vo.ConfigLocationLiteVo insert(ims.admin.vo.ConfigLocationLiteVo valueObject, ims.core.resource.place.domain.objects.Location domainObject)
{
if (null == domainObject)
{
return valueObject;
}
DomainObjectMap map = new DomainObjectMap();
return insert(map, valueObject, domainObject);
}
/**
* Update the ValueObject with the Domain Object.
* @param map DomainObjectMap of DomainObjects to already created ValueObjects.
* @param valueObject to be updated
* @param domainObject ims.core.resource.place.domain.objects.Location
*/
public static ims.admin.vo.ConfigLocationLiteVo insert(DomainObjectMap map, ims.admin.vo.ConfigLocationLiteVo valueObject, ims.core.resource.place.domain.objects.Location domainObject)
{
if (null == domainObject)
{
return valueObject;
}
if (null == map)
{
map = new DomainObjectMap();
}
valueObject.setID_Location(domainObject.getId());
valueObject.setIsRIE(domainObject.getIsRIE());
// If this is a recordedInError record, and the domainObject
// value isIncludeRecord has not been set, then we return null and
// not the value object
if (valueObject.getIsRIE() != null && valueObject.getIsRIE().booleanValue() == true && !domainObject.isIncludeRecord())
return null;
// If this is not a recordedInError record, and the domainObject
// value isIncludeRecord has been set, then we return null and
// not the value object
if ((valueObject.getIsRIE() == null || valueObject.getIsRIE().booleanValue() == false) && domainObject.isIncludeRecord())
return null;
// Name
valueObject.setName(domainObject.getName());
// Type
ims.domain.lookups.LookupInstance instance2 = domainObject.getType();
if ( null != instance2 ) {
ims.framework.utils.ImagePath img = null;
ims.framework.utils.Color color = null;
img = null;
if (instance2.getImage() != null)
{
img = new ims.framework.utils.ImagePath(instance2.getImage().getImageId(), instance2.getImage().getImagePath());
}
color = instance2.getColor();
if (color != null)
color.getValue();
ims.core.vo.lookups.LocationType voLookup2 = new ims.core.vo.lookups.LocationType(instance2.getId(),instance2.getText(), instance2.isActive(), null, img, color);
ims.core.vo.lookups.LocationType parentVoLookup2 = voLookup2;
ims.domain.lookups.LookupInstance parent2 = instance2.getParent();
while (parent2 != null)
{
if (parent2.getImage() != null)
{
img = new ims.framework.utils.ImagePath(parent2.getImage().getImageId(), parent2.getImage().getImagePath() );
}
else
{
img = null;
}
color = parent2.getColor();
if (color != null)
color.getValue();
parentVoLookup2.setParent(new ims.core.vo.lookups.LocationType(parent2.getId(),parent2.getText(), parent2.isActive(), null, img, color));
parentVoLookup2 = parentVoLookup2.getParent();
parent2 = parent2.getParent();
}
valueObject.setType(voLookup2);
}
// isActive
valueObject.setIsActive( domainObject.isIsActive() );
// Address
valueObject.setAddress(ims.core.vo.domain.PersonAddressAssembler.create(map, domainObject.getAddress()) );
// IsVirtual
valueObject.setIsVirtual( domainObject.isIsVirtual() );
return valueObject;
}
/**
* Create the domain object from the value object.
* @param domainFactory - used to create existing (persistent) domain objects.
* @param valueObject - extract the domain object fields from this.
*/
public static ims.core.resource.place.domain.objects.Location extractLocation(ims.domain.ILightweightDomainFactory domainFactory, ims.admin.vo.ConfigLocationLiteVo valueObject)
{
return extractLocation(domainFactory, valueObject, new HashMap());
}
public static ims.core.resource.place.domain.objects.Location extractLocation(ims.domain.ILightweightDomainFactory domainFactory, ims.admin.vo.ConfigLocationLiteVo valueObject, HashMap domMap)
{
if (null == valueObject)
{
return null;
}
Integer id = valueObject.getID_Location();
ims.core.resource.place.domain.objects.Location domainObject = null;
if ( null == id)
{
if (domMap.get(valueObject) != null)
{
return (ims.core.resource.place.domain.objects.Location)domMap.get(valueObject);
}
// ims.admin.vo.ConfigLocationLiteVo ID_Location field is unknown
domainObject = new ims.core.resource.place.domain.objects.Location();
domMap.put(valueObject, domainObject);
}
else
{
String key = (valueObject.getClass().getName() + "__" + valueObject.getID_Location());
if (domMap.get(key) != null)
{
return (ims.core.resource.place.domain.objects.Location)domMap.get(key);
}
domainObject = (ims.core.resource.place.domain.objects.Location) domainFactory.getDomainObject(ims.core.resource.place.domain.objects.Location.class, id );
//TODO: Not sure how this should be handled. Effectively it must be a staleobject exception, but maybe should be handled as that further up.
if (domainObject == null)
return null;
domMap.put(key, domainObject);
}
domainObject.setVersion(valueObject.getVersion_Location());
//This is to overcome a bug in both Sybase and Oracle which prevents them from storing an empty string correctly
//Sybase stores it as a single space, Oracle stores it as NULL. This fix will make them consistent at least.
if (valueObject.getName() != null && valueObject.getName().equals(""))
{
valueObject.setName(null);
}
domainObject.setName(valueObject.getName());
// create LookupInstance from vo LookupType
ims.domain.lookups.LookupInstance value2 = null;
if ( null != valueObject.getType() )
{
value2 =
domainFactory.getLookupInstance(valueObject.getType().getID());
}
domainObject.setType(value2);
domainObject.setIsActive(valueObject.getIsActive());
// SaveAsRefVO - treated as a refVo in extract methods
ims.core.generic.domain.objects.Address value4 = null;
if ( null != valueObject.getAddress() )
{
if (valueObject.getAddress().getBoId() == null)
{
if (domMap.get(valueObject.getAddress()) != null)
{
value4 = (ims.core.generic.domain.objects.Address)domMap.get(valueObject.getAddress());
}
}
else
{
value4 = (ims.core.generic.domain.objects.Address)domainFactory.getDomainObject(ims.core.generic.domain.objects.Address.class, valueObject.getAddress().getBoId());
}
}
domainObject.setAddress(value4);
domainObject.setIsVirtual(valueObject.getIsVirtual());
return domainObject;
}
}<|fim▁end|> | ims.core.resource.place.domain.objects.Location domainObject = (ims.core.resource.place.domain.objects.Location) iterator.next();
|
<|file_name|>nsNoDataProtocolContentPolicy.cpp<|end_file_name|><|fim▁begin|>/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* Content policy implementation that prevents all loads of images,
* subframes, etc from protocols that don't return data but rather open
* applications (such as mailto).
*/
#include "nsNoDataProtocolContentPolicy.h"
#include "nsIDOMWindow.h"
#include "nsString.h"
#include "nsIProtocolHandler.h"
#include "nsIIOService.h"
#include "nsIExternalProtocolHandler.h"
#include "nsNetUtil.h"
NS_IMPL_ISUPPORTS1(nsNoDataProtocolContentPolicy, nsIContentPolicy)
NS_IMETHODIMP
nsNoDataProtocolContentPolicy::ShouldLoad(uint32_t aContentType,
nsIURI *aContentLocation,
nsIURI *aRequestingLocation,
nsISupports *aRequestingContext,
const nsACString &aMimeGuess,
nsISupports *aExtra,
nsIPrincipal *aRequestPrincipal,
int16_t *aDecision)
{
*aDecision = nsIContentPolicy::ACCEPT;
// Don't block for TYPE_OBJECT since such URIs are sometimes loaded by the
// plugin, so they don't necessarily open external apps
// TYPE_WEBSOCKET loads can only go to ws:// or wss://, so we don't need to
// concern ourselves with them.
if (aContentType != TYPE_DOCUMENT &&
aContentType != TYPE_SUBDOCUMENT &&
aContentType != TYPE_OBJECT &&
aContentType != TYPE_WEBSOCKET) {
// The following are just quick-escapes for the most common cases
// where we would allow the content to be loaded anyway.
nsAutoCString scheme;
aContentLocation->GetScheme(scheme);
if (scheme.EqualsLiteral("http") ||
scheme.EqualsLiteral("https") ||
scheme.EqualsLiteral("ftp") ||
scheme.EqualsLiteral("file") ||
scheme.EqualsLiteral("chrome")) {
return NS_OK;
}
bool shouldBlock;
nsresult rv = NS_URIChainHasFlags(aContentLocation,
nsIProtocolHandler::URI_DOES_NOT_RETURN_DATA,
&shouldBlock);
if (NS_SUCCEEDED(rv) && shouldBlock) {
*aDecision = nsIContentPolicy::REJECT_REQUEST;<|fim▁hole|> return NS_OK;
}
NS_IMETHODIMP
nsNoDataProtocolContentPolicy::ShouldProcess(uint32_t aContentType,
nsIURI *aContentLocation,
nsIURI *aRequestingLocation,
nsISupports *aRequestingContext,
const nsACString &aMimeGuess,
nsISupports *aExtra,
nsIPrincipal *aRequestPrincipal,
int16_t *aDecision)
{
return ShouldLoad(aContentType, aContentLocation, aRequestingLocation,
aRequestingContext, aMimeGuess, aExtra, aRequestPrincipal,
aDecision);
}<|fim▁end|> | }
}
|
<|file_name|>courses_integration_tests.py<|end_file_name|><|fim▁begin|># Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software<|fim▁hole|>
"""Per-course integration tests for Course Builder."""
__author__ = [
'Todd Larsen ([email protected])'
]
from modules.courses import courses_pageobjects
from tests.integration import integration
class AvailabilityTests(integration.TestBase):
def setUp(self):
super(AvailabilityTests, self).setUp()
self.login(self.LOGIN, admin=True)
def test_availability_page_js(self):
"""Checks the parts of the Publish > Availability page contents that
are dynamically altered by availability.js.
"""
sample_course_name = '' # Power Searching course w/ blank namespace.
sample_availablity_page = self.load_dashboard(
sample_course_name
).click_availability(
cls=courses_pageobjects.CourseAvailabilityPage
).verify_content_present_no_msgs(
has_triggers=True
).verify_add_trigger_button(
)
empty_course_name = self.create_new_course(login=False)[0]
self.load_dashboard(
empty_course_name
).click_availability(
cls=courses_pageobjects.CourseAvailabilityPage
).verify_empty_content_msgs(
).verify_no_trigger_button(
)<|fim▁end|> | # distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. |
<|file_name|>DescribeReplicationGroupsResult.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/elasticache/model/DescribeReplicationGroupsResult.h>
#include <aws/core/utils/xml/XmlSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/logging/LogMacros.h>
#include <utility>
using namespace Aws::ElastiCache::Model;
using namespace Aws::Utils::Xml;
using namespace Aws::Utils::Logging;
using namespace Aws::Utils;
using namespace Aws;
DescribeReplicationGroupsResult::DescribeReplicationGroupsResult()
{
}
DescribeReplicationGroupsResult::DescribeReplicationGroupsResult(const Aws::AmazonWebServiceResult<XmlDocument>& result)
{
*this = result;
}
DescribeReplicationGroupsResult& DescribeReplicationGroupsResult::operator =(const Aws::AmazonWebServiceResult<XmlDocument>& result)
{
const XmlDocument& xmlDocument = result.GetPayload();
XmlNode rootNode = xmlDocument.GetRootElement();
XmlNode resultNode = rootNode;
if (!rootNode.IsNull() && (rootNode.GetName() != "DescribeReplicationGroupsResult"))<|fim▁hole|> resultNode = rootNode.FirstChild("DescribeReplicationGroupsResult");
}
if(!resultNode.IsNull())
{
XmlNode markerNode = resultNode.FirstChild("Marker");
if(!markerNode.IsNull())
{
m_marker = StringUtils::Trim(markerNode.GetText().c_str());
}
XmlNode replicationGroupsNode = resultNode.FirstChild("ReplicationGroups");
if(!replicationGroupsNode.IsNull())
{
XmlNode replicationGroupsMember = replicationGroupsNode.FirstChild("ReplicationGroup");
while(!replicationGroupsMember.IsNull())
{
m_replicationGroups.push_back(replicationGroupsMember);
replicationGroupsMember = replicationGroupsMember.NextNode("ReplicationGroup");
}
}
}
if (!rootNode.IsNull()) {
XmlNode responseMetadataNode = rootNode.FirstChild("ResponseMetadata");
m_responseMetadata = responseMetadataNode;
AWS_LOGSTREAM_DEBUG("Aws::ElastiCache::Model::DescribeReplicationGroupsResult", "x-amzn-request-id: " << m_responseMetadata.GetRequestId() );
}
return *this;
}<|fim▁end|> | { |
<|file_name|>parser.py<|end_file_name|><|fim▁begin|># --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import sys
import argparse
import argcomplete
import azure.cli.core.telemetry as telemetry
import azure.cli.core._help as _help
from azure.cli.core.util import CLIError
from azure.cli.core._pkg_util import handle_module_not_installed
import azure.cli.core.azlogging as azlogging
logger = azlogging.get_az_logger(__name__)
class IncorrectUsageError(CLIError):
'''Raised when a command is incorrectly used and the usage should be
displayed to the user.
'''
pass
class CaseInsensitiveChoicesCompleter(argcomplete.completers.ChoicesCompleter): # pylint: disable=too-few-public-methods
def __call__(self, prefix, **kwargs):
return (c for c in self.choices if c.lower().startswith(prefix.lower()))
# Override the choices completer with one that is case insensitive
argcomplete.completers.ChoicesCompleter = CaseInsensitiveChoicesCompleter
def enable_autocomplete(parser):
argcomplete.autocomplete = argcomplete.CompletionFinder()
argcomplete.autocomplete(parser, validator=lambda c, p: c.lower().startswith(p.lower()),
default_completer=lambda _: ())
class AzCliCommandParser(argparse.ArgumentParser):
"""ArgumentParser implementation specialized for the
Azure CLI utility.
"""
def __init__(self, **kwargs):
self.subparsers = {}
self.parents = kwargs.get('parents', [])
self.help_file = kwargs.pop('help_file', None)
# We allow a callable for description to be passed in in order to delay-load any help
# or description for a command. We better stash it away before handing it off for
# "normal" argparse handling...
self._description = kwargs.pop('description', None)
self.command_source = kwargs.pop('_command_source', None)
super(AzCliCommandParser, self).__init__(**kwargs)
def load_command_table(self, command_table):
"""Load a command table into our parser.
"""
# If we haven't already added a subparser, we
# better do it.
if not self.subparsers:
sp = self.add_subparsers(dest='_command_package')
sp.required = True
self.subparsers = {(): sp}
for command_name, metadata in command_table.items():
subparser = self._get_subparser(command_name.split())
command_verb = command_name.split()[-1]
# To work around http://bugs.python.org/issue9253, we artificially add any new
# parsers we add to the "choices" section of the subparser.
subparser.choices[command_verb] = command_verb
# inject command_module designer's help formatter -- default is HelpFormatter
fc = metadata.formatter_class or argparse.HelpFormatter
command_parser = subparser.add_parser(command_verb,
description=metadata.description,
parents=self.parents,
conflict_handler='error',
help_file=metadata.help,
formatter_class=fc,
_command_source=metadata.command_source)
argument_validators = []
argument_groups = {}
for arg in metadata.arguments.values():
if arg.validator:
argument_validators.append(arg.validator)
if arg.arg_group:
try:
group = argument_groups[arg.arg_group]
except KeyError:
# group not found so create
group_name = '{} Arguments'.format(arg.arg_group)
group = command_parser.add_argument_group(
arg.arg_group, group_name)<|fim▁hole|> argument_groups[arg.arg_group] = group
param = group.add_argument(
*arg.options_list, **arg.options)
else:
try:
param = command_parser.add_argument(
*arg.options_list, **arg.options)
except argparse.ArgumentError:
dest = arg.options['dest']
if dest in ['no_wait', 'raw']:
pass
else:
raise
param.completer = arg.completer
command_parser.set_defaults(
func=metadata,
command=command_name,
_validators=argument_validators,
_parser=command_parser)
def _get_subparser(self, path):
"""For each part of the path, walk down the tree of
subparsers, creating new ones if one doesn't already exist.
"""
for length in range(0, len(path)):
parent_subparser = self.subparsers.get(tuple(path[0:length]), None)
if not parent_subparser:
# No subparser exists for the given subpath - create and register
# a new subparser.
# Since we know that we always have a root subparser (we created)
# one when we started loading the command table, and we walk the
# path from left to right (i.e. for "cmd subcmd1 subcmd2", we start
# with ensuring that a subparser for cmd exists, then for subcmd1,
# subcmd2 and so on), we know we can always back up one step and
# add a subparser if one doesn't exist
grandparent_subparser = self.subparsers[tuple(path[:length - 1])]
new_parser = grandparent_subparser.add_parser(path[length - 1])
# Due to http://bugs.python.org/issue9253, we have to give the subparser
# a destination and set it to required in order to get a
# meaningful error
parent_subparser = new_parser.add_subparsers(dest='subcommand')
parent_subparser.required = True
self.subparsers[tuple(path[0:length])] = parent_subparser
return parent_subparser
def _handle_command_package_error(self, err_msg): # pylint: disable=no-self-use
if err_msg and err_msg.startswith('argument _command_package: invalid choice:'):
import re
try:
possible_module = re.search("argument _command_package: invalid choice: '(.+?)'",
err_msg).group(1)
handle_module_not_installed(possible_module)
except AttributeError:
# regular expression pattern match failed so unable to retrieve
# module name
pass
except Exception as e: # pylint: disable=broad-except
logger.debug('Unable to handle module not installed: %s', str(e))
def validation_error(self, message):
telemetry.set_user_fault('validation error')
return super(AzCliCommandParser, self).error(message)
def error(self, message):
telemetry.set_user_fault('parse error: {}'.format(message))
self._handle_command_package_error(message)
args = {'prog': self.prog, 'message': message}
logger.error('%(prog)s: error: %(message)s', args)
self.print_usage(sys.stderr)
self.exit(2)
def format_help(self):
is_group = self.is_group()
telemetry.set_command_details(command=self.prog[3:])
telemetry.set_success(summary='show help')
_help.show_help(self.prog.split()[1:],
self._actions[-1] if is_group else self,
is_group)
self.exit()
def _check_value(self, action, value):
# Override to customize the error message when a argument is not among the available choices
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
msg = 'invalid choice: {}'.format(value)
raise argparse.ArgumentError(action, msg)
def is_group(self):
""" Determine if this parser instance represents a group
or a command. Anything that has a func default is considered
a group. This includes any dummy commands served up by the
"filter out irrelevant commands based on argv" command filter """
cmd = self._defaults.get('func', None)
return not (cmd and cmd.handler)
def __getattribute__(self, name):
""" Since getting the description can be expensive (require module loads), we defer
this until someone actually wants to use it (i.e. show help for the command)
"""
if name == 'description':
if self._description:
self.description = self._description() \
if callable(self._description) else self._description
self._description = None
return object.__getattribute__(self, name)<|fim▁end|> | |
<|file_name|>Utils.java<|end_file_name|><|fim▁begin|>package utils;
import java.io.*;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.ArrayList;
import java.util.List;
import java.util.Scanner;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import ch.ethz.ssh2.Connection;
import ch.ethz.ssh2.Session;
import ueb01.StringBufferImpl;
/**
* Created with IntelliJ IDEA.
* User: Julian
* Date: 16.10.13
* Time: 13:37
*/
public class Utils {
private static long currentTime;
/**
* http://svn.apache.org/viewvc/camel/trunk/components/camel-test/src/main/java/org/apache/camel/test/AvailablePortFinder.java?view=markup#l130
* Checks to see if a specific port is available.
*
* @param port the port to check for availability
*/
public static boolean available(int port) {
ServerSocket ss = null;
DatagramSocket ds = null;
try {
ss = new ServerSocket(port);
ss.setReuseAddress(true);
ds = new DatagramSocket(port);
ds.setReuseAddress(true);
return true;
} catch (IOException e) {
} finally {
if (ds != null) {
ds.close();
}
if (ss != null) {
try {
ss.close();
} catch (IOException e) {
/* should not be thrown */
}
}
}
return false;
}
public static void stopwatchStart() {
currentTime = java.lang.System.nanoTime();
}
static ExecutorService pool = null;
public static class SyncTcpResponse {
public final Socket socket;
public final String message;
public boolean isValid() {
return this.socket != null;
}
public SyncTcpResponse(Socket s, String m) {
this.socket = s;
this.message = m;
}
}
public static String getTCPSync(final Socket socket) {
StringBuilder sb = new StringBuilder();
try {
Scanner s = new Scanner(socket.getInputStream());
while (s.hasNext()) {
sb.append(s.next());
}
} catch (IOException e) {
e.printStackTrace();
}
return sb.toString();
}
public static SyncTcpResponse getTCPSync(final int port) {
ServerSocket server = null;
Socket client = null;
StringBuilder sb = new StringBuilder();
try {
server = new ServerSocket(port);
client = server.accept();
Scanner s = new Scanner(client.getInputStream());
while (s.hasNext()) {
sb.append(s.next());
}
} catch (IOException e) {
e.printStackTrace();
} finally {
if (server != null) try {
server.close();
} catch (IOException e) {
e.printStackTrace();
}
}
return new SyncTcpResponse(client, sb.toString());
}
public static Future<String> getTCP(final int port) {
if (pool == null) {
pool = Executors.newCachedThreadPool();
}
return pool.submit(new Callable<String>() {
@Override
public String call() throws Exception {
ServerSocket server = null;
/*try(ServerSocket socket = new ServerSocket(port)){
Socket client = socket.accept();
Scanner s = new Scanner(client.getInputStream());
StringBuilder sb = new StringBuilder();
while (s.hasNext()){
sb.append(s.next());
}
return sb.toString();
} */
return null;
}
});
}
public static Socket sendTCP(InetAddress address, int port, String message) {
try {
Socket s = new Socket(address, port);
return sendTCP(s, message);
} catch (IOException e) {
e.printStackTrace();
}
return null;
}
public static Socket sendTCP(Socket socket, String message) {
PrintWriter out = null;
try {
out = new PrintWriter(socket.getOutputStream());
out.println(message);
out.flush();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (out != null) {
out.close();
}
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
return socket;
}
public static void close() {
if (pool != null) {
pool.shutdown();
}
}
<|fim▁hole|> StringBuilder result = new StringBuilder();
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
result.append(line);
result.append("\n");
}
return result.toString();
}
public static String[] wordsFromScanner(Scanner scanner) {
List<String> result = new ArrayList<String>();
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
String[] words = line.split(" ");
for (String word : words) {
if (word.length() > 0)
result.add(wordify(word));
}
}
return Utils.<String>listToArrayStr(result);
}
public static String wordify(String word) {
return word.replace(",", "").replace(".", "").replace("'", "").replace("\"", "")
.replace("...", "").replace("!", "").replace(";", "").replace(":", "").toLowerCase();
}
public static <T> T[] listToArray(List<T> list) {
T[] result = (T[]) new Object[list.size()];
for (int i = 0; i < list.size(); i++) {
result[i] = list.get(i);
}
return result;
}
public static String[] listToArrayStr(List<String> list) {
String[] result = new String[list.size()];
for (int i = 0; i < list.size(); i++) {
result[i] = list.get(i);
}
return result;
}
public static void stopwatchEnd() {
long current = java.lang.System.nanoTime();
long dif = current - currentTime;
long millis = dif / 1000000;
System.out.println("Millis: {" + millis + "} Nanos: {" + dif + "}");
}
/**
* Method to send a command to a Process
*
* @param p
* @param command
*/
public static void send(Process p, String command) {
OutputStream os = p.getOutputStream();
try {
os.write(command.getBytes());
} catch (IOException e) {
System.out.println("something went wrong... [Utils.send(..) -> " + e.getMessage());
} finally {
try {
os.close();
} catch (IOException e) {
System.out.println("something went wrong while closing... [Utils.send(..) -> " + e.getMessage());
}
}
}
public static void close(Process p) {
try {
p.getOutputStream().close();
p.getInputStream().close();
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* easy exceptionless sleep
*
* @param millis
*/
public static void sleep(long millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException e) {
e.printStackTrace();
System.out.println("ALP5: Utils::sleep crashed..");
}
}
public static int countCharactersInFile(String fileName) {
BufferedReader br = null;
try {
StringBuilder sb = new StringBuilder();
br = new BufferedReader(new FileReader(fileName));
String line = br.readLine();
while (line != null) {
sb.append(line);
line = br.readLine();
}
return sb.length();
} catch (FileNotFoundException e) {
e.printStackTrace();
System.out.println("shit happens... @Utils.countCharactersInFile");
return -1;
} catch (IOException e) {
e.printStackTrace();
System.out.println("shit happens while reading... @Utils.countCharactersInFile");
} finally {
if (br != null) try {
br.close();
} catch (IOException e) {
e.printStackTrace();
return -2;
}
}
return -3;
}
public static String join(String[] l, String connector) {
StringBuilder sb = new StringBuilder();
for (String s : l) {
if (sb.length() > 0) {
sb.append(connector);
}
sb.append(s);
}
return sb.toString();
}
public static String readFromStream(InputStream is){
java.util.Scanner s = new java.util.Scanner(is).useDelimiter("\\A");
return s.hasNext() ? s.next() : "";
}
/**
* Method to receive the output of a Process
*
* @param p
* @return
*/
public static String read(Process p) {
StringBuilder sb = new StringBuilder();
InputStream is = p.getInputStream();
BufferedReader reader = new BufferedReader(new InputStreamReader(is));
String s = null;
try {
while ((s = reader.readLine()) != null) {
if (s.equals("") || s.equals(" ")) break;
sb.append(s);
}
} catch (IOException e) {
System.out.println("something went wrong... [Utils.read(..) -> " + e.getMessage());
}
return sb.toString();
}
public static void main(String[] args) throws IOException, InterruptedException {
System.out.println("yyy");
System.out.println('a' > 'b');
}
/**
* If you want to use your ssh-key-login, you need to generate a pem-File from
* the ssh-private-key and put it into the main folder ( ALP5/ ); You also need
* to define the user with @ (like: [email protected]:...)
*
* @param commandId
* @return
*/
public static Process fork(String commandId) {
String username = null; // HARDCODE ME!
String password = null; // HARDCODE ME!
String host = null;
String command = commandId;
if (commandId.contains(":")) {
String[] temp = commandId.split(":");
if (temp[0].length() > 2) {
// if the host is shorter its probably just a windows drive ('d:// ...')
host = temp[0];
if (host.contains("@")) {
String[] t = host.split("@");
username = t[0];
host = t[1];
}
if (temp.length == 3) {
command = temp[1] + ":" + temp[2]; // to "repair" windows drives...
} else {
command = temp[1];
}
}
}
if (host != null) {
Process remoteP = null;
try {
final Connection conn = new Connection(host);
conn.connect();
boolean isAuth = false;
if (password != null) {
isAuth = conn.authenticateWithPassword(username, password);
}
if (!isAuth) {
File f = new File("private.pem");
isAuth = conn.authenticateWithPublicKey(username, f, "");
if (!isAuth) return null;
}
final Session sess = conn.openSession();
sess.execCommand(command);
remoteP = new Process() {
@Override
public OutputStream getOutputStream() {
return sess.getStdin();
}
@Override
public InputStream getInputStream() {
return sess.getStdout();
}
@Override
public InputStream getErrorStream() {
return sess.getStderr();
}
@Override
public int waitFor() throws InterruptedException {
sess.wait();
return 0;
}
@Override
public int exitValue() {
return 0;
}
@Override
public void destroy() {
sess.close();
conn.close();
}
};
} catch (IOException e) {
System.out.println("shit happens with the ssh connection: @Utils.fork .. " + e.getMessage());
return null;
}
return remoteP;
}
ProcessBuilder b = new ProcessBuilder(command.split(" "));
try {
return b.start();
} catch (IOException e) {
System.out.println("shit happens: @Utils.fork .. " + e.getMessage());
}
return null;
}
}<|fim▁end|> | public static String wordFromScanner(Scanner scanner) {
|
<|file_name|>dash_handler_template.py<|end_file_name|><|fim▁begin|>from {{appname}}.handlers.powhandler import PowHandler
from {{appname}}.conf.config import myapp
from {{appname}}.lib.application import app
import simplejson as json
import tornado.web
from tornado import gen
from {{appname}}.pow_dash import dispatcher
# Please import your model here. (from yourapp.models.dbtype)
@app.add_route("/dash.*", dispatch={"get" :"dash"})
@app.add_route("/_dash.*", dispatch={"get" :"dash_ajax_json", "post": "dash_ajax_json"})
class Dash(PowHandler):
#
# Sample dash handler to embedd dash into PythonOnWheels
#
def dash(self, **kwargs):
"""
This is the place where dash is called.
dispatcher returns the HMTL including title, css, scripts and config via => dash.Dash.index()
(See: in pow_dash.py => myDash.index)
You can then insert the returned HTML into your template.
I do this below in the self.render/self.success call => see base_dash.bs4 template (mustache like syntax)
"""
print("processing dash method")
#external_stylesheets = see config.py dash section
retval = dispatcher(self.request, username="fake", session_id=1234, index=True )
#
# this is the render template call which embeds the dash code (dash_block=retval)
# from dispatcher (see above)<|fim▁hole|>
def dash_ajax_json(self):
"""
respond to the dash ajax json / react request's
"""
print(" processing dash_ajax method")
#
# now hand over to the dispatcher
#
retval = dispatcher(self.request, index=False, username="fake", session_id=1234, powapp=self.application)
#self.set_header('Content-Type', 'application/json')
self.write(retval)
# def dash_ajax_assets(self):
# """
# respond to the dash ajax assets/ react request's
# """
# print(" processing dash_ajax_assets method")
# #
# # now hand over to the dispatcher
# #
# """Handle Dash requests and guess the mimetype. Needed for static files."""
# url = request.path.split('?')[0]
# content_type, _encoding = mimetypes.guess_type(url)
# retval = dispatcher(self.request, index=False, username="fake", session_id=1234, powapp=self.application)
# self.set_header('Content-Type', content_type)
# self.write(retval)<|fim▁end|> | self.set_header('Content-Type', "text/html")
self.render("dash_index.tmpl", dash_block=retval)
# self.success(template="index.tmpl", dash_block=retval, data=res )
|
<|file_name|>tukeys_filter.py<|end_file_name|><|fim▁begin|><|fim▁hole|>import sys
import itertools
from time import time
from lib.modules.base_task import BaseTask
from lib.modules.helper import extract_service_name, get_closest_datapoint
from lib.modules.models import TimeSeriesTuple
class TukeysFilter(BaseTask):
def __init__(self, config, logger, options):
super(TukeysFilter, self).__init__(config, logger, resource={'metric_sink': 'RedisSink',
'output_sink': 'GraphiteSink'})
self.namespace = 'TukeysFilter'
self.service = options['service']
self.params = options['params']
def read(self):
quantile_25 = self.params['quantile_25']
quantile_75 = self.params['quantile_75']
metrics = self.params['metrics']
delay = self.params.get('offset', 0)
maximum_delay = self.params.get('maximum_delay', 600)
# read metrics from metric_sink
quantile_25 = [i for i in self.metric_sink.iread(quantile_25)]
quantile_75 = [i for i in self.metric_sink.iread(quantile_75)]
metrics = [i for i in self.metric_sink.iread(metrics)]
if not (len(quantile_25) * len(quantile_75) * len(metrics)):
self.logger.error(
'No data found for quantile/to be checked metrics. Exiting')
return None
# sort TimeSeriesTuples by timestamp
quantile_25 = sorted(quantile_25, key=lambda tup: tup.timestamp)
quantile_75 = sorted(quantile_75, key=lambda tup: tup.timestamp)
metrics = sorted(metrics, key=lambda tup: (tup.name, tup.timestamp))
# find closest datapoint to now() (corrected by delay) if not too old
time_now = time() - delay
quantile_25 = get_closest_datapoint(quantile_25, time_now)
if time_now - quantile_25.timestamp > maximum_delay:
self.logger.error('Quantile25 Value is too old (Timestamp: %d) of: %s. Exiting' % (
quantile_25.timestamp, quantile_25.name))
return None
quantile_25 = quantile_25.value
quantile_75 = get_closest_datapoint(quantile_75, time_now)
if time_now - quantile_75.timestamp > maximum_delay:
self.logger.error('Quantile75 Value is too old (Timestamp: %d) of: %s. Exiting' % (
quantile_75.timestamp, quantile_75.name))
return None
quantile_75 = quantile_75.value
if quantile_25 > quantile_75:
self.logger.error('Inconsistent Quantile Values (Q25: %f, Q75: %f). Exiting' % (
quantile_25, quantile_75))
return None
# group by metric (e.g. instance) first and find then closest datapoint
distribution = {}
grouped = itertools.groupby(metrics, key=lambda tup: tup.name)
for key, metrics in grouped:
closest_datapoint = get_closest_datapoint(
[metric for metric in metrics], time_now)
if time_now - closest_datapoint.timestamp < maximum_delay:
distribution[key] = closest_datapoint.value
if len(distribution) == 0:
self.logger.error('No Distribution Values. Exiting')
return None
return quantile_25, quantile_75, distribution
def process(self, data):
quantile_25, quantile_75, distribution = data
iqr_scaling = self.params.get('iqr_scaling', 1.5)
iqr = quantile_75 - quantile_25
lower_limit = quantile_25 - iqr_scaling * iqr
upper_limit = quantile_75 + iqr_scaling * iqr
if 'static_lower_threshold' in self.params:
lower_limit = max(
lower_limit, self.params['static_lower_threshold'])
if 'static_upper_threshold' in self.params:
upper_limit = min(
upper_limit, self.params['static_upper_threshold'])
states = {}
for metric, value in distribution.iteritems():
if value > upper_limit:
states[metric] = 1.0
elif value < lower_limit:
states[metric] = -1.0
else:
states[metric] = 0.0
return quantile_25, quantile_75, states
def write(self, data):
quantile_25, quantile_75, states = data
prefix = '%s.%s' % (self.namespace, self.service)
count = len(states)
invalid = 0
now = int(time())
tuples = []
for name, state in states.iteritems():
if state:
invalid += 1
name = extract_service_name(name)
tuples.append(TimeSeriesTuple('%s.%s' % (prefix, name), now, state))
tuples.append(TimeSeriesTuple('%s.%s' % (prefix, 'quantile_25'), now, quantile_25))
tuples.append(TimeSeriesTuple('%s.%s' % (prefix, 'quantile_75'), now, quantile_75))
tuples.append(TimeSeriesTuple('%s.%s' % (prefix, 'count'), now, count))
tuples.append(TimeSeriesTuple('%s.%s' % (prefix, 'invalid'), now, invalid))
self.output_sink.write(tuples)
def run(self):
data = self.read()
if data:
state = self.process(data)
self.write(state)
return True
else:
return None<|fim▁end|> | """
Outlier Detection using Tukeys Filter Class
""" |
<|file_name|>numberwidget.py<|end_file_name|><|fim▁begin|>import os
from PyQt4.QtCore import pyqtSignal
from PyQt4.QtGui import QComboBox, QDoubleValidator
from configmanager.editorwidgets.core import ConfigWidget
from configmanager.editorwidgets.uifiles.ui_numberwidget_config import Ui_Form
class NumberWidgetConfig(Ui_Form, ConfigWidget):
description = 'Number entry widget'
def __init__(self, parent=None):
super(NumberWidgetConfig, self).__init__(parent)
self.setupUi(self)
self.minEdit.setValidator( QDoubleValidator() )
self.maxEdit.setValidator( QDoubleValidator() )
self.minEdit.textChanged.connect(self.widgetchanged)
self.maxEdit.textChanged.connect(self.widgetchanged)
self.prefixEdit.textChanged.connect(self.widgetchanged)
self.suffixEdit.textChanged.connect(self.widgetchanged)
def getconfig(self):
config = {}
config['max'] = self.maxEdit.text()
config['min'] = self.minEdit.text()
config['prefix'] = self.prefixEdit.text()
config['suffix'] = self.suffixEdit.text()
return config
def setconfig(self, config):
self.blockSignals(True)
max = config.get('max', '')
min = config.get('min', '')
prefix = config.get('prefix', '')<|fim▁hole|> self.prefixEdit.setText(prefix)
self.suffixEdit.setText(suffix)
self.blockSignals(False)<|fim▁end|> | suffix = config.get('suffix', '')
self.minEdit.setText(min)
self.maxEdit.setText(max) |
<|file_name|>dictionary.cpp<|end_file_name|><|fim▁begin|>// tinygettext - A gettext replacement that works directly on .po files
// Copyright (C) 2006 Ingo Ruhnke <[email protected]>
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include <assert.h>
#include "log_stream.hpp"
#include "dictionary.hpp"
namespace tinygettext {
Dictionary::Dictionary(const std::string& charset_) :
entries(),
ctxt_entries(),
charset(charset_),
plural_forms()
{
m_has_fallback = false;
}
Dictionary::~Dictionary()
{
}
std::string
Dictionary::get_charset() const
{
return charset;
}
void
Dictionary::set_plural_forms(const PluralForms& plural_forms_)
{
plural_forms = plural_forms_;
}
PluralForms
Dictionary::get_plural_forms() const
{
return plural_forms;
}
std::string
Dictionary::translate_plural(const std::string& msgid, const std::string& msgid_plural, int num)
{
return translate_plural(entries, msgid, msgid_plural, num);
}
std::string
Dictionary::translate_plural(const Entries& dict, const std::string& msgid, const std::string& msgid_plural, int count)
{
Entries::const_iterator i = dict.find(msgid);
const std::vector<std::string>& msgstrs = i->second;
if (i != dict.end())
{
unsigned int n = 0;
n = plural_forms.get_plural(count);
assert(/*n >= 0 &&*/ n < msgstrs.size());
if (!msgstrs[n].empty())
return msgstrs[n];
else
if (count == 1) // default to english rules
return msgid;
else
return msgid_plural;
}
else
{
//log_info << "Couldn't translate: " << msgid << std::endl;
//log_info << "Candidates: " << std::endl;
//for (i = dict.begin(); i != dict.end(); ++i)
// log_info << "'" << i->first << "'" << std::endl;
if (count == 1) // default to english rules
return msgid;
else
return msgid_plural;
}
}
std::string
Dictionary::translate(const std::string& msgid)
{
return translate(entries, msgid);
}
std::string
Dictionary::translate(const Entries& dict, const std::string& msgid)
{
Entries::const_iterator i = dict.find(msgid);<|fim▁hole|> {
return i->second[0];
}
else
{
//log_info << "Couldn't translate: " << msgid << std::endl;
if (m_has_fallback) return m_fallback->translate(msgid);
else return msgid;
}
}
std::string
Dictionary::translate_ctxt(const std::string& msgctxt, const std::string& msgid)
{
CtxtEntries::iterator i = ctxt_entries.find(msgctxt);
if (i != ctxt_entries.end())
{
return translate(i->second, msgid);
}
else
{
//log_info << "Couldn't translate: " << msgid << std::endl;
return msgid;
}
}
std::string
Dictionary::translate_ctxt_plural(const std::string& msgctxt,
const std::string& msgid, const std::string& msgidplural, int num)
{
CtxtEntries::iterator i = ctxt_entries.find(msgctxt);
if (i != ctxt_entries.end())
{
return translate_plural(i->second, msgid, msgidplural, num);
}
else
{
//log_info << "Couldn't translate: " << msgid << std::endl;
if (num != 1) // default to english
return msgidplural;
else
return msgid;
}
}
void
Dictionary::add_translation(const std::string& msgid, const std::string& ,
const std::vector<std::string>& msgstrs)
{
// Do we need msgid2 for anything? its after all supplied to the
// translate call, so we just throw it away here
entries[msgid] = msgstrs;
}
void
Dictionary::add_translation(const std::string& msgid, const std::string& msgstr)
{
std::vector<std::string>& vec = entries[msgid];
if (vec.empty())
{
vec.push_back(msgstr);
}
else
{
log_warning << "collision in add_translation: '"
<< msgid << "' -> '" << msgstr << "' vs '" << vec[0] << "'" << std::endl;
vec[0] = msgstr;
}
}
void
Dictionary::add_translation(const std::string& msgctxt,
const std::string& msgid, const std::string& msgid_plural,
const std::vector<std::string>& msgstrs)
{
std::vector<std::string>& vec = ctxt_entries[msgctxt][msgid];
if (vec.empty())
{
vec = msgstrs;
}
else
{
log_warning << "collision in add_translation(\"" << msgctxt << "\", \"" << msgid << "\", \"" << msgid_plural << "\")" << std::endl;
vec = msgstrs;
}
}
void
Dictionary::add_translation(const std::string& msgctxt, const std::string& msgid, const std::string& msgstr)
{
std::vector<std::string>& vec = ctxt_entries[msgctxt][msgid];
if (vec.empty())
{
vec.push_back(msgstr);
}
else
{
log_warning << "collision in add_translation(\"" << msgctxt << "\", \"" << msgid << "\")" << std::endl;
vec[0] = msgstr;
}
}
} // namespace tinygettext
/* EOF */<|fim▁end|> | if (i != dict.end() && !i->second.empty()) |
<|file_name|>uniprot.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals
import pytest
from tests.factories import UniprotFactory
<|fim▁hole|>def uniprot_egfr_human():
return UniprotFactory(
uniprot_acc='P00533',
uniprot_id='EGFR_HUMAN',
description='Epidermal growth factor receptor EC=2.7.10.1'
)<|fim▁end|> |
@pytest.fixture |
<|file_name|>portfolio.component.ts<|end_file_name|><|fim▁begin|>import { Component, OnInit } from '@angular/core';
import { HttpClient } from '@angular/common/http';
import { Observable } from 'rxjs';
import { finalize, map } from 'rxjs/operators';
import { environment } from '../../environments/environment';<|fim▁hole|> selector: 'sec-portfolio',
templateUrl: './portfolio.component.html',
styleUrls: ['./portfolio.component.scss']
})
export class PortfolioComponent implements OnInit {
public view: string = 'composition';
public loading: boolean = false;
public series: any[];
constructor(private http: HttpClient) { }
ngOnInit() {
this.getValuation();
}
getValuation() {
this.loading = true;
return this.http.get(`${environment.apiUrl}/portfolio/audit?view=${this.view}`)
.pipe(finalize(() => { this.loading = false; }))
.subscribe((series: any[]) => {
this.series = series;
});
}
}<|fim▁end|> |
import * as _ from 'lodash';
@Component({ |
<|file_name|>test_gencumsky.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Fri Jul 27 10:08:25 2018
@author: cdeline
Using pytest to create unit tests for gencumulativesky.<|fim▁hole|>to run coverage tests, run py.test --cov-report term-missing --cov=bifacial_radiance
"""
#from bifacial_radiance import RadianceObj, SceneObj, AnalysisObj
import bifacial_radiance
import numpy as np
import pytest
import os
# try navigating to tests directory so tests run from here.
try:
os.chdir('tests')
except:
pass
TESTDIR = os.path.dirname(__file__) # this folder
# test the readepw on a dummy Boulder EPW file in the /tests/ directory
MET_FILENAME = 'USA_CO_Boulder.724699_TMY2.epw'
# also test a dummy TMY3 Denver file in /tests/
MET_FILENAME2 = "724666TYA.CSV"
DEBUG = True
"""
def test_SingleModule_gencumsky():
import datetime
# 1 module for STC conditions. DNI:900, DHI:100, sun angle: 33 elevation 0 azimuth
name = "_test_fixedtilt_end_to_end"
demo = bifacial_radiance.RadianceObj(name) # Create a RadianceObj 'object'
demo.setGround(0.62)
metdata = demo.readWeatherFile(MET_FILENAME, starttime='06_17_13', endtime='06_17_13')
demo.genCumSky() # 1p, June 17th
# create a scene using panels in landscape at 10 deg tilt, 1.5m pitch. 0.2 m ground clearance
sceneDict = {'tilt':10,'pitch':1.5,'clearance_height':0.2, 'nMods':10, 'nRows':3}
demo.makeModule(name='test',y=0.95,x=1.59, xgap=0)
scene = demo.makeScene('test',sceneDict)
octfile = demo.makeOct(demo.getfilelist()) # makeOct combines all of the ground, sky and object files into a .oct file.
analysis = bifacial_radiance.AnalysisObj(octfile, demo.name) # return an analysis object including the scan dimensions for back irradiance
(frontscan,backscan) = analysis.moduleAnalysis(scene)
analysis.analysis(octfile, demo.name, frontscan, backscan) # compare the back vs front irradiance
assert analysis.mattype[0][:12] == 'a4.1.a0.test'
assert analysis.rearMat[0][:12] == 'a4.1.a0.test'
assert np.mean(analysis.x) == pytest.approx(0)
assert np.mean(analysis.rearY) == pytest.approx(0.00017364868888889194, abs = 0.0001)
if DEBUG:
print(np.mean(analysis.Wm2Front))
print(np.mean(analysis.Wm2Back))
print(np.mean(analysis.backRatio))
# Note: gencumsky has 30-50 Wm-2 variability from run to run... unsure why.
assert np.mean(analysis.Wm2Front) == pytest.approx(1030, abs = 60) #1023,1037,1050, 1035, 1027, 1044, 1015, 1003, 1056
assert np.mean(analysis.Wm2Back) == pytest.approx(133, abs = 15) # 127, 131, 131, 135, 130, 139, 120, 145
# run 1-axis gencumsky option
trackerdict = demo.set1axis(metdata, limit_angle = 45, backtrack = True, gcr = 0.33)
demo.genCumSky1axis(trackerdict)
"""
def test_SingleModule_gencumsky_modelchain():
# duplicate previous sample using modelchain
# 1-axis .ini file
filename = "ini_gencumsky.ini"
(Params)= bifacial_radiance.load.readconfigurationinputfile(inifile=filename)
Params[0]['testfolder'] = TESTDIR
# unpack the Params tuple with *Params
demo2, analysis = bifacial_radiance.modelchain.runModelChain(*Params )
#V 0.2.5 fixed the gcr passed to set1axis. (since gcr was not being passd to set1axis, gcr was default 0.33 default).
assert analysis.mattype[0][:12] == 'a4.1.a0.test'
assert analysis.rearMat[0][:12] == 'a4.1.a0.test'
assert np.mean(analysis.x) == pytest.approx(0)
assert np.mean(analysis.rearY) == pytest.approx(0.00017, abs = 0.00001)
if DEBUG:
print(np.mean(analysis.Wm2Front))
print(np.mean(analysis.Wm2Back))
print(np.mean(analysis.backRatio))
# Note: gencumsky has 30-50 Wm-2 variability from run to run... unsure why.
assert np.mean(analysis.Wm2Front) == pytest.approx(1030, abs = 60) #1023,1037,1050, 1035, 1027, 1044, 1015, 1003, 1056
assert np.mean(analysis.Wm2Back) == pytest.approx(133, abs = 15) # 127, 131, 131, 135, 130, 139, 120, 145<|fim▁end|> | Note that this can't be included in the repo until TravisCI has a Linux version of gencumsky
set up in .travis.yml
to run unit tests, run pytest from the command line in the bifacial_radiance directory |
<|file_name|>soundscape.py<|end_file_name|><|fim▁begin|># Python 3 program for soundscape generation. (C) P.B.L. Meijer 2015
# Direct port of the hificode.c C program
# Last update: October 6, 2015; released under the Creative
# Commons Attribution 4.0 International License (CC BY 4.0),
# see http://www.seeingwithsound.com/im2sound.htm for details
#
# Beware that this program runs excruciatingly slowly under Python,
# while the PyPy python JIT compiler does not (yet) support OpenCV
import math
import os
import struct
import sys
import wave
import cv2 as cv
import numpy as np
file_name = 'hificode.wav' # User-defined parameters
min_frequency = 500 # Lowest frequency (Hz) in soundscape
max_frequency = 5000 # Highest frequency (Hz)
sample_frequency = 44100 # Sample frequency (Hz)
image_to_sound_conversion_time = 1.05 # Image to sound conversion time (s)
use_exponential = False # Linear|Exponential=0|1 distribution
hifi = 1 # 8-bit|16-bit=0|1 sound quality
stereo = 1 # Mono|Stereo=0|1 sound selection
delay = 1 # Nodelay|Delay=0|1 model (stereo=1)
relative_fade = 1 # Relative fade No|Yes=0|1 (stereo=1)
diffraction = 1 # Diffraction No|Yes=0|1 (stereo=1)
use_b_spline = 1 # Rectangular|B-spline=0|1 time window
gray_levels = 0 # 16|2-level=0|1 gray format in P[][]
use_camera = 1 # Use OpenCV camera input No|Yes=0|1
use_screen = 1 # Screen view for debugging No|Yes=0|1
class Soundscape(object):
IR = 0
IA = 9301
IC = 49297
IM = 233280
TwoPi = 6.283185307179586476925287
WHITE = 1.00
BLACK = 0.00
def __init__(self, file_name='hificode.wav', min_frequency=500, max_frequency=5000, sample_frequency=44100,
image_to_sound_conversion_time=1.05, is_exponential=False, hifi=True, stereo=True, delay=True,
relative_fade=True, diffraction=True, use_b_spline=True, gray_levels=16, use_camera=True,
use_screen=True):
"""
:param file_name:
:type file_name: str
:param min_frequency:
:type min_frequency: int
:param max_frequency:
:type max_frequency: int
:param sample_frequency:
:type sample_frequency: int
:param image_to_sound_conversion_time:
:type image_to_sound_conversion_time: float
:param is_exponential:
:type is_exponential: bool
:param hifi:
:type hifi: bool
:param stereo:
:type stereo: bool
:param delay:
:type delay: bool
:param relative_fade:
:type relative_fade: bool
:param diffraction:
:type diffraction: bool
:param use_b_spline:
:type use_b_spline: bool
:param gray_levels:
:type gray_levels: int
:param use_camera:
:type use_camera: bool
:param use_screen:
:type use_screen: bool
:return:
:rtype:
"""
self.file_name = file_name
self.min_frequency = min_frequency
self.max_frequency = max_frequency
self.sample_frequency = sample_frequency
self.image_to_sound_conversion_time = image_to_sound_conversion_time
self.is_exponential = is_exponential
self.hifi = hifi
self.stereo = stereo
self.delay = delay
self.relative_fade = relative_fade
self.diffraction = diffraction
self.use_b_spline = use_b_spline
self.gray_levels = gray_levels
self.use_camera = use_camera
self.use_screen = use_screen
self.hist = (1 + self.hifi) * (1 + self.stereo)
if use_camera:
self.num_columns = 176
self.num_rows = 64
else:
self.num_columns = 64
self.num_rows = 64
self.k = 0
self.b = 0
self.num_frames = 2 * int(0.5 * self.sample_frequency * self.image_to_sound_conversion_time)
self.frames_per_column = int(self.num_frames / self.num_columns)
self.sso = 0 if self.hifi else 128
self.ssm = 32768 if self.hifi else 128
self.scale = 0.5 / math.sqrt(self.num_rows)
self.dt = 1.0 / self.sample_frequency
self.v = 340.0 # v = speed of sound (m/s)
self.hs = 0.20 # hs = characteristic acoustical size of head (m)
self.w = np.arange(self.num_rows, dtype=np.float)
self.phi0 = np.zeros(self.num_rows, dtype=np.float)
self.A = np.zeros((self.num_columns, self.num_rows), dtype=np.uint8)
# Coefficients used in rnd()
IR = 0
IA = 9301
IC = 49297
IM = 233280
TwoPi = 6.283185307179586476925287
HIST = (1 + hifi) * (1 + stereo)
WHITE = 1.00
BLACK = 0.00
if use_camera:
num_columns = 176
num_rows = 64
else:
num_columns = 64
num_rows = 64
# if gray_levels:
# else:
try:
# noinspection PyUnresolvedReferences
import winsound
except ImportError:
def playsound(frequency, duration):
# sudo dnf -y install beep
os.system('beep -f %s -l %s' % (frequency, duration))
else:
def playsound(frequency, duration):
winsound.Beep(frequency, duration)
# def playSound(file):
# if sys.platform == "win32":
# winsound.PlaySound(file, winsound.SND_FILENAME) # Windows only
# # os.system('start %s' %file) # Windows only
# elif sys.platform.startswith('linux'):
# print("No audio player called for Linux")
# else:
# print("No audio player called for your platform")
def wi(file_object, i):
b0 = int(i % 256)
b1 = int((i - b0) / 256)
file_object.write(struct.pack('B', b0 & 0xff))
file_object.write(struct.pack('B', b1 & 0xff))
def wl(fp, l):
i0 = l % 65536
i1 = (l - i0) / 65536
wi(fp, i0)
wi(fp, i1)
def rnd():
global IR, IA, IC, IM
IR = (IR * IA + IC) % IM
return IR / (1.0 * IM)
def main():
current_frame = 0
b = 0
num_frames = 2 * int(0.5 * sample_frequency * image_to_sound_conversion_time)
frames_per_column = int(num_frames / num_columns)
sso = 0 if hifi else 128
ssm = 32768 if hifi else 128
scale = 0.5 / math.sqrt(num_rows)
dt = 1.0 / sample_frequency
v = 340.0 # v = speed of sound (m/s)
hs = 0.20 # hs = characteristic acoustical size of head (m)
w = np.arange(num_rows, dtype=np.float)
phi0 = np.zeros(num_rows)
A = np.zeros((num_columns, num_rows), dtype=np.uint8)
# w = [0 for i in range(num_rows)]
# phi0 = [0 for i in range(num_rows)]
# A = [[0 for j in range(num_columns)] for i in range(num_rows)] # num_rows x num_columns pixel matrix
# Set lin|exp (0|1) frequency distribution and random initial phase
freq_ratio = max_frequency / float(min_frequency)
if use_exponential:
w = TwoPi * min_frequency * np.power(freq_ratio, w / (num_rows - 1))
for i in range(0, num_rows):
w[i] = TwoPi * min_frequency * pow(freq_ratio, 1.0 * i / (num_rows - 1))
else:
for i in range(0, num_rows):
w[i] = TwoPi * min_frequency + TwoPi * (max_frequency - min_frequency) * i / (
num_rows - 1)
for i in range(0, num_rows): phi0[i] = TwoPi * rnd()
cam_id = 0 # First available OpenCV camera
# Optionally override ID from command line parameter: python hificode_OpenCV.py cam_id
if len(sys.argv) > 1:
cam_id = int(sys.argv[1])
try:
# noinspection PyArgumentList
cap = cv.VideoCapture(cam_id)
if not cap.isOpened():
raise ValueError('camera ID')
except ValueError:
print("Could not open camera", cam_id)
raise
# Setting standard capture size, may fail; resize later
cap.read() # Dummy read needed with some devices
# noinspection PyUnresolvedReferences
cap.set(cv.CAP_PROP_FRAME_WIDTH, 176)
# noinspection PyUnresolvedReferences
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 144)
if use_screen: # Screen views only for debugging
cv.namedWindow('Large', cv.WINDOW_AUTOSIZE)
cv.namedWindow('Small', cv.WINDOW_AUTOSIZE)
key = 0
while key != 27: # Escape key
ret, frame = cap.read()
if not ret:
# Sometimes initial frames fail
print("Capture failed\n")
key = cv.waitKey(100)
continue
tmp = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
if frame.shape[1] != num_rows or frame.shape[0] != num_columns:
# cv.resize(tmp, gray, Size(num_columns,num_rows))
gray = cv.resize(tmp, (num_columns, num_rows), interpolation=cv.INTER_AREA)
else:
gray = tmp
if use_screen: # Screen views only for debugging
cv.imwrite("hificodeLarge.jpg", frame)
cv.imshow('Large', frame)
cv.moveWindow('Large', 20, 20)
cv.imwrite("hificodeSmall.jpg", gray)
cv.imshow('Small', gray)
cv.moveWindow('Small', 220, 20)
key = cv.waitKey(10)
if use_camera: # Set live camera image
mVal = gray / 16
A[mVal == 0] = 0
A[mVal > 0] = np.power(10.0, (mVal[mVal > 0] - 15) / 10.0)
# Write 8/16-bit mono/stereo .wav file
with open(file_name, 'wb') as nf:
fp = wave.open(nf)
fp.setnchannels(2 if stereo else 1)
fp.setframerate(sample_frequency)
fp.setsampwidth(2 if hifi else 1)
tau1 = 0.5 / w[num_rows - 1]
tau2 = 0.25 * (tau1 * tau1)
y = yl = yr = z = zl = zr = 0.0
while current_frame < num_frames and not stereo:
if use_b_spline:
q = 1.0 * (current_frame % frames_per_column) / (frames_per_column - 1)
q2 = 0.5 * q * q
j = int(current_frame / frames_per_column)
j = num_columns - 1 if j > num_columns - 1 else j
s = 0.0
t = current_frame * dt
if current_frame < num_frames / (5 * num_columns):
s = (2.0 * rnd() - 1.0) / scale # "click"
else:
for i in range(0, num_rows):
if use_b_spline: # Quadratic B-spline for smooth C1 time window
if j == 0:
a = (1.0 - q2) * A[i][j] + q2 * A[i][j + 1]
elif j == num_columns - 1:
a = (q2 - q + 0.5) * A[i][j - 1] + (0.5 + q - q2) * A[i][j]
else:
a = (q2 - q + 0.5) * A[i][j - 1] + (0.5 + q - q * q) * A[i][j] + q2 * A[i][j + 1]
else:
a = A[i][j] # Rectangular time window
s += a * math.sin(w[i] * t + phi0[i])
yp = y
y = tau1 / dt + tau2 / (dt * dt)
y = (s + y * yp + tau2 / dt * z) / (1.0 + y)
z = (y - yp) / dt
l = sso + 0.5 + scale * ssm * y # y = 2nd order filtered s
if l >= sso - 1 + ssm: l = sso - 1 + ssm
if l < sso - ssm: l = sso - ssm
ss = int(l) & 0xFFFFFFFF # Make unsigned int
if hifi:
wi(fp, ss)
else:
fp.write(struct.pack('B', ss & 0xff))
current_frame += 1
while current_frame < num_frames and stereo:
if use_b_spline:
q = 1.0 * (current_frame % frames_per_column) / (frames_per_column - 1)
q2 = 0.5 * q * q
j = int(current_frame / frames_per_column)
j = num_columns - 1 if j > num_columns - 1 else j
r = 1.0 * current_frame / (num_frames - 1) # Binaural attenuation/delay parameter
theta = (r - 0.5) * TwoPi / 3
x = 0.5 * hs * (theta + math.sin(theta))
tl = tr = current_frame * dt
if delay:
tr += x / v # Time delay model
x = abs(x)
sl = sr = 0.0
hrtfl = hrtfr = 1.0
for i in range(0, num_rows):
if diffraction:
# First order frequency-dependent azimuth diffraction model
hrtf = 1.0 if (TwoPi * v / w[i] > x) else TwoPi * v / (x * w[i])
if theta < 0.0:
hrtfl = 1.0
hrtfr = hrtf
else:
hrtfl = hrtf
hrtfr = 1.0
if relative_fade:
# Simple frequency-independent relative fade model
hrtfl *= (1.0 - 0.7 * r)
hrtfr *= (0.3 + 0.7 * r)
if use_b_spline:
if j == 0:
a = (1.0 - q2) * A[i][j] + q2 * A[i][j + 1]
elif j == num_columns - 1:
a = (q2 - q + 0.5) * A[i][j - 1] + (0.5 + q - q2) * A[i][j]
else:
a = (q2 - q + 0.5) * A[i][j - 1] + (0.5 + q - q * q) * A[i][j] + q2 * A[i][j + 1]
else:
a = A[i][j]
sl += hrtfl * a * math.sin(w[i] * tl + phi0[i])
sr += hrtfr * a * math.sin(w[i] * tr + phi0[i])
sl = (2.0 * rnd() - 1.0) / scale if (current_frame < num_frames / (5 * num_columns)) else sl # Left "click"
if tl < 0.0: sl = 0.0;
if tr < 0.0: sr = 0.0;
ypl = yl
yl = tau1 / dt + tau2 / (dt * dt)
yl = (sl + yl * ypl + tau2 / dt * zl) / (1.0 + yl)
zl = (yl - ypl) / dt
ypr = yr
yr = tau1 / dt + tau2 / (dt * dt)
yr = (sr + yr * ypr + tau2 / dt * zr) / (1.0 + yr)
zr = (yr - ypr) / dt
l = sso + 0.5 + scale * ssm * yl
if l >= sso - 1 + ssm: l = sso - 1 + ssm
if l < sso - ssm: l = sso - ssm
ss = int(l) & 0xFFFFFFFF
# Left channel
if hifi:
wi(fp, ss)
else:
fp.write(struct.pack('B', ss & 0xff))
l = sso + 0.5 + scale * ssm * yr
if l >= sso - 1 + ssm: l = sso - 1 + ssm
if l < sso - ssm: l = sso - ssm
ss = int(l) & 0xFFFFFFFF
# Right channel
if hifi:
wi(fp, ss)
else:
fp.write(struct.pack('B', ss & 0xff))<|fim▁hole|>
fp.close()
playSound("hificode.wav") # Play the soundscape
current_frame = 0 # Reset sample count
cap.release()
cv.destroyAllWindows()
return 0
main()<|fim▁end|> | current_frame += 1 |
<|file_name|>m.js<|end_file_name|><|fim▁begin|>var my = require('my');
var maxHeight = 300, maxWidth = 300;
exports.view = function(data) {
console.log("view: m.js");
console.log(data);
var topic = data.topic;
return(
my.page({title: 'Hello World', scripts:["http://code.jquery.com/jquery-latest.js"]},
/*my.div({id: 'myDiv', style: {height: '800px', border: 'red 1px solid'}},
'Actor ' + data.name
),*/
my.h1(topic.name),
tabs(topic.friends),
gallery2(topic.friends[0].entities, '100%', '300px', '200px', '270px', '30px', '30px')
)
)}
function tabs(friends)
{
var tabs = my.div({});
for (var i = 0; i < friends.length; i++)
tabs.children.push(my.p(friends[i]));
return gallery;
}
function gallery(imgUrls, width, height, thumbWidth, thumbHeight, hGap, vGap) {
var galleryStyle = {
margin: 'auto',
width: width,
height: height
};
var thumbStyle = {<|fim▁hole|> '-moz-box-shadow': '1px 1px 6px #999',
'-webkit-box-shadow': '1px 1px 6px #999'
};
var gallery = my.div({style: galleryStyle});
for (var i = 0; i < imgUrls.length; i++)
gallery.children.push(my.img({style: thumbStyle, src: imgUrls[i]}));
return gallery;
}
function gallery2(imgUrls, width, height, thumbWidth, thumbHeight, hGap, vGap) {
var galleryStyle = {
display: 'inline-block',
width: width,
height: height
};
var thumbDivStyle = {
display: 'inline-block',
'margin-top': vGap,
'margin-left': hGap,
'width': thumbWidth,
'height': thumbHeight,
'text-align': 'center'
};
var thumbStyle = {
'max-width': thumbWidth,
'max-height': thumbHeight,
'-moz-box-shadow': '1px 1px 6px #999',
'-webkit-box-shadow': '1px 1px 6px #999'
};
var gallery = my.div({style: galleryStyle});
for (var i = 0; i < imgUrls.length; i++)
{
var imgUrl = "http://img.freebase.com/api/trans/image_thumb"+imgUrls[i].id+"?mode=fit&maxheight="+maxHeight+"&maxwidth="+maxWidth;
//console.log(imgUrls[i].id);
gallery.children.push(
my.div({style: thumbDivStyle},
my.img({style: thumbStyle, src: imgUrl}
)
));
}
return gallery;
}<|fim▁end|> | 'margin-top': vGap,
'margin-left': hGap,
'max-width': thumbWidth,
'max-height': thumbHeight, |
<|file_name|>services.func.js<|end_file_name|><|fim▁begin|>import Services from '../src/lib/services';
const _ = require('lodash');
require('db-migrate-mysql');
const expect = require('unexpected');
const request = require('./request.func');
let rds;
class Email {
// eslint-disable-next-line
sendHtml() {
return Promise.resolve();
}
}
class Slack {
// eslint-disable-next-line
sendAppUpdate() {
return Promise.resolve();
}
}
export default class FuncTestServices extends Services {
// eslint-disable-next-line class-methods-use-this
getEmail() {
return new Email();
}
// eslint-disable-next-line class-methods-use-this
getSlack() {
return new Slack();
}
getMysql() {
if (!rds) {
// eslint-disable-next-line global-require
rds = require('serverless-mysql')({
config: {
host: this.getEnv('RDS_HOST'),
user: this.getEnv('RDS_USER'),
password: this.getEnv('RDS_PASSWORD'),
database: this.getEnv('RDS_DATABASE'),
ssl: this.getEnv('RDS_SSL'),
port: this.getEnv('RDS_PORT'),
multipleStatements: true,
},
});
}
return rds;<|fim▁hole|> }
static async initDb() {
await rds.query(
'INSERT IGNORE INTO vendors SET id=?, name=?, address=?, email=?, isPublic=?',
[process.env.FUNC_VENDOR, 'test', 'test', process.env.FUNC_USER_EMAIL, 0],
);
await rds.query('DELETE FROM appVersions WHERE vendor=?', [process.env.FUNC_VENDOR]);
}
static async login() {
const res = await expect(request({
method: 'post',
url: `${_.get(process.env, 'API_ENDPOINT')}/auth/login`,
responseType: 'json',
data: {
email: process.env.FUNC_USER_EMAIL,
password: process.env.FUNC_USER_PASSWORD,
},
}), 'to be fulfilled');
expect(_.get(res, 'status'), 'to be', 200);
expect(_.get(res, 'data'), 'to have key', 'token');
return _.get(res, 'data.token');
}
static async cleanIconsFromS3(appId) {
const s3 = Services.getS3();
const data = await s3.listObjects({ Bucket: process.env.S3_BUCKET, Prefix: `${appId}/` }).promise();
if (data && _.has(data, 'Contents')) {
const promises = [];
_.each(data.Contents, (file) => {
promises.push(s3.deleteObject({ Bucket: process.env.S3_BUCKET, Key: file.Key }).promise());
});
return Promise.all(promises);
}
return Promise.resolve();
}
}<|fim▁end|> |