<|file_name|>VariantBool.hpp<|end_file_name|>
// Generated from /POI/java/org/apache/poi/hpsf/VariantBool.java
#pragma once
#include <fwd-POI.hpp>
#include <org/apache/poi/hpsf/fwd-POI.hpp>
#include <org/apache/poi/util/fwd-POI.hpp>
#include <java/lang/Object.hpp>
struct default_init_tag;
class poi::hpsf::VariantBool
: public virtual ::java::lang::Object
{
public:
typedef ::java::lang::Object super;
private:
static ::poi::util::POILogger* LOG_;
public: /* package */
static constexpr int32_t SIZE { int32_t(2) };
private:
    bool _value { };
protected:
    void ctor();
public: /* package */
    virtual void read(::poi::util::LittleEndianByteArrayInputStream* lei);
virtual bool getValue();
virtual void setValue(bool value);
// Generated
VariantBool();
protected:
VariantBool(const ::default_init_tag&);
public:
static ::java::lang::Class *class_();
static void clinit();
private:
static ::poi::util::POILogger*& LOG();
virtual ::java::lang::Class* getClass0();
};
<|file_name|>chain_data.rs<|end_file_name|>
use core::ops::{Add, AddAssign, Mul, MulAssign};
use num_traits::{One, Zero};
/// `ChainData` derives several traits and chains the `IntoIterator`
/// implementations over `&` and `&mut` to give access to the data within.
/// This is critical for optimizers that apply per-weight gradient update rules.
#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct ChainData<A, B>(pub A, pub B);
impl<A, B> Add for ChainData<A, B>
where
A: Add,
B: Add,
{
type Output = ChainData<A::Output, B::Output>;
fn add(self, rhs: Self) -> Self::Output {
ChainData(self.0 + rhs.0, self.1 + rhs.1)
}
}
impl<A, B> Add<f32> for ChainData<A, B>
where
A: Add<f32>,
B: Add<f32>,
{
type Output = ChainData<A::Output, B::Output>;
fn add(self, rhs: f32) -> Self::Output {
ChainData(self.0 + rhs, self.1 + rhs)
}
}
impl<A, B> Add<f64> for ChainData<A, B>
where
    A: Add<f64>,
B: Add<f64>,
{
type Output = ChainData<A::Output, B::Output>;
fn add(self, rhs: f64) -> Self::Output {
ChainData(self.0 + rhs, self.1 + rhs)
}
}
impl<A, B> AddAssign for ChainData<A, B>
where
A: AddAssign,
B: AddAssign,
{
fn add_assign(&mut self, rhs: Self) {
self.0 += rhs.0;
self.1 += rhs.1;
}
}
impl<A, B> AddAssign<f32> for ChainData<A, B>
where
A: AddAssign<f32>,
B: AddAssign<f32>,
{
fn add_assign(&mut self, rhs: f32) {
self.0 += rhs;
self.1 += rhs;
}
}
impl<A, B> AddAssign<f64> for ChainData<A, B>
where
A: AddAssign<f64>,
B: AddAssign<f64>,
{
fn add_assign(&mut self, rhs: f64) {
self.0 += rhs;
self.1 += rhs;
}
}
impl<A, B> Zero for ChainData<A, B>
where
A: Zero + Add,
B: Zero + Add,
{
fn zero() -> Self {
Self(A::zero(), B::zero())
}
fn is_zero(&self) -> bool {
self.0.is_zero() && self.1.is_zero()
}
}
impl<A, B> Mul for ChainData<A, B>
where
A: Mul,
B: Mul,
{
type Output = ChainData<A::Output, B::Output>;
fn mul(self, rhs: Self) -> Self::Output {
ChainData(self.0 * rhs.0, self.1 * rhs.1)
}
}
impl<A, B> MulAssign for ChainData<A, B>
where
A: MulAssign,
B: MulAssign,
{
fn mul_assign(&mut self, rhs: Self) {
self.0 *= rhs.0;
self.1 *= rhs.1;
}
}
impl<A, B> MulAssign<f32> for ChainData<A, B>
where
A: MulAssign<f32>,
B: MulAssign<f32>,
{
fn mul_assign(&mut self, rhs: f32) {
self.0 *= rhs;
self.1 *= rhs;
}
}
impl<A, B> MulAssign<f64> for ChainData<A, B>
where
A: MulAssign<f64>,
B: MulAssign<f64>,
{
fn mul_assign(&mut self, rhs: f64) {
self.0 *= rhs;
self.1 *= rhs;
}
}
impl<A, B> One for ChainData<A, B>
where
A: One + Mul,
B: One + Mul,
{
fn one() -> Self {
Self(A::one(), B::one())
}
}
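// Editorial note: illustrative usage sketch, not part of the original file.
// It exercises only the impls defined above (MulAssign<f32>, Add, Zero) to
// show the element-wise semantics of `ChainData`.
#[cfg(test)]
mod chain_data_usage {
    use super::ChainData;
    use num_traits::Zero;

    #[test]
    fn elementwise_arithmetic() {
        let mut a = ChainData(1.0f32, 2.0f32);
        a *= 2.0f32; // MulAssign<f32> scales both fields
        assert_eq!(a, ChainData(2.0, 4.0));
        // Add combines the two pairs field by field.
        assert_eq!(a + ChainData(3.0f32, 4.0f32), ChainData(5.0, 8.0));
        // Zero builds the additive identity for each field.
        assert!(ChainData::<f32, f32>::zero().is_zero());
    }
}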
<|file_name|>express-session.d.ts<|end_file_name|>
// Type definitions for express-session
// Project: https://www.npmjs.org/package/express-session
// Definitions by: Hiroki Horiuchi <https://github.com/horiuchi/>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
/// <reference path="../express/express.d.ts" />
/// <reference path="../node/node.d.ts" />
declare namespace Express {
export interface Request {
session?: Session;
}
export interface Session {
[key: string]: any;
regenerate: (callback: (err: any) => void) => void;
destroy: (callback: (err: any) => void) => void;
reload: (callback: (err: any) => void) => void;
save: (callback: (err: any) => void) => void;
touch: (callback: (err: any) => void) => void;
cookie: SessionCookie;
}
export interface SessionCookie {
originalMaxAge: number;
path: string;
        maxAge: number;
        secure?: boolean;
        httpOnly: boolean;
        domain?: string;
        expires: Date | boolean;
serialize: (name: string, value: string) => string;
}
}
declare module "express-session" {
import express = require('express');
import node = require('events');
function session(options?: session.SessionOptions): express.RequestHandler;
namespace session {
export interface SessionOptions {
secret: string;
name?: string;
store?: Store | MemoryStore;
cookie?: express.CookieOptions;
genid?: (req: express.Request) => string;
rolling?: boolean;
resave?: boolean;
proxy?: boolean;
saveUninitialized?: boolean;
unset?: string;
}
export interface BaseMemoryStore {
get: (sid: string, callback: (err: any, session: Express.Session) => void) => void;
set: (sid: string, session: Express.Session, callback: (err: any) => void) => void;
destroy: (sid: string, callback: (err: any) => void) => void;
length?: (callback: (err: any, length: number) => void) => void;
clear?: (callback: (err: any) => void) => void;
}
export abstract class Store extends node.EventEmitter {
constructor(config?: any);
regenerate (req: express.Request, fn: (err: any) => any): void;
load (sid: string, fn: (err: any, session: Express.Session) => any): void;
createSession (req: express.Request, sess: Express.Session): void;
get: (sid: string, callback: (err: any, session: Express.Session) => void) => void;
set: (sid: string, session: Express.Session, callback: (err: any) => void) => void;
destroy: (sid: string, callback: (err: any) => void) => void;
all: (callback: (err: any, obj: { [sid: string]: Express.Session; }) => void) => void;
length: (callback: (err: any, length: number) => void) => void;
clear: (callback: (err: any) => void) => void;
}
export class MemoryStore implements BaseMemoryStore {
get: (sid: string, callback: (err: any, session: Express.Session) => void) => void;
set: (sid: string, session: Express.Session, callback: (err: any) => void) => void;
destroy: (sid: string, callback: (err: any) => void) => void;
all: (callback: (err: any, obj: { [sid: string]: Express.Session; }) => void) => void;
length: (callback: (err: any, length: number) => void) => void;
clear: (callback: (err: any) => void) => void;
}
}
export = session;
}
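// Editorial note: minimal usage sketch, not part of the original typings.
// Assuming the definitions above, a typical Express app type-checks like so:
//
//     import express = require('express');
//     import session = require('express-session');
//
//     const app = express();
//     app.use(session({ secret: 'keyboard cat', resave: false, saveUninitialized: true }));
//     app.get('/', (req, res) => {
//         // `req.session` has the indexable Session shape declared above.
//         req.session['views'] = (req.session['views'] || 0) + 1;
//         res.end('views: ' + req.session['views']);
//     });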
<|file_name|>cargo.rs<|end_file_name|>
extern crate rustc_serialize;
extern crate docopt;
use docopt::Docopt;
// Write the Docopt usage string.
const USAGE: &'static str = "
Rust's package manager
Usage:
cargo <command> [<args>...]
cargo [options]
Options:
-h, --help Display this message
-V, --version Print version info and exit
--list List installed commands
-v, --verbose Use verbose output
Some common cargo commands are:
build Compile the current project
clean Remove the target directory
doc Build this project's and its dependencies' documentation
new Create a new cargo project
run Build and execute src/main.rs
test Run the tests
bench Run the benchmarks
update Update dependencies listed in Cargo.lock
See 'cargo help <command>' for more information on a specific command.
";
#[derive(Debug, RustcDecodable)]
struct Args {
arg_command: Option<Command>,
arg_args: Vec<String>,
flag_list: bool,
flag_verbose: bool,
}
#[derive(Debug, RustcDecodable)]
enum Command {
Build, Clean, Doc, New, Run, Test, Bench, Update,
}
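// Editorial note (illustrative, not part of the original file): docopt binds
// by naming convention -- `arg_command` maps to `<command>`, `arg_args` to
// `<args>...`, and `flag_list`/`flag_verbose` to the `--list`/`--verbose`
// options -- so `cargo build -v` decodes to
// Args { arg_command: Some(Command::Build), flag_verbose: true, .. }.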
fn main() {
let args: Args = Docopt::new(USAGE)
        .and_then(|d| d.options_first(true).decode())
        .unwrap_or_else(|e| e.exit());
    println!("{:?}", args);
}
<|file_name|>test_dtoc.py<|end_file_name|>
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2012 The Chromium OS Authors.
#
"""Tests for the dtb_platdata module
This includes unit tests for some functions and functional tests for the dtoc
tool.
"""
import collections
import os
import struct
import sys
import tempfile
import unittest
from dtoc import dtb_platdata
from dtb_platdata import conv_name_to_c
from dtb_platdata import get_compat_name
from dtb_platdata import get_value
from dtb_platdata import tab_to
from dtoc import fdt
from dtoc import fdt_util
from patman import test_util
from patman import tools
our_path = os.path.dirname(os.path.realpath(__file__))
HEADER = '''/*
* DO NOT MODIFY
*
* This file was generated by dtoc from a .dtb (device tree binary) file.
*/
#include <stdbool.h>
#include <linux/libfdt.h>'''
C_HEADER = '''/*
* DO NOT MODIFY
*
* This file was generated by dtoc from a .dtb (device tree binary) file.
*/
/* Allow use of U_BOOT_DEVICE() in this file */
#define DT_PLATDATA_C
#include <common.h>
#include <dm.h>
#include <dt-structs.h>
'''
C_EMPTY_POPULATE_PHANDLE_DATA = '''void dm_populate_phandle_data(void) {
}
'''
def get_dtb_file(dts_fname, capture_stderr=False):
"""Compile a .dts file to a .dtb
Args:
dts_fname: Filename of .dts file in the current directory
capture_stderr: True to capture and discard stderr output
Returns:
Filename of compiled file in output directory
"""
return fdt_util.EnsureCompiled(os.path.join(our_path, dts_fname),
capture_stderr=capture_stderr)
class TestDtoc(unittest.TestCase):
"""Tests for dtoc"""
@classmethod
def setUpClass(cls):
tools.PrepareOutputDir(None)
cls.maxDiff = None
@classmethod
def tearDownClass(cls):
tools._RemoveOutputDir()
def _WritePythonString(self, fname, data):
"""Write a string with tabs expanded as done in this Python file
Args:
fname: Filename to write to
data: Raw string to convert
"""
data = data.replace('\t', '\\t')
with open(fname, 'w') as fd:
fd.write(data)
def _CheckStrings(self, expected, actual):
"""Check that a string matches its expected value
If the strings do not match, they are written to the /tmp directory in
the same Python format as is used here in the test. This allows for
easy comparison and update of the tests.
Args:
expected: Expected string
actual: Actual string
"""
if expected != actual:
self._WritePythonString('/tmp/binman.expected', expected)
self._WritePythonString('/tmp/binman.actual', actual)
print('Failures written to /tmp/binman.{expected,actual}')
self.assertEquals(expected, actual)
def run_test(self, args, dtb_file, output):
dtb_platdata.run_steps(args, dtb_file, False, output, True)
def test_name(self):
"""Test conversion of device tree names to C identifiers"""
self.assertEqual('serial_at_0x12', conv_name_to_c('serial@0x12'))
self.assertEqual('vendor_clock_frequency',
conv_name_to_c('vendor,clock-frequency'))
self.assertEqual('rockchip_rk3399_sdhci_5_1',
conv_name_to_c('rockchip,rk3399-sdhci-5.1'))
def test_tab_to(self):
"""Test operation of tab_to() function"""
self.assertEqual('fred ', tab_to(0, 'fred'))
self.assertEqual('fred\t', tab_to(1, 'fred'))
self.assertEqual('fred was here ', tab_to(1, 'fred was here'))
self.assertEqual('fred was here\t\t', tab_to(3, 'fred was here'))
self.assertEqual('exactly8 ', tab_to(1, 'exactly8'))
self.assertEqual('exactly8\t', tab_to(2, 'exactly8'))
def test_get_value(self):
"""Test operation of get_value() function"""
self.assertEqual('0x45',
get_value(fdt.TYPE_INT, struct.pack('>I', 0x45)))
self.assertEqual('0x45',
get_value(fdt.TYPE_BYTE, struct.pack('<I', 0x45)))
self.assertEqual('0x0',
get_value(fdt.TYPE_BYTE, struct.pack('>I', 0x45)))
self.assertEqual('"test"', get_value(fdt.TYPE_STRING, 'test'))
self.assertEqual('true', get_value(fdt.TYPE_BOOL, None))
def test_get_compat_name(self):
"""Test operation of get_compat_name() function"""
Prop = collections.namedtuple('Prop', ['value'])
Node = collections.namedtuple('Node', ['props'])
prop = Prop(['rockchip,rk3399-sdhci-5.1', 'arasan,sdhci-5.1'])
node = Node({'compatible': prop})
self.assertEqual((['rockchip_rk3399_sdhci_5_1', 'arasan_sdhci_5_1']),
get_compat_name(node))
prop = Prop(['rockchip,rk3399-sdhci-5.1'])
node = Node({'compatible': prop})
self.assertEqual((['rockchip_rk3399_sdhci_5_1']),
get_compat_name(node))
prop = Prop(['rockchip,rk3399-sdhci-5.1', 'arasan,sdhci-5.1', 'third'])
node = Node({'compatible': prop})
self.assertEqual((['rockchip_rk3399_sdhci_5_1',
'arasan_sdhci_5_1', 'third']),
get_compat_name(node))
def test_empty_file(self):
"""Test output from a device tree file with no nodes"""
dtb_file = get_dtb_file('dtoc_test_empty.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
lines = infile.read().splitlines()
self.assertEqual(HEADER.splitlines(), lines)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
lines = infile.read().splitlines()
self.assertEqual(C_HEADER.splitlines() + [''] +
C_EMPTY_POPULATE_PHANDLE_DATA.splitlines(), lines)
def test_simple(self):
"""Test output from some simple nodes with various types of data"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_sandbox_i2c_test {
};
struct dtd_sandbox_pmic_test {
\tbool\t\tlow_power;
\tfdt64_t\t\treg[2];
};
struct dtd_sandbox_spl_test {
\tconst char * acpi_name;
\tbool\t\tboolval;
\tunsigned char\tbytearray[3];
\tunsigned char\tbyteval;
\tfdt32_t\t\tintarray[4];
\tfdt32_t\t\tintval;
\tunsigned char\tlongbytearray[9];
\tunsigned char\tnotstring[5];
\tconst char *\tstringarray[3];
\tconst char *\tstringval;
};
struct dtd_sandbox_spl_test_2 {
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /i2c@0 index 0 */
static struct dtd_sandbox_i2c_test dtv_i2c_at_0 = {
};
U_BOOT_DEVICE(i2c_at_0) = {
\t.name\t\t= "sandbox_i2c_test",
\t.platdata\t= &dtv_i2c_at_0,
\t.platdata_size\t= sizeof(dtv_i2c_at_0),
\t.parent_idx\t= -1,
};
/* Node /i2c@0/pmic@9 index 1 */
static struct dtd_sandbox_pmic_test dtv_pmic_at_9 = {
\t.low_power\t\t= true,
\t.reg\t\t\t= {0x9, 0x0},
};
U_BOOT_DEVICE(pmic_at_9) = {
\t.name\t\t= "sandbox_pmic_test",
\t.platdata\t= &dtv_pmic_at_9,
\t.platdata_size\t= sizeof(dtv_pmic_at_9),
\t.parent_idx\t= 0,
};
/* Node /spl-test index 2 */
static struct dtd_sandbox_spl_test dtv_spl_test = {
\t.boolval\t\t= true,
\t.bytearray\t\t= {0x6, 0x0, 0x0},
\t.byteval\t\t= 0x5,
\t.intarray\t\t= {0x2, 0x3, 0x4, 0x0},
\t.intval\t\t\t= 0x1,
\t.longbytearray\t\t= {0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10,
\t\t0x11},
\t.notstring\t\t= {0x20, 0x21, 0x22, 0x10, 0x0},
\t.stringarray\t\t= {"multi-word", "message", ""},
\t.stringval\t\t= "message",
};
U_BOOT_DEVICE(spl_test) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test,
\t.platdata_size\t= sizeof(dtv_spl_test),
\t.parent_idx\t= -1,
};
/* Node /spl-test2 index 3 */
static struct dtd_sandbox_spl_test dtv_spl_test2 = {
\t.acpi_name\t\t= "\\\\_SB.GPO0",
\t.bytearray\t\t= {0x1, 0x23, 0x34},
\t.byteval\t\t= 0x8,
\t.intarray\t\t= {0x5, 0x0, 0x0, 0x0},
\t.intval\t\t\t= 0x3,
\t.longbytearray\t\t= {0x9, 0xa, 0xb, 0xc, 0x0, 0x0, 0x0, 0x0,
\t\t0x0},
\t.stringarray\t\t= {"another", "multi-word", "message"},
\t.stringval\t\t= "message2",
};
U_BOOT_DEVICE(spl_test2) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test2,
\t.platdata_size\t= sizeof(dtv_spl_test2),
\t.parent_idx\t= -1,
};
/* Node /spl-test3 index 4 */
static struct dtd_sandbox_spl_test dtv_spl_test3 = {
\t.longbytearray\t\t= {0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10,
\t\t0x0},
\t.stringarray\t\t= {"one", "", ""},
};
U_BOOT_DEVICE(spl_test3) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test3,
\t.platdata_size\t= sizeof(dtv_spl_test3),
\t.parent_idx\t= -1,
};
/* Node /spl-test4 index 5 */
static struct dtd_sandbox_spl_test_2 dtv_spl_test4 = {
};
U_BOOT_DEVICE(spl_test4) = {
\t.name\t\t= "sandbox_spl_test_2",
\t.platdata\t= &dtv_spl_test4,
\t.platdata_size\t= sizeof(dtv_spl_test4),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def test_driver_alias(self):
"""Test output from a device tree file with a driver alias"""
dtb_file = get_dtb_file('dtoc_test_driver_alias.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_sandbox_gpio {
\tconst char *\tgpio_bank_name;
\tbool\t\tgpio_controller;
\tfdt32_t\t\tsandbox_gpio_count;
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /gpios@0 index 0 */
static struct dtd_sandbox_gpio dtv_gpios_at_0 = {
\t.gpio_bank_name\t\t= "a",
\t.gpio_controller\t= true,
\t.sandbox_gpio_count\t= 0x14,
};
U_BOOT_DEVICE(gpios_at_0) = {
\t.name\t\t= "sandbox_gpio",<|fim▁hole|>\t.platdata_size\t= sizeof(dtv_gpios_at_0),
\t.parent_idx\t= -1,
};
void dm_populate_phandle_data(void) {
}
''', data)
def test_invalid_driver(self):
"""Test output from a device tree file with an invalid driver"""
dtb_file = get_dtb_file('dtoc_test_invalid_driver.dts')
output = tools.GetOutputFilename('output')
with test_util.capture_sys_output() as (stdout, stderr):
dtb_platdata.run_steps(['struct'], dtb_file, False, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_invalid {
};
''', data)
with test_util.capture_sys_output() as (stdout, stderr):
dtb_platdata.run_steps(['platdata'], dtb_file, False, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /spl-test index 0 */
static struct dtd_invalid dtv_spl_test = {
};
U_BOOT_DEVICE(spl_test) = {
\t.name\t\t= "invalid",
\t.platdata\t= &dtv_spl_test,
\t.platdata_size\t= sizeof(dtv_spl_test),
\t.parent_idx\t= -1,
};
void dm_populate_phandle_data(void) {
}
''', data)
def test_phandle(self):
"""Test output from a node containing a phandle reference"""
dtb_file = get_dtb_file('dtoc_test_phandle.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_source {
\tstruct phandle_2_arg clocks[4];
};
struct dtd_target {
\tfdt32_t\t\tintval;
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /phandle2-target index 0 */
static struct dtd_target dtv_phandle2_target = {
\t.intval\t\t\t= 0x1,
};
U_BOOT_DEVICE(phandle2_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle2_target,
\t.platdata_size\t= sizeof(dtv_phandle2_target),
\t.parent_idx\t= -1,
};
/* Node /phandle3-target index 1 */
static struct dtd_target dtv_phandle3_target = {
\t.intval\t\t\t= 0x2,
};
U_BOOT_DEVICE(phandle3_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle3_target,
\t.platdata_size\t= sizeof(dtv_phandle3_target),
\t.parent_idx\t= -1,
};
/* Node /phandle-target index 4 */
static struct dtd_target dtv_phandle_target = {
\t.intval\t\t\t= 0x0,
};
U_BOOT_DEVICE(phandle_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle_target,
\t.platdata_size\t= sizeof(dtv_phandle_target),
\t.parent_idx\t= -1,
};
/* Node /phandle-source index 2 */
static struct dtd_source dtv_phandle_source = {
\t.clocks\t\t\t= {
\t\t\t{4, {}},
\t\t\t{0, {11}},
\t\t\t{1, {12, 13}},
\t\t\t{4, {}},},
};
U_BOOT_DEVICE(phandle_source) = {
\t.name\t\t= "source",
\t.platdata\t= &dtv_phandle_source,
\t.platdata_size\t= sizeof(dtv_phandle_source),
\t.parent_idx\t= -1,
};
/* Node /phandle-source2 index 3 */
static struct dtd_source dtv_phandle_source2 = {
\t.clocks\t\t\t= {
\t\t\t{4, {}},},
};
U_BOOT_DEVICE(phandle_source2) = {
\t.name\t\t= "source",
\t.platdata\t= &dtv_phandle_source2,
\t.platdata_size\t= sizeof(dtv_phandle_source2),
\t.parent_idx\t= -1,
};
void dm_populate_phandle_data(void) {
}
''', data)
def test_phandle_single(self):
"""Test output from a node containing a phandle reference"""
dtb_file = get_dtb_file('dtoc_test_phandle_single.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_source {
\tstruct phandle_0_arg clocks[1];
};
struct dtd_target {
\tfdt32_t\t\tintval;
};
''', data)
def test_phandle_reorder(self):
"""Test that phandle targets are generated before their references"""
dtb_file = get_dtb_file('dtoc_test_phandle_reorder.dts')
output = tools.GetOutputFilename('output')
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /phandle-target index 1 */
static struct dtd_target dtv_phandle_target = {
};
U_BOOT_DEVICE(phandle_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle_target,
\t.platdata_size\t= sizeof(dtv_phandle_target),
\t.parent_idx\t= -1,
};
/* Node /phandle-source2 index 0 */
static struct dtd_source dtv_phandle_source2 = {
\t.clocks\t\t\t= {
\t\t\t{1, {}},},
};
U_BOOT_DEVICE(phandle_source2) = {
\t.name\t\t= "source",
\t.platdata\t= &dtv_phandle_source2,
\t.platdata_size\t= sizeof(dtv_phandle_source2),
\t.parent_idx\t= -1,
};
void dm_populate_phandle_data(void) {
}
''', data)
def test_phandle_cd_gpio(self):
"""Test that phandle targets are generated when unsing cd-gpios"""
dtb_file = get_dtb_file('dtoc_test_phandle_cd_gpios.dts')
output = tools.GetOutputFilename('output')
dtb_platdata.run_steps(['platdata'], dtb_file, False, output, True)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /phandle2-target index 0 */
static struct dtd_target dtv_phandle2_target = {
\t.intval\t\t\t= 0x1,
};
U_BOOT_DEVICE(phandle2_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle2_target,
\t.platdata_size\t= sizeof(dtv_phandle2_target),
\t.parent_idx\t= -1,
};
/* Node /phandle3-target index 1 */
static struct dtd_target dtv_phandle3_target = {
\t.intval\t\t\t= 0x2,
};
U_BOOT_DEVICE(phandle3_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle3_target,
\t.platdata_size\t= sizeof(dtv_phandle3_target),
\t.parent_idx\t= -1,
};
/* Node /phandle-target index 4 */
static struct dtd_target dtv_phandle_target = {
\t.intval\t\t\t= 0x0,
};
U_BOOT_DEVICE(phandle_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle_target,
\t.platdata_size\t= sizeof(dtv_phandle_target),
\t.parent_idx\t= -1,
};
/* Node /phandle-source index 2 */
static struct dtd_source dtv_phandle_source = {
\t.cd_gpios\t\t= {
\t\t\t{4, {}},
\t\t\t{0, {11}},
\t\t\t{1, {12, 13}},
\t\t\t{4, {}},},
};
U_BOOT_DEVICE(phandle_source) = {
\t.name\t\t= "source",
\t.platdata\t= &dtv_phandle_source,
\t.platdata_size\t= sizeof(dtv_phandle_source),
\t.parent_idx\t= -1,
};
/* Node /phandle-source2 index 3 */
static struct dtd_source dtv_phandle_source2 = {
\t.cd_gpios\t\t= {
\t\t\t{4, {}},},
};
U_BOOT_DEVICE(phandle_source2) = {
\t.name\t\t= "source",
\t.platdata\t= &dtv_phandle_source2,
\t.platdata_size\t= sizeof(dtv_phandle_source2),
\t.parent_idx\t= -1,
};
void dm_populate_phandle_data(void) {
}
''', data)
def test_phandle_bad(self):
"""Test a node containing an invalid phandle fails"""
dtb_file = get_dtb_file('dtoc_test_phandle_bad.dts',
capture_stderr=True)
output = tools.GetOutputFilename('output')
with self.assertRaises(ValueError) as e:
self.run_test(['struct'], dtb_file, output)
self.assertIn("Cannot parse 'clocks' in node 'phandle-source'",
str(e.exception))
def test_phandle_bad2(self):
"""Test a phandle target missing its #*-cells property"""
dtb_file = get_dtb_file('dtoc_test_phandle_bad2.dts',
capture_stderr=True)
output = tools.GetOutputFilename('output')
with self.assertRaises(ValueError) as e:
self.run_test(['struct'], dtb_file, output)
self.assertIn("Node 'phandle-target' has no cells property",
str(e.exception))
def test_addresses64(self):
"""Test output from a node with a 'reg' property with na=2, ns=2"""
dtb_file = get_dtb_file('dtoc_test_addr64.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_test1 {
\tfdt64_t\t\treg[2];
};
struct dtd_test2 {
\tfdt64_t\t\treg[2];
};
struct dtd_test3 {
\tfdt64_t\t\treg[4];
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /test1 index 0 */
static struct dtd_test1 dtv_test1 = {
\t.reg\t\t\t= {0x1234, 0x5678},
};
U_BOOT_DEVICE(test1) = {
\t.name\t\t= "test1",
\t.platdata\t= &dtv_test1,
\t.platdata_size\t= sizeof(dtv_test1),
\t.parent_idx\t= -1,
};
/* Node /test2 index 1 */
static struct dtd_test2 dtv_test2 = {
\t.reg\t\t\t= {0x1234567890123456, 0x9876543210987654},
};
U_BOOT_DEVICE(test2) = {
\t.name\t\t= "test2",
\t.platdata\t= &dtv_test2,
\t.platdata_size\t= sizeof(dtv_test2),
\t.parent_idx\t= -1,
};
/* Node /test3 index 2 */
static struct dtd_test3 dtv_test3 = {
\t.reg\t\t\t= {0x1234567890123456, 0x9876543210987654, 0x2, 0x3},
};
U_BOOT_DEVICE(test3) = {
\t.name\t\t= "test3",
\t.platdata\t= &dtv_test3,
\t.platdata_size\t= sizeof(dtv_test3),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def test_addresses32(self):
"""Test output from a node with a 'reg' property with na=1, ns=1"""
dtb_file = get_dtb_file('dtoc_test_addr32.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_test1 {
\tfdt32_t\t\treg[2];
};
struct dtd_test2 {
\tfdt32_t\t\treg[4];
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /test1 index 0 */
static struct dtd_test1 dtv_test1 = {
\t.reg\t\t\t= {0x1234, 0x5678},
};
U_BOOT_DEVICE(test1) = {
\t.name\t\t= "test1",
\t.platdata\t= &dtv_test1,
\t.platdata_size\t= sizeof(dtv_test1),
\t.parent_idx\t= -1,
};
/* Node /test2 index 1 */
static struct dtd_test2 dtv_test2 = {
\t.reg\t\t\t= {0x12345678, 0x98765432, 0x2, 0x3},
};
U_BOOT_DEVICE(test2) = {
\t.name\t\t= "test2",
\t.platdata\t= &dtv_test2,
\t.platdata_size\t= sizeof(dtv_test2),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def test_addresses64_32(self):
"""Test output from a node with a 'reg' property with na=2, ns=1"""
dtb_file = get_dtb_file('dtoc_test_addr64_32.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_test1 {
\tfdt64_t\t\treg[2];
};
struct dtd_test2 {
\tfdt64_t\t\treg[2];
};
struct dtd_test3 {
\tfdt64_t\t\treg[4];
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /test1 index 0 */
static struct dtd_test1 dtv_test1 = {
\t.reg\t\t\t= {0x123400000000, 0x5678},
};
U_BOOT_DEVICE(test1) = {
\t.name\t\t= "test1",
\t.platdata\t= &dtv_test1,
\t.platdata_size\t= sizeof(dtv_test1),
\t.parent_idx\t= -1,
};
/* Node /test2 index 1 */
static struct dtd_test2 dtv_test2 = {
\t.reg\t\t\t= {0x1234567890123456, 0x98765432},
};
U_BOOT_DEVICE(test2) = {
\t.name\t\t= "test2",
\t.platdata\t= &dtv_test2,
\t.platdata_size\t= sizeof(dtv_test2),
\t.parent_idx\t= -1,
};
/* Node /test3 index 2 */
static struct dtd_test3 dtv_test3 = {
\t.reg\t\t\t= {0x1234567890123456, 0x98765432, 0x2, 0x3},
};
U_BOOT_DEVICE(test3) = {
\t.name\t\t= "test3",
\t.platdata\t= &dtv_test3,
\t.platdata_size\t= sizeof(dtv_test3),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def test_addresses32_64(self):
"""Test output from a node with a 'reg' property with na=1, ns=2"""
dtb_file = get_dtb_file('dtoc_test_addr32_64.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_test1 {
\tfdt64_t\t\treg[2];
};
struct dtd_test2 {
\tfdt64_t\t\treg[2];
};
struct dtd_test3 {
\tfdt64_t\t\treg[4];
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /test1 index 0 */
static struct dtd_test1 dtv_test1 = {
\t.reg\t\t\t= {0x1234, 0x567800000000},
};
U_BOOT_DEVICE(test1) = {
\t.name\t\t= "test1",
\t.platdata\t= &dtv_test1,
\t.platdata_size\t= sizeof(dtv_test1),
\t.parent_idx\t= -1,
};
/* Node /test2 index 1 */
static struct dtd_test2 dtv_test2 = {
\t.reg\t\t\t= {0x12345678, 0x9876543210987654},
};
U_BOOT_DEVICE(test2) = {
\t.name\t\t= "test2",
\t.platdata\t= &dtv_test2,
\t.platdata_size\t= sizeof(dtv_test2),
\t.parent_idx\t= -1,
};
/* Node /test3 index 2 */
static struct dtd_test3 dtv_test3 = {
\t.reg\t\t\t= {0x12345678, 0x9876543210987654, 0x2, 0x3},
};
U_BOOT_DEVICE(test3) = {
\t.name\t\t= "test3",
\t.platdata\t= &dtv_test3,
\t.platdata_size\t= sizeof(dtv_test3),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def test_bad_reg(self):
"""Test that a reg property with an invalid type generates an error"""
# Capture stderr since dtc will emit warnings for this file
dtb_file = get_dtb_file('dtoc_test_bad_reg.dts', capture_stderr=True)
output = tools.GetOutputFilename('output')
with self.assertRaises(ValueError) as e:
self.run_test(['struct'], dtb_file, output)
self.assertIn("Node 'spl-test' reg property is not an int",
str(e.exception))
def test_bad_reg2(self):
"""Test that a reg property with an invalid cell count is detected"""
# Capture stderr since dtc will emit warnings for this file
dtb_file = get_dtb_file('dtoc_test_bad_reg2.dts', capture_stderr=True)
output = tools.GetOutputFilename('output')
with self.assertRaises(ValueError) as e:
self.run_test(['struct'], dtb_file, output)
self.assertIn("Node 'spl-test' reg property has 3 cells which is not a multiple of na + ns = 1 + 1)",
str(e.exception))
def test_add_prop(self):
"""Test that a subequent node can add a new property to a struct"""
dtb_file = get_dtb_file('dtoc_test_add_prop.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_sandbox_spl_test {
\tfdt32_t\t\tintarray;
\tfdt32_t\t\tintval;
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /spl-test index 0 */
static struct dtd_sandbox_spl_test dtv_spl_test = {
\t.intval\t\t\t= 0x1,
};
U_BOOT_DEVICE(spl_test) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test,
\t.platdata_size\t= sizeof(dtv_spl_test),
\t.parent_idx\t= -1,
};
/* Node /spl-test2 index 1 */
static struct dtd_sandbox_spl_test dtv_spl_test2 = {
\t.intarray\t\t= 0x5,
};
U_BOOT_DEVICE(spl_test2) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test2,
\t.platdata_size\t= sizeof(dtv_spl_test2),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def testStdout(self):
"""Test output to stdout"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
with test_util.capture_sys_output() as (stdout, stderr):
self.run_test(['struct'], dtb_file, '-')
def testNoCommand(self):
"""Test running dtoc without a command"""
with self.assertRaises(ValueError) as e:
self.run_test([], '', '')
self.assertIn("Please specify a command: struct, platdata",
str(e.exception))
def testBadCommand(self):
"""Test running dtoc with an invalid command"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
output = tools.GetOutputFilename('output')
with self.assertRaises(ValueError) as e:
self.run_test(['invalid-cmd'], dtb_file, output)
self.assertIn("Unknown command 'invalid-cmd': (use: struct, platdata)",
str(e.exception))
def testScanDrivers(self):
"""Test running dtoc with additional drivers to scan"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
output = tools.GetOutputFilename('output')
with test_util.capture_sys_output() as (stdout, stderr):
dtb_platdata.run_steps(['struct'], dtb_file, False, output, True,
[None, '', 'tools/dtoc/dtoc_test_scan_drivers.cxx'])
def testUnicodeError(self):
"""Test running dtoc with an invalid unicode file
To be able to perform this test without adding a weird text file which
would produce issues when using checkpatch.pl or patman, generate the
file at runtime and then process it.
"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
output = tools.GetOutputFilename('output')
driver_fn = '/tmp/' + next(tempfile._get_candidate_names())
with open(driver_fn, 'wb+') as df:
df.write(b'\x81')
with test_util.capture_sys_output() as (stdout, stderr):
dtb_platdata.run_steps(['struct'], dtb_file, False, output, True,
            [driver_fn])
<|file_name|>horizontalintervals.py<|end_file_name|>
#! /usr/bin/env python
from zplot import *
t = table('horizontalintervals.data')
canvas = postscript('horizontalintervals.eps')
d = drawable(canvas, coord=[50,30], xrange=[0,900],
             yrange=[0,t.getmax('nodes')])
axis(d, xtitle='Throughput (MB)', xauto=[0,900,300],
     ytitle='Nodes', yauto=[0,t.getmax('nodes'),1])
# ylofield and yhifield specify the interval range
p = plotter()
p.horizontalintervals(d, t, yfield='nodes', xlofield='min', xhifield='max')
canvas.render()
<|file_name|>debug.py<|end_file_name|>
# -*- coding: utf-8 -*-
## Copyright 2015 Rasmus Scholer Sorensen, [email protected]
##
## This file is part of Nascent.
##
## Nascent is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
#pylint: disable=C0103,C0111,W0613
from __future__ import absolute_import, print_function, division
from pprint import pprint
do_print = False
def print_debug(*args, **kwargs):
""" Change module-level do_print variable to toggle behaviour. """
if 'origin' in kwargs:
del kwargs['origin']
if do_print:
print(*args, **kwargs)
def pprint_debug(*args, **kwargs):
if do_print:
pprint(*args, **kwargs)
def info_print(*args, **kwargs):
""" Will print the file and line before printing. Can be used to find spurrious print statements. """
from inspect import currentframe, getframeinfo
frameinfo = getframeinfo(currentframe().f_back)
print(frameinfo.filename, frameinfo.lineno)
pprint(*args, **kwargs)
def info_pprint(*args, **kwargs):
""" Will print the file and line before printing the variable. """
    from inspect import currentframe, getframeinfo
    frameinfo = getframeinfo(currentframe().f_back)
    print(frameinfo.filename, frameinfo.lineno)
    pprint(*args, **kwargs)

pprintd = pprint_debug
printd = print_debug
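# Editorial note: illustrative usage sketch, not part of the original file.
# Toggling the module-level flag turns the debug printers on and off:
#
#   import debug
#   debug.printd("hidden")        # do_print is False, so nothing is printed
#   debug.do_print = True
#   debug.printd("now visible")   # printed
#   debug.pprintd({'key': 1})     # pretty-printed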
<|file_name|>mod.rs<|end_file_name|>
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */
use extract_graphql::parse_chunks;
use fixture_tests::Fixture;

pub fn transform_fixture(fixture: &Fixture<'_>) -> Result<String, String> {
    Ok(format!("{:#?}", parse_chunks(fixture.content)))
}
<|file_name|>index.story.tsx<|end_file_name|>
import * as React from 'react';
import { Ratings, Mode, Size, Layout } from '..';
import * as examples from './examples';
import {
header,
api,
divider,
importExample,
playground,
tab,
code as baseCode,
tabs,
testkit,
title,
} from 'wix-storybook-utils/Sections';
import { allComponents } from '../../../../stories/utils/allComponents';
import { settingsPanel } from '../../../../stories/utils/SettingsPanel';
import { settingsApi } from '../../../../stories/utils/SettingsApi';
import * as ExtendedRawSource from '!raw-loader!./RatingsExtendedExample.tsx';
import * as ExtendedCSSRawSource from '!raw-loader!./RatingsExtendedExample.st.css';
import { RatingsExtendedExample } from './RatingsExtendedExample';
import { StoryCategory } from '../../../../stories/storyHierarchy';
const code = (config) =>
baseCode({ components: allComponents, compact: true, ...config });
export default {
category: StoryCategory.COMPONENTS,
storyName: 'Ratings',
component: Ratings,
componentPath: '../Ratings.tsx',
componentProps: () => ({
'data-hook': 'storybook-Ratings',
}),
exampleProps: {
mode: Object.values(Mode),
size: Object.values(Size),
layout: Object.values(Layout),
inputOptions: [
{ value: [], label: 'no hovers' },
      {
        value: ['Very Baasa', 'Baasa', 'OK', 'Achla', 'Very Achla'],
        label: 'with hovers',
      },
    ],
  },
dataHook: 'storybook-Ratings',
sections: [
header(),
tabs([
tab({
title: 'Usage',
sections: [
importExample({
source: examples.importExample,
}),
divider(),
title('Examples'),
...[{ title: 'mode="input"', source: examples.defult }].map(code),
...[
{
title: 'mode="input" with inputOptions',
source: examples.inputWithValue,
},
].map(code),
...[{ title: 'mode="display"', source: examples.defaultDisplay }].map(
code,
),
...[
{
title: 'mode="display" with labels',
source: examples.displayWithLables,
},
].map(code),
],
}),
...[
{ title: 'API', sections: [api()] },
{ title: 'Style API', sections: [settingsApi()] },
{ title: 'TestKit', sections: [testkit()] },
{ title: 'Playground', sections: [playground()] },
{
title: 'Settings Panel',
sections: [
settingsPanel({
title: 'Settings Panel',
example: <RatingsExtendedExample />,
rawSource: ExtendedRawSource,
rawCSSSource: ExtendedCSSRawSource,
params: {
colors: [
{
label: 'Icon Color',
wixParam: 'iconColor',
defaultColor: 'color-8',
},
{
label: 'Icon Empty Color',
wixParam: 'iconEmptyColor',
defaultColor: 'color-3',
},
{
label: 'Text Color',
wixParam: 'textColor',
defaultColor: 'color-5',
},
],
},
}),
],
},
].map(tab),
]),
],
};
<|file_name|>polar.py<|end_file_name|>
#!/usr/bin/python
# -*- coding: utf-8 -*-
from numpy import array, zeros, linspace, meshgrid, ndarray, diag
from numpy import uint8, float64, int8, int0, float128, complex128
from numpy import exp, sqrt, cos, tan, arctan
from numpy import minimum, maximum
from numpy import ceil, floor
from numpy import matrix as npmatrix
from numpy.fft import fft, ifft
from numpy import pi
from scipy.linalg import solve_triangular as solve
from scipy.signal import fftconvolve as conv
from scipy.ndimage import geometric_transform as transform
# We will make use of *reentrant* locks.
from threading import RLock as Lock
from threading import Condition, Thread
# This module is a modification on python's queue module,
# which allows one to interrupt a queue.
import iqueue
# This is a module written to execute code in parallel.
# While python is limited by the Global Interpreter Lock,
# numerical operations on NumPy arrays are generally not
# limited by the GIL.
import parallel
# This module allows the conversion of SAGE symbolic expressions
# to RPN code through the symbolic_to_rpn. RPNProgram is a subclass
# of list that comes equipped with a __call__ method that implements
# execution of the RPN code.
import rpncalc
def _E(m):
return int0(npmatrix(diag((1,) * int(m + 1), k=0)[:, :-1]))
def _X(m):
return int0(npmatrix(diag((1,) * int(m), k=-1)[:, :-1]))
def _Del(m):
return int0(npmatrix(diag(xrange(1, int(m)), k=1)[:-1]))
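# Editorial note: illustrative values, not part of the original file.
# _E(2) is the 3x2 matrix [[1, 0], [0, 1], [0, 0]] and _X(2) is
# [[0, 0], [1, 0], [0, 1]]: left-multiplying by them embeds or shifts a
# length-2 coefficient vector into a length-3 one, consistent with how the
# recursion in _CD_RPN below grows the polynomial coefficient matrices.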
class _CD_RPN:
def __init__(self):
self.coeffs = [(npmatrix((-1,)), npmatrix((-1,)))]
self.rpn = [(rpncalc.RPNProgram([-1]), rpncalc.RPNProgram([-1]))]
# In case this class is utilized by multiple threads.
self.lock = Lock()
def getcoeffs(self, m):
# Returns coefficients for $c_{m}$ and $d_{m}$.
# If they already exist in cache, just return what is there.
with self.lock:
if len(self.coeffs) <= m:
# Need to generate coefficients for $c_{m}$ and $d_{m}$.
# Fetch the coefficients for $c_{m-1}$ and $d_{m-1}$.
C, D = self.getcoeffs(m - 1)
if m % 2: # $m$ is odd
C_new = _E(m) * D * _X((m + 1) / 2).transpose() \
- ((1 + m) * _E(m) + 3 * _X(m)
+ 2 * (_E(m) + _X(m)) * _X(m - 1) * _Del(m)) * C \
* _E((m + 1) / 2).transpose()
D_new = _X(m) * C - (m * _E(m) + 2 * _X(m)
+ 2 * (_E(m) + _X(m)) * _X(m - 1) * _Del(m)) * D
else: # $m$ is even
C_new = _E(m) * D * _X(m / 2).transpose() \
- ((1 + m) * _E(m) + 3 * _X(m)
+ 2 * (_E(m) + _X(m)) * _X(m - 1) * _Del(m)) * C
D_new = _X(m) * C - (m * _E(m) + 2 * _X(m)
+ 2 * (_E(m) + _X(m)) * _X(m - 1) * _Del(m)) * D \
* _E(m / 2).transpose()
self.coeffs.append((C_new, D_new))
return self.coeffs[m]
    def __getitem__(self, m):
        n2 = rpncalc.wild("n2")
        v2 = rpncalc.wild("v2")
        mul = rpncalc.rpn_funcs[u"⋅"]
        add = rpncalc.rpn_funcs[u"+"]
        # Returns RPN code for $c_j$ and $d_j$. Generate on the fly if needed.
with self.lock:
while len(self.rpn) <= m:
cm_rpn = []
dm_rpn = []
C, D = self.getcoeffs(len(self.rpn))
# Generate RPN code for $c_j$ and $d_j$.
for row in array(C[::-1]):
npoly_rpn = []
for coeff in row[::-1]:
if coeff:
if len(npoly_rpn):
npoly_rpn.extend([n2, mul])
npoly_rpn.extend([coeff, add])
else:
npoly_rpn.append(coeff)
elif len(npoly_rpn):
npoly_rpn.extend([n2, mul])
if len(cm_rpn):
cm_rpn.extend([v2, mul])
cm_rpn.extend(npoly_rpn)
cm_rpn.append(add)
else:
cm_rpn.extend(npoly_rpn)
for row in array(D[::-1]):
npoly_rpn = []
for coeff in row[::-1]:
if coeff:
if len(npoly_rpn):
npoly_rpn.extend([n2, mul])
npoly_rpn.extend([coeff, add])
else:
npoly_rpn.append(coeff)
elif len(npoly_rpn):
npoly_rpn.extend([n2, mul])
if len(dm_rpn):
dm_rpn.extend([v2, mul])
dm_rpn.extend(npoly_rpn)
dm_rpn.append(add)
else:
dm_rpn.extend(npoly_rpn)
self.rpn.append(
(rpncalc.RPNProgram(cm_rpn), rpncalc.RPNProgram(dm_rpn)))
return self.rpn[m]
class Sderiv:
def __init__(self, alpha):
self.alpha = alpha
def __call__(self, A, ds):
H, W = A.shape
psi = rpncalc.decode(u"« x 3 ^ 4 / +/- 3 x * 4 / + »")
N = ceil(self.alpha / ds)
X = linspace(-N * ds - ds, N * ds + ds, 2 * N + 3)
Psi = psi(x=X / self.alpha)
Psi[X > self.alpha] = psi(x=1)
Psi[X < -self.alpha] = psi(x=-1)
stencil = (Psi[:-2] + Psi[2:] - 2 * Psi[1:-1]) / ds
diff = conv([stencil], A)
return N, N, diff[:, 2 * N:-2 * N]
class PolarBrokenRayInversion(parallel.BaseTaskClass):
_cd = _CD_RPN()
_u = rpncalc.decode(u"« q phi sin ⋅ arcsin »")
_v = rpncalc.decode(u"« q phi sin ⋅ +/- q 2 ^ phi sin 2 ^ ⋅ +/- 1 + √ ÷ »")
_w = rpncalc.decode(u"« i phi u - ⋅ exp »")
_tm = rpncalc.decode(u"« i dm ⋅ n ⋅ cm v ⋅ + dlnr m ^ ⋅ m 2 + ! ÷ »")
_cf = rpncalc.decode(u"« dr r ⋅ v 2 ^ ⋅ phi csc ⋅ s 2 ^ ÷ »")
_invlock = Lock()
def __init__(self, Qf, Phi, smin, smax, alpha, nmax=200):
# Parameters:
# $\mathbf{Qf}$ -- $\mathcal{Q}f$, sampled on an $r\theta$ grid.
# $\mathbf{Phi}$ ($\phi$) -- Scattering angle
# $\mathbf{rmin}$ -- $r_{\min}$, defaults to $1$.
# $\mathbf{rmax}$ -- $r_{\max}$, defaults to $6$.
# $\mathbf{D}$ -- Numerical implemenation of $\frac{\partial}{\partial r}$.
# $\mathbf{nmax}$ -- $n_{\max}$, reconstructs $\tilde{f}\left(r,n\right)$
# for $\left|n\right| \le n_{\max}$. Defaults to $200$.
# This reconstruction will assume that $\mathcal{Q}f$ is real and exploit
# conjugate symmetry in the Fourier series.
# Initialize variables.
self.Qf = Qf
self.Phi = Phi
self.smin = smin
self.smax = smax
H, W = Qf.shape
self.thetamin = thetamin = -pi
self.thetamax = thetamax = pi*(1-2.0/H)
self.nmax = nmax
self.F = None
self.F_cartesian = None
self.lock = Lock()
self.status = Condition(self.lock)
self.jobsdone = 0
self.jobcount = nmax + 1
self.running = False
self.projectioncount = 0
self.projecting = False
self.dr = dr = ds = (smax - smin) / float(W - 1)
self.dtheta = dtheta = (thetamax - thetamin) / float(H)
# Compute $\widetilde{\mathcal{Q}f}$.
self.FQf = FQf = fft(Qf, axis=0)
# Perform differentiation of $\widetilde{\mathcal{Q}f}$.
D = Sderiv(alpha)
try:
clip_left, clip_right, self.DFQf = D(FQf, ds)
except:
clip_left, clip_right, self.DFQf = D(float64(FQf), ds)
# Initialize array that will store $\tilde{f}$.
self.Ff = zeros(self.DFQf.shape, dtype=complex128)
# Initialize $rs$ grid.
self.rmin = self.smin + clip_left * ds
self.rmax = self.smax - clip_right * ds
R = linspace(self.rmin, self.rmax, W - clip_left - clip_right)
self.R, self.S = meshgrid(R, R)
# Compute $q$, $u$, $v$, $w$, and $v^{2}r*\csc(\phi)*{\Delta}r/s^2$.
self.Q = self.S / self.R
args = dict(q=self.Q, r=self.R, s=self.S, phi=self.Phi, dr=dr)
args["u"] = self.U = self._u(**args)
args["v"] = self.V = self._v(**args)
self.W = self._w(**args)
self.Factor = self._cf(**args)
def A(self, n, eps=0.0000001, p=16):
# Compute matrix $\mathbf{A}_n$.
H, W = self.DFQf.shape
# Initialize the An matrix (as an array for now).
An = zeros(self.R.shape, dtype=complex128)
# First compute a partial sum for the upper triangular part.
# Start with $m=0$
mask = self.S < self.R
Sum = zeros(self.R.shape, dtype=complex128)
for m in xrange(0, p + 1, 2):
cm_rpn, dm_rpn = self._cd[m]
Term = self._tm(v=self.V[mask], v2=self.V[mask] ** 2,
dlnr=self.dr / self.R[mask],
n=n, n2=n ** 2, m=m, cm=cm_rpn, dm=dm_rpn)
Sum[mask] += Term
mask[mask] *= abs(Term) >= eps
if not mask.any():
break
mask = self.S < self.R
An[mask] = 2 * self.W[mask] ** n * self.Factor[mask] * Sum[mask]
# Now to do the diagonal.
# Since $r=s$ here, we have $q=1$, $u=\phi$, $v=-\tan\phi$,
# and $w=1$.
mask = self.S == self.R
Sum = zeros(self.R.shape, dtype=complex128)
for m in xrange(0, p + 1):
cm_rpn, dm_rpn = self._cd[m]
Term = self._tm(v=-tan(self.Phi), v2=tan(self.Phi) ** 2,
dlnr=self.dr / self.R[mask],
n=n, n2=n ** 2, m=m, cm=cm_rpn, dm=dm_rpn)
Sum[mask] += Term
mask[mask] *= abs(Term) >= eps
if not mask.any():
break
mask = self.S == self.R
An[mask] = self.Factor[mask] * Sum[mask] + \
array([1 - 1 / cos(self.Phi)] * W)
return npmatrix(An)
def f(self, n):
# This is the function that is run in parallel.
An = self.A(n, eps=10 ** -9, p=24)
DFQf = self.DFQf[n]
#AnInv = inv(An).transpose()
#Ff = array(DFQf*AnInv)[0]
Ff = solve(An, DFQf)
return Ff
def populatequeue(self, queue):
for n in xrange(self.nmax + 1):
queue.put(n)
def postproc(self, (n, Ff)):
with self.status:
self.Ff[n] = Ff
if n > 0:
self.Ff[-n] = Ff.conjugate()
self.jobsdone += 1
self.status.notifyAll()
def reconstruct(self):
with self.lock:
self.F = ifft(self.Ff, axis=0)
            return self.F
<|file_name|>ui.rs<|end_file_name|>
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use clap::{Arg, App, SubCommand};
pub fn build_ui<'a>(app: App<'a, 'a>) -> App<'a, 'a> {
app
.subcommand(SubCommand::with_name("list")
.about("List contacts")
.version("0.1")
.arg(Arg::with_name("filter")
.index(1)
.takes_value(true)
.required(false)
.multiple(true)
.value_name("FILTER")
.help("Filter by these properties (not implemented yet)"))
.arg(Arg::with_name("format")
.long("format")
.takes_value(true)
.required(false)
.multiple(false)
.value_name("FORMAT")
.help("Format to format the listing"))
)
.subcommand(SubCommand::with_name("import")
.about("Import contacts")
.version("0.1")
.arg(Arg::with_name("path")
.index(1)
.takes_value(true)
.required(true)
.multiple(false)
.value_name("PATH")
.help("Import from this file/directory"))
)
.subcommand(SubCommand::with_name("show")
.about("Show contact")
.version("0.1")
.arg(Arg::with_name("hash")
.index(1)
.takes_value(true)
.required(true)
.multiple(false)
.value_name("HASH")
.help("Show the contact pointed to by this reference hash"))
.arg(Arg::with_name("format")
.long("format")
.takes_value(true)
.required(false)
.multiple(false)
.value_name("FORMAT")
.help("Format to format the contact when printing it"))
        )
        .subcommand(SubCommand::with_name("create")
.about("Create a contact file (.vcf) and track it in imag.")
.version("0.1")
.arg(Arg::with_name("file-location")
.short("F")
.long("file")
.takes_value(true)
.required(false)
.multiple(false)
.value_name("PATH")
.help("Create this file. If a directory is passed, a file with a uuid as name will be created. vcf contents are dumped to stdout if this is not passed."))
.arg(Arg::with_name("dont-track")
.short("T")
.long("no-track")
.takes_value(false)
.required(false)
.multiple(false)
.help("Don't track the new vcf file if one is created."))
)
}
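// Editorial note: illustrative call site, not part of the original file.
//
//     let app = build_ui(App::new("imag-contact").version("0.1"));
//     let matches = app.get_matches();
//     if let Some(show) = matches.subcommand_matches("show") {
//         let _hash = show.value_of("hash"); // "hash" is required above
//     }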
<|file_name|>sum_roof_to_leaf.py<|end_file_name|>
#! /usr/bin/python
'''
Given a binary tree containing digits from 0-9 only, each root-to-leaf path could represent a number.
An example is the root-to-leaf path 1->2->3 which represents the number 123.
Find the total sum of all root-to-leaf numbers.
For example,
1
/ \
2 3
The root-to-leaf path 1->2 represents the number 12.
The root-to-leaf path 1->3 represents the number 13.
Return the sum = 12 + 13 = 25.
'''
from node_struct import TreeNode
class Solution:
def leafNode(self, root):
if not root.left and not root.right:
return True
return False
def inOrderTraversal(self, root, currentPath, path):
if not root:
return
# visit()
currentPath = 10 * currentPath + root.val
if self.leafNode(root):
path.append(currentPath)
else:
self.inOrderTraversal(root.left, currentPath, path)
self.inOrderTraversal(root.right, currentPath, path)
# @param root, a tree node
# @return an integer
def sumNumbers(self, root):
path = list()
self.inOrderTraversal(root, 0, path)
        return sum(path)

if __name__ == '__main__':
    solution = Solution()
    root = TreeNode(1)
    root.left = TreeNode(2)
root.right = TreeNode(3)
root.left.right = TreeNode(4)
root.left.left = TreeNode(5)
print solution.sumNumbers(root)
    print solution.sumNumbers(None)
<|file_name|>ossHandler.py<|end_file_name|>
#!/usr/bin/env python
#coding=utf-8
# Copyright (C) 2011, Alibaba Cloud Computing
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from oss.oss_api import *
from oss.oss_util import *
from oss.oss_xml_handler import *
from aliyunCliParser import aliyunCliParser
import signal
import ConfigParser
from optparse import OptionParser
from optparse import Values
import os
import re
import time
import Queue
import sys
import socket
import shutil
reload(sys)
sys.setdefaultencoding("utf-8")
CMD_LIST = {}
HELP_CMD_LIST = ['--help','-h','help']
ACL_LIST = ['private', 'public-read', 'public-read-write']
OSS_PREFIX = 'oss://'
CONFIGFILE = "%s/.aliyuncli/osscredentials" % os.path.expanduser('~')
CONFIGSECTION = 'OSSCredentials'
DEFAUL_HOST = "oss.aliyuncs.com"
OSS_HOST = DEFAUL_HOST
ID = ""
KEY = ""
STS_TOKEN = None
TOTAL_PUT = AtomicInt()
PUT_OK = AtomicInt()
PUT_FAIL = AtomicInt()
PUT_SKIP = AtomicInt()
FILE_NUM_TOTAL = AtomicInt()
FILE_NUM_OK = AtomicInt()
GET_OK = AtomicInt()
GET_FAIL = AtomicInt()
GET_SKIP = AtomicInt()
DELETE_OK = AtomicInt()
COPY_OK = AtomicInt()
SEND_BUF_SIZE = 8192
RECV_BUF_SIZE = 1024*1024*10
MAX_OBJECT_SIZE = 5*1024*1024*1024
MAX_RETRY_TIMES = 3
IS_DEBUG = False
ERROR_FILE_LIST = []
AUTO_DUMP_FILE_NUM = 50
RET_OK = 0
RET_FAIL = -1
RET_SKIP = 1
lock = threading.Lock()
HELP = \
'''The valid commands are as follows:
GetAllBucket
CreateBucket oss://bucket --acl [acl] --location [location]
DeleteBucket oss://bucket
DeleteWholeBucket oss://bucket
GetBucketLocation oss://bucket
PutBucketCors oss://bucket localfile
GetBucketCors oss://bucket
DeleteBucketCors oss://bucket
PutBucketLogging oss://source_bucket oss://target_bucket/[prefix]
GetBucketLogging oss://bucket
DeleteBucketLogging oss://bucket
PutBucketWebsite oss://bucket indexfile [errorfile]
GetBucketWebsite oss://bucket
DeleteBucketWebsite oss://bucket
PutBucketLifeCycle oss://bucket localfile
GetBucketLifeCycle oss://bucket
DeleteBucketLifeCycle oss://bucket
PutBucketReferer oss://bucket --allow_empty_referer true --referer "referer1,referer2,...,refererN"
GetBucketReferer oss://bucket
GetAcl oss://bucket
SetAcl oss://bucket --acl [acl]
allow private, public-read, public-read-write
List oss://bucket/[prefix] [marker] [delimiter] [maxkeys]
oss://bucket/[prefix] --marker xxx --delimiter xxx --maxkeys xxx
MkDir oss://bucket/dirname
ListAllObject oss://bucket/[prefix]
ListAllDir oss://bucket/[prefix]
DeleteAllObject oss://bucket/[prefix] --force false
DownloadAllObject oss://bucket/[prefix] localdir --replace false --thread_num 5
DownloadToDir oss://bucket/[prefix] localdir --replace false --temp_dir xxx --thread_num 5
UploadObjectFromLocalDir localdir oss://bucket/[prefix] --check_point check_point_file --replace false --check_md5 false --thread_num 5
Put oss://bucket/object --content_type [content_type] --headers \"key1:value1#key2:value2\" --check_md5 false
Get oss://bucket/object localfile
MultiGet oss://bucket/object localfile --thread_num 5
Cat oss://bucket/object
Meta oss://bucket/object
Info oss://bucket/object
Copy oss://source_bucket/source_object oss://target_bucket/target_object --headers \"key1:value1#key2:value2\"
CopyLargeFile oss://source_bucket/source_object oss://target_bucket/target_object --part_size 10*1024*1024 --upload_id xxx
CopyBucket oss://source_bucket/[prefix] oss://target_bucket/[prefix] --headers \"key1:value1\" --replace false
Delete oss://bucket/object
SignUrl oss://bucket/object --timeout [timeout_seconds]
CreateLinkFromFile oss://bucket/object object_name_list_file
CreateLink oss://bucket/object object1 object2 ... objectN
GetLinkIndex oss://bucket/object
Options oss://bucket/[object] --origin xxx --method [GET, PUT, DELETE, HEAD, POST]
UploadDisk localdir oss://bucket/[prefix] [--check_point check_point_file --filename filename_file --replace false --content_type xxx --skip_dir false --skip_suffix false --out xxx] --device_id xxx --check_md5 false
Init oss://bucket/object
ListPart oss://bucket/object --upload_id xxx
ListParts oss://bucket
GetAllPartSize oss://bucket
Cancel oss://bucket/object --upload_id xxx
MultiUpload localfile oss://bucket/object --upload_id xxx --thread_num 10 --max_part_num 1000 --check_md5 false
UploadPartFromFile localfile oss://bucket/object --upload_id xxx --part_number xxx
UploadPartFromString oss://bucket/object --upload_id xxx --part_number xxx --data xxx
Config --host oss.aliyuncs.com --accessid accessid --accesskey accesskey --sts_token token
'''
def print_result(cmd, res):
'''
    Print HTTP Response if failed.
'''
try:
if res.status / 100 == 2:
pass
else:
body = res.read()
print "Error Headers:\n"
print res.getheaders()
print "Error Body:\n"
print body[0:1024]
print "Error Status:\n"
print res.status
print cmd, "Failed!"
if res.status == 403:
check_endpoint_error(body)
exit(-1)
except AttributeError:
pass
def format_size(size):
size = float(size)
coeffs = ['K', 'M', 'G', 'T']
coeff = ""
while size > 2048:
size /= 1024
coeff = coeffs.pop(0)
return str("%.2f"%size) + coeff + "B"
def format_utf8(string):
string = smart_code(string)
if isinstance(string, unicode):
string = string.encode('utf-8')
return string
def split_path(path):
if not path.lower().startswith(OSS_PREFIX):
print "%s parameter %s invalid, " \
"must be start with %s" % \
(args[0], args[1], OSS_PREFIX)
sys.exit(1)
pather = path[len(OSS_PREFIX):].split('/')
return pather
def check_upload_id(upload_id):
upload_id_len = 32
if len(upload_id) != upload_id_len:
print "upload_id is a 32-bit string generated by OSS"
print "you can get valid upload_id by init or listparts command"
sys.exit(1)
def check_bucket(bucket):
if len(bucket) == 0:
print "Bucket should not be empty!"
print "Please input oss://bucket"
sys.exit(1)
def check_object(object):
if len(object) == 0:
print "Object should not be empty!"
print "Please input oss://bucket/object"
sys.exit(1)
if object.startswith("/"):
print "object name should not begin with / "
sys.exit(-1)
def check_localfile(localfile):
if not os.path.isfile(localfile):
print "%s is not existed!" % localfile
sys.exit(1)
def check_args(argv, args=None):
if not args:
args = []
if len(args) < argv:
print "%s miss parameters" % args[0]
sys.exit(1)
def check_bucket_object(bucket, object):
check_bucket(bucket)
check_object(object)
def parse_bucket_object(path):
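    '''
    Split an oss:// path into (bucket, object), e.g.
    parse_bucket_object("oss://mybucket/dir/a.txt") -> ("mybucket", "dir/a.txt")
    parse_bucket_object("oss://mybucket") -> ("mybucket", "")
    '''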
pather = split_path(path)
bucket = ""
object = ""
if len(pather) > 0:
bucket = pather[0]
if len(pather) > 1:
object += '/'.join(pather[1:])
object = smart_code(object)
if object.startswith("/"):
print "object name SHOULD NOT begin with /"
sys.exit(1)
return (bucket, object)
def parse_bucket(path):
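    '''
    Extract only the bucket name, e.g.
    parse_bucket("oss://mybucket/dir/a.txt") -> "mybucket"
    '''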
bucket = path
if bucket.startswith(OSS_PREFIX):
bucket = bucket[len(OSS_PREFIX):]
tmp_list = bucket.split("/")
if len(tmp_list) > 0:
bucket = tmp_list[0]
return bucket
def check_endpoint_error(xml_string):
try:
xml = minidom.parseString(xml_string)
end_point = get_tag_text(xml, 'Endpoint')
if end_point:
print 'You should send all request to %s' % end_point
except:
pass
def cmd_listing(args, options):
if len(args) == 1:
return cmd_getallbucket(args, options)
(bucket, object) = parse_bucket_object(args[1])
if len(bucket) == 0:
return cmd_getallbucket(args, options)
prefix = object
marker = ''
delimiter = ''
maxkeys = 1000
if options.marker:
marker = options.marker
if options.delimiter:
delimiter = options.delimiter
if options.maxkeys:
maxkeys = options.maxkeys
if len(args) == 3:
marker = args[2]
elif len(args) == 4:
marker = args[2]
delimiter = args[3]
elif len(args) >= 5:
marker = args[2]
delimiter = args[3]
maxkeys = args[4]
prefix = smart_code(prefix)
marker = smart_code(marker)
delimiter = smart_code(delimiter)
maxkeys = smart_code(maxkeys)
exclude = options.exclude
res = get_oss().get_bucket(bucket, prefix, marker, delimiter, maxkeys)
if (res.status / 100) == 2:
body = res.read()
hh = GetBucketXml(body)
(fl, pl) = hh.list()
print "prefix list is: "
for i in pl:
if exclude and i.startswith(exclude):
continue
print i
print "object list is: "
for i in fl:
if len(i) == 7:
try:
if exclude and i[0].startswith(exclude):
continue
print "%16s %6s %8s %s/%s" % (convert_to_localtime(i[1]), format_size((int)(i[3])), i[6], OSS_PREFIX + bucket, i[0])
except:
print "Exception when print :", i
print "\nprefix list number is: %s " % len(pl)
print "object list number is: %s " % len(fl)
return res
def cmd_listparts(args, options):
if len(args) == 1:
return cmd_getallbucket(args, options)
(bucket, object) = parse_bucket_object(args[1])
if len(bucket) == 0:
return cmd_getallbucket(args, options)
print "%20s %20s %20s" % ("UploadId", "Path", "InitTime")
for i in get_all_upload_id_list(get_oss(), bucket, object):
print "%20s oss://%s/%s %20s" % (i[1], bucket, i[0], convert_to_localtime(i[2]))
def cmd_getallpartsize(args, options):
if len(args) == 1:
return cmd_getallbucket(args, options)
(bucket, object) = parse_bucket_object(args[1])
if len(bucket) == 0:
return cmd_getallbucket(args, options)
total_part_size = 0
print "%5s %20s %20s %s" % ("Number", "UploadId", "Size", "Path")
for i in get_all_upload_id_list(get_oss(), bucket):
upload_id = i[1]
object = i[0]
for i in get_part_list(get_oss(), bucket, object, upload_id):
part_size = (int)(i[2])
total_part_size += part_size
print "%5s %20s %10s oss://%s/%s" % (i[0], upload_id, format_size(part_size), bucket, object)
print "totalsize is: real:%s, format:%s " % (total_part_size, format_size(total_part_size))
def cmd_init_upload(args, options):
check_args(2, args)
path = args[1]
(bucket, object) = parse_bucket_object(path)
check_bucket_object(bucket, object)
upload_id = get_upload_id(get_oss(), bucket, object)
print 'Upload Id: %s' % (upload_id)
def cmd_listpart(args, options):
if len(args) == 1:
return cmd_getallbucket(args, options)
path = args[1]
(bucket, object) = parse_bucket_object(path)
if len(bucket) == 0:
return cmd_getallbucket(args, options)
if options.upload_id is None:
print "upload_id invalid, please set with --upload_id=xxx"
sys.exit(1)
print "%5s %32s %20s %20s" % ("PartNumber".ljust(10), "ETag".ljust(34), "Size".ljust(20), "LastModifyTime".ljust(32))
for i in get_part_list(get_oss(), bucket, object, options.upload_id):
if len(i) >= 4:
print "%s %s %s %s" % (str(i[0]).ljust(10), str(i[1]).ljust(34), str(i[2]).ljust(20), str(i[3]).ljust(32))
def cmd_upload_part_from_file(args, options):
check_args(3, args)
localfile = args[1]
check_localfile(localfile)
path = args[2]
(bucket, object) = parse_bucket_object(path)
check_bucket_object(bucket, object)
if options.upload_id is None:
print "upload_id invalid, please set with --upload_id=xxx"
sys.exit(1)
if options.part_number is None:
print "part_number invalid, please set with --part_number=xxx"
sys.exit(1)
res = get_oss().upload_part(bucket, object, localfile, options.upload_id, options.part_number)
return res
def cmd_upload_part_from_string(args, options):
check_args(2, args)
path = args[1]
(bucket, object) = parse_bucket_object(path)
check_bucket_object(bucket, object)
if options.upload_id is None:
print "upload_id invalid, please set with --upload_id=xxx"
sys.exit(1)
if options.part_number is None:
print "part_number invalid, please set with --part_number=xxx"
sys.exit(1)
if options.data is None:
print "data invalid, please set with --data=xxx"
sys.exit(1)
res = get_oss().upload_part_from_string(bucket, object, options.data, options.upload_id, options.part_number)
return res
def cmd_listallobject(args, options):
if len(args) == 1:
return cmd_getallbucket(args, options)
path = args[1]
(bucket, object) = parse_bucket_object(path)
if len(bucket) == 0:
return cmd_getallbucket(args, options)
prefix = object
marker = ""
total_object_num = 0
totalsize = 0
totaltimes = 0
delimiter = ''
maxkeys = '1000'
if options.out:
f = open(options.out, "w")
while 1:
res = get_oss().get_bucket(bucket, prefix, marker, delimiter, maxkeys)
if res.status != 200:
return res
body = res.read()
(tmp_object_list, marker) = get_object_list_marker_from_xml(body)
for i in tmp_object_list:
object = i[0]
length = i[1]
last_modify_time = i[2]
total_object_num += 1
totalsize += (int)(length)
if options.exclude:
exclude = options.exclude
if object.startswith(exclude):
continue
msg = "%s%s/%s" % (OSS_PREFIX, bucket, object)
print "%16s %6s %s/%s " % (convert_to_localtime(last_modify_time), format_size(length), OSS_PREFIX + bucket, object)
if options.out:
f.write(msg)
f.write("\n")
totaltimes += 1
if len(marker) == 0:
break
if options.out:
f.close()
print "the object list result is saved into %s" % options.out
print "object list number is: %s " % total_object_num
print "totalsize is: real:%s, format:%s " % (totalsize, format_size(totalsize))
print "request times is: %s" % totaltimes
return res
def cmd_listalldir(args, options):
if len(args) == 1:
return cmd_getallbucket(args, options)
path = args[1]
(bucket, object) = parse_bucket_object(path)
if len(bucket) == 0:
return cmd_getallbucket(args, options)
prefix = object
if prefix and not prefix.endswith("/"):
prefix = "%s/" % prefix
marker = ""
total_object_num = 0
totalsize = 0
totaltimes = 0
delimiter = '/'
maxkeys = '1000'
while 1:
res = get_oss().get_bucket(bucket, prefix, marker, delimiter, maxkeys)
if res.status != 200:
return res
body = res.read()
(tmp_object_list, marker) = get_dir_list_marker_from_xml(body)
for i in tmp_object_list:
if i.endswith("/"):
i = i[:-1]
msg = "%s" % (os.path.basename(i))
print msg
total_object_num += 1
totaltimes += 1
if len(marker) == 0:
break
print "\ncommon prefix list number is: %s " % total_object_num
print "request times is: %s" % totaltimes
return res
def get_object(bucket, object, object_prefix, local_path, length, last_modify_time, replace, retry_times = MAX_RETRY_TIMES, temp_dir = None):
'''
return RET_OK, RET_FAIL, RET_SKIP
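    RET_SKIP is returned when the local file already exists with the same
    size and a newer mtime than the OSS copy, unless replace is True.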
'''
show_bar = False
object = smart_code(object)
tmp_object = object
if object_prefix == object[:len(object_prefix)]:
tmp_object = object[len(object_prefix):]
while 1:
if not tmp_object.startswith("/"):
break
tmp_object = tmp_object[1:]
localfile = os.path.join(local_path, tmp_object)
localfile = smart_code(localfile)
temp_filename = ''
if temp_dir:
temp_filename = get_unique_temp_filename(temp_dir, localfile)
for i in xrange(retry_times):
try:
if os.path.isfile(localfile):
if replace:
os.remove(localfile)
else:
t1 = last_modify_time
t2 = (int)(os.path.getmtime(localfile))
if (int)(length) == os.path.getsize(localfile) and t1 < t2:
                        #skip downloading this object: same size and the local copy is newer
print "no need to get %s/%s to %s" % (bucket, object, localfile)
return RET_SKIP
else:
try:
dirname = os.path.dirname(localfile)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if temp_dir:
dirname = os.path.dirname(temp_filename)
if not os.path.isdir(dirname):
os.makedirs(dirname)
except:
pass
filename = localfile
if temp_dir:
filename = temp_filename
if os.path.isdir(filename):
print "no need to get %s/%s to %s" % (bucket, object, filename)
return RET_SKIP
ret = continue_get(bucket, object, filename)
if ret:
print "get %s/%s to %s OK" % (bucket, object, localfile)
if temp_dir:
shutil.move(temp_filename, localfile)
pass
return RET_OK
else:
print "get %s/%s to %s FAIL" % (bucket, object, localfile)
except:
print "get %s/%s to %s exception" % (bucket, object, localfile)
print sys.exc_info()[0], sys.exc_info()[1]
            if temp_filename and os.path.isfile(temp_filename):
                os.remove(temp_filename)
return RET_FAIL
class DownloadObjectWorker(threading.Thread):
def __init__(self, retry_times, queue):
threading.Thread.__init__(self)
self.queue = queue
self.retry_times = retry_times
self.ok_num = 0
self.fail_num = 0
self.skip_num = 0
def run(self):
while 1:
try:
(get_object, bucket, object, object_prefix, local_path, length, last_modify_time, replace, retry_times, temp_dir) = self.queue.get(block=False)
ret = get_object(bucket, object, object_prefix, local_path, length, last_modify_time, replace, self.retry_times, temp_dir)
if ret == RET_OK:
self.ok_num += 1
elif ret == RET_SKIP:
self.skip_num += 1
else:
self.fail_num += 1
self.queue.task_done()
except Queue.Empty:
break
except:
self.fail_num += 1
print sys.exc_info()[0], sys.exc_info()[1]
self.queue.task_done()
global GET_SKIP
global GET_OK
global GET_FAIL
lock.acquire()
GET_SKIP += self.skip_num
GET_OK += self.ok_num
GET_FAIL += self.fail_num
lock.release()
def cmd_downloadallobject(args, options):
check_args(3, args)
path = args[1]
(bucket, object) = parse_bucket_object(path)
check_bucket(bucket)
local_path = args[2]
if os.path.isfile(local_path):
print "%s is not dir, please input localdir" % local_path
exit(-1)
replace = False
if options.replace is not None and options.replace.lower() == "true":
replace = True
prefix = object
thread_num = 5
if options.thread_num:
thread_num = (int)(options.thread_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
temp_dir = None
if options.temp_dir:
temp_dir = options.temp_dir
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
marker = ""
delimiter = ''
maxkeys = '1000'
handled_obj_num = 0
while 1:
queue = Queue.Queue(0)
for i in xrange(0, retry_times):
res = get_oss().get_bucket(bucket, prefix, marker, delimiter, maxkeys)
if res.status/100 == 5:
continue
else:
break
if res.status != 200:
return res
body = res.read()
(tmp_object_list, marker) = get_object_list_marker_from_xml(body)
for i in tmp_object_list:
object = i[0]
length = i[1]
last_modify_time = format_unixtime(i[2])
if str(length) == "0" and object.endswith("/"):
continue
handled_obj_num += 1
queue.put((get_object, bucket, object, prefix, local_path, length, last_modify_time, replace, MAX_RETRY_TIMES, temp_dir))
thread_pool = []
for i in xrange(thread_num):
current = DownloadObjectWorker(retry_times, queue)
thread_pool.append(current)
current.start()
queue.join()
for item in thread_pool:
item.join()
if len(marker) == 0:
break
global GET_OK
global GET_SKIP
global GET_FAIL
print "Total being downloaded objects num: %s, they are downloaded into %s" % (GET_OK + GET_FAIL + GET_SKIP, local_path)
print "OK num:%s, SKIP num:%s, FAIL num:%s" % (GET_OK, GET_SKIP, GET_FAIL)
if temp_dir and os.path.abspath(local_path) != os.path.abspath(temp_dir):
shutil.rmtree(temp_dir, True)
if GET_FAIL != 0:
exit(-1)
def put_object(bucket, object, local_file, local_modify_time, is_replace, is_check_md5=False, content_type="", multipart_threshold=100*1024*1024, retry_times=2):
'''
return RET_OK, RET_FAIL, RET_SKIP
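    RET_SKIP is returned when the object already exists on OSS with the
    same size and an mtime no older than the local file, unless is_replace
    is True. Files larger than multipart_threshold are sent via multipart
    upload; smaller files use a single PUT.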
'''
if not os.path.isfile(local_file):
print "upload %s FAIL, no such file." % (local_file)
return RET_FAIL
show_bar = False
oss = get_oss(show_bar)
object = smart_code(object)
if len(object) == 0:
print "object is empty when put /%s/%s, skip" % (bucket, object)
return RET_SKIP
local_file_size = os.path.getsize(local_file)
if not is_replace:
try:
res = oss.head_object(bucket, object)
if res.status == 200 and str(local_file_size) == res.getheader('content-length'):
oss_gmt = res.getheader('last-modified')
format = "%a, %d %b %Y %H:%M:%S GMT"
oss_last_modify_time = format_unixtime(oss_gmt, format)
if not local_modify_time:
local_modify_time = (int)(os.path.getmtime(local_file))
if oss_last_modify_time >= local_modify_time:
#print "upload %s is skipped" % (local_file)
return RET_SKIP
except:
print "%s %s" % (sys.exc_info()[0], sys.exc_info()[1])
if is_check_md5:
md5string, base64md5 = get_file_md5(local_file)
for i in xrange(retry_times):
try:
if local_file_size > multipart_threshold:
upload_id = ""
thread_num = 5
max_part_num = 10000
headers = {}
if is_check_md5:
headers['x-oss-meta-md5'] = md5string
if content_type:
headers['Content-Type'] = content_type
res = oss.multi_upload_file(bucket, object, local_file, upload_id, thread_num, max_part_num, headers, check_md5=is_check_md5)
else:
headers = {}
if is_check_md5:
headers['Content-MD5'] = base64md5
headers['x-oss-meta-md5'] = md5string
res = oss.put_object_from_file(bucket, object, local_file, content_type, headers)
if 200 == res.status:
return RET_OK
else:
print "upload %s to /%s/%s FAIL, status:%s, request-id:%s" % (local_file, bucket, object, res.status, res.getheader("x-oss-request-id"))
except:
print "upload %s/%s from %s exception" % (bucket, object, local_file)
print sys.exc_info()[0], sys.exc_info()[1]
return RET_FAIL
class UploadObjectWorker(threading.Thread):
def __init__(self, check_point_file, retry_times, queue):
threading.Thread.__init__(self)
self.check_point_file = check_point_file
self.queue = queue
self.file_time_map = {}
self.error_file_list = []
self.retry_times = retry_times
self.ok_num = 0
self.fail_num = 0
self.skip_num = 0
def run(self):
global PUT_SKIP
global PUT_OK
global PUT_FAIL
global TOTAL_PUT
global FILE_NUM_OK
while 1:
try:
(put_object, bucket, object, local_file, local_modify_time, is_replace, is_check_md5, content_type, multipart_threshold) = self.queue.get(block=False)
ret = put_object(bucket, object, local_file, local_modify_time, is_replace, is_check_md5, content_type, multipart_threshold, self.retry_times)
is_ok = False
if ret == RET_OK:
is_ok = True
self.ok_num += 1
PUT_OK += 1
FILE_NUM_OK += 1
elif ret == RET_SKIP:
is_ok = True
self.skip_num += 1
PUT_SKIP += 1
FILE_NUM_OK += 1
else:
self.fail_num += 1
PUT_FAIL += 1
self.error_file_list.append(local_file)
if is_ok:
local_file_full_path = os.path.abspath(local_file)
local_file_full_path = format_utf8(local_file_full_path)
self.file_time_map[local_file_full_path] = (int)(os.path.getmtime(local_file))
                sum = (PUT_SKIP + PUT_OK + PUT_FAIL)
                if TOTAL_PUT > 0:
                    rate = 100 * sum / (TOTAL_PUT * 1.0)
                else:
                    rate = 0
                print '\rOK:%s, FAIL:%s, SKIP:%s, TOTAL_DONE:%s, TOTAL_TO_DO:%s, PROCESS:%.2f%%' % (PUT_OK, PUT_FAIL, PUT_SKIP, sum, TOTAL_PUT, rate),
sys.stdout.flush()
if self.ok_num % AUTO_DUMP_FILE_NUM == 0:
if len(self.file_time_map) != 0:
dump_check_point(self.check_point_file, self.file_time_map)
self.file_time_map = {}
self.queue.task_done()
except Queue.Empty:
break
except:
PUT_FAIL += 1
print sys.exc_info()[0], sys.exc_info()[1]
self.queue.task_done()
if len(self.error_file_list) != 0:
lock.acquire()
ERROR_FILE_LIST.extend(self.error_file_list)
lock.release()
if len(self.file_time_map) != 0:
dump_check_point(self.check_point_file, self.file_time_map)
def load_check_point(check_point_file):
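    '''
    Load the upload check point file into {absolute_file_path: mtime}.
    Each line has the form "<mtime>#<absolute_file_path>", e.g.
    "1418268116#/data/pic/1.jpg"; the newest timestamp wins on duplicates.
    '''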
file_time_map = {}
if os.path.isfile(check_point_file):
f = open(check_point_file)
for line in f:
line = line.strip()
tmp_list = line.split('#')
if len(tmp_list) > 1:
time_stamp = (float)(tmp_list[0])
time_stamp = (int)(time_stamp)
#file_name = "".join(tmp_list[1:])
file_name = line[len(tmp_list[0])+1:]
file_name = format_utf8(file_name)
if file_time_map.has_key(file_name) and file_time_map[file_name] > time_stamp:
continue
file_time_map[file_name] = time_stamp
f.close()
return file_time_map
def load_filename(filename_file):
filenames = []
if os.path.isfile(filename_file):
f = open(filename_file)
for line in f:
line = line.strip()
filenames.append(line)
return filenames
def dump_filename(filename_file, filenames=None):
if len(filename_file) == 0 or len(filenames) == 0:
return
try:
f = open(filename_file,"w")
for filename in filenames:
line = "%s\n" %(filename)
f.write(line)
except:
pass
try:
f.close()
except:
pass
def dump_check_point(check_point_file, result_map=None):
if len(check_point_file) == 0 or len(result_map) == 0:
return
lock.acquire()
old_file_time_map = {}
if os.path.isfile(check_point_file):
old_file_time_map = load_check_point(check_point_file)
try:
f = open(check_point_file,"w")
for k, v in result_map.items():
if old_file_time_map.has_key(k) and old_file_time_map[k] < v:
del old_file_time_map[k]
line = "%s#%s\n" % (v, k)
line = format_utf8(line)
f.write(line)
for k, v in old_file_time_map.items():
line = "%s#%s\n" % (v, k)
line = format_utf8(line)
f.write(line)
except:
pass
try:
f.close()
except:
pass
lock.release()
def format_object(object):
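    '''
    Normalize a path into an OSS object name: os.sep becomes '/', empty
    segments are dropped and '//' runs are collapsed, e.g. (on posix)
    format_object("a//b/") -> "a/b".
    '''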
tmp_list = object.split(os.sep)
object = "/".join(x for x in tmp_list if x.strip() and x != "/")
while 1:
if object.find('//') == -1:
break
object = object.replace('//', '/')
return object
def get_object_name(filename, filepath):
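    '''
    Despite its name, this returns only the base file name of the
    normalized path, e.g. get_object_name("dir/sub/a.txt", "dir/") -> "a.txt".
    '''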
filename = format_object(filename)
filepath = format_object(filepath)
file_name = os.path.basename(filename)
return file_name
def get_file_names_from_disk(path, topdown):
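    '''
    Walk the directory tree under path and return the full paths of all
    regular files found.
    '''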
filenames = []
for root, dirs, files in os.walk(path, topdown):
for filespath in files:
filename = os.path.join(root, filespath)
filenames.append(filename)
return filenames
#for offline uploadfile to oss
def cmd_upload_disk(args, options):
check_args(3, args)
path = args[2]
(bucket, object) = parse_bucket_object(path)
check_bucket(bucket)
local_path = args[1]
if not os.path.isdir(local_path):
print "%s is not dir, please input localdir" % local_path
exit(-1)
if not local_path.endswith(os.sep):
local_path = "%s%s" % (local_path, os.sep)
if not options.device_id:
print "please set device id with --device_id=xxx"
exit(-1)
check_point_file = ""
is_check_point = False
file_time_map = {}
if options.check_point:
is_check_point = True
check_point_file = options.check_point
file_time_map = load_check_point(check_point_file)
filename_file = ""
filenames = []
is_filename_file = False
if options.filename_list:
filename_file = options.filename_list
if os.path.isfile(filename_file):
is_filename_file = True
filenames = load_filename(filename_file)
prefix = object
is_replace = False
if options.replace:
if options.replace.lower() == "true":
is_replace = True
thread_num = 5
if options.thread_num:
thread_num = (int)(options.thread_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
is_check_md5 = False
if options.check_md5:
if options.check_md5.lower() == "true":
is_check_md5 = True
multipart_threshold = 100*1024*1024
if options.multipart_threshold:
multipart_threshold = (int)(options.multipart_threshold)
total_upload_num = 0
topdown = True
def process_localfile(items):
queue = Queue.Queue(0)
for local_file in items:
if os.path.isfile(local_file):
local_modify_time = 0
local_file_full_path = os.path.abspath(local_file)
local_file_full_path = format_utf8(local_file_full_path)
if is_check_point and file_time_map.has_key(local_file_full_path):
local_modify_time = (int)(os.path.getmtime(local_file))
record_modify_time = file_time_map[local_file_full_path]
if local_modify_time <= record_modify_time:
                        print 'file:%s already uploaded' % (local_file_full_path)
global FILE_NUM_OK
FILE_NUM_OK += 1
continue
if options.skip_dir and options.skip_dir.lower() == "true":
object = smart_code(os.path.basename(local_file))
else:
object = smart_code(local_file)
if options.strip_dir:
strip_dir = options.strip_dir
if not strip_dir.endswith("/"):
strip_dir = "%s/" % strip_dir
if object.startswith(strip_dir):
                        object = object[len(strip_dir):]
if options.skip_suffix and options.skip_suffix.lower() == "true":
pos = object.rfind(".")
if pos != -1:
object = object[:pos]
while 1:
if object.startswith("/"):
object = object[1:]
else:
break
if prefix:
if prefix.endswith("/"):
object = "%s%s" % (prefix, object)
else:
object = "%s/%s" % (prefix, object)
queue.put((put_object, bucket, object, local_file, local_modify_time, is_replace, is_check_md5, options.content_type, multipart_threshold))
qsize = queue.qsize()
global TOTAL_PUT
TOTAL_PUT += qsize
thread_pool = []
for i in xrange(thread_num):
current = UploadObjectWorker(check_point_file, retry_times, queue)
thread_pool.append(current)
current.start()
queue.join()
for item in thread_pool:
item.join()
return qsize
if not is_filename_file:
        filenames = get_file_names_from_disk(local_path, topdown)
dump_filename(filename_file, filenames)
global FILE_NUM_TOTAL
FILE_NUM_TOTAL += len(filenames)
    total_upload_num += process_localfile(filenames)
print ""
print "DEVICEID:sn%s" % options.device_id
global PUT_OK
global PUT_SKIP
global PUT_FAIL
print "This time Total being uploaded localfiles num: %s" % (PUT_OK + PUT_SKIP + PUT_FAIL)
print "This time OK num:%s, SKIP num:%s, FAIL num:%s" % (PUT_OK, PUT_SKIP, PUT_FAIL)
print "Total file num:%s, OK file num:%s" %(FILE_NUM_TOTAL, FILE_NUM_OK)
if PUT_FAIL != 0:
print "FailUploadList:"
for i in set(ERROR_FILE_LIST):
print i
if options.out:
try:
f = open(options.out, "w")
for i in set(ERROR_FILE_LIST):
f.write("%s\n" % i.strip())
f.close()
print "FailUploadList is written into %s" % options.out
except:
print "write upload failed file exception"
print sys.exc_info()[0], sys.exc_info()[1]
exit(-1)
def cmd_upload_object_from_localdir(args, options):
check_args(3, args)
path = args[2]
(bucket, object) = parse_bucket_object(path)
check_bucket(bucket)
local_path = args[1]
if not os.path.isdir(local_path):
print "%s is not dir, please input localdir" % local_path
exit(-1)
if not local_path.endswith(os.sep):
local_path = "%s%s" % (local_path, os.sep)
is_check_point = False
check_point_file = ""
file_time_map = {}
if options.check_point:
is_check_point = True
check_point_file = options.check_point
file_time_map = load_check_point(check_point_file)
prefix = object
is_replace = False
if options.replace:
if options.replace.lower() == "true":
is_replace = True
is_check_md5 = False
if options.check_md5:
if options.check_md5.lower() == "true":
is_check_md5 = True
thread_num = 5
if options.thread_num:
thread_num = (int)(options.thread_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
multipart_threshold = 100*1024*1024
if options.multipart_threshold:
multipart_threshold = (int)(options.multipart_threshold)
total_upload_num = 0
topdown = True
def process_localfile(items):
queue = Queue.Queue(0)
for item in items:
local_file = os.path.join(root, item)
if os.path.isfile(local_file):
local_file_full_path = os.path.abspath(local_file)
local_file_full_path = format_utf8(local_file_full_path)
local_modify_time = 0
if is_check_point and file_time_map.has_key(local_file_full_path):
local_modify_time = (int)(os.path.getmtime(local_file))
record_modify_time = file_time_map[local_file_full_path]
if local_modify_time <= record_modify_time:
continue
object = get_object_name(smart_code(local_file), smart_code(local_path))
if prefix:
if prefix.endswith("/"):
object = "%s%s" % (prefix, object)
else:
object = "%s/%s" % (prefix, object)
content_type = ''
queue.put((put_object, bucket, object, local_file, local_modify_time, is_replace, is_check_md5, content_type, multipart_threshold))
qsize = queue.qsize()
thread_pool = []
global TOTAL_PUT
TOTAL_PUT += qsize
for i in xrange(thread_num):
current = UploadObjectWorker(check_point_file, retry_times, queue)
thread_pool.append(current)
current.start()
queue.join()
for item in thread_pool:
item.join()
return qsize
for root, dirs, files in os.walk(local_path, topdown):
total_upload_num += process_localfile(files)
total_upload_num += process_localfile(dirs)
global PUT_OK
global PUT_SKIP
global PUT_FAIL
print ""
print "Total being uploaded localfiles num: %s" % (PUT_OK + PUT_SKIP + PUT_FAIL)
print "OK num:%s, SKIP num:%s, FAIL num:%s" % (PUT_OK, PUT_SKIP, PUT_FAIL)
if PUT_FAIL != 0:
exit(-1)
def get_object_list_marker_from_xml(body):
#return ([(object, object_length, last_modify_time)...], marker)
object_meta_list = []
next_marker = ""
hh = GetBucketXml(body)
(fl, pl) = hh.list()
if len(fl) != 0:
for i in fl:
object = convert_utf8(i[0])
last_modify_time = i[1]
length = i[3]
object_meta_list.append((object, length, last_modify_time))
if hh.is_truncated:
next_marker = hh.nextmarker
return (object_meta_list, next_marker)
def cmd_deleteallobject(args, options):
if len(args) == 1:
return cmd_getallbucket(args, options)
path = args[1]
(bucket, object) = parse_bucket_object(path)
if len(bucket) == 0:
return cmd_getallbucket(args, options)
force_delete = False
if options.force and options.force.lower() == "true":
force_delete = True
if not force_delete:
ans = raw_input("DELETE all objects? Y/N, default is N: ")
if ans.lower() != "y":
print "quit."
exit(-1)
prefix = object
marker = ''
delimiter = ''
maxkeys = '1000'
if options.marker:
marker = options.marker
if options.delimiter:
delimiter = options.delimiter
if options.maxkeys:
maxkeys = options.maxkeys
debug = True
if not delete_all_objects(get_oss(), bucket, prefix, delimiter, marker, maxkeys, debug):
exit(-1)
def cmd_getallbucket(args, options):
width = 20
print "%s %s %s" % ("CreateTime".ljust(width), "BucketLocation".ljust(width), "BucketName".ljust(width))
marker = ""
prefix = ""
headers = None
total_num = 0
while 1:
res = get_oss().get_service(headers, prefix, marker)
if (res.status / 100) == 2:
body = res.read()
(bucket_meta_list, marker) = get_bucket_meta_list_marker_from_xml(body)
for i in bucket_meta_list:
print "%s %s %s" % (str(convert_to_localtime(i.creation_date)).ljust(width), i.location.ljust(width), i.name)
total_num += 1
else:
break
if not marker:
break
print "\nBucket Number is: %s" % total_num
return res
def cmd_createbucket(args, options):
check_args(2, args)
if options.acl is not None and options.acl not in ACL_LIST:
print "acl invalid, SHOULD be one of %s" % (ACL_LIST)
sys.exit(1)
acl = ''
if options.acl:
acl = options.acl
bucket = parse_bucket(args[1])
if options.location is not None:
location = options.location
return get_oss().put_bucket_with_location(bucket, acl, location)
return get_oss().put_bucket(bucket, acl)
def cmd_getbucketlocation(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().get_bucket_location(bucket)
if res.status / 100 == 2:
body = res.read()
h = GetBucketLocationXml(body)
print h.location
return res
def cmd_deletebucket(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
return get_oss().delete_bucket(bucket)
def cmd_deletewholebucket(args, options):
check_args(2, args)
ans = raw_input("DELETE whole bucket? Y/N, default is N: ")
if ans.lower() != "y":
print "quit."
exit(-1)
bucket = parse_bucket(args[1])
debug = True
delete_marker = ""
delete_upload_id_marker = ""
if options.marker:
delete_marker = options.marker
if options.upload_id:
delete_upload_id_marker = options.upload_id
if not clear_all_objects_in_bucket(get_oss(), bucket, delete_marker, delete_upload_id_marker, debug):
exit(-1)
def delete_object(bucket, object, retry_times=2):
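    '''
    Delete a single object, retrying up to retry_times; returns True and
    bumps the global DELETE_OK counter on a 2xx response, False otherwise.
    '''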
object = smart_code(object)
global DELETE_OK
ret = False
for i in xrange(retry_times):
try:
oss = get_oss()
res = oss.delete_object(bucket, object)
if 2 == res.status / 100:
ret = True
if ret:
DELETE_OK += 1
print "delete %s/%s OK" % (bucket, object)
return ret
else:
print "delete %s/%s FAIL, status:%s, request-id:%s" % (bucket, object, res.status, res.getheader("x-oss-request-id"))
except:
print "delete %s/%s exception" % (bucket, object)
print sys.exc_info()[0], sys.exc_info()[1]
return False
class DeleteObjectWorker(threading.Thread):
def __init__(self, retry_times, queue):
threading.Thread.__init__(self)
self.queue = queue
self.retry_times = retry_times
def run(self):
while 1:
try:
(delete_object, bucket, object) = self.queue.get(block=False)
delete_object(bucket, object, self.retry_times)
self.queue.task_done()
except Queue.Empty:
break
except:
self.queue.task_done()
def cmd_deletebyfile(args, options):
check_args(2, args)
localfile = args[1]
check_localfile(localfile)
queue = Queue.Queue(0)
f = open(localfile)
for line in f:
line = line.strip()
(bucket, object) = parse_bucket_object(line)
if len(bucket) != 0 and len(object) != 0:
queue.put((delete_object, bucket, object))
f.close()
thread_num = 5
if options.thread_num:
thread_num = (int)(options.thread_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
thread_pool = []
for i in xrange(thread_num):
current = DeleteObjectWorker(retry_times, queue)
thread_pool.append(current)
current.start()
queue.join()
for item in thread_pool:
item.join()
def cmd_setacl(args, options):
check_args(2, args)
if options.acl is None or options.acl not in ACL_LIST:
print "acl invalid, SHOULD be one of %s" % (ACL_LIST)
sys.exit(1)
bucket = parse_bucket(args[1])
return get_oss().put_bucket(bucket, options.acl)
def cmd_getacl(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().get_bucket_acl(bucket)
if (res.status / 100) == 2:
body = res.read()
h = GetBucketAclXml(body)
print h.grant
return res
def to_http_headers(string):
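    '''
    Parse the CLI --headers value into a dict. Pairs are separated by '#'
    and extra ':' segments are joined back into the value, e.g.
    to_http_headers("Content-Type:text/plain#x-oss-copy-source:/b/o")
    -> {'Content-Type': 'text/plain', 'x-oss-copy-source': '/b/o'}
    '''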
headers_map = {}
for i in string.split('#'):
key_value_list = i.strip().split(':')
if len(key_value_list) >= 2:
headers_map[key_value_list[0]] = ":".join(key_value_list[1:])
return headers_map
def cmd_mkdir(args, options):
check_args(2, args)
if not args[1].endswith('/'):
args[1] += '/'
(bucket, object) = parse_bucket_object(args[1])
res = get_oss().put_object_from_string(bucket, object, "")
return res
def handler(signum, frame):
print 'Signal handler called with signal', signum
raise Exception("timeout")
try:
signal.signal(signal.SIGALRM, handler)
except:
pass
def cmd_put(args, options):
check_args(3, args)
localfile = args[1]
check_localfile(localfile)
if os.path.getsize(localfile) > MAX_OBJECT_SIZE:
print "locafile:%s is bigger than %s, it is not support by put, please use multiupload instead." % (localfile, MAX_OBJECT_SIZE)
exit(-1)
#user specified objectname oss://bucket/[path]/object
(bucket, object) = parse_bucket_object(args[2])
if len(object) == 0:
# e.g. upload to oss://bucket/
object = os.path.basename(localfile)
elif object.endswith("/"):
        #e.g. upload to oss://bucket/a/b/
object += os.path.basename(localfile)
content_type = ""
headers = {}
if options.content_type:
content_type = options.content_type
if options.headers:
headers = to_http_headers(options.headers)
if options.check_md5:
if options.check_md5.lower() == "true":
md5string, base64md5 = get_file_md5(localfile)
headers["Content-MD5"] = base64md5
headers["x-oss-meta-md5"] = md5string
timeout = 0
if options.timeout:
timeout = (int)(options.timeout)
print "timeout", timeout
try:
signal.alarm(timeout)
except:
pass
res = get_oss().put_object_from_file(bucket, object, localfile, content_type, headers)
try:
signal.alarm(0) # Disable the signal
except:
pass
if res.status == 200:
print_url(OSS_HOST, bucket, object, res)
return res
def print_url(host, bucket, object, res):
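    '''
    Print the object's public URL (virtual-hosted style when the bucket
    name is DNS-valid and the host is an official OSS domain, path style
    otherwise), the oss:// abstract path, and the response ETag. Note that
    the host argument is unused; the global OSS_HOST is consulted instead.
    '''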
print ""
second_level_domain = OSS_HOST
    original_object = object
object = oss_quote(object)
if check_bucket_valid(bucket) and not is_ip(second_level_domain):
if is_oss_host(second_level_domain):
print "Object URL is: http://%s.%s/%s" % (bucket, second_level_domain, object)
else:
print "Object URL is: http://%s/%s" % (second_level_domain, object)
else:
print "Object URL is: http://%s/%s/%s" % (second_level_domain, bucket, object)
print "Object abstract path is: oss://%s/%s" % (bucket, orginal_object)
header_map = convert_header2map(res.getheaders())
print "ETag is %s " % safe_get_element("etag", header_map)
def cmd_upload(args, options):
check_args(3, args)
localfile = args[1]
check_localfile(localfile)
multipart_threshold = 100*1024*1024
if options.multipart_threshold:
multipart_threshold = (int)(options.multipart_threshold)
localfile_size = os.path.getsize(localfile)
if localfile_size > multipart_threshold or localfile_size > MAX_OBJECT_SIZE:
return cmd_multi_upload(args, options)
return cmd_put(args, options)
def cmd_upload_group(args, options):
check_args(3, args)
localfile = args[1]
check_localfile(localfile)
#user specified objectname oss://bucket/[path]/object
(bucket, object) = parse_bucket_object(args[2])
if len(object) == 0:
# e.g. upload to oss://bucket/
object = os.path.basename(localfile)
elif object.endswith("/"):
        #e.g. upload to oss://bucket/a/b/
object += os.path.basename(localfile)
headers = {}
content_type = ''
if options.headers:
headers = to_http_headers(options.headers)
if options.content_type:
content_type = options.content_type
headers['Content-Type'] = content_type
thread_num = 10
if options.thread_num:
thread_num = (int)(options.thread_num)
max_part_num = 1000
if options.max_part_num:
max_part_num = (int)(options.max_part_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
oss = get_oss()
oss.set_retry_times(retry_times)
res = oss.upload_large_file(bucket, object, localfile, thread_num, max_part_num, headers)
if res.status == 200:
print_url(OSS_HOST, bucket, object, res)
return res
def cmd_multi_upload(args, options):
check_args(3, args)
localfile = args[1]
check_localfile(localfile)
#user specified objectname oss://bucket/[path]/object
(bucket, object) = parse_bucket_object(args[2])
is_check_md5 = False
if len(object) == 0:
# e.g. upload to oss://bucket/
object = os.path.basename(localfile)
elif object.endswith("/"):
        #e.g. upload to oss://bucket/a/b/
object += os.path.basename(localfile)
headers = {}
if options.headers:
headers = to_http_headers(options.headers)
thread_num = 10
if options.thread_num:
thread_num = (int)(options.thread_num)
max_part_num = 1000
if options.max_part_num:
max_part_num = (int)(options.max_part_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
if options.check_md5:
if options.check_md5.lower() == "true":
is_check_md5 = True
md5string, base64md5 = get_file_md5(localfile)
headers["x-oss-meta-md5"] = md5string
oss = get_oss()
oss.set_retry_times(retry_times)
upload_id = ""
if options.upload_id:
upload_id = options.upload_id
res = oss.get_all_parts(bucket, object, upload_id, max_parts=1)
if res.status != 200:
return res
if not upload_id:
upload_ids = []
upload_ids = get_upload_id_list(oss, bucket, object)
if upload_ids:
upload_ids = sorted(upload_ids)
upload_id = upload_ids[0]
res = oss.multi_upload_file(bucket, object, localfile, upload_id, thread_num, max_part_num, headers, debug=True, check_md5=is_check_md5)
if res.status == 200:
print_url(OSS_HOST, bucket, object, res)
return res
def cmd_copy(args, options):
check_args(3, args)
(bucket_source, object_source) = parse_bucket_object(args[1])
check_bucket_object(bucket_source, object_source)
(bucket, object) = parse_bucket_object(args[2])
check_bucket_object(bucket, object)
content_type = ""
headers = {}
if options.headers:
headers = to_http_headers(options.headers)
if options.content_type:
content_type = options.content_type
headers['Content-Type'] = content_type
res = get_oss().copy_object(bucket_source, object_source, bucket, object, headers)
if res.status == 200:
print_url(OSS_HOST, bucket, object, res)
return res
def cmd_upload_part_copy(args, options):
check_args(3, args)
(bucket_source, object_source) = parse_bucket_object(args[1])
check_bucket_object(bucket_source, object_source)
(bucket, object) = parse_bucket_object(args[2])
check_bucket_object(bucket, object)
#head object to get object size
headers = {}
res = get_oss().head_object(bucket_source, object_source, headers = headers)
if res.status != 200:
        print 'copy large file failed because head object failed, status:%s' %(res.status)
sys.exit(-1)
content_len = (int)(res.getheader('Content-Length'))
etag = res.getheader('ETag')
#get part size
default_part_size = 10 * 1024 * 1024
part_size = default_part_size
max_part_num=10000
min_part_size = 5 * 1024 * 1024
if options.part_size:
part_size = (int)(eval(options.part_size))
if part_size < min_part_size:
            print 'part size too small, changing part size to %s' %(default_part_size)
part_size = default_part_size
if part_size * max_part_num < content_len:
part_size = (content_len + max_part_num - content_len % max_part_num) / max_part_num
        print 'part num more than max part num %s, changing part size to %s' %(max_part_num, part_size)
if content_len % part_size:
part_size_list = [part_size] * (content_len / part_size) + [ content_len % part_size]
else:
part_size_list = [part_size] * (content_len / part_size)
#get upload id
if options.upload_id:
upload_id = options.upload_id
else:
res = get_oss().init_multi_upload(bucket, object)
if res.status != 200:
            print 'copy large file failed because init multipart upload failed, status:%s' %(res.status)
sys.exit(-1)
upload_id = GetInitUploadIdXml(res.read()).upload_id
#upload part copy
start = 0
part_number = 1
for part_size in part_size_list:
headers = {'x-oss-copy-source-range': ('bytes=%d-%d' % (start, start + part_size-1))}
headers['x-oss-copy-source-if-match'] = etag
res = get_oss().copy_object_as_part(bucket_source, object_source, bucket, object, upload_id, part_number, headers)
if res.status != 200:
            print 'copy large file failed because upload part copy failed, status:%s, upload_id:%s' %(res.status, upload_id)
sys.exit(-1)
start += part_size
part_number += 1
#complete multipart upload
part_xml = get_part_xml(get_oss(), bucket, object, upload_id)
res = get_oss().complete_upload(bucket, object, upload_id, part_xml)
if res.status != 200:
        print 'copy large file failed because complete multipart upload failed, status:%s, upload_id:%s' %(res.status, upload_id)
sys.exit(-1)
else:
print_url(OSS_HOST, bucket, object, res)
return res
def copy_object(src_bucket, src_object, des_bucket, des_object, headers, replace, retry_times = 3):
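    '''
    Server-side copy with retries. When replace is False an existing
    destination object (HEAD returns 200) counts as success. After a
    failed attempt, sources larger than 1GB are given up on with a hint
    to use get + multiupload; otherwise each failed attempt sleeps 300
    seconds before the next retry. Returns True on success, False after
    retry_times attempts.
    '''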
global COPY_OK
if COPY_OK > 0 and COPY_OK % 100 == 0:
print "%s objects are copied OK, marker is:%s" % (COPY_OK, src_object)
for i in xrange(retry_times):
tmp_headers = headers.copy()
try:
if replace:
res = get_oss().copy_object(src_bucket, src_object, des_bucket, des_object, tmp_headers)
if res.status == 200:
COPY_OK += 1
return True
else:
print "copy /%s/%s to /%s/%s FAIL, status:%s, request-id:%s" % \
(src_bucket, src_object, des_bucket, des_object, res.status, res.getheader("x-oss-request-id"))
else:
res = get_oss().head_object(des_bucket, des_object)
if res.status == 200:
COPY_OK += 1
return True
elif res.status == 404:
res = get_oss().copy_object(src_bucket, src_object, des_bucket, des_object, tmp_headers)
if res.status == 200:
COPY_OK += 1
return True
else:
print "copy /%s/%s to /%s/%s FAIL, status:%s, request-id:%s" % \
(src_bucket, src_object, des_bucket, des_object, res.status, res.getheader("x-oss-request-id"))
except:
print "copy /%s/%s to /%s/%s exception" % (src_bucket, src_object, des_bucket, des_object)
print sys.exc_info()[0], sys.exc_info()[1]
try:
res = get_oss().head_object(src_bucket, src_object)
if res.status == 200:
length = (int)(res.getheader('content-length'))
max_length = 1*1024*1024*1024
if length > max_length:
print "/%s/%s is bigger than %s, copy may fail. skip this one." \
% (src_bucket, src_object, max_length)
print "please use get command to download the object and then use multiupload command to upload the object."
return False
except:
print sys.exc_info()[0], sys.exc_info()[1]
pass
sleep_time = 300
print "sleep %s" % sleep_time
time.sleep(sleep_time)
print "copy /%s/%s to /%s/%s FAIL" % (src_bucket, src_object, des_bucket, des_object)
return False
class CopyObjectWorker(threading.Thread):
def __init__(self, retry_times, queue):
threading.Thread.__init__(self)
self.queue = queue
self.retry_times = retry_times
def run(self):
while 1:
try:
(copy_object, src_bucket, src_object, des_bucket, des_object, replace, headers) = self.queue.get(block=False)
copy_object(src_bucket, src_object, des_bucket, des_object, headers, replace, self.retry_times)
self.queue.task_done()
except Queue.Empty:
break
except:
self.queue.task_done()
def cmd_copy_bucket(args, options):
check_args(3, args)
(src_bucket, src_prefix) = parse_bucket_object(args[1])
(des_bucket, des_prefix) = parse_bucket_object(args[2])
if des_prefix and not des_prefix.endswith("/"):
des_prefix = "%s/" % des_prefix
thread_num = 5
if options.thread_num:
thread_num = (int)(options.thread_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
replace = False
if options.replace is not None and options.replace.lower() == "true":
replace = True
marker = ""
if options.marker:
marker = options.marker
headers = {}
if options.headers:
headers = to_http_headers(options.headers)
delimiter = ''
maxkeys = '1000'
handled_obj_num = 0
while 1:
queue = Queue.Queue(0)
res = get_oss().get_bucket(src_bucket, src_prefix, marker, delimiter, maxkeys)
if res.status != 200:
return res
body = res.read()
(tmp_object_list, marker) = get_object_list_marker_from_xml(body)
for i in tmp_object_list:
object = i[0]
length = i[1]
last_modify_time = i[2]
if str(length) == "0" and object.endswith("/"):
continue
handled_obj_num += 1
src_object = smart_code(object)
tmp_object = src_object
if src_prefix.endswith("/"):
if src_prefix == object[:len(src_prefix)]:
tmp_object = object[len(src_prefix):]
while 1:
if not tmp_object.startswith("/"):
break
tmp_object = tmp_object[1:]
if des_prefix:
des_object = "%s%s" % (des_prefix, tmp_object)
else:
des_object = tmp_object
queue.put((copy_object, src_bucket, src_object, des_bucket, des_object, replace, headers))
#copy_object(src_bucket, src_object, des_bucket, des_object, replace)
thread_pool = []<|fim▁hole|> for i in xrange(thread_num):
current = CopyObjectWorker(retry_times, queue)
thread_pool.append(current)
current.start()
queue.join()
for item in thread_pool:
item.join()
if len(marker) == 0:
break
print "Total being copied objects num: %s, from /%s/%s to /%s/%s" % \
(handled_obj_num, src_bucket, src_prefix, des_bucket, des_prefix)
global COPY_OK
print "OK num:%s" % COPY_OK
print "FAIL num:%s" % (handled_obj_num - COPY_OK)
def continue_get(bucket, object, localfile, headers=None, retry_times=3):
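    '''
    Resumable download: HEAD the object for its length, then issue ranged
    GETs, resuming from the current local file size on retry. Verifies the
    final length and, when x-oss-meta-md5 is present, the file md5.
    '''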
length = -1
local_length = -2
tmp_headers = {}
header_map = {}
if headers:
tmp_headers = headers.copy()
try:
res = get_oss().head_object(bucket, object, tmp_headers)
if 200 == res.status:
length = (int)(res.getheader('content-length'))
header_map = convert_header2map(res.getheaders())
else:
print "can not get the length of object:", object
return False
except:
print sys.exc_info()[0], sys.exc_info()[1]
return False
endpos = length - 1
for i in xrange(retry_times):
curpos = 0
range_info = 'bytes=%d-%d' % (curpos, endpos)
if os.path.isfile(localfile):
local_length = os.path.getsize(localfile)
if i == 0 and header_map.has_key('x-oss-meta-md5'):
oss_md5_string = header_map['x-oss-meta-md5']
local_md5_string, base64_md5 = get_file_md5(localfile)
if local_md5_string.lower() == oss_md5_string.lower():
return True
else:
os.remove(localfile)
elif local_length == length:
#print "localfile:%s exists and length is equal. please check if it is ok. you can remove it first and download again." % localfile
return True
elif local_length < length:
if i == 0:
os.remove(localfile)
else:
curpos = local_length
range_info = 'bytes=%d-%d' % (curpos, endpos)
print "localfile:%s exists and length is:%s, continue to download. range:%s." % (localfile, local_length, range_info)
else:
os.remove(localfile)
file = open(localfile, "ab+")
tmp_headers = {}
if headers:
tmp_headers = headers.copy()
tmp_headers['Range'] = range_info
file.seek(curpos)
is_read_ok = False
oss_md5_string = ''
try:
res = get_oss().get_object(bucket, object, tmp_headers)
if res.status/100 == 2:
header_map = convert_header2map(res.getheaders())
if header_map.has_key('x-oss-meta-md5'):
oss_md5_string = header_map['x-oss-meta-md5']
while True:
content = res.read(RECV_BUF_SIZE)
if content:
file.write(content)
curpos += len(content)
else:
break
is_read_ok = True
else:
print "range get /%s/%s [%s] ret:%s, request-id:%s" % (bucket, object, range_info, res.status, res.getheader("x-oss-request-id"))
except:
print "range get /%s/%s [%s] exception" % (bucket, object, range_info)
print sys.exc_info()[0], sys.exc_info()[1]
file.flush()
file.close()
file_opened = False
continue
file.flush()
file.close()
if os.path.isfile(localfile):
local_length = os.path.getsize(localfile)
if is_read_ok and length == local_length:
if oss_md5_string != '':
md5string, base64md5 = get_file_md5(localfile)
if md5string.lower() != oss_md5_string.lower():
print "The object %s is download to %s failed. file md5 is incorrect." % (object, localfile)
return False
return True
else:
print "The object %s is download to %s failed. file length is incorrect.length is:%s local_length:%s" % (object, localfile, length, local_length)
return False
def cmd_get(args, options):
check_args(3, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
localfile = args[2]
localfile = smart_code(localfile)
headers = {}
if options.headers:
headers = to_http_headers(options.headers)
if options.continue_download:
retry_times = 3
res = continue_get(bucket, object, localfile, headers, retry_times)
else:
tmp_headers = {}
tmp_headers = headers.copy()
res = get_oss().get_object_to_file(bucket, object, localfile, headers=tmp_headers)
if res.status/100 == 2:
header_map = convert_header2map(res.getheaders())
if header_map.has_key('x-oss-meta-md5'):
oss_md5string = header_map['x-oss-meta-md5']
md5string, base64md5 = get_file_md5(localfile)
if md5string.lower() != oss_md5string.lower():
print "The object %s is download to %s failed. file md5 is incorrect." % (object, localfile)
sys.exit(1)
else:
content_length = int(header_map['content-length'])
local_length = os.path.getsize(localfile)
if content_length != local_length:
print "The object %s is download to %s failed. file length is incorrect." % (object, localfile)
sys.exit(1)
else:
return res
if res:
print "The object %s is downloaded to %s, please check." % (object, localfile)
return res
def cmd_multi_get(args, options):
check_args(3, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
localfile = args[2]
localfile = smart_code(localfile)
thread_num = 5
if options.thread_num:
thread_num = (int)(options.thread_num)
retry_times = MAX_RETRY_TIMES
if options.retry_times:
retry_times = (int)(options.retry_times)
show_bar = False
oss = get_oss(show_bar)
ret = multi_get(oss, bucket, object, localfile, thread_num, retry_times)
if ret:
print "The object %s is downloaded to %s, please check." % (object, localfile)
else:
print "Download object:%s failed!" % (object)
exit(-1)
def cmd_cat(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
res = get_oss().get_object(bucket, object)
if res.status == 200:
data = ""
while 1:
data = res.read(10240)
if len(data) != 0:
print data
else:
break
return res
def cmd_meta(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
headers = {}
res = get_oss().head_object(bucket, object, headers = headers)
if res.status == 200:
header_map = convert_header2map(res.getheaders())
width = 16
print "%s: %s" % ("objectname".ljust(width), object)
for key, value in header_map.items():
print "%s: %s" % (key.ljust(width), value)
return res
def cmd_info(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
res = get_oss().get_object_info(bucket, object)
if res.status == 200:
print res.read()
return res
def cmd_delete(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
return get_oss().delete_object(bucket, object)
def cmd_cancel(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
if options.upload_id is None:
print "upload_id invalid, please set with --upload_id=xxx"
sys.exit(1)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
return get_oss().cancel_upload(bucket, object, options.upload_id)
def cmd_sign_url(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
if options.timeout:
timeout = options.timeout
else:
timeout = "600"
print "timeout is %s seconds." % timeout
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
method = 'GET'
print get_oss().sign_url(method, bucket, object, int(timeout))
def cmd_configure(args, options):
if options.accessid is None or options.accesskey is None:
print "%s miss parameters, use --accessid=[accessid] --accesskey=[accesskey] to specify id/key pair" % args[0]
sys.exit(-1)
config = ConfigParser.RawConfigParser()
config.add_section(CONFIGSECTION)
if options.host is not None:
config.set(CONFIGSECTION, 'host', options.host)
config.set(CONFIGSECTION, 'accessid', options.accessid)
config.set(CONFIGSECTION, 'accesskey', options.accesskey)
if options.sts_token:
config.set(CONFIGSECTION, 'sts_token', options.sts_token)
cfgfile = open(CONFIGFILE, 'w+')
config.write(cfgfile)
print "Your configuration is saved into %s ." % CONFIGFILE
cfgfile.close()
import stat
os.chmod(CONFIGFILE, stat.S_IREAD | stat.S_IWRITE)
def cmd_help(args, options):
print HELP
def cmd_create_link(args, options):
check_args(3, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
object_list = args[2:]
return get_oss().create_link_from_list(bucket, object, object_list)
def cmd_create_link_from_file(args, options):
check_args(3, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
local_file = args[2]
if not os.path.isfile(local_file):
print "no such file:%s" % local_file
exit(-1)
f = open(local_file)
object_list = f.readlines()
f.close()
return get_oss().create_link_from_list(bucket, object, object_list)
def cmd_get_link_index(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
res = get_oss().get_link_index(bucket, object)
if res.status == 200:
print res.read()
return res
def cmd_create_group_from_file(args, options):
check_args(3, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
local_file = args[2]
if not os.path.isfile(local_file):
print "no such file:%s" % local_file
exit(-1)
f = open(local_file)
object_list = f.readlines()
f.close()
part_msg_list = []
for i in range(len(object_list)):
object_list[i] = object_list[i].rstrip('\n')
res = get_oss().head_object(bucket, object_list[i])
if res.status != 200:
print "head object: ", object_list[i], ", ", res.status
print 'Create Group Fail!'
return res
header_map = convert_header2map(res.getheaders())
etag = safe_get_element("etag", header_map)
etag = etag.replace("\"", "")
list = [str(i), object_list[i], etag]
part_msg_list.append(list)
object_group_msg_xml = create_object_group_msg_xml(part_msg_list)
return get_oss().post_object_group(bucket, object, object_group_msg_xml)
def cmd_create_group(args, options):
check_args(3, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
object_list = args[2:]
part_msg_list = []
for i in range(len(object_list)):
res = get_oss().head_object(bucket, object_list[i])
if res.status != 200:
print "head object: ", object_list[i], ", ", res.status
print 'Create Group Fail!'
return res
header_map = convert_header2map(res.getheaders())
etag = safe_get_element("etag", header_map)
etag = etag.replace("\"", "")
list = [str(i), object_list[i], etag]
part_msg_list.append(list)
object_group_msg_xml = create_object_group_msg_xml(part_msg_list)
return get_oss().post_object_group(bucket, object, object_group_msg_xml)
def cmd_get_group_index(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
check_bucket_object(bucket, object)
res = get_oss().get_object_group_index(bucket, object)
if res.status == 200:
print res.read()
return res
def cmd_put_bucket_logging(args, options):
source_bucket = ''
target_bucket = ''
prefix = ''
check_args(2, args)
if len(args) >= 3:
target_bucket = args[2]
(target_bucket, prefix) = parse_bucket_object(args[2])
source_bucket = parse_bucket(args[1])
target_bucket = parse_bucket(args[2])
res = get_oss().put_logging(source_bucket, target_bucket, prefix)
return res
def cmd_get_bucket_logging(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().get_logging(bucket)
if res.status == 200:
print res.read()
return res
def cmd_put_bucket_website(args, options):
bucket = ''
indexfile = ''
errorfile = ''
check_args(3, args)
if len(args) >= 3:
indexfile = args[2]
if len(args) >= 4:
errorfile = args[3]
bucket = parse_bucket(args[1])
res = get_oss().put_website(bucket, indexfile, errorfile)
return res
def cmd_get_bucket_website(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().get_website(bucket)
if res.status == 200:
print res.read()
return res
def cmd_delete_bucket_website(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().delete_website(bucket)
return res
def cmd_delete_bucket_logging(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().delete_logging(bucket)
return res
def cmd_put_bucket_cors(args, options):
check_args(3, args)
bucket = parse_bucket(args[1])
local_file = args[2]
if not os.path.isfile(local_file):
print "no such file:%s" % local_file
exit(-1)
f = open(local_file)
content = f.read()
f.close()
return get_oss().put_cors(bucket, content)
def cmd_get_bucket_cors(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().get_cors(bucket)
if res.status == 200:
print res.read()
return res
def cmd_delete_bucket_cors(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().delete_cors(bucket)
return res
def cmd_options(args, options):
check_args(2, args)
(bucket, object) = parse_bucket_object(args[1])
headers = {}
is_ok = True
if options.origin:
headers['Origin'] = options.origin
else:
is_ok = False
method_list = ["GET", "PUT", "DELETE", "HEAD", "POST"]
if options.method:
if options.method not in method_list:
is_ok = False
else:
headers['Access-Control-Request-Method'] = options.method
else:
is_ok = False
if not is_ok:
print "please set origin and method with --origin=xxx --method=xxx, the value of --method SHOULD be one of %s" % (" ".join(method_list))
exit(-1)
res = get_oss().options(bucket, object, headers)
return res
def cmd_put_bucket_lifecycle(args, options):
check_args(3, args)
bucket = parse_bucket(args[1])
local_file = args[2]
if not os.path.isfile(local_file):
print "no such file:%s" % local_file
exit(-1)
f = open(local_file)
lifecycle_config = f.read()
f.close()
res = get_oss().put_lifecycle(bucket, lifecycle_config)
return res
def cmd_get_bucket_lifecycle(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().get_lifecycle(bucket)
if res.status == 200:
print res.read()
return res
def cmd_put_bucket_referer(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
allow_empty_referer = True
if options.allow_empty_referer and options.allow_empty_referer.lower() == "false":
allow_empty_referer = False
referer_list = []
if options.referer:
referer_list = options.referer.split(",")
res = get_oss().put_referer(bucket, allow_empty_referer, referer_list)
return res
def cmd_get_bucket_referer(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().get_referer(bucket)
if res.status == 200:
print res.read()
return res
def cmd_delete_bucket_lifecycle(args, options):
check_args(2, args)
bucket = parse_bucket(args[1])
res = get_oss().delete_lifecycle(bucket)
return res
def get_oss(show_bar = True):
oss = OssAPI(OSS_HOST, ID, KEY, sts_token=STS_TOKEN)
oss.show_bar = show_bar
oss.set_send_buf_size(SEND_BUF_SIZE)
oss.set_recv_buf_size(RECV_BUF_SIZE)
oss.set_debug(IS_DEBUG)
return oss
def setup_credentials(options):
config = ConfigParser.ConfigParser()
try:
config.read(CONFIGFILE)
global OSS_HOST
global ID
global KEY
global STS_TOKEN
try:
OSS_HOST = config.get(CONFIGSECTION, 'host')
except Exception:
OSS_HOST = DEFAUL_HOST
ID = config.get(CONFIGSECTION, 'accessid')
KEY = config.get(CONFIGSECTION, 'accesskey')
try:
STS_TOKEN = config.get(CONFIGSECTION, 'sts_token')
except:
pass
if options.accessid is not None:
ID = options.accessid
if options.accesskey is not None:
KEY = options.accesskey
if options.sts_token is not None:
STS_TOKEN = options.sts_token
if options.host is not None:
OSS_HOST = options.host
except Exception:
if options.accessid is not None:
ID = options.accessid
if options.accesskey is not None:
KEY = options.accesskey
if options.sts_token is not None:
STS_TOKEN = options.sts_token
if options.host is not None:
OSS_HOST = options.host
if len(ID) == 0 or len(KEY) == 0:
print "can't get accessid/accesskey, setup use : config --accessid=accessid --accesskey=accesskey"
sys.exit(1)
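# Illustrative config file layout; the key names mirror the config.get() calls
# above, while the section name and values are placeholders:
#
#   [<CONFIGSECTION>]
#   host = oss.aliyuncs.com
#   accessid = <your_access_id>
#   accesskey = <your_access_key>
#   sts_token = <optional_sts_token>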
def setup_cmdlist():
CMD_LIST['GetAllBucket'] = cmd_getallbucket
CMD_LIST['CreateBucket'] = cmd_createbucket
CMD_LIST['DeleteBucket'] = cmd_deletebucket
CMD_LIST['DeleteWholeBucket'] = cmd_deletewholebucket
CMD_LIST['DeleteByFile'] = cmd_deletebyfile
CMD_LIST['GetBucketLocation'] = cmd_getbucketlocation
CMD_LIST['GetAcl'] = cmd_getacl
CMD_LIST['SetAcl'] = cmd_setacl
CMD_LIST['List'] = cmd_listing
CMD_LIST['MkDir'] = cmd_mkdir
CMD_LIST['Init'] = cmd_init_upload
CMD_LIST['UploadPartFromString'] = cmd_upload_part_from_string
CMD_LIST['UploadPartFromFile'] = cmd_upload_part_from_file
CMD_LIST['ListPart'] = cmd_listpart
CMD_LIST['ListParts'] = cmd_listparts
CMD_LIST['GetAllPartSize'] = cmd_getallpartsize
CMD_LIST['ListAllObject'] = cmd_listallobject
CMD_LIST['ListAllDir'] = cmd_listalldir
CMD_LIST['DownloadAllObject'] = cmd_downloadallobject
CMD_LIST['UploadObjectFromLocalDir'] = cmd_upload_object_from_localdir
CMD_LIST['UploadDisk'] = cmd_upload_disk
CMD_LIST['DeleteAllObject'] = cmd_deleteallobject
CMD_LIST['Put'] = cmd_put
CMD_LIST['Copy'] = cmd_copy
CMD_LIST['CopyLargeFile'] = cmd_upload_part_copy
CMD_LIST['CopyBucket'] = cmd_copy_bucket
CMD_LIST['Upload'] = cmd_upload
CMD_LIST['UploadGroup'] = cmd_upload_group
CMD_LIST['MultiUpload'] = cmd_multi_upload
CMD_LIST['Get'] = cmd_get
CMD_LIST['MultiGet'] = cmd_multi_get
CMD_LIST['Cat'] = cmd_cat
CMD_LIST['Meta'] = cmd_meta
CMD_LIST['Info'] = cmd_info
CMD_LIST['Delete'] = cmd_delete
CMD_LIST['Cancel'] = cmd_cancel
CMD_LIST['Config'] = cmd_configure
CMD_LIST['Help'] = cmd_help
CMD_LIST['SignUrl'] = cmd_sign_url
CMD_LIST['CreateLink'] = cmd_create_link
CMD_LIST['CreateLinkFromFile'] = cmd_create_link_from_file
CMD_LIST['GetLinkIndex'] = cmd_get_link_index
CMD_LIST['CreateGroup'] = cmd_create_group
CMD_LIST['CreateGroupFromFile'] = cmd_create_group_from_file
CMD_LIST['GetGroupIndex'] = cmd_get_group_index
CMD_LIST['PutBucketLogging'] = cmd_put_bucket_logging
CMD_LIST['GetBucketLogging'] = cmd_get_bucket_logging
CMD_LIST['DeleteBucketLogging'] = cmd_delete_bucket_logging
CMD_LIST['PutBucketWebsite'] = cmd_put_bucket_website
CMD_LIST['GetBucketWebsite'] = cmd_get_bucket_website
CMD_LIST['DeleteBucketWebsite'] = cmd_delete_bucket_website
CMD_LIST['PutBucketCors'] = cmd_put_bucket_cors
CMD_LIST['GetBucketCors'] = cmd_get_bucket_cors
CMD_LIST['DeleteBucketCors'] = cmd_delete_bucket_cors
CMD_LIST['Options'] = cmd_options
CMD_LIST['PutBucketLifeCycle'] = cmd_put_bucket_lifecycle
CMD_LIST['GetBucketLifeCycle'] = cmd_get_bucket_lifecycle
CMD_LIST['DeleteBucketLifeCycle'] = cmd_delete_bucket_lifecycle
CMD_LIST['PutBucketReferer'] = cmd_put_bucket_referer
CMD_LIST['GetBucketReferer'] = cmd_get_bucket_referer
def getSuitableKeyValues(keyValues):
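    # e.g. {'marker': ['abc'], 'force': None} -> {'marker': 'abc', 'force': None}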
newMap = dict()
if keyValues is not None and isinstance(keyValues,dict):
keys = keyValues.keys()
for key in keys:
value = keyValues.get(key)
if value is not None and isinstance(value,list) and len(value)>0:
value = value[0]
newMap[key] = value
return newMap
def getParameterList():
parametersList = ['origin','sts_token', 'force', 'recv_buf_size', 'accesskey', 'part_size', 'retry_times',\
'replace', 'thread_num', 'marker', 'exclude','skip_dir', 'out', 'check_point', 'strip_dir',\
'check_md5','delimiter', 'skip_suffix', 'maxkeys', 'filename_list', 'location', 'temp_dir', \
'method', 'config_file', 'accessid', 'continue_download', 'allow_empty_referer','host',\
'referer', 'content_type', 'data', 'device_id', 'max_part_num', 'acl','headers',\
'part_number', 'upload_id', 'send_buf_size', 'timeout', 'debug', 'multipart_threshold']
return parametersList
def initKeyValues(parametersList):
newMap = dict.fromkeys(parametersList)
return newMap
def getParametersKV(keyValues,parameters):
if isinstance(keyValues,dict) and isinstance(parameters,dict):
keys = parameters.keys()
for item in keyValues:
if item in keys:
parameters[item] = keyValues[item]
return parameters
def getOptionsFromDict(parameters):
if isinstance(parameters,dict):
options = Values(parameters)
return options
def getOperations(operation):
list = []
if operation is not None:
list.append(operation)
return list
def getAvailableOperations():
setup_cmdlist()
return CMD_LIST.keys()
def handleOss():
    # these module-level settings may be overridden by command-line options below
    global CONFIGFILE, IS_DEBUG, SEND_BUF_SIZE, RECV_BUF_SIZE
parser = aliyunCliParser()
operation = parser._getOperations()
keyValues = parser._getKeyValues()
keyValues = parser.getOpenApiKeyValues(keyValues)
keyValues = getSuitableKeyValues(keyValues)
parameterList = getParameterList()
parameters = initKeyValues(parameterList)
parameters = getParametersKV(keyValues,parameters)
options = getOptionsFromDict(parameters)
args = operation
setup_cmdlist()
if args is None or len(args) < 1 or args[0] in HELP_CMD_LIST:
print HELP
sys.exit(1)
if args[0] not in CMD_LIST.keys():
print "unsupported command : %s " % args[0]
print HELP
sys.exit(1)
if options.config_file is not None:
CONFIGFILE = options.config_file
if options.debug is not None:
debug = options.debug
if debug.lower() == "true":
IS_DEBUG = True
else:
IS_DEBUG = False
if options.send_buf_size is not None:
try:
SEND_BUF_SIZE = (int)(options.send_buf_size)
except ValueError:
pass
if options.recv_buf_size is not None:
try:
RECV_BUF_SIZE = (int)(options.recv_buf_size)
except ValueError:
pass
if options.upload_id is not None:
check_upload_id(options.upload_id)
if args[0] != 'Config':
setup_credentials(options)
else:
CMD_LIST['Config'](args, options)
sys.exit(0)
cmd = args[0]
begin = time.time()
try:
res = CMD_LIST[cmd](args, options)
print_result(cmd, res)
except socket.timeout:
print "Socket timeout, please try again later."
sys.exit(1)
    except socket.error, e:
        print "Connect to oss failed: %s.\nplease check that the host name you provided is reachable.\ne.g:" % (e)
print "\tcurl %s\nor\n\tping %s\n" % (OSS_HOST, OSS_HOST)
sys.exit(1)
end = time.time()
sys.stderr.write("%.3f(s) elapsed\n" % (end - begin))
if __name__ == '__main__':
handleOss()<|fim▁end|> | |
<|file_name|>test_commands.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2014 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for management commands.
"""
from django.test import TestCase
from weblate.trans.tests.test_models import RepoTestCase
from weblate.trans.models import SubProject
from django.core.management import call_command
from django.core.management.base import CommandError
import django
# Django 1.5 changes behavior here
if django.VERSION >= (1, 5):
COMMAND_EXCEPTION = CommandError
else:
COMMAND_EXCEPTION = SystemExit
class ImportProjectTest(RepoTestCase):
def test_import(self):
project = self.create_project()
call_command(
'import_project',
'test',
self.repo_path,
'master',
'**/*.po',
)
# We should have loaded four subprojects
self.assertEqual(project.subproject_set.count(), 4)
def test_import_po(self):
project = self.create_project()
call_command(
'import_project',
'test',
self.repo_path,
'master',
'**/*.po',
file_format='po'
)
# We should have loaded four subprojects
self.assertEqual(project.subproject_set.count(), 4)
def test_import_invalid(self):
project = self.create_project()
self.assertRaises(
COMMAND_EXCEPTION,
call_command,
'import_project',
'test',
self.repo_path,
'master',
'**/*.po',
file_format='INVALID'
)
        # We should have loaded no subprojects
self.assertEqual(project.subproject_set.count(), 0)
def test_import_aresource(self):
project = self.create_project()<|fim▁hole|> self.repo_path,
'master',
'**/values-*/strings.xml',
file_format='aresource',
base_file_template='android/values/strings.xml',
)
# We should have loaded one subproject
self.assertEqual(project.subproject_set.count(), 1)
def test_import_aresource_format(self):
project = self.create_project()
call_command(
'import_project',
'test',
self.repo_path,
'master',
'**/values-*/strings.xml',
file_format='aresource',
base_file_template='%s/values/strings.xml',
)
# We should have loaded one subproject
self.assertEqual(project.subproject_set.count(), 1)
def test_re_import(self):
project = self.create_project()
call_command(
'import_project',
'test',
self.repo_path,
'master',
'**/*.po',
)
# We should have loaded four subprojects
self.assertEqual(project.subproject_set.count(), 4)
call_command(
'import_project',
'test',
self.repo_path,
'master',
'**/*.po',
)
# We should load no more subprojects
self.assertEqual(project.subproject_set.count(), 4)
def test_import_against_existing(self):
'''
Test importing with a weblate:// URL
'''
android = self.create_android()
project = android.project
self.assertEqual(project.subproject_set.count(), 1)
call_command(
'import_project',
project.slug,
'weblate://%s/%s' % (project.slug, android.slug),
'master',
'**/*.po',
)
# We should have loaded five subprojects
self.assertEqual(project.subproject_set.count(), 5)
def test_import_missing_project(self):
'''
Test of correct handling of missing project.
'''
self.assertRaises(
COMMAND_EXCEPTION,
call_command,
'import_project',
'test',
self.repo_path,
'master',
'**/*.po',
)
def test_import_missing_wildcard(self):
'''
Test of correct handling of missing wildcard.
'''
self.create_project()
self.assertRaises(
COMMAND_EXCEPTION,
call_command,
'import_project',
'test',
self.repo_path,
'master',
'*/*.po',
)
class BasicCommandTest(TestCase):
def test_versions(self):
call_command('list_versions')
class PeriodicCommandTest(RepoTestCase):
def setUp(self):
super(PeriodicCommandTest, self).setUp()
self.create_subproject()
def test_cleanup(self):
call_command(
'cleanuptrans'
)
def test_update_index(self):
# Test the command
call_command(
'update_index'
)
def test_list_checks(self):
call_command(
'list_ignored_checks'
)
call_command(
'list_ignored_checks',
list_all=True
)
call_command(
'list_ignored_checks',
count=10
)
class CheckGitTest(RepoTestCase):
'''
Base class for handling tests of WeblateCommand
based commands.
'''
command_name = 'checkgit'
def setUp(self):
super(CheckGitTest, self).setUp()
self.create_subproject()
def do_test(self, *args, **kwargs):
call_command(
self.command_name,
*args,
**kwargs
)
def test_all(self):
self.do_test(
all=True,
)
def test_project(self):
self.do_test(
'test',
)
def test_subproject(self):
self.do_test(
'test/test',
)
def test_nonexisting_project(self):
self.assertRaises(
COMMAND_EXCEPTION,
self.do_test,
'notest',
)
def test_nonexisting_subproject(self):
self.assertRaises(
COMMAND_EXCEPTION,
self.do_test,
'test/notest',
)
class CommitPendingTest(CheckGitTest):
command_name = 'commit_pending'
class CommitGitTest(CheckGitTest):
command_name = 'commitgit'
class PushGitTest(CheckGitTest):
command_name = 'pushgit'
class LoadTest(CheckGitTest):
command_name = 'loadpo'
class UpdateChecksTest(CheckGitTest):
command_name = 'updatechecks'
class UpdateGitTest(CheckGitTest):
command_name = 'updategit'
class RebuildIndexTest(CheckGitTest):
command_name = 'rebuild_index'
def test_all_clean(self):
self.do_test(
all=True,
clean=True,
)
class LockTranslationTest(CheckGitTest):
command_name = 'lock_translation'
class UnLockTranslationTest(CheckGitTest):
command_name = 'unlock_translation'
class LockingCommandTest(RepoTestCase):
'''
Test locking and unlocking.
'''
def setUp(self):
super(LockingCommandTest, self).setUp()
self.create_subproject()
def test_locking(self):
subproject = SubProject.objects.all()[0]
self.assertFalse(
SubProject.objects.filter(locked=True).exists()
)
call_command(
'lock_translation',
'{0}/{1}'.format(
subproject.project.slug,
subproject.slug,
)
)
self.assertTrue(
SubProject.objects.filter(locked=True).exists()
)
call_command(
'unlock_translation',
'{0}/{1}'.format(
subproject.project.slug,
subproject.slug,
)
)
self.assertFalse(
SubProject.objects.filter(locked=True).exists()
)
class BenchmarkCommandTest(RepoTestCase):
'''
Benchmarking test.
'''
def setUp(self):
super(BenchmarkCommandTest, self).setUp()
self.create_subproject()
def test_benchmark(self):
call_command(
'benchmark', 'test', 'weblate://test/test', 'po/*.po'
)<|fim▁end|> | call_command(
'import_project',
'test', |
<|file_name|>abs-floor.js<|end_file_name|><|fim▁begin|>export default function absFloor(number) {
if (number < 0) {
// -0 -> 0<|fim▁hole|> return Math.ceil(number) || 0;
} else {
return Math.floor(number);
}
}<|fim▁end|> | |
<|file_name|>bitcoinrpc.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "init.h"
#include "util.h"
#include "sync.h"
#include "ui_interface.h"
#include "base58.h"
#include "bitcoinrpc.h"
#include "db.h"
#include <boost/asio.hpp>
#include <boost/asio/ip/v6_only.hpp>
#include <boost/bind.hpp>
#include <boost/filesystem.hpp>
#include <boost/foreach.hpp>
#include <boost/iostreams/concepts.hpp>
#include <boost/iostreams/stream.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/asio/ssl.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/shared_ptr.hpp>
#include <list>
using namespace std;
using namespace boost;
using namespace boost::asio;
using namespace json_spirit;
static std::string strRPCUserColonPass;
// These are created by StartRPCThreads, destroyed in StopRPCThreads
static asio::io_service* rpc_io_service = NULL;
static ssl::context* rpc_ssl_context = NULL;
static boost::thread_group* rpc_worker_group = NULL;
static inline unsigned short GetDefaultRPCPort()
{
return GetBoolArg("-testnet", false) ? 16540 : 16541;
}
Object JSONRPCError(int code, const string& message)
{
Object error;
error.push_back(Pair("code", code));
error.push_back(Pair("message", message));
return error;
}
void RPCTypeCheck(const Array& params,
const list<Value_type>& typesExpected,
bool fAllowNull)
{
unsigned int i = 0;
BOOST_FOREACH(Value_type t, typesExpected)
{
if (params.size() <= i)
break;
const Value& v = params[i];
if (!((v.type() == t) || (fAllowNull && (v.type() == null_type))))
{
string err = strprintf("Expected type %s, got %s",
Value_type_name[t], Value_type_name[v.type()]);
throw JSONRPCError(RPC_TYPE_ERROR, err);
}
i++;
}
}
void RPCTypeCheck(const Object& o,
const map<string, Value_type>& typesExpected,
bool fAllowNull)
{
BOOST_FOREACH(const PAIRTYPE(string, Value_type)& t, typesExpected)
{
const Value& v = find_value(o, t.first);
if (!fAllowNull && v.type() == null_type)
throw JSONRPCError(RPC_TYPE_ERROR, strprintf("Missing %s", t.first.c_str()));
if (!((v.type() == t.second) || (fAllowNull && (v.type() == null_type))))
{
string err = strprintf("Expected type %s for %s, got %s",
Value_type_name[t.second], t.first.c_str(), Value_type_name[v.type()]);
throw JSONRPCError(RPC_TYPE_ERROR, err);
}
}
}
int64 AmountFromValue(const Value& value)
{
double dAmount = value.get_real();
if (dAmount <= 0.0 || dAmount > 84000000.0)
throw JSONRPCError(RPC_TYPE_ERROR, "Invalid amount");
int64 nAmount = roundint64(dAmount * COIN);
if (!MoneyRange(nAmount))
throw JSONRPCError(RPC_TYPE_ERROR, "Invalid amount");
return nAmount;
}
Value ValueFromAmount(int64 amount)
{
return (double)amount / (double)COIN;
}
std::string HexBits(unsigned int nBits)
{
union {
int32_t nBits;
char cBits[4];
} uBits;
uBits.nBits = htonl((int32_t)nBits);
return HexStr(BEGIN(uBits.cBits), END(uBits.cBits));
}
///
/// Note: This interface may still be subject to change.
///
string CRPCTable::help(string strCommand) const
{
string strRet;
set<rpcfn_type> setDone;
for (map<string, const CRPCCommand*>::const_iterator mi = mapCommands.begin(); mi != mapCommands.end(); ++mi)
{
const CRPCCommand *pcmd = mi->second;
string strMethod = mi->first;
        // We already filter duplicates, but these deprecated calls screw up the sort order
if (strMethod.find("label") != string::npos)
continue;
if (strCommand != "" && strMethod != strCommand)
continue;
if (pcmd->reqWallet && !pwalletMain)
continue;
try
{
Array params;
rpcfn_type pfn = pcmd->actor;
if (setDone.insert(pfn).second)
(*pfn)(params, true);
}
catch (std::exception& e)
{
// Help text is returned in an exception
string strHelp = string(e.what());
if (strCommand == "")
if (strHelp.find('\n') != string::npos)
strHelp = strHelp.substr(0, strHelp.find('\n'));
strRet += strHelp + "\n";
}
}
if (strRet == "")
strRet = strprintf("help: unknown command: %s\n", strCommand.c_str());
strRet = strRet.substr(0,strRet.size()-1);
return strRet;
}
Value help(const Array& params, bool fHelp)
{
if (fHelp || params.size() > 1)
throw runtime_error(
"help [command]\n"
"List commands, or get help for a command.");
string strCommand;
if (params.size() > 0)
strCommand = params[0].get_str();
return tableRPC.help(strCommand);
}
Value stop(const Array& params, bool fHelp)
{
// Accept the deprecated and ignored 'detach' boolean argument
if (fHelp || params.size() > 1)
throw runtime_error(
"stop\n"
"Stop TMCoin server.");
// Shutdown will take long enough that the response should get back
StartShutdown();
return "TMCoin server stopping";
}
//
// Call Table
//
static const CRPCCommand vRPCCommands[] =
{ // name actor (function) okSafeMode threadSafe reqWallet
// ------------------------ ----------------------- ---------- ---------- ---------
{ "help", &help, true, true, false },
{ "stop", &stop, true, true, false },
{ "getblockcount", &getblockcount, true, false, false },
{ "getbestblockhash", &getbestblockhash, true, false, false },
{ "getconnectioncount", &getconnectioncount, true, false, false },
{ "getpeerinfo", &getpeerinfo, true, false, false },
{ "addnode", &addnode, true, true, false },
{ "getaddednodeinfo", &getaddednodeinfo, true, true, false },
{ "getdifficulty", &getdifficulty, true, false, false },
{ "getnetworkhashps", &getnetworkhashps, true, false, false },
{ "getgenerate", &getgenerate, true, false, false },
{ "setgenerate", &setgenerate, true, false, true },
{ "gethashespersec", &gethashespersec, true, false, false },
{ "getinfo", &getinfo, true, false, false },
{ "getmininginfo", &getmininginfo, true, false, false },
{ "getnewaddress", &getnewaddress, true, false, true },
{ "getaccountaddress", &getaccountaddress, true, false, true },
{ "setaccount", &setaccount, true, false, true },
{ "getaccount", &getaccount, false, false, true },
{ "getaddressesbyaccount", &getaddressesbyaccount, true, false, true },
{ "sendtoaddress", &sendtoaddress, false, false, true },
{ "getreceivedbyaddress", &getreceivedbyaddress, false, false, true },
{ "getreceivedbyaccount", &getreceivedbyaccount, false, false, true },
{ "listreceivedbyaddress", &listreceivedbyaddress, false, false, true },
{ "listreceivedbyaccount", &listreceivedbyaccount, false, false, true },
{ "backupwallet", &backupwallet, true, false, true },
{ "keypoolrefill", &keypoolrefill, true, false, true },
{ "walletpassphrase", &walletpassphrase, true, false, true },
{ "walletpassphrasechange", &walletpassphrasechange, false, false, true },
{ "walletlock", &walletlock, true, false, true },
{ "encryptwallet", &encryptwallet, false, false, true },
{ "validateaddress", &validateaddress, true, false, false },
{ "getbalance", &getbalance, false, false, true },
{ "move", &movecmd, false, false, true },
{ "sendfrom", &sendfrom, false, false, true },
{ "sendmany", &sendmany, false, false, true },
{ "addmultisigaddress", &addmultisigaddress, false, false, true },
{ "createmultisig", &createmultisig, true, true , false },
{ "getrawmempool", &getrawmempool, true, false, false },
{ "getblock", &getblock, false, false, false },
{ "getblockhash", &getblockhash, false, false, false },
{ "gettransaction", &gettransaction, false, false, true },
{ "listtransactions", &listtransactions, false, false, true },
{ "listaddressgroupings", &listaddressgroupings, false, false, true },
{ "signmessage", &signmessage, false, false, true },
{ "verifymessage", &verifymessage, false, false, false },
{ "getwork", &getwork, true, false, true },
{ "getworkex", &getworkex, true, false, true },
{ "listaccounts", &listaccounts, false, false, true },
{ "settxfee", &settxfee, false, false, true },
{ "getblocktemplate", &getblocktemplate, true, false, false },
{ "submitblock", &submitblock, false, false, false },
{ "setmininput", &setmininput, false, false, false },
{ "listsinceblock", &listsinceblock, false, false, true },
{ "makekeypair", &makekeypair, true, false, true },
{ "dumpprivkey", &dumpprivkey, true, false, true },
{ "importprivkey", &importprivkey, false, false, true },
{ "listunspent", &listunspent, false, false, true },
{ "getrawtransaction", &getrawtransaction, false, false, false },
{ "createrawtransaction", &createrawtransaction, false, false, false },
{ "decoderawtransaction", &decoderawtransaction, false, false, false },
{ "signrawtransaction", &signrawtransaction, false, false, false },
{ "sendrawtransaction", &sendrawtransaction, false, false, false },
{ "gettxoutsetinfo", &gettxoutsetinfo, true, false, false },
{ "gettxout", &gettxout, true, false, false },
{ "lockunspent", &lockunspent, false, false, true },
{ "listlockunspent", &listlockunspent, false, false, true },
{ "verifychain", &verifychain, true, false, false },
};
CRPCTable::CRPCTable()
{
unsigned int vcidx;
for (vcidx = 0; vcidx < (sizeof(vRPCCommands) / sizeof(vRPCCommands[0])); vcidx++)
{
const CRPCCommand *pcmd;
pcmd = &vRPCCommands[vcidx];
mapCommands[pcmd->name] = pcmd;
}
}
const CRPCCommand *CRPCTable::operator[](string name) const
{
map<string, const CRPCCommand*>::const_iterator it = mapCommands.find(name);
if (it == mapCommands.end())
return NULL;
return (*it).second;
}
//
// HTTP protocol
//
// This ain't Apache. We're just using HTTP header for the length field
// and to be compatible with other JSON-RPC implementations.
//
string HTTPPost(const string& strMsg, const map<string,string>& mapRequestHeaders)
{
ostringstream s;
s << "POST / HTTP/1.1\r\n"
<< "User-Agent: tmcoin-json-rpc/" << FormatFullVersion() << "\r\n"
<< "Host: 127.0.0.1\r\n"
<< "Content-Type: application/json\r\n"
<< "Content-Length: " << strMsg.size() << "\r\n"
<< "Connection: close\r\n"
<< "Accept: application/json\r\n";
BOOST_FOREACH(const PAIRTYPE(string, string)& item, mapRequestHeaders)
s << item.first << ": " << item.second << "\r\n";
s << "\r\n" << strMsg;
return s.str();
}
string rfc1123Time()
{
char buffer[64];
time_t now;
time(&now);
struct tm* now_gmt = gmtime(&now);
string locale(setlocale(LC_TIME, NULL));
setlocale(LC_TIME, "C"); // we want POSIX (aka "C") weekday/month strings
strftime(buffer, sizeof(buffer), "%a, %d %b %Y %H:%M:%S +0000", now_gmt);
setlocale(LC_TIME, locale.c_str());
return string(buffer);
}
static string HTTPReply(int nStatus, const string& strMsg, bool keepalive)
{
if (nStatus == HTTP_UNAUTHORIZED)
return strprintf("HTTP/1.0 401 Authorization Required\r\n"
"Date: %s\r\n"
"Server: tmcoin-json-rpc/%s\r\n"
"WWW-Authenticate: Basic realm=\"jsonrpc\"\r\n"
"Content-Type: text/html\r\n"
"Content-Length: 296\r\n"
"\r\n"
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\"\r\n"
"\"http://www.w3.org/TR/1999/REC-html401-19991224/loose.dtd\">\r\n"
"<HTML>\r\n"
"<HEAD>\r\n"
"<TITLE>Error</TITLE>\r\n"
"<META HTTP-EQUIV='Content-Type' CONTENT='text/html; charset=ISO-8859-1'>\r\n"
"</HEAD>\r\n"
"<BODY><H1>401 Unauthorized.</H1></BODY>\r\n"
"</HTML>\r\n", rfc1123Time().c_str(), FormatFullVersion().c_str());
const char *cStatus;
if (nStatus == HTTP_OK) cStatus = "OK";
else if (nStatus == HTTP_BAD_REQUEST) cStatus = "Bad Request";
else if (nStatus == HTTP_FORBIDDEN) cStatus = "Forbidden";
else if (nStatus == HTTP_NOT_FOUND) cStatus = "Not Found";
else if (nStatus == HTTP_INTERNAL_SERVER_ERROR) cStatus = "Internal Server Error";
else cStatus = "";
return strprintf(
"HTTP/1.1 %d %s\r\n"
"Date: %s\r\n"
"Connection: %s\r\n"
"Content-Length: %"PRIszu"\r\n"
"Content-Type: application/json\r\n"
"Server: tmcoin-json-rpc/%s\r\n"
"\r\n"
"%s",
nStatus,
cStatus,
rfc1123Time().c_str(),
keepalive ? "keep-alive" : "close",
strMsg.size(),
FormatFullVersion().c_str(),
strMsg.c_str());
}
bool ReadHTTPRequestLine(std::basic_istream<char>& stream, int &proto,
string& http_method, string& http_uri)
{
string str;
getline(stream, str);
// HTTP request line is space-delimited
vector<string> vWords;
boost::split(vWords, str, boost::is_any_of(" "));
if (vWords.size() < 2)
return false;
// HTTP methods permitted: GET, POST
http_method = vWords[0];
if (http_method != "GET" && http_method != "POST")
return false;
// HTTP URI must be an absolute path, relative to current host
http_uri = vWords[1];
if (http_uri.size() == 0 || http_uri[0] != '/')
return false;
// parse proto, if present
string strProto = "";
if (vWords.size() > 2)
strProto = vWords[2];
proto = 0;
const char *ver = strstr(strProto.c_str(), "HTTP/1.");
if (ver != NULL)
proto = atoi(ver+7);
return true;
}
int ReadHTTPStatus(std::basic_istream<char>& stream, int &proto)
{
string str;
getline(stream, str);
vector<string> vWords;
boost::split(vWords, str, boost::is_any_of(" "));
if (vWords.size() < 2)
return HTTP_INTERNAL_SERVER_ERROR;
proto = 0;
const char *ver = strstr(str.c_str(), "HTTP/1.");
if (ver != NULL)
proto = atoi(ver+7);
return atoi(vWords[1].c_str());
}
int ReadHTTPHeaders(std::basic_istream<char>& stream, map<string, string>& mapHeadersRet)
{
int nLen = 0;
loop
{
string str;
std::getline(stream, str);
if (str.empty() || str == "\r")
break;
string::size_type nColon = str.find(":");
if (nColon != string::npos)
{
string strHeader = str.substr(0, nColon);
boost::trim(strHeader);
boost::to_lower(strHeader);
string strValue = str.substr(nColon+1);
boost::trim(strValue);
mapHeadersRet[strHeader] = strValue;
if (strHeader == "content-length")
nLen = atoi(strValue.c_str());
}
}
return nLen;
}
int ReadHTTPMessage(std::basic_istream<char>& stream, map<string,
string>& mapHeadersRet, string& strMessageRet,
int nProto)
{
mapHeadersRet.clear();
strMessageRet = "";
// Read header
int nLen = ReadHTTPHeaders(stream, mapHeadersRet);
if (nLen < 0 || nLen > (int)MAX_SIZE)
return HTTP_INTERNAL_SERVER_ERROR;
// Read message
if (nLen > 0)
{
vector<char> vch(nLen);
stream.read(&vch[0], nLen);
strMessageRet = string(vch.begin(), vch.end());
}
string sConHdr = mapHeadersRet["connection"];
if ((sConHdr != "close") && (sConHdr != "keep-alive"))
{
if (nProto >= 1)
mapHeadersRet["connection"] = "keep-alive";
else
mapHeadersRet["connection"] = "close";
}
return HTTP_OK;
}
bool HTTPAuthorized(map<string, string>& mapHeaders)
{
string strAuth = mapHeaders["authorization"];
if (strAuth.substr(0,6) != "Basic ")
return false;
string strUserPass64 = strAuth.substr(6); boost::trim(strUserPass64);
string strUserPass = DecodeBase64(strUserPass64);
return TimingResistantEqual(strUserPass, strRPCUserColonPass);
}
//
// JSON-RPC protocol. Bitcoin speaks version 1.0 for maximum compatibility,
// but uses JSON-RPC 1.1/2.0 standards for parts of the 1.0 standard that were
// unspecified (HTTP errors and contents of 'error').
//
// 1.0 spec: http://json-rpc.org/wiki/specification
// 1.2 spec: http://groups.google.com/group/json-rpc/web/json-rpc-over-http
// http://www.codeproject.com/KB/recipes/JSON_Spirit.aspx
//
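// Illustrative exchange (shapes match JSONRPCRequest/JSONRPCReplyObj below;
// the method shown is just an example):
//   --> {"method":"getblockcount","params":[],"id":1}
//   <-- {"result":123456,"error":null,"id":1}
//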
string JSONRPCRequest(const string& strMethod, const Array& params, const Value& id)
{
Object request;
request.push_back(Pair("method", strMethod));
request.push_back(Pair("params", params));
request.push_back(Pair("id", id));
return write_string(Value(request), false) + "\n";
}
Object JSONRPCReplyObj(const Value& result, const Value& error, const Value& id)
{
Object reply;
if (error.type() != null_type)
reply.push_back(Pair("result", Value::null));
else
reply.push_back(Pair("result", result));
reply.push_back(Pair("error", error));
reply.push_back(Pair("id", id));
return reply;
}
string JSONRPCReply(const Value& result, const Value& error, const Value& id)
{
Object reply = JSONRPCReplyObj(result, error, id);
return write_string(Value(reply), false) + "\n";
}
void ErrorReply(std::ostream& stream, const Object& objError, const Value& id)
{
// Send error reply from json-rpc error object
int nStatus = HTTP_INTERNAL_SERVER_ERROR;
int code = find_value(objError, "code").get_int();
if (code == RPC_INVALID_REQUEST) nStatus = HTTP_BAD_REQUEST;
else if (code == RPC_METHOD_NOT_FOUND) nStatus = HTTP_NOT_FOUND;
string strReply = JSONRPCReply(Value::null, objError, id);
stream << HTTPReply(nStatus, strReply, false) << std::flush;
}
bool ClientAllowed(const boost::asio::ip::address& address)
{
// Make sure that IPv4-compatible and IPv4-mapped IPv6 addresses are treated as IPv4 addresses
if (address.is_v6()
&& (address.to_v6().is_v4_compatible()
|| address.to_v6().is_v4_mapped()))
return ClientAllowed(address.to_v6().to_v4());
if (address == asio::ip::address_v4::loopback()
|| address == asio::ip::address_v6::loopback()
|| (address.is_v4()
// Check whether IPv4 addresses match 127.0.0.0/8 (loopback subnet)
&& (address.to_v4().to_ulong() & 0xff000000) == 0x7f000000))
return true;
const string strAddress = address.to_string();
const vector<string>& vAllow = mapMultiArgs["-rpcallowip"];
BOOST_FOREACH(string strAllow, vAllow)
if (WildcardMatch(strAddress, strAllow))
return true;
return false;
}
//
// IOStream device that speaks SSL but can also speak non-SSL
//
template <typename Protocol>
class SSLIOStreamDevice : public iostreams::device<iostreams::bidirectional> {
public:
SSLIOStreamDevice(asio::ssl::stream<typename Protocol::socket> &streamIn, bool fUseSSLIn) : stream(streamIn)
{
fUseSSL = fUseSSLIn;
fNeedHandshake = fUseSSLIn;
}
void handshake(ssl::stream_base::handshake_type role)
{
if (!fNeedHandshake) return;
fNeedHandshake = false;
stream.handshake(role);
}
std::streamsize read(char* s, std::streamsize n)
{
handshake(ssl::stream_base::server); // HTTPS servers read first
if (fUseSSL) return stream.read_some(asio::buffer(s, n));
return stream.next_layer().read_some(asio::buffer(s, n));
}
std::streamsize write(const char* s, std::streamsize n)
{
handshake(ssl::stream_base::client); // HTTPS clients write first
if (fUseSSL) return asio::write(stream, asio::buffer(s, n));
return asio::write(stream.next_layer(), asio::buffer(s, n));
}
bool connect(const std::string& server, const std::string& port)
{
ip::tcp::resolver resolver(stream.get_io_service());
ip::tcp::resolver::query query(server.c_str(), port.c_str());
ip::tcp::resolver::iterator endpoint_iterator = resolver.resolve(query);
ip::tcp::resolver::iterator end;
boost::system::error_code error = asio::error::host_not_found;
while (error && endpoint_iterator != end)
{
stream.lowest_layer().close();
stream.lowest_layer().connect(*endpoint_iterator++, error);
}
if (error)
return false;
return true;
}
private:
bool fNeedHandshake;
bool fUseSSL;
asio::ssl::stream<typename Protocol::socket>& stream;
};
class AcceptedConnection
{
public:
virtual ~AcceptedConnection() {}
virtual std::iostream& stream() = 0;
virtual std::string peer_address_to_string() const = 0;
virtual void close() = 0;
};
template <typename Protocol>
class AcceptedConnectionImpl : public AcceptedConnection
{
public:
AcceptedConnectionImpl(
asio::io_service& io_service,
ssl::context &context,
bool fUseSSL) :
sslStream(io_service, context),
_d(sslStream, fUseSSL),
_stream(_d)
{
}
virtual std::iostream& stream()
{
return _stream;
}
virtual std::string peer_address_to_string() const
{
return peer.address().to_string();
}
virtual void close()
{
_stream.close();
}
typename Protocol::endpoint peer;
asio::ssl::stream<typename Protocol::socket> sslStream;
private:
SSLIOStreamDevice<Protocol> _d;
iostreams::stream< SSLIOStreamDevice<Protocol> > _stream;
};
void ServiceConnection(AcceptedConnection *conn);
// Forward declaration required for RPCListen
template <typename Protocol, typename SocketAcceptorService>
static void RPCAcceptHandler(boost::shared_ptr< basic_socket_acceptor<Protocol, SocketAcceptorService> > acceptor,
ssl::context& context,
bool fUseSSL,
AcceptedConnection* conn,
const boost::system::error_code& error);
/**
* Sets up I/O resources to accept and handle a new connection.
*/
template <typename Protocol, typename SocketAcceptorService>
static void RPCListen(boost::shared_ptr< basic_socket_acceptor<Protocol, SocketAcceptorService> > acceptor,
ssl::context& context,
const bool fUseSSL)
{
// Accept connection
AcceptedConnectionImpl<Protocol>* conn = new AcceptedConnectionImpl<Protocol>(acceptor->get_io_service(), context, fUseSSL);
acceptor->async_accept(
conn->sslStream.lowest_layer(),
conn->peer,
boost::bind(&RPCAcceptHandler<Protocol, SocketAcceptorService>,
acceptor,
boost::ref(context),
fUseSSL,
conn,
boost::asio::placeholders::error));
}
/**
* Accept and handle incoming connection.
*/
template <typename Protocol, typename SocketAcceptorService>
static void RPCAcceptHandler(boost::shared_ptr< basic_socket_acceptor<Protocol, SocketAcceptorService> > acceptor,
ssl::context& context,
const bool fUseSSL,
AcceptedConnection* conn,
const boost::system::error_code& error)
{
// Immediately start accepting new connections, except when we're cancelled or our socket is closed.
if (error != asio::error::operation_aborted && acceptor->is_open())
RPCListen(acceptor, context, fUseSSL);
AcceptedConnectionImpl<ip::tcp>* tcp_conn = dynamic_cast< AcceptedConnectionImpl<ip::tcp>* >(conn);
// TODO: Actually handle errors
if (error)
{
delete conn;
}
// Restrict callers by IP. It is important to
// do this before starting client thread, to filter out
// certain DoS and misbehaving clients.
else if (tcp_conn && !ClientAllowed(tcp_conn->peer.address()))
{
// Only send a 403 if we're not using SSL to prevent a DoS during the SSL handshake.
if (!fUseSSL)
conn->stream() << HTTPReply(HTTP_FORBIDDEN, "", false) << std::flush;
delete conn;
}
else {
ServiceConnection(conn);
conn->close();
delete conn;
}
}
void StartRPCThreads()
{
strRPCUserColonPass = mapArgs["-rpcuser"] + ":" + mapArgs["-rpcpassword"];
if ((mapArgs["-rpcpassword"] == "") ||
(mapArgs["-rpcuser"] == mapArgs["-rpcpassword"]))
{
unsigned char rand_pwd[32];
RAND_bytes(rand_pwd, 32);
string strWhatAmI = "To use tmcoind";
if (mapArgs.count("-server"))
strWhatAmI = strprintf(_("To use the %s option"), "\"-server\"");
else if (mapArgs.count("-daemon"))
strWhatAmI = strprintf(_("To use the %s option"), "\"-daemon\"");
uiInterface.ThreadSafeMessageBox(strprintf(
_("%s, you must set a rpcpassword in the configuration file:\n"
"%s\n"
"It is recommended you use the following random password:\n"
"rpcuser=tmcoinrpc\n"
"rpcpassword=%s\n"
"(you do not need to remember this password)\n"
"The username and password MUST NOT be the same.\n"
"If the file does not exist, create it with owner-readable-only file permissions.\n"
"It is also recommended to set alertnotify so you are notified of problems;\n"
"for example: alertnotify=echo %%s | mail -s \"TMCoin Alert\" [email protected]\n"),
strWhatAmI.c_str(),
GetConfigFile().string().c_str(),
EncodeBase58(&rand_pwd[0],&rand_pwd[0]+32).c_str()),
"", CClientUIInterface::MSG_ERROR);
StartShutdown();
return;
}
assert(rpc_io_service == NULL);
rpc_io_service = new asio::io_service();
rpc_ssl_context = new ssl::context(*rpc_io_service, ssl::context::sslv23);
const bool fUseSSL = GetBoolArg("-rpcssl");
if (fUseSSL)
{
rpc_ssl_context->set_options(ssl::context::no_sslv2);
filesystem::path pathCertFile(GetArg("-rpcsslcertificatechainfile", "server.cert"));
if (!pathCertFile.is_complete()) pathCertFile = filesystem::path(GetDataDir()) / pathCertFile;
if (filesystem::exists(pathCertFile)) rpc_ssl_context->use_certificate_chain_file(pathCertFile.string());
else printf("ThreadRPCServer ERROR: missing server certificate file %s\n", pathCertFile.string().c_str());
filesystem::path pathPKFile(GetArg("-rpcsslprivatekeyfile", "server.pem"));
if (!pathPKFile.is_complete()) pathPKFile = filesystem::path(GetDataDir()) / pathPKFile;
if (filesystem::exists(pathPKFile)) rpc_ssl_context->use_private_key_file(pathPKFile.string(), ssl::context::pem);
else printf("ThreadRPCServer ERROR: missing server private key file %s\n", pathPKFile.string().c_str());
string strCiphers = GetArg("-rpcsslciphers", "TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH");
SSL_CTX_set_cipher_list(rpc_ssl_context->impl(), strCiphers.c_str());
}
// Try a dual IPv6/IPv4 socket, falling back to separate IPv4 and IPv6 sockets
const bool loopback = !mapArgs.count("-rpcallowip");
asio::ip::address bindAddress = loopback ? asio::ip::address_v6::loopback() : asio::ip::address_v6::any();
ip::tcp::endpoint endpoint(bindAddress, GetArg("-rpcport", GetDefaultRPCPort()));
boost::system::error_code v6_only_error;
boost::shared_ptr<ip::tcp::acceptor> acceptor(new ip::tcp::acceptor(*rpc_io_service));
bool fListening = false;
std::string strerr;
try
{
acceptor->open(endpoint.protocol());
acceptor->set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
// Try making the socket dual IPv6/IPv4 (if listening on the "any" address)
acceptor->set_option(boost::asio::ip::v6_only(loopback), v6_only_error);
acceptor->bind(endpoint);
acceptor->listen(socket_base::max_connections);
RPCListen(acceptor, *rpc_ssl_context, fUseSSL);
fListening = true;
}
catch(boost::system::system_error &e)
{
strerr = strprintf(_("An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s"), endpoint.port(), e.what());
}
try {
// If dual IPv6/IPv4 failed (or we're opening loopback interfaces only), open IPv4 separately
if (!fListening || loopback || v6_only_error)
{
bindAddress = loopback ? asio::ip::address_v4::loopback() : asio::ip::address_v4::any();
endpoint.address(bindAddress);
acceptor.reset(new ip::tcp::acceptor(*rpc_io_service));
acceptor->open(endpoint.protocol());
acceptor->set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
acceptor->bind(endpoint);
acceptor->listen(socket_base::max_connections);
RPCListen(acceptor, *rpc_ssl_context, fUseSSL);
fListening = true;
}
}
catch(boost::system::system_error &e)
{
strerr = strprintf(_("An error occurred while setting up the RPC port %u for listening on IPv4: %s"), endpoint.port(), e.what());
}
if (!fListening) {
uiInterface.ThreadSafeMessageBox(strerr, "", CClientUIInterface::MSG_ERROR);
StartShutdown();
return;
}
rpc_worker_group = new boost::thread_group();
for (int i = 0; i < GetArg("-rpcthreads", 4); i++)
rpc_worker_group->create_thread(boost::bind(&asio::io_service::run, rpc_io_service));
}
void StopRPCThreads()
{
if (rpc_io_service == NULL) return;
rpc_io_service->stop();
rpc_worker_group->join_all();
delete rpc_worker_group; rpc_worker_group = NULL;
delete rpc_ssl_context; rpc_ssl_context = NULL;
delete rpc_io_service; rpc_io_service = NULL;
}
class JSONRequest
{
public:
Value id;
string strMethod;
Array params;
JSONRequest() { id = Value::null; }
void parse(const Value& valRequest);
};
void JSONRequest::parse(const Value& valRequest)
{
// Parse request
if (valRequest.type() != obj_type)
throw JSONRPCError(RPC_INVALID_REQUEST, "Invalid Request object");
const Object& request = valRequest.get_obj();
// Parse id now so errors from here on will have the id
id = find_value(request, "id");
// Parse method
Value valMethod = find_value(request, "method");
if (valMethod.type() == null_type)
throw JSONRPCError(RPC_INVALID_REQUEST, "Missing method");
if (valMethod.type() != str_type)
throw JSONRPCError(RPC_INVALID_REQUEST, "Method must be a string");
strMethod = valMethod.get_str();
if (strMethod != "getwork" && strMethod != "getworkex" && strMethod != "getblocktemplate")
printf("ThreadRPCServer method=%s\n", strMethod.c_str());
// Parse params
Value valParams = find_value(request, "params");
if (valParams.type() == array_type)
params = valParams.get_array();
else if (valParams.type() == null_type)
params = Array();
else
throw JSONRPCError(RPC_INVALID_REQUEST, "Params must be an array");
}
static Object JSONRPCExecOne(const Value& req)
{
Object rpc_result;<|fim▁hole|> jreq.parse(req);
Value result = tableRPC.execute(jreq.strMethod, jreq.params);
rpc_result = JSONRPCReplyObj(result, Value::null, jreq.id);
}
catch (Object& objError)
{
rpc_result = JSONRPCReplyObj(Value::null, objError, jreq.id);
}
catch (std::exception& e)
{
rpc_result = JSONRPCReplyObj(Value::null,
JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id);
}
return rpc_result;
}
static string JSONRPCExecBatch(const Array& vReq)
{
Array ret;
for (unsigned int reqIdx = 0; reqIdx < vReq.size(); reqIdx++)
ret.push_back(JSONRPCExecOne(vReq[reqIdx]));
return write_string(Value(ret), false) + "\n";
}
void ServiceConnection(AcceptedConnection *conn)
{
bool fRun = true;
while (fRun)
{
int nProto = 0;
map<string, string> mapHeaders;
string strRequest, strMethod, strURI;
// Read HTTP request line
if (!ReadHTTPRequestLine(conn->stream(), nProto, strMethod, strURI))
break;
// Read HTTP message headers and body
ReadHTTPMessage(conn->stream(), mapHeaders, strRequest, nProto);
if (strURI != "/") {
conn->stream() << HTTPReply(HTTP_NOT_FOUND, "", false) << std::flush;
break;
}
// Check authorization
if (mapHeaders.count("authorization") == 0)
{
conn->stream() << HTTPReply(HTTP_UNAUTHORIZED, "", false) << std::flush;
break;
}
if (!HTTPAuthorized(mapHeaders))
{
printf("ThreadRPCServer incorrect password attempt from %s\n", conn->peer_address_to_string().c_str());
/* Deter brute-forcing short passwords.
If this results in a DOS the user really
shouldn't have their RPC port exposed.*/
if (mapArgs["-rpcpassword"].size() < 20)
MilliSleep(250);
conn->stream() << HTTPReply(HTTP_UNAUTHORIZED, "", false) << std::flush;
break;
}
if (mapHeaders["connection"] == "close")
fRun = false;
JSONRequest jreq;
try
{
// Parse request
Value valRequest;
if (!read_string(strRequest, valRequest))
throw JSONRPCError(RPC_PARSE_ERROR, "Parse error");
string strReply;
// singleton request
if (valRequest.type() == obj_type) {
jreq.parse(valRequest);
Value result = tableRPC.execute(jreq.strMethod, jreq.params);
// Send reply
strReply = JSONRPCReply(result, Value::null, jreq.id);
// array of requests
} else if (valRequest.type() == array_type)
strReply = JSONRPCExecBatch(valRequest.get_array());
else
throw JSONRPCError(RPC_PARSE_ERROR, "Top-level object parse error");
conn->stream() << HTTPReply(HTTP_OK, strReply, fRun) << std::flush;
}
catch (Object& objError)
{
ErrorReply(conn->stream(), objError, jreq.id);
break;
}
catch (std::exception& e)
{
ErrorReply(conn->stream(), JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id);
break;
}
}
}
json_spirit::Value CRPCTable::execute(const std::string &strMethod, const json_spirit::Array ¶ms) const
{
// Find method
const CRPCCommand *pcmd = tableRPC[strMethod];
if (!pcmd)
throw JSONRPCError(RPC_METHOD_NOT_FOUND, "Method not found");
if (pcmd->reqWallet && !pwalletMain)
throw JSONRPCError(RPC_METHOD_NOT_FOUND, "Method not found (disabled)");
// Observe safe mode
string strWarning = GetWarnings("rpc");
if (strWarning != "" && !GetBoolArg("-disablesafemode") &&
!pcmd->okSafeMode)
throw JSONRPCError(RPC_FORBIDDEN_BY_SAFE_MODE, string("Safe mode: ") + strWarning);
try
{
// Execute
Value result;
{
if (pcmd->threadSafe)
result = pcmd->actor(params, false);
else if (!pwalletMain) {
LOCK(cs_main);
result = pcmd->actor(params, false);
} else {
LOCK2(cs_main, pwalletMain->cs_wallet);
result = pcmd->actor(params, false);
}
}
return result;
}
catch (std::exception& e)
{
throw JSONRPCError(RPC_MISC_ERROR, e.what());
}
}
Object CallRPC(const string& strMethod, const Array& params)
{
if (mapArgs["-rpcuser"] == "" && mapArgs["-rpcpassword"] == "")
throw runtime_error(strprintf(
_("You must set rpcpassword=<password> in the configuration file:\n%s\n"
"If the file does not exist, create it with owner-readable-only file permissions."),
GetConfigFile().string().c_str()));
// Connect to localhost
bool fUseSSL = GetBoolArg("-rpcssl");
asio::io_service io_service;
ssl::context context(io_service, ssl::context::sslv23);
context.set_options(ssl::context::no_sslv2);
asio::ssl::stream<asio::ip::tcp::socket> sslStream(io_service, context);
SSLIOStreamDevice<asio::ip::tcp> d(sslStream, fUseSSL);
iostreams::stream< SSLIOStreamDevice<asio::ip::tcp> > stream(d);
if (!d.connect(GetArg("-rpcconnect", "127.0.0.1"), GetArg("-rpcport", itostr(GetDefaultRPCPort()))))
throw runtime_error("couldn't connect to server");
// HTTP basic authentication
string strUserPass64 = EncodeBase64(mapArgs["-rpcuser"] + ":" + mapArgs["-rpcpassword"]);
map<string, string> mapRequestHeaders;
mapRequestHeaders["Authorization"] = string("Basic ") + strUserPass64;
// Send request
string strRequest = JSONRPCRequest(strMethod, params, 1);
string strPost = HTTPPost(strRequest, mapRequestHeaders);
stream << strPost << std::flush;
// Receive HTTP reply status
int nProto = 0;
int nStatus = ReadHTTPStatus(stream, nProto);
// Receive HTTP reply message headers and body
map<string, string> mapHeaders;
string strReply;
ReadHTTPMessage(stream, mapHeaders, strReply, nProto);
if (nStatus == HTTP_UNAUTHORIZED)
throw runtime_error("incorrect rpcuser or rpcpassword (authorization failed)");
else if (nStatus >= 400 && nStatus != HTTP_BAD_REQUEST && nStatus != HTTP_NOT_FOUND && nStatus != HTTP_INTERNAL_SERVER_ERROR)
throw runtime_error(strprintf("server returned HTTP error %d", nStatus));
else if (strReply.empty())
throw runtime_error("no response from server");
// Parse reply
Value valReply;
if (!read_string(strReply, valReply))
throw runtime_error("couldn't parse reply from server");
const Object& reply = valReply.get_obj();
if (reply.empty())
throw runtime_error("expected reply to have result, error and id properties");
return reply;
}
template<typename T>
void ConvertTo(Value& value, bool fAllowNull=false)
{
if (fAllowNull && value.type() == null_type)
return;
if (value.type() == str_type)
{
// reinterpret string as unquoted json value
Value value2;
string strJSON = value.get_str();
if (!read_string(strJSON, value2))
throw runtime_error(string("Error parsing JSON:")+strJSON);
ConvertTo<T>(value2, fAllowNull);
value = value2;
}
else
{
value = value.get_value<T>();
}
}
// Convert strings to command-specific RPC representation
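// Illustrative: for `getblockhash 1000` the string "1000" is re-parsed into
// the JSON number 1000 (via ConvertTo above) before the call is dispatched.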
Array RPCConvertValues(const std::string &strMethod, const std::vector<std::string> &strParams)
{
Array params;
BOOST_FOREACH(const std::string ¶m, strParams)
params.push_back(param);
int n = params.size();
//
// Special case non-string parameter types
//
if (strMethod == "stop" && n > 0) ConvertTo<bool>(params[0]);
if (strMethod == "getaddednodeinfo" && n > 0) ConvertTo<bool>(params[0]);
if (strMethod == "setgenerate" && n > 0) ConvertTo<bool>(params[0]);
if (strMethod == "setgenerate" && n > 1) ConvertTo<boost::int64_t>(params[1]);
if (strMethod == "getnetworkhashps" && n > 0) ConvertTo<boost::int64_t>(params[0]);
if (strMethod == "getnetworkhashps" && n > 1) ConvertTo<boost::int64_t>(params[1]);
if (strMethod == "sendtoaddress" && n > 1) ConvertTo<double>(params[1]);
if (strMethod == "settxfee" && n > 0) ConvertTo<double>(params[0]);
if (strMethod == "setmininput" && n > 0) ConvertTo<double>(params[0]);
if (strMethod == "getreceivedbyaddress" && n > 1) ConvertTo<boost::int64_t>(params[1]);
if (strMethod == "getreceivedbyaccount" && n > 1) ConvertTo<boost::int64_t>(params[1]);
if (strMethod == "listreceivedbyaddress" && n > 0) ConvertTo<boost::int64_t>(params[0]);
if (strMethod == "listreceivedbyaddress" && n > 1) ConvertTo<bool>(params[1]);
if (strMethod == "listreceivedbyaccount" && n > 0) ConvertTo<boost::int64_t>(params[0]);
if (strMethod == "listreceivedbyaccount" && n > 1) ConvertTo<bool>(params[1]);
if (strMethod == "getbalance" && n > 1) ConvertTo<boost::int64_t>(params[1]);
if (strMethod == "getblockhash" && n > 0) ConvertTo<boost::int64_t>(params[0]);
if (strMethod == "move" && n > 2) ConvertTo<double>(params[2]);
if (strMethod == "move" && n > 3) ConvertTo<boost::int64_t>(params[3]);
if (strMethod == "sendfrom" && n > 2) ConvertTo<double>(params[2]);
if (strMethod == "sendfrom" && n > 3) ConvertTo<boost::int64_t>(params[3]);
if (strMethod == "listtransactions" && n > 1) ConvertTo<boost::int64_t>(params[1]);
if (strMethod == "listtransactions" && n > 2) ConvertTo<boost::int64_t>(params[2]);
if (strMethod == "listaccounts" && n > 0) ConvertTo<boost::int64_t>(params[0]);
if (strMethod == "walletpassphrase" && n > 1) ConvertTo<boost::int64_t>(params[1]);
if (strMethod == "getblocktemplate" && n > 0) ConvertTo<Object>(params[0]);
if (strMethod == "listsinceblock" && n > 1) ConvertTo<boost::int64_t>(params[1]);
if (strMethod == "sendmany" && n > 1) ConvertTo<Object>(params[1]);
if (strMethod == "sendmany" && n > 2) ConvertTo<boost::int64_t>(params[2]);
if (strMethod == "addmultisigaddress" && n > 0) ConvertTo<boost::int64_t>(params[0]);
if (strMethod == "addmultisigaddress" && n > 1) ConvertTo<Array>(params[1]);
if (strMethod == "createmultisig" && n > 0) ConvertTo<boost::int64_t>(params[0]);
if (strMethod == "createmultisig" && n > 1) ConvertTo<Array>(params[1]);
if (strMethod == "listunspent" && n > 0) ConvertTo<boost::int64_t>(params[0]);
if (strMethod == "listunspent" && n > 1) ConvertTo<boost::int64_t>(params[1]);
if (strMethod == "listunspent" && n > 2) ConvertTo<Array>(params[2]);
if (strMethod == "getblock" && n > 1) ConvertTo<bool>(params[1]);
if (strMethod == "getrawtransaction" && n > 1) ConvertTo<boost::int64_t>(params[1]);
if (strMethod == "createrawtransaction" && n > 0) ConvertTo<Array>(params[0]);
if (strMethod == "createrawtransaction" && n > 1) ConvertTo<Object>(params[1]);
if (strMethod == "signrawtransaction" && n > 1) ConvertTo<Array>(params[1], true);
if (strMethod == "signrawtransaction" && n > 2) ConvertTo<Array>(params[2], true);
if (strMethod == "gettxout" && n > 1) ConvertTo<boost::int64_t>(params[1]);
if (strMethod == "gettxout" && n > 2) ConvertTo<bool>(params[2]);
if (strMethod == "lockunspent" && n > 0) ConvertTo<bool>(params[0]);
if (strMethod == "lockunspent" && n > 1) ConvertTo<Array>(params[1]);
if (strMethod == "importprivkey" && n > 2) ConvertTo<bool>(params[2]);
if (strMethod == "verifychain" && n > 0) ConvertTo<boost::int64_t>(params[0]);
if (strMethod == "verifychain" && n > 1) ConvertTo<boost::int64_t>(params[1]);
return params;
}
int CommandLineRPC(int argc, char *argv[])
{
string strPrint;
int nRet = 0;
try
{
// Skip switches
while (argc > 1 && IsSwitchChar(argv[1][0]))
{
argc--;
argv++;
}
// Method
if (argc < 2)
throw runtime_error("too few parameters");
string strMethod = argv[1];
// Parameters default to strings
std::vector<std::string> strParams(&argv[2], &argv[argc]);
Array params = RPCConvertValues(strMethod, strParams);
// Execute
Object reply = CallRPC(strMethod, params);
// Parse reply
const Value& result = find_value(reply, "result");
const Value& error = find_value(reply, "error");
if (error.type() != null_type)
{
// Error
strPrint = "error: " + write_string(error, false);
int code = find_value(error.get_obj(), "code").get_int();
nRet = abs(code);
}
else
{
// Result
if (result.type() == null_type)
strPrint = "";
else if (result.type() == str_type)
strPrint = result.get_str();
else
strPrint = write_string(result, true);
}
}
catch (boost::thread_interrupted) {
throw;
}
catch (std::exception& e) {
strPrint = string("error: ") + e.what();
nRet = 87;
}
catch (...) {
PrintException(NULL, "CommandLineRPC()");
}
if (strPrint != "")
{
fprintf((nRet == 0 ? stdout : stderr), "%s\n", strPrint.c_str());
}
return nRet;
}
#ifdef TEST
int main(int argc, char *argv[])
{
#ifdef _MSC_VER
// Turn off Microsoft heap dump noise
_CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
_CrtSetReportFile(_CRT_WARN, CreateFile("NUL", GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, 0));
#endif
setbuf(stdin, NULL);
setbuf(stdout, NULL);
setbuf(stderr, NULL);
try
{
if (argc >= 2 && string(argv[1]) == "-server")
{
printf("server ready\n");
ThreadRPCServer(NULL);
}
else
{
return CommandLineRPC(argc, argv);
}
}
catch (boost::thread_interrupted) {
throw;
}
catch (std::exception& e) {
PrintException(&e, "main()");
} catch (...) {
PrintException(NULL, "main()");
}
return 0;
}
#endif
const CRPCTable tableRPC;<|fim▁end|> |
JSONRequest jreq;
try { |
<|file_name|>mapper_data.py<|end_file_name|><|fim▁begin|>from modelmapper.declarations import Mapper, Field
from modelmapper.qt.fields import QLineEditAccessor
class String(QLineEditAccessor):
<|fim▁hole|> def get_value(self):
return str(self.widget.text())
def set_value(self, value):
self.widget.setText(str(value))
class Integer(QLineEditAccessor):
def get_value(self):
return int(self.widget.text())
def set_value(self, value):
        self.widget.setText(str(int(value)))  # setText expects a string
def get_child_x_mapper(x):
return {
'{}_link'.format(x): (x, 'val_{}'.format(x))
}
def get_d_mapper():
return {
'expediente_link': Mapper('c[0]', 'val_c[0]', get_child_x_mapper('a')),
'masa_bruta_link': Mapper('c[1]', 'val_c[1]', get_child_x_mapper('b')),
'nombre_link': Field('cc', 'val_cc'),
}
def get_model_mapper():
return {
'expediente_link': Field('expediente', String('expediente')),
'masa_bruta_link': Field('masa_bruta', Integer('masa_bruta')),
'nombre_link': Field('nombre', String('nombre'))
}<|fim▁end|> | |
<|file_name|>Excel Sheet Column Title.go<|end_file_name|><|fim▁begin|>func convertToTitle(n int) string {
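    // Bijective base-26 ("Excel column") numbering: 1 -> "A", 26 -> "Z",
    // 27 -> "AA", 703 -> "AAA"; the (n-1) steps compensate for the lack
    // of a zero digit.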
execl := []string{"A", "B","C","D","E","F","G","H", "I", "J","K","L","M","N","O", "P", "Q","R","S","T","U","V","W","X","Y","Z"}
var temp string
for n >0 {
temp = execl[(n-1)%26] +temp
n = (n-1) /26
}
return temp
}
//------------------------------
func convertToTitle(n int) string {<|fim▁hole|> res = append(res, byte('A') + byte(k))
n /= 26
}
for i, j := 0, len(res)-1; i < j; i, j = i+1, j-1 {
res[i], res[j] = res[j], res[i]
}
return string(res)
}<|fim▁end|> | var res []byte
for n > 0 {
n--
k := n % 26 |
<|file_name|>decorators.py<|end_file_name|><|fim▁begin|># Copyright 2013 django-htmlmin authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from functools import wraps
from htmlmin.minify import html_minify
def minified_response(f):
@wraps(f)
def minify(*args, **kwargs):
response = f(*args, **kwargs)
minifiable_status = response.status_code == 200
minifiable_content = 'text/html' in response['Content-Type']
if minifiable_status and minifiable_content:
response.content = html_minify(response.content)
return response
return minify
def not_minified_response(f):
@wraps(f)
def not_minify(*args, **kwargs):
response = f(*args, **kwargs)
response.minify_response = False
return response<|fim▁hole|>
return not_minify<|fim▁end|> | |
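# Illustrative usage on a Django view (view and template names are hypothetical):
#
#   @minified_response
#   def home(request):
#       return render(request, "home.html")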
<|file_name|>genericContextualTypingSpecialization.js<|end_file_name|><|fim▁begin|><|fim▁hole|>b.reduce(function (c, d) {
return c + d;
}, 0); // should not error on '+'<|fim▁end|> | var b;
|
<|file_name|>select.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 Colin Sherratt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::{Arc, Mutex};
use std::collections::HashMap;
use {Signal, ArmedSignal, Pulse, Waiting, Barrier, Signals};
pub struct Inner {
pub ready: Vec<usize>,
pub trigger: Option<Pulse>,
}
pub struct Handle(pub Arc<Mutex<Inner>>);
/// A `Select` listens to 1 or more signals. It will wait until
/// any signal becomes available before Pulsing. `Select` will then
/// return the `Signal` that has been `Pulsed`. `Select` has no defined
/// ordering of events for `Signal`s when there is more than one `Signal`
/// pending.
pub struct Select {
inner: Arc<Mutex<Inner>>,
signals: HashMap<usize, ArmedSignal>,
}
impl Select {
/// Create a new empty `Select`
pub fn new() -> Select {
Select {
inner: Arc::new(Mutex::new(Inner {
ready: Vec::new(),
trigger: None,
})),
signals: HashMap::new(),
}
}
    /// Add a signal to the `Select`; a unique id that is associated
    /// with the signal is returned. This can be used to remove the
    /// signal from the `Select` or to look up the `Pulse` when it fires.
pub fn add(&mut self, pulse: Signal) -> usize {
let id = pulse.id();
let p = pulse.arm(Waiting::select(Handle(self.inner.clone())));
self.signals.insert(id, p);
id
}
    /// Remove a `Signal` from the `Select` using its unique id.
pub fn remove(&mut self, id: usize) -> Option<Signal> {
self.signals
.remove(&id)
.map(|x| x.disarm())
}
/// Convert all the signals present in the `Select` into a `Barrier`
pub fn into_barrier(self) -> Barrier {
let vec: Vec<Signal> = self.signals
.into_iter()
.map(|(_, p)| p.disarm())
.collect();
Barrier::new(&vec)
}
    /// This is a non-blocking attempt to get a `Signal` from a `Select`;
    /// this will return `Some(Signal)` if there is a pending `Signal`
    /// in the select, otherwise it will return `None`.
pub fn try_next(&mut self) -> Option<Signal> {
let mut guard = self.inner.lock().unwrap();
if let Some(x) = guard.ready.pop() {
return Some(self.signals.remove(&x).map(|x| x.disarm()).unwrap());
}
None
}
/// Get the number of Signals being watched
pub fn len(&self) -> usize {
self.signals.len()
}
}
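// Usage sketch (illustrative, based only on the API above):
//
//     let (signal, pulse) = Signal::new();
//     let mut select = Select::new();
//     let id = select.add(signal);
//     pulse.pulse();
//     let fired = select.next().unwrap(); // blocks until some signal fires
//     assert_eq!(fired.id(), id);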
impl Iterator for Select {
type Item = Signal;
fn next(&mut self) -> Option<Signal> {
loop {
if self.signals.len() == 0 {
return None;
}
let pulse = {
let mut guard = self.inner.lock().unwrap();
while let Some(x) = guard.ready.pop() {
if let Some(x) = self.signals.remove(&x) {
return Some(x.disarm());
}
}
let (pulse, t) = Signal::new();
guard.trigger = Some(t);
pulse
};
pulse.wait().unwrap();
}
}
}
impl Signals for Select {
fn signal(&self) -> Signal {
let (pulse, t) = Signal::new();
let mut guard = self.inner.lock().unwrap();
if guard.ready.len() == 0 {
guard.trigger = Some(t);
} else {
t.pulse();
}
pulse
}
}
/// `SelectMap` is a wrapper around a `Select`: rather than using
/// a unique id to find out which signal has been asserted, `SelectMap`
/// will return an associated, user-supplied object.
pub struct SelectMap<T> {
select: Select,
items: HashMap<usize, T>,
}
impl<T> SelectMap<T> {
/// Create a new empty `SelectMap`
pub fn new() -> SelectMap<T> {
SelectMap {
select: Select::new(),
items: HashMap::new(),
}
}
/// Add a `Signal` and an associated value into the `SelectMap`
pub fn add(&mut self, signal: Signal, value: T) {
let id = self.select.add(signal);
self.items.insert(id, value);
}<|fim▁hole|> /// This is a non-blocking attempt to get a `Signal` from a `SelectMap`
    /// this will return `Some((Signal, T))` if there is a pending `Signal`
    /// in the select; otherwise it will return `None`.
pub fn try_next(&mut self) -> Option<(Signal, T)> {
self.select.try_next().map(|x| {
let id = x.id();
(x, self.items.remove(&id).unwrap())
})
}
/// Get the number of items in the `SelectMap`
pub fn len(&self) -> usize {
self.items.len()
}
}
impl<T> Iterator for SelectMap<T> {
type Item = (Signal, T);
fn next(&mut self) -> Option<(Signal, T)> {
self.select.next().map(|x| {
let id = x.id();
(x, self.items.remove(&id).unwrap())
})
}
}
impl<T> Signals for SelectMap<T> {
fn signal(&self) -> Signal {
self.select.signal()
}
}<|fim▁end|> | |
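// Usage sketch for `SelectMap` (illustrative):
//
//     let (signal, pulse) = Signal::new();
//     let mut map = SelectMap::new();
//     map.add(signal, "db ready");
//     pulse.pulse();
//     let (_signal, tag) = map.next().unwrap();
//     assert_eq!(tag, "db ready");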
<|file_name|>ImgHeaderCallback.java<|end_file_name|><|fim▁begin|>/*
* ImgHeaderCallback.java
*
* Copyright 2001-2007 Goldin-Rudahl Associates
*
* Created by Sally Goldin, 5/16/2001
*
* $Id: ImgHeaderCallback.java,v 1.20 2007/01/05 07:41:57 rudahl Exp $
* $Log: ImgHeaderCallback.java,v $
* Revision 1.20 2007/01/05 07:41:57 rudahl
* added Whatis info
*
* Revision 1.19 2006/12/17 11:35:56 goldin
* fix formatting of reals
*
* Revision 1.18 2006/12/10 12:09:58 goldin
* Adding new menus and panels for revised geometry/geography
*
* Revision 1.17 2006/02/11 07:15:31 goldin
* Enable classnames tab even if no classnames yet
*
* Revision 1.16 2005/08/13 08:41:23 goldin
* Migrate fix in 5.6 regarding display type setting based on header
*
* Revision 1.15 2004/12/06 03:41:56 goldin
* Don't change display type for MEA if =M
*
* Revision 1.14 2002/09/11 23:34:20 goldin
* Call new statusmanager method to translate =R etc into a filename
*
* Revision 1.13 2002/07/25 23:06:18 goldin
* Make Source show up in HEA
*
* Revision 1.12 2002/05/29 17:52:24 goldin
* Add processing for calibration fields
*
* Revision 1.11 2002/03/06 17:48:12 goldin
* Enhance and extend use of ImgHeaderCallback to control display type<|fim▁hole|> * Revision 1.9 2001/11/16 16:41:07 goldin
* Move some files to common .gui package and adjust imports in dragon.ui pkg
*
* Revision 1.8 2001/11/09 17:52:05 goldin
* Set display type to color for =C or classified file
*
* Revision 1.7 2001/11/05 13:59:15 goldin
* Put UI code in a package
*
* Revision 1.6 2001/10/17 10:29:37 goldin
* Modify to use ApplicationManager to get error display, etc.
*
* Revision 1.5 2001/10/12 11:41:05 goldin
* New callbacks for HEA panel
*
* Revision 1.4 2001/07/31 17:40:38 goldin
* display correct range as part of message for out-of-range errors
*
* Revision 1.3 2001/07/25 11:53:05 goldin
* support nlines/npix in SUB
*
* Revision 1.2 2001/05/29 10:35:28 goldin
* Add tab disabling capability
*
* Revision 1.1 2001/05/16 15:43:16 goldin
* Implemen header-based callback
*
*/
package com.grs.dragon.ui;
import com.grs.gui.*;
import java.util.*;
import javax.swing.*;
import java.text.NumberFormat;
/**
* This class implements the Callback interface. It is used to populate
* fields depending on a panel, based on data in the header of the
* image file specified in the field that invokes the callback.
* @author goldin*/
public class ImgHeaderCallback extends HeaderCallback
{
/** Primary method of a callback class.
* Process sigFile if necessary, and set values in the
* appropriate combo box.
* @param field Field whose value will determine the
* effects of the callback.
*/
public void executeCallback(DragonField field)
{
DImageHeader thisHeader = null;
DragonField dispTypeField = null;
DragonPanel parent = field.getTopLevelPanel();
if (parent == null)
{
return;
}
DragonUI mainApp = DragonUI.currentApplication;
String value = field.getFieldValue();
if ((value == null) || (value.length() == 0))
return;
// determine if there is a current header and if so,
// if that is what the user requested
DImageHeader header = mainApp.getMemoryHeader();
if ((header != null) && (header.isInitialized()) &&
(value.equals("=M")))
{
thisHeader = header;
}
else if (value.equals("=C"))
{
dispTypeField = parent.getField("^DSP");
if (dispTypeField != null)
dispTypeField.setFieldValue("C");
return;
}
else
{
if (value.startsWith("="))
value =
DragonUI.currentApplication.getStatusManager().getMemoryFileEquivalent(value);
thisHeader = new DImageHeader(value);
if (!thisHeader.isInitialized())
{
UiErrorDisplay errDisp = (UiErrorDisplay)
ApplicationManager.getErrorDisplay();
errDisp.sendError(thisHeader.getErrorMessage());
return;
}
}
String parentID = parent.getName();
if (parentID.compareTo("rHEA")== 0)
{
processHeaFields(parent, thisHeader);
}
else if ((parentID.compareTo("rMEA")== 0) ||
(parentID.compareTo("rBUF")== 0))
{
processMeaFields(value, parent, thisHeader);
}
else if (parentID.compareTo("rSUB")== 0)
{
processSubFields(parent, thisHeader);
}
else
{
processDisplayType(parent,thisHeader);
}
}
/**
* Set fields in MEA panel based on header values.
*/
protected void processMeaFields(String value,
DragonPanel parent,
DImageHeader header)
{
NumberFormat nformat = NumberFormat.getInstance();
nformat.setMaximumFractionDigits(2);
DragonField units = parent.getField("^U");
if (units != null)
units.setFieldValue(header.getUnitname());
DragonField xcell = parent.getField("^XF");
if (xcell != null)
xcell.setFieldValue(nformat.format(header.getXcell_size()));
DragonField ycell = parent.getField("^YF");
if (ycell != null)
ycell.setFieldValue(nformat.format(header.getYcell_size()));
// we have already dealt with the display type for memory files
if (!value.startsWith("="))
processDisplayType(parent,header);
}
/** Set fields in HEA panel based on header values. */
protected void processHeaFields(DragonPanel parent,
DImageHeader header)
{
DragonField fld = parent.getField("^FXI");
if (fld != null)
fld.setFieldValue(String.valueOf(header.getImg_x()));
fld = parent.getField("^FYI");
if (fld != null)
fld.setFieldValue(String.valueOf(header.getImg_y()));
fld = parent.getField("^RFX");
if (fld != null)
fld.setFieldValue(String.valueOf(header.getRef_x()));
fld = parent.getField("^RFY");
if (fld != null)
fld.setFieldValue(String.valueOf(header.getRef_y()));
fld = parent.getField("^MU");
if (fld != null)
fld.setFieldValue(header.getUnitname());
fld = parent.getField("^CLX");
if (fld != null)
fld.setFieldValue(String.valueOf(header.getXcell_size()));
fld = parent.getField("^CLY");
if (fld != null)
fld.setFieldValue(String.valueOf(header.getYcell_size()));
fld = parent.getField("^ECUTIL");
if (fld != null)
fld.setFieldValue(header.getComment());
fld = parent.getField("^ESID");
if (fld != null)
fld.setFieldValue(header.getScene());
fld = parent.getField("^ESS");
if (fld != null)
fld.setFieldValue(header.getSubscene());
fld = parent.getField("^ECF");
if (fld != null)
fld.setFieldValue(header.getClf());
fld = parent.getField("^ESRC");
if (fld != null)
fld.setFieldValue(header.getSource());
fld = parent.getField("^EB");
if (fld != null)
fld.setFieldValue(header.getBand());
fld = parent.getField("^ET");
if (fld != null)
fld.setFieldValue(header.getFileType());
fld = parent.getField("^CALUNIT");
if (fld != null)
fld.setFieldValue(header.getZUnit());
fld = parent.getField("^CALMULT");
if (fld != null)
fld.setFieldValue(String.valueOf(header.getZScale()));
fld = parent.getField("^CALOFF");
if (fld != null)
fld.setFieldValue(String.valueOf(header.getZOffset()));
if (header.getFileType().compareTo("I") == 0)
{
parent.enableTab(1,false);
clearClassNames(parent,header);
}
else
{
parent.enableTab(1,true);
processClassNames(parent,header);
}
}
/**
* Set fields in SUB panel based on header values.
*/
protected void processSubFields(DragonPanel parent,
DImageHeader header)
{
boolean bEnable = false;
DragonField nlines = parent.getField("^NLS");
if (nlines != null)
nlines.setFieldValue(String.valueOf(header.getNLines()));
DragonField npix = parent.getField("^NPS");
if (npix != null)
npix.setFieldValue(String.valueOf(header.getNPix()));
if (header.getBitsPerPix() == 8)
{
bEnable = false;
}
else
{
bEnable = true;
}
DragonField fld = parent.getField("^SM");
if (fld != null)
fld.setEnabled(bEnable);
processDisplayType(parent,header);
}
/**
* If the panel has a "display" type option, set it
* to 'C' if the file is classified.
*/
protected void processDisplayType(DragonPanel parent,
DImageHeader header)
{
DragonField dispType = parent.getField("^DSP");
if (dispType == null)
return;
if (header.getFileType().startsWith("I"))
dispType.setFieldValue("G");
else if (header.getFileType().startsWith("C"))
dispType.setFieldValue("C");
else if (header.getFileType().startsWith("L"))
dispType.setFieldValue("C");
}
protected static String cvsInfo = null;
protected static void setCvsInfo()
{
cvsInfo = "\n@(#) $Id: ImgHeaderCallback.java,v 1.20 2007/01/05 07:41:57 rudahl Exp $ \n";
}
}<|fim▁end|> | *
* Revision 1.10 2001/11/30 18:01:21 goldin
* Moved most of the UI basic components to the com.grs.gui package
* |
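// Note on the "=X" name convention handled in executeCallback() above:
// "=M" selects the image header currently in memory, "=C" simply forces the
// display type field to color, and any other "=" name is translated to a real
// filename via StatusManager.getMemoryFileEquivalent().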
<|file_name|>TransformationClient.py<|end_file_name|><|fim▁begin|>""" Class that contains client access to the transformation DB handler. """
__RCSID__ = "$Id$"
import types
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Base.Client import Client
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Resources.Catalog.FileCatalogueBase import FileCatalogueBase
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
rpc = None
url = None
class TransformationClient( Client, FileCatalogueBase ):
""" Exposes the functionality available in the DIRAC/TransformationHandler
This inherits the DIRAC base Client for direct execution of server functionality.
The following methods are available (although not visible here).
Transformation (table) manipulation
deleteTransformation(transName)
getTransformationParameters(transName,paramNames)
getTransformationWithStatus(status)
setTransformationParameter(transName,paramName,paramValue)
deleteTransformationParameter(transName,paramName)
TransformationFiles table manipulation
addFilesToTransformation(transName,lfns)
addTaskForTransformation(transName,lfns=[],se='Unknown')
getTransformationStats(transName)
TransformationTasks table manipulation
setTaskStatus(transName, taskID, status)
setTaskStatusAndWmsID(transName, taskID, status, taskWmsID)
getTransformationTaskStats(transName)
deleteTasks(transName, taskMin, taskMax)
extendTransformation( transName, nTasks)
getTasksToSubmit(transName,numTasks,site='')
TransformationLogging table manipulation
getTransformationLogging(transName)
File/directory manipulation methods (the remainder of the interface can be found below)
getFileSummary(lfns)
exists(lfns)
Web monitoring tools
getDistinctAttributeValues(attribute, selectDict)
getTransformationStatusCounters()
getTransformationSummary()
getTransformationSummaryWeb(selectDict, sortList, startItem, maxItems)
"""
def __init__( self, **kwargs ):
Client.__init__( self, **kwargs )
opsH = Operations()
self.maxResetCounter = opsH.getValue( 'Productions/ProductionFilesMaxResetCounter', 10 )
self.setServer( 'Transformation/TransformationManager' )
def setServer( self, url ):
self.serverURL = url
def getCounters( self, table, attrList, condDict, older = None, newer = None, timeStamp = None,
rpc = '', url = '' ):
rpcClient = self._getRPC( rpc = rpc, url = url )
    return rpcClient.getCounters( table, attrList, condDict, older, newer, timeStamp )
def addTransformation( self, transName, description, longDescription, transType, plugin, agentType, fileMask,
transformationGroup = 'General',
groupSize = 1,
inheritedFrom = 0,
body = '',
maxTasks = 0,
eventsPerTask = 0,
addFiles = True,
rpc = '', url = '', timeout = 1800 ):
""" add a new transformation
"""
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
return rpcClient.addTransformation( transName, description, longDescription, transType, plugin,
agentType, fileMask, transformationGroup, groupSize, inheritedFrom,
body, maxTasks, eventsPerTask, addFiles )
def getTransformations( self, condDict = {}, older = None, newer = None, timeStamp = 'CreationDate',
orderAttribute = None, limit = 100, extraParams = False, rpc = '', url = '', timeout = None ):
""" gets all the transformations in the system, incrementally. "limit" here is just used to determine the offset.
"""
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
transformations = []
# getting transformations - incrementally
offsetToApply = 0
while True:
res = rpcClient.getTransformations( condDict, older, newer, timeStamp, orderAttribute, limit,
extraParams, offsetToApply )
if not res['OK']:
return res
else:
gLogger.verbose( "Result for limit %d, offset %d: %d" % ( limit, offsetToApply, len( res['Value'] ) ) )
if res['Value']:
transformations = transformations + res['Value']
offsetToApply += limit
if len( res['Value'] ) < limit:
break
return S_OK( transformations )
def getTransformation( self, transName, extraParams = False, rpc = '', url = '', timeout = None ):
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
return rpcClient.getTransformation( transName, extraParams )
def getTransformationFiles( self, condDict = {}, older = None, newer = None, timeStamp = 'LastUpdate',
orderAttribute = None, limit = 10000, rpc = '', url = '', timeout = 1800 ):
""" gets all the transformation files for a transformation, incrementally.
"limit" here is just used to determine the offset.
"""
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
transformationFiles = []
# getting transformationFiles - incrementally
offsetToApply = 0
while True:
res = rpcClient.getTransformationFiles( condDict, older, newer, timeStamp, orderAttribute, limit, offsetToApply )
if not res['OK']:
return res
else:
gLogger.verbose( "Result for limit %d, offset %d: %d" % ( limit, offsetToApply, len( res['Value'] ) ) )
if res['Value']:
transformationFiles = transformationFiles + res['Value']
offsetToApply += limit
if len( res['Value'] ) < limit:
break
return S_OK( transformationFiles )
def getTransformationTasks( self, condDict = {}, older = None, newer = None, timeStamp = 'CreationTime',
orderAttribute = None, limit = 10000, inputVector = False, rpc = '',
url = '', timeout = None ):
""" gets all the transformation tasks for a transformation, incrementally.
"limit" here is just used to determine the offset.
"""
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
transformationTasks = []
# getting transformationFiles - incrementally
offsetToApply = 0
while True:
res = rpcClient.getTransformationTasks( condDict, older, newer, timeStamp, orderAttribute, limit,
inputVector, offsetToApply )
if not res['OK']:
return res
else:
gLogger.verbose( "Result for limit %d, offset %d: %d" % ( limit, offsetToApply, len( res['Value'] ) ) )
if res['Value']:
transformationTasks = transformationTasks + res['Value']
offsetToApply += limit
if len( res['Value'] ) < limit:
break
return S_OK( transformationTasks )
def cleanTransformation( self, transID, rpc = '', url = '', timeout = None ):
""" Clean the transformation, and set the status parameter (doing it here, for easier extensibility)
"""
# Cleaning
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
res = rpcClient.cleanTransformation( transID )
if not res['OK']:
return res
# Setting the status
return self.setTransformationParameter( transID, 'Status', 'TransformationCleaned' )
def moveFilesToDerivedTransformation( self, transDict, resetUnused = True ):
""" move files input to a transformation, to the derived one
"""
prod = transDict['TransformationID']
parentProd = int( transDict.get( 'InheritedFrom', 0 ) )
movedFiles = {}
if not parentProd:
gLogger.warn( "[None] [%d] .moveFilesToDerivedTransformation: Transformation was not derived..." % prod )
return S_OK( ( parentProd, movedFiles ) )
# get the lfns in status Unused/MaxReset of the parent production
res = self.getTransformationFiles( condDict = {'TransformationID': parentProd, 'Status': [ 'Unused', 'MaxReset' ]} )
if not res['OK']:
gLogger.error( "[None] [%d] .moveFilesToDerivedTransformation: Error getting Unused files from transformation %s:" % ( prod, parentProd ), res['Message'] )
return res
parentFiles = res['Value']
lfns = [lfnDict['LFN'] for lfnDict in parentFiles]
if not lfns:
gLogger.info( "[None] [%d] .moveFilesToDerivedTransformation: No files found to be moved from transformation %d" % ( prod, parentProd ) )
return S_OK( ( parentProd, movedFiles ) )
# get the lfns of the derived production that were Unused/MaxReset in the parent one
res = self.getTransformationFiles( condDict = { 'TransformationID': prod, 'LFN': lfns} )
if not res['OK']:
gLogger.error( "[None] [%d] .moveFilesToDerivedTransformation: Error getting files from derived transformation" % prod, res['Message'] )
return res
derivedFiles = res['Value']
suffix = '-%d' % parentProd
derivedStatusDict = dict( [( derivedDict['LFN'], derivedDict['Status'] ) for derivedDict in derivedFiles] )
newStatusFiles = {}
parentStatusFiles = {}
force = False
for parentDict in parentFiles:
lfn = parentDict['LFN']
derivedStatus = derivedStatusDict.get( lfn )
if derivedStatus:
parentStatus = parentDict['Status']
if resetUnused and parentStatus == 'MaxReset':
status = 'Unused'
moveStatus = 'Unused from MaxReset'
force = True
else:
status = parentStatus
moveStatus = parentStatus
if derivedStatus.endswith( suffix ):
# This file is Unused or MaxReset while it was most likely Assigned at the time of derivation
parentStatusFiles.setdefault( 'Moved-%s' % str( prod ), [] ).append( lfn )
newStatusFiles.setdefault( ( status, parentStatus ), [] ).append( lfn )
movedFiles[moveStatus] = movedFiles.setdefault( moveStatus, 0 ) + 1
elif parentDict['Status'] == 'Unused':
# If the file was Unused already at derivation time, set it NotProcessed
parentStatusFiles.setdefault( 'NotProcessed', [] ).append( lfn )
<|fim▁hole|> res = self.setFileStatusForTransformation( parentProd, status, lfnChunk )
if not res['OK']:
gLogger.error( "[None] [%d] .moveFilesToDerivedTransformation: Error setting status %s for %d files in transformation %d "
% ( prod, status, len( lfnList ), parentProd ),
res['Message'] )
# Set the status in the new transformation
for ( status, oldStatus ), lfnList in newStatusFiles.items():
for lfnChunk in breakListIntoChunks( lfnList, 5000 ):
res = self.setFileStatusForTransformation( prod, status, lfnChunk, force = force )
if not res['OK']:
gLogger.error( "[None] [%d] .moveFilesToDerivedTransformation: Error setting status %s for %d files; resetting them %s in transformation %d"
% ( prod, status, len( lfnChunk ), oldStatus, parentProd ),
res['Message'] )
res = self.setFileStatusForTransformation( parentProd, oldStatus, lfnChunk )
if not res['OK']:
gLogger.error( "[None] [%d] .moveFilesToDerivedTransformation: Error setting status %s for %d files in transformation %d"
% ( prod, oldStatus, len( lfnChunk ), parentProd ),
res['Message'] )
return S_OK( ( parentProd, movedFiles ) )
def setFileStatusForTransformation( self, transName, newLFNsStatus = {}, lfns = [], force = False,
rpc = '', url = '', timeout = 120 ):
""" sets the file status for LFNs of a transformation
For backward compatibility purposes, the status and LFNs can be passed in 2 ways:
- newLFNsStatus is a dictionary with the form:
{'/this/is/an/lfn1.txt': 'StatusA', '/this/is/an/lfn2.txt': 'StatusB', ... }
and at this point lfns is not considered
- newLFNStatus is a string, that applies to all the LFNs in lfns
"""
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
# create dictionary in case newLFNsStatus is a string
if type( newLFNsStatus ) == type( '' ):
newLFNsStatus = dict( [( lfn, newLFNsStatus ) for lfn in lfns ] )
# gets status as of today
tsFiles = self.getTransformationFiles( {'TransformationID':transName, 'LFN': newLFNsStatus.keys()} )
if not tsFiles['OK']:
return tsFiles
tsFiles = tsFiles['Value']
if tsFiles:
# for convenience, makes a small dictionary out of the tsFiles, with the lfn as key
tsFilesAsDict = {}
for tsFile in tsFiles:
tsFilesAsDict[tsFile['LFN']] = [tsFile['Status'], tsFile['ErrorCount'], tsFile['FileID']]
# applying the state machine to the proposed status
newStatuses = self._applyTransformationFilesStateMachine( tsFilesAsDict, newLFNsStatus, force )
if newStatuses: # if there's something to update
# must do it for the file IDs...
newStatusForFileIDs = dict( [( tsFilesAsDict[lfn][2], newStatuses[lfn] ) for lfn in newStatuses.keys()] )
res = rpcClient.setFileStatusForTransformation( transName, newStatusForFileIDs )
if not res['OK']:
return res
return S_OK( newStatuses )
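  # Call sketch: the two equivalent invocation styles described in the
  # docstring above (transformation ID and LFNs are made up):
  #
  #   client.setFileStatusForTransformation( 1234, {'/lfn/a': 'Processed', '/lfn/b': 'Unused'} )
  #   client.setFileStatusForTransformation( 1234, 'Processed', lfns = ['/lfn/a', '/lfn/b'] )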
def _applyTransformationFilesStateMachine( self, tsFilesAsDict, dictOfProposedLFNsStatus, force ):
""" For easier extension, here we apply the state machine of the production files.
VOs might want to replace the standard here with something they prefer.
tsFiles is a dictionary with the lfn as key and as value a list of [Status, ErrorCount, FileID]
dictOfNewLFNsStatus is a dictionary with the proposed status
force is a boolean
It returns a dictionary with the status updates
"""
newStatuses = {}
for lfn in dictOfProposedLFNsStatus.keys():
if lfn not in tsFilesAsDict.keys():
continue
else:
newStatus = dictOfProposedLFNsStatus[lfn]
# Apply optional corrections
if tsFilesAsDict[lfn][0].lower() == 'processed' and dictOfProposedLFNsStatus[lfn].lower() != 'processed':
if not force:
newStatus = 'Processed'
elif tsFilesAsDict[lfn][0].lower() == 'maxreset':
if not force:
newStatus = 'MaxReset'
elif dictOfProposedLFNsStatus[lfn].lower() == 'unused':
errorCount = tsFilesAsDict[lfn][1]
# every 10 retries (by default)
if errorCount and ( ( errorCount % self.maxResetCounter ) == 0 ):
if not force:
newStatus = 'MaxReset'
if tsFilesAsDict[lfn][0].lower() != newStatus:
newStatuses[lfn] = newStatus
return newStatuses
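  # Examples of the state machine above (with the default maxResetCounter of 10):
  #   - a file currently 'Processed' proposed as anything else stays 'Processed' unless force is True
  #   - a file currently 'MaxReset' stays 'MaxReset' unless force is True
  #   - a file proposed as 'Unused' whose ErrorCount is a non-zero multiple of 10 becomes 'MaxReset'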
def setTransformationParameter( self, transID, paramName, paramValue, force = False,
rpc = '', url = '', timeout = 120 ):
""" Sets a transformation parameter. There's a special case when coming to setting the status of a transformation.
"""
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
if paramName.lower() == 'status':
# get transformation Type
transformation = self.getTransformation( transID )
if not transformation['OK']:
return transformation
transformationType = transformation['Value']['Type']
# get status as of today
originalStatus = self.getTransformationParameters( transID, 'Status' )
if not originalStatus['OK']:
return originalStatus
originalStatus = originalStatus['Value']
transIDAsDict = {transID: [originalStatus, transformationType]}
dictOfProposedstatus = {transID: paramValue}
# applying the state machine to the proposed status
value = self._applyTransformationStatusStateMachine( transIDAsDict, dictOfProposedstatus, force )
else:
value = paramValue
return rpcClient.setTransformationParameter( transID, paramName, value )
def _applyTransformationStatusStateMachine( self, transIDAsDict, dictOfProposedstatus, force ):
""" For easier extension, here we apply the state machine of the transformation status.
VOs might want to replace the standard here with something they prefer.
transIDAsDict is a dictionary with the transID as key and as value a list with [Status, Type]
dictOfProposedstatus is a dictionary with the proposed status
force is a boolean
It returns the new status (the standard is just doing nothing: everything is possible)
"""
return dictOfProposedstatus.values()[0]
#####################################################################
#
# These are the file catalog interface methods
#
def isOK( self ):
return self.valid
def getName( self, DN = '' ):
""" Get the file catalog type name
"""
return self.name
def addDirectory( self, path, force = False, rpc = '', url = '', timeout = None ):
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
return rpcClient.addDirectory( path, force )
def getReplicas( self, lfn, rpc = '', url = '', timeout = None ):
res = self.__checkArgumentFormat( lfn )
if not res['OK']:
return res
lfns = res['Value'].keys()
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
return rpcClient.getReplicas( lfns )
def addFile( self, lfn, force = False, rpc = '', url = '', timeout = None ):
res = self.__checkArgumentFormat( lfn )
if not res['OK']:
return res
lfndicts = res['Value']
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
return rpcClient.addFile( lfndicts, force )
def addReplica( self, lfn, force = False, rpc = '', url = '', timeout = None ):
res = self.__checkArgumentFormat( lfn )
if not res['OK']:
return res
lfndicts = res['Value']
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
return rpcClient.addReplica( lfndicts, force )
def removeFile( self, lfn, rpc = '', url = '', timeout = None ):
res = self.__checkArgumentFormat( lfn )
if not res['OK']:
return res
lfns = res['Value'].keys()
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
successful = {}
failed = {}
listOfLists = breakListIntoChunks( lfns, 100 )
for fList in listOfLists:
res = rpcClient.removeFile( fList )
if not res['OK']:
return res
successful.update( res['Value']['Successful'] )
failed.update( res['Value']['Failed'] )
resDict = {'Successful': successful, 'Failed':failed}
return S_OK( resDict )
def removeReplica( self, lfn, rpc = '', url = '', timeout = None ):
res = self.__checkArgumentFormat( lfn )
if not res['OK']:
return res
lfndicts = res['Value']
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
successful = {}
failed = {}
# as lfndicts is a dict, the breakListIntoChunks will fail. Fake it!
listOfDicts = []
localdicts = {}
for lfn, info in lfndicts.items():
localdicts.update( { lfn : info } )
if len( localdicts.keys() ) % 100 == 0:
listOfDicts.append( localdicts )
        localdicts = {}
    # make sure the last, partially filled chunk is not dropped
    if localdicts:
      listOfDicts.append( localdicts )
for fDict in listOfDicts:
res = rpcClient.removeReplica( fDict )
if not res['OK']:
return res
successful.update( res['Value']['Successful'] )
failed.update( res['Value']['Failed'] )
resDict = {'Successful': successful, 'Failed':failed}
return S_OK( resDict )
def getReplicaStatus( self, lfn, rpc = '', url = '', timeout = None ):
res = self.__checkArgumentFormat( lfn )
if not res['OK']:
return res
lfndict = res['Value']
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
return rpcClient.getReplicaStatus( lfndict )
def setReplicaStatus( self, lfn, rpc = '', url = '', timeout = None ):
res = self.__checkArgumentFormat( lfn )
if not res['OK']:
return res
lfndict = res['Value']
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
return rpcClient.setReplicaStatus( lfndict )
def setReplicaHost( self, lfn, rpc = '', url = '', timeout = None ):
res = self.__checkArgumentFormat( lfn )
if not res['OK']:
return res
lfndict = res['Value']
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
return rpcClient.setReplicaHost( lfndict )
def removeDirectory( self, lfn, rpc = '', url = '', timeout = None ):
return self.__returnOK( lfn )
def createDirectory( self, lfn, rpc = '', url = '', timeout = None ):
return self.__returnOK( lfn )
def createLink( self, lfn, rpc = '', url = '', timeout = None ):
return self.__returnOK( lfn )
def removeLink( self, lfn, rpc = '', url = '', timeout = None ):
return self.__returnOK( lfn )
def __returnOK( self, lfn ):
res = self.__checkArgumentFormat( lfn )
if not res['OK']:
return res
successful = {}
for lfn in res['Value'].keys():
successful[lfn] = True
resDict = {'Successful':successful, 'Failed':{}}
return S_OK( resDict )
def __checkArgumentFormat( self, path ):
if type( path ) in types.StringTypes:
urls = {path:False}
elif type( path ) == types.ListType:
urls = {}
for url in path:
urls[url] = False
elif type( path ) == types.DictType:
urls = path
else:
return S_ERROR( "TransformationClient.__checkArgumentFormat: Supplied path is not of the correct format." )
return S_OK( urls )<|fim▁end|> | # Set the status in the parent transformation first
for status, lfnList in parentStatusFiles.items():
for lfnChunk in breakListIntoChunks( lfnList, 5000 ): |
<|file_name|>range_traits-5.rs<|end_file_name|><|fim▁begin|>// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-pass
use std::ops::*;
#[derive(Copy, Clone)]<|fim▁hole|>
fn main() {}<|fim▁end|> | struct R(RangeFull);
|
<|file_name|>axios-response-interceptor.spec.ts<|end_file_name|><|fim▁begin|>import axios, { AxiosInstance } from 'axios';
import * as moxios from 'moxios';
import { AxiosResponseInterceptor } from '../axios-response-interceptor';
describe( 'AxiosResponseInterceptor', () => {
let apiResponseInterceptor: AxiosResponseInterceptor;
let axiosInstance: AxiosInstance;
beforeEach( () => {
axiosInstance = axios.create();
moxios.install( axiosInstance );
apiResponseInterceptor = new AxiosResponseInterceptor();
apiResponseInterceptor.start( axiosInstance );
} );
afterEach( () => {
apiResponseInterceptor.stop( axiosInstance );
moxios.uninstall();
} );
it( 'should transform responses into an HTTPResponse', async () => {
moxios.stubRequest( 'http://test.test', {
status: 200,
headers: {
'Content-Type': 'application/json',
},
responseText: JSON.stringify( { test: 'value' } ),
} );
const response = await axiosInstance.get( 'http://test.test' );
expect( response ).toMatchObject( {
statusCode: 200,
headers: {
'content-type': 'application/json',
},
data: {
test: 'value',
},
} );
} );
it( 'should transform error responses into an HTTPResponse', async () => {
moxios.stubRequest( 'http://test.test', {
status: 404,
headers: {
'Content-Type': 'application/json',
},
responseText: JSON.stringify( { code: 'error_code', message: 'value' } ),
} );
await expect( axiosInstance.get( 'http://test.test' ) ).rejects.toMatchObject( {
statusCode: 404,
headers: {
'content-type': 'application/json',<|fim▁hole|> },
data: {
code: 'error_code',
message: 'value',
},
} );
} );
it( 'should bubble non-response errors', async () => {
moxios.stubTimeout( 'http://test.test' );
await expect( axiosInstance.get( 'http://test.test' ) ).rejects.toMatchObject(
new Error( 'timeout of 0ms exceeded' ),
);
} );
} );<|fim▁end|> | |
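// Usage sketch (illustrative): application code attaches the interceptor to an
// Axios instance exactly the way the tests above do:
//
//     const client = axios.create();
//     const interceptor = new AxiosResponseInterceptor();
//     interceptor.start( client );
//     // requests made with `client` now resolve/reject with HTTPResponse shapes
//     interceptor.stop( client );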
<|file_name|>buf.rs<|end_file_name|><|fim▁begin|>use bytes::Buf;
use iovec::IoVec;
/// A `Buf` wrapping a static byte slice.
#[derive(Debug)]
pub(crate) struct StaticBuf(pub(crate) &'static [u8]);
impl Buf for StaticBuf {
#[inline]
fn remaining(&self) -> usize {
self.0.len()
}
#[inline]
fn bytes(&self) -> &[u8] {
self.0
}
#[inline]
fn advance(&mut self, cnt: usize) {
self.0 = &self.0[cnt..];
}
#[inline]
fn bytes_vec<'t>(&'t self, dst: &mut [&'t IoVec]) -> usize {
if dst.is_empty() || self.0.is_empty() {
0
} else {
dst[0] = self.0.into();
1<|fim▁hole|>}<|fim▁end|> | }
} |
<|file_name|>test_input_thermo_elasticity_ess.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import
input_name = '../examples/multi_physics/thermo_elasticity_ess.py'
output_name = 'test_thermo_elasticity_ess.vtk'
from tests_basic import TestInput<|fim▁hole|>class Test(TestInput):
pass<|fim▁end|> | |
<|file_name|>example.py<|end_file_name|><|fim▁begin|>import matplotlib.pyplot as plt
import numpy as np
from ad3 import simple_grid, general_graph
def example_binary():
# generate trivial data
x = np.ones((10, 10))
x[:, 5:] = -1
x_noisy = x + np.random.normal(0, 0.8, size=x.shape)
    x_thresh = x_noisy > 0.0
# create unaries
unaries = x_noisy
    # as we convert to int, we need to multiply to get sensible values
unaries = np.dstack([-unaries, unaries])
# create potts pairwise
pairwise = np.eye(2)
# do simple cut
result = np.argmax(simple_grid(unaries, pairwise)[0], axis=-1)
    # use the general graph algorithm
# first, we construct the grid graph
inds = np.arange(x.size).reshape(x.shape)
horz = np.c_[inds[:, :-1].ravel(), inds[:, 1:].ravel()]
vert = np.c_[inds[:-1, :].ravel(), inds[1:, :].ravel()]
edges = np.vstack([horz, vert])
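    # For a 2x2 grid, inds = [[0, 1], [2, 3]], so horz = [[0, 1], [2, 3]]
    # (left-right neighbors) and vert = [[0, 2], [1, 3]] (top-bottom
    # neighbors): every 4-connected pair appears exactly once in edges.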
# we flatten the unaries
pairwise_per_edge = np.repeat(pairwise[np.newaxis, :, :], edges.shape[0],
axis=0)
result_graph = np.argmax(general_graph(unaries.reshape(-1, 2), edges,
pairwise_per_edge)[0], axis=-1)
# plot results
plt.subplot(231, title="original")
plt.imshow(x, interpolation='nearest')
plt.subplot(232, title="noisy version")
plt.imshow(x_noisy, interpolation='nearest')
plt.subplot(234, title="thresholding result")
plt.imshow(x_thresh, interpolation='nearest')<|fim▁hole|> plt.imshow(result_graph.reshape(x.shape), interpolation='nearest')
plt.show()
def example_multinomial():
# generate dataset with three stripes
np.random.seed(4)
x = np.zeros((10, 12, 3))
x[:, :4, 0] = 1
x[:, 4:8, 1] = 1
x[:, 8:, 2] = 1
unaries = x + 1.5 * np.random.normal(size=x.shape)
x = np.argmax(x, axis=2)
unaries = unaries
x_thresh = np.argmax(unaries, axis=2)
# potts potential
pairwise_potts = 2 * np.eye(3)
result = np.argmax(simple_grid(unaries, pairwise_potts)[0], axis=-1)
# potential that penalizes 0-1 and 1-2 less than 0-2
pairwise_1d = 2 * np.eye(3) + 2
pairwise_1d[-1, 0] = 0
pairwise_1d[0, -1] = 0
print(pairwise_1d)
result_1d = np.argmax(simple_grid(unaries, pairwise_1d)[0], axis=-1)
plt.subplot(141, title="original")
plt.imshow(x, interpolation="nearest")
plt.subplot(142, title="thresholded unaries")
plt.imshow(x_thresh, interpolation="nearest")
plt.subplot(143, title="potts potentials")
plt.imshow(result, interpolation="nearest")
plt.subplot(144, title="1d topology potentials")
plt.imshow(result_1d, interpolation="nearest")
plt.show()
#example_binary()
example_multinomial()<|fim▁end|> | plt.subplot(235, title="cut_simple")
plt.imshow(result, interpolation='nearest')
plt.subplot(236, title="cut_from_graph") |
<|file_name|>apps.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
<|fim▁hole|> name = 'blog'<|fim▁end|> | class BlogConfig(AppConfig):
|
<|file_name|>const.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
__author__ = 'LIWEI240'
"""
Constants definition
"""
class Const(object):
class RetCode(object):
OK = 0
InvalidParam = -1<|fim▁hole|> NotExist = -2
ParseError = -3<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>""" Documentation package """
import neuroptikon
import wx, wx.html
import os.path, sys, urllib
_sharedFrame = None
def baseURL():
if neuroptikon.runningFromSource:
basePath = os.path.join(neuroptikon.rootDir, 'documentation', 'build', 'Documentation')
else:<|fim▁hole|> basePath = os.path.join(neuroptikon.rootDir, 'documentation')
return 'file:' + urllib.pathname2url(basePath) + '/'
def showPage(page):
pageURL = baseURL() + page
# Try to open an embedded WebKit-based help browser.
try:
import documentation_frame
documentation_frame.showPage(pageURL)
except:
# Fall back to using the user's default browser outside of Neuroptikon.
wx.LaunchDefaultBrowser(pageURL)<|fim▁end|> | |
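# Usage sketch: showPage('index.html') resolves the page against baseURL()
# and opens it in the embedded WebKit browser when available, otherwise in
# the system default browser.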
<|file_name|>main.cpp<|end_file_name|><|fim▁begin|>/**************************************************************************
**
** This file is part of Qt Creator
**
** Copyright (c) 2010 Nokia Corporation and/or its subsidiary(-ies).
**
** Contact: Nokia Corporation ([email protected])
**
** Commercial Usage
**
** Licensees holding valid Qt Commercial licenses may use this file in
** accordance with the Qt Commercial License Agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and Nokia.
**
** GNU Lesser General Public License Usage
**
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 2.1 as published by the Free Software
** Foundation and appearing in the file LICENSE.LGPL included in the
** packaging of this file. Please review the following information to
** ensure the GNU Lesser General Public License version 2.1 requirements
** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.<|fim▁hole|>**
** If you are unsure which license is appropriate for your use, please
** contact the sales department at http://qt.nokia.com/contact.
**
**************************************************************************/
#include "qrceditor.h"
#include "mainwindow.h"
int main(int argc, char *argv[])
{
QApplication app(argc, argv);
MainWindow mw;
mw.show();
return app.exec();
}<|fim▁end|> | |
<|file_name|>user.js<|end_file_name|><|fim▁begin|>"use strict";
let mongoose = require('mongoose'),
Schema = mongoose.Schema,
EmailValidator = require('./emailValidator');
class User {
constructor(explicitConnection) {
this.explicitConnection = explicitConnection;
this.schema = new Schema({
email: {
type: 'string',
trim: true,
required: true
},
organization: {
type: 'string',
trim: true,
required: true
},
password: {type: 'string', required: true},
isVisibleAccount: {type: 'boolean'},
userApiKey: {type: 'string'},
userApiSecret: {type: 'string'},
linkedApps: {},
avatarProto: {type: 'string'},
gmailAccount: {type: 'string'},
facebookAccount: {type: 'string'},
twitterAccount: {type: 'string'},
fullname: {type: 'string'},
loginHistories: [],
changeProfileHistories: [],
changeAuthorizationHistories: [],
sparqlQuestions: [],
blockingHistories: [],
authorizations: {},
tripleUsage: {type: 'number', default: 0},
tripleUsageHistory: [],
isBlocked: {
type: 'boolean',
required: true
},
blockedCause: {
type: 'string',
}
}).index({ email: 1, organization: 1 }, { unique: true });
}
getModel() {
if (this.explicitConnection === undefined) {<|fim▁hole|> }
}
module.exports = User;<|fim▁end|> | return mongoose.model('User', this.schema);
} else {
return this.explicitConnection.model('User', this.schema);
} |
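// Usage sketch (illustrative): the optional explicit connection supports
// multi-tenant setups where each tenant has its own mongoose connection.
//
//   const User = require('./user');
//   const DefaultUser = new User().getModel();          // default mongoose connection
//   const TenantUser = new User(tenantConn).getModel(); // explicit connection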
<|file_name|>conn_rpc_test.go<|end_file_name|><|fim▁begin|>/*
Copyright 2019 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package grpcvtgateconn
import (
"flag"
"io"
"net"
"os"
"testing"
"google.golang.org/grpc"
"context"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/vtgate/grpcvtgateservice"
"vitess.io/vitess/go/vt/vtgate/vtgateconn"
)
// TestGRPCVTGateConn makes sure the grpc service works
func TestGRPCVTGateConn(t *testing.T) {
// fake service
service := CreateFakeServer(t)
// listen on a random port
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatalf("Cannot listen: %v", err)
}
// Create a gRPC server and listen on the port
server := grpc.NewServer()
grpcvtgateservice.RegisterForTest(server, service)
go server.Serve(listener)
// Create a Go RPC client connecting to the server
ctx := context.Background()
client, err := dial(ctx, listener.Addr().String())
if err != nil {
t.Fatalf("dial failed: %v", err)
}
RegisterTestDialProtocol(client)
// run the test suite
RunTests(t, client, service)
RunErrorTests(t, service)
// and clean up
client.Close()
}<|fim▁hole|>
// TestGRPCVTGateConnAuth makes sure the grpc with auth plugin works
func TestGRPCVTGateConnAuth(t *testing.T) {
var opts []grpc.ServerOption
// fake service
service := CreateFakeServer(t)
// listen on a random port
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatalf("Cannot listen: %v", err)
}
// add auth interceptors
opts = append(opts, grpc.StreamInterceptor(servenv.FakeAuthStreamInterceptor))
opts = append(opts, grpc.UnaryInterceptor(servenv.FakeAuthUnaryInterceptor))
// Create a gRPC server and listen on the port
server := grpc.NewServer(opts...)
grpcvtgateservice.RegisterForTest(server, service)
go server.Serve(listener)
authJSON := `{
"Username": "valid",
"Password": "valid"
}`
f, err := os.CreateTemp("", "static_auth_creds.json")
if err != nil {
t.Fatal(err)
}
defer os.Remove(f.Name())
if _, err := io.WriteString(f, authJSON); err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
// Create a Go RPC client connecting to the server
ctx := context.Background()
flag.Set("grpc_auth_static_client_creds", f.Name())
client, err := dial(ctx, listener.Addr().String())
if err != nil {
t.Fatalf("dial failed: %v", err)
}
RegisterTestDialProtocol(client)
// run the test suite
RunTests(t, client, service)
RunErrorTests(t, service)
// and clean up
client.Close()
invalidAuthJSON := `{
"Username": "invalid",
"Password": "valid"
}`
f, err = os.CreateTemp("", "static_auth_creds.json")
if err != nil {
t.Fatal(err)
}
defer os.Remove(f.Name())
if _, err := io.WriteString(f, invalidAuthJSON); err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
// Create a Go RPC client connecting to the server
ctx = context.Background()
flag.Set("grpc_auth_static_client_creds", f.Name())
client, err = dial(ctx, listener.Addr().String())
if err != nil {
t.Fatalf("dial failed: %v", err)
}
RegisterTestDialProtocol(client)
conn, _ := vtgateconn.DialProtocol(context.Background(), "test", "")
// run the test suite
_, err = conn.Session("", nil).Execute(context.Background(), "select * from t", nil)
want := "rpc error: code = Unauthenticated desc = username and password must be provided"
if err == nil || err.Error() != want {
t.Errorf("expected auth failure:\n%v, want\n%s", err, want)
}
// and clean up again
client.Close()
}<|fim▁end|> | |
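// Note: the JSON written to the temp files above is the credential format
// consumed via the grpc_auth_static_client_creds flag: a single object with
// "Username" and "Password" fields.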
<|file_name|>homepage.js<|end_file_name|><|fim▁begin|>/* Set the width of the side navigation to 250px */
function openNav() {
document.getElementById("mySidenav").style.width = "250px";
}
<|fim▁hole|>function closeNav() {
document.getElementById("mySidenav").style.width = "0";
}<|fim▁end|> | /* Set the width of the side navigation to 0 */ |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>"""
Python script "setup.py"
by Matthew Garcia, PhD student
Dept. of Forest and Wildlife Ecology
University of Wisconsin - Madison
[email protected]
Copyright (C) 2015-2016 by Matthew Garcia
Licensed Gnu GPL v3; see 'LICENSE_GnuGPLv3.txt' for complete terms
Send questions, bug reports, any related requests to [email protected]
See also 'README.md', 'DISCLAIMER.txt', 'CITATION.txt', 'ACKNOWLEDGEMENTS.txt'
Treat others as you would be treated. Pay it forward. Valar dohaeris.
PURPOSE: Verifies sample data, scripts, modules, documents, auxiliary files.
Verifies availability of python dependencies used by various scripts.
Uncompresses certain large example data files<|fim▁hole|> Builds directory structure for script output products.
DEPENDENCIES: all software package source dependencies are polled here
USAGE: '$ python setup.py'
"""
import os
import sys
import glob
def message(char_string):
"""
prints a string to the terminal and flushes the buffer
"""
print char_string
sys.stdout.flush()
return
txt_files = ['ACKNOWLEDGEMENTS.txt', 'CITATION.txt', 'DISCLAIMER.txt',
'LICENSE_GnuGPLv3.txt']
md_files = ['README.md']
main_dirs = ['data', 'docs', 'htcondor', 'source', 'tools']
#
scripts = ['process_NCEI_00.py', 'process_NCEI_01.py',
'process_NCEI_02a.py', 'process_NCEI_02b.py',
'process_NCEI_03_chill_d.py', 'process_NCEI_03_chill_dd.py',
'process_NCEI_03_grow_dd.py', 'process_NCEI_03_grow_dd_base0.py',
'process_NCEI_03_prcp_03d.py', 'process_NCEI_03_prcp_07d.py',
'process_NCEI_03_prcp_120d.py', 'process_NCEI_03_prcp_15d.py',
'process_NCEI_03_prcp_180d.py', 'process_NCEI_03_prcp_30d.py',
'process_NCEI_03_prcp_365d.py', 'process_NCEI_03_prcp_60d.py',
'process_NCEI_03_prcp_90d.py', 'process_NCEI_03_prcp_90d_nd0.py',
'process_NCEI_03_prcp_90d_nd10.py',
'process_NCEI_03_prcp_90d_nd25.py',
'process_NCEI_03_preprocess.py', 'process_NCEI_03_tavg_03d.py',
'process_NCEI_03_tavg_07d.py', 'process_NCEI_03_tavg_15d.py',
'process_NCEI_03_tavg_30d.py', 'process_NCEI_03_tavg_60d.py',
'process_NCEI_03_tavg_90d.py', 'process_NCEI_03_tavg_frz.py',
'process_NCEI_03_tmax_03d.py', 'process_NCEI_03_tmax_07d.py',
'process_NCEI_03_tmax_15d.py', 'process_NCEI_03_tmax_30d.py',
'process_NCEI_03_tmax_60d.py', 'process_NCEI_03_tmax_90d.py',
'process_NCEI_03_tmax_frz.py', 'process_NCEI_03_tmin_03d.py',
'process_NCEI_03_tmin_07d.py', 'process_NCEI_03_tmin_15d.py',
'process_NCEI_03_tmin_30d.py', 'process_NCEI_03_tmin_60d.py',
'process_NCEI_03_tmin_90d.py', 'process_NCEI_03_tmin_frz.py',
'process_NCEI_03_vpd_03d.py', 'process_NCEI_03_vpd_07d.py',
'process_NCEI_03_vpd_15d.py', 'process_NCEI_03_vpd_30d.py',
'process_NCEI_03_vpd_60d.py', 'process_NCEI_03_vpd_90d.py',
'process_NCEI_04a.py', 'process_NCEI_04b.py', 'process_NCEI_05.py',
'process_NCEI_06.py', 'process_NCEI_07.py', 'process_NCEI_08.py',
'process_NCEI_09.py', 'process_NCEI_10.py', 'process_NCEI_11.py',
'process_NCEI_12.py', 'process_NCEI_13.py', 'process_NCEI_14.py',
'process_NCEI_15.py']
#
modules = ['Date_Convert.py', 'Interpolation.py', 'Plots.py',
'process_NCEI_03_aux.py', 'Read_Header_Files.py', 'Stats.py',
'Teleconnections.py', 'UTM_Geo_Convert.py']
#
htcondor = ['process_NCEI_00.sh', 'process_NCEI_00.sub',
'process_NCEI_01.sh', 'process_NCEI_01.sub',
'process_NCEI_02a.sh', 'process_NCEI_02a.sub',
'process_NCEI_02b.sh', 'process_NCEI_02b.sub',
'process_NCEI_02b_dag.sub', 'process_NCEI_03_chill_d.sh',
'process_NCEI_03_chill_dd.sh', 'process_NCEI_03_dag_gen.py',
'process_NCEI_03_generic.sub', 'process_NCEI_03_grow_dd.sh',
'process_NCEI_03_grow_dd_base0.sh', 'process_NCEI_03_prcp_03d.sh',
'process_NCEI_03_prcp_07d.sh', 'process_NCEI_03_prcp_120d.sh',
'process_NCEI_03_prcp_15d.sh', 'process_NCEI_03_prcp_180d.sh',
'process_NCEI_03_prcp_30d.sh', 'process_NCEI_03_prcp_365d.sh',
'process_NCEI_03_prcp_60d.sh', 'process_NCEI_03_prcp_90d.sh',
'process_NCEI_03_prcp_90d_nd0.sh',
'process_NCEI_03_prcp_90d_nd10.sh',
'process_NCEI_03_prcp_90d_nd25.sh',
'process_NCEI_03_preprocess.sh', 'process_NCEI_03_tavg_03d.sh',
'process_NCEI_03_tavg_07d.sh', 'process_NCEI_03_tavg_15d.sh',
'process_NCEI_03_tavg_30d.sh', 'process_NCEI_03_tavg_60d.sh',
'process_NCEI_03_tavg_90d.sh', 'process_NCEI_03_tavg_frz.sh',
'process_NCEI_03_tmax_03d.sh', 'process_NCEI_03_tmax_07d.sh',
'process_NCEI_03_tmax_15d.sh', 'process_NCEI_03_tmax_30d.sh',
'process_NCEI_03_tmax_60d.sh', 'process_NCEI_03_tmax_90d.sh',
'process_NCEI_03_tmax_frz.sh', 'process_NCEI_03_tmin_03d.sh',
'process_NCEI_03_tmin_07d.sh', 'process_NCEI_03_tmin_15d.sh',
'process_NCEI_03_tmin_30d.sh', 'process_NCEI_03_tmin_60d.sh',
'process_NCEI_03_tmin_90d.sh', 'process_NCEI_03_tmin_frz.sh',
'process_NCEI_03_vpd_03d.sh', 'process_NCEI_03_vpd_07d.sh',
'process_NCEI_03_vpd_15d.sh', 'process_NCEI_03_vpd_30d.sh',
'process_NCEI_03_vpd_60d.sh', 'process_NCEI_03_vpd_90d.sh',
'process_NCEI_04a.sh', 'process_NCEI_04a.sub',
'process_NCEI_04b.sh', 'process_NCEI_04b.sub',
'process_NCEI_05.sh', 'process_NCEI_05.sub',
'process_NCEI_06.sh', 'process_NCEI_06.sub',
'process_NCEI_07.sh', 'process_NCEI_07.sub',
'process_NCEI_08.sh', 'process_NCEI_08.sub',
'process_NCEI_09.sh', 'process_NCEI_09.sub']
#
dependencies = ['os', 'sys', 'datetime', 'glob', 'numpy', 'pandas', 'h5py',
'matplotlib', 'matplotlib.pyplot', 'gdal', 'osgeo.osr',
'scipy.interpolate', 'scipy.ndimage', 'scipy.stats',
'mpl_toolkits', 'mpl_toolkits.basemap', 'pickle']
#
gz_data_files = ['EPA_L4_Ecoregions_WLS_UTM15N.bil.gz',
'NCEI_WLS_19830101-20151031.csv.gz',
'NLCD_2011_WLS_UTM15N.bil.gz']
#
data_files = ['EPA_L4_Ecoregions_WLS_polygonIDs.txt',
'EPA_L4_Ecoregions_WLS_UTM15N.bil',
'EPA_L4_Ecoregions_WLS_UTM15N.hdr',
'NCEI_WLS_19830101-20151031.csv',
'NCEP_CPC_AO_indices.csv',
'NCEP_CPC_ENSO_indices.csv',
'NCEP_CPC_NAO_indices.csv',
'NCEP_CPC_PNA_indices.csv',
'NLCD_2011_WLS_UTM15N.bil',
'NLCD_2011_WLS_UTM15N.hdr',
'NOAA_ESRL_AMO_indices.csv',
'NOAA_ESRL_PDO_indices.csv',
'NSIDC_MIFL_Superior_Ice.csv',
'Query_locations_dates_sample.csv']
#
doc_files = ['How_to_get_NCEI_GHCND_data.txt',
'NCEI_GHCND_documentation.pdf']
#
tools = ['query_NCEI_grids.py', 'orientation_maps.py']
#
add_dirs = ['analyses', 'grids', 'images']
#
analyses_dirs = ['annual_maps', 'cluster_maps', 'ecoregion_maps',
'figures', 'summary_maps']
#
os.system('rm .DS_Store')
os.system('rm */.DS_Store')
os.system('rm ._*')
os.system('rm */._*')
#
message('checking for auxiliary files that should accompany this software')
txts_present = glob.glob('*.txt')
mds_present = glob.glob('*.md')
absent = 0
for txt in txt_files:
if txt in txts_present:
message('- found auxiliary file \'%s\' as expected' % txt)
else:
message('- auxiliary file \'%s\' is absent' % txt)
absent += 1
for md in md_files:
if md in mds_present:
message('- found auxiliary file \'%s\' as expected' % md)
else:
message('- auxiliary file \'%s\' is absent' % md)
absent += 1
if absent > 0:
message('- you don\'t need them to run things, but you do need them to \
understand things')
message('- you should probably download this package again from scratch')
message('- exiting setup procedure')
sys.exit(1)
message(' ')
#
message('checking for top-level directories that should already exist')
dirs_present = [d.replace('/', '') for d in glob.glob('*/')]
absent = 0
for dirname in main_dirs:
if dirname in dirs_present:
message('- found main directory \'%s\' as expected' % dirname)
else:
message('- main directory \'%s\' is absent' % dirname)
absent += 1
if absent > 0:
message('- you should download this package again from scratch')
message('- exiting setup procedure')
sys.exit(1)
message(' ')
#
message('checking for main scripts and modules that comprise this software')
src_present = glob.glob('source/*')
absent = 0
for srcfile in scripts:
srcfile = 'source/%s' % srcfile
if srcfile in src_present:
message('- found script \'%s\' as expected' % srcfile)
else:
message('- script \'%s\' is absent' % srcfile)
absent += 1
for srcfile in modules:
srcfile = 'source/%s' % srcfile
if srcfile in src_present:
message('- found module \'%s\' as expected' % srcfile)
else:
message('- module \'%s\' is absent' % srcfile)
absent += 1
if absent > 0:
message('- you should download this package again from scratch')
message('- exiting setup procedure')
sys.exit(1)
message(' ')
#
message('checking for script-based tools that accompany this software')
src_present = glob.glob('tools/*')
absent = 0
for srcfile in tools:
srcfile = 'tools/%s' % srcfile
if srcfile in src_present:
message('- found script \'%s\' as expected' % srcfile)
else:
message('- script \'%s\' is absent' % srcfile)
absent += 1
if absent > 0:
message('- if you need these tools, you should download this package \
again from scratch')
message(' ')
#
message('checking for HTCondor example files that accompany this software')
src_present = glob.glob('htcondor/*')
absent = 0
for srcfile in htcondor:
srcfile = 'htcondor/%s' % srcfile
if srcfile in src_present:
message('- found htcondor file \'%s\' as expected' % srcfile)
else:
message('- htcondor file \'%s\' is absent' % srcfile)
absent += 1
if absent > 0:
message('- if you need these files, you should download this package \
again from scratch')
message(' ')
#
message('checking for essential python package dependencies for this software')
err = 0
#
try:
import os
message('- python dependency \'os\' is available')
except ImportError:
message('- essential python dependency \'os\' is not available')
err += 1
#
try:
import sys
message('- python dependency \'sys\' is available')
except ImportError:
message('- essential python dependency \'sys\' is not available')
err += 1
#
try:
import datetime
message('- python dependency \'datetime\' is available')
except ImportError:
message('- essential python dependency \'datetime\' is not available')
err += 1
#
try:
import glob
message('- python dependency \'glob\' is available')
except ImportError:
message('- essential python dependency \'glob\' is not available')
err += 1
#
try:
import pickle
message('- python dependency \'pickle\' is available')
except ImportError:
message('- essential python dependency \'pickle\' is not available')
err += 1
#
try:
import numpy
message('- python dependency \'numpy\' is available')
except ImportError:
message('- essential python dependency \'numpy\' is not available')
err += 1
#
try:
import pandas
message('- python dependency \'pandas\' is available')
except ImportError:
message('- essential python dependency \'pandas\' is not available')
err += 1
#
try:
import h5py
message('- python dependency \'h5py\' is available')
except ImportError:
message('- essential python dependency \'h5py\' is not available')
err += 1
#
try:
import gdal
message('- python dependency \'gdal\' is available')
except ImportError:
message('- essential python dependency \'gdal\' is not available')
err += 1
#
try:
import osgeo.osr
message('- python dependency \'osgeo.osr\' is available')
except ImportError:
message('- essential python dependency \'osgeo.osr\' is not available')
err += 1
#
try:
import scipy.interpolate
message('- python dependency \'scipy.interpolate\' is available')
except ImportError:
message('- essential python dependency \'scipy.interpolate\' is not \
available')
err += 1
#
try:
import scipy.ndimage
message('- python dependency \'scipy.ndimage\' is available')
except ImportError:
message('- essential python dependency \'scipy.ndimage\' is not available')
err += 1
#
try:
import scipy.stats
message('- python dependency \'scipy.stats\' is available')
except ImportError:
message('- essential python dependency \'scipy.stats\' is not available')
err += 1
#
try:
import matplotlib
message('- python dependency \'matplotlib\' is available')
except ImportError:
message('- essential python dependency \'matplotlib\' is not available')
err += 1
#
try:
import matplotlib.pyplot
message('- python dependency \'matplotlib.pyplot\' is available')
except ImportError:
message('- essential python dependency \'matplotlib.pyplot\' is not \
available')
err += 1
#
try:
import mpl_toolkits
message('- python dependency \'mpl_toolkits\' is available')
except ImportError:
message('- essential python dependency \'mpl_toolkits\' is not available')
err += 1
#
try:
import mpl_toolkits.basemap
message('- python dependency \'mpl_toolkits.basemap\' is available')
except ImportError:
message('- essential python dependency \'mpl_toolkits.basemap\' is not \
available')
err += 1
#
if err > 0:
message('- you need to install one or more additional python packages for \
this software to work')
message('- all of these packages are available via Anaconda (\'conda\') \
and/or PyPI (\'pip\') repositories')
message('- exiting setup procedure')
sys.exit(1)
message(' ')
#
message('checking for example data files that should accompany this software')
gz_data_present = glob.glob('data/*.gz')
absent = 0
for gz_dfile in gz_data_files:
gz_dfile_path = 'data/%s' % gz_dfile
if gz_dfile_path in gz_data_present:
message('- found compressed data file \'%s\' as expected' % gz_dfile)
message('-- uncompressing \'%s\'' % gz_dfile)
        # each os.system() call runs in its own shell, so a separate 'cd'
        # never affects the next call; give gunzip the full path instead
        os.system('gunzip data/%s' % gz_dfile)
else:
message('- compressed example data file \'%s\' is absent' % gz_dfile)
absent += 1
if absent > 0:
message('- you don\'t need these if you have your own data in the right \
formats')
    message('- if you need the examples, you can find them on GitHub at')
message(' https://github.com/megarcia/WxCD')
#
data_present = glob.glob('data/*')
absent = 0
for dfile in data_files:
dfile_path = 'data/%s' % dfile
if dfile_path in data_present:
message('- found data file \'%s\' as expected' % dfile)
else:
message('- example data file \'%s\' is absent' % dfile)
absent += 1
if absent > 0:
message('- you don\'t need these if you have your own data in the right \
formats')
    message('- if you need the examples, you can find them on GitHub at')
message(' https://github.com/megarcia/WxCD')
message(' ')
#
message('checking for data documentation files that should accompany this \
software')
docs_present = glob.glob('docs/*')
absent = 0
for dfile in doc_files:
dfile = 'docs/%s' % dfile
if dfile in docs_present:
message('- found documentation file \'%s\' as expected' % dfile)
else:
message('- data documentation file \'%s\' is absent' % dfile)
absent += 1
if absent > 0:
message('- you don\'t need these if you have your own documentation')
    message('- if you need the examples, you can find them on GitHub at')
message(' https://github.com/megarcia/GT16_JGRA')
message(' ')
#
message('creating top-level and sub-directories that will be used for process \
output')
for dirname in add_dirs:
os.system('mkdir %s' % dirname)
message('- made top-level directory \'%s\' ' % dirname)
for dirname in analyses_dirs:
os.system('mkdir analyses/%s' % dirname)
message('- made sub-directory \'analyses/%s\' ' % dirname)
message(' ')
#
message('copying source scripts and modules to top-level directory')
os.system('cp source/*.py .')
message('archiving original scripts and modules to \'source_orig\' directory')
os.system('mv source source_orig')
#
message('copying tools to top-level directory')
os.system('cp tools/*.py .')
message('archiving original tools scripts to \'tools_orig\' directory')
os.system('mv tools tools_orig')
message(' ')
#
message('all set!')
message(' ')
#
message('if you plan to use the HTCondor example files, you\'ll need to \
move or copy them to')
message(' your top-level directory')
message(' ')
#
message('make sure to read the \'README.md\' file before you get started on \
the scripts')
message(' ')
#
message('if you need help getting your own dataset of GHCND weather \
observations, there is')
message(' a how-to document in the \'docs\' directory')
message(' ')
#
message('please send questions, bug reports, any other requests to \
[email protected]')
message(' (and include a helpfully descriptive subject line, if you could)')
message('or submit them through the Issues tab at the GitHub repository for \
this package')
message(' ')
#
sys.exit(0)<|fim▁end|> | |
<|file_name|>pgoapi.py<|end_file_name|><|fim▁begin|>"""
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
import logging
import re
import requests
from utilities import f2i, h2f
from rpc_api import RpcApi
from auth_ptc import AuthPtc
from auth_google import AuthGoogle
from exceptions import AuthException, NotLoggedInException, ServerBusyOrOfflineException
import protos.RpcEnum_pb2 as RpcEnum
logger = logging.getLogger(__name__)<|fim▁hole|>
def __init__(self):
self.log = logging.getLogger(__name__)
self._auth_provider = None
self._api_endpoint = None
self._position_lat = 0
self._position_lng = 0
self._position_alt = 0
self._req_method_list = []
def call(self):
if not self._req_method_list:
return False
if self._auth_provider is None or not self._auth_provider.is_login():
self.log.info('Not logged in')
return False
player_position = self.get_position()
request = RpcApi(self._auth_provider)
if self._api_endpoint:
api_endpoint = self._api_endpoint
else:
api_endpoint = self.API_ENTRY
self.log.info('Execution of RPC')
response = None
try:
response = request.request(api_endpoint, self._req_method_list, player_position)
except ServerBusyOrOfflineException as e:
self.log.info('Server seems to be busy or offline - try again!')
# cleanup after call execution
self.log.info('Cleanup of request!')
self._req_method_list = []
return response
#def get_player(self):
def list_curr_methods(self):
for i in self._req_method_list:
print("{} ({})".format(RpcEnum.RequestMethod.Name(i),i))
def set_logger(self, logger):
        self.log = logger or logging.getLogger(__name__)
def get_position(self):
return (self._position_lat, self._position_lng, self._position_alt)
def set_position(self, lat, lng, alt):
self.log.debug('Set Position - Lat: %s Long: %s Alt: %s', lat, lng, alt)
self._position_lat = f2i(lat)
self._position_lng = f2i(lng)
self._position_alt = f2i(alt)
def __getattr__(self, func):
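        # Dynamic RPC builder: e.g. api.get_player() resolves here, the name is
        # matched against RpcEnum.RequestMethod ('GET_PLAYER'), and the request
        # is queued until call() executes the whole batch.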
def function(**kwargs):
if not self._req_method_list:
self.log.info('Create new request...')
name = func.upper()
if kwargs:
self._req_method_list.append( { RpcEnum.RequestMethod.Value(name): kwargs } )
self.log.info("Adding '%s' to RPC request including arguments", name)
self.log.debug("Arguments of '%s': \n\r%s", name, kwargs)
else:
self._req_method_list.append( RpcEnum.RequestMethod.Value(name) )
self.log.info("Adding '%s' to RPC request", name)
return self
if func.upper() in RpcEnum.RequestMethod.keys():
return function
else:
raise AttributeError
def login(self, provider, username, password):
if not isinstance(username, basestring) or not isinstance(password, basestring):
raise AuthException("Username/password not correctly specified")
if provider == 'ptc':
self._auth_provider = AuthPtc()
elif provider == 'google':
self._auth_provider = AuthGoogle()
else:
raise AuthException("Invalid authentication provider - only ptc/google available.")
self.log.debug('Auth provider: %s', provider)
if not self._auth_provider.login(username, password):
self.log.info('Login process failed')
return False
self.log.info('Starting RPC login sequence (app simulation)')
# making a standard call, like it is also done by the client
self.get_player()
self.get_hatched_eggs()
self.get_inventory()
self.check_awarded_badges()
self.download_settings(hash="4a2e9bc330dae60e7b74fc85b98868ab4700802e")
response = self.call()
if not response:
self.log.info('Login failed!')
return False
if 'api_url' in response:
self._api_endpoint = ('https://{}/rpc'.format(response['api_url']))
self.log.debug('Setting API endpoint to: %s', self._api_endpoint)
else:
self.log.error('Login failed - unexpected server response!')
return False
if 'auth_ticket' in response:
self._auth_provider.set_ticket(response['auth_ticket'].values())
self.log.info('Finished RPC login sequence (app simulation)')
self.log.info('Login process completed')
return True<|fim▁end|> |
class PGoApi:
API_ENTRY = 'https://pgorelease.nianticlabs.com/plfe/rpc' |
<|file_name|>weird-exprs.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::util;
// Just a grab bag of stuff that you wouldn't want to actually write.
fn strange() -> bool { let _x: bool = return true; }
fn funny() {
fn f(_x: ()) { }
f(return);
}
fn what() {
fn the(x: @mut bool) { return while !*x { *x = true; }; }
let i = @mut false;
let dont = {||the(i)};
dont();
assert!((*i));
}<|fim▁hole|> if (return) {
match (return) {
1 => {
if (return) {
return
} else {
return
}
}
_ => { return }
};
} else if (return) {
return;
}
}
if (return) { break; }
}
}
fn notsure() {
let mut _x;
let mut _y = (_x = 0) == (_x = 0);
let mut _z = (_x = 0) < (_x = 0);
let _a = (_x += 0) == (_x = 0);
let _b = util::swap(&mut _y, &mut _z) == util::swap(&mut _y, &mut _z);
}
fn canttouchthis() -> uint {
fn p() -> bool { true }
let _a = (assert!((true)) == (assert!(p())));
let _c = (assert!((p())) == ());
let _b: bool = (debug!("%d", 0) == (return 0u));
}
fn angrydome() {
loop { if break { } }
let mut i = 0;
loop { i += 1; if i == 1 { match (loop) { 1 => { }, _ => fail!("wat") } }
break; }
}
fn evil_lincoln() { let evil = debug!("lincoln"); }
pub fn main() {
strange();
funny();
what();
zombiejesus();
notsure();
canttouchthis();
angrydome();
evil_lincoln();
}<|fim▁end|> |
fn zombiejesus() {
loop {
while (return) { |
<|file_name|>wf.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use astconv::AstConv;
use check::{FnCtxt, Inherited, blank_fn_ctxt, regionck};
use constrained_type_params::{identify_constrained_type_params, Parameter};
use CrateCtxt;
use middle::region;
use middle::subst::{self, TypeSpace, FnSpace, ParamSpace, SelfSpace};
use middle::traits;
use middle::ty::{self, Ty};
use middle::ty::liberate_late_bound_regions;
use middle::ty_fold::{TypeFolder, TypeFoldable, super_fold_ty};
use util::ppaux::{Repr, UserString};
use std::collections::HashSet;
use syntax::ast;
use syntax::ast_util::local_def;
use syntax::codemap::{DUMMY_SP, Span};
use syntax::parse::token::{self, special_idents};
use syntax::visit;
use syntax::visit::Visitor;
pub struct CheckTypeWellFormedVisitor<'ccx, 'tcx:'ccx> {
ccx: &'ccx CrateCtxt<'ccx, 'tcx>,
cache: HashSet<Ty<'tcx>>
}
impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
pub fn new(ccx: &'ccx CrateCtxt<'ccx, 'tcx>) -> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
CheckTypeWellFormedVisitor { ccx: ccx, cache: HashSet::new() }
}
fn tcx(&self) -> &ty::ctxt<'tcx> {
self.ccx.tcx
}
/// Checks that the field types (in a struct def'n) or argument types (in an enum def'n) are
/// well-formed, meaning that they do not require any constraints not declared in the struct
/// definition itself. For example, this definition would be illegal:
///
/// struct Ref<'a, T> { x: &'a T }
///
/// because the type did not declare that `T:'a`.
///
/// We do this check as a pre-pass before checking fn bodies because if these constraints are
/// not included it frequently leads to confusing errors in fn bodies. So it's better to check
/// the types first.
fn check_item_well_formed(&mut self, item: &ast::Item) {
let ccx = self.ccx;
debug!("check_item_well_formed(it.id={}, it.ident={})",
item.id,
ty::item_path_str(ccx.tcx, local_def(item.id)));
match item.node {
/// Right now we check that every default trait implementation
/// has an implementation of itself. Basically, a case like:
///
/// `impl Trait for T {}`
///
/// has a requirement of `T: Trait` which was required for default
/// method implementations. Although this could be improved now that
/// there's a better infrastructure in place for this, it's being left
/// for a follow-up work.
///
/// Since there's such a requirement, we need to check *just* positive
/// implementations, otherwise things like:
///
/// impl !Send for T {}
///
/// won't be allowed unless there's an *explicit* implementation of `Send`
/// for `T`
ast::ItemImpl(_, ast::ImplPolarity::Positive, _, _, _, _) => {
self.check_impl(item);
}
ast::ItemImpl(_, ast::ImplPolarity::Negative, _, Some(_), _, _) => {
let trait_ref = ty::impl_trait_ref(ccx.tcx,
local_def(item.id)).unwrap();
ty::populate_implementations_for_trait_if_necessary(ccx.tcx, trait_ref.def_id);
match ccx.tcx.lang_items.to_builtin_kind(trait_ref.def_id) {
Some(ty::BoundSend) | Some(ty::BoundSync) => {}
Some(_) | None => {
if !ty::trait_has_default_impl(ccx.tcx, trait_ref.def_id) {
span_err!(ccx.tcx.sess, item.span, E0192,
"negative impls are only allowed for traits with \
default impls (e.g., `Send` and `Sync`)")
}
}
}
}
ast::ItemFn(..) => {
self.check_item_type(item);
}
ast::ItemStatic(..) => {
self.check_item_type(item);
}
ast::ItemConst(..) => {
self.check_item_type(item);
}
ast::ItemStruct(ref struct_def, ref ast_generics) => {
self.check_type_defn(item, |fcx| {
vec![struct_variant(fcx, &**struct_def)]
});
self.check_variances_for_type_defn(item, ast_generics);
}
ast::ItemEnum(ref enum_def, ref ast_generics) => {
self.check_type_defn(item, |fcx| {
enum_variants(fcx, enum_def)
});
self.check_variances_for_type_defn(item, ast_generics);
}
ast::ItemTrait(_, _, _, ref items) => {
let trait_predicates =
ty::lookup_predicates(ccx.tcx, local_def(item.id));
reject_non_type_param_bounds(ccx.tcx, item.span, &trait_predicates);
if ty::trait_has_default_impl(ccx.tcx, local_def(item.id)) {
if !items.is_empty() {
span_err!(ccx.tcx.sess, item.span, E0380,
"traits with default impls (`e.g. unsafe impl \
Trait for ..`) must have no methods or associated items")
}
}
}
_ => {}
}
}
fn with_fcx<F>(&mut self, item: &ast::Item, mut f: F) where
F: for<'fcx> FnMut(&mut CheckTypeWellFormedVisitor<'ccx, 'tcx>, &FnCtxt<'fcx, 'tcx>),
{
let ccx = self.ccx;
let item_def_id = local_def(item.id);
let type_scheme = ty::lookup_item_type(ccx.tcx, item_def_id);
let type_predicates = ty::lookup_predicates(ccx.tcx, item_def_id);
reject_non_type_param_bounds(ccx.tcx, item.span, &type_predicates);
let param_env =
ty::construct_parameter_environment(ccx.tcx,
item.span,
&type_scheme.generics,
&type_predicates,
item.id);
let inh = Inherited::new(ccx.tcx, param_env);
let fcx = blank_fn_ctxt(ccx, &inh, ty::FnConverging(type_scheme.ty), item.id);
f(self, &fcx);
fcx.select_all_obligations_or_error();
regionck::regionck_item(&fcx, item);
}
/// In a type definition, we check that to ensure that the types of the fields are well-formed.
fn check_type_defn<F>(&mut self, item: &ast::Item, mut lookup_fields: F) where
F: for<'fcx> FnMut(&FnCtxt<'fcx, 'tcx>) -> Vec<AdtVariant<'tcx>>,
{
self.with_fcx(item, |this, fcx| {
let variants = lookup_fields(fcx);
let mut bounds_checker = BoundsChecker::new(fcx,
item.id,
Some(&mut this.cache));
debug!("check_type_defn at bounds_checker.scope: {:?}", bounds_checker.scope);
for variant in &variants {
for field in &variant.fields {
// Regions are checked below.
bounds_checker.check_traits_in_ty(field.ty, field.span);
}
// For DST, all intermediate types must be sized.
if !variant.fields.is_empty() {
for field in variant.fields.init() {
fcx.register_builtin_bound(
field.ty,
ty::BoundSized,
traits::ObligationCause::new(field.span,
fcx.body_id,
traits::FieldSized));
}
}
}
let field_tys: Vec<Ty> =
variants.iter().flat_map(|v| v.fields.iter().map(|f| f.ty)).collect();
regionck::regionck_ensure_component_tys_wf(
fcx, item.span, &field_tys);
});
}
fn check_item_type(&mut self,
item: &ast::Item)
{
self.with_fcx(item, |this, fcx| {
let mut bounds_checker = BoundsChecker::new(fcx,
item.id,
Some(&mut this.cache));
debug!("check_item_type at bounds_checker.scope: {:?}", bounds_checker.scope);
let type_scheme = ty::lookup_item_type(fcx.tcx(), local_def(item.id));
let item_ty = fcx.instantiate_type_scheme(item.span,
&fcx.inh.param_env.free_substs,
&type_scheme.ty);
bounds_checker.check_traits_in_ty(item_ty, item.span);
});
}
fn check_impl(&mut self,
item: &ast::Item)
{
self.with_fcx(item, |this, fcx| {
let mut bounds_checker = BoundsChecker::new(fcx,
item.id,
Some(&mut this.cache));
debug!("check_impl at bounds_checker.scope: {:?}", bounds_checker.scope);
// Find the impl self type as seen from the "inside" --
// that is, with all type parameters converted from bound
// to free.
let self_ty = ty::node_id_to_type(fcx.tcx(), item.id);
let self_ty = fcx.instantiate_type_scheme(item.span,
&fcx.inh.param_env.free_substs,
&self_ty);
bounds_checker.check_traits_in_ty(self_ty, item.span);
// Similarly, obtain an "inside" reference to the trait
// that the impl implements.
let trait_ref = match ty::impl_trait_ref(fcx.tcx(), local_def(item.id)) {
None => { return; }
Some(t) => { t }
};
let trait_ref = fcx.instantiate_type_scheme(item.span,
&fcx.inh.param_env.free_substs,
&trait_ref);
// We are stricter on the trait-ref in an impl than the
// self-type. In particular, we enforce region
// relationships. The reason for this is that (at least
// presently) "applying" an impl does not require that the
// application site check the well-formedness constraints on the
// trait reference. Instead, this is done at the impl site.
// Arguably this is wrong and we should treat the trait-reference
// the same way as we treat the self-type.
bounds_checker.check_trait_ref(&trait_ref, item.span);
let cause =
traits::ObligationCause::new(
item.span,
fcx.body_id,
traits::ItemObligation(trait_ref.def_id));
// Find the supertrait bounds. This will add `int:Bar`.
let poly_trait_ref = ty::Binder(trait_ref);
let predicates = ty::lookup_super_predicates(fcx.tcx(), poly_trait_ref.def_id());
let predicates = predicates.instantiate_supertrait(fcx.tcx(), &poly_trait_ref);
let predicates = {
let selcx = &mut traits::SelectionContext::new(fcx.infcx(), fcx);
traits::normalize(selcx, cause.clone(), &predicates)
};
for predicate in predicates.value.predicates {
fcx.register_predicate(traits::Obligation::new(cause.clone(), predicate));
}
for obligation in predicates.obligations {
fcx.register_predicate(obligation);
}
});
}
fn check_variances_for_type_defn(&self,
item: &ast::Item,
ast_generics: &ast::Generics)
{
let item_def_id = local_def(item.id);
let ty_predicates = ty::lookup_predicates(self.tcx(), item_def_id);
let variances = ty::item_variances(self.tcx(), item_def_id);
let mut constrained_parameters: HashSet<_> =
variances.types
.iter_enumerated()
.filter(|&(_, _, &variance)| variance != ty::Bivariant)
.map(|(space, index, _)| self.param_ty(ast_generics, space, index))
.map(|p| Parameter::Type(p))
.collect();
identify_constrained_type_params(self.tcx(),
ty_predicates.predicates.as_slice(),
None,
&mut constrained_parameters);
for (space, index, _) in variances.types.iter_enumerated() {
let param_ty = self.param_ty(ast_generics, space, index);
if constrained_parameters.contains(&Parameter::Type(param_ty)) {
continue;
}
let span = self.ty_param_span(ast_generics, item, space, index);
self.report_bivariance(span, param_ty.name);
}
for (space, index, &variance) in variances.regions.iter_enumerated() {
if variance != ty::Bivariant {
continue;
}
assert_eq!(space, TypeSpace);
let span = ast_generics.lifetimes[index].lifetime.span;
let name = ast_generics.lifetimes[index].lifetime.name;
self.report_bivariance(span, name);
}
}
fn param_ty(&self,
ast_generics: &ast::Generics,
space: ParamSpace,
index: usize)
-> ty::ParamTy
{
let name = match space {
TypeSpace => ast_generics.ty_params[index].ident.name,
SelfSpace => special_idents::type_self.name,
FnSpace => self.tcx().sess.bug("Fn space occupied?"),
};
ty::ParamTy { space: space, idx: index as u32, name: name }
}
fn ty_param_span(&self,
ast_generics: &ast::Generics,
item: &ast::Item,
space: ParamSpace,
index: usize)
-> Span
{
match space {
TypeSpace => ast_generics.ty_params[index].span,
SelfSpace => item.span,
FnSpace => self.tcx().sess.span_bug(item.span, "Fn space occupied?"),
}
}
fn report_bivariance(&self,
span: Span,
param_name: ast::Name)
{
span_err!(self.tcx().sess, span, E0392,
"parameter `{}` is never used", param_name.user_string(self.tcx()));
let suggested_marker_id = self.tcx().lang_items.phantom_data();
match suggested_marker_id {
Some(def_id) => {
self.tcx().sess.fileline_help(
span,
&format!("consider removing `{}` or using a marker such as `{}`",
param_name.user_string(self.tcx()),
ty::item_path_str(self.tcx(), def_id)));
}
None => {
// no lang items, no help!
}
}
}
}
// Reject any predicates that do not involve a type parameter.
fn reject_non_type_param_bounds<'tcx>(tcx: &ty::ctxt<'tcx>,
span: Span,
predicates: &ty::GenericPredicates<'tcx>) {
for predicate in &predicates.predicates {
match predicate {
&ty::Predicate::Trait(ty::Binder(ref tr)) => {
let found_param = tr.input_types().iter()
.flat_map(|ty| ty.walk())
.any(is_ty_param);
if !found_param { report_bound_error(tcx, span, tr.self_ty() )}
}
&ty::Predicate::TypeOutlives(ty::Binder(ty::OutlivesPredicate(ty, _))) => {
let found_param = ty.walk().any(|t| is_ty_param(t));
if !found_param { report_bound_error(tcx, span, ty) }
}
_ => {}
};
}
fn report_bound_error<'t>(tcx: &ty::ctxt<'t>,
span: Span,
bounded_ty: ty::Ty<'t>) {
span_err!(tcx.sess, span, E0193,
"cannot bound type `{}`, where clause \
bounds may only be attached to types involving \
type parameters",
bounded_ty.repr(tcx))
}
fn is_ty_param(ty: ty::Ty) -> bool {
match &ty.sty {
&ty::TyParam(_) => true,
_ => false
}
}
}
fn reject_shadowing_type_parameters<'tcx>(tcx: &ty::ctxt<'tcx>,
span: Span,
generics: &ty::Generics<'tcx>) {
let impl_params = generics.types.get_slice(subst::TypeSpace).iter()
.map(|tp| tp.name).collect::<HashSet<_>>();
for method_param in generics.types.get_slice(subst::FnSpace) {
if impl_params.contains(&method_param.name) {
span_err!(tcx.sess, span, E0194,
"type parameter `{}` shadows another type parameter of the same name",
token::get_name(method_param.name));
}
}
}
impl<'ccx, 'tcx, 'v> Visitor<'v> for CheckTypeWellFormedVisitor<'ccx, 'tcx> {
fn visit_item(&mut self, i: &ast::Item) {
self.check_item_well_formed(i);
visit::walk_item(self, i);
}
fn visit_fn(&mut self,
fk: visit::FnKind<'v>, fd: &'v ast::FnDecl,
b: &'v ast::Block, span: Span, id: ast::NodeId) {
match fk {
visit::FkFnBlock | visit::FkItemFn(..) => {}
visit::FkMethod(..) => {
match ty::impl_or_trait_item(self.tcx(), local_def(id)) {
ty::ImplOrTraitItem::MethodTraitItem(ty_method) => {
reject_shadowing_type_parameters(self.tcx(), span, &ty_method.generics)
}
_ => {}
}
}
}
visit::walk_fn(self, fk, fd, b, span)
}
fn visit_trait_item(&mut self, trait_item: &'v ast::TraitItem) {
if let ast::MethodTraitItem(_, None) = trait_item.node {
match ty::impl_or_trait_item(self.tcx(), local_def(trait_item.id)) {
ty::ImplOrTraitItem::MethodTraitItem(ty_method) => {
reject_non_type_param_bounds(
self.tcx(),
trait_item.span,
&ty_method.predicates);
reject_shadowing_type_parameters(
self.tcx(),
trait_item.span,
&ty_method.generics);
}
_ => {}
}
}
visit::walk_trait_item(self, trait_item)
}
}
pub struct BoundsChecker<'cx,'tcx:'cx> {
fcx: &'cx FnCtxt<'cx,'tcx>,
span: Span,
// This field is often attached to item impls; it is not clear
// that `CodeExtent` is well-defined for such nodes, so pnkfelix
// has left it as a NodeId rather than porting to CodeExtent.
scope: ast::NodeId,
binding_count: usize,
cache: Option<&'cx mut HashSet<Ty<'tcx>>>,
}
impl<'cx,'tcx> BoundsChecker<'cx,'tcx> {
pub fn new(fcx: &'cx FnCtxt<'cx,'tcx>,
scope: ast::NodeId,
cache: Option<&'cx mut HashSet<Ty<'tcx>>>)
-> BoundsChecker<'cx,'tcx> {
BoundsChecker { fcx: fcx, span: DUMMY_SP, scope: scope,
cache: cache, binding_count: 0 }
}
/// Given a trait ref like `A : Trait<B>`, where `Trait` is defined as (say):
///
/// trait Trait<B:OtherTrait> : Copy { ... }
///
/// This routine will check that `B : OtherTrait` and `A : Trait<B>`. It will also recursively
/// check that the types `A` and `B` are well-formed.
///
/// Note that it does not (currently, at least) check that `A : Copy` (that check is delegated
/// to the point where impl `A : Trait<B>` is implemented).
pub fn check_trait_ref(&mut self, trait_ref: &ty::TraitRef<'tcx>, span: Span) {
let trait_predicates = ty::lookup_predicates(self.fcx.tcx(), trait_ref.def_id);
let bounds = self.fcx.instantiate_bounds(span,
trait_ref.substs,
&trait_predicates);
self.fcx.add_obligations_for_parameters(
traits::ObligationCause::new(
span,
self.fcx.body_id,
traits::ItemObligation(trait_ref.def_id)),
&bounds);
for &ty in &trait_ref.substs.types {
self.check_traits_in_ty(ty, span);
}
}
pub fn check_ty(&mut self, ty: Ty<'tcx>, span: Span) {
self.span = span;
ty.fold_with(self);
}
fn check_traits_in_ty(&mut self, ty: Ty<'tcx>, span: Span) {
self.span = span;
// When checking types outside of a type def'n, we ignore
// region obligations. See discussion below in fold_ty().
self.binding_count += 1;
ty.fold_with(self);
self.binding_count -= 1;
}
}
impl<'cx,'tcx> TypeFolder<'tcx> for BoundsChecker<'cx,'tcx> {
fn tcx(&self) -> &ty::ctxt<'tcx> {
self.fcx.tcx()
}
fn fold_binder<T>(&mut self, binder: &ty::Binder<T>) -> ty::Binder<T>
where T : TypeFoldable<'tcx> + Repr<'tcx>
{
self.binding_count += 1;
let value = liberate_late_bound_regions(
self.fcx.tcx(),
region::DestructionScopeData::new(self.scope),
binder);
debug!("BoundsChecker::fold_binder: late-bound regions replaced: {} at scope: {:?}",
value.repr(self.tcx()), self.scope);
let value = value.fold_with(self);
self.binding_count -= 1;
ty::Binder(value)
}
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
debug!("BoundsChecker t={}",
t.repr(self.tcx()));
match self.cache {
Some(ref mut cache) => {<|fim▁hole|> if !cache.insert(t) {
// Already checked this type! Don't check again.
debug!("cached");
return t;
}
}
None => { }
}
        match t.sty {
ty::TyStruct(type_id, substs) |
ty::TyEnum(type_id, substs) => {
let type_predicates = ty::lookup_predicates(self.fcx.tcx(), type_id);
let bounds = self.fcx.instantiate_bounds(self.span, substs,
&type_predicates);
if self.binding_count == 0 {
self.fcx.add_obligations_for_parameters(
traits::ObligationCause::new(self.span,
self.fcx.body_id,
traits::ItemObligation(type_id)),
&bounds);
} else {
// There are two circumstances in which we ignore
// region obligations.
//
// The first is when we are inside of a closure
// type. This is because in that case the region
// obligations for the parameter types are things
// that the closure body gets to assume and the
// caller must prove at the time of call. In other
// words, if there is a type like `<'a, 'b> | &'a
// &'b int |`, it is well-formed, and caller will
// have to show that `'b : 'a` at the time of
// call.
//
// The second is when we are checking for
// well-formedness outside of a type def'n or fn
// body. This is for a similar reason: in general,
// we only do WF checking for regions in the
// result of expressions and type definitions, so
// to as allow for implicit where clauses.
//
// (I believe we should do the same for traits, but
// that will require an RFC. -nmatsakis)
let bounds = filter_to_trait_obligations(bounds);
self.fcx.add_obligations_for_parameters(
traits::ObligationCause::new(self.span,
self.fcx.body_id,
traits::ItemObligation(type_id)),
&bounds);
}
self.fold_substs(substs);
}
_ => {
super_fold_ty(self, t);
}
}
t // we're not folding to produce a new type, so just return `t` here
}
}
///////////////////////////////////////////////////////////////////////////
// ADT
struct AdtVariant<'tcx> {
fields: Vec<AdtField<'tcx>>,
}
struct AdtField<'tcx> {
ty: Ty<'tcx>,
span: Span,
}
fn struct_variant<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
struct_def: &ast::StructDef)
-> AdtVariant<'tcx> {
let fields =
struct_def.fields
.iter()
.map(|field| {
let field_ty = ty::node_id_to_type(fcx.tcx(), field.node.id);
let field_ty = fcx.instantiate_type_scheme(field.span,
&fcx.inh.param_env.free_substs,
&field_ty);
AdtField { ty: field_ty, span: field.span }
})
.collect();
AdtVariant { fields: fields }
}
fn enum_variants<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
enum_def: &ast::EnumDef)
-> Vec<AdtVariant<'tcx>> {
enum_def.variants.iter()
.map(|variant| {
match variant.node.kind {
ast::TupleVariantKind(ref args) if !args.is_empty() => {
let ctor_ty = ty::node_id_to_type(fcx.tcx(), variant.node.id);
// the regions in the argument types come from the
// enum def'n, and hence will all be early bound
let arg_tys =
ty::no_late_bound_regions(
fcx.tcx(), &ty::ty_fn_args(ctor_ty)).unwrap();
AdtVariant {
fields: args.iter().enumerate().map(|(index, arg)| {
let arg_ty = arg_tys[index];
let arg_ty =
fcx.instantiate_type_scheme(variant.span,
&fcx.inh.param_env.free_substs,
&arg_ty);
AdtField {
ty: arg_ty,
span: arg.ty.span
}
}).collect()
}
}
ast::TupleVariantKind(_) => {
AdtVariant {
fields: Vec::new()
}
}
ast::StructVariantKind(ref struct_def) => {
struct_variant(fcx, &**struct_def)
}
}
})
.collect()
}
fn filter_to_trait_obligations<'tcx>(bounds: ty::InstantiatedPredicates<'tcx>)
-> ty::InstantiatedPredicates<'tcx>
{
let mut result = ty::InstantiatedPredicates::empty();
for (space, _, predicate) in bounds.predicates.iter_enumerated() {
match *predicate {
ty::Predicate::Trait(..) |
ty::Predicate::Projection(..) => {
result.predicates.push(space, predicate.clone())
}
ty::Predicate::Equate(..) |
ty::Predicate::TypeOutlives(..) |
ty::Predicate::RegionOutlives(..) => {
}
}
}
result
}<|fim▁end|> | |
<|file_name|>ReplProbe.js<|end_file_name|><|fim▁begin|>// ReplProbe.js (c) 2010-2013 Loren West and other contributors
// May be freely distributed under the MIT license.
// For further details and documentation:
// http://lorenwest.github.com/monitor-min
(function(root){
// Module loading - this runs server-side only
var Monitor = root.Monitor || require('../Monitor'),
_ = Monitor._,
Probe = Monitor.Probe,
REPL = require('repl'),
Stream = require('stream'),
util = require('util'),
events = require('events'),
ChildProcess = require('child_process');
// Statics
var CONSOLE_PROMPT = '> ';
var NEW_REPL = (typeof REPL.disableColors === 'undefined');
/**
* A probe based Read-Execute-Print-Loop console for node.js processes
*
* @class ReplProbe
* @extends Probe
* @constructor
* @param initParams {Object} Probe initialization parameters
* @param initParams.uniqueInstance - Usually specified to obtain a unique REPL probe instance
* @param model {Object} Monitor data model elements
* @param model.output {String} Last (current) REPL output line
* @param model.sequence {Integer} Increasing sequence number - to enforce unique line output
*/
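  // Usage sketch (monitor-side wiring assumed, not defined in this file):
  //   var replMonitor = new Monitor({probeClass: 'Repl',
  //                                  initParams: {uniqueInstance: Date.now()}});
  //   replMonitor.connect(function() {
  //     replMonitor.control('input', 'process.version');
  //   });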
var ReplProbe = Monitor.ReplProbe = Probe.extend({
probeClass: 'Repl',
description: 'A socket.io based Read-Execute-Print-Loop console for node.js processes.',
defaults: {
// This assures output events are sent, even if the
// data is the same as the prior output.
sequence: 0,
output: ''
},
initialize: function(attributes, options){
var t = this;
Probe.prototype.initialize.apply(t, arguments);
// Don't send change events before connected
process.nextTick(function(){
t.stream = new ReplStream(t);
if (NEW_REPL) {
t.repl = require('repl').start({
prompt: CONSOLE_PROMPT,
input: t.stream,
output: t.stream
});
} else {
t.repl = REPL.start(CONSOLE_PROMPT, t.stream);
}
t.htmlConsole = new HtmlConsole(t);
t.shellCmd = null;
t.repl.context.console = t.htmlConsole;
});
},
/**
* Send output to the terminal
*
* This forces the change event even if the last output is the same
* as this output.
*
* @protected
* @method output
* @param str {String} String to output to the repl console
*/
_output: function(str) {
var t = this;
t.set({
output: str,
sequence: t.get('sequence') + 1
});
},
/**
* Release any resources consumed by this probe.
*
* Stop the REPL console. Consoles live 1-1 with a UI counterpart, so stop
* requests exit the underlying repl console. If the probe is re-started it
* will get a new repl stream and console.
*
* @method release
*/
release: function(){
var t = this;
t.stream = null;
t.repl = null;
},
/**
* Process an autocomplete request from the client
*
* @method autocomplete
* @param {Object} params Named parameters
* @param {Function(error, returnParams)} callback Callback function
*/
autocomplete_control: function(params, callback) {
var t = this;
      if (typeof(params) !== 'string' || params.length < 1) {
        return callback("Autocomplete parameter must be a non-empty string");
      }
// Forward to the completion mechanism if it can be completed
if (params.substr(-1).match(/([0-9])|([a-z])|([A-Z])|([_])/)) {
t.repl.complete(params, callback);
} else {
// Return a no-op autocomplete
callback(null, [[],'']);
}
},
/**
* Handle user input from the console line
*
* @method input
* @param {Object} params Named parameters
* @param {Function(error, returnParams)} callback Callback function
*/
input_control: function(params, callback) {
var t = this;
if (params === '.break' && t.shellCmd) {
t.shellCmd.kill();
}
if (NEW_REPL) {
t.stream.emit('data', params + "\n");
} else {
t.stream.emit('data', params);
}
return callback(null);
},
/**
* Execute a shell command
*
* @method sh
* @param {Object} params Named parameters
* @param {Function(error, returnParams)} callback Callback function
*/
sh_control: function(params, callback) {
var t = this;
return callback(null, t._runShellCmd(params));
},
/**
* Run a shell command and emit the output to the browser.
*
* @private
* @method _runShellCmd
* @param {String} command - The shell command to invoke
*/
_runShellCmd: function(command) {
var t = this;
t.shellCmd = ChildProcess.exec(command, function(err, stdout, stderr) {
if (err) {
var outstr = 'exit';
if (err.code) {
outstr += ' (' + err.code + ')';
}
if (err.signal) {
outstr += ' ' + err.signal;
}
t._output(outstr);
return null;
}
if (stdout.length) {
t._output(stdout);
}
if (stderr.length) {
t._output(stderr);
}
t.shellCmd = null;
t._output(CONSOLE_PROMPT);
});
return null;
}
});
// Define an internal stream class for the probe
var ReplStream = function(probe){
var t = this;
t.probe = probe;
events.EventEmitter.call(t);
if (t.setEncoding) {
t.setEncoding('utf8');
}
};
util.inherits(ReplStream, events.EventEmitter);
// util.inherits(ReplStream, require('stream'));
ReplStream.prototype.readable = true;
ReplStream.prototype.writable = true;
['pause','resume','destroySoon','pipe', 'end']
.forEach(function(fnName){
ReplStream.prototype[fnName] = function(){
console.log("REPL Stream function unexpected: " + fnName);
};
});
['resume']
.forEach(function(fnName){
ReplStream.prototype[fnName] = function(){
// Handled
};
});
ReplStream.prototype.write = function(data) {
var t = this;
t.probe._output(data);
};
ReplStream.prototype.destroy = function(data) {
var t = this;
console.log("REPL stream destroy " + t.probe.get('id'));
t.probe.stop();
};
// Define format if it's not in util.
var formatRegExp = /%[sdj]/g;
var format = util.format || function (f) {
if (typeof f !== 'string') {
var objects = [];
for (var i = 0; i < arguments.length; i++) {
objects.push(util.inspect(arguments[i]));
}
return objects.join(' ');
}
var j = 1;
var args = arguments;
var str = String(f).replace(formatRegExp, function(x) {
switch (x) {
case '%s': return String(args[j++]);
case '%d': return Number(args[j++]);
case '%j': return JSON.stringify(args[j++]);
default:
return x;
}
});
for (var len = args.length, x = args[j]; j < len; x = args[++j]) {
if (x === null || typeof x !== 'object') {
str += ' ' + x;
} else {
str += ' ' + util.inspect(x);
}
}
return str;
};
// Re-define the console so it goes to the HTML window
var HtmlConsole = function(probe){
this.probe = probe;
};
HtmlConsole.prototype.log = function(msg) {
this.probe._output(format.apply(this, arguments));
};
HtmlConsole.prototype.info = HtmlConsole.prototype.log;
HtmlConsole.prototype.warn = HtmlConsole.prototype.log;
HtmlConsole.prototype.error = HtmlConsole.prototype.log;
HtmlConsole.prototype.dir = function(object) {
this.probe._output(util.inspect(object));
};
var times = {};
HtmlConsole.prototype.time = function(label) {
times[label] = Date.now();
};
HtmlConsole.prototype.timeEnd = function(label) {
var duration = Date.now() - times[label];
this.log('%s: %dms', label, duration);
};
<|fim▁hole|><|fim▁end|> | }(this)); |
<|file_name|>format_labels.js<|end_file_name|><|fim▁begin|>'use strict';
var Axes = require('../../plots/cartesian/axes');
module.exports = function formatLabels(cdi, trace, fullLayout) {
var labels = {};
var mockGd = {_fullLayout: fullLayout};
var xa = Axes.getFromTrace(mockGd, trace, 'x');
var ya = Axes.getFromTrace(mockGd, trace, 'y');
<|fim▁hole|> return labels;
};<|fim▁end|> | labels.xLabel = Axes.tickText(xa, xa.c2l(cdi.x), true).text;
labels.yLabel = Axes.tickText(ya, ya.c2l(cdi.y), true).text;
|
<|file_name|>worksheet.ts<|end_file_name|><|fim▁begin|>import { ExcelOOXMLTemplate, ExcelWorksheet, ExcelRow, ExcelColumn } from '@ag-grid-community/core';
import columnFactory from './column';
import rowFactory from './row';
import mergeCell from './mergeCell';
const updateColMinMax = (col: ExcelColumn, min: number, range: number, prevCol?: ExcelColumn): void => {
if (!col.min) {
col.min = min;
col.max = min + range;
return;
}
let currentMin = min;
if (prevCol) {
currentMin = Math.max(currentMin, prevCol.min!);
}
col.min = Math.max(col.min, currentMin);
col.max = Math.max(col.max!, currentMin + range);
};
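// Assigns Excel-style refs (e.g. 'B3') to every cell, widens each column's
// min/max to cover cells spanned via `mergeAcross`, and returns merge ranges
// such as 'A1:C1'.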
const getMergedCells = (rows: ExcelRow[], cols: ExcelColumn[]): string[] => {
const mergedCells: string[] = [];
rows.forEach((currentRow, rowIdx) => {
const cells = currentRow.cells;
let merges = 0;
currentRow.index = rowIdx + 1;
let lastCol: ExcelColumn;
cells.forEach((currentCell, cellIdx) => {
const min = cellIdx + merges + 1;
const start = getExcelColumnName(min);
const outputRow = rowIdx + 1;
if (currentCell.mergeAcross) {
merges += currentCell.mergeAcross;
const end = getExcelColumnName(cellIdx + merges + 1);
mergedCells.push(`${start}${outputRow}:${end}${outputRow}`);
}
if (!cols[min - 1]) {
cols[min - 1] = {} as ExcelColumn;<|fim▁hole|> }
updateColMinMax(cols[min - 1], min, merges, lastCol);
lastCol = cols[min - 1];
currentCell.ref = `${start}${outputRow}`;
});
});
return mergedCells;
};
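// Maps a 1-based column index to its Excel column name,
// e.g. 1 -> 'A', 26 -> 'Z', 27 -> 'AA', 703 -> 'AAA'.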
export const getExcelColumnName = (colIdx: number): string => {
const startCode = 65;
const tableWidth = 26;
const fromCharCode = String.fromCharCode;
const pos = Math.floor(colIdx / tableWidth);
const tableIdx = colIdx % tableWidth;
if (!pos || colIdx === tableWidth) { return fromCharCode(startCode + colIdx - 1); }
if (!tableIdx) { return getExcelColumnName(pos - 1) + 'Z'; }
if (pos < tableWidth) { return fromCharCode(startCode + pos - 1) + fromCharCode(startCode + tableIdx - 1); }
return getExcelColumnName(pos) + fromCharCode(startCode + tableIdx - 1);
};
const worksheetFactory: ExcelOOXMLTemplate = {
getTemplate(config: ExcelWorksheet) {
const {table} = config;
const {rows, columns} = table;
const mergedCells = (columns && columns.length) ? getMergedCells(rows, columns) : [];
const children = [];
if (columns.length) {
children.push({
name: 'cols',
children: columns.map(columnFactory.getTemplate)
});
}
if (rows.length) {
children.push({
name: 'sheetData',
children: rows.map(rowFactory.getTemplate)
});
}
if (mergedCells.length) {
children.push({
name: 'mergeCells',
properties: {
rawMap: {
count: mergedCells.length
}
},
children: mergedCells.map(mergeCell.getTemplate)
});
}
return {
name: "worksheet",
properties: {
prefixedAttributes:[{
prefix: "xmlns:",
map: {
r: "http://schemas.openxmlformats.org/officeDocument/2006/relationships"
}
}],
rawMap: {
xmlns: "http://schemas.openxmlformats.org/spreadsheetml/2006/main"
}
},
children
};
}
};
export default worksheetFactory;<|fim▁end|> | |
<|file_name|>authentication-guard.service.ts<|end_file_name|><|fim▁begin|>import { Injectable } from '@angular/core';
import { CanActivate, ActivatedRouteSnapshot, RouterStateSnapshot, Router } from '@angular/router';
import { Observable } from 'rxjs';
import { AuthenticationService } from './';
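// Usage sketch (route and component names assumed): attach the guard to any
// protected route, e.g.
//   { path: 'dashboard', component: DashboardComponent,
//     canActivate: [AuthenticationGuardService] }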
@Injectable()
export class AuthenticationGuardService implements CanActivate {
<|fim▁hole|> canActivate(route: ActivatedRouteSnapshot, state: RouterStateSnapshot): Observable<boolean>|Promise<boolean>|boolean {
if (AuthenticationService.hasToken(true) === true) {
return true;
} else {
this.$router.navigate(['/login']);
return false;
}
}
}<|fim▁end|> | constructor(private $router: Router) {}
|
<|file_name|>amp-embedly-card-impl.js<|end_file_name|><|fim▁begin|>import {removeElement} from '#core/dom';
import {Layout_Enum, applyFillContent} from '#core/dom/layout';
import {Services} from '#service';
import {userAssert} from '#utils/log';
import {TAG as KEY_TAG} from './amp-embedly-key';
import {getIframe} from '../../../src/3p-frame';
import {listenFor} from '../../../src/iframe-helper';
/**
* Component tag identifier.
* @const {string}
*/
export const TAG = 'amp-embedly-card';
/**
* Attribute name used to set api key with name
* expected by embedly.
* @const {string}
*/
const API_KEY_ATTR_NAME = 'data-card-key';
/**
* Implementation of the amp-embedly-card component.
* See {@link ../amp-embedly-card.md} for the spec.
*/
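// Markup sketch this component consumes (attribute values assumed):
//   <amp-embedly-card data-url="https://example.com/article"
//       layout="responsive" width="400" height="200"></amp-embedly-card>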
export class AmpEmbedlyCard extends AMP.BaseElement {
/** @param {!AmpElement} element */
constructor(element) {
super(element);
/** @private {?HTMLIFrameElement} */
this.iframe_ = null;
/** @private {?string} */
this.apiKey_ = null;
}
/** @override */
buildCallback() {
userAssert(
this.element.getAttribute('data-url'),
'The data-url attribute is required for <%s> %s',
TAG,
this.element
);
const ampEmbedlyKeyElement = document.querySelector(KEY_TAG);
if (ampEmbedlyKeyElement) {
this.apiKey_ = ampEmbedlyKeyElement.getAttribute('value');
}
}
/** @override */
layoutCallback() {
// Add optional paid api key attribute if provided
// to remove embedly branding.
if (this.apiKey_) {
this.element.setAttribute(API_KEY_ATTR_NAME, this.apiKey_);
}
const iframe = getIframe(this.win, this.element, 'embedly');
iframe.title = this.element.title || 'Embedly card';
const opt_is3P = true;
listenFor(
iframe,
'embed-size',
(data) => {
this.forceChangeHeight(data['height']);
},
opt_is3P
);
applyFillContent(iframe);
this.getVsync().mutate(() => {
this.element.appendChild(iframe);
});
this.iframe_ = iframe;
return this.loadPromise(iframe);<|fim▁hole|> if (this.iframe_) {
removeElement(this.iframe_);
this.iframe_ = null;
}
return true;
}
/** @override */
isLayoutSupported(layout) {
return layout == Layout_Enum.RESPONSIVE;
}
/**
* @param {boolean=} opt_onLayout
* @override
*/
preconnectCallback(opt_onLayout) {
Services.preconnectFor(this.win).url(
this.getAmpDoc(),
'https://cdn.embedly.com',
opt_onLayout
);
}
}<|fim▁end|> | }
/** @override */
unlayoutCallback() { |
<|file_name|>cover.min.js<|end_file_name|><|fim▁begin|>version https://git-lfs.github.com/spec/v1
oid sha256:c1d57d1ad50c4639ecd398deb6c1db998e272cc6faf1314dec77ca509ca49153<|fim▁hole|><|fim▁end|> | size 1303 |
<|file_name|>overwriting_attribute.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
def __init__(self):
self.var = 0
class D(C):
def __init__(self):
self.var = 1 # self.var will be overwritten
C.__init__(self)
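#Here D().var ends up 0: C.__init__ runs after the subclass assignment and
#clobbers it, which is the pitfall this example demonstrates.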
#Attribute set in both superclass and subclass
class E(object):
def __init__(self):
self.var = 0 # self.var will be overwritten
class F(E):
def __init__(self):
E.__init__(self)
self.var = 1<|fim▁end|> | #Attribute set in both superclass and subclass
class C(object): |
<|file_name|>matrans.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#-------------------------------------------------------------------------------
#License GPL v3.0
#Author: Alexandre Manhaes Savio <[email protected]>
#Grupo de Inteligencia Computational <www.ehu.es/ccwintco>
#Universidad del Pais Vasco UPV/EHU
#Use this at your own risk!
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#README:
#Transforms a NxN matrix volume (N^2 volumes in 4th dimension) into other measure maps.
#You can make a list of measures and they will be applied in order.
#A list of the implemented measures are listed below.
#Geodesic anisotropy equation was extracted from
#P. G. Batchelor et al. - A Rigorous Framework for Diffusion Tensor Calculus - Magnetic Resonance in Medicine 53:221-225 (2005)
# What is tensor denoising?
#Log-Euclidean tensor denoising was used to eliminate singular, negative definite, or rank-deficient tensors
#-------------------------------------------------------------------------------
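#Example invocation (hypothetical file names):
#  ./matrans.py -i jacmat.nii.gz -m mask.nii.gz -o out.nii.gz --deftensor --geodan
#Measures chain left to right: each voxel's Jacobian becomes the deformation
#tensor S=sqrtm(J'J), and geodesic anisotropy is then computed from S.
#-------------------------------------------------------------------------------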
#from IPython.core.debugger import Tracer; debug_here = Tracer()
import argparse, os, sys
from time import clock
import nibabel as nib
import numpy as np
from scipy.linalg import logm
from scipy.linalg.matfuncs import sqrtm
from numpy.linalg import det
from numpy.linalg import eigvals
from numpy.linalg import eigvalsh
#-------------------------------------------------------------------------------
#defining measure functions
def mylogm (v):
return np.reshape(logm(v.reshape(N,N)), [1,N*N])
#-------------------------------------------------------------------------------
def mydet (v):
return det(v.reshape(N,N))
#-------------------------------------------------------------------------------
def mytrace (v):
return np.trace(v.reshape(N,N))
#-------------------------------------------------------------------------------
def myeigvals (v):
return eigvals(v.reshape(N,N)).flatten()
#-------------------------------------------------------------------------------
def mymaxeigvals (v):
return max (myeigvals(v))
#-------------------------------------------------------------------------------
def myeigvalsh (v):
return eigvalsh(v.reshape(N,N)).flatten()
#-------------------------------------------------------------------------------
def mymaxeigvalsh (v):
return max (myeigvalsh(v))
#-------------------------------------------------------------------------------
def mydeftensor (v):
    j = v.reshape([N,N])
    # '*' on numpy arrays is elementwise; the deformation tensor needs the
    # matrix product J'J before the matrix square root
    s = sqrtm(np.dot(j.transpose(), j))
    return s.reshape([1,N*N])
#-------------------------------------------------------------------------------
def mygeodan (v):
    s = logm(v.reshape(N,N))
    # GA is the Frobenius norm of the deviatoric part of log(S); for the
    # symmetric matrix m, trace(m.dot(m)) equals the sum of squared entries,
    # not the trace of the elementwise square
    m = s - np.trace(s)/N * np.eye(N)
    return np.sqrt(np.sum(np.square(m)))
#-------------------------------------------------------------------------------
def calculate_measures (funcs, data, odims):
for i in range(len(funcs)):
measure = funcs[i]
data = measure(data)
return data
#-------------------------------------------------------------------------------
def set_parser():
parser = argparse.ArgumentParser(description='Transforms a NxN matrix volume (N^2 volumes in 4th dimension) into other measure maps. \n You can make a list of measures and they will be applied in order. \n A list of the implemented measures are listed below.', prefix_chars='-')
parser.add_argument('-i', '--in', dest='infile', required=True,
help='Jacobian matrix volume (4DVolume with 9 volumes)')
parser.add_argument('-m', '--mask', dest='maskfile', required=False,
help='Mask file')
parser.add_argument('-o', '--out', dest='outfile', required=True,
help='Output file name')
parser.add_argument('-N', '--dims', dest='dims', required=False, default=3, type=int,
help='Order of the matrices in the volume')
parser.add_argument('--matlog', dest='funcs', action='append_const', const='matlog',
help='Matrix logarithm')
parser.add_argument('--deftensor', dest='funcs', action='append_const', const='deftensor',
help='Deformation tensor S=sqrtm(J`*J)')
parser.add_argument('--det', dest='funcs', action='append_const', const='det',
help='Determinant')
parser.add_argument('--trace', dest='funcs', action='append_const', const='trace',
help='Trace')
parser.add_argument('--eigvals', dest='funcs', action='append_const', const='eigvals',
help='Eigenvalues of a general matrix')
parser.add_argument('--maxeigvals', dest='funcs', action='append_const', const='maxeigvals',
help='Maximum eigenvalue of a general matrix')
parser.add_argument('--eigvalsh', dest='funcs', action='append_const', const='eigvalsh',
help='Eigenvalues of a Hermitian or real symmetric matrix')
parser.add_argument('--maxeigvalsh', dest='funcs', action='append_const', const='maxeigvalsh',
help='Maximum eigenvalue of a Hermitian or real symmetric matrix')
parser.add_argument('--geodan', dest='funcs', action='append_const', const='geodan',
                        help='Geodesic anisotropy: sqrt(trace((matlog(S) - (trace(matlog(S))/N)*eye(N))^2)), where N==3')
return parser
#Geodesic anisotropy from:
#COMPARISON OF FRACTIONAL AND GEODESIC ANISOTROPY IN DIFFUSION TENSOR IMAGES OF 90 MONOZYGOTIC AND DIZYGOTIC TWINS
#Agatha D. Lee1, Natasha Lepore1, Marina Barysheva1, Yi-Yu Chou1, Caroline Brun1, Sarah K. Madsen1, Katie L. McMahon2, 1 Greig I. de Zubicaray2, Matthew Meredith2, Margaret J. Wright3, Arthur W. Toga1, Paul M. Thompson
#http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.142.3274
#-------------------------------------------------------------------------------
## START MATRIX TRANSFORMATIONS
#-------------------------------------------------------------------------------
def main():
#parsing arguments
parser = set_parser()
#parsing arguments
try:
args = parser.parse_args ()
except argparse.ArgumentError, exc:
        print(str(exc))
        parser.error(str(exc))
return -1
ifile = args.infile.strip()
ofile = args.outfile.strip()
maskf = args.maskfile.strip()
funcs = args.funcs
#setting the global variable that indicates the order of the matrices
global N
N = args.dims
#loading file and preprocessing
iinfo = nib.load(ifile)
affine = iinfo.get_affine()
minfo = nib.load(maskf)
if len(iinfo.shape) != 4:
err = 'File ' + ifile + ' should be a 4D volume'
print(err)
return -1
#global variable N (for the nested functions)
    N = np.sqrt(iinfo.shape[3])
    if not N % 1 == 0:
        err = 'File ' + ifile + ' should have N*N volumes along its 4th dimension (a perfect square).'
        print(err)
        return -1
    N = int(N)
try:
#deciding what function to use
# and indicating size of 4th dimension of output
myfuncs = {}
odims = np.empty(len(funcs), dtype=int)
for i in range(len(funcs)):
if funcs [i] == 'matlog':
myfuncs[i] = mylogm
odims [i] = N
elif funcs[i] == 'det':
myfuncs[i] = mydet
odims [i] = 1
elif funcs[i] == 'trace':
myfuncs[i] = mytrace
odims [i] = 1
elif funcs[i] == 'deftensor':
myfuncs[i] = mydeftensor
odims [i] = N
elif funcs[i] == 'eigvalsh':
myfuncs[i] = myeigvalsh
odims [i] = 3
elif funcs[i] == 'eigvals':
myfuncs[i] = myeigvals
odims [i] = 3
            elif funcs[i] == 'maxeigvalsh':
                myfuncs[i] = mymaxeigvalsh
                odims [i] = 1
            elif funcs[i] == 'maxeigvals':
                myfuncs[i] = mymaxeigvals
                odims [i] = 1
elif funcs[i] == 'geodan':
myfuncs[i] = mygeodan
odims [i] = 1
#reading input data
img = iinfo.get_data()
mask = minfo.get_data()
sx = img.shape[0]
sy = img.shape[1]
sz = img.shape[2]
nvox = sx*sy*sz
    im = img.reshape(nvox, N*N)
msk = mask.flatten()<|fim▁hole|> tic = clock();
#processing
lm = np.zeros([nvox, odims[-1]])
for i in idx:
lm[i,:] = calculate_measures (myfuncs, im[i,:], odims)
#lm[i,:] = meafun(im[i,:])
toc = clock() - tic
print ('Time spent: ' + str(toc))
#saving output
lm = lm.reshape([sx, sy, sz, odims[-1]])
lm = lm.squeeze()
# debug_here()
new_image = nib.Nifti1Image(lm, affine)
nib.save(new_image, ofile)
except:
print ('Ooops! Error processing file ' + ifile)
print 'Unexpected error: ', sys.exc_info()
return -1
if __name__ == "__main__":
sys.exit(main())
#Testing multiprocessing. Not implemented. Leaving for patience to solve.
#for i in range(len(im)/7000):
# p.apply_async(mylogm, args=(im[i,:],i))
##determining multiprocessing stuff
#if nthreads > 1:
# from multiprocessing.pool import Pool
#ncpus = multiprocessing.cpu_count()
#if nthreads > ncpus:
# nthreads = ncpus - 1
#if nthreads > 1:
# p = ThreadPool(nthreads)
# print ('Using ' + nthreads + ' threads for execution')
#import nibabel as nib
#import numpy as np
#from time import clock
#from multiprocessing import Pool
#ifile='patient.M.90..5.OAS1_0247_MR1_mpr_n4_anon_111_t88_masked_gfc_spline_jacmat.nii.gz'
#meta = nib.load(ifile)
#img = meta.get_data()
#sx = img.shape[0]
#sy = img.shape[1]
#sz = img.shape[2]
#im = img.reshape(sx*sy*sz,9)
#p = Pool(4)
#p.map(mylogm, im)
#from time import clock
#for l in range(3):
# ti = clock();
# p = ThreadPool(4)
# lm = im[np.arange(len(im)/500),:]
# lm = np.zeros(lm.shape)
# lm = p.map(mylogm, im)
## for i in range(len(im)/500):
## v = p.apply_async(mylogm, args=(im[i,:]))
## p.close()
## p.join()
# tf = clock()-ti
# print tf
#for l in range(3):
# lm = np.empty(im.shape)
# ti = clock();
# for i in range(len(im)/500):
# lm[i,:] = mylogm(im[i,:])
# tf = clock()-ti
# print tf<|fim▁end|> | idx = np.where(msk > 0)[0]
|
<|file_name|>negative-out-of-bounds-target.js<|end_file_name|><|fim▁begin|>// Copyright (C) 2016 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
esid: sec-%typedarray%.prototype.copywithin
description: >
Set values with out of bounds negative target argument.
info: |
22.2.3.5 %TypedArray%.prototype.copyWithin (target, start [ , end ] )
%TypedArray%.prototype.copyWithin is a distinct function that implements the
same algorithm as Array.prototype.copyWithin as defined in 22.1.3.3 except
that the this object's [[ArrayLength]] internal slot is accessed in place of
performing a [[Get]] of "length" and the actual copying of values in step 12
must be performed in a manner that preserves the bit-level encoding of the
source data.
...
22.1.3.3 Array.prototype.copyWithin (target, start [ , end ] )
...
4. If relativeTarget < 0, let to be max((len + relativeTarget), 0); else let
to be min(relativeTarget, len).
...
includes: [compareArray.js, testBigIntTypedArray.js]
features: [BigInt, TypedArray]
---*/
testWithBigIntTypedArrayConstructors(function(TA) {
assert(
compareArray(
new TA([0n, 1n, 2n, 3n]).copyWithin(-10, 0),
[0n, 1n, 2n, 3n]
),
'[0, 1, 2, 3].copyWithin(-10, 0) -> [0, 1, 2, 3]'
);
assert(
compareArray(<|fim▁hole|> ),
'[1, 2, 3, 4, 5].copyWithin(-Infinity, 0) -> [1, 2, 3, 4, 5]'
);
assert(
compareArray(
new TA([0n, 1n, 2n, 3n, 4n]).copyWithin(-10, 2),
[2n, 3n, 4n, 3n, 4n]
),
'[0, 1, 2, 3, 4].copyWithin(-10, 2) -> [2, 3, 4, 3, 4]'
);
assert(
compareArray(
new TA([1n, 2n, 3n, 4n, 5n]).copyWithin(-Infinity, 2),
[3n, 4n, 5n, 4n, 5n]
),
'[1, 2, 3, 4, 5].copyWithin(-Infinity, 2) -> [3, 4, 5, 4, 5]'
);
});<|fim▁end|> | new TA([1n, 2n, 3n, 4n, 5n]).copyWithin(-Infinity, 0),
[1n, 2n, 3n, 4n, 5n] |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>import sys
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from django.core.signals import got_request_exception
from django.http import HttpResponse
from django.template import engines
from django.template.response import TemplateResponse
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import patch_logger
class TestException(Exception):
pass
# A middleware base class that tracks which methods have been called
class TestMiddleware(object):
def __init__(self):
self.process_request_called = False
self.process_view_called = False
self.process_response_called = False
self.process_template_response_called = False
self.process_exception_called = False
def process_request(self, request):
self.process_request_called = True
def process_view(self, request, view_func, view_args, view_kwargs):
self.process_view_called = True
def process_template_response(self, request, response):
self.process_template_response_called = True
return response
def process_response(self, request, response):
self.process_response_called = True
return response
def process_exception(self, request, exception):
self.process_exception_called = True
# Middleware examples that do the right thing
class RequestMiddleware(TestMiddleware):
def process_request(self, request):
super(RequestMiddleware, self).process_request(request)
return HttpResponse('Request Middleware')
class ViewMiddleware(TestMiddleware):
def process_view(self, request, view_func, view_args, view_kwargs):
super(ViewMiddleware, self).process_view(request, view_func, view_args, view_kwargs)
return HttpResponse('View Middleware')
class ResponseMiddleware(TestMiddleware):
def process_response(self, request, response):
super(ResponseMiddleware, self).process_response(request, response)
return HttpResponse('Response Middleware')
class TemplateResponseMiddleware(TestMiddleware):
def process_template_response(self, request, response):
super(TemplateResponseMiddleware, self).process_template_response(request, response)
template = engines['django'].from_string('Template Response Middleware')
return TemplateResponse(request, template)
class ExceptionMiddleware(TestMiddleware):
def process_exception(self, request, exception):
super(ExceptionMiddleware, self).process_exception(request, exception)
return HttpResponse('Exception Middleware')
# Sample middlewares that raise exceptions
class BadRequestMiddleware(TestMiddleware):
def process_request(self, request):
super(BadRequestMiddleware, self).process_request(request)
raise TestException('Test Request Exception')
class BadViewMiddleware(TestMiddleware):
def process_view(self, request, view_func, view_args, view_kwargs):
super(BadViewMiddleware, self).process_view(request, view_func, view_args, view_kwargs)
raise TestException('Test View Exception')
class BadTemplateResponseMiddleware(TestMiddleware):
def process_template_response(self, request, response):
super(BadTemplateResponseMiddleware, self).process_template_response(request, response)
raise TestException('Test Template Response Exception')
class BadResponseMiddleware(TestMiddleware):
def process_response(self, request, response):
super(BadResponseMiddleware, self).process_response(request, response)
raise TestException('Test Response Exception')
class BadExceptionMiddleware(TestMiddleware):
def process_exception(self, request, exception):
super(BadExceptionMiddleware, self).process_exception(request, exception)
raise TestException('Test Exception Exception')
# Sample middlewares that fail to return an HttpResponse
class NoTemplateResponseMiddleware(TestMiddleware):
def process_template_response(self, request, response):
super(NoTemplateResponseMiddleware, self).process_template_response(request, response)
class NoResponseMiddleware(TestMiddleware):
def process_response(self, request, response):
super(NoResponseMiddleware, self).process_response(request, response)
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class BaseMiddlewareExceptionTest(SimpleTestCase):
def setUp(self):
self.exceptions = []
got_request_exception.connect(self._on_request_exception)
self.client.handler.load_middleware()
def tearDown(self):
got_request_exception.disconnect(self._on_request_exception)
self.exceptions = []
def _on_request_exception(self, sender, request, **kwargs):
self.exceptions.append(sys.exc_info())
def _add_middleware(self, middleware):
self.client.handler._request_middleware.insert(0, middleware.process_request)
self.client.handler._view_middleware.insert(0, middleware.process_view)
self.client.handler._template_response_middleware.append(middleware.process_template_response)
self.client.handler._response_middleware.append(middleware.process_response)
self.client.handler._exception_middleware.append(middleware.process_exception)
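        # Each successive _add_middleware() call wraps the previously added
        # ones: prepending to the request/view lists makes its request hooks
        # run first, while appending to the response/exception lists makes
        # its response hooks run last, matching Django's onion model where
        # response middleware runs in reverse of the request order.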
def assert_exceptions_handled(self, url, errors, extra_error=None):
try:
self.client.get(url)
except TestException:
            # The test client intentionally re-raises any exception raised
            # during request handling, so proper handling of the exception is
            # instead verified through the got_request_exception signal
            # being sent.
pass
except Exception as e:
if type(extra_error) != type(e):
self.fail("Unexpected exception: %s" % e)
self.assertEqual(len(self.exceptions), len(errors))
for i, error in enumerate(errors):
exception, value, tb = self.exceptions[i]
<|fim▁hole|> self.assertEqual(middleware.process_request_called, request)
self.assertEqual(middleware.process_view_called, view)
self.assertEqual(middleware.process_template_response_called, template_response)
self.assertEqual(middleware.process_response_called, response)
self.assertEqual(middleware.process_exception_called, exception)
class MiddlewareTests(BaseMiddlewareExceptionTest):
def test_process_request_middleware(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_template_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = TemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/template_response/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, True, True, False)
self.assert_middleware_usage(middleware, True, True, True, True, False)
self.assert_middleware_usage(post_middleware, True, True, True, True, False)
def test_process_exception_middleware(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_template_response_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = TemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_response_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Error in view'], Exception())
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_exception_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_template_response_error(self):
middleware = TestMiddleware()
self._add_middleware(middleware)
self.assert_exceptions_handled('/middleware_exceptions/template_response_error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(middleware, True, True, True, True, False)
@override_settings(
MIDDLEWARE_CLASSES=['middleware_exceptions.middleware.ProcessExceptionMiddleware'],
)
def test_exception_in_render_passed_to_process_exception(self):
# Repopulate the list of middlewares since it's already been populated
# by setUp() before the MIDDLEWARE_CLASSES setting got overridden
self.client.handler.load_middleware()
response = self.client.get('/middleware_exceptions/exception_in_render/')
self.assertEqual(response.content, b'Exception caught')
class BadMiddlewareTests(BaseMiddlewareExceptionTest):
def test_process_request_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_template_response_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadTemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/template_response/',
['Test Template Response Exception']
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, True, True, False)
self.assert_middleware_usage(post_middleware, True, True, True, True, False)
def test_process_response_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_exception_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, True)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Exception Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Error in view', 'Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, True)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test Exception Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead.",
'Test Response Exception'
]
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_exception_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, True)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Exception Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_response_no_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = NoResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [
"NoResponseMiddleware.process_response didn't return an HttpResponse object. It returned None instead."
],
ValueError())
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_template_response_no_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = NoTemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/template_response/', [
"NoTemplateResponseMiddleware.process_template_response didn't "
"return an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, True, True, False)
self.assert_middleware_usage(post_middleware, True, True, True, True, False)
_missing = object()
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class RootUrlconfTests(SimpleTestCase):
@override_settings(ROOT_URLCONF=None)
def test_missing_root_urlconf(self):
# Removing ROOT_URLCONF is safe, as override_settings will restore
# the previously defined settings.
del settings.ROOT_URLCONF
with self.assertRaises(AttributeError):
self.client.get("/middleware_exceptions/view/")
class MyMiddleware(object):
def __init__(self):
raise MiddlewareNotUsed
def process_request(self, request):
pass
class MyMiddlewareWithExceptionMessage(object):
def __init__(self):
raise MiddlewareNotUsed('spam eggs')
def process_request(self, request):
pass
@override_settings(
DEBUG=True,
ROOT_URLCONF='middleware_exceptions.urls',
)
class MiddlewareNotUsedTests(SimpleTestCase):
rf = RequestFactory()
def test_raise_exception(self):
request = self.rf.get('middleware_exceptions/view/')
with self.assertRaises(MiddlewareNotUsed):
MyMiddleware().process_request(request)
@override_settings(MIDDLEWARE_CLASSES=[
'middleware_exceptions.tests.MyMiddleware',
])
def test_log(self):
with patch_logger('django.request', 'debug') as calls:
self.client.get('/middleware_exceptions/view/')
self.assertEqual(len(calls), 1)
self.assertEqual(
calls[0],
"MiddlewareNotUsed: 'middleware_exceptions.tests.MyMiddleware'"
)
@override_settings(MIDDLEWARE_CLASSES=[
'middleware_exceptions.tests.MyMiddlewareWithExceptionMessage',
])
def test_log_custom_message(self):
with patch_logger('django.request', 'debug') as calls:
self.client.get('/middleware_exceptions/view/')
self.assertEqual(len(calls), 1)
self.assertEqual(
calls[0],
"MiddlewareNotUsed('middleware_exceptions.tests.MyMiddlewareWithExceptionMessage'): spam eggs"
)
@override_settings(DEBUG=False)
def test_do_not_log_when_debug_is_false(self):
with patch_logger('django.request', 'debug') as calls:
self.client.get('/middleware_exceptions/view/')
self.assertEqual(len(calls), 0)<|fim▁end|> | self.assertEqual(value.args, (error, ))
def assert_middleware_usage(self, middleware, request, view, template_response, response, exception):
|
<|file_name|>_customunitary.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
<|fim▁hole|>#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Node for an OPENQASM custom gate statement.
"""
from ._node import Node
class CustomUnitary(Node):
"""Node for an OPENQASM custom gate statement.
children[0] is an id node.
children[1] is an exp_list (if len==3) or primary_list.
children[2], if present, is a primary_list.
Has properties:
.id = id node
.name = gate name string
.arguments = None or exp_list node
.bitlist = primary_list node
"""
def __init__(self, children):
"""Create the custom gate node."""
Node.__init__(self, 'custom_unitary', children, None)
self.id = children[0]
self.name = self.id.name
if len(children) == 3:
self.arguments = children[1]
self.bitlist = children[2]
else:
self.arguments = None
self.bitlist = children[1]
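    # Illustrative example (editor's addition): for the OPENQASM statement
    # "u3(0.1,0.2,0.3) q[0];" the parser builds a CustomUnitary whose `name`
    # is "u3", whose `arguments` is the exp_list holding the three angles,
    # and whose `bitlist` is the primary_list holding q[0]; qasm() below
    # then reproduces exactly that statement text.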
def qasm(self, prec=15):
"""Return the corresponding OPENQASM string."""
string = self.name
if self.arguments is not None:
string += "(" + self.arguments.qasm(prec) + ")"
string += " " + self.bitlist.qasm(prec) + ";"
return string<|fim▁end|> | # You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
|
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>/* Aurélien DESBRIÈRES
aurelien(at)hackers(dot)camp
License GNU GPL latest */
// Rust experimentations
// Borrowing Aliasing in Rust
struct Point { x: i32, y: i32, z: i32 }
fn main() {
let mut point = Point { x: 0, y: 0, z: 0 };
{
let borrowed_point = &point;
let another_borrow = &point;
        // Data can be accessed via the references and the original owner
        println!("Point has coordinates: ({}, {}, {})",
borrowed_point.x, another_borrow.y, point.z);
// Error! Can't borrow point as mutable because it's currently
// borrowed as immutable.
//let mutable_borrow = &mut point;<|fim▁hole|>
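        // Editor's note (hedged): since Rust 2018's non-lexical lifetimes,
        // a borrow ends at its last use, so uncommenting the line above may
        // actually compile today because `borrowed_point` and
        // `another_borrow` are not used again after the `println!`.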
{
let mutable_borrow = &mut point;
// Change data via mutable reference
mutable_borrow.x = 5;
mutable_borrow.y = 2;
mutable_borrow.z = 1;
// Error! Can't borrow `point` as immutable because it's currently
// borrowed as mutable.
        //let y = &point.y;
// TODO ^ Try uncommenting this line
// Error! Can't print because `println!` takes an immutable reference.
//println!("Point Z coordinate is {}", piont.z);
// TODO ^ Try uncommenting this line
// Ok! Mutable references can be passed as immutable to `println!`
println!("Point has coordiantes: ({}, {}, {})",
mutable_borrow.x, mutable_borrow.y, mutable_borrow.z);
// Mutable reference goes out of scope
}
    // Immutable references to point are allowed again
let borrowed_point = &point;
println!("Point now has coordinates: ({}, {}, {})",
borrowed_point.x, borrowed_point.y, borrowed_point.z);
}<|fim▁end|> | // TODO ^ Try uncommenting this line
// Immutable references go out of scope
} |
<|file_name|>test_distributions.py<|end_file_name|><|fim▁begin|>""" Test functions for stats module
"""
import warnings
import re
import sys
import pickle
import os
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_warns,
assert_array_less, suppress_warnings)
import pytest
from pytest import raises as assert_raises
import numpy
import numpy as np
from numpy import typecodes, array
from numpy.lib.recfunctions import rec_append_fields
from scipy import special
from scipy._lib._util import check_random_state
from scipy.integrate import IntegrationWarning
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy
from .test_continuous_basic import distcont
# python -OO strips docstrings
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = '%s does not have attribute %s' % (a, b)
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
# https://github.com/scipy/scipy/issues/3802
_assert_hasattr(scipy.stats.distributions, 'f_gen')
def check_vonmises_pdf_periodic(k, l, s, x):
vm = stats.vonmises(k, loc=l, scale=s)
assert_almost_equal(vm.pdf(x), vm.pdf(x % (2*numpy.pi*s)))
def check_vonmises_cdf_periodic(k, l, s, x):
vm = stats.vonmises(k, loc=l, scale=s)
assert_almost_equal(vm.cdf(x) % 1, vm.cdf(x % (2*numpy.pi*s)) % 1)
def test_vonmises_pdf_periodic():
for k in [0.1, 1, 101]:
for x in [0, 1, numpy.pi, 10, 100]:
check_vonmises_pdf_periodic(k, 0, 1, x)
check_vonmises_pdf_periodic(k, 1, 1, x)
check_vonmises_pdf_periodic(k, 0, 10, x)
check_vonmises_cdf_periodic(k, 0, 1, x)
check_vonmises_cdf_periodic(k, 1, 1, x)
check_vonmises_cdf_periodic(k, 0, 10, x)
def test_vonmises_line_support():
assert_equal(stats.vonmises_line.a, -np.pi)
assert_equal(stats.vonmises_line.b, np.pi)
def test_vonmises_numerical():
vm = stats.vonmises(800)
assert_almost_equal(vm.cdf(0), 0.5)
@pytest.mark.parametrize('dist',
['alpha', 'betaprime',
'fatiguelife', 'invgamma', 'invgauss', 'invweibull',
'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gilbrat',
'powerlognorm', 'rayleigh', 'wald'])
def test_support(dist):
"""gh-6235"""
dct = dict(distcont)
args = dct[dist]
dist = getattr(stats, dist)
assert_almost_equal(dist.pdf(dist.a, *args), 0)
assert_equal(dist.logpdf(dist.a, *args), -np.inf)
assert_almost_equal(dist.pdf(dist.b, *args), 0)
assert_equal(dist.logpdf(dist.b, *args), -np.inf)
class TestRandInt(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.randint.rvs(5, 30, size=100)
assert_(numpy.all(vals < 30) & numpy.all(vals >= 5))
assert_(len(vals) == 100)
vals = stats.randint.rvs(5, 30, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.randint.rvs(15, 46)
assert_((val >= 15) & (val < 46))
assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val)))
val = stats.randint(15, 46).rvs(3)
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pdf(self):
k = numpy.r_[0:36]
out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0)
vals = stats.randint.pmf(k, 5, 30)
assert_array_almost_equal(vals, out)
def test_cdf(self):
x = np.linspace(0, 36, 100)
k = numpy.floor(x)
out = numpy.select([k >= 30, k >= 5], [1.0, (k-5.0+1)/(30-5.0)], 0)
vals = stats.randint.cdf(x, 5, 30)
assert_array_almost_equal(vals, out, decimal=12)
class TestBinom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.binom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.binom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.binom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for Ticket #1842
vals1 = stats.binom.pmf(100, 100, 1)
vals2 = stats.binom.pmf(0, 100, 0)
assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)
assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)
def test_entropy(self):
# Basic entropy tests.
b = stats.binom(2, 0.5)
expected_p = np.array([0.25, 0.5, 0.25])
expected_h = -sum(xlogy(expected_p, expected_p))
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.binom(2, 0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.binom(2, 1.0)
h = b.entropy()
assert_equal(h, 0.0)
def test_warns_p0(self):
        # no spurious warnings are generated for p=0; gh-3817
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
assert_equal(stats.binom(n=2, p=0).mean(), 0)
assert_equal(stats.binom(n=2, p=0).std(), 0)
class TestBernoulli(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.bernoulli.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.bernoulli.rvs(0.75)
assert_(isinstance(val, int))
val = stats.bernoulli(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_entropy(self):
# Simple tests of entropy.
b = stats.bernoulli(0.25)
expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.bernoulli(0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.bernoulli(1.0)
h = b.entropy()
assert_equal(h, 0.0)
class TestBradford(object):
# gh-6216
def test_cdf_ppf(self):
c = 0.1
x = np.logspace(-20, -4)
q = stats.bradford.cdf(x, c)
xx = stats.bradford.ppf(q, c)
assert_allclose(x, xx)
class TestNBinom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.nbinom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.nbinom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for ticket 1779
assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
stats.nbinom.pmf(700, 721, 0.52))
# logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
val = scipy.stats.nbinom.logpmf(0, 1, 1)
assert_equal(val, 0)
class TestGenInvGauss(object):
def setup_method(self):
np.random.seed(1234)
@pytest.mark.slow
def test_rvs_with_mode_shift(self):
# ratio_unif w/ mode shift
gig = stats.geninvgauss(2.3, 1.5)
_, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
assert_equal(p > 0.05, True)
@pytest.mark.slow
def test_rvs_without_mode_shift(self):
# ratio_unif w/o mode shift
gig = stats.geninvgauss(0.9, 0.75)
_, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
assert_equal(p > 0.05, True)
@pytest.mark.slow
def test_rvs_new_method(self):
# new algorithm of Hoermann / Leydold
gig = stats.geninvgauss(0.1, 0.2)
_, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
assert_equal(p > 0.05, True)
@pytest.mark.slow
def test_rvs_p_zero(self):
def my_ks_check(p, b):
gig = stats.geninvgauss(p, b)
rvs = gig.rvs(size=1500, random_state=1234)
return stats.kstest(rvs, gig.cdf)[1] > 0.05
# boundary cases when p = 0
assert_equal(my_ks_check(0, 0.2), True) # new algo
assert_equal(my_ks_check(0, 0.9), True) # ratio_unif w/o shift
assert_equal(my_ks_check(0, 1.5), True) # ratio_unif with shift
def test_rvs_negative_p(self):
# if p negative, return inverse
assert_equal(
stats.geninvgauss(-1.5, 2).rvs(size=10, random_state=1234),
1 / stats.geninvgauss(1.5, 2).rvs(size=10, random_state=1234))
def test_invgauss(self):
# test that invgauss is special case
ig = stats.geninvgauss.rvs(size=1500, p=-0.5, b=1, random_state=1234)
assert_equal(stats.kstest(ig, 'invgauss', args=[1])[1] > 0.15, True)
# test pdf and cdf
mu, x = 100, np.linspace(0.01, 1, 10)
pdf_ig = stats.geninvgauss.pdf(x, p=-0.5, b=1 / mu, scale=mu)
assert_allclose(pdf_ig, stats.invgauss(mu).pdf(x))
cdf_ig = stats.geninvgauss.cdf(x, p=-0.5, b=1 / mu, scale=mu)
assert_allclose(cdf_ig, stats.invgauss(mu).cdf(x))
def test_pdf_R(self):
# test against R package GIGrvg
# x <- seq(0.01, 5, length.out = 10)
# GIGrvg::dgig(x, 0.5, 1, 1)
vals_R = np.array([2.081176820e-21, 4.488660034e-01, 3.747774338e-01,
2.693297528e-01, 1.905637275e-01, 1.351476913e-01,
9.636538981e-02, 6.909040154e-02, 4.978006801e-02,
3.602084467e-02])
x = np.linspace(0.01, 5, 10)
assert_allclose(vals_R, stats.geninvgauss.pdf(x, 0.5, 1))
def test_pdf_zero(self):
# pdf at 0 is 0, needs special treatment to avoid 1/x in pdf
assert_equal(stats.geninvgauss.pdf(0, 0.5, 0.5), 0)
# if x is large and p is moderate, make sure that pdf does not
# overflow because of x**(p-1); exp(-b*x) forces pdf to zero
assert_equal(stats.geninvgauss.pdf(2e6, 50, 2), 0)
class TestNormInvGauss(object):
def setup_method(self):
np.random.seed(1234)
def test_cdf_R(self):
# test pdf and cdf vals against R
# require("GeneralizedHyperbolic")
# x_test <- c(-7, -5, 0, 8, 15)
# r_cdf <- GeneralizedHyperbolic::pnig(x_test, mu = 0, a = 1, b = 0.5)
# r_pdf <- GeneralizedHyperbolic::dnig(x_test, mu = 0, a = 1, b = 0.5)
r_cdf = np.array([8.034920282e-07, 2.512671945e-05, 3.186661051e-01,
9.988650664e-01, 9.999848769e-01])
x_test = np.array([-7, -5, 0, 8, 15])
vals_cdf = stats.norminvgauss.cdf(x_test, a=1, b=0.5)
assert_allclose(vals_cdf, r_cdf, atol=1e-9)
def test_pdf_R(self):
# values from R as defined in test_cdf_R
r_pdf = np.array([1.359600783e-06, 4.413878805e-05, 4.555014266e-01,
7.450485342e-04, 8.917889931e-06])
x_test = np.array([-7, -5, 0, 8, 15])
vals_pdf = stats.norminvgauss.pdf(x_test, a=1, b=0.5)
assert_allclose(vals_pdf, r_pdf, atol=1e-9)
def test_stats(self):
a, b = 1, 0.5
gamma = np.sqrt(a**2 - b**2)
v_stats = (b / gamma, a**2 / gamma**3, 3.0 * b / (a * np.sqrt(gamma)),
3.0 * (1 + 4 * b**2 / a**2) / gamma)
assert_equal(v_stats, stats.norminvgauss.stats(a, b, moments='mvsk'))
def test_ppf(self):
a, b = 1, 0.5
x_test = np.array([0.001, 0.5, 0.999])
vals = stats.norminvgauss.ppf(x_test, a, b)
assert_allclose(x_test, stats.norminvgauss.cdf(vals, a, b))
class TestGeom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.geom.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.geom.rvs(0.75)
assert_(isinstance(val, int))
val = stats.geom(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
vals = stats.geom.pmf([1, 2, 3], 0.5)
assert_array_almost_equal(vals, [0.5, 0.25, 0.125])
def test_logpmf(self):
# regression test for ticket 1793
vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5))
vals2 = stats.geom.logpmf([1, 2, 3], 0.5)
assert_allclose(vals1, vals2, rtol=1e-15, atol=0)
# regression test for gh-4028
val = stats.geom.logpmf(1, 1)
assert_equal(val, 0.0)
def test_cdf_sf(self):
vals = stats.geom.cdf([1, 2, 3], 0.5)
vals_sf = stats.geom.sf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, expected)
assert_array_almost_equal(vals_sf, 1-expected)
def test_logcdf_logsf(self):
vals = stats.geom.logcdf([1, 2, 3], 0.5)
vals_sf = stats.geom.logsf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, np.log(expected))
assert_array_almost_equal(vals_sf, np.log1p(-expected))
def test_ppf(self):
vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
expected = array([1.0, 2.0, 3.0])
assert_array_almost_equal(vals, expected)
def test_ppf_underflow(self):
# this should not underflow
assert_allclose(stats.geom.ppf(1e-20, 1e-20), 1.0, atol=1e-14)
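        # Why 1.0 is the right answer: geom.ppf solves 1 - (1 - p)**k >= q,
        # i.e. k = ceil(log1p(-q) / log1p(-p)); with q = p the ratio is
        # exactly 1, whereas a naive log(1 - q) would underflow to 0.0 for
        # q = 1e-20 and wrongly yield 0.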
class TestPlanck(object):
def setup_method(self):
np.random.seed(1234)
def test_sf(self):
vals = stats.planck.sf([1, 2, 3], 5.)
expected = array([4.5399929762484854e-05,
3.0590232050182579e-07,
2.0611536224385579e-09])
assert_array_almost_equal(vals, expected)
def test_logsf(self):
vals = stats.planck.logsf([1000., 2000., 3000.], 1000.)
expected = array([-1001000., -2001000., -3001000.])
assert_array_almost_equal(vals, expected)
class TestGennorm(object):
def test_laplace(self):
# test against Laplace (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 1)
pdf2 = stats.laplace.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_norm(self):
# test against normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 2)
pdf2 = stats.norm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
class TestHalfgennorm(object):
def test_expon(self):
# test against exponential (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 1)
pdf2 = stats.expon.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_halfnorm(self):
# test against half normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 2)
pdf2 = stats.halfnorm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
def test_gennorm(self):
# test against generalized normal
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, .497324)
pdf2 = stats.gennorm.pdf(points, .497324)
assert_almost_equal(pdf1, 2*pdf2)
class TestTruncnorm(object):
def setup_method(self):
np.random.seed(1234)
def test_ppf_ticket1131(self):
vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
assert_array_almost_equal(vals, expected)
def test_isf_ticket1131(self):
vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
assert_array_almost_equal(vals, expected)
def test_gh_2477_small_values(self):
# Check a case that worked in the original issue.
low, high = -11, -10
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
# Check a case that failed in the original issue.
low, high = 10, 11
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
    # @pytest.mark.xfail(reason="truncnorm rvs is known to fail at extreme tails")
def test_gh_2477_large_values(self):
# Check a case that used to fail because of extreme tailness.
low, high = 100, 101
with np.errstate(divide='ignore'):
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
print(low, x.min(), x.max(), high)
        assert_(low <= x.min() <= x.max() <= high, str([low, high, x]))
# Check some additional extreme tails
low, high = 1000, 1001
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
low, high = 10000, 10001
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_9403_nontail_values(self):
for low, high in [[3, 4], [-4, -3]]:
xvals = np.array([-np.inf, low, high, np.inf])
xmid = (high+low)/2.0
cdfs = stats.truncnorm.cdf(xvals, low, high)
sfs = stats.truncnorm.sf(xvals, low, high)
pdfs = stats.truncnorm.pdf(xvals, low, high)
expected_cdfs = np.array([0, 0, 1, 1])
expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
expected_pdfs = np.array([0, 3.3619772, 0.1015229, 0])
if low < 0:
expected_pdfs = np.array([0, 0.1015229, 3.3619772, 0])
assert_almost_equal(cdfs, expected_cdfs)
assert_almost_equal(sfs, expected_sfs)
assert_almost_equal(pdfs, expected_pdfs)
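            # The next check uses the identity log(phi(low)/phi(high)) =
            # (high**2 - low**2)/2 = low + 0.5, which holds because
            # high = low + 1 (explanatory note added for clarity).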
assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]), low+0.5)
pvals = np.array([0, 0.5, 1.0])
ppfs = stats.truncnorm.ppf(pvals, low, high)
expected_ppfs = np.array([low, np.sign(low)*3.1984741, high])
assert_almost_equal(ppfs, expected_ppfs)
if low < 0:
assert_almost_equal(stats.truncnorm.sf(xmid, low, high), 0.8475544278436675)
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), 0.1524455721563326)
else:
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), 0.8475544278436675)
assert_almost_equal(stats.truncnorm.sf(xmid, low, high), 0.1524455721563326)
pdf = stats.truncnorm.pdf(xmid, low, high)
assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)
def test_gh_9403_medium_tail_values(self):
for low, high in [[39, 40], [-40, -39]]:
xvals = np.array([-np.inf, low, high, np.inf])
xmid = (high+low)/2.0
cdfs = stats.truncnorm.cdf(xvals, low, high)
sfs = stats.truncnorm.sf(xvals, low, high)
pdfs = stats.truncnorm.pdf(xvals, low, high)
expected_cdfs = np.array([0, 0, 1, 1])
expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
expected_pdfs = np.array([0, 3.90256074e+01, 2.73349092e-16, 0])
if low < 0:
expected_pdfs = np.array([0, 2.73349092e-16, 3.90256074e+01, 0])
assert_almost_equal(cdfs, expected_cdfs)
assert_almost_equal(sfs, expected_sfs)
assert_almost_equal(pdfs, expected_pdfs)
assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]), low+0.5)
pvals = np.array([0, 0.5, 1.0])
ppfs = stats.truncnorm.ppf(pvals, low, high)
expected_ppfs = np.array([low, np.sign(low)*39.01775731, high])
assert_almost_equal(ppfs, expected_ppfs)
cdfs = stats.truncnorm.cdf(ppfs, low, high)
assert_almost_equal(cdfs, pvals)
if low < 0:
assert_almost_equal(stats.truncnorm.sf(xmid, low, high), 0.9999999970389126)
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), 2.961048103554866e-09)
else:
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), 0.9999999970389126)
assert_almost_equal(stats.truncnorm.sf(xmid, low, high), 2.961048103554866e-09)
pdf = stats.truncnorm.pdf(xmid, low, high)
assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)
xvals = np.linspace(low, high, 11)
xvals2 = -xvals[::-1]
assert_almost_equal(stats.truncnorm.cdf(xvals, low, high), stats.truncnorm.sf(xvals2, -high, -low)[::-1])
assert_almost_equal(stats.truncnorm.sf(xvals, low, high), stats.truncnorm.cdf(xvals2, -high, -low)[::-1])
assert_almost_equal(stats.truncnorm.pdf(xvals, low, high), stats.truncnorm.pdf(xvals2, -high, -low)[::-1])
def _test_moments_one_range(self, a, b, expected):
m0, v0, s0, k0 = expected[:4]
m, v, s, k = stats.truncnorm.stats(a, b, moments='mvsk')
assert_almost_equal(m, m0)
assert_almost_equal(v, v0)
assert_almost_equal(s, s0)
assert_almost_equal(k, k0)
@pytest.mark.xfail_on_32bit("reduced accuracy with 32bit platforms.")
def test_moments(self):
# Values validated by changing TRUNCNORM_TAIL_X so as to evaluate
# using both the _norm_XXX() and _norm_logXXX() functions, and by
        # removing the _stats and _munp methods in truncnorm to force
# numerical quadrature.
self._test_moments_one_range(-30, 30, [0, 1, 0.0, 0.0])
self._test_moments_one_range(-10, 10, [0, 1, 0.0, 0.0])
self._test_moments_one_range(-3, 3, [0, 0.97333692, 0.0, -0.17111444])
self._test_moments_one_range(-2, 2, [0, 0.7737413, 0.0, -0.63446328])
self._test_moments_one_range(0, np.inf, [0.79788456, 0.36338023, 0.99527175, 0.8691773])
self._test_moments_one_range(-1, 3, [0.2827861, 0.61614174, 0.53930185, -0.20582065])
self._test_moments_one_range(-3, 1, [-0.2827861, 0.61614174, -0.53930185, -0.20582065])
self._test_moments_one_range(-10, -9, [-9.10845629, 0.01144881, -1.89856073, 5.07334611])
self._test_moments_one_range(-20, -19, [-19.05234395, 0.00272507, -1.9838686, 5.87208674])
self._test_moments_one_range(-30, -29, [-29.03440124, 0.00118066, -1.99297727, 5.9303358])
self._test_moments_one_range(-40, -39, [-39.02560741993262, 0.0006548, -1.99631464, 5.61677584])
self._test_moments_one_range(39, 40, [39.02560741993262, 0.0006548, 1.99631464, 5.61677584])
def test_9902_moments(self):
m, v = stats.truncnorm.stats(0, np.inf, moments='mv')
assert_almost_equal(m, 0.79788456)
assert_almost_equal(v, 0.36338023)
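        # Illustrative cross-check, not part of the original regression test:
        # truncnorm on [0, inf) is the half-normal distribution, whose
        # closed-form mean and variance are sqrt(2/pi) and 1 - 2/pi.
        assert_allclose(m, np.sqrt(2 / np.pi))
        assert_allclose(v, 1 - 2 / np.pi)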
def test_gh_1489_trac_962_rvs(self):
# Check the original example.
low, high = 10, 15
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
class TestHypergeom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
assert_(numpy.all(vals >= 0) &
numpy.all(vals <= 3))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.hypergeom.rvs(20, 3, 10)
assert_(isinstance(val, int))
val = stats.hypergeom(20, 3, 10).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_precision(self):
# comparison number from mpmath
M = 2500
n = 50
N = 500
tot = M
good = n
hgpmf = stats.hypergeom.pmf(2, tot, good, N)
assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
def test_args(self):
# test correct output for corner cases of arguments
# see gh-2325
assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
def test_cdf_above_one(self):
# for some values of parameters, hypergeom cdf was >1, see gh-2238
assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
def test_precision2(self):
# Test hypergeom precision for large numbers. See #1218.
# Results compared with those from R.
oranges = 9.9e4
pears = 1.1e5
fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
quantile = 2e4
res = [stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten)
for eaten in fruits_eaten]
expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
8.265601e-11, 0.1237904, 1])
assert_allclose(res, expected, atol=0, rtol=5e-7)
# Test with array_like first argument
quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
assert_allclose(res2, expected2, atol=0, rtol=5e-7)
def test_entropy(self):
# Simple tests of entropy.
hg = stats.hypergeom(4, 1, 1)
h = hg.entropy()
expected_p = np.array([0.75, 0.25])
expected_h = -np.sum(xlogy(expected_p, expected_p))
assert_allclose(h, expected_h)
hg = stats.hypergeom(1, 1, 1)
h = hg.entropy()
assert_equal(h, 0.0)
def test_logsf(self):
# Test logsf for very large numbers. See issue #4982
# Results compare with those from R (v3.2.0):
# phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
# -2239.771
k = 1e4
M = 1e7
n = 1e6
N = 5e4
result = stats.hypergeom.logsf(k, M, n, N)
expected = -2239.771 # From R
assert_almost_equal(result, expected, decimal=3)
k = 1
M = 1600
n = 600
N = 300
result = stats.hypergeom.logsf(k, M, n, N)
expected = -2.566567e-68 # From R
assert_almost_equal(result, expected, decimal=15)
def test_logcdf(self):
# Test logcdf for very large numbers. See issue #8692
# Results compare with those from R (v3.3.2):
# phyper(k, n, M-n, N, lower.tail=TRUE, log.p=TRUE)
# -5273.335
k = 1
M = 1e7
n = 1e6
N = 5e4
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -5273.335 # From R
assert_almost_equal(result, expected, decimal=3)
# Same example as in issue #8692
k = 40
M = 1600
n = 50
N = 300
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -7.565148879229e-23 # From R
assert_almost_equal(result, expected, decimal=15)
k = 125
M = 1600
n = 250
N = 500
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -4.242688e-12 # From R
assert_almost_equal(result, expected, decimal=15)
# test broadcasting robustness based on reviewer
# concerns in PR 9603; using an array version of
# the example from issue #8692
k = np.array([40, 40, 40])
M = 1600
n = 50
N = 300
result = stats.hypergeom.logcdf(k, M, n, N)
expected = np.full(3, -7.565148879229e-23) # filled from R result
assert_almost_equal(result, expected, decimal=15)
class TestLoggamma(object):
def test_stats(self):
# The following precomputed values are from the table in section 2.2
# of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
# Chan (thesis, McMaster University, 1993).
table = np.array([
# c, mean, var, skew, exc. kurt.
0.5, -1.9635, 4.9348, -1.5351, 4.0000,
1.0, -0.5772, 1.6449, -1.1395, 2.4000,
12.0, 2.4427, 0.0869, -0.2946, 0.1735,
]).reshape(-1, 5)
for c, mean, var, skew, kurt in table:
            computed = stats.loggamma.stats(c, moments='mvsk')
assert_array_almost_equal(computed, [mean, var, skew, kurt],
decimal=4)
class TestLogistic(object):
# gh-6226
def test_cdf_ppf(self):
x = np.linspace(-20, 20)
y = stats.logistic.cdf(x)
xx = stats.logistic.ppf(y)
assert_allclose(x, xx)
def test_sf_isf(self):
x = np.linspace(-20, 20)
y = stats.logistic.sf(x)
xx = stats.logistic.isf(y)
assert_allclose(x, xx)
def test_extreme_values(self):
# p is chosen so that 1 - (1 - p) == p in double precision
p = 9.992007221626409e-16
desired = 34.53957599234088
assert_allclose(stats.logistic.ppf(1 - p), desired)
assert_allclose(stats.logistic.isf(p), desired)
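        # Illustrative closed-form check (added): the logistic isf is
        # log((1 - p)/p), which for tiny p is approximately -log(p).
        assert_allclose(desired, np.log((1 - p) / p))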
class TestLogser(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.logser.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.logser.rvs(0.75)
assert_(isinstance(val, int))
val = stats.logser(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf_small_p(self):
m = stats.logser.pmf(4, 1e-20)
# The expected value was computed using mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 64
# >>> k = 4
# >>> p = mpmath.mpf('1e-20')
# >>> float(-(p**k)/k/mpmath.log(1-p))
# 2.5e-61
# It is also clear from noticing that for very small p,
# log(1-p) is approximately -p, and the formula becomes
# p**(k-1) / k
assert_allclose(m, 2.5e-61)
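        # Cross-check against the small-p approximation described above
        # (illustrative addition; exact to within O(p) relative error).
        assert_allclose(m, (1e-20)**3 / 4, rtol=1e-10)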
def test_mean_small_p(self):
m = stats.logser.mean(1e-8)
# The expected mean was computed using mpmath:
# >>> import mpmath
# >>> mpmath.dps = 60
# >>> p = mpmath.mpf('1e-8')
# >>> float(-p / ((1 - p)*mpmath.log(1 - p)))
# 1.000000005
assert_allclose(m, 1.000000005)
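        # For small p the mean expands as 1 + p/2 + O(p**2); cross-check
        # the value above against that approximation (illustrative addition).
        assert_allclose(m, 1 + 1e-8 / 2, rtol=1e-12)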
class TestPareto(object):
def test_stats(self):
# Check the stats() method with some simple values. Also check
# that the calculations do not trigger RuntimeWarnings.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
assert_equal(m, 3.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
assert_equal(m, 2.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
assert_allclose(m, 2.5 / 1.5)
assert_allclose(v, 2.5 / (1.5*1.5*0.5))
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
assert_allclose(m, 1.5)
assert_allclose(v, 0.75)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
assert_allclose(m, 3.5 / 2.5)
assert_allclose(v, 3.5 / (2.5*2.5*1.5))
assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
assert_allclose(m, 4.0 / 3.0)
assert_allclose(v, 4.0 / 18.0)
assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
assert_allclose(m, 4.5 / 3.5)
assert_allclose(v, 4.5 / (3.5*3.5*2.5))
assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
def test_sf(self):
x = 1e9
b = 2
scale = 1.5
p = stats.pareto.sf(x, b, loc=0, scale=scale)
expected = (scale/x)**b # 2.25e-18
assert_allclose(p, expected)
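        # Round-trip through isf as an extra consistency check (illustrative
        # addition): with loc=0, isf(p) = scale * p**(-1/b).
        assert_allclose(stats.pareto.isf(p, b, loc=0, scale=scale), x)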
class TestGenpareto(object):
def test_ab(self):
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
a, b = stats.genpareto._get_support(c)
            assert_equal(a, 0.)
            assert_(np.isposinf(b))

        # c < 0: a=0, b=1/|c|
        c = np.asarray(-2.)
        a, b = stats.genpareto._get_support(c)
assert_allclose([a, b], [0., 0.5])
def test_c0(self):
# with c=0, genpareto reduces to the exponential distribution
rv = stats.genpareto(c=0.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.expon.pdf(x))
assert_allclose(rv.cdf(x), stats.expon.cdf(x))
assert_allclose(rv.sf(x), stats.expon.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.expon.ppf(q))
def test_cm1(self):
# with c=-1, genpareto reduces to the uniform distr on [0, 1]
rv = stats.genpareto(c=-1.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
assert_allclose(rv.sf(x), stats.uniform.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.uniform.ppf(q))
# logpdf(1., c=-1) should be zero
assert_allclose(rv.logpdf(1), 0)
def test_x_inf(self):
# make sure x=inf is handled gracefully
rv = stats.genpareto(c=0.1)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=0.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=-1.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
def test_c_continuity(self):
# pdf is continuous at c=0, -1
x = np.linspace(0, 10, 30)
for c in [0, -1]:
pdf0 = stats.genpareto.pdf(x, c)
for dc in [1e-14, -1e-14]:
pdfc = stats.genpareto.pdf(x, c + dc)
assert_allclose(pdf0, pdfc, atol=1e-12)
cdf0 = stats.genpareto.cdf(x, c)
            for dc in [1e-14, -1e-14]:
cdfc = stats.genpareto.cdf(x, c + dc)
assert_allclose(cdf0, cdfc, atol=1e-12)
def test_c_continuity_ppf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
ppf0 = stats.genpareto.ppf(q, c)
for dc in [1e-14, -1e-14]:
ppfc = stats.genpareto.ppf(q, c + dc)
assert_allclose(ppf0, ppfc, atol=1e-12)
def test_c_continuity_isf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
isf0 = stats.genpareto.isf(q, c)
for dc in [1e-14, -1e-14]:
isfc = stats.genpareto.isf(q, c + dc)
assert_allclose(isf0, isfc, atol=1e-12)
def test_cdf_ppf_roundtrip(self):
# this should pass with machine precision. hat tip @pbrod
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [1e-8, -1e-18, 1e-15, -1e-15]:
assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
q, atol=1e-15)
def test_logsf(self):
logp = stats.genpareto.logsf(1e10, .01, 0, 1)
assert_allclose(logp, -1842.0680753952365)
# Values in 'expected_stats' are
# [mean, variance, skewness, excess kurtosis].
@pytest.mark.parametrize(
'c, expected_stats',
[(0, [1, 1, 2, 6]),
(1/4, [4/3, 32/9, 10/np.sqrt(2), np.nan]),
(1/9, [9/8, (81/64)*(9/7), (10/9)*np.sqrt(7), 754/45]),
(-1, [1/2, 1/12, 0, -6/5])])
def test_stats(self, c, expected_stats):
result = stats.genpareto.stats(c, moments='mvsk')
assert_allclose(result, expected_stats, rtol=1e-13, atol=1e-15)
def test_var(self):
# Regression test for gh-11168.
v = stats.genpareto.var(1e-8)
assert_allclose(v, 1.000000040000001, rtol=1e-13)
class TestPearson3(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.pearson3.rvs(0.1, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllFloat'])
val = stats.pearson3.rvs(0.5)
assert_(isinstance(val, float))
val = stats.pearson3(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllFloat'])
assert_(len(val) == 3)
def test_pdf(self):
vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
atol=1e-6)
vals = stats.pearson3.pdf(-3, 0.1)
assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
0.39885918, 0.23413173]), atol=1e-6)
def test_cdf(self):
vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
atol=1e-6)
vals = stats.pearson3.cdf(-3, 0.1)
assert_allclose(vals, [0.00082256], atol=1e-6)
vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
5.06649130e-01, 8.41442111e-01], atol=1e-6)
class TestKappa4(object):
def test_cdf_genpareto(self):
# h = 1 and k != 0 is generalized Pareto
x = [0.0, 0.1, 0.2, 0.5]
h = 1.0
for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0,
1.9]:
vals = stats.kappa4.cdf(x, h, k)
# shape parameter is opposite what is expected
vals_comp = stats.genpareto.cdf(x, -k)
assert_allclose(vals, vals_comp)
def test_cdf_genextreme(self):
# h = 0 and k != 0 is generalized extreme value
x = np.linspace(-5, 5, 10)
h = 0.0
k = np.linspace(-3, 3, 10)
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.genextreme.cdf(x, k)
assert_allclose(vals, vals_comp)
def test_cdf_expon(self):
# h = 1 and k = 0 is exponential
x = np.linspace(0, 10, 10)
h = 1.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.expon.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_gumbel_r(self):
# h = 0 and k = 0 is gumbel_r
x = np.linspace(-5, 5, 10)
h = 0.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.gumbel_r.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_logistic(self):
# h = -1 and k = 0 is logistic
x = np.linspace(-5, 5, 10)
h = -1.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.logistic.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_uniform(self):
# h = 1 and k = 1 is uniform
x = np.linspace(-5, 5, 10)
h = 1.0
k = 1.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.uniform.cdf(x)
assert_allclose(vals, vals_comp)
def test_integers_ctor(self):
# regression test for gh-7416: _argcheck fails for integer h and k
# in numpy 1.12
stats.kappa4(1, 2)
class TestPoisson(object):
def setup_method(self):
np.random.seed(1234)
def test_pmf_basic(self):
# Basic case
ln2 = np.log(2)
vals = stats.poisson.pmf([0, 1, 2], ln2)
expected = [0.5, ln2/2, ln2**2/4]
assert_allclose(vals, expected)
def test_mu0(self):
# Edge case: mu=0
vals = stats.poisson.pmf([0, 1, 2], 0)
expected = [1, 0, 0]
assert_array_equal(vals, expected)
interval = stats.poisson.interval(0.95, 0)
assert_equal(interval, (0, 0))
def test_rvs(self):
vals = stats.poisson.rvs(0.5, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.poisson.rvs(0.5)
assert_(isinstance(val, int))
val = stats.poisson(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_stats(self):
mu = 16.0
result = stats.poisson.stats(mu, moments='mvsk')
assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
mu = np.array([0.0, 1.0, 2.0])
result = stats.poisson.stats(mu, moments='mvsk')
expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])
assert_allclose(result, expected)
class TestKSTwo(object):
def setup_method(self):
np.random.seed(1234)
def test_cdf(self):
for n in [1, 2, 3, 10, 100, 1000]:
# Test x-values:
# 0, 1/2n, where the cdf should be 0
# 1/n, where the cdf should be n!/n^n
# 0.5, where the cdf should match ksone.cdf
# 1-1/n, where cdf = 1-2/n^n
# 1, where cdf == 1
# (E.g. Exact values given by Eqn 1 in Simard / L'Ecuyer)
x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
v1 = (1.0/n)**n
lg = scipy.special.gammaln(n+1)
elg = (np.exp(lg) if v1 != 0 else 0)
expected = np.array([0, 0, v1 * elg,
1 - 2*stats.ksone.sf(0.5, n),
max(1 - 2*v1, 0.0),
1.0])
vals_cdf = stats.kstwo.cdf(x, n)
assert_allclose(vals_cdf, expected)
def test_sf(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
# Same x values as in test_cdf, and use sf = 1 - cdf
x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
v1 = (1.0/n)**n
lg = scipy.special.gammaln(n+1)
elg = (np.exp(lg) if v1 != 0 else 0)
expected = np.array([1.0, 1.0,
1 - v1 * elg,
2*stats.ksone.sf(0.5, n),
min(2*v1, 1.0), 0])
vals_sf = stats.kstwo.sf(x, n)
assert_allclose(vals_sf, expected)
def test_cdf_sqrtn(self):
# For fixed a, cdf(a/sqrt(n), n) -> kstwobign(a) as n->infinity
# cdf(a/sqrt(n), n) is an increasing function of n (and a)
# Check that the function is indeed increasing (allowing for some
# small floating point and algorithm differences.)
x = np.linspace(0, 2, 11)[1:]
ns = [50, 100, 200, 400, 1000, 2000]
for _x in x:
xn = _x / np.sqrt(ns)
probs = stats.kstwo.cdf(xn, ns)
diffs = np.diff(probs)
assert_array_less(diffs, 1e-8)
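        # Illustrative limit check (added): for large n, cdf(a/sqrt(n), n)
        # approaches the kstwobign cdf at a; the tolerance is deliberately
        # loose since convergence is only O(1/sqrt(n)).
        assert_allclose(stats.kstwo.cdf(1.0 / np.sqrt(2000), 2000),
                        stats.kstwobign.cdf(1.0), atol=0.05)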
def test_cdf_sf(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
vals_cdf = stats.kstwo.cdf(x, n)
vals_sf = stats.kstwo.sf(x, n)
assert_array_almost_equal(vals_cdf, 1 - vals_sf)
def test_cdf_sf_sqrtn(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = x / np.sqrt(n)
vals_cdf = stats.kstwo.cdf(xn, n)
vals_sf = stats.kstwo.sf(xn, n)
assert_array_almost_equal(vals_cdf, 1 - vals_sf)
def test_ppf_of_cdf(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = x[x > 0.5/n]
vals_cdf = stats.kstwo.cdf(xn, n)
# CDFs close to 1 are better dealt with using the SF
cond = (0 < vals_cdf) & (vals_cdf < 0.99)
vals = stats.kstwo.ppf(vals_cdf, n)
assert_allclose(vals[cond], xn[cond], rtol=1e-4)
def test_isf_of_sf(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = x[x > 0.5/n]
vals_isf = stats.kstwo.isf(xn, n)
cond = (0 < vals_isf) & (vals_isf < 1.0)
vals = stats.kstwo.sf(vals_isf, n)
assert_allclose(vals[cond], xn[cond], rtol=1e-4)
def test_ppf_of_cdf_sqrtn(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = (x / np.sqrt(n))[x > 0.5/n]
vals_cdf = stats.kstwo.cdf(xn, n)
cond = (0 < vals_cdf) & (vals_cdf < 1.0)
vals = stats.kstwo.ppf(vals_cdf, n)
assert_allclose(vals[cond], xn[cond])
def test_isf_of_sf_sqrtn(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = (x / np.sqrt(n))[x > 0.5/n]
vals_sf = stats.kstwo.sf(xn, n)
# SFs close to 1 are better dealt with using the CDF
cond = (0 < vals_sf) & (vals_sf < 0.95)
vals = stats.kstwo.isf(vals_sf, n)
assert_allclose(vals[cond], xn[cond])
def test_ppf(self):
probs = np.linspace(0, 1, 11)[1:]
for n in [1, 2, 3, 10, 100, 1000]:
xn = stats.kstwo.ppf(probs, n)
vals_cdf = stats.kstwo.cdf(xn, n)
assert_allclose(vals_cdf, probs)
def test_simard_lecuyer_table1(self):
# Compute the cdf for values near the mean of the distribution.
# The mean u ~ log(2)*sqrt(pi/(2n))
# Compute for x in [u/4, u/3, u/2, u, 2u, 3u]
# This is the computation of Table 1 of Simard, R., L'Ecuyer, P. (2011)
# "Computing the Two-Sided Kolmogorov-Smirnov Distribution".
# Except that the values below are not from the published table, but
# were generated using an independent SageMath implementation of
# Durbin's algorithm (with the exponentiation and scaling of
# Marsaglia/Tsang/Wang's version) using 500 bit arithmetic.
# Some of the values in the published table have relative
# errors greater than 1e-4.
ns = [10, 50, 100, 200, 500, 1000]
ratios = np.array([1.0/4, 1.0/3, 1.0/2, 1, 2, 3])
expected = np.array([
[1.92155292e-08, 5.72933228e-05, 2.15233226e-02, 6.31566589e-01, 9.97685592e-01, 9.99999942e-01],
[2.28096224e-09, 1.99142563e-05, 1.42617934e-02, 5.95345542e-01, 9.96177701e-01, 9.99998662e-01],
[1.00201886e-09, 1.32673079e-05, 1.24608594e-02, 5.86163220e-01, 9.95866877e-01, 9.99998240e-01],
[4.93313022e-10, 9.52658029e-06, 1.12123138e-02, 5.79486872e-01, 9.95661824e-01, 9.99997964e-01],
[2.37049293e-10, 6.85002458e-06, 1.01309221e-02, 5.73427224e-01, 9.95491207e-01, 9.99997750e-01],
[1.56990874e-10, 5.71738276e-06, 9.59725430e-03, 5.70322692e-01, 9.95409545e-01, 9.99997657e-01]
])
for idx, n in enumerate(ns):
x = ratios * np.log(2) * np.sqrt(np.pi/2/n)
vals_cdf = stats.kstwo.cdf(x, n)
assert_allclose(vals_cdf, expected[idx], rtol=1e-5)
class TestZipf(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.zipf.rvs(1.5, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.zipf.rvs(1.5)
assert_(isinstance(val, int))
val = stats.zipf(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_moments(self):
# n-th moment is finite iff a > n + 1
m, v = stats.zipf.stats(a=2.8)
assert_(np.isfinite(m))
assert_equal(v, np.inf)
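        # Cross-check the finite mean against the standard zipf identity
        # mean = zeta(a-1)/zeta(a) (illustrative addition).
        assert_allclose(m, special.zeta(1.8, 1) / special.zeta(2.8, 1))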
s, k = stats.zipf.stats(a=4.8, moments='sk')
assert_(not np.isfinite([s, k]).all())
class TestDLaplace(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.dlaplace.rvs(1.5, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.dlaplace.rvs(1.5)
assert_(isinstance(val, int))
val = stats.dlaplace(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
assert_(stats.dlaplace.rvs(0.8) is not None)
def test_stats(self):
# compare the explicit formulas w/ direct summation using pmf
a = 1.
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
N = 37
xx = np.arange(-N, N+1)
pp = dl.pmf(xx)
m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)
assert_equal((m, s), (0, 0))
assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)
def test_stats2(self):
a = np.log(2.)
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
assert_equal((m, s), (0., 0.))
assert_allclose((v, k), (4., 3.25))
class TestInvGamma(object):
def test_invgamma_inf_gh_1866(self):
# invgamma's moments are only finite for a>n
# specific numbers checked w/ boost 1.54
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
expected = [0.05461496450, 0.0001723162534, 1.020362676,
2.055616582]
assert_allclose(mvsk, expected)
a = [1.1, 3.1, 5.6]
mvsk = stats.invgamma.stats(a=a, moments='mvsk')
expected = ([10., 0.476190476, 0.2173913043], # mmm
[np.inf, 0.2061430632, 0.01312749422], # vvv
[np.nan, 41.95235392, 2.919025532], # sss
[np.nan, np.nan, 24.51923076]) # kkk
for x, y in zip(mvsk, expected):
assert_almost_equal(x, y)
def test_cdf_ppf(self):
# gh-6245
x = np.logspace(-2.6, 0)
y = stats.invgamma.cdf(x, 1)
xx = stats.invgamma.ppf(y, 1)
assert_allclose(x, xx)
def test_sf_isf(self):
# gh-6245
if sys.maxsize > 2**32:
x = np.logspace(2, 100)
else:
            # Invgamma roundtrip on 32-bit systems has relative accuracy
# ~1e-15 until x=1e+15, and becomes inf above x=1e+18
x = np.logspace(2, 18)
y = stats.invgamma.sf(x, 1)
xx = stats.invgamma.isf(y, 1)
assert_allclose(x, xx, rtol=1.0)
class TestF(object):
def test_endpoints(self):
# Compute the pdf at the left endpoint dst.a.
data = [[stats.f, (2, 1), 1.0]]
for _f, _args, _correct in data:
ans = _f.pdf(_f.a, *_args)
print(_f, (_args), ans, _correct, ans == _correct)
ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
correct = [_correct_ for _f, _args, _correct_ in data]
assert_array_almost_equal(ans, correct)
def test_f_moments(self):
# n-th moment of F distributions is only finite for n < dfd / 2
m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
assert_(np.isfinite(m))
assert_(np.isfinite(v))
assert_(np.isfinite(s))
assert_(not np.isfinite(k))
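        # Spot-check the finite moments against the F closed forms (added):
        # mean = dfd/(dfd - 2) for dfd > 2 and
        # var = 2*dfd**2*(dfn + dfd - 2) / (dfn*(dfd - 2)**2*(dfd - 4)).
        assert_allclose(m, 6.5 / 4.5)
        assert_allclose(v, 2 * 6.5**2 * (11 + 6.5 - 2) / (11 * 4.5**2 * 2.5))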
def test_moments_warnings(self):
# no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')
def test_stats_broadcast(self):
dfn = np.array([[3], [11]])
dfd = np.array([11, 12])
m, v, s, k = stats.f.stats(dfn=dfn, dfd=dfd, moments='mvsk')
m2 = [dfd / (dfd - 2)]*2
assert_allclose(m, m2)
v2 = 2 * dfd**2 * (dfn + dfd - 2) / dfn / (dfd - 2)**2 / (dfd - 4)
assert_allclose(v, v2)
s2 = ((2*dfn + dfd - 2) * np.sqrt(8*(dfd - 4)) /
((dfd - 6) * np.sqrt(dfn*(dfn + dfd - 2))))
assert_allclose(s, s2)
k2num = 12 * (dfn * (5*dfd - 22) * (dfn + dfd - 2) +
(dfd - 4) * (dfd - 2)**2)
k2den = dfn * (dfd - 6) * (dfd - 8) * (dfn + dfd - 2)
k2 = k2num / k2den
assert_allclose(k, k2)
def test_rvgeneric_std():
# Regression test for #1191
assert_array_almost_equal(stats.t.std([5, 6]), [1.29099445, 1.22474487])
def test_moments_t():
# regression test for #8786
assert_equal(stats.t.stats(df=1, moments='mvsk'),
(np.inf, np.nan, np.nan, np.nan))
assert_equal(stats.t.stats(df=1.01, moments='mvsk'),
(0.0, np.inf, np.nan, np.nan))
assert_equal(stats.t.stats(df=2, moments='mvsk'),
(0.0, np.inf, np.nan, np.nan))
assert_equal(stats.t.stats(df=2.01, moments='mvsk'),
(0.0, 2.01/(2.01-2.0), np.nan, np.inf))
assert_equal(stats.t.stats(df=3, moments='sk'), (np.nan, np.inf))
assert_equal(stats.t.stats(df=3.01, moments='sk'), (0.0, np.inf))
assert_equal(stats.t.stats(df=4, moments='sk'), (0.0, np.inf))
assert_equal(stats.t.stats(df=4.01, moments='sk'), (0.0, 6.0/(4.01 - 4.0)))
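    # The patterns above follow the Student t closed forms: the variance is
    # df/(df - 2) for df > 2 and the excess kurtosis is 6/(df - 4) for
    # df > 4 (explanatory note added).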
class TestRvDiscrete(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
states = [-1, 0, 1, 2, 3, 4]
probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
samples = 1000
r = stats.rv_discrete(name='sample', values=(states, probability))
x = r.rvs(size=samples)
assert_(isinstance(x, numpy.ndarray))
for s, p in zip(states, probability):
assert_(abs(sum(x == s)/float(samples) - p) < 0.05)
x = r.rvs()
assert_(isinstance(x, int))
def test_entropy(self):
# Basic tests of entropy.
pvals = np.array([0.25, 0.45, 0.3])
p = stats.rv_discrete(values=([0, 1, 2], pvals))
expected_h = -sum(xlogy(pvals, pvals))
h = p.entropy()
assert_allclose(h, expected_h)
p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
h = p.entropy()
assert_equal(h, 0.0)
def test_pmf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
x = [[1., 4.],
[3., 2]]
assert_allclose(rv.pmf(x),
[[0.5, 0.2],
[0., 0.3]], atol=1e-14)
def test_cdf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5]
expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1]
assert_allclose(rv.cdf(x_values), expected, atol=1e-14)
# also check scalar arguments
assert_allclose([rv.cdf(xx) for xx in x_values],
expected, atol=1e-14)
def test_ppf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.]
expected = [1, 1, 2, 2, 4, 4]
assert_allclose(rv.ppf(q_values), expected, atol=1e-14)
# also check scalar arguments
assert_allclose([rv.ppf(q) for q in q_values],
expected, atol=1e-14)
def test_cdf_ppf_next(self):
# copied and special cased from test_discrete_basic
vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1])
rv = stats.rv_discrete(values=vals)
assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8),
rv.xk[1:])
def test_expect(self):
xk = [1, 2, 4, 6, 7, 11]
pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]
rv = stats.rv_discrete(values=(xk, pk))
assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
def test_multidimension(self):
xk = np.arange(12).reshape((3, 4))
pk = np.array([[0.1, 0.1, 0.15, 0.05],
[0.1, 0.1, 0.05, 0.05],
[0.1, 0.1, 0.05, 0.05]])
rv = stats.rv_discrete(values=(xk, pk))
assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
def test_bad_input(self):
xk = [1, 2, 3]
pk = [0.5, 0.5]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
pk = [1, 2, 3]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
xk = [1, 2, 3]
pk = [0.5, 1.2, -0.7]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
xk = [1, 2, 3, 4, 5]
pk = [0.3, 0.3, 0.3, 0.3, -0.2]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
def test_shape_rv_sample(self):
# tests added for gh-9565
# mismatch of 2d inputs
xk, pk = np.arange(4).reshape((2, 2)), np.full((2, 3), 1/6)
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
# same number of elements, but shapes not compatible
xk, pk = np.arange(6).reshape((3, 2)), np.full((2, 3), 1/6)
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
# same shapes => no error
xk, pk = np.arange(6).reshape((3, 2)), np.full((3, 2), 1/6)
assert_equal(stats.rv_discrete(values=(xk, pk)).pmf(0), 1/6)
class TestSkewNorm(object):
def setup_method(self):
self.rng = check_random_state(1234)
def test_normal(self):
# When the skewness is 0 the distribution is normal
x = np.linspace(-5, 5, 100)
assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),
stats.norm.pdf(x))
def test_rvs(self):
shape = (3, 4, 5)
x = stats.skewnorm.rvs(a=0.75, size=shape, random_state=self.rng)
assert_equal(shape, x.shape)
x = stats.skewnorm.rvs(a=-3, size=shape, random_state=self.rng)
assert_equal(shape, x.shape)
def test_moments(self):
X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2,
random_state=self.rng)
expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
computed = stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk')
assert_array_almost_equal(computed, expected, decimal=2)
X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2,
random_state=self.rng)
expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
computed = stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk')
assert_array_almost_equal(computed, expected, decimal=2)
def test_cdf_large_x(self):
# Regression test for gh-7746.
# The x values are large enough that the closest 64 bit floating
# point representation of the exact CDF is 1.0.
p = stats.skewnorm.cdf([10, 20, 30], -1)
assert_allclose(p, np.ones(3), rtol=1e-14)
p = stats.skewnorm.cdf(25, 2.5)
assert_allclose(p, 1.0, rtol=1e-14)
def test_cdf_sf_small_values(self):
# Triples are [x, a, cdf(x, a)]. These values were computed
# using CDF[SkewNormDistribution[0, 1, a], x] in Wolfram Alpha.
cdfvals = [
[-8, 1, 3.870035046664392611e-31],
[-4, 2, 8.1298399188811398e-21],
[-2, 5, 1.55326826787106273e-26],
[-9, -1, 2.257176811907681295e-19],
[-10, -4, 1.523970604832105213e-23],
]
for x, a, cdfval in cdfvals:
p = stats.skewnorm.cdf(x, a)
assert_allclose(p, cdfval, rtol=1e-8)
# For the skew normal distribution, sf(-x, -a) = cdf(x, a).
p = stats.skewnorm.sf(-x, -a)
assert_allclose(p, cdfval, rtol=1e-8)
class TestExpon(object):
def test_zero(self):
assert_equal(stats.expon.pdf(0), 1)
def test_tail(self): # Regression test for ticket 807
assert_equal(stats.expon.cdf(1e-18), 1e-18)
assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.expon.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.expon.fit, x)
class TestNorm(object):
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.norm.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.norm.fit, x)
def test_bad_keyword_arg(self):
x = [1, 2, 3]
assert_raises(TypeError, stats.norm.fit, x, plate="shrimp")
class TestUniform(object):
"""gh-10300"""
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.uniform.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.uniform.fit, x)
class TestExponNorm(object):
def test_moments(self):
# Some moment test cases based on non-loc/scaled formula
def get_moms(lam, sig, mu):
# See wikipedia for these formulae
# where it is listed as an exponentially modified gaussian
opK2 = 1.0 + 1 / (lam*sig)**2
exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]
mu, sig, lam = 0, 1, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -3, 2, 0.1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = 0, 3, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -5, 11, 3.5
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.exponnorm.fit, x, floc=0, fscale=1)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.exponnorm.fit, x, floc=0, fscale=1)
def test_extremes_x(self):
# Test for extreme values against overflows
assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
assert_almost_equal(stats.exponnorm.pdf(1, 0.01), 0.0)
assert_almost_equal(stats.exponnorm.pdf(-900, 0.01), 0.0)
assert_almost_equal(stats.exponnorm.pdf(+900, 0.01), 0.0)
class TestGenExpon(object):
def test_pdf_unity_area(self):
from scipy.integrate import simps
# PDF should integrate to one
p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
assert_almost_equal(simps(p, dx=0.01), 1, 1)
def test_cdf_bounds(self):
# CDF should always be positive
cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
assert_(numpy.all((0 <= cdf) & (cdf <= 1)))
class TestExponpow(object):
def test_tail(self):
assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8),
5)
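        # The 1e-20 above follows from the small-x behaviour of the cdf,
        # 1 - exp(1 - exp(x**b)) ~ x**b as x -> 0 (explanatory note added).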
class TestSkellam(object):
def test_pmf(self):
# comparison to R
k = numpy.arange(-10, 15)
mu1, mu2 = 10, 5
skpmfR = numpy.array(
[4.2254582961926893e-005, 1.1404838449648488e-004,
2.8979625801752660e-004, 6.9177078182101231e-004,
1.5480716105844708e-003, 3.2412274963433889e-003,
6.3373707175123292e-003, 1.1552351566696643e-002,
1.9606152375042644e-002, 3.0947164083410337e-002,
4.5401737566767360e-002, 6.1894328166820688e-002,
7.8424609500170578e-002, 9.2418812533573133e-002,
1.0139793148019728e-001, 1.0371927988298846e-001,
9.9076583077406091e-002, 8.8546660073089561e-002,
7.4187842052486810e-002, 5.8392772862200251e-002,
4.3268692953013159e-002, 3.0248159818374226e-002,
1.9991434305603021e-002, 1.2516877303301180e-002,
7.4389876226229707e-003])
assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)
def test_cdf(self):
# comparison to R, only 5 decimals
k = numpy.arange(-10, 15)
mu1, mu2 = 10, 5
skcdfR = numpy.array(
[6.4061475386192104e-005, 1.7810985988267694e-004,
4.6790611790020336e-004, 1.1596768997212152e-003,
2.7077485103056847e-003, 5.9489760066490718e-003,
1.2286346724161398e-002, 2.3838698290858034e-002,
4.3444850665900668e-002, 7.4392014749310995e-002,
1.1979375231607835e-001, 1.8168808048289900e-001,
2.6011268998306952e-001, 3.5253150251664261e-001,
4.5392943399683988e-001, 5.5764871387982828e-001,
6.5672529695723436e-001, 7.4527195703032389e-001,
8.1945979908281064e-001, 8.7785257194501087e-001,
9.2112126489802404e-001, 9.5136942471639818e-001,
9.7136085902200120e-001, 9.8387773632530240e-001,
9.9131672394792536e-001])
assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
class TestLognorm(object):
def test_pdf(self):
# Regression test for Ticket #1471: avoid nan with 0/0 situation
# Also make sure there are no warnings at x=0, cf gh-5202
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
pdf = stats.lognorm.pdf([0, 0.5, 1], 1)
assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])
def test_logcdf(self):
# Regression test for gh-5940: sf et al would underflow too early
x2, mu, sigma = 201.68, 195, 0.149
assert_allclose(stats.lognorm.sf(x2-mu, s=sigma),
stats.norm.sf(np.log(x2-mu)/sigma))
assert_allclose(stats.lognorm.logsf(x2-mu, s=sigma),
stats.norm.logsf(np.log(x2-mu)/sigma))
class TestBeta(object):
def test_logpdf(self):
# Regression test for Ticket #1326: avoid nan with 0*log(0) situation
logpdf = stats.beta.logpdf(0, 1, 0.5)
assert_almost_equal(logpdf, -0.69314718056)
logpdf = stats.beta.logpdf(0, 0.5, 1)
assert_almost_equal(logpdf, np.inf)
def test_logpdf_ticket_1866(self):
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
b = stats.beta(alpha, beta)
assert_allclose(b.logpdf(x).sum(), -1201.699061824062)
assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
def test_fit_bad_keyword_args(self):
x = [0.1, 0.5, 0.6]
assert_raises(TypeError, stats.beta.fit, x, floc=0, fscale=1,
plate="shrimp")
def test_fit_duplicated_fixed_parameter(self):
# At most one of 'f0', 'fa' or 'fix_a' can be given to the fit method.
# More than one raises a ValueError.
x = [0.1, 0.5, 0.6]
assert_raises(ValueError, stats.beta.fit, x, fa=0.5, fix_a=0.5)
class TestBetaPrime(object):
def test_logpdf(self):
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
b = stats.betaprime(alpha, beta)
assert_(np.isfinite(b.logpdf(x)).all())
assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
def test_cdf(self):
# regression test for gh-4030: Implementation of
# scipy.stats.betaprime.cdf()
x = stats.betaprime.cdf(0, 0.2, 0.3)
assert_equal(x, 0.0)
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
cdfs = stats.betaprime.cdf(x, alpha, beta)
assert_(np.isfinite(cdfs).all())
# check the new cdf implementation vs generic one:
gen_cdf = stats.rv_continuous._cdf_single
cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)
class TestGamma(object):
def test_pdf(self):
# a few test cases to compare with R
pdf = stats.gamma.pdf(90, 394, scale=1./5)
assert_almost_equal(pdf, 0.002312341)
pdf = stats.gamma.pdf(3, 10, scale=1./5)
assert_almost_equal(pdf, 0.1620358)
def test_logpdf(self):
# Regression test for Ticket #1326: cornercase avoid nan with 0*log(0)
# situation
logpdf = stats.gamma.logpdf(0, 1)
assert_almost_equal(logpdf, 0)
def test_fit_bad_keyword_args(self):
x = [0.1, 0.5, 0.6]
assert_raises(TypeError, stats.gamma.fit, x, floc=0, plate="shrimp")
class TestChi2(object):
# regression tests after precision improvements, ticket:1041, not verified
def test_precision(self):
assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003,
decimal=14)
assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778,
decimal=14)
def test_ppf(self):
# Expected values computed with mpmath.
df = 4.8
x = stats.chi2.ppf(2e-47, df)
assert_allclose(x, 1.098472479575179840604902808e-19, rtol=1e-10)
x = stats.chi2.ppf(0.5, df)
assert_allclose(x, 4.15231407598589358660093156, rtol=1e-10)
df = 13
x = stats.chi2.ppf(2e-77, df)
assert_allclose(x, 1.0106330688195199050507943e-11, rtol=1e-10)
x = stats.chi2.ppf(0.1, df)
assert_allclose(x, 7.041504580095461859307179763, rtol=1e-10)
class TestGumbelL(object):
# gh-6228
def test_cdf_ppf(self):
x = np.linspace(-100, -4)
y = stats.gumbel_l.cdf(x)
xx = stats.gumbel_l.ppf(y)
assert_allclose(x, xx)
def test_logcdf_logsf(self):
x = np.linspace(-100, -4)
y = stats.gumbel_l.logcdf(x)
z = stats.gumbel_l.logsf(x)
u = np.exp(y)
v = -special.expm1(z)
assert_allclose(u, v)
def test_sf_isf(self):
x = np.linspace(-20, 5)
y = stats.gumbel_l.sf(x)
xx = stats.gumbel_l.isf(y)
assert_allclose(x, xx)
class TestLevyStable(object):
def test_fit(self):
# construct data to have percentiles that match
# example in McCulloch 1986.
        x = [-.05413, -.05413,
             0., 0., 0., 0.,
             .00533, .00533, .00533, .00533, .00533,
             .03354, .03354, .03354, .03354, .03354,
             .05309, .05309, .05309, .05309, .05309]
alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
assert_allclose(alpha1, 1.48, rtol=0, atol=0.01)
assert_almost_equal(beta1, -.22, 2)
assert_almost_equal(scale1, 0.01717, 4)
assert_almost_equal(loc1, 0.00233, 2) # to 2 dps due to rounding error in McCulloch86
# cover alpha=2 scenario
        x2 = x + [.05309, .05309, .05309, .05309, .05309]
alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2)
assert_equal(alpha2, 2)
assert_equal(beta2, -1)
assert_almost_equal(scale2, .02503, 4)
assert_almost_equal(loc2, .03354, 4)
@pytest.mark.slow
def test_pdf_nolan_samples(self):
""" Test pdf values against Nolan's stablec.exe output
see - http://fs2.american.edu/jpnolan/www/stable/stable.html
There's a known limitation of Nolan's executable for alpha < 0.2.
Repeat following with beta = -1, -.5, 0, .5 and 1
stablec.exe <<
1 # pdf
1 # Nolan S equivalent to S0 in scipy
.25,2,.25 # alpha
-1,-1,0 # beta
-10,10,1 # x
1,0 # gamma, delta
2 # output file
"""
data = np.load(os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/stable-pdf-sample-data.npy')))
data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')
# support numpy 1.8.2 for travis
npisin = np.isin if hasattr(np, "isin") else np.in1d
tests = [
# best selects
['best', None, 8, None],
            # quadrature is accurate for most alpha except 0.25; perhaps a
            # limitation of Nolan's stablec?
            # we reduce the size of x to speed up computation, as numerical
            # integration is slow.
['quadrature', None, 8, lambda r: (r['alpha'] > 0.25) & (npisin(r['x'], [-10,-5,0,5,10]))],
            # zolotarev is accurate except at alpha==1, beta != 0
['zolotarev', None, 8, lambda r: r['alpha'] != 1],
['zolotarev', None, 8, lambda r: (r['alpha'] == 1) & (r['beta'] == 0)],
['zolotarev', None, 1, lambda r: (r['alpha'] == 1) & (r['beta'] != 0)],
# fft accuracy reduces as alpha decreases, fails at low values of alpha and x=0
['fft', 0, 4, lambda r: r['alpha'] > 1],
['fft', 0, 3, lambda r: (r['alpha'] < 1) & (r['alpha'] > 0.25)],
['fft', 0, 1, lambda r: (r['alpha'] == 0.25) & (r['x'] != 0)], # not useful here
]
for ix, (default_method, fft_min_points, decimal_places, filter_func) in enumerate(tests):
stats.levy_stable.pdf_default_method = default_method
stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
subdata = data[filter_func(data)] if filter_func is not None else data
with suppress_warnings() as sup:
sup.record(RuntimeWarning, "Density calculation unstable for alpha=1 and beta!=0.*")
sup.record(RuntimeWarning, "Density calculations experimental for FFT method.*")
p = stats.levy_stable.pdf(subdata['x'], subdata['alpha'], subdata['beta'], scale=1, loc=0)
subdata2 = rec_append_fields(subdata, 'calc', p)
failures = subdata2[(np.abs(p-subdata['p']) >= 1.5*10.**(-decimal_places)) | np.isnan(p)]
assert_almost_equal(p, subdata['p'], decimal_places, "pdf test %s failed with method '%s'\n%s" % (ix, default_method, failures), verbose=False)
@pytest.mark.slow
def test_cdf_nolan_samples(self):
""" Test cdf values against Nolan's stablec.exe output
see - http://fs2.american.edu/jpnolan/www/stable/stable.html
There's a known limitation of Nolan's executable for alpha < 0.2.
Repeat following with beta = -1, -.5, 0, .5 and 1
stablec.exe <<
2 # cdf
1 # Nolan S equivalent to S0 in scipy
.25,2,.25 # alpha
-1,-1,0 # beta
-10,10,1 # x
1,0 # gamma, delta
2 # output file
"""
data = np.load(os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/stable-cdf-sample-data.npy')))
data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')
tests = [
            # zolotarev is accurate for all values
['zolotarev', None, 8, None],
            # fft accuracy is poor, and very poor for alpha < 1
['fft', 0, 2, lambda r: r['alpha'] > 1],
]
for ix, (default_method, fft_min_points, decimal_places, filter_func) in enumerate(tests):
stats.levy_stable.pdf_default_method = default_method
stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
subdata = data[filter_func(data)] if filter_func is not None else data
with suppress_warnings() as sup:
sup.record(RuntimeWarning, 'FFT method is considered ' +
'experimental for cumulative distribution ' +
'function evaluations.*')
p = stats.levy_stable.cdf(subdata['x'], subdata['alpha'], subdata['beta'], scale=1, loc=0)
subdata2 = rec_append_fields(subdata, 'calc', p)
failures = subdata2[(np.abs(p-subdata['p']) >= 1.5*10.**(-decimal_places)) | np.isnan(p)]
assert_almost_equal(p, subdata['p'], decimal_places, "cdf test %s failed with method '%s'\n%s" % (ix, default_method, failures), verbose=False)
def test_pdf_alpha_equals_one_beta_non_zero(self):
""" sample points extracted from Tables and Graphs of Stable Probability
Density Functions - Donald R Holt - 1973 - p 187.
"""
xs = np.array([0, 0, 0, 0,
1, 1, 1, 1,
2, 2, 2, 2,
3, 3, 3, 3,
4, 4, 4, 4])
density = np.array([.3183, .3096, .2925, .2622,
.1591, .1587, .1599, .1635,
.0637, .0729, .0812, .0955,
.0318, .0390, .0458, .0586,
.0187, .0236, .0285, .0384])
betas = np.array([0, .25, .5, 1,
0, .25, .5, 1,
0, .25, .5, 1,
0, .25, .5, 1,
0, .25, .5, 1])
tests = [
['quadrature', None, 4],
#['fft', 0, 4],
['zolotarev', None, 1],
]
with np.errstate(all='ignore'), suppress_warnings() as sup:
sup.filter(category=RuntimeWarning, message="Density calculation unstable.*")
for default_method, fft_min_points, decimal_places in tests:
stats.levy_stable.pdf_default_method = default_method
stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
#stats.levy_stable.fft_grid_spacing = 0.0001
pdf = stats.levy_stable.pdf(xs, 1, betas, scale=1, loc=0)
assert_almost_equal(pdf, density, decimal_places, default_method)
def test_stats(self):
param_sets = [
            [(1.48, -.22, 0, 1), (0, np.inf, np.NaN, np.NaN)],
            [(2, .9, 10, 1.5), (10, 4.5, 0, 0)]
]
for args, exp_stats in param_sets:
calc_stats = stats.levy_stable.stats(args[0], args[1], loc=args[2], scale=args[3], moments='mvsk')
assert_almost_equal(calc_stats, exp_stats)
class TestArrayArgument(object): # test for ticket:992
def setup_method(self):
np.random.seed(1234)
def test_noexception(self):
rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5),
size=(10, 5))
assert_equal(rvs.shape, (10, 5))
class TestDocstring(object):
def test_docstrings(self):
# See ticket #761
if stats.rayleigh.__doc__ is not None:
assert_("rayleigh" in stats.rayleigh.__doc__.lower())
if stats.bernoulli.__doc__ is not None:
assert_("bernoulli" in stats.bernoulli.__doc__.lower())
def test_no_name_arg(self):
# If name is not given, construction shouldn't fail. See #1508.
stats.rv_continuous()
stats.rv_discrete()
class TestEntropy(object):
def test_entropy_positive(self):
# See ticket #497
pk = [0.5, 0.2, 0.3]
qk = [0.1, 0.25, 0.65]
eself = stats.entropy(pk, pk)
edouble = stats.entropy(pk, qk)
assert_(0.0 == eself)
assert_(edouble >= 0.0)
def test_entropy_base(self):
pk = np.ones(16, float)
S = stats.entropy(pk, base=2.)
assert_(abs(S - 4.) < 1.e-5)
qk = np.ones(16, float)
qk[:8] = 2.
S = stats.entropy(pk, qk)
S2 = stats.entropy(pk, qk, base=2.)
assert_(abs(S/S2 - np.log(2.)) < 1.e-5)
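        # The ratio check works because entropy computed in base b equals the
        # natural-log entropy divided by log(b) (explanatory note added).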
def test_entropy_zero(self):
# Test for PR-479
assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,
decimal=12)
def test_entropy_2d(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk),
[0.1933259, 0.18609809])
def test_entropy_2d_zero(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk),
[np.inf, 0.18609809])
pk[0][0] = 0.0
assert_array_almost_equal(stats.entropy(pk, qk),
[0.17403988, 0.18609809])
def test_entropy_base_2d_nondefault_axis(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
assert_array_almost_equal(stats.entropy(pk, axis=1),
[0.63651417, 0.63651417, 0.66156324])
def test_entropy_2d_nondefault_axis(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk, axis=1),
[0.231049, 0.231049, 0.127706])
def test_entropy_raises_value_error(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.1, 0.2], [0.6, 0.3]]
assert_raises(ValueError, stats.entropy, pk, qk)
def test_base_entropy_with_axis_0_is_equal_to_default(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
assert_array_almost_equal(stats.entropy(pk, axis=0),
stats.entropy(pk))
def test_entropy_with_axis_0_is_equal_to_default(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk, axis=0),
stats.entropy(pk, qk))
def test_base_entropy_transposed(self):
pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
assert_array_almost_equal(stats.entropy(pk.T).T,
stats.entropy(pk, axis=1))
def test_entropy_transposed(self):
pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
qk = np.array([[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]])
assert_array_almost_equal(stats.entropy(pk.T, qk.T).T,
stats.entropy(pk, qk, axis=1))
def test_argsreduce():
a = array([1, 3, 2, 1, 2, 3, 3])
b, c = argsreduce(a > 1, a, 2)
assert_array_equal(b, [3, 2, 2, 3, 3])
assert_array_equal(c, [2, 2, 2, 2, 2])
b, c = argsreduce(2 > 1, a, 2)
assert_array_equal(b, a[0])
assert_array_equal(c, [2])
b, c = argsreduce(a > 0, a, 2)
assert_array_equal(b, a)
assert_array_equal(c, [2] * numpy.size(a))
class TestFitMethod(object):
skip = ['ncf', 'ksone', 'kstwo']
def setup_method(self):
np.random.seed(1234)
    # skip these because they are deprecated, or have only loc and scale
    # arguments
    fitSkipNonFinite = ['frechet_l', 'frechet_r', 'expon', 'norm', 'uniform']
@pytest.mark.parametrize('dist,args', distcont)
def test_fit_w_non_finite_data_values(self, dist, args):
"""gh-10300"""
if dist in self.fitSkipNonFinite:
pytest.skip("%s fit known to fail or deprecated" % dist)
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
y = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
distfunc = getattr(stats, dist)
assert_raises(RuntimeError, distfunc.fit, x, floc=0, fscale=1)
assert_raises(RuntimeError, distfunc.fit, y, floc=0, fscale=1)
def test_fix_fit_2args_lognorm(self):
# Regression test for #1551.
np.random.seed(12345)
with np.errstate(all='ignore'):
x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
expected_shape = np.sqrt(((np.log(x) - np.log(20))**2).mean())
assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
[expected_shape, 0, 20], atol=1e-8)
def test_fix_fit_norm(self):
x = np.arange(1, 6)
loc, scale = stats.norm.fit(x)
assert_almost_equal(loc, 3)
assert_almost_equal(scale, np.sqrt(2))
loc, scale = stats.norm.fit(x, floc=2)
assert_equal(loc, 2)
assert_equal(scale, np.sqrt(3))
loc, scale = stats.norm.fit(x, fscale=2)
assert_almost_equal(loc, 3)
assert_equal(scale, 2)
def test_fix_fit_gamma(self):
x = np.arange(1, 6)
meanlog = np.log(x).mean()
# A basic test of gamma.fit with floc=0.
floc = 0
a, loc, scale = stats.gamma.fit(x, floc=floc)
s = np.log(x.mean()) - meanlog
assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# Regression tests for gh-2514.
# The problem was that if `floc=0` was given, any other fixed
# parameters were ignored.
f0 = 1
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
f0 = 2
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# loc and scale fixed.
floc = 0
fscale = 2
a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
assert_equal(loc, floc)
assert_equal(scale, fscale)
c = meanlog - np.log(fscale)
assert_almost_equal(special.digamma(a), c)
def test_fix_fit_beta(self):
# Test beta.fit when both floc and fscale are given.
def mlefunc(a, b, x):
# Zeros of this function are critical points of
# the maximum likelihood function.
n = len(x)
s1 = np.log(x).sum()
s2 = np.log(1-x).sum()
psiab = special.psi(a + b)
func = [s1 - n * (-psiab + special.psi(a)),
s2 - n * (-psiab + special.psi(b))]
return func
# Basic test with floc and fscale given.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
assert_equal(loc, 0)
assert_equal(scale, 1)
assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)
# Basic test with f0, floc and fscale given.
# This is also a regression test for gh-2514.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
assert_equal(a, 2)
assert_equal(loc, 0)
assert_equal(scale, 1)
da, db = mlefunc(a, b, x)
assert_allclose(db, 0, atol=1e-5)
# Same floc and fscale values as above, but reverse the data
# and fix b (f1).
x2 = 1 - x
a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
assert_equal(b2, 2)
assert_equal(loc2, 0)
assert_equal(scale2, 1)
da, db = mlefunc(a2, b2, x2)
assert_allclose(da, 0, atol=1e-5)
# a2 of this test should equal b from above.
assert_almost_equal(a2, b)
# Check for detection of data out of bounds when floc and fscale
# are given.
assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
y = np.array([0, .5, 1])
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)
# Check that attempting to fix all the parameters raises a ValueError.
assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
floc=2, fscale=3)
def test_expon_fit(self):
x = np.array([2, 2, 4, 4, 4, 4, 4, 8])
loc, scale = stats.expon.fit(x)
assert_equal(loc, 2) # x.min()
assert_equal(scale, 2) # x.mean() - x.min()
loc, scale = stats.expon.fit(x, fscale=3)
assert_equal(loc, 2) # x.min()
assert_equal(scale, 3) # fscale
loc, scale = stats.expon.fit(x, floc=0)
assert_equal(loc, 0) # floc
assert_equal(scale, 4) # x.mean() - loc
def test_lognorm_fit(self):
x = np.array([1.5, 3, 10, 15, 23, 59])
lnxm1 = np.log(x - 1)
shape, loc, scale = stats.lognorm.fit(x, floc=1)
assert_allclose(shape, lnxm1.std(), rtol=1e-12)
assert_equal(loc, 1)
assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
shape, loc, scale = stats.lognorm.fit(x, floc=1, fscale=6)
assert_allclose(shape, np.sqrt(((lnxm1 - np.log(6))**2).mean()),
rtol=1e-12)
assert_equal(loc, 1)
assert_equal(scale, 6)
shape, loc, scale = stats.lognorm.fit(x, floc=1, fix_s=0.75)
assert_equal(shape, 0.75)
assert_equal(loc, 1)
assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
def test_uniform_fit(self):
x = np.array([1.0, 1.1, 1.2, 9.0])
loc, scale = stats.uniform.fit(x)
assert_equal(loc, x.min())
assert_equal(scale, x.ptp())
loc, scale = stats.uniform.fit(x, floc=0)
assert_equal(loc, 0)
assert_equal(scale, x.max())
loc, scale = stats.uniform.fit(x, fscale=10)
assert_equal(loc, 0)
assert_equal(scale, 10)
assert_raises(ValueError, stats.uniform.fit, x, floc=2.0)
assert_raises(ValueError, stats.uniform.fit, x, fscale=5.0)
def test_fshapes(self):
# take a beta distribution, with shapes='a, b', and make sure that
# fa is equivalent to f0, and fb is equivalent to f1
a, b = 3., 4.
x = stats.beta.rvs(a, b, size=100, random_state=1234)
res_1 = stats.beta.fit(x, f0=3.)
res_2 = stats.beta.fit(x, fa=3.)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_2 = stats.beta.fit(x, fix_a=3.)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_3 = stats.beta.fit(x, f1=4.)
res_4 = stats.beta.fit(x, fb=4.)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
res_4 = stats.beta.fit(x, fix_b=4.)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
# cannot specify both positional and named args at the same time
assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2)
# check that attempting to fix all parameters raises a ValueError
assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
floc=2, fscale=3)
# check that specifying floc, fscale and fshapes works for
# beta and gamma which override the generic fit method
res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1)
aa, bb, ll, ss = res_5
assert_equal([aa, ll, ss], [3., 0, 1])
# gamma distribution
a = 3.
data = stats.gamma.rvs(a, size=100)
aa, ll, ss = stats.gamma.fit(data, fa=a)
assert_equal(aa, a)
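        # As exercised above, a shape parameter named `a` can be held
        # fixed during fitting under three equivalent spellings: by
        # position (f0=...), by name (fa=...), or with the fix_ prefix
        # (fix_a=...).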
def test_extra_params(self):
# unknown parameters should raise rather than be silently ignored
dist = stats.exponnorm
data = dist.rvs(K=2, size=100)
dct = dict(enikibeniki=-101)
assert_raises(TypeError, dist.fit, data, **dct)
class TestFrozen(object):
def setup_method(self):
np.random.seed(1234)
# Test that a frozen distribution gives the same results as the original
# object.
#
# Only tested for the normal distribution (with loc and scale specified)
# and for the gamma distribution (with a shape parameter specified).
def test_norm(self):
dist = stats.norm
frozen = stats.norm(loc=10.0, scale=3.0)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2, loc=10.0, scale=3.0)
assert_equal(result_f, result)
assert_equal(frozen.a, dist.a)
assert_equal(frozen.b, dist.b)
def test_gamma(self):
a = 2.0
dist = stats.gamma
frozen = stats.gamma(a)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, a)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(a)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(a)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(a)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(a)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(a)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2, a)
assert_equal(result_f, result)
assert_equal(frozen.a, frozen.dist.a)
assert_equal(frozen.b, frozen.dist.b)
def test_regression_ticket_1293(self):
# Create a frozen distribution.
frozen = stats.lognorm(1)
# Call one of its methods that does not take any keyword arguments.
m1 = frozen.moment(2)
# Now call a method that takes a keyword argument.
frozen.stats(moments='mvsk')
# Call moment(2) again.
# After calling stats(), the following was raising an exception.
# So this test passes if the following does not raise an exception.
m2 = frozen.moment(2)
# The following should also be true, of course. But it is not
# the focus of this test.
assert_equal(m1, m2)
def test_ab(self):
# test that the support of a frozen distribution
# (i) remains frozen even if it changes for the original one
# (ii) is actually correct if the shape parameters are such that
# the values of [a, b] are not the default [0, inf]
# take a genpareto as an example where the support
# depends on the value of the shape parameter:
# for c > 0: a, b = 0, inf
# for c < 0: a, b = 0, -1/c
c = -0.1
rv = stats.genpareto(c=c)
a, b = rv.dist._get_support(c)
assert_equal([a, b], [0., 10.])
c = 0.1
stats.genpareto.pdf(0, c=c)
assert_equal(rv.dist._get_support(c), [0, np.inf])
c = -0.1
rv = stats.genpareto(c=c)
a, b = rv.dist._get_support(c)
assert_equal([a, b], [0., 10.])
c = 0.1
stats.genpareto.pdf(0, c) # this should NOT change genpareto.b
assert_equal((rv.dist.a, rv.dist.b), stats.genpareto._get_support(c))
rv1 = stats.genpareto(c=0.1)
assert_(rv1.dist is not rv.dist)
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
rv = stats.genpareto(c=c)
a, b = rv.a, rv.b
assert_equal(a, 0.)
assert_(np.isposinf(b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.)
a, b = stats.genpareto._get_support(c)
assert_allclose([a, b], [0., 0.5])
def test_rv_frozen_in_namespace(self):
# Regression test for gh-3522
assert_(hasattr(stats.distributions, 'rv_frozen'))
def test_random_state(self):
# only check that the random_state attribute exists,
frozen = stats.norm()
assert_(hasattr(frozen, 'random_state'))
# ... that it can be set,
frozen.random_state = 42
assert_equal(frozen.random_state.get_state(),
np.random.RandomState(42).get_state())
# ... and that .rvs method accepts it as an argument
rndm = np.random.RandomState(1234)
frozen.rvs(size=8, random_state=rndm)
def test_pickling(self):
# test that a frozen instance pickles and unpickles
# (this method is a clone of common_tests.check_pickling)
beta = stats.beta(2.3098496451481823, 0.62687954300963677)
poiss = stats.poisson(3.)
sample = stats.rv_discrete(values=([0, 1, 2, 3],
[0.1, 0.2, 0.3, 0.4]))
for distfn in [beta, poiss, sample]:
distfn.random_state = 1234
distfn.rvs(size=8)
s = pickle.dumps(distfn)
r0 = distfn.rvs(size=8)
unpickled = pickle.loads(s)
r1 = unpickled.rvs(size=8)
assert_equal(r0, r1)
# also smoke test some methods
medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]
assert_equal(medians[0], medians[1])
assert_equal(distfn.cdf(medians[0]),
unpickled.cdf(medians[1]))
def test_expect(self):
# smoke test the expect method of the frozen distribution
# only take a gamma w/loc and scale and poisson with loc specified
def func(x):
return x
gm = stats.gamma(a=2, loc=3, scale=4)
gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
lb=1, ub=2, conditional=True)
assert_allclose(gm_val, gamma_val)
p = stats.poisson(3, loc=4)
p_val = p.expect(func)
poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
assert_allclose(p_val, poisson_val)
class TestExpect(object):
# Test for expect method.
#
# Uses normal distribution and beta distribution for finite bounds, and
# hypergeom for discrete distribution with finite support
def test_norm(self):
v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
assert_almost_equal(v, 4, decimal=14)
        m = stats.norm.expect(lambda x: x, loc=5, scale=2)
assert_almost_equal(m, 5, decimal=14)
lb = stats.norm.ppf(0.05, loc=5, scale=2)
ub = stats.norm.ppf(0.95, loc=5, scale=2)
prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
assert_almost_equal(prob90, 0.9, decimal=14)
prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
conditional=True)
assert_almost_equal(prob90c, 1., decimal=14)
def test_beta(self):
# case with finite support interval
v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5),
loc=5, scale=2)
assert_almost_equal(v, 1./18., decimal=13)
m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)
assert_almost_equal(m, 19/3., decimal=13)
ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,
scale=2., lb=lb, ub=ub, conditional=False)
assert_almost_equal(prob90, 0.9, decimal=13)
prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,
scale=2, lb=lb, ub=ub, conditional=True)
assert_almost_equal(prob90c, 1., decimal=13)
def test_hypergeom(self):
# test case with finite bounds
# without specifying bounds
m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
assert_almost_equal(m, m_true, decimal=13)
v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
loc=5.)
assert_almost_equal(v, v_true, decimal=14)
# with bounds, bounds equal to shifted support
v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,
args=(20, 10, 8),
loc=5., lb=5, ub=13)
assert_almost_equal(v_bounds, v_true, decimal=14)
# drop boundary points
prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
loc=5., lb=6, ub=12)
assert_almost_equal(prob_bounds, prob_true, decimal=13)
# conditional
prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
lb=6, ub=12, conditional=True)
assert_almost_equal(prob_bc, 1, decimal=14)
# check simple integral
prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
lb=0, ub=8)
assert_almost_equal(prob_b, 1, decimal=13)
def test_poisson(self):
# poisson, use lower bound only
prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
conditional=False)
prob_b_true = 1-stats.poisson.cdf(2, 2)
assert_almost_equal(prob_bounds, prob_b_true, decimal=14)
prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
conditional=True)
assert_almost_equal(prob_lb, 1, decimal=14)
def test_genhalflogistic(self):
# genhalflogistic, changes upper bound of support in _argcheck
# regression test for gh-2622
halflog = stats.genhalflogistic
# check consistency when calling expect twice with the same input
res1 = halflog.expect(args=(1.5,))
halflog.expect(args=(0.5,))
res2 = halflog.expect(args=(1.5,))
assert_almost_equal(res1, res2, decimal=14)
def test_rice_overflow(self):
        # rice.pdf(999, 0.74) was inf since special.i0 silently overflows
        # check that using i0e fixes it
assert_(np.isfinite(stats.rice.pdf(999, 0.74)))
assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))
def test_logser(self):
# test a discrete distribution with infinite support and loc
p, loc = 0.3, 3
res_0 = stats.logser.expect(lambda k: k, args=(p,))
# check against the correct answer (sum of a geom series)
assert_allclose(res_0,
p / (p - 1.) / np.log(1. - p), atol=1e-15)
# now check it with `loc`
res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)
assert_allclose(res_l, res_0 + loc, atol=1e-15)
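        # The closed form used above follows from the log-series pmf
        # p**k / (-k*log(1 - p)):
        #     E[k] = sum_k k * p**k / (-k*log(1 - p))
        #          = (1/-log(1 - p)) * p/(1 - p)
        #          = p / ((p - 1) * log(1 - p))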
def test_skellam(self):
# Use a discrete distribution w/ bi-infinite support. Compute two first
# moments and compare to known values (cf skellam.stats)
p1, p2 = 18, 22
m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))
m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))
assert_allclose(m1, p1 - p2, atol=1e-12)
assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)
def test_randint(self):
# Use a discrete distribution w/ parameter-dependent support, which
# is larger than the default chunksize
lo, hi = 0, 113
res = stats.randint.expect(lambda x: x, (lo, hi))
assert_allclose(res,
sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)
def test_zipf(self):
# Test that there is no infinite loop even if the sum diverges
assert_warns(RuntimeWarning, stats.zipf.expect,
lambda x: x**2, (2,))
def test_discrete_kwds(self):
# check that discrete expect accepts keywords to control the summation
n0 = stats.poisson.expect(lambda x: 1, args=(2,))
n1 = stats.poisson.expect(lambda x: 1, args=(2,),
maxcount=1001, chunksize=32, tolerance=1e-8)
assert_almost_equal(n0, n1, decimal=14)
def test_moment(self):
# test the .moment() method: compute a higher moment and compare to
# a known value
def poiss_moment5(mu):
return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu
for mu in [5, 7]:
m5 = stats.poisson.moment(5, mu)
assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)
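        # The coefficients of poiss_moment5 are Stirling numbers of the
        # second kind: for a Poisson variable, E[X**n] = sum over k of
        # S(n, k) * mu**k, and S(5, 1..5) = (1, 15, 25, 10, 1) yields the
        # polynomial above.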
class TestNct(object):
def test_nc_parameter(self):
# Parameter values c<=0 were not enabled (gh-2402).
        # For negative values of c and for c = 0, rv.cdf(0) below returned nan.
rv = stats.nct(5, 0)
assert_equal(rv.cdf(0), 0.5)
rv = stats.nct(5, -1)
assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
def test_broadcasting(self):
res = stats.nct.pdf(5, np.arange(4, 7)[:, None],
np.linspace(0.1, 1, 4))
expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
[0.00217142, 0.00395366, 0.00683888, 0.01126276],
[0.00153078, 0.00291093, 0.00525206, 0.00900815]])
assert_allclose(res, expected, rtol=1e-5)
def test_variance_gh_issue_2401(self):
# Computation of the variance of a non-central t-distribution resulted
# in a TypeError: ufunc 'isinf' not supported for the input types,
# and the inputs could not be safely coerced to any supported types
# according to the casting rule 'safe'
rv = stats.nct(4, 0)
assert_equal(rv.var(), 2.0)
def test_nct_inf_moments(self):
# n-th moment of nct only exists for df > n
m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
assert_(np.isfinite(m))
assert_equal([v, s, k], [np.inf, np.nan, np.nan])
m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
assert_(np.isfinite([m, v, s]).all())
assert_equal(k, np.nan)
class TestRice(object):
def test_rice_zero_b(self):
# rice distribution should work with b=0, cf gh-2164
x = [0.2, 1., 5.]
assert_(np.isfinite(stats.rice.pdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.logpdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.cdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.logcdf(x, b=0.)).all())
q = [0.1, 0.1, 0.5, 0.9]
assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all())
mvsk = stats.rice.stats(0, moments='mvsk')
assert_(np.isfinite(mvsk).all())
        # furthermore, the pdf is continuous as b -> 0:
        # rice.pdf(x, b -> 0) = x * exp(-x**2/2) + O(b**2)
        # see e.g. Abramowitz & Stegun 9.6.7 & 9.6.10
b = 1e-8
assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b),
atol=b, rtol=0)
def test_rice_rvs(self):
rvs = stats.rice.rvs
assert_equal(rvs(b=3.).size, 1)
assert_equal(rvs(b=3., size=(3, 5)).shape, (3, 5))
class TestErlang(object):
def setup_method(self):
np.random.seed(1234)
def test_erlang_runtimewarning(self):
# erlang should generate a RuntimeWarning if a non-integer
# shape parameter is used.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
# The non-integer shape parameter 1.3 should trigger a
# RuntimeWarning
assert_raises(RuntimeWarning,
stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)
# Calling the fit method with `f0` set to an integer should
# *not* trigger a RuntimeWarning. It should return the same
# values as gamma.fit(...).
data = [0.5, 1.0, 2.0, 4.0]
result_erlang = stats.erlang.fit(data, f0=1)
result_gamma = stats.gamma.fit(data, f0=1)
assert_allclose(result_erlang, result_gamma, rtol=1e-3)
def test_gh_pr_10949_argcheck(self):
assert_equal(stats.erlang.pdf(0.5, a=[1, -1]), stats.gamma.pdf(0.5, a=[1, -1]))
class TestRayleigh(object):
# gh-6227
def test_logpdf(self):
y = stats.rayleigh.logpdf(50)
assert_allclose(y, -1246.0879769945718)
def test_logsf(self):
y = stats.rayleigh.logsf(50)
assert_allclose(y, -1250)
class TestExponWeib(object):
def test_pdf_logpdf(self):
# Regression test for gh-3508.
x = 0.1
a = 1.0
c = 100.0
p = stats.exponweib.pdf(x, a, c)
logp = stats.exponweib.logpdf(x, a, c)
# Expected values were computed with mpmath.
assert_allclose([p, logp],
[1.0000000000000054e-97, -223.35075402042244])
def test_a_is_1(self):
# For issue gh-3508.
# Check that when a=1, the pdf and logpdf methods of exponweib are the
# same as those of weibull_min.
x = np.logspace(-4, -1, 4)
a = 1
c = 100
p = stats.exponweib.pdf(x, a, c)
expected = stats.weibull_min.pdf(x, c)
assert_allclose(p, expected)
logp = stats.exponweib.logpdf(x, a, c)
expected = stats.weibull_min.logpdf(x, c)
assert_allclose(logp, expected)
def test_a_is_1_c_is_1(self):
# When a = 1 and c = 1, the distribution is exponential.
x = np.logspace(-8, 1, 10)
a = 1
c = 1
p = stats.exponweib.pdf(x, a, c)
expected = stats.expon.pdf(x)
assert_allclose(p, expected)
logp = stats.exponweib.logpdf(x, a, c)
expected = stats.expon.logpdf(x)
assert_allclose(logp, expected)
class TestWeibull(object):
def test_logpdf(self):
# gh-6217
y = stats.weibull_min.logpdf(0, 1)
assert_equal(y, 0)
def test_with_maxima_distrib(self):
# Tests for weibull_min and weibull_max.
# The expected values were computed using the symbolic algebra
# program 'maxima' with the package 'distrib', which has
# 'pdf_weibull' and 'cdf_weibull'. The mapping between the
# scipy and maxima functions is as follows:
# -----------------------------------------------------------------
# scipy maxima
# --------------------------------- ------------------------------
# weibull_min.pdf(x, a, scale=b) pdf_weibull(x, a, b)
# weibull_min.logpdf(x, a, scale=b) log(pdf_weibull(x, a, b))
# weibull_min.cdf(x, a, scale=b) cdf_weibull(x, a, b)
# weibull_min.logcdf(x, a, scale=b) log(cdf_weibull(x, a, b))
# weibull_min.sf(x, a, scale=b) 1 - cdf_weibull(x, a, b)
# weibull_min.logsf(x, a, scale=b) log(1 - cdf_weibull(x, a, b))
#
# weibull_max.pdf(x, a, scale=b) pdf_weibull(-x, a, b)
# weibull_max.logpdf(x, a, scale=b) log(pdf_weibull(-x, a, b))
# weibull_max.cdf(x, a, scale=b) 1 - cdf_weibull(-x, a, b)
# weibull_max.logcdf(x, a, scale=b) log(1 - cdf_weibull(-x, a, b))
# weibull_max.sf(x, a, scale=b) cdf_weibull(-x, a, b)
# weibull_max.logsf(x, a, scale=b) log(cdf_weibull(-x, a, b))
# -----------------------------------------------------------------
x = 1.5
a = 2.0
b = 3.0
# weibull_min
p = stats.weibull_min.pdf(x, a, scale=b)
assert_allclose(p, np.exp(-0.25)/3)
lp = stats.weibull_min.logpdf(x, a, scale=b)
assert_allclose(lp, -0.25 - np.log(3))
c = stats.weibull_min.cdf(x, a, scale=b)
assert_allclose(c, -special.expm1(-0.25))
lc = stats.weibull_min.logcdf(x, a, scale=b)
assert_allclose(lc, np.log(-special.expm1(-0.25)))
s = stats.weibull_min.sf(x, a, scale=b)
assert_allclose(s, np.exp(-0.25))
ls = stats.weibull_min.logsf(x, a, scale=b)
assert_allclose(ls, -0.25)
# Also test using a large value x, for which computing the survival
# function using the CDF would result in 0.
s = stats.weibull_min.sf(30, 2, scale=3)
assert_allclose(s, np.exp(-100))
ls = stats.weibull_min.logsf(30, 2, scale=3)
assert_allclose(ls, -100)
# weibull_max
x = -1.5
p = stats.weibull_max.pdf(x, a, scale=b)
assert_allclose(p, np.exp(-0.25)/3)
lp = stats.weibull_max.logpdf(x, a, scale=b)
assert_allclose(lp, -0.25 - np.log(3))
c = stats.weibull_max.cdf(x, a, scale=b)
assert_allclose(c, np.exp(-0.25))
lc = stats.weibull_max.logcdf(x, a, scale=b)
assert_allclose(lc, -0.25)
s = stats.weibull_max.sf(x, a, scale=b)
assert_allclose(s, -special.expm1(-0.25))
ls = stats.weibull_max.logsf(x, a, scale=b)
assert_allclose(ls, np.log(-special.expm1(-0.25)))
# Also test using a value of x close to 0, for which computing the
# survival function using the CDF would result in 0.
s = stats.weibull_max.sf(-1e-9, 2, scale=3)
assert_allclose(s, -special.expm1(-1/9000000000000000000))
ls = stats.weibull_max.logsf(-1e-9, 2, scale=3)
assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000)))
class TestRdist(object):
def test_rdist_cdf_gh1285(self):
# check workaround in rdist._cdf for issue gh-1285.
distfn = stats.rdist
values = [0.001, 0.5, 0.999]
assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0),
values, decimal=5)
def test_rdist_beta(self):
# rdist is a special case of stats.beta
x = np.linspace(-0.99, 0.99, 10)
c = 2.7
assert_almost_equal(0.5*stats.beta(c/2, c/2).pdf((x + 1)/2),
stats.rdist(c).pdf(x))
class TestTrapz(object):
def test_reduces_to_triang(self):
modes = [0, 0.3, 0.5, 1]
for mode in modes:
x = [0, mode, 1]
assert_almost_equal(stats.trapz.pdf(x, mode, mode),
stats.triang.pdf(x, mode))
assert_almost_equal(stats.trapz.cdf(x, mode, mode),
stats.triang.cdf(x, mode))
def test_reduces_to_uniform(self):
x = np.linspace(0, 1, 10)
assert_almost_equal(stats.trapz.pdf(x, 0, 1), stats.uniform.pdf(x))
assert_almost_equal(stats.trapz.cdf(x, 0, 1), stats.uniform.cdf(x))
def test_cases(self):
# edge cases
assert_almost_equal(stats.trapz.pdf(0, 0, 0), 2)
assert_almost_equal(stats.trapz.pdf(1, 1, 1), 2)
assert_almost_equal(stats.trapz.pdf(0.5, 0, 0.8),
1.11111111111111111)
assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 1.0),
1.11111111111111111)
# straightforward case
assert_almost_equal(stats.trapz.pdf(0.1, 0.2, 0.8), 0.625)
assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 0.8), 1.25)
assert_almost_equal(stats.trapz.pdf(0.9, 0.2, 0.8), 0.625)
assert_almost_equal(stats.trapz.cdf(0.1, 0.2, 0.8), 0.03125)
assert_almost_equal(stats.trapz.cdf(0.2, 0.2, 0.8), 0.125)
assert_almost_equal(stats.trapz.cdf(0.5, 0.2, 0.8), 0.5)
assert_almost_equal(stats.trapz.cdf(0.9, 0.2, 0.8), 0.96875)
assert_almost_equal(stats.trapz.cdf(1.0, 0.2, 0.8), 1.0)
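        # All of the above follow from the trapezoidal density on [0, 1]
        # with corners c and d: the plateau height is u = 2/(1 + d - c),
        # the rising edge is pdf(x) = u*x/c, and the falling edge is
        # pdf(x) = u*(1 - x)/(1 - d).  E.g. for (c, d) = (0.2, 0.8):
        # u = 2/1.6 = 1.25 and pdf(0.1) = 1.25 * 0.1/0.2 = 0.625.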
def test_trapz_vect(self):
# test that array-valued shapes and arguments are handled
c = np.array([0.1, 0.2, 0.3])
d = np.array([0.5, 0.6])[:, None]
x = np.array([0.15, 0.25, 0.9])
v = stats.trapz.pdf(x, c, d)
cc, dd, xx = np.broadcast_arrays(c, d, x)
res = np.empty(xx.size, dtype=xx.dtype)
ind = np.arange(xx.size)
for i, x1, c1, d1 in zip(ind, xx.ravel(), cc.ravel(), dd.ravel()):
res[i] = stats.trapz.pdf(x1, c1, d1)
assert_allclose(v, res.reshape(v.shape), atol=1e-15)
class TestTriang(object):
def test_edge_cases(self):
with np.errstate(all='raise'):
assert_equal(stats.triang.pdf(0, 0), 2.)
assert_equal(stats.triang.pdf(0.5, 0), 1.)
assert_equal(stats.triang.pdf(1, 0), 0.)
assert_equal(stats.triang.pdf(0, 1), 0)
assert_equal(stats.triang.pdf(0.5, 1), 1.)
assert_equal(stats.triang.pdf(1, 1), 2)
assert_equal(stats.triang.cdf(0., 0.), 0.)
assert_equal(stats.triang.cdf(0.5, 0.), 0.75)
assert_equal(stats.triang.cdf(1.0, 0.), 1.0)
assert_equal(stats.triang.cdf(0., 1.), 0.)
assert_equal(stats.triang.cdf(0.5, 1.), 0.25)
assert_equal(stats.triang.cdf(1., 1.), 1)
class TestMielke(object):
def test_moments(self):
k, s = 4.642, 0.597
# n-th moment exists only if n < s
assert_equal(stats.mielke(k, s).moment(1), np.inf)
assert_equal(stats.mielke(k, 1.0).moment(1), np.inf)
assert_(np.isfinite(stats.mielke(k, 1.01).moment(1)))
def test_burr_equivalence(self):
x = np.linspace(0.01, 100, 50)
k, s = 2.45, 5.32
assert_allclose(stats.burr.pdf(x, s, k/s), stats.mielke.pdf(x, k, s))
class TestBurr(object):
def test_endpoints_7491(self):
# gh-7491
        # Compute the pdf at the left endpoint dist.a.
data = [
[stats.fisk, (1,), 1],
[stats.burr, (0.5, 2), 1],
[stats.burr, (1, 1), 1],
[stats.burr, (2, 0.5), 1],
[stats.burr12, (1, 0.5), 0.5],
[stats.burr12, (1, 1), 1.0],
[stats.burr12, (1, 2), 2.0]]
ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
correct = [_correct_ for _f, _args, _correct_ in data]
assert_array_almost_equal(ans, correct)
ans = [_f.logpdf(_f.a, *_args) for _f, _args, _ in data]
correct = [np.log(_correct_) for _f, _args, _correct_ in data]
assert_array_almost_equal(ans, correct)
def test_burr_stats_9544(self):
# gh-9544. Test from gh-9978
c, d = 5.0, 3
mean, variance = stats.burr(c, d).stats()
# mean = sc.beta(3 + 1/5, 1. - 1/5) * 3 = 1.4110263...
# var = sc.beta(3 + 2 / 5, 1. - 2 / 5) * 3 - (sc.beta(3 + 1 / 5, 1. - 1 / 5) * 3) ** 2
mean_hc, variance_hc = 1.4110263183925857, 0.22879948026191643
assert_allclose(mean, mean_hc)
assert_allclose(variance, variance_hc)
def test_burr_nan_mean_var_9544(self):
# gh-9544. Test from gh-9978
c, d = 0.5, 3
mean, variance = stats.burr(c, d).stats()
assert_(np.isnan(mean))
assert_(np.isnan(variance))
c, d = 1.5, 3
mean, variance = stats.burr(c, d).stats()
assert_(np.isfinite(mean))
assert_(np.isnan(variance))
c, d = 0.5, 3
e1, e2, e3, e4 = stats.burr._munp(np.array([1, 2, 3, 4]), c, d)
assert_(np.isnan(e1))
assert_(np.isnan(e2))
assert_(np.isnan(e3))
assert_(np.isnan(e4))
c, d = 1.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isnan(e2))
assert_(np.isnan(e3))
assert_(np.isnan(e4))
c, d = 2.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isfinite(e2))
assert_(np.isnan(e3))
assert_(np.isnan(e4))
c, d = 3.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isfinite(e2))
assert_(np.isfinite(e3))
assert_(np.isnan(e4))
c, d = 4.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isfinite(e2))
assert_(np.isfinite(e3))
assert_(np.isfinite(e4))
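        # The pattern above reflects the moment condition for scipy's burr
        # (Burr Type III): the n-th raw moment is finite only for n < c,
        # so raising c through 0.5, 1.5, 2.5, 3.5, 4.5 (with d = 3) makes
        # one more of e1..e4 finite at each step.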
def test_540_567():
# test for nan returned in tickets 540, 567
assert_almost_equal(stats.norm.cdf(-1.7624320982), 0.03899815971089126,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(-1.7624320983), 0.038998159702449846,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309,
scale=0.204423758009),
0.98353464004309321,
decimal=10, err_msg='test_540_567')
def test_regression_ticket_1316():
# The following was raising an exception, because _construct_default_doc()
# did not handle the default keyword extradoc=None. See ticket #1316.
stats._continuous_distns.gamma_gen(name='gamma')
def test_regression_ticket_1326():
# adjust to avoid nan with 0*log(0)
assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14)
def test_regression_tukey_lambda():
# Make sure that Tukey-Lambda distribution correctly handles
# non-positive lambdas.
x = np.linspace(-5.0, 5.0, 101)
olderr = np.seterr(divide='ignore')
try:
for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
p = stats.tukeylambda.pdf(x, lam)
assert_((p != 0.0).all())
assert_(~np.isnan(p).all())
lam = np.array([[-1.0], [0.0], [2.0]])
p = stats.tukeylambda.pdf(x, lam)
finally:
np.seterr(**olderr)
assert_(~np.isnan(p).all())
assert_((p[0] != 0.0).all())
assert_((p[1] != 0.0).all())
assert_((p[2] != 0.0).any())
assert_((p[2] == 0.0).any())
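    # For lam = 2.0 the support of the Tukey-Lambda distribution is the
    # finite interval [-1/lam, 1/lam] = [-0.5, 0.5], so over the grid
    # x in [-5, 5] that pdf row contains both zero and nonzero entries,
    # which is what the last two asserts verify.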
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_regression_ticket_1421():
assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
assert_('pmf(x,' in stats.poisson.__doc__)
def test_nan_arguments_gh_issue_1362():
with np.errstate(invalid='ignore'):
assert_(np.isnan(stats.t.logcdf(1, np.nan)))
assert_(np.isnan(stats.t.cdf(1, np.nan)))
assert_(np.isnan(stats.t.logsf(1, np.nan)))
assert_(np.isnan(stats.t.sf(1, np.nan)))
assert_(np.isnan(stats.t.pdf(1, np.nan)))
assert_(np.isnan(stats.t.logpdf(1, np.nan)))
assert_(np.isnan(stats.t.ppf(1, np.nan)))
assert_(np.isnan(stats.t.isf(1, np.nan)))
assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
np.random.seed(5678)
true = np.array([0.25, 0., 0.5])
x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)
olderr = np.seterr(divide='ignore')
try:
params = np.array(stats.lognorm.fit(x, floc=0.))
finally:
np.seterr(**olderr)
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))
assert_almost_equal(params, true, decimal=2)
np.random.seed(5678)
loc = 1
floc = 0.9
x = stats.norm.rvs(loc, 2., size=100)
params = np.array(stats.norm.fit(x, floc=floc))
expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])
assert_almost_equal(params, expected, decimal=4)
def test_regression_ticket_1530():
# Check the starting value works for Cauchy distribution fit.
np.random.seed(654321)
rvs = stats.cauchy.rvs(size=100)
params = stats.cauchy.fit(rvs)
expected = (0.045, 1.142)
assert_almost_equal(params, expected, decimal=1)
def test_gh_pr_4806():
# Check starting values for Cauchy distribution fit.
np.random.seed(1234)
x = np.random.randn(42)
for offset in 10000.0, 1222333444.0:
loc, scale = stats.cauchy.fit(x + offset)
assert_allclose(loc, offset, atol=1.0)
assert_allclose(scale, 0.6, atol=1.0)
def test_tukeylambda_stats_ticket_1545():
# Some test for the variance and kurtosis of the Tukey Lambda distr.
# See test_tukeylamdba_stats.py for more tests.
mv = stats.tukeylambda.stats(0, moments='mvsk')
    # Known exact values (lam=0 reduces to the standard logistic distribution):
expected = [0, np.pi**2/3, 0, 1.2]
assert_almost_equal(mv, expected, decimal=10)
mv = stats.tukeylambda.stats(3.13, moments='mvsk')
# 'expected' computed with mpmath.
expected = [0, 0.0269220858861465102, 0, -0.898062386219224104]
assert_almost_equal(mv, expected, decimal=10)
mv = stats.tukeylambda.stats(0.14, moments='mvsk')
# 'expected' computed with mpmath.
expected = [0, 2.11029702221450250, 0, -0.02708377353223019456]
assert_almost_equal(mv, expected, decimal=10)
def test_poisson_logpmf_ticket_1436():
assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))
def test_powerlaw_stats():
"""Test the powerlaw stats function.
This unit test is also a regression test for ticket 1548.
The exact values are:
mean:
mu = a / (a + 1)
variance:
sigma**2 = a / ((a + 2) * (a + 1) ** 2)
skewness:
One formula (see https://en.wikipedia.org/wiki/Skewness) is
gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3
A short calculation shows that E[X**k] is a / (a + k), so gamma_1
can be implemented as
n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3
d = sqrt(a/((a+2)*(a+1)**2)) ** 3
gamma_1 = n/d
Either by simplifying, or by a direct calculation of mu_3 / sigma**3,
one gets the more concise formula:
gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
kurtosis: (See https://en.wikipedia.org/wiki/Kurtosis)
The excess kurtosis is
gamma_2 = mu_4 / sigma**4 - 3
A bit of calculus and algebra (sympy helps) shows that
mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4))
so
gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3
which can be rearranged to
gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))
"""
cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),
(2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]
for a, exact_mvsk in cases:
mvsk = stats.powerlaw.stats(a, moments="mvsk")
assert_array_almost_equal(mvsk, exact_mvsk)
def test_powerlaw_edge():
# Regression test for gh-3986.
p = stats.powerlaw.logpdf(0, 1)
assert_equal(p, 0.0)
def test_exponpow_edge():
# Regression test for gh-3982.
p = stats.exponpow.logpdf(0, 1)
assert_equal(p, 0.0)
# Check pdf and logpdf at x = 0 for other values of b.
p = stats.exponpow.pdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 1.0, 0.0])
p = stats.exponpow.logpdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 0.0, -np.inf])
def test_gengamma_edge():
# Regression test for gh-3985.
p = stats.gengamma.pdf(0, 1, 1)
assert_equal(p, 1.0)
# Regression tests for gh-4724.
p = stats.gengamma._munp(-2, 200, 1.)
assert_almost_equal(p, 1./199/198)
p = stats.gengamma._munp(-2, 10, 1.)
assert_almost_equal(p, 1./9/8)
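    # Both spot checks follow from the raw-moment formula for gengamma,
    # E[X**n] = gamma(a + n/c) / gamma(a): with c = 1 this is the ordinary
    # gamma distribution, so E[X**-2] = 1/((a - 1)*(a - 2)), i.e.
    # 1/(199*198) for a = 200 and 1/(9*8) for a = 10.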
def test_ksone_fit_freeze():
# Regression test for ticket #1638.
d = np.array(
[-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
-0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
-0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
-0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
-0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
-0.06037974, 0.37670779, -0.21684405])
try:
olderr = np.seterr(invalid='ignore')
with suppress_warnings() as sup:
sup.filter(IntegrationWarning,
"The maximum number of subdivisions .50. has been "
"achieved.")
sup.filter(RuntimeWarning,
"floating point number truncated to an integer")
stats.ksone.fit(d)
finally:
np.seterr(**olderr)
def test_norm_logcdf():
# Test precision of the logcdf of the normal distribution.
# This precision was enhanced in ticket 1614.
x = -np.asarray(list(range(0, 120, 4)))
# Values from R
expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
-131.69539607, -203.91715537, -292.09872100, -396.25241451,
-516.38564863, -652.50322759, -804.60844201, -972.70364403,
-1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
-2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
-3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
-4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
-6277.63751711, -6733.67260303]
assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)
# also test the complex-valued code path
assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8)
# test the accuracy: d(logcdf)/dx = pdf / cdf \equiv exp(logpdf - logcdf)
deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag
deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x))
assert_allclose(deriv, deriv_expected, atol=1e-10)
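    # The derivative check above uses complex-step differentiation: for an
    # analytic function f, imag(f(x + 1j*h))/h equals f'(x) up to O(h**2)
    # and involves no subtractive cancellation, so a step as small as
    # h = 1e-10 is numerically safe.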
def test_levy_cdf_ppf():
# Test levy.cdf, including small arguments.
x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])
# Expected values were calculated separately with mpmath.
# E.g.
# >>> mpmath.mp.dps = 100
# >>> x = mpmath.mp.mpf('0.01')
# >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))
expected = np.array([0.9747728793699604,
0.3173105078629141,
0.1572992070502851,
0.0015654022580025495,
1.523970604832105e-23,
1.795832784800726e-219])
y = stats.levy.cdf(x)
assert_allclose(y, expected, rtol=1e-10)
# ppf(expected) should get us back to x.
xx = stats.levy.ppf(expected)
assert_allclose(xx, x, rtol=1e-13)
def test_hypergeom_interval_1802():
# these two had endless loops
assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757),
(152.0, 197.0))
assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757),
(152.0, 197.0))
# this was working also before
assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757),
(153.0, 196.0))
# degenerate case .a == .b
assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
np.random.seed(1234)
# Check that a TypeError is raised when too many args are given to a method
# Regression test for ticket 1815.
x = np.linspace(0.1, 0.7, num=5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)
# These should not give errors
stats.gamma.pdf(x, 2, 3) # loc=3
stats.gamma.pdf(x, 2, 3, 4) # loc=3, scale=4
stats.gamma.stats(2., 3)
stats.gamma.stats(2., 3, 4)
stats.gamma.stats(2., 3, 4, 'mv')
stats.gamma.rvs(2., 3, 4, 5)
stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)
# Also for a discrete distribution
stats.geom.pmf(x, 2, loc=3) # no error, loc=3
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)
# And for distributions with 0, 2 and 3 args respectively
assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
stats.ncf.pdf(x, 3, 4, 5, 6, 1.0) # 3 args, plus loc/scale
def test_ncx2_tails_ticket_955():
# Trac #955 -- check that the cdf computed by special functions
# matches the integrated pdf
a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
assert_allclose(a, b, rtol=1e-3, atol=0)
def test_ncx2_tails_pdf():
    # ncx2.pdf does not return nans in extreme tails (example from gh-1577)
# NB: this is to check that nan_to_num is not needed in ncx2.pdf
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered in log")
assert_equal(stats.ncx2.pdf(1, np.arange(340, 350), 2), 0)
logval = stats.ncx2.logpdf(1, np.arange(340, 350), 2)
assert_(np.isneginf(logval).all())
@pytest.mark.parametrize('method, expected', [
('cdf', np.array([2.497951336e-09, 3.437288941e-10])),
('pdf', np.array([1.238579980e-07, 1.710041145e-08])),
('logpdf', np.array([-15.90413011, -17.88416331])),
('ppf', np.array([4.865182052, 7.017182271]))
])
def test_ncx2_zero_nc(method, expected):
# gh-5441
# ncx2 with nc=0 is identical to chi2
# Comparison to R (v3.5.1)
# > options(digits=10)
# > pchisq(0.1, df=10, ncp=c(0,4))
# > dchisq(0.1, df=10, ncp=c(0,4))
# > dchisq(0.1, df=10, ncp=c(0,4), log=TRUE)
# > qchisq(0.1, df=10, ncp=c(0,4))
result = getattr(stats.ncx2, method)(0.1, nc=[0, 4], df=10)
assert_allclose(result, expected, atol=1e-15)
def test_ncx2_zero_nc_rvs():
# gh-5441
# ncx2 with nc=0 is identical to chi2
result = stats.ncx2.rvs(df=10, nc=0, random_state=1)
expected = stats.chi2.rvs(df=10, random_state=1)
assert_allclose(result, expected, atol=1e-15)
def test_foldnorm_zero():
# Parameter value c=0 was not enabled, see gh-2399.
rv = stats.foldnorm(0, scale=1)
assert_equal(rv.cdf(0), 0) # rv.cdf(0) previously resulted in: nan
def test_stats_shapes_argcheck():
# stats method was failing for vector shapes if some of the values
# were outside of the allowed range, see gh-2678
mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5) # 0 is not a legal `a`
mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)
assert_equal(mv2_augmented, mv3)
# -1 is not a legal shape parameter
mv3 = stats.lognorm.stats([2, 2.4, -1])
mv2 = stats.lognorm.stats([2, 2.4])
mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)
assert_equal(mv2_augmented, mv3)
# FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
# stats method with multiple shape parameters is not properly vectorized
# anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, a):
return 42
class _distr2_gen(stats.rv_continuous):
def _cdf(self, x, a):
return 42 * a + x
class _distr3_gen(stats.rv_continuous):
def _pdf(self, x, a, b):
return a + b
def _cdf(self, x, a):
# Different # of shape params from _pdf, to be able to check that
        # inspection catches the inconsistency.
return 42 * a + x
class _distr6_gen(stats.rv_continuous):
# Two shape parameters (both _pdf and _cdf defined, consistent shapes.)
def _pdf(self, x, a, b):
return a*x + b
def _cdf(self, x, a, b):
return 42 * a + x
class TestSubclassingExplicitShapes(object):
# Construct a distribution w/ explicit shapes parameter and test it.
def test_correct_shapes(self):
dummy_distr = _distr_gen(name='dummy', shapes='a')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_wrong_shapes_1(self):
dummy_distr = _distr_gen(name='dummy', shapes='A')
assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))
def test_wrong_shapes_2(self):
dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
dct = dict(a=1, b=2, c=3)
assert_raises(TypeError, dummy_distr.pdf, 1, **dct)
def test_shapes_string(self):
# shapes must be a string
dct = dict(name='dummy', shapes=42)
assert_raises(TypeError, _distr_gen, **dct)
def test_shapes_identifiers_1(self):
# shapes must be a comma-separated list of valid python identifiers
dct = dict(name='dummy', shapes='(!)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_2(self):
dct = dict(name='dummy', shapes='4chan')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_3(self):
dct = dict(name='dummy', shapes='m(fti)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_nodefaults(self):
dct = dict(name='dummy', shapes='a=2')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_args(self):
dct = dict(name='dummy', shapes='*args')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_kwargs(self):
dct = dict(name='dummy', shapes='**kwargs')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_keywords(self):
# python keywords cannot be used for shape parameters
dct = dict(name='dummy', shapes='a, b, c, lambda')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_signature(self):
# test explicit shapes which agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a')
assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)
def test_shapes_signature_inconsistent(self):
# test explicit shapes which do not agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a, b')
assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))
def test_star_args(self):
# test _pdf with only starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg
dist = _dist_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))
def test_star_args_2(self):
# test _pdf with named & starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, offset, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg + offset
dist = _dist_gen(shapes='offset, extra_kwarg')
assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
stats.norm.pdf(0.5)*33 + 111)
assert_equal(dist.pdf(0.5, 111, 33),
stats.norm.pdf(0.5)*33 + 111)
def test_extra_kwarg(self):
# **kwargs to _pdf are ignored.
# this is a limitation of the framework (_pdf(x, *goodargs))
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, *args, **kwargs):
# _pdf should handle *args, **kwargs itself. Here "handling"
# is ignoring *args and looking for ``extra_kwarg`` and using
# that.
extra_kwarg = kwargs.pop('extra_kwarg', 1)
return stats.norm._pdf(x) * extra_kwarg
dist = _distr_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))
    def test_shapes_empty_string(self):
# shapes='' is equivalent to shapes=None
class _dist_gen(stats.rv_continuous):
def _pdf(self, x):
return stats.norm.pdf(x)
dist = _dist_gen(shapes='')
assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
class TestSubclassingNoShapes(object):
# Construct a distribution w/o explicit shapes parameter and test it.
def test_only__pdf(self):
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_only__cdf(self):
# _pdf is determined from _cdf by taking numerical derivative
dummy_distr = _distr2_gen(name='dummy')
assert_almost_equal(dummy_distr.pdf(1, a=1), 1)
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_signature_inspection(self):
# check that _pdf signature inspection works correctly, and is used in
# the class docstring
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.numargs, 1)
assert_equal(dummy_distr.shapes, 'a')
res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_signature_inspection_2args(self):
# same for 2 shape params and both _pdf and _cdf defined
dummy_distr = _distr6_gen(name='dummy')
assert_equal(dummy_distr.numargs, 2)
assert_equal(dummy_distr.shapes, 'a, b')
res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
def test_signature_inspection_2args_incorrect_shapes(self):
# both _pdf and _cdf defined, but shapes are inconsistent: raises
assert_raises(TypeError, _distr3_gen, name='dummy')
def test_defaults_raise(self):
# default arguments should raise
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a=42):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_starargs_raise(self):
# without explicit shapes, *args are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, *args):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_kwargs_raise(self):
# without explicit shapes, **kwargs are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, **kwargs):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_docstrings():
badones = [r',\s*,', r'\(\s*,', r'^\s*:']
for distname in stats.__all__:
dist = getattr(stats, distname)
if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
for regex in badones:
assert_(re.search(regex, dist.__doc__) is None)
def test_infinite_input():
assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0)
assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1)
def test_lomax_accuracy():
# regression test for gh-4033
p = stats.lomax.ppf(stats.lomax.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_gompertz_accuracy():
# Regression test for gh-4031
p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_truncexpon_accuracy():
# regression test for gh-4035
p = stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_rayleigh_accuracy():
# regression test for gh-4034
p = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)
assert_almost_equal(p, 9.0, decimal=15)
def test_genextreme_give_no_warnings():
"""regression test for gh-6219"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
stats.genextreme.cdf(.5, 0)
stats.genextreme.pdf(.5, 0)
stats.genextreme.ppf(.5, 0)
stats.genextreme.logpdf(-np.inf, 0.0)
number_of_warnings_thrown = len(w)
assert_equal(number_of_warnings_thrown, 0)
def test_genextreme_entropy():
# regression test for gh-5181
euler_gamma = 0.5772156649015329
h = stats.genextreme.entropy(-1.0)
assert_allclose(h, 2*euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(0)
assert_allclose(h, euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(1.0)
assert_equal(h, 1)
h = stats.genextreme.entropy(-2.0, scale=10)
assert_allclose(h, euler_gamma*3 + np.log(10) + 1, rtol=1e-14)
h = stats.genextreme.entropy(10)
assert_allclose(h, -9*euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(-10)
assert_allclose(h, 11*euler_gamma + 1, rtol=1e-14)
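    # All of the cases above instantiate the closed form
    #     h = euler_gamma*(1 - c) + log(scale) + 1
    # (written with scipy's sign convention for the shape parameter c).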
def test_genextreme_sf_isf():
# Expected values were computed using mpmath:
#
# import mpmath
#
# def mp_genextreme_sf(x, xi, mu=0, sigma=1):
# # Formula from wikipedia, which has a sign convention for xi that
# # is the opposite of scipy's shape parameter.
# if xi != 0:
# t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi)
# else:
# t = mpmath.exp(-(x - mu)/sigma)
# return 1 - mpmath.exp(-t)
#
# >>> mpmath.mp.dps = 1000
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("1e8"), mpmath.mp.mpf("0.125"))
# >>> float(s)
# 1.6777205262585625e-57
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("-0.125"))
# >>> float(s)
# 1.52587890625e-21
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("0"))
# >>> float(s)
# 0.00034218086528426593
x = 1e8
s = stats.genextreme.sf(x, -0.125)
assert_allclose(s, 1.6777205262585625e-57)
x2 = stats.genextreme.isf(s, -0.125)
assert_allclose(x2, x)
x = 7.98
s = stats.genextreme.sf(x, 0.125)
assert_allclose(s, 1.52587890625e-21)
x2 = stats.genextreme.isf(s, 0.125)
assert_allclose(x2, x)
x = 7.98
s = stats.genextreme.sf(x, 0)
assert_allclose(s, 0.00034218086528426593)
x2 = stats.genextreme.isf(s, 0)
assert_allclose(x2, x)
def test_burr12_ppf_small_arg():
prob = 1e-16
quantile = stats.burr12.ppf(prob, 2, 3)
# The expected quantile was computed using mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 100
# >>> prob = mpmath.mpf('1e-16')
# >>> c = mpmath.mpf(2)
# >>> d = mpmath.mpf(3)
# >>> float(((1-prob)**(-1/d) - 1)**(1/c))
# 5.7735026918962575e-09
assert_allclose(quantile, 5.7735026918962575e-09)
def test_crystalball_function():
"""
All values are calculated using the independent implementation of the
ROOT framework (see https://root.cern.ch/).
Corresponding ROOT code is given in the comments.
"""
X = np.linspace(-5.0, 5.0, 21)[:-1]
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_pdf(x, 1.0, 2.0, 1.0) << ", ";
calculated = stats.crystalball.pdf(X, beta=1.0, m=2.0)
expected = np.array([0.0202867, 0.0241428, 0.0292128, 0.0360652, 0.045645,
0.059618, 0.0811467, 0.116851, 0.18258, 0.265652,
0.301023, 0.265652, 0.18258, 0.097728, 0.0407391,
0.013226, 0.00334407, 0.000658486, 0.000100982,
1.20606e-05])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 1.0) << ", ";
calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0)
expected = np.array([0.0019648, 0.00279754, 0.00417592, 0.00663121,
0.0114587, 0.0223803, 0.0530497, 0.12726, 0.237752,
0.345928, 0.391987, 0.345928, 0.237752, 0.12726,
0.0530497, 0.0172227, 0.00435458, 0.000857469,
0.000131497, 1.57051e-05])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5) {
# std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 2.0, 0.5);
# std::cout << ", ";
# }
calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
expected = np.array([0.00785921, 0.0111902, 0.0167037, 0.0265249,
0.0423866, 0.0636298, 0.0897324, 0.118876, 0.147944,
0.172964, 0.189964, 0.195994, 0.189964, 0.172964,
0.147944, 0.118876, 0.0897324, 0.0636298, 0.0423866,
0.0265249])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_cdf(x, 1.0, 2.0, 1.0) << ", ";
calculated = stats.crystalball.cdf(X, beta=1.0, m=2.0)
expected = np.array([0.12172, 0.132785, 0.146064, 0.162293, 0.18258,
0.208663, 0.24344, 0.292128, 0.36516, 0.478254,
0.622723, 0.767192, 0.880286, 0.94959, 0.982834,
0.995314, 0.998981, 0.999824, 0.999976, 0.999997])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 1.0) << ", ";
calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0)
expected = np.array([0.00442081, 0.00559509, 0.00730787, 0.00994682,
0.0143234, 0.0223803, 0.0397873, 0.0830763, 0.173323,
0.320592, 0.508717, 0.696841, 0.844111, 0.934357,
0.977646, 0.993899, 0.998674, 0.999771, 0.999969,
0.999997])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5) {
# std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 2.0, 0.5);
# std::cout << ", ";
# }
calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
expected = np.array([0.0176832, 0.0223803, 0.0292315, 0.0397873, 0.0567945,
0.0830763, 0.121242, 0.173323, 0.24011, 0.320592,
0.411731, 0.508717, 0.605702, 0.696841, 0.777324,
0.844111, 0.896192, 0.934357, 0.960639, 0.977646])
assert_allclose(expected, calculated, rtol=0.001)
def test_crystalball_function_moments():
"""
All values are calculated using the pdf formula and the integrate function
of Mathematica
"""
    # The last two (beta, m) pairs test the special case m == beta**2
beta = np.array([2.0, 1.0, 3.0, 2.0, 3.0])
m = np.array([3.0, 3.0, 2.0, 4.0, 9.0])
# The distribution should be correctly normalised
expected_0th_moment = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
calculated_0th_moment = stats.crystalball._munp(0, beta, m)
assert_allclose(expected_0th_moment, calculated_0th_moment, rtol=0.001)
# calculated using wolframalpha.com
# e.g. for beta = 2 and m = 3 we calculate the norm like this:
# integrate exp(-x^2/2) from -2 to infinity +
# integrate (3/2)^3*exp(-2^2/2)*(3/2-2-x)^(-3) from -infinity to -2
norm = np.array([2.5511, 3.01873, 2.51065, 2.53983, 2.507410455])
a = np.array([-0.21992, -3.03265, np.inf, -0.135335, -0.003174])
expected_1th_moment = a / norm
calculated_1th_moment = stats.crystalball._munp(1, beta, m)
assert_allclose(expected_1th_moment, calculated_1th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, 3.2616, 2.519908])
expected_2th_moment = a / norm
calculated_2th_moment = stats.crystalball._munp(2, beta, m)
assert_allclose(expected_2th_moment, calculated_2th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, np.inf, -0.0577668])
expected_3th_moment = a / norm
calculated_3th_moment = stats.crystalball._munp(3, beta, m)
assert_allclose(expected_3th_moment, calculated_3th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, np.inf, 7.78468])
expected_4th_moment = a / norm
calculated_4th_moment = stats.crystalball._munp(4, beta, m)
assert_allclose(expected_4th_moment, calculated_4th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, np.inf, -1.31086])
expected_5th_moment = a / norm
calculated_5th_moment = stats.crystalball._munp(5, beta, m)
assert_allclose(expected_5th_moment, calculated_5th_moment, rtol=0.001)
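# The `norm` constants above can also be reproduced numerically -- a sketch,
# assuming scipy.integrate is importable. The power-law tail integral has the
# closed form A * (m/beta)**(1 - m) / (m - 1), with A = (m/beta)**m *
# exp(-beta**2 / 2), so e.g. for beta=2, m=3:
#
#   from scipy import integrate
#   beta_, m_ = 2.0, 3.0
#   gauss, _ = integrate.quad(lambda x: np.exp(-x**2 / 2), -beta_, np.inf)
#   A = (m_ / beta_)**m_ * np.exp(-beta_**2 / 2)
#   tail = A * (m_ / beta_)**(1 - m_) / (m_ - 1)
#   # gauss + tail ~= 2.5511, i.e. norm[0] above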
def test_ncf_variance():
# Regression test for gh-10658 (incorrect variance formula for ncf).
# The correct value of ncf.var(2, 6, 4), 42.75, can be verified with, for
# example, Wolfram Alpha with the expression
# Variance[NoncentralFRatioDistribution[2, 6, 4]]
# or with the implementation of the noncentral F distribution in the C++
# library Boost.
v = stats.ncf.var(2, 6, 4)
assert_allclose(v, 42.75, rtol=1e-14)
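# For reference, the closed-form variance restored by gh-10658 is, for
# dfn=d1, dfd=d2 (d2 > 4) and noncentrality nc:
#
#   var = 2 * ((d1 + nc)**2 + (d1 + 2*nc) * (d2 - 2)) \
#         / ((d2 - 2)**2 * (d2 - 4)) * (d2 / d1)**2
#
# With d1=2, d2=6, nc=4 this gives 2 * (36 + 40) / (16 * 2) * 9 = 42.75.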
class TestHistogram(object):
def setup_method(self):
np.random.seed(1234)
# We have 8 bins
# [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9)
# But actually np.histogram will put the last 9 also in the [8,9) bin!
# Therefore there is a slight difference below for the last bin, from
# what you might have expected.
histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5,
6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)
self.template = stats.rv_histogram(histogram)
data = stats.norm.rvs(loc=1.0, scale=2.5, size=10000, random_state=123)
norm_histogram = np.histogram(data, bins=50)
self.norm_template = stats.rv_histogram(norm_histogram)
def test_pdf(self):
values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0,
2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0,
4.0/25.0, 4.0/25.0, 5.0/25.0, 5.0/25.0,
4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0,
3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0])
assert_allclose(self.template.pdf(values), pdf_values)
# Test explicitly the corner cases:
# As stated above the pdf in the bin [8,9) is greater than
# one would naively expect because np.histogram put the 9
# into the [8,9) bin.
assert_almost_equal(self.template.pdf(8.0), 3.0/25.0)
assert_almost_equal(self.template.pdf(8.5), 3.0/25.0)
# 9 is outside our defined bins [8,9), hence the pdf is already 0;
# for a continuous distribution this is fine, because a single value
# does not have a finite probability!
assert_almost_equal(self.template.pdf(9.0), 0.0/25.0)
assert_almost_equal(self.template.pdf(10.0), 0.0/25.0)
x = np.linspace(-2, 2, 10)
assert_allclose(self.norm_template.pdf(x),
stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1)
def test_cdf_ppf(self):
values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0,
1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0,
6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0,
15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0,
22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0])
assert_allclose(self.template.cdf(values), cdf_values)
# The first three and the last two values in cdf_values are not unique
assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1])
# Test that cdf and ppf are inverse functions
x = np.linspace(1.0, 9.0, 100)
assert_allclose(self.template.ppf(self.template.cdf(x)), x)
x = np.linspace(0.0, 1.0, 100)
assert_allclose(self.template.cdf(self.template.ppf(x)), x)
x = np.linspace(-2, 2, 10)
assert_allclose(self.norm_template.cdf(x),
stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1)
def test_rvs(self):
N = 10000
sample = self.template.rvs(size=N, random_state=123)
assert_equal(np.sum(sample < 1.0), 0.0)
assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2)
assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2)
assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
assert_equal(np.sum(sample > 9.0), 0.0)
def test_munp(self):
for n in range(4):
assert_allclose(self.norm_template._munp(n),
stats.norm._munp(n, 1.0, 2.5), rtol=0.05)
def test_entropy(self):
assert_allclose(self.norm_template.entropy(),
stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05)
def test_loguniform():
# This test makes sure the "loguniform" alias really is log-uniform
rv = stats.loguniform(10 ** -3, 10 ** 0)
rvs = rv.rvs(size=10000, random_state=42)
vals, _ = np.histogram(np.log10(rvs), bins=10)
assert 900 <= vals.min() <= vals.max() <= 1100
assert np.abs(np.median(vals) - 1000) <= 10
class TestArgus(object):
def test_argus_rvs_large_chi(self):
# test that the algorithm can handle large values of chi
x = stats.argus.rvs(50, size=500, random_state=325)
assert_almost_equal(stats.argus(50).mean(), x.mean(), decimal=4)
def test_argus_rvs_ratio_uniforms(self):
# test that the ratio of uniforms algorithms works for chi > 2.611
x = stats.argus.rvs(3.5, size=1500, random_state=1535)
assert_almost_equal(stats.argus(3.5).mean(), x.mean(), decimal=3)
assert_almost_equal(stats.argus(3.5).std(), x.std(), decimal=3)<|fim▁end|> | assert_(np.isposinf(b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.) |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from functools import reduce
def vartype(var):
if var.is_discrete:
return 1
elif var.is_continuous:
return 2
elif var.is_string:
return 3
else:<|fim▁hole|> return set([int(i*count/float(iterations)) for i in range(iterations)])
def getdeepattr(obj, attr, *arg, **kwarg):
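# Resolve a dotted attribute path (e.g. "a.b.c") on nested objects; dicts
# fall back to .get(), and a default can be supplied either positionally
# or as default=...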
if isinstance(obj, dict):
return obj.get(attr)
try:
return reduce(getattr, attr.split("."), obj)
except AttributeError:
if arg:
return arg[0]
if kwarg:
return kwarg["default"]
raise
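# getHtmlCompatibleString maps comparison operators onto HTML-safe
# glyphs/entities so expressions render correctly in widget GUIs.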
def getHtmlCompatibleString(strVal):
return strVal.replace("<=", "≤").replace(">=", "≥").replace("<", "&#60;").replace(">", "&#62;").replace("=\\=", "≠")<|fim▁end|>
return 0
def progress_bar_milestones(count, iterations=100): |
<|file_name|>test_eng.py<|end_file_name|><|fim▁begin|># test_eng.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0103,C0111,C0302,E0611,R0913,R0915,W0108,W0212
# Standard library imports
import functools
import sys
import pytest
from numpy import array, ndarray
# Putil imports
import putil.eng
from putil.test import AE, AI, CS
###
# Global variables
###
DFLT = 'def'
PY2 = bool(sys.hexversion < 0x03000000)
###
# Helper functions
###
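# isdflt: True when an argument should fall back to the function default
# h/o/t: build long literals '100.0...', '1.0...' and '10.0...' with `num`
#        trailing zeros, keeping the parametrize tables below readable
# pv: pick the expected value for Python 2 vs Python 3 (float repr differs)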
isdflt = lambda obj: bool(obj == DFLT)
h = lambda num: '100.'+('0'*num)
o = lambda num: '1.'+('0'*num)
pv = lambda py2arg, py3arg: py2arg if PY2 else py3arg
sarg = lambda msg: 'Argument `{0}` is not valid'.format(msg)
t = lambda num: '10.'+('0'*num)
def to_sci_string(number):
"""
Returns a string with the number formatted in scientific notation. This
function does not have all the configurability of the public function
to_scientific_string; it is a convenience function to test _to_eng_tuple
"""
mant, exp = putil.eng._to_eng_tuple(number)
return '{mant}E{exp_sign}{exp}'.format(
mant=mant, exp_sign='-' if exp < 0 else '+', exp=abs(exp)
)
###
# Test functions
###
@pytest.mark.parametrize(
'text, sep, num, lstrip, rstrip, ref', [
('a, b, c, d', ',', 1, DFLT, DFLT, ('a', ' b', ' c', ' d')),
('a , b , c , d ', ',', 1, DFLT, DFLT, ('a ', ' b ', ' c ', ' d ')),
('a , b , c , d ', ',', 1, True, DFLT, ('a ', 'b ', 'c ', 'd ')),
('a , b , c , d ', ',', 1, DFLT, True, ('a', ' b', ' c', ' d')),
('a , b , c , d ', ',', 1, True, True, ('a', 'b', 'c', 'd')),
('a, b, c, d', ',', 2, DFLT, DFLT, ('a, b', ' c, d')),
('a, b, c, d', ',', 3, DFLT, DFLT, ('a, b, c', ' d')),
('a, b, c, d', ',', 4, DFLT, DFLT, ('a, b, c, d',)),
('a, b, c, d', ',', 5, DFLT, DFLT, ('a, b, c, d',)),
]
)
def test_split_every(text, sep, num, lstrip, rstrip, ref):
""" Test _split_every function behavior """
# DFLT in lstrip or rstrip means default argument values should be used
obj = putil.eng._split_every
obj = obj if isdflt(lstrip) else functools.partial(obj, lstrip=lstrip)
obj = obj if isdflt(rstrip) else functools.partial(obj, rstrip=rstrip)
assert obj(text, sep, num) == ref
@pytest.mark.parametrize(
'num, ref', [
(0.000000000000000000000001001234567890, '1.00123456789E-24'),
(0.000000000000000000000001, '1E-24'),
(0.00000000000000000000001001234567890, '10.0123456789E-24'),
(0.00000000000000000000001, '10E-24'),
(0.0000000000000000000001001234567890, '100.123456789E-24'),
(0.0000000000000000000001, '100E-24'),
(0.000000000000000000001001234567890, '1.00123456789E-21'),
(0.000000000000000000001, '1E-21'),
(0.00000000000000000001001234567890, '10.0123456789E-21'),
(0.00000000000000000001, '10E-21'),
(0.0000000000000000001001234567890, '100.123456789E-21'),
(0.0000000000000000001, '100E-21'),
(0.000000000000000001001234567890, '1.00123456789E-18'),
(0.000000000000000001, '1E-18'),
(0.00000000000000001001234567890, '10.0123456789E-18'),
(0.00000000000000001, '10E-18'),
(0.0000000000000001001234567890, '100.123456789E-18'),
(0.0000000000000001, '100E-18'),
(0.000000000000001001234567890, '1.00123456789E-15'),
(0.000000000000001, '1E-15'),
(0.00000000000001001234567890, '10.0123456789E-15'),
(0.00000000000001, '10E-15'),
(0.0000000000001001234567890, '100.123456789E-15'),
(0.0000000000001, '100E-15'),
(0.000000000001001234567890, '1.00123456789E-12'),
(0.000000000001, '1E-12'),
(0.00000000001001234567890, '10.0123456789E-12'),
(0.00000000001, '10E-12'),
(0.0000000001001234567890, '100.123456789E-12'),
(0.0000000001, '100E-12'),
(0.000000001001234567890, '1.00123456789E-9'),
(0.000000001, '1E-9'),
(0.00000001001234567890, '10.0123456789E-9'),
(0.00000001, '10E-9'),
(0.0000001001234567890, '100.123456789E-9'),
(0.0000001, '100E-9'),
(0.000001001234567890, '1.00123456789E-6'),
(0.000001, '1E-6'),
(0.00001001234567890, '10.0123456789E-6'),
(0.00001, '10E-6'),
(0.0001001234567890, '100.123456789E-6'),
(0.0001, '100E-6'),
(0.001001234567890, '1.00123456789E-3'),
(0.001, '1E-3'),
(0.01001234567890, '10.0123456789E-3'),
(0.01, '10E-3'),
(0.1001234567890, '100.123456789E-3'),
(0.1, '100E-3'),
(0, '0E+0'),
(1, '1E+0'),
(1.1234567890, '1.123456789E+0'),
(10, '10E+0'),
(10.1234567890, '10.123456789E+0'),
(100, '100E+0'),
(100.1234567890, '100.123456789E+0'),
(1000, '1E+3'),
(1000.1234567890, pv('1.00012345679E+3', '1.000123456789E+3')),
(10000, '10E+3'),
(10000.1234567890, pv('10.0001234568E+3', '10.000123456789E+3')),
(100000, '100E+3'),
(100000.1234567890, pv('100.000123457E+3', '100.000123456789E+3')),
(1000000, '1E+6'),
(1000000.1234567890, pv('1.00000012346E+6', '1.000000123456789E+6')),
(10000000, '10E+6'),
(10000000.1234567890, pv('10.0000001235E+6', '10.00000012345679E+6')),
(100000000, '100E+6'),
(100000000.1234567890, pv('100.000000123E+6', '100.00000012345679E+6')),
(1000000000, '1E+9'),
(1000000000.1234567890, pv('1.00000000012E+9', '1.0000000001234568E+9')),
(10000000000, '10E+9'),
(10000000000.1234567890, pv(t(9)+'1E+9', '10.000000000123457E+9')),
(100000000000, '100E+9'),
(100000000000.1234567890, pv('100E+9', '100.00000000012346E+9')),
(1000000000000, '1E+12'),
(1000000000000.1234567890, pv('1E+12', '1.0000000000001234E+12')),
(10000000000000, '10E+12'),
(10000000000000.1234567890, pv('10E+12', '10.000000000000123E+12')),
(100000000000000, '100E+12'),
(100000000000000.1234567890, pv('100E+12', '100.00000000000012E+12')),
(1000000000000000, '1E+15'),
(1000000000000000.1234567890, pv('1E+15', '1.0000000000000001E+15')),
(10000000000000000, '10E+15'),
(10000000000000000.1234567890, '10E+15'),
(100000000000000000, '100E+15'),
(100000000000000000.1234567890, '100E+15'),
(1000000000000000000, '1E+18'),
(1000000000000000000.1234567890, '1E+18'),
(10000000000000000000, '10E+18'),
(10000000000000000000.1234567890, '10E+18'),
(100000000000000000000, '100E+18'),
(100000000000000000000.1234567890, '100E+18'),
(1000000000000000000000, '1E+21'),
(1000000000000000000000.1234567890, '1E+21'),
(10000000000000000000000, '10E+21'),
(10000000000000000000000.1234567890, '10E+21'),
(100000000000000000000000, '100E+21'),
(100000000000000000000000.1234567890, pv('100E+21', h(13)+'1E+21')),
(1000000000000000000000000, '1E+24'),
(1000000000000000000000000.1234567890, '1E+24'),
(10000000000000000000000000, '10E+24'),
(10000000000000000000000000.1234567890, '10E+24'),
(100000000000000000000000000, '100E+24'),
(100000000000000000000000000.1234567890, '100E+24'),
(-0.000000000000000000000001001234567890, '-1.00123456789E-24'),
(-0.000000000000000000000001, '-1E-24'),
(-0.00000000000000000000001001234567890, '-10.0123456789E-24'),
(-0.00000000000000000000001, '-10E-24'),
(-0.0000000000000000000001001234567890, '-100.123456789E-24'),
(-0.0000000000000000000001, '-100E-24'),
(-0.000000000000000000001001234567890, '-1.00123456789E-21'),
(-0.000000000000000000001, '-1E-21'),
(-0.00000000000000000001001234567890, '-10.0123456789E-21'),
(-0.00000000000000000001, '-10E-21'),
(-0.0000000000000000001001234567890, '-100.123456789E-21'),
(-0.0000000000000000001, '-100E-21'),
(-0.000000000000000001001234567890, '-1.00123456789E-18'),
(-0.000000000000000001, '-1E-18'),
(-0.00000000000000001001234567890, '-10.0123456789E-18'),
(-0.00000000000000001, '-10E-18'),
(-0.0000000000000001001234567890, '-100.123456789E-18'),
(-0.0000000000000001, '-100E-18'),
(-0.000000000000001001234567890, '-1.00123456789E-15'),
(-0.000000000000001, '-1E-15'),
(-0.00000000000001001234567890, '-10.0123456789E-15'),
(-0.00000000000001, '-10E-15'),
(-0.0000000000001001234567890, '-100.123456789E-15'),
(-0.0000000000001, '-100E-15'),
(-0.000000000001001234567890, '-1.00123456789E-12'),
(-0.000000000001, '-1E-12'),
(-0.00000000001001234567890, '-10.0123456789E-12'),
(-0.00000000001, '-10E-12'),
(-0.0000000001001234567890, '-100.123456789E-12'),
(-0.0000000001, '-100E-12'),
(-0.000000001001234567890, '-1.00123456789E-9'),
(-0.000000001, '-1E-9'),
(-0.00000001001234567890, '-10.0123456789E-9'),
(-0.00000001, '-10E-9'),
(-0.0000001001234567890, '-100.123456789E-9'),
(-0.0000001, '-100E-9'),
(-0.000001001234567890, '-1.00123456789E-6'),
(-0.000001, '-1E-6'),
(-0.00001001234567890, '-10.0123456789E-6'),
(-0.00001, '-10E-6'),
(-0.0001001234567890, '-100.123456789E-6'),
(-0.0001, '-100E-6'),
(-0.001001234567890, '-1.00123456789E-3'),
(-0.001, '-1E-3'),
(-0.01001234567890, '-10.0123456789E-3'),
(-0.01, '-10E-3'),
(-0.1001234567890, '-100.123456789E-3'),
(-0.1, '-100E-3'),
(-1, '-1E+0'),
(-1.1234567890, '-1.123456789E+0'),
(-10, '-10E+0'),
(-10.1234567890, '-10.123456789E+0'),
(-100, '-100E+0'),
(-100.1234567890, '-100.123456789E+0'),
(-1000, '-1E+3'),
(-1000.1234567890, pv('-1.00012345679E+3', '-1.000123456789E+3')),
(-10000, '-10E+3'),
(-10000.1234567890, pv('-10.0001234568E+3', '-10.000123456789E+3')),
(-100000, '-100E+3'),
(-100000.1234567890, pv('-100.000123457E+3', '-100.000123456789E+3')),
(-1000000, '-1E+6'),
(-1000000.1234567890, pv('-1.00000012346E+6', '-1.000000123456789E+6')),
(-10000000, '-10E+6'),
(-10000000.1234567890, pv('-10.0000001235E+6', '-10.00000012345679E+6')),
(-100000000, '-100E+6'),
(-100000000.1234567890, pv('-'+h(6)+'123E+6', '-100.00000012345679E+6')),
(-1000000000, '-1E+9'),
(-1000000000.1234567890, pv('-'+o(9)+'12E+9', '-1.0000000001234568E+9')),
(-10000000000, '-10E+9'),
(-10000000000.1234567890, pv('-'+t(9)+'1E+9', '-'+t(9)+'123457E+9')),
(-100000000000, '-100E+9'),
(-100000000000.1234567890, pv('-100E+9', '-100.00000000012346E+9')),
(-1000000000000, '-1E+12'),
(-1000000000000.1234567890, pv('-1E+12', '-1.0000000000001234E+12')),
(-10000000000000, '-10E+12'),
(-10000000000000.1234567890, pv('-10E+12', '-10.000000000000123E+12')),
(-100000000000000, '-100E+12'),
(-100000000000000.1234567890, pv('-100E+12', '-100.00000000000012E+12')),
(-1000000000000000, '-1E+15'),
(-1000000000000000.1234567890, pv('-1E+15', '-1.0000000000000001E+15')),
(-10000000000000000, '-10E+15'),
(-10000000000000000.1234567890, '-10E+15'),
(-100000000000000000, '-100E+15'),
(-100000000000000000.1234567890, '-100E+15'),
(-1000000000000000000, '-1E+18'),
(-1000000000000000000.1234567890, '-1E+18'),
(-10000000000000000000, '-10E+18'),
(-10000000000000000000.1234567890, '-10E+18'),
(-100000000000000000000, '-100E+18'),
(-100000000000000000000.1234567890, '-100E+18'),
(-1000000000000000000000, '-1E+21'),
(-1000000000000000000000.1234567890, '-1E+21'),
(-10000000000000000000000, '-10E+21'),
(-10000000000000000000000.1234567890, '-10E+21'),
(-100000000000000000000000, '-100E+21'),
(-100000000000000000000000.1234567890, pv('-100E+21', '-'+h(13)+'1E+21')),
(-1000000000000000000000000, '-1E+24'),
(-1000000000000000000000000.1234567890, '-1E+24'),
(-10000000000000000000000000, '-10E+24'),
(-10000000000000000000000000.1234567890, '-10E+24'),
(-100000000000000000000000000, '-100E+24'),
(-100000000000000000000000000.1234567890, '-100E+24'),
('100000.1234567890', '100.000123456789E+3'),
('-100000.1234567890', '-100.000123456789E+3'),
]
)
def test_to_sci_string(num, ref):
""" Test _to_eng_string function behavior """
assert to_sci_string(num) == ref
@pytest.mark.parametrize(
'num, ref', [
(0, '0'),
(0.0, '0.0'),
(4, '4'),
(4.0, '4.0'),
(45, '45'),
(450, '450'),
(1234567, '1234567'),
(4.5, '4.5'),
(4.1234, '4.1234'),
(4123.4E4, '41234000'),
(0.1, '0.1'),
(1.43E-2, '0.0143'),
(100000000.0, '100000000.0'),
(1000000, '1000000'),
(1e3, '1000.0'),
]
)
def test_no_exp(num, ref):
""" Test no_exp function behavior """
assert putil.eng.no_exp(num) == ref
@pytest.mark.eng
def test_no_exp_exceptions():
""" Test no_exp function exceptions """
AI(putil.eng.no_exp, 'number', number='a')
@pytest.mark.eng
@pytest.mark.parametrize(
'args, name', [
(dict(number=['5'], frac_length=3, rjust=True), 'number'),
(dict(number=5, frac_length=3.5, rjust=True), 'frac_length'),
(dict(number=5, frac_length=-2, rjust=True), 'frac_length'),
(dict(number=5, frac_length=3, rjust='a'), 'rjust')
]
)
def test_peng_exceptions(args, name):
""" Test peng function exceptions """
AI(putil.eng.peng, name, **args)
@pytest.mark.parametrize(
'num, mant, rjust, ref', [
(3.0333333333, 1, False, '3.0'),
(0, 3, True, ' 0.000 '),
(0, 3, False, '0.000'),
(125.5, 0, False, '126'),
(1e-25, 3, True, ' 1.000y'),
(1e-24, 3, True, ' 1.000y'),
(1e-23, 3, True, ' 10.000y'),
(1e-22, 3, True, ' 100.000y'),
(1e-21, 3, True, ' 1.000z'),
(1e-20, 3, True, ' 10.000z'),
(1e-19, 3, True, ' 100.000z'),
(1e-18, 3, True, ' 1.000a'),
(1e-17, 3, True, ' 10.000a'),
(1e-16, 3, True, ' 100.000a'),
(1e-15, 3, True, ' 1.000f'),
(1e-14, 3, True, ' 10.000f'),
(1e-13, 3, True, ' 100.000f'),
(1e-12, 3, True, ' 1.000p'),
(1e-11, 3, True, ' 10.000p'),
(1e-10, 3, True, ' 100.000p'),
(1e-9, 3, True, ' 1.000n'),
(1e-8, 3, True, ' 10.000n'),
(1e-7, 3, True, ' 100.000n'),
(1e-6, 3, True, ' 1.000u'),
(1e-5, 3, True, ' 10.000u'),
(1e-4, 3, True, ' 100.000u'),
(1e-3, 3, True, ' 1.000m'),
(1e-2, 3, True, ' 10.000m'),
(1e-1, 3, True, ' 100.000m'),
(1e-0, 3, True, ' 1.000 '),
(1e+1, 3, True, ' 10.000 '),
(1e+2, 3, True, ' 100.000 '),
(1e+3, 3, True, ' 1.000k'),
(1e+4, 3, True, ' 10.000k'),
(1e+5, 3, True, ' 100.000k'),
(1e+6, 3, True, ' 1.000M'),
(1e+7, 3, True, ' 10.000M'),
(1e+8, 3, True, ' 100.000M'),
(1e+9, 3, True, ' 1.000G'),
(1e+10, 3, True, ' 10.000G'),
(1e+11, 3, True, ' 100.000G'),
(1e+12, 3, True, ' 1.000T'),
(1e+13, 3, True, ' 10.000T'),
(1e+14, 3, True, ' 100.000T'),
(1e+15, 3, True, ' 1.000P'),
(1e+16, 3, True, ' 10.000P'),
(1e+17, 3, True, ' 100.000P'),
(1e+18, 3, True, ' 1.000E'),
(1e+19, 3, True, ' 10.000E'),
(1e+20, 3, True, ' 100.000E'),
(1e+21, 3, True, ' 1.000Z'),
(1e+22, 3, True, ' 10.000Z'),
(1e+23, 3, True, ' 100.000Z'),
(1e+24, 3, True, ' 1.000Y'),
(1e+25, 3, True, ' 10.000Y'),
(1e+26, 3, True, ' 100.000Y'),
(1e+27, 3, True, ' 999.999Y'),
(12.45, 1, True, ' 12.5 '),
(998.999e3, 1, True, ' 999.0k'),
(998.999e3, 1, False, '999.0k'),
(999.999e3, 1, True, ' 1.0M'),
(999.999e3, 1, DFLT, ' 1.0M'),
(999.999e3, 1, False, '1.0M'),
(0.995, 0, False, '995m'),
(0.9999, 0, False, '1'),
(1.9999, 0, False, '2'),
(999.99, 0, False, '1k'),
(9.99, 1, False, '10.0'),
(5.25e3, 1, True, ' 5.3k'),
(1.05e3, 0, True, ' 1k'),
(-1e-25, 3, True, ' -1.000y'),
(-1e-24, 3, True, ' -1.000y'),
(-1e-23, 3, True, ' -10.000y'),
(-1e-22, 3, True, '-100.000y'),
(-1e-21, 3, True, ' -1.000z'),
(-1e-20, 3, True, ' -10.000z'),
(-1e-19, 3, True, '-100.000z'),
(-1e-18, 3, True, ' -1.000a'),
(-1e-17, 3, True, ' -10.000a'),
(-1e-16, 3, True, '-100.000a'),
(-1e-15, 3, True, ' -1.000f'),
(-1e-14, 3, True, ' -10.000f'),
(-1e-13, 3, True, '-100.000f'),
(-1e-12, 3, True, ' -1.000p'),
(-1e-11, 3, True, ' -10.000p'),
(-1e-10, 3, True, '-100.000p'),
(-1e-9, 3, True, ' -1.000n'),
(-1e-8, 3, True, ' -10.000n'),
(-1e-7, 3, True, '-100.000n'),
(-1e-6, 3, True, ' -1.000u'),
(-1e-5, 3, True, ' -10.000u'),
(-1e-4, 3, True, '-100.000u'),
(-1e-3, 3, True, ' -1.000m'),
(-1e-2, 3, True, ' -10.000m'),
(-1e-1, 3, True, '-100.000m'),
(-1e-0, 3, True, ' -1.000 '),
(-1e+1, 3, True, ' -10.000 '),
(-1e+2, 3, True, '-100.000 '),
(-1e+3, 3, True, ' -1.000k'),
(-1e+4, 3, True, ' -10.000k'),
(-1e+5, 3, True, '-100.000k'),
(-1e+6, 3, True, ' -1.000M'),
(-1e+7, 3, True, ' -10.000M'),
(-1e+8, 3, True, '-100.000M'),
(-1e+9, 3, True, ' -1.000G'),
(-1e+10, 3, True, ' -10.000G'),
(-1e+11, 3, True, '-100.000G'),
(-1e+12, 3, True, ' -1.000T'),
(-1e+13, 3, True, ' -10.000T'),
(-1e+14, 3, True, '-100.000T'),
(-1e+15, 3, True, ' -1.000P'),
(-1e+16, 3, True, ' -10.000P'),
(-1e+17, 3, True, '-100.000P'),
(-1e+18, 3, True, ' -1.000E'),
(-1e+19, 3, True, ' -10.000E'),
(-1e+20, 3, True, '-100.000E'),
(-1e+21, 3, True, ' -1.000Z'),
(-1e+22, 3, True, ' -10.000Z'),
(-1e+23, 3, True, '-100.000Z'),
(-1e+24, 3, True, ' -1.000Y'),
(-1e+25, 3, True, ' -10.000Y'),
(-1e+26, 3, True, '-100.000Y'),
(-1e+27, 3, True, '-999.999Y'),
(-12.45, 1, True, ' -12.5 '),
(-998.999e3, 1, True, '-999.0k'),
(-998.999e3, 1, False, '-999.0k'),
(-999.999e3, 1, True, ' -1.0M'),
(-999.999e3, 1, DFLT, ' -1.0M'),
(-999.999e3, 1, False, '-1.0M'),
(-0.995, 0, False, '-995m'),
(-0.9999, 0, False, '-1'),
(-1.9999, 0, False, '-2'),
(-999.99, 0, False, '-1k'),
(-9.99, 1, False, '-10.0'),
(-5.25e3, 1, True, ' -5.3k'),
(-1.05e3, 0, True, ' -1k')
]
)
def test_peng(num, mant, rjust, ref):
""" Test peng function behavior """
obj = putil.eng.peng
obj = obj if isdflt(rjust) else functools.partial(obj, rjust=rjust)
assert obj(num, mant) == ref
@pytest.mark.eng
@pytest.mark.parametrize('arg', [None, 5, '', ' 5x', 'a5M', '- - a5M'])
@pytest.mark.parametrize(
'func', [
putil.eng.peng_float,
putil.eng.peng_frac,
putil.eng.peng_int,
putil.eng.peng_mant,
putil.eng.peng_power,
putil.eng.peng_suffix,
]
)
def test_peng_snum_exceptions(func, arg):
"""
Test exceptions of functions that receive a string representing
a number in engineering notation
"""
AI(func, 'snum', **dict(snum=arg))
@pytest.mark.parametrize(
'arg, ref', [
(putil.eng.peng(5234.567, 3, True), 5.235e3),
(' 5.235k ', 5.235e3),
(' -5.235k ', -5.235e3),
]
)
def test_peng_float(arg, ref):
""" Test peng_float function behavior """
assert putil.eng.peng_float(arg) == ref
@pytest.mark.parametrize(
'arg, ref', [
(putil.eng.peng(5234.567, 6, True), 234567),
(putil.eng.peng(5234, 0, True), 0)
]
)
def test_peng_frac(arg, ref):
""" Test peng_frac function behavior """
assert putil.eng.peng_frac(arg) == ref
def test_peng_int():
""" Test peng_int function behavior """
assert putil.eng.peng_int(putil.eng.peng(5234.567, 6, True)) == 5
def test_peng_mant():
""" Test peng_mant function behavior """
assert putil.eng.peng_mant(putil.eng.peng(5234.567, 3, True)) == 5.235
def test_peng_power():
""" Test peng_power function behavior """
tup = putil.eng.peng_power(putil.eng.peng(1234.567, 3, True))
assert tup == ('k', 1000.0)
assert isinstance(tup[1], float)
@pytest.mark.parametrize(
'arg, ref', [
(putil.eng.peng(1, 3, True), ' '),
(putil.eng.peng(-10.5e-6, 3, False), 'u')
]
)
def test_peng_suffix(arg, ref):
""" Test peng_suffix function behavior """
assert putil.eng.peng_suffix(arg) == ref
@pytest.mark.eng
@pytest.mark.parametrize(
'args, extype, name', [
(dict(suffix='X', offset=-1), RuntimeError, 'suffix'),
(dict(suffix='M', offset='a'), RuntimeError, 'offset'),
(dict(suffix='M', offset=20), ValueError, 'offset'),
]
)
def test_peng_suffix_math_exceptions(args, extype, name):
""" Test peng_suffix_math function exceptions """
AE(putil.eng.peng_suffix_math, extype, sarg(name), **args)
@pytest.mark.parametrize('args, ref', [((' ', 3), 'G'), (('u', -2), 'p')])
def test_peng_suffix_math(args, ref):
""" Test peng_suffix_math function behavior """
assert putil.eng.peng_suffix_math(*args) == ref
@pytest.mark.parametrize(
'num, frac_length, exp_length, sign_always, ref', [
('5.35E+3', DFLT, DFLT, DFLT, '5.35E+3'),
(0, DFLT, DFLT, DFLT, '0E+0'),
(0.1, DFLT, DFLT, DFLT, '1E-1'),
(0.01, DFLT, DFLT, DFLT, '1E-2'),
(0.001, DFLT, DFLT, DFLT, '1E-3'),
(0.00101, DFLT, DFLT, DFLT, '1.01E-3'),
(0.123456789012, DFLT, DFLT, DFLT, '1.23456789012E-1'),
(1234567.89012, DFLT, DFLT, DFLT, '1.23456789012E+6'),
(1, DFLT, DFLT, DFLT, '1E+0'),
(20, DFLT, DFLT, DFLT, '2E+1'),
(100, DFLT, DFLT, DFLT, '1E+2'),
(200, DFLT, DFLT, DFLT, '2E+2'),
(333, DFLT, DFLT, DFLT, '3.33E+2'),
(4567, DFLT, DFLT, DFLT, '4.567E+3'),
(4567.890, DFLT, DFLT, DFLT, '4.56789E+3'),
(500, 3, DFLT, DFLT, '5.000E+2'),
(4567.890, 8, DFLT, DFLT, '4.56789000E+3'),
(99.999, 1, DFLT, DFLT, '1.0E+2'),
(4567.890, DFLT, DFLT, True, '+4.56789E+3'),
(500, 3, DFLT, True, '+5.000E+2'),
(4567.890, 8, DFLT, True, '+4.56789000E+3'),
(99.999, 1, DFLT, True, '+1.0E+2'),
(500, 3, 2, True, '+5.000E+02'),
(4567.890, 8, 3, True, '+4.56789000E+003'),
(9999999999.999, 1, 1, True, '+1.0E+10'),
(-0.1, DFLT, DFLT, DFLT, '-1E-1'),
(-0.01, DFLT, DFLT, DFLT, '-1E-2'),
(-0.001, DFLT, DFLT, DFLT, '-1E-3'),
(-0.00101, DFLT, DFLT, DFLT, '-1.01E-3'),
(-0.123456789012, DFLT, DFLT, DFLT, '-1.23456789012E-1'),
(-1234567.89012, DFLT, DFLT, DFLT, '-1.23456789012E+6'),
(-1, DFLT, DFLT, DFLT, '-1E+0'),
(-20, DFLT, DFLT, DFLT, '-2E+1'),
(-100, DFLT, DFLT, DFLT, '-1E+2'),
(-200, DFLT, DFLT, DFLT, '-2E+2'),
(-333, DFLT, DFLT, DFLT, '-3.33E+2'),
(-4567, DFLT, DFLT, DFLT, '-4.567E+3'),
(-4567.890, DFLT, DFLT, DFLT, '-4.56789E+3'),
(-500, 3, DFLT, DFLT, '-5.000E+2'),
(-4567.890, 8, DFLT, DFLT, '-4.56789000E+3'),
(-99.999, 1, DFLT, DFLT, '-1.0E+2'),
(-4567.890, DFLT, DFLT, True, '-4.56789E+3'),
(-500, 3, DFLT, True, '-5.000E+2'),
(-4567.890, 8, DFLT, True, '-4.56789000E+3'),
(-99.999, 1, DFLT, True, '-1.0E+2'),
(-500, 3, 2, True, '-5.000E+02'),
(-4567.890, 8, 3, True, '-4.56789000E+003'),
(-9999999999.999, 1, 1, True, '-1.0E+10'),
]
)
def test_to_scientific_string(num, frac_length, exp_length, sign_always, ref):
""" Test _to_scientific function behavior """
fp = functools.partial
obj = putil.eng.to_scientific_string
obj = obj if isdflt(frac_length) else fp(obj, frac_length=frac_length)
obj = obj if isdflt(exp_length) else fp(obj, exp_length=exp_length)
obj = obj if isdflt(sign_always) else fp(obj, sign_always=sign_always)
assert obj(num) == ref
CVECTOR = [-1+2j, 3+4j, 5+6j, 7+8j, 9-10j, 11+12j, -13+14j, 15678-16j]
@pytest.mark.parametrize(
'vector, args, ref, header', [
(
None,
DFLT,
'None',
''
),
(
[1, 2, 3, 4, 5, 6, 7, 8],
DFLT,
'[ 1, 2, 3, 4, 5, 6, 7, 8 ]',
''
),
(
[1, 2, 3, 4, 5, 6, 7, 8],
dict(indent=20),
'[ 1, 2, 3, 4, 5, 6, 7, 8 ]',
''
),
(
[1, 2, 3, 4, 5, 6, 7, 8],
dict(limit=True),
'[ 1, 2, 3, ..., 6, 7, 8 ]',
''
),
(
[1, 2, 3, 4, 5, 6, 7, 8],
dict(limit=True, indent=20),
'[ 1, 2, 3, ..., 6, 7, 8 ]',
''
),
# Float and integer items
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(eng=True),
'[ 1.000m, 20.000u, 300.000M, 4.000p,'
' 5.250k, -6.000n, 700.000 , 800.000m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(eng=True, indent=20),
'[ 1.000m, 20.000u, 300.000M, 4.000p,'
' 5.250k, -6.000n, 700.000 , 800.000m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(limit=True, eng=True),
'[ 1.000m, 20.000u, 300.000M,'
' ...,'
' -6.000n, 700.000 , 800.000m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(limit=True, eng=True, indent=20),
'[ 1.000m, 20.000u, 300.000M,'
' ...,'
' -6.000n, 700.000 , 800.000m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(eng=True, frac_length=1),
'[ 1.0m, 20.0u, 300.0M, 4.0p,'
' 5.3k, -6.0n, 700.0 , 800.0m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(eng=True, frac_length=1, indent=20),
'[ 1.0m, 20.0u, 300.0M, 4.0p,'
' 5.3k, -6.0n, 700.0 , 800.0m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(limit=True, eng=True, frac_length=1),
'[ 1.0m, 20.0u, 300.0M, ..., -6.0n, 700.0 , 800.0m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(limit=True, indent=20, eng=True, frac_length=1),
'[ 1.0m, 20.0u, 300.0M, ..., -6.0n, 700.0 , 800.0m ]',
''
),
(
[1, 2, 3, 4, 5, 6, 7, 8],
dict(width=8),
#12345678
'[ 1, 2,\n'
' 3, 4,\n'
' 5, 6,\n'
' 7, 8 ]',
''
),
(
[1, 2, 3, 4, 5, 6, 7, 8],
dict(width=10),
'[ 1, 2, 3,\n'
' 4, 5, 6,\n'
' 7, 8 ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9],
dict(width=20, eng=True, frac_length=0),
'[ 1m, 20u,\n'
' 300M, 4p,\n'
' 5k, -6n,\n'
' 700 , 8 ,\n'
' 9 ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(width=30, eng=True, frac_length=1),
'[ 1.0m, 20.0u, 300.0M,\n'
' 4.0p, 5.3k, -6.0n,\n'
' 700.0 , 800.0m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9],
dict(width=20, eng=True, frac_length=0, limit=True),
'[ 1m,\n'
' 20u,\n'
' 300M,\n'
' ...\n'
' 700 ,\n'
' 8 ,\n'
' 9 ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9],
dict(width=30, eng=True, frac_length=1, limit=True),
'[ 1.0m, 20.0u, 300.0M,\n'
' ...\n'
' 700.0 , 8.0 , 9.0 ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9],
dict(width=30, eng=True, frac_length=1, limit=True, indent=8),
'Vector: [ 1.0m, 20.0u, 300.0M,\n'
' ...\n'
' 700.0 , 8.0 , 9.0 ]',
'Vector: '
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(width=30, eng=True, frac_length=1, indent=8),
'Vector: [ 1.0m, 20.0u, 300.0M,\n'
' 4.0p, 5.3k, -6.0n,\n'
' 700.0 , 800.0m ]',
'Vector: '
),
(
[
1.23456789, 2.45678901, 3.45678901, 4.56789012,
5.67890123, 6.78901234, 7.89012345
],
dict(limit=True, width=80-22, indent=22),
'Independent variable: [ 1.23456789, 2.45678901, 3.45678901,\n'
' ...\n'
' 5.67890123, 6.78901234, 7.89012345 ]',
'Independent variable: '
),
(
[
1.23456789, 2.45678901, 3.45678901, 4.56789012,
5.67890123, 6.78901234, 7.89012345
],
dict(width=49, indent=17),
'Independent var: [ 1.23456789, 2.45678901, 3.45678901, '
'4.56789012,\n'
' 5.67890123, 6.78901234, 7.89012345 ]',
'Independent var: '
),
# Complex items
(
CVECTOR,
DFLT,
'[ -1+2j, 3+4j, 5+6j, 7+8j, 9-10j, 11+12j, -13+14j, 15678-16j ]',
''
),
(
CVECTOR,
dict(indent=20),
'[ -1+2j, 3+4j, 5+6j, 7+8j, 9-10j, 11+12j, -13+14j, 15678-16j ]',
''
),
(
CVECTOR,
dict(limit=True),
'[ -1+2j, 3+4j, 5+6j, ..., 11+12j, -13+14j, 15678-16j ]',
''
),
(
CVECTOR,
dict(limit=True, indent=20),
'[ -1+2j, 3+4j, 5+6j, ..., 11+12j, -13+14j, 15678-16j ]',
''
),
(
CVECTOR,
dict(eng=True),
'[ -1.000 + 2.000 j, 3.000 + 4.000 j,'
' 5.000 + 6.000 j,'
' 7.000 + 8.000 j, 9.000 - 10.000 j,'
' 11.000 + 12.000 j,'
' -13.000 + 14.000 j, 15.678k- 16.000 j ]',
''
),
(
CVECTOR,
dict(eng=True, indent=20),
'[ -1.000 + 2.000 j, 3.000 + 4.000 j,'
' 5.000 + 6.000 j,'
' 7.000 + 8.000 j, 9.000 - 10.000 j,'
' 11.000 + 12.000 j,'
' -13.000 + 14.000 j, 15.678k- 16.000 j ]',
''
),
(
CVECTOR,
dict(limit=True, eng=True),
'[ -1.000 + 2.000 j, 3.000 + 4.000 j,'
' 5.000 + 6.000 j,'
' ..., 11.000 + 12.000 j, -13.000 + 14.000 j,'
' 15.678k- 16.000 j ]',
''
),
(
CVECTOR,
dict(limit=True, eng=True, indent=20),
'[ -1.000 + 2.000 j, 3.000 + 4.000 j,'
' 5.000 + 6.000 j,'
' ..., 11.000 + 12.000 j, -13.000 + 14.000 j,'
' 15.678k- 16.000 j ]',
''
),
(
CVECTOR,
dict(eng=True, frac_length=1),
'[ -1.0 + 2.0 j, 3.0 + 4.0 j, 5.0 + 6.0 j,'
' 7.0 + 8.0 j, 9.0 - 10.0 j, 11.0 + 12.0 j,'
' -13.0 + 14.0 j, 15.7k- 16.0 j ]',
''
),
(
CVECTOR,
dict(eng=True, frac_length=1, indent=20),
'[ -1.0 + 2.0 j, 3.0 + 4.0 j, 5.0 + 6.0 j,'
' 7.0 + 8.0 j, 9.0 - 10.0 j, 11.0 + 12.0 j,'
' -13.0 + 14.0 j, 15.7k- 16.0 j ]',
''
),
(
CVECTOR,
dict(limit=True, eng=True, frac_length=1),
'[ -1.0 + 2.0 j, 3.0 + 4.0 j, 5.0 + 6.0 j,'
' ..., 11.0 + 12.0 j, -13.0 + 14.0 j, 15.7k- 16.0 j ]',
''
),
(
CVECTOR,
dict(limit=True, eng=True, frac_length=1, indent=20),
'[ -1.0 + 2.0 j, 3.0 + 4.0 j, 5.0 + 6.0 j,'
' ..., 11.0 + 12.0 j, -13.0 + 14.0 j, 15.7k- 16.0 j ]',
''
),
(
CVECTOR,
dict(width=22),
'[ -1+2j, 3+4j, 5+6j,\n'
' 7+8j, 9-10j, 11+12j,\n'
' -13+14j, 15678-16j ]',
''
),
(
CVECTOR,
dict(width=20),
'[ -1+2j, 3+4j, 5+6j,\n'
' 7+8j, 9-10j,\n'
' 11+12j, -13+14j,\n'
' 15678-16j ]',
''
),
(
CVECTOR,
dict(width=29, eng=True, frac_length=0),
'[ -1 + 2 j, 3 + 4 j,\n'
' 5 + 6 j, 7 + 8 j,\n'
' 9 - 10 j, 11 + 12 j,\n'
' -13 + 14 j, 16k- 16 j ]',
''
),
(
CVECTOR,
dict(width=37, eng=True, frac_length=1),
'[ -1.0 + 2.0 j, 3.0 + 4.0 j,\n'
' 5.0 + 6.0 j, 7.0 + 8.0 j,\n'
' 9.0 - 10.0 j, 11.0 + 12.0 j,\n'
' -13.0 + 14.0 j, 15.7k- 16.0 j ]',
''
),
(
CVECTOR,
dict(width=16, eng=True, frac_length=0),
'[ -1 + 2 j,\n'
' 3 + 4 j,\n'
' 5 + 6 j,\n'
' 7 + 8 j,\n'
' 9 - 10 j,\n'
' 11 + 12 j,\n'
' -13 + 14 j,\n'
' 16k- 16 j ]',
''
),
(
CVECTOR,
dict(width=16, eng=True, frac_length=0, limit=True),
'[ -1 + 2 j,\n'
' 3 + 4 j,\n'
' 5 + 6 j,\n'
' ...\n'
' 11 + 12 j,\n'
' -13 + 14 j,\n'
' 16k- 16 j ]',
''
),
(
CVECTOR,
dict(width=56, eng=True, frac_length=1, limit=True),
'[ -1.0 + 2.0 j, 3.0 + 4.0 j, 5.0 + 6.0 j,\n'
' ...\n'
' 11.0 + 12.0 j, -13.0 + 14.0 j, 15.7k- 16.0 j ]',
''
),
(
CVECTOR,
dict(width=64, eng=True, frac_length=1, limit=True, indent=8),
'Vector: [ -1.0 + 2.0 j, 3.0 + 4.0 j, 5.0 + 6.0 j,\n'
' ...\n'
' 11.0 + 12.0 j, -13.0 + 14.0 j, 15.7k- 16.0 j ]',
'Vector: '
),
(
CVECTOR,
dict(width=20, indent=8),
'Vector: [ -1+2j, 3+4j, 5+6j,\n'
' 7+8j, 9-10j,\n'
' 11+12j, -13+14j,\n'
' 15678-16j ]',
'Vector: '
),
(
CVECTOR,
dict(width=30, indent=8, limit=True),
'Vector: [ -1+2j, 3+4j, 5+6j,\n'
' ...\n'
' 11+12j, -13+14j, 15678-16j ]',
'Vector: '
),
(
CVECTOR,
dict(width=20, indent=8, limit=True),
'Vector: [ -1+2j,\n'
' 3+4j,\n'
' 5+6j,\n'
' ...\n'
' 11+12j,\n'
' -13+14j,\n'
' 15678-16j ]',
'Vector: '
),
(
array(
[
-0.10081675027325637-0.06910517142735251j,
0.018754229185649937+0.017142783560861786j,
0+18j
]
),
DFLT,
'[ -0.100816750273-0.0691051714274j, '
'0.0187542291856+0.0171427835609j, 18j ]',
''
),
(
array(
[
-0.10081675027325637-0.06910517142735251j,
0.018754229185649937+0.017142783560861786j,
0+18j
]
),
dict(width=60, limit=True, indent=20),
'Dependent variable: [ -0.100816750273-0.0691051714274j,\n'
' 0.0187542291856+0.0171427835609j, 18j ]',
'Dependent variable: '
),
(
array(
[
-0.10081675027325637-0.06910517142735251j,
0.018754229185649937+0.017142783560861786j,
0+18j,
0.118754229185649937+0.117142783560861786j,
0.218754229185649937+0.217142783560861786j,
0+28j,
10+2j,
]
),
dict(width=60),
'[ -0.100816750273-0.0691051714274j,\n'
' 0.0187542291856+0.0171427835609j, 18j,\n'
' 0.118754229186+0.117142783561j,\n'
' 0.218754229186+0.217142783561j, 28j, 10+2j ]',
''
),
(
array(
[
-0.10081675027325637-0.06910517142735251j,
0.018754229185649937+0.017142783560861786j,
0+18j,
0.118754229185649937+0.117142783560861786j,
0.218754229185649937+0.217142783560861786j,
0+28j,
10+2j,
]
),
dict(width=60, limit=True),
'[ -0.100816750273-0.0691051714274j,\n'
' 0.0187542291856+0.0171427835609j,\n'
' 18j,\n'
' ...\n'
' 0.218754229186+0.217142783561j,\n'
' 28j,\n'
' 10+2j ]',
''
),
]
)
def test_pprint_vector(vector, args, ref, header):
""" Test pprint_vector function behavior """
obj = putil.eng.pprint_vector
obj = obj if isdflt(args) else functools.partial(obj, **args)
CS(header+obj(vector), ref)
@pytest.mark.parametrize(
'args', [
dict(
vector=[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9],
width=5, eng=True, frac_length=1, limit=True
),
dict(
vector=[-1+2j, 3, 5+6j, 7+8j, 9-10j, 11+12j, -13+14j, 15678-16j],
width=8, limit=True
)
]
)
@pytest.mark.eng
def test_pprint_vector_exceptions(args):
""" Test pprint_vector function exceptions """
msg = 'Argument `width` is too small'
AE(putil.eng.pprint_vector, ValueError, msg, **args)
@pytest.mark.parametrize(
'num, dec, ref', [
(None, DFLT, None),
(1.3333, 2, 1.33),
(1.5555E-12, 2, 1.56E-12),<|fim▁hole|> (3, 2, 3),
(array([1.3333, 2.666666]), 2, array([1.33, 2.67])),
(array([1.3333E-12, 2.666666E-12]), 2, array([1.33E-12, 2.67E-12])),
(array([1, 3]), 2, array([1, 3])),
]
)
def test_round_mantissa(num, dec, ref):
""" Test round_mantissa function behavior """
obj = putil.eng.round_mantissa
obj = obj if isdflt(dec) else functools.partial(obj, decimals=dec)
test = obj(num) == ref
assert test.all() if isinstance(num, ndarray) else test<|fim▁end|> | |
<|file_name|>cmdbServer.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import httplib
import pprint
import json
import sys
import logging
import datetime
import os
import os.path
import codecs
class cmdb( object ):
def __init__( self, args , info=None ):
self.res = {}
self.result = {}
self.info = info
self.args = args
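# '机架服务器' is the CMDB's Chinese label for "rack server"; kept verbatim
# because the CMDB matches on the Chinese value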
self.device_type = '机架服务器'
self.conn = httplib.HTTPConnection( self.args['host'],self.args['port'] , timeout=10 )
def search( self, manifest, total ,start , limit , conditions ):
cond = ''
for x in conditions:
cond += '%20and%20'+x['name']+x['tag']+x['value']
if total :
limit = 1
rr = {}
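# Build the CMDB query URL: credentials via username/auth, paging via
# num/start, and each condition appended to q=manifest==<manifest> as an
# URL-encoded ' and ' clause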
url = (self.args['baseURL']+'username='+self.args['user']
+ '&auth='+self.args['key']
+ '&num='+str(limit)+'&return_total=1&start='+ str(start)
+ '&q=manifest=='+manifest+cond
)
self.conn.connect()
self.conn.request( 'GET', url ,'',self.args['headers'] )
res = self.conn.getresponse( )
if res.status == 200 :
rs = json.loads( res.read())
try:
if len( rs['result'] ) != 0 :
if total :
rr = rs['total']
else:
rr = rs['result']
else:
self.logger('info' , 'Search: rack server %s is not in cmdb ' % manifest)
except:
pass
else:
self.logger('info', manifest + ' bad request' )
self.conn.close()
return rr
def update( self ):
pass
def logger( self, level , loginfo ):
dt = datetime.datetime.now()
ds = dt.strftime('%Y%m%d%H%M%S')
logfile = ds + self.args['logfile']
logging.basicConfig( filename = os.path.join(os.getcwd()+self.args['logPath'],logfile),
level = logging.WARN,
filemode = 'w',
format = '%(asctime)s - %(levelname)s: %(message)s'
)
if level == 'info': logging.info( loginfo )
if level == 'warn' :logging.warning( loginfo )
if level == 'error' :logging.error( loginfo )
def dataFormat( self,data,cmdb_node ):
rr = {}
if cmdb_node != {}:
rr['id'] = cmdb_node['.id']
rr['manifest'] = cmdb_node['.manifest']
rr['value'] = data
else:
rr['id'] = ''
rr['manifest'] = ''
rr['value'] = data
return rr
if __name__ == '__main__':
import conf.cmdb_config as conf
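# Field values are the CMDB's Chinese labels: '永丰' is the Yongfeng site
# name, '在线' means "online"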
conditions = [
{'name':'rack','tag':'~','value':r'永丰'},
{'name':'state','tag':'~','value':r'在线'}
]
num = 100
cmdb = cmdb( args=conf.CMDBAPI , info=None )
total = cmdb.search( 'rack_server' , True, 0, 1 , conditions )
if total % num == 0 :
times = total / num
else:
times = total / num + 1
print 'servers total is ' + str(total) + ', run '+ str(times) + '.'
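# NOTE: WorkProfile is assumed to be provided elsewhere in the project;
# its import is not part of this file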
wfile = WorkProfile( )
<|fim▁hole|> for i in range( times ) :
print 'run time ' + str(i+1)
res = cmdb.search( 'rack_server' , False, start, num , conditions )
start = start + num
content = ''
for r in res :
content += r['asset_number'] +"\t"+ r['sn'] +"\t"+ r['rack'].split('.')[0].strip() +"\t"+ r['ips'] + "\n"
wfile.writeFile( None , 'servers.txt', content )<|fim▁end|> | start = 0 |
<|file_name|>talk.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
DEBUG = True
observer = None
ser_port = None
s = 0
ser = None
#--------------------------------------------------------------------
import signal
import sys
import os
def signal_handler(signal, frame):
global s, ser
print '\nYou pressed Ctrl+C!'
if s > 18:
print "MTK_Finalize"
serialPost(ser, "B7".decode("hex"))
time.sleep(0.1)
if ser.isOpen(): ser.close()
#sys.exit(0)
os._exit(0)
signal.signal(signal.SIGINT, signal_handler)
#--------------------------------------------------------------------
import os
import serial
from serial.tools import list_ports
def serial_ports():
"""
Returns a generator for all available serial ports
"""
if os.name == 'nt':
# windows
for i in range(256):
try:
s = serial.Serial(i)
s.close()
yield 'COM' + str(i + 1)
except serial.SerialException:
pass
else:
# unix
for port in list_ports.comports():
yield port[0]
#if __name__ == '__main__':
# print(list(serial_ports()))
#exit()
#--------------------------------------------------------------------
import serial, time, binascii
def serialPost(ser, data):
#time.sleep(0.5)
#data = chr(0x44)
print " -> " + binascii.b2a_hex(data)
ser.write(data)
#ser.flush()
def serialPostL(ser, data, slen, scnt):
sys.stdout.write("\r" + str(scnt) + " of " + str(slen) + " <- " + binascii.b2a_hex(data))
if slen == scnt: sys.stdout.write("\n")
#sys.stdout.flush()
ser.write(data)
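# summ: simple 8-bit additive checksum -- adds the first `length` bytes of
# `block` and truncates the sum to a single byte (used as the block CRC in
# the bootloader protocol below)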
def summ(block, length):
res = 0
for i in range(length):
res = res + ord(block[i])
#print str(res)
return chr(res & int(0xFF))
def swapSerialData(data):
l = len(data)
#if l > 16:
# print "-> " + str(l) + " bytes"
#else:
# print "-> " + binascii.b2a_hex(data)
if len(data) > 0: ser.write(data)
n = 0
while n < 1:
n = ser.inWaiting()
#time.sleep(1)
data = ser.read(n)
l = len(data)
#print "RX is L: " + str(l) + " -> " + binascii.b2a_hex(data)
return data
#----- CONNECT TO PORT----------
def conn_port (ser_port):
print ser_port
print "module PySerial version: " + serial.VERSION
# if: error open serial port: (22, 'Invalid argument')
# http://superuser.com/questions/572034/how-to-restart-ttyusb
# cat /proc/tty/drivers
# lsmod | grep usbserial
# sudo modprobe -r pl2303 qcaux
# sudo modprobe -r usbserial
#import subprocess
#subprocess.call(['statserial', ser_port])
#subprocess.call(['setserial', '-G', ser_port])
# http://www.roman10.net/serial-port-communication-in-python/
# initialization and open the port
# possible timeout values:
# 1. None: wait forever, block call
# 2. 0: non-blocking mode, return immediately
# 3. x, x is bigger than 0, float allowed, timeout block call
global ser
ser = serial.Serial()
#ser.port = "COM29"
ser.port = ser_port
ser.baudrate = 115200
ser.bytesize = serial.EIGHTBITS # number of bits per bytes
ser.parity = serial.PARITY_EVEN
ser.stopbits = serial.STOPBITS_ONE # number of stop bits
ser.timeout = None # block read
ser.rtscts = True # enable hardware (RTS/CTS) flow control (Hardware handshaking)
#ser.port = "/dev/ttyS0"
#ser.port = "/dev/ttyUSB0"
#ser.port = "2" # COM3
#ser.baudrate = 9600
#ser.parity = serial.PARITY_NONE # set parity check: no parity
#ser.timeout = 0 # non-block read
#ser.xonxoff = False # disable software flow control
#ser.rtscts = False # disable hardware (RTS/CTS) flow control
#ser.dsrdtr = False # disable hardware (DSR/DTR) flow control
#ser.writeTimeout = 2 # timeout for write
#data = chr(0x44) + chr(0x59)
#print "-> " + binascii.b2a_hex(data)
#exit()
try:
ser.open()
except Exception, e:
print "error open serial port: " + str(e)
print "for full reset serial device you must reload drivers:"
print " "
print " cat /proc/tty/drivers "
print " lsmod | grep usbserial "
print " sudo modprobe -r pl2303 qcaux "
print " sudo modprobe -r usbserial "
print " "
exit()
from hktool.bootload.samsung import sgh_e730
#loader1 = open("loader1.bin", "rb").read()
loader1 = sgh_e730.load_bootcode_first()
print "loader1.bin data size is: " + str(len(loader1))
ldr1_i = 0
ldr1_l = len(loader1)
ldr1_c = "4c00".decode("hex")
#loader2 = open("loader2.bin", "rb").read()
loader2 = sgh_e730.load_bootcode_second()
print "loader2.bin data size is: " + str(len(loader2))
ldr2_i = 0
ldr2_l = len(loader2)
#f = open("loader1.bin", "rb")
#try:
# byte = f.read(1)
# while byte != "":
# # Do stuff with byte.
# byte = f.read(1)
#except Exception, e1:
# print "error: " + str(e1)
# ser.close()
# import traceback
# traceback.print_exc()
#finally:
# f.close()
global s
if ser.isOpen():
try:
print 'Work with Samsung SGH-E730:'
print '- wait for SWIFT power on...'
ser.flushInput() # flush input buffer, discarding all its contents
ser.flushOutput() # flush output buffer, aborting current output
# and discard all that is in buffer
# write data
#ser.write("AT+CSQ=?\x0D")
#print("write data: AT+CSQ=?\x0D")
# steps
s = 0
serialPost(ser, "A0".decode("hex"))
while True:
n = 0
s += 1
while n < 1:
n = ser.inWaiting()
#time.sleep(1)
data = ser.read(n)
l = len(data)
#if s != 6 or ldr1_i == 0:
print "RX is L: " + str(l) + " <- " + binascii.b2a_hex(data)
if s == 1:
if data[l-1] == chr(0x5F):
serialPost(ser, chr(0x0A))
elif s == 2:
if data[l-1] == chr(0xF5):
serialPost(ser, chr(0x50))
elif s == 3:
#if l == 16:
# serialPost(ser, "4412345678".decode("hex") + data)
# -> AF
serialPost(ser, "05".decode("hex"))
elif s == 4:
#if data[l-1] == chr(0x4f):
# # set timeout to 1600 ms (10h)
# serialPost(ser, chr(0x54) + chr(0x10))
# # set timeout to 1600 ms (20h)
# #serialPost(ser, chr(0x54) + chr(0x20))
# -> FA
# A2 - read from memory
serialPost(ser, "A2".decode("hex"))
elif s == 5:
#if data[l-1] == chr(0x4f):
# serialPost(ser, "530000000c".decode("hex"))
# -> A2 - read command ACK
# 80 01 00 00 - Configuration Register: Hardware Version Register
serialPost(ser, "80010000".decode("hex"))
elif s == 6:
# -> 80 01 00 00
# 00 00 00 01 - read one byte
serialPost(ser, "00000001".decode("hex"))
#ldr1_i4 = 4*ldr1_i
#ldr1_i8 = 4*ldr1_i + 4
#if ldr1_i8 < ldr1_l:
# serialPostL(ser, ldr1_c + loader1[ldr1_i4:ldr1_i8], ldr1_l, ldr1_i8)
# s -= 1
#else:
# serialPostL(ser, ldr1_c + loader1[ldr1_i4:ldr1_l ], ldr1_l, ldr1_l )
#ldr1_i += 1
elif s == 7:
if l == 6: s += 1
elif s == 8:
# -> 00 00 00 01 - byte is read
# -> XX XX - byte:
serialPost(ser, "A2".decode("hex"))
#if data[l-1] == chr(0x4f):
# serialPost(ser, "530000000c".decode("hex"))
elif s == 9:
# -> A2
# 80 01 00 08 - Hardware Code Register
serialPost(ser, "80010008".decode("hex"))
#if data[l-1] == chr(0x4f):
# serialPost(ser, "4a".decode("hex"))
elif s == 10:
# -> 80 01 00 08
serialPost(ser, "00000001".decode("hex"))
#s = 20;
#if data[l-1] == chr(0xAB):
# # 0x00 -> Speed = 115200
# # 0x01 -> Speed = 230400
# # 0x02 -> Speed = 460800
# # 0x03 -> Speed = 921600
# serialPost(ser, "00".decode("hex"))
# # close comms, bootup completed
# ser.flushInput() # flush input buffer, discarding all its contents
# ser.flushOutput() # flush output buffer, aborting current output
# ser.close()
# # reopen comms at the new speed
# time.sleep(0.1)
# ser.port = "COM3"
# ser.baudrate = 115200
# ser.parity = serial.PARITY_NONE # set parity check: no parity
# ser.open()
# ser.flushInput() # flush input buffer, discarding all its contents
# ser.flushOutput() # flush output buffer, aborting current output
# serialPost(ser, "d9".decode("hex"))
elif s == 11:
if l == 6: s += 1
elif s == 12:
# -> 00 00 00 01
# -> XX XX - we hawe a MediaTek MT6253
serialPost(ser, "A2".decode("hex"))
elif s == 13:
# -> A2
# 80 01 00 04 - Software Version Register
serialPost(ser, "80010004".decode("hex"))
elif s == 14:
# -> 80 01 00 04
serialPost(ser, "00000001".decode("hex"))
elif s == 15:
if l == 6: s += 1
elif s == 16:
# -> 00 00 00 01
# -> XX XX -
# A1 - write to register
serialPost(ser, "A1".decode("hex"))
elif s == 17:
# -> A1 - write command ack
# 80 03 00 00 - Reset Generation Unit (RGU): Watchdog Timer Control Register
serialPost(ser, "80030000".decode("hex"))
elif s == 18:
# -> 80 03 00 00
serialPost(ser, "00000001".decode("hex"))
elif s == 19:
# -> 00 00 00 01
# 22 00 - set
serialPost(ser, "2200".decode("hex"))
elif s == 20:
s -= 1
elif s == 111:
data = "d4".decode("hex")
data0 = chr((ldr2_l >> 24) & int(0xFF))
data0 += chr((ldr2_l >> 16) & int(0xFF))
data0 += chr((ldr2_l >> 8) & int(0xFF))
data0 += chr((ldr2_l ) & int(0xFF))
data += data0
serialPost(ser, data)
elif s == 112:
# store the received CRC
crc = data
my_crc = summ(data0, 4)
print "crc is: " + binascii.b2a_hex(crc)
print "my_crc is: " + binascii.b2a_hex(my_crc)
if crc == my_crc:
send_len = 0
for i in range((ldr2_l - 1) >> 11):
send_len = ldr2_l - (i << 11)
if send_len > 2048: send_len = 2048
# calculate sum
ss = i << 11
su = summ(loader2[ss:ss+send_len], send_len)
# send command
data = swapSerialData("f7".decode("hex"))
data = swapSerialData(loader2[ss:ss+send_len])
#print "2 crc is: " + binascii.b2a_hex(data)
#print "2 my_crc is: " + binascii.b2a_hex(su)
#print "i: " + str(i)
sys.stdout.write("\ri: " + str(i))
sys.stdout.write("\n")
serialPost(ser, "FF".decode("hex"))
elif s == 113:
serialPost(ser, "D010000000".decode("hex"))
elif s == 114:
serialPost(ser, "D1".decode("hex"))
elif s == 115:
nand_id = (ord(data[8])<<8) + ord(data[9])
# need to check that bytes 2, 3 and 4 are equal to 0xEC 0x22 0xFC respectively
#
# additionally identify NAND for Swift
print "Flash... "
if nand_id == int(0x04): print " 16MB (128Mbit) NAND"
elif nand_id == int(0x14): print " 32MB (256Mbit) NAND"
elif nand_id == int(0x24): print " 64MB (512Mbit) NAND"
elif nand_id == int(0x34): print "128MB ( 1Gbit) NAND"
elif nand_id == int(0x0C): print " 16MB (128Mbit) NAND"
elif nand_id == int(0x1C): print " 32MB (256Mbit) NAND"
elif nand_id == int(0x2C): print " 64MB (512Mbit) NAND"
elif nand_id == int(0x3C): print "128MB ( 1Gbit) NAND"
else: print "Unknown NAND: " + str("%02x" % nand_id)
# here, the bootup is completed
# delay slightly (required!)
time.sleep(0.25)
else:
#data = chr(0x44)
data = chr(0x00)
print "-> " + binascii.b2a_hex(data)
#ser.write(data)
data = ser.read()
print "serial RX: " + binascii.b2a_hex(data)
data = chr(0x44)
print "-> " + binascii.b2a_hex(data)
ser.write(data)
#ser.flush()
data = ser.read()
print "serial RX: " + binascii.b2a_hex(data)
data = chr(0x51)
print "-> " + binascii.b2a_hex(data)
ser.write(data)
data = ser.read()
print "serial RX: " + binascii.b2a_hex(data)
#print ser.portstr
time.sleep(0.5) # give the serial port sometime to receive the data
numOfLines = 0
while True:
response = ser.readline()
print("read data: " + response)
numOfLines = numOfLines + 1
if (numOfLines >= 5):
break
ser.close()
except Exception, e1:
print "error communicating...: " + str(e1)
ser.close()
import traceback
traceback.print_exc()
except KeyboardInterrupt:
print "\nmanual interrupted!"
ser.close()
else:
print "cannot open serial port "
exit()
#===========================================================
#from hktool.bootload import mediatek
from hktool.bootload.mediatek import MTKBootload
from threading import Thread
from time import sleep as Sleep
def logical_xor(str1, str2):
return bool(str1) ^ bool(str2)
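# NOTE: this is a logical XOR on truthiness (exactly one string non-empty),
# not a bitwise XOR of the contents; the 'crc' task below does the bitwise
# XOR separately via int(str, 16)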
#----- MAIN CODE -------------------------------------------
if __name__=='__main__':
from sys import platform as _platform
import os
if _platform == "linux" or _platform == "linux2":
# linux
print "it is linux?"
from hktool.hotplug import linux_udev as port_notify
elif _platform == "darwin":
# OS X
print "it is osx?"
print "WARNING: port_notify is not realised !!!"
elif _platform == "win32":
# Windows...
print "it is windows?"
from hktool.hotplug import windevnotif as port_notify
print "sys.platform: " + _platform + ", os.name: " + os.name
print ""
print "Select: xml, boot, sgh, crc, usb, exit, quit, q"
print ""
tsk = str(raw_input("enter command > "))
if tsk.lower() in ['exit', 'quit', 'q']:
os._exit(0)
if tsk.lower() in ['boot']:
print "Working with device communication..."
print ""
Thread(target = port_notify.run_notify).start()
Sleep(1)
port = port_notify.get_notify()
print "port_name is: " + port
#conn_port(port)
#mediatek.init(port)
m = MTKBootload(port)
if 'sgh' in tsk.lower():
tsks = tsk.split()
print ""
print "Working with device communication..."
print ""
Sleep(1)
port = tsks[1]
print "port_name is: " + port
#m = SGHBootload(port)
if tsk.lower() in ['xml', 'lxml']:
print "Working with lxml..."
print ""
from lxml import etree
tree = etree.parse('../../mtk-tests/Projects/_lg-a290/data/UTLog_DownloadAgent_FlashTool.xml')
root = tree.getroot()
print root
#entries = tree.xpath("//atom:category[@term='accessibility']/..", namespaces=NSMAP)
entries = tree.xpath("//UTLOG/Request[@Dir='[OUT]']/Data")
#print entries
old_text = None
dmp_text = False
cnt_text = 0
bin_file = None
for xent in entries:
new_text = xent.text
if new_text == old_text:
continue
old_text = new_text
#print "-> " + new_text
bin_text = new_text.replace(" ", "")
bin_text = bin_text.decode("hex")
bin_len = len(bin_text)
print str(bin_len) + " -> " + new_text
if dmp_text is False and bin_len == 1024:
dmp_text = True
prt = xent.getparent()
atr = prt.attrib
num = atr["Number"]
nam = "big_" + num + ".bin"
bin_file = open(nam, 'wb')
print ""
print "start dump big data to: " + nam
if dmp_text is True:
#---
import array
a = array.array('H', bin_text) # array.array('H', bin_text)
a.byteswap()
bin_text = a.tostring()
#---
bin_file.write(bin_text)
if bin_len == 1024:
cnt_text += 1
else:
cnt_text = cnt_text * 1024 + bin_len
dmp_text = False
bin_file.close()
print "big data length is: " + str(cnt_text)
print ""
cnt_text = 0
pass
if tsk.lower() in ['crc']:
str1 = raw_input("Enter string one:")
str2 = raw_input("Enter string two:")
if logical_xor(str1, str2):
print "ok"
else:
print "bad"
pass
print hex(0x12ef ^ 0xabcd)
print hex(int("12ef", 16) ^ int("abcd", 16))
str1 = raw_input("Enter string one: ")
str2 = raw_input("Enter string two: ")
print hex(int(str1, 16) ^ int(str2, 16))
pass
if tsk.lower() in ['usb']:
import usb.core
#import usb.backend.libusb1
import usb.backend.libusb0
import logging
#PYUSB_DEBUG_LEVEL = "debug"
#PYUSB_LOG_FILENAME = "C:\dump"
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
__backend__ = os.path.join(__location__, "libusb0.dll")
#PYUSB_LOG_FILENAME = __location__
#backend = usb.backend.libusb1.get_backend(find_library=lambda x: "/usr/lib/libusb-1.0.so")
#backend = usb.backend.libusb1.get_backend(find_library=lambda x: __backend__)
backend = usb.backend.libusb0.get_backend(find_library=lambda x: __backend__)
dev = usb.core.find(find_all=True, backend=backend)
#dev = usb.core.find(find_all=True)
busses = usb.busses()
print busses
if dev is None:
raise ValueError('Our device is not connected')
for bus in busses:
devices = bus.devices
for dev in devices:
try:
_name = usb.util.get_string(dev.dev, 19, 1)
except:
continue
dev.set_configuration()
cfg = dev.get_active_configuration()
interface_number = cfg[(0,0)].bInterfaceNumber
alternate_settting = usb.control.get_interface(interface_number)
print "Device name:",_name
print "Device:", dev.filename
print " idVendor:",hex(dev.idVendor)
print " idProduct:",hex(dev.idProduct)
for config in dev.configurations:
print " Configuration:", config.value
print " Total length:", config.totalLength
print " selfPowered:", config.selfPowered
print " remoteWakeup:", config.remoteWakeup
print " maxPower:", config.maxPower<|fim▁hole|><|fim▁end|> | print |
<|file_name|>host_daily_profile.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2017 Michal Stefanik <stefanik dot [email protected]>, Tomas Jirsik <[email protected]>
# Institute of Computer Science, Masaryk University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Description: A method for computing statistics for hosts in the network. The statistics computed
for each host in each window contain:
- a list of the top n most active ports, sorted by the number of flows on a given port
Usage:
host_daily_profile.py -iz <input-zookeeper-hostname>:<input-zookeeper-port> -it <input-topic>
-oz <output-zookeeper-hostname>:<output-zookeeper-port> -ot <output-topic>
To run this on the Stream4Flow, you need to receive flows by IPFIXCol and make them available via Kafka topic. Then
you can run the example
$ ./run-application.sh .statistics/hosts_profiling/host_daily_profile.py -iz producer:2181 -it host.stats
-oz producer:9092 -ot results.daily
"""
import sys # Common system functions
import os # Common operating system functions
import argparse # Arguments parser
import ujson as json # Fast JSON parser
import socket # Socket interface
import time # Time handling
import ipaddress # IP address handling
from termcolor import cprint # Colors in the console output
from pyspark import SparkContext # Spark API
from pyspark.streaming import StreamingContext # Spark streaming API
from pyspark.streaming.kafka import KafkaUtils # Spark streaming Kafka receiver
from kafka import KafkaProducer # Kafka Python client
from collections import namedtuple
# casting structures
IPStats = namedtuple('IPStats', 'ports dst_ips http_hosts')
StatsItem = namedtuple('StatsItem', 'packets bytes flows')
ZERO_ITEM = StatsItem(0, 0, 0) # neutral item used if no new data about the IP was collected in recent interval
# temporal constants
# default values are overridden from input params if available
hourly_interval = 3600 # aggregation interval for one item of temporal array
daily_interval = 86400 # collection interval of all aggregations as items in temporal array
INCREMENT = 0
# temporal methods resolving the temporal array consistency and construction:
def increment():
"""
    increments the global counter, which should stay consistent with the duration of the app run in hours
"""
global INCREMENT
INCREMENT += 1
def modulate_position(timestamp):
"""
    counts the position in the time-sorted log of IP activity, based on the timestamp attached to
    the particular log in the rdd
    :param timestamp: the attached timestamp
"""
result = (INCREMENT - timestamp) % time_dimension
return result
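# Example (hypothetical values): with time_dimension = 24 and INCREMENT = 30,
# an entry stamped at hour 28 maps to position (30 - 28) % 24 = 2, i.e. the
# slot two aggregation intervals back in the circular temporal array.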
def update_array(array, position, value):
"""
updates an array inserting a _value_ to a chosen _position_ in an _array_
overcomes a general restriction disabling to use an assignment in lambda statements
:param array: _array_
:param position: _position_
:param value: _value_
"""
array[int(position)] = value
return array
def initialize_array(value, timestamp):
"""
initializes an empty array of default log length (=time_dimension) with a _value_
inserted on a position at a given _timestamp_
:param value: _value_
:param timestamp: _timestamp_
"""
return update_array(list([ZERO_ITEM] * time_dimension), modulate_position(timestamp), value)
def merge_init_arrays(a1, a2):
""" Merges the given arrays so that the output array contains either value of a1, or a2 for each nonzero value
Arrays should be in disjunction append -1 when both arrays are filled, so the error is traceable
:param a1 array of the size of a2
:param a2 array of the size of a1
:return Merged arrays
"""
merge = []
for i in range(len(a1)):
if a1[i] != ZERO_ITEM and a2[i] != ZERO_ITEM:
# should not happen
merge.append(-1)
else:
merge.append(a1[i] if a1[i] != ZERO_ITEM else a2[i])
return merge
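# Example (hypothetical values): merging [x, ZERO_ITEM] with [ZERO_ITEM, y]
# yields [x, y], while merging [x, ZERO_ITEM] with [y, ZERO_ITEM] yields
# [-1, ZERO_ITEM], flagging the unexpected overlap.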
# post-processing methods for resulting temporal arrays:
def send_to_kafka(data, producer, topic):
"""
Send given data to the specified kafka topic.
:param data: data to send
:param producer: producer that sends the data
:param topic: name of the receiving kafka topic
"""
# Debug print - data to be sent to kafka in resulting format
# print data
producer.send(topic, str(data))
def process_results(json_rdd, producer, topic):
"""
Transform given computation results into the JSON format and send them to the specified kafka instance.
JSON format:
{"src_ipv4":"<host src IPv4 address>",
"@type":"host_stats_temporal_profile",
"stats":{
{
<t=1>: {"packets": <val>, "bytes": <val>, "flows": <val>},
<t=2>: {"packets": <val>, "bytes": <val>, "flows": <val>},
...
<t=time_dimension>: {"port":<port #n>, "flows":# of flows}
}
}
    Where <val> is the aggregated sum of the given attribute over an interval of hourly_interval length
    that started at <current time> - (<entry's t> * hourly_interval) and ended roughly at the time of sending
    :param json_rdd: Map in the format: (src IP , [ IPStats(packets, bytes, flows), ..., IPStats(packets, bytes, flows) ])
:param producer: producer that sends the data
:param topic: name of the receiving kafka topic
:return:
"""
for ip, ip_stats in json_rdd.iteritems():
stats_dict = dict()
for stat_idx in range(len(ip_stats)):
temporal_stats = {"packets": ip_stats[stat_idx].packets,
"bytes": ip_stats[stat_idx].bytes,
"flows": ip_stats[stat_idx].flows}
stats_dict[stat_idx] = temporal_stats
# construct the output object in predefined format
result_dict = {"@type": "host_stats_temporal_profile",
"src_ipv4": ip,
"stats": stats_dict}
# send the processed data in json form
send_to_kafka(json.dumps(result_dict)+"\n", producer, topic)
# logging terminal output
print("%s: Stats of %s IPs parsed and sent" % (time.strftime("%c"), len(json_rdd.keys())))
# main computation methods:
def collect_hourly_stats(stats_json):
"""
    Performs an hourly aggregation on the input data, whose result is collected as items of the daily aggregation
    :param stats_json: DStream of stats in json format matching the output format of the host_stats.py application
    :type stats_json: DStream obtained from an initialized spark streaming context, with data in json format as in the host_stats application
"""
stats_windowed = stats_json.window(hourly_interval, hourly_interval)
stats_windowed_keyed = stats_windowed.map(lambda json_rdd: (json_rdd["src_ipv4"],
(json_rdd["stats"]["total"]["packets"],
json_rdd["stats"]["total"]["bytes"],
json_rdd["stats"]["total"]["flow"])
))
ip_stats_summed = stats_windowed_keyed.reduceByKey(lambda current, update: (current[0] + update[0],
current[1] + update[1],
current[2] + update[2]))
ip_stats_objected = ip_stats_summed.mapValues(lambda summed_values: (StatsItem(*summed_values), INCREMENT))
return ip_stats_objected
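# Example (hypothetical input): two records for "10.0.0.1" with totals
# (10, 1000, 1) and (5, 500, 1) reduce to
# ("10.0.0.1", (StatsItem(packets=15, bytes=1500, flows=2), INCREMENT)).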
def collect_daily_stats(hourly_stats):
"""
Aggregation of the time stats of _small_window_data_ in a tuple format (data, timestamp) into a log vector
in format [data_t_n, data_t_n-1, ... , data_t_n-k] containing the entries of the most k recent
_small_window_data_ rdd-s where k = time_dimension (= daily_interval/hourly_interval)
:param hourly_stats: _hourly_stats_ aggregated in hourly_interval window
"""
global INCREMENT
    # set a window of daily_interval on small-windowed RDDs
long_window_base = hourly_stats.window(daily_interval, hourly_interval)
    # Debug print - see how recent incoming RDDs are transformed after each hourly_interval
    # long_window_debug = long_window_base.map(lambda rdd: {"ip": rdd[0],
# "rdd_timestamp": rdd[1][1],
# "current_inc": INCREMENT,
# "mod_pos": modulate_position(int(rdd[1][1])),
# "value": rdd[1][0]})
# long_window_debug.pprint()
# Here, RDDs of small window in format (IP: (data, timestamp)) are mapped into sparse vector=[0, 0, .. , volume, 0]
# where vector has a size of time_dimension and data inserted on modulated position (see modulate_position())
# then sparse vectors are combined/merged: "summing-up" nonzero positions (see merge_init_arrays())
long_window_data_stream = long_window_base.map(lambda rdd: (rdd[0], initialize_array(rdd[1][0], rdd[1][1]))) \
.reduceByKey(lambda current, update: merge_init_arrays(current, update))
# position counter is consistent with small window length cycle - here increments on each new data from hourly_stats
long_window_data_stream.reduce(lambda current, update: 1).foreachRDD(lambda rdd: increment())
# Debug print of target temporal arrays in interval of a small window
long_window_data_stream.pprint(5)
# return the temporal arrays windowed in a daily interval
return long_window_data_stream.window(hourly_interval, daily_interval)
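# Example cadence (hypothetical defaults): with hourly_interval = 3600 and
# daily_interval = 86400, time_dimension is 24, so each host's temporal array
# holds 24 hourly StatsItem slots and one full array per host is emitted daily.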
if __name__ == "__main__":
# Prepare arguments parser (automatically creates -h argument).
parser = argparse.ArgumentParser()
parser.add_argument("-iz", "--input_zookeeper", help="input zookeeper hostname:port", type=str, required=True)
parser.add_argument("-it", "--input_topic", help="input kafka topic", type=str, required=True)
parser.add_argument("-oz", "--output_zookeeper", help="output zookeeper hostname:port", type=str, required=True)
parser.add_argument("-ot", "--output_topic", help="output kafka topic", type=str, required=True)
parser.add_argument("-sw", "--short_window", help="small window duration", type=int, required=False)
parser.add_argument("-lw", "--long_window", help="long window duration", type=int, required=False)<|fim▁hole|>
# if input arguments are filled, override the default temporal values
if args.short_window and args.long_window:
hourly_interval = args.short_window
daily_interval = args.long_window
time_dimension = daily_interval / hourly_interval # set a number of aggregation entries in temporal array
print("Time dimension set to %s" % time_dimension)
# Set variables
application_name = os.path.basename(sys.argv[0]) # Application name used as identifier
kafka_partitions = 1 # Number of partitions of the input Kafka topic
# Spark context initialization
sc = SparkContext(appName=application_name + " " + " ".join(sys.argv[1:])) # Application name used as the appName
ssc = StreamingContext(sc, 1) # Spark micro batch is 1 second
# Initialize input DStream of flows from specified Zookeeper server and Kafka topic
input_stream = KafkaUtils.createStream(ssc, args.input_zookeeper, "spark-consumer-" + application_name,
{args.input_topic: kafka_partitions})
# Parse flows in the JSON format
input_stream_json = input_stream.map(lambda x: json.loads(x[1]))
# Process data to the defined function.
hourly_host_statistics = collect_hourly_stats(input_stream_json)
daily_host_statistics = collect_daily_stats(hourly_host_statistics)
kafka_producer = KafkaProducer(bootstrap_servers=args.output_zookeeper,
client_id="spark-producer-" + application_name)
# Transform computed statistics into desired json format and send it to output_host as given in -oh input param
daily_host_statistics.foreachRDD(lambda rdd: process_results(rdd.collectAsMap(), kafka_producer, args.output_topic))
# drop the processed RDDs to balance the memory usage
daily_host_statistics.foreachRDD(lambda rdd: rdd.unpersist())
# Start input data processing
ssc.start()
ssc.awaitTermination()<|fim▁end|> |
# Parse arguments.
args = parser.parse_args() |
<|file_name|>crvserver_crpolicy_binding.py<|end_file_name|><|fim▁begin|>#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class crvserver_crpolicy_binding(base_resource) :
""" Binding class showing the crpolicy that can be bound to crvserver.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._hits = 0
self._name = ""
self._targetvserver = ""
self.___count = 0
@property
def priority(self) :
ur"""The priority for the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""The priority for the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def policyname(self) :
ur"""Policies bound to this vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""Policies bound to this vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def name(self) :
ur"""Name of the cache redirection virtual server to which to bind the cache redirection policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
<|fim▁hole|> """
try :
self._name = name
except Exception as e:
raise e
@property
def targetvserver(self) :
ur"""Name of the virtual server to which content is forwarded. Applicable only if the policy is a map policy and the cache redirection virtual server is of type REVERSE.
"""
try :
return self._targetvserver
except Exception as e:
raise e
@targetvserver.setter
def targetvserver(self, targetvserver) :
ur"""Name of the virtual server to which content is forwarded. Applicable only if the policy is a map policy and the cache redirection virtual server is of type REVERSE.
"""
try :
self._targetvserver = targetvserver
except Exception as e:
raise e
@property
def hits(self) :
ur"""Number of hits.
"""
try :
return self._hits
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(crvserver_crpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.crvserver_crpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = crvserver_crpolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.targetvserver = resource.targetvserver
updateresource.priority = resource.priority
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [crvserver_crpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].targetvserver = resource[i].targetvserver
updateresources[i].priority = resource[i].priority
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = crvserver_crpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [crvserver_crpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch crvserver_crpolicy_binding resources.
"""
try :
obj = crvserver_crpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of crvserver_crpolicy_binding resources.
		Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = crvserver_crpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count crvserver_crpolicy_binding resources configued on NetScaler.
"""
try :
obj = crvserver_crpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of crvserver_crpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = crvserver_crpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class crvserver_crpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.crvserver_crpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.crvserver_crpolicy_binding = [crvserver_crpolicy_binding() for _ in range(length)]<|fim▁end|> | @name.setter
def name(self, name) :
ur"""Name of the cache redirection virtual server to which to bind the cache redirection policy.<br/>Minimum length = 1 |
<|file_name|>test_profiler.py<|end_file_name|><|fim▁begin|># Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import datetime
import re
import mock
import six
from osprofiler import profiler
from osprofiler.tests import test
class ProfilerGlobMethodsTestCase(test.TestCase):
def test_get_profiler_not_inited(self):
profiler.clean()
self.assertIsNone(profiler.get())
def test_get_profiler_and_init(self):
p = profiler.init("secret", base_id="1", parent_id="2")
self.assertEqual(profiler.get(), p)
self.assertEqual(p.get_base_id(), "1")
        # NOTE(boris-42): until we make the first start() we don't have a
        # trace_id yet, so get_id() falls back to the parent id
self.assertEqual(p.get_id(), "2")
def test_start_not_inited(self):
profiler.clean()
profiler.start("name")
def test_start(self):
p = profiler.init("secret", base_id="1", parent_id="2")
p.start = mock.MagicMock()
profiler.start("name", info="info")
p.start.assert_called_once_with("name", info="info")
def test_stop_not_inited(self):
profiler.clean()
profiler.stop()
def test_stop(self):
p = profiler.init("secret", base_id="1", parent_id="2")
p.stop = mock.MagicMock()
profiler.stop(info="info")
p.stop.assert_called_once_with(info="info")
class ProfilerTestCase(test.TestCase):
def test_profiler_get_shorten_id(self):
uuid_id = "4e3e0ec6-2938-40b1-8504-09eb1d4b0dee"
prof = profiler._Profiler("secret", base_id="1", parent_id="2")
result = prof.get_shorten_id(uuid_id)
expected = "850409eb1d4b0dee"
self.assertEqual(expected, result)
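        # The shortened id is the last 16 hex digits (64 bits) of the input
        # UUID: "...8504-09eb1d4b0dee" -> "850409eb1d4b0dee".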
def test_profiler_get_shorten_id_int(self):
short_id_int = 42
prof = profiler._Profiler("secret", base_id="1", parent_id="2")
result = prof.get_shorten_id(short_id_int)
expected = "2a"
self.assertEqual(expected, result)
def test_profiler_get_base_id(self):
prof = profiler._Profiler("secret", base_id="1", parent_id="2")
self.assertEqual(prof.get_base_id(), "1")
@mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
def test_profiler_get_parent_id(self, mock_generate_uuid):
mock_generate_uuid.return_value = "42"
prof = profiler._Profiler("secret", base_id="1", parent_id="2")
prof.start("test")
self.assertEqual(prof.get_parent_id(), "2")
@mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
def test_profiler_get_base_id_unset_case(self, mock_generate_uuid):
mock_generate_uuid.return_value = "42"
prof = profiler._Profiler("secret")
self.assertEqual(prof.get_base_id(), "42")
self.assertEqual(prof.get_parent_id(), "42")
@mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
def test_profiler_get_id(self, mock_generate_uuid):
mock_generate_uuid.return_value = "43"
prof = profiler._Profiler("secret")
prof.start("test")
self.assertEqual(prof.get_id(), "43")
@mock.patch("osprofiler.profiler.datetime")
@mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
@mock.patch("osprofiler.profiler.notifier.notify")
def test_profiler_start(self, mock_notify, mock_generate_uuid,
mock_datetime):
mock_generate_uuid.return_value = "44"
now = datetime.datetime.utcnow()
mock_datetime.datetime.utcnow.return_value = now
info = {"some": "info"}
payload = {
"name": "test-start",
"base_id": "1",
"parent_id": "2",
"trace_id": "44",
"info": info,
"timestamp": now.strftime("%Y-%m-%dT%H:%M:%S.%f"),
}
prof = profiler._Profiler("secret", base_id="1", parent_id="2")
prof.start("test", info=info)
mock_notify.assert_called_once_with(payload)
@mock.patch("osprofiler.profiler.datetime")
@mock.patch("osprofiler.profiler.notifier.notify")
def test_profiler_stop(self, mock_notify, mock_datetime):
now = datetime.datetime.utcnow()
mock_datetime.datetime.utcnow.return_value = now
prof = profiler._Profiler("secret", base_id="1", parent_id="2")
prof._trace_stack.append("44")
prof._name.append("abc")
info = {"some": "info"}
prof.stop(info=info)
payload = {
"name": "abc-stop",
"base_id": "1",
"parent_id": "2",
"trace_id": "44",
"info": info,
"timestamp": now.strftime("%Y-%m-%dT%H:%M:%S.%f"),
}
mock_notify.assert_called_once_with(payload)
self.assertEqual(len(prof._name), 0)
self.assertEqual(prof._trace_stack, collections.deque(["1", "2"]))
def test_profiler_hmac(self):
hmac = "secret"
prof = profiler._Profiler(hmac, base_id="1", parent_id="2")
self.assertEqual(hmac, prof.hmac_key)
class WithTraceTestCase(test.TestCase):
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_with_trace(self, mock_start, mock_stop):
with profiler.Trace("a", info="a1"):
mock_start.assert_called_once_with("a", info="a1")
mock_start.reset_mock()
with profiler.Trace("b", info="b1"):
mock_start.assert_called_once_with("b", info="b1")
mock_stop.assert_called_once_with()
mock_stop.reset_mock()
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_with_trace_etype(self, mock_start, mock_stop):
def foo():
with profiler.Trace("foo"):
raise ValueError("bar")
self.assertRaises(ValueError, foo)
mock_start.assert_called_once_with("foo", info=None)
mock_stop.assert_called_once_with(info={
"etype": "ValueError",
"message": "bar"
})
@profiler.trace("function", info={"info": "some_info"})
def traced_func(i):
return i
@profiler.trace("hide_args", hide_args=True)
def trace_hide_args_func(a, i=10):
return (a, i)
@profiler.trace("foo", hide_args=True)
def test_fn_exc():
raise ValueError()
@profiler.trace("hide_result", hide_result=False)
def trace_with_result_func(a, i=10):
return (a, i)
class TraceDecoratorTestCase(test.TestCase):
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_duplicate_trace_disallow(self, mock_start, mock_stop):
@profiler.trace("test")
def trace_me():
pass
self.assertRaises(
ValueError,
profiler.trace("test-again", allow_multiple_trace=False),
trace_me)
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_with_args(self, mock_start, mock_stop):
self.assertEqual(1, traced_func(1))
expected_info = {
"info": "some_info",
"function": {
"name": "osprofiler.tests.unit.test_profiler.traced_func",
"args": str((1,)),
"kwargs": str({})
}
}
mock_start.assert_called_once_with("function", info=expected_info)
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_without_args(self, mock_start, mock_stop):
self.assertEqual((1, 2), trace_hide_args_func(1, i=2))
expected_info = {
"function": {
"name": "osprofiler.tests.unit.test_profiler"
".trace_hide_args_func"
}
}
mock_start.assert_called_once_with("hide_args", info=expected_info)
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_with_exception(self, mock_start, mock_stop):
self.assertRaises(ValueError, test_fn_exc)
expected_info = {
"function": {
"name": "osprofiler.tests.unit.test_profiler.test_fn_exc"
}
}
expected_stop_info = {"etype": "ValueError", "message": ""}
mock_start.assert_called_once_with("foo", info=expected_info)
mock_stop.assert_called_once_with(info=expected_stop_info)
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_with_result(self, mock_start, mock_stop):
self.assertEqual((1, 2), trace_with_result_func(1, i=2))
start_info = {
"function": {
"name": "osprofiler.tests.unit.test_profiler"
".trace_with_result_func",
"args": str((1,)),
"kwargs": str({"i": 2})
}
}
stop_info = {
"function": {
"result": str((1, 2))
}
}
mock_start.assert_called_once_with("hide_result", info=start_info)
mock_stop.assert_called_once_with(info=stop_info)
class FakeTracedCls(object):
def method1(self, a, b, c=10):
return a + b + c
def method2(self, d, e):
return d - e
def method3(self, g=10, h=20):
return g * h
def _method(self, i):
return i
@profiler.trace_cls("rpc", info={"a": 10})
class FakeTraceClassWithInfo(FakeTracedCls):
pass
@profiler.trace_cls("a", info={"b": 20}, hide_args=True)
class FakeTraceClassHideArgs(FakeTracedCls):
pass
@profiler.trace_cls("rpc", trace_private=True)
class FakeTracePrivate(FakeTracedCls):
pass
class FakeTraceStaticMethodBase(FakeTracedCls):
@staticmethod
def static_method(arg):
return arg
@profiler.trace_cls("rpc", trace_static_methods=True)
class FakeTraceStaticMethod(FakeTraceStaticMethodBase):
pass
@profiler.trace_cls("rpc")
class FakeTraceStaticMethodSkip(FakeTraceStaticMethodBase):
pass
class FakeTraceClassMethodBase(FakeTracedCls):
@classmethod
def class_method(cls, arg):
return arg
@profiler.trace_cls("rpc")
class FakeTraceClassMethodSkip(FakeTraceClassMethodBase):
pass
def py3_info(info):
# NOTE(boris-42): py33 I hate you.
info_py3 = copy.deepcopy(info)
new_name = re.sub("FakeTrace[^.]*", "FakeTracedCls",
info_py3["function"]["name"])
info_py3["function"]["name"] = new_name
return info_py3
def possible_mock_calls(name, info):
# NOTE(boris-42): py33 I hate you.
return [mock.call(name, info=info), mock.call(name, info=py3_info(info))]
class TraceClsDecoratorTestCase(test.TestCase):
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_args(self, mock_start, mock_stop):
fake_cls = FakeTraceClassWithInfo()
self.assertEqual(30, fake_cls.method1(5, 15))
expected_info = {
"a": 10,
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTraceClassWithInfo.method1"),
"args": str((fake_cls, 5, 15)),
"kwargs": str({})
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("rpc", expected_info))
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_kwargs(self, mock_start, mock_stop):
fake_cls = FakeTraceClassWithInfo()
self.assertEqual(50, fake_cls.method3(g=5, h=10))
expected_info = {
"a": 10,
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTraceClassWithInfo.method3"),
"args": str((fake_cls,)),
"kwargs": str({"g": 5, "h": 10})
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("rpc", expected_info))
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_without_private(self, mock_start, mock_stop):
fake_cls = FakeTraceClassHideArgs()
self.assertEqual(10, fake_cls._method(10))
self.assertFalse(mock_start.called)
self.assertFalse(mock_stop.called)
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_without_args(self, mock_start, mock_stop):
fake_cls = FakeTraceClassHideArgs()
self.assertEqual(40, fake_cls.method1(5, 15, c=20))
expected_info = {
"b": 20,
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTraceClassHideArgs.method1"),
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("a", expected_info))
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_private_methods(self, mock_start, mock_stop):
fake_cls = FakeTracePrivate()
self.assertEqual(5, fake_cls._method(5))
expected_info = {
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTracePrivate._method"),
"args": str((fake_cls, 5)),
"kwargs": str({})
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("rpc", expected_info))
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
@test.testcase.skip(
"Static method tracing was disabled due the bug. This test should be "
"skipped until we find the way to address it.")
def test_static(self, mock_start, mock_stop):
fake_cls = FakeTraceStaticMethod()
self.assertEqual(25, fake_cls.static_method(25))
expected_info = {
"function": {
# fixme(boris-42): Static methods are treated differently in
# Python 2.x and Python 3.x. So in PY2 we
# expect to see method4 because method is
# static and doesn't have reference to class
# - and FakeTraceStatic.method4 in PY3
"name":
"osprofiler.tests.unit.test_profiler"
".method4" if six.PY2 else
"osprofiler.tests.unit.test_profiler.FakeTraceStatic"
".method4",
"args": str((25,)),
"kwargs": str({})
}
}
<|fim▁hole|>
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_static_method_skip(self, mock_start, mock_stop):
self.assertEqual(25, FakeTraceStaticMethodSkip.static_method(25))
self.assertFalse(mock_start.called)
self.assertFalse(mock_stop.called)
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_class_method_skip(self, mock_start, mock_stop):
self.assertEqual("foo", FakeTraceClassMethodSkip.class_method("foo"))
self.assertFalse(mock_start.called)
self.assertFalse(mock_stop.called)
@six.add_metaclass(profiler.TracedMeta)
class FakeTraceWithMetaclassBase(object):
__trace_args__ = {"name": "rpc",
"info": {"a": 10}}
def method1(self, a, b, c=10):
return a + b + c
def method2(self, d, e):
return d - e
def method3(self, g=10, h=20):
return g * h
def _method(self, i):
return i
class FakeTraceDummy(FakeTraceWithMetaclassBase):
def method4(self, j):
return j
class FakeTraceWithMetaclassHideArgs(FakeTraceWithMetaclassBase):
__trace_args__ = {"name": "a",
"info": {"b": 20},
"hide_args": True}
def method5(self, k, l):
return k + l
class FakeTraceWithMetaclassPrivate(FakeTraceWithMetaclassBase):
__trace_args__ = {"name": "rpc",
"trace_private": True}
def _new_private_method(self, m):
return 2 * m
class TraceWithMetaclassTestCase(test.TestCase):
def test_no_name_exception(self):
def define_class_with_no_name():
@six.add_metaclass(profiler.TracedMeta)
class FakeTraceWithMetaclassNoName(FakeTracedCls):
pass
self.assertRaises(TypeError, define_class_with_no_name, 1)
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_args(self, mock_start, mock_stop):
fake_cls = FakeTraceWithMetaclassBase()
self.assertEqual(30, fake_cls.method1(5, 15))
expected_info = {
"a": 10,
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTraceWithMetaclassBase.method1"),
"args": str((fake_cls, 5, 15)),
"kwargs": str({})
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("rpc", expected_info))
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_kwargs(self, mock_start, mock_stop):
fake_cls = FakeTraceWithMetaclassBase()
self.assertEqual(50, fake_cls.method3(g=5, h=10))
expected_info = {
"a": 10,
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTraceWithMetaclassBase.method3"),
"args": str((fake_cls,)),
"kwargs": str({"g": 5, "h": 10})
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("rpc", expected_info))
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_without_private(self, mock_start, mock_stop):
fake_cls = FakeTraceWithMetaclassHideArgs()
self.assertEqual(10, fake_cls._method(10))
self.assertFalse(mock_start.called)
self.assertFalse(mock_stop.called)
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_without_args(self, mock_start, mock_stop):
fake_cls = FakeTraceWithMetaclassHideArgs()
self.assertEqual(20, fake_cls.method5(5, 15))
expected_info = {
"b": 20,
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTraceWithMetaclassHideArgs.method5")
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("a", expected_info))
mock_stop.assert_called_once_with()
@mock.patch("osprofiler.profiler.stop")
@mock.patch("osprofiler.profiler.start")
def test_private_methods(self, mock_start, mock_stop):
fake_cls = FakeTraceWithMetaclassPrivate()
self.assertEqual(10, fake_cls._new_private_method(5))
expected_info = {
"function": {
"name": ("osprofiler.tests.unit.test_profiler"
".FakeTraceWithMetaclassPrivate._new_private_method"),
"args": str((fake_cls, 5)),
"kwargs": str({})
}
}
self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("rpc", expected_info))
mock_stop.assert_called_once_with()<|fim▁end|> | self.assertEqual(1, len(mock_start.call_args_list))
self.assertIn(mock_start.call_args_list[0],
possible_mock_calls("rpc", expected_info))
mock_stop.assert_called_once_with() |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|># Copyright 2013, Big Switch Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators
from openstack_dashboard import api
port_validator = validators.validate_port_or_colon_separated_port_range
LOG = logging.getLogger(__name__)
class UpdateRule(forms.SelfHandlingForm):
name = forms.CharField(max_length=80, label=_("Name"), required=False)
description = forms.CharField(
required=False,
max_length=80, label=_("Description"))
protocol = forms.ChoiceField(
label=_("Protocol"), required=False,
choices=[('TCP', _('TCP')), ('UDP', _('UDP')), ('ICMP', _('ICMP')),
('ANY', _('ANY'))],
help_text=_('Protocol for the firewall rule'))
action = forms.ChoiceField(
label=_("Action"), required=False,
choices=[('ALLOW', _('ALLOW')), ('DENY', _('DENY'))],
help_text=_('Action for the firewall rule'))
source_ip_address = forms.IPField(
label=_("Source IP Address/Subnet"),
version=forms.IPv4 | forms.IPv6,
required=False, mask=True,
help_text=_('Source IP address or subnet'))
destination_ip_address = forms.IPField(
label=_('Destination IP Address/Subnet'),
version=forms.IPv4 | forms.IPv6,
required=False, mask=True,
help_text=_('Destination IP address or subnet'))
source_port = forms.CharField(
max_length=80,
label=_("Source Port/Port Range"),
required=False,
validators=[port_validator],
help_text=_('Source port (integer in [1, 65535] or range in a:b)'))
destination_port = forms.CharField(
max_length=80,
label=_("Destination Port/Port Range"),
required=False,
validators=[port_validator],
help_text=_('Destination port (integer in [1, 65535] or range'
' in a:b)'))
shared = forms.BooleanField(label=_("Shared"), required=False)
enabled = forms.BooleanField(label=_("Enabled"), required=False)
failure_url = 'horizon:project:firewalls:index'
def handle(self, request, context):
rule_id = self.initial['rule_id']
name_or_id = context.get('name') or rule_id
if context['protocol'] == 'ANY':
context['protocol'] = None
for f in ['source_ip_address', 'destination_ip_address',
'source_port', 'destination_port']:
if not context[f]:
context[f] = None
try:
rule = api.fwaas.rule_update(request, rule_id, **context)
msg = _('Rule %s was successfully updated.') % name_or_id
LOG.debug(msg)
messages.success(request, msg)
return rule
except Exception as e:
msg = (_('Failed to update rule %(name)s: %(reason)s') %
{'name': name_or_id, 'reason': e})
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class UpdatePolicy(forms.SelfHandlingForm):
name = forms.CharField(max_length=80, label=_("Name"), required=False)
description = forms.CharField(required=False,
max_length=80, label=_("Description"))
shared = forms.BooleanField(label=_("Shared"), required=False)
audited = forms.BooleanField(label=_("Audited"), required=False)
failure_url = 'horizon:project:firewalls:index'
def handle(self, request, context):
policy_id = self.initial['policy_id']
name_or_id = context.get('name') or policy_id
try:
policy = api.fwaas.policy_update(request, policy_id, **context)
msg = _('Policy %s was successfully updated.') % name_or_id
LOG.debug(msg)
messages.success(request, msg)
return policy
except Exception as e:
msg = _('Failed to update policy %(name)s: %(reason)s') % {
'name': name_or_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class UpdateFirewall(forms.SelfHandlingForm):
name = forms.CharField(max_length=80,
label=_("Name"),
required=False)
description = forms.CharField(max_length=80,
label=_("Description"),
required=False)
firewall_policy_id = forms.ChoiceField(label=_("Policy"))
admin_state_up = forms.ChoiceField(choices=[(True, _('UP')),
(False, _('DOWN'))],
label=_("Admin State"))
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(UpdateFirewall, self).__init__(request, *args, **kwargs)
try:
tenant_id = self.request.user.tenant_id
policies = api.fwaas.policy_list_for_tenant(request, tenant_id)
policies = sorted(policies, key=lambda policy: policy.name)
except Exception:
exceptions.handle(request,
_('Unable to retrieve policy list.'))
policies = []
policy_id = kwargs['initial']['firewall_policy_id']
policy_name = [p.name for p in policies if p.id == policy_id][0]
firewall_policy_id_choices = [(policy_id, policy_name)]
for p in policies:
if p.id != policy_id:
firewall_policy_id_choices.append((p.id, p.name_or_id))
self.fields['firewall_policy_id'].choices = firewall_policy_id_choices
def handle(self, request, context):
firewall_id = self.initial['firewall_id']
name_or_id = context.get('name') or firewall_id
context['admin_state_up'] = (context['admin_state_up'] == 'True')
try:
firewall = api.fwaas.firewall_update(request, firewall_id,
**context)
msg = _('Firewall %s was successfully updated.') % name_or_id
LOG.debug(msg)
messages.success(request, msg)
return firewall
except Exception as e:
msg = _('Failed to update firewall %(name)s: %(reason)s') % {
'name': name_or_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class InsertRuleToPolicy(forms.SelfHandlingForm):
firewall_rule_id = forms.ChoiceField(label=_("Insert Rule"))
insert_before = forms.ChoiceField(label=_("Before"),
required=False)
insert_after = forms.ChoiceField(label=_("After"),
required=False)
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(InsertRuleToPolicy, self).__init__(request, *args, **kwargs)
try:
tenant_id = self.request.user.tenant_id
all_rules = api.fwaas.rule_list_for_tenant(request, tenant_id)
all_rules = sorted(all_rules, key=lambda rule: rule.name_or_id)
available_rules = [r for r in all_rules
if not r.firewall_policy_id]
current_rules = []
for r in kwargs['initial']['firewall_rules']:
r_obj = [rule for rule in all_rules if r == rule.id][0]
current_rules.append(r_obj)
available_choices = [(r.id, r.name_or_id) for r in available_rules]
current_choices = [(r.id, r.name_or_id) for r in current_rules]
except Exception as e:
msg = _('Failed to retrieve available rules: %s') % e
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
self.fields['firewall_rule_id'].choices = available_choices
self.fields['insert_before'].choices = [('', '')] + current_choices
self.fields['insert_after'].choices = [('', '')] + current_choices
def handle(self, request, context):
policy_id = self.initial['policy_id']
policy_name_or_id = self.initial['name'] or policy_id
try:
insert_rule_id = context['firewall_rule_id']
insert_rule = api.fwaas.rule_get(request, insert_rule_id)
body = {'firewall_rule_id': insert_rule_id,
'insert_before': context['insert_before'],
'insert_after': context['insert_after']}
policy = api.fwaas.policy_insert_rule(request, policy_id, **body)
msg = _('Rule %(rule)s was successfully inserted to policy '
'%(policy)s.') % {
'rule': insert_rule.name or insert_rule.id,
'policy': policy_name_or_id}
LOG.debug(msg)
messages.success(request, msg)
return policy
except Exception as e:
msg = _('Failed to insert rule to policy %(name)s: %(reason)s') % {
'name': policy_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class RemoveRuleFromPolicy(forms.SelfHandlingForm):
firewall_rule_id = forms.ChoiceField(label=_("Remove Rule"))
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(RemoveRuleFromPolicy, self).__init__(request, *args, **kwargs)
try:
tenant_id = request.user.tenant_id
all_rules = api.fwaas.rule_list_for_tenant(request, tenant_id)
current_rules = []
for r in kwargs['initial']['firewall_rules']:
r_obj = [rule for rule in all_rules if r == rule.id][0]
current_rules.append(r_obj)
current_choices = [(r.id, r.name_or_id) for r in current_rules]
except Exception as e:
msg = _('Failed to retrieve current rules in policy %(name)s: '
'%(reason)s') % {'name': self.initial['name'], 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
self.fields['firewall_rule_id'].choices = current_choices
def handle(self, request, context):
policy_id = self.initial['policy_id']
policy_name_or_id = self.initial['name'] or policy_id
try:
remove_rule_id = context['firewall_rule_id']
remove_rule = api.fwaas.rule_get(request, remove_rule_id)
body = {'firewall_rule_id': remove_rule_id}
policy = api.fwaas.policy_remove_rule(request, policy_id, **body)
msg = _('Rule %(rule)s was successfully removed from policy '
'%(policy)s.') % {
'rule': remove_rule.name or remove_rule.id,
'policy': policy_name_or_id}
LOG.debug(msg)
messages.success(request, msg)
return policy
except Exception as e:
msg = _('Failed to remove rule from policy %(name)s: '
'%(reason)s') % {'name': self.initial['name'],
'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class RouterInsertionFormBase(forms.SelfHandlingForm):
def __init__(self, request, *args, **kwargs):
super(RouterInsertionFormBase, self).__init__(request, *args, **kwargs)
try:
router_choices = self.get_router_choices(request, kwargs)
self.fields['router_ids'].choices = router_choices
except Exception as e:
msg = self.init_failure_msg % {'name': self.initial['name'],
'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
@abc.abstractmethod
def get_router_choices(self, request, kwargs):
"""Return a list of selectable routers."""
@abc.abstractmethod
def get_new_router_ids(self, context):
"""Return a new list of router IDs associated with the firewall."""
def handle(self, request, context):
firewall_id = self.initial['firewall_id']
firewall_name_or_id = self.initial['name'] or firewall_id
try:
body = {'router_ids': self.get_new_router_ids(context)}
firewall = api.fwaas.firewall_update(request, firewall_id, **body)
msg = self.success_msg % {'firewall': firewall_name_or_id}
LOG.debug(msg)
messages.success(request, msg)
return firewall
except Exception as e:
msg = self.failure_msg % {'name': firewall_name_or_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class AddRouterToFirewall(RouterInsertionFormBase):
router_ids = forms.MultipleChoiceField(
label=_("Add Routers"),
required=False,
widget=forms.CheckboxSelectMultiple(),
help_text=_("Add selected router(s) to the firewall."))
failure_url = 'horizon:project:firewalls:index'
success_msg = _('Router(s) was/were successfully added to firewall '
'%(firewall)s.')
failure_msg = _('Failed to add router(s) to firewall %(name)s: %(reason)s')
init_failure_msg = _('Failed to retrieve available routers: %(reason)s')
def get_router_choices(self, request, kwargs):
tenant_id = self.request.user.tenant_id
routers_list = api.fwaas.firewall_unassociated_routers_list(
request, tenant_id)
return [(r.id, r.name_or_id) for r in routers_list]
def get_new_router_ids(self, context):
existing_router_ids = self.initial['router_ids']
add_router_ids = context['router_ids']
return add_router_ids + existing_router_ids
class RemoveRouterFromFirewall(RouterInsertionFormBase):<|fim▁hole|> router_ids = forms.MultipleChoiceField(
label=_("Remove Routers"),
required=False,
widget=forms.CheckboxSelectMultiple(),
help_text=_("Unselect the router(s) to be removed from firewall."))
failure_url = 'horizon:project:firewalls:index'
success_msg = _('Router(s) was successfully removed from firewall '
'%(firewall)s.')
failure_msg = _('Failed to remove router(s) from firewall %(name)s: '
'%(reason)s')
init_failure_msg = _('Failed to retrieve current routers in firewall '
'%(name)s: %(reason)s')
def get_router_choices(self, request, kwargs):
tenant_id = self.request.user.tenant_id
all_routers = api.neutron.router_list(request, tenant_id=tenant_id)
current_routers = [r for r in all_routers
if r['id'] in kwargs['initial']['router_ids']]
return [(r.id, r.name_or_id) for r in current_routers]
def get_new_router_ids(self, context):
# context[router_ids] is router IDs to be kept.
return context['router_ids']<|fim▁end|> | |
<|file_name|>0011_auto_20150702_1812.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('postcode_api', '0010_auto_20150601_1513'),
]
operations = [<|fim▁hole|> ]<|fim▁end|> | migrations.AlterIndexTogether(
name='address',
index_together=set([('postcode_index', 'uprn')]),
), |
<|file_name|>account.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##
## test_account.py
## Login : <[email protected]>
## Started on Wed Feb 14 08:23:17 2007 David Rousselie
## $Id$
##
## Copyright (C) 2007 David Rousselie
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
import unittest
import thread
from jcl.tests import JCLTestCase
import jcl.model as model
from jcl.error import FieldError
from jcl.model.account import Account, PresenceAccount, User
import jmc.model.account
from jmc.model.account import MailAccount, POP3Account, IMAPAccount, \
GlobalSMTPAccount, AbstractSMTPAccount, SMTPAccount
from jmc.lang import Lang
from jcl.model.tests.account import Account_TestCase, \
PresenceAccount_TestCase, InheritableAccount_TestCase, \
ExampleAccount
from jmc.model.tests import email_generator, server
class AccountModule_TestCase(unittest.TestCase):
def test_validate_login_with_empty_login(self):
self.assertRaises(FieldError, jmc.model.account.validate_login,
None, None, None)
def test_validate_login_with_login_with_whitespace(self):
self.assertRaises(FieldError, jmc.model.account.validate_login,
"login with spaces", None, None)
def test_validate_host_with_empty_login(self):
self.assertRaises(FieldError, jmc.model.account.validate_host,
None, None, None)
def test_validate_host_with_host_with_whitespace(self):
self.assertRaises(FieldError, jmc.model.account.validate_host,
"host with spaces", None, None)
class MailAccount_TestCase(PresenceAccount_TestCase):
def setUp(self):
PresenceAccount_TestCase.setUp(self, tables=[MailAccount])
self.account = MailAccount(user=User(jid="[email protected]"),
name="account1",
jid="[email protected]")
self.account_class = MailAccount
def make_test(email_type, tested_func, expected_res):
def inner(self):
encoded, multipart, header = email_type
email = email_generator.generate(encoded,
multipart,
header)
part = tested_func(self, email)
self.assertEquals(part, expected_res)
return inner
test_get_decoded_part_not_encoded = \
make_test((False, False, False), \
lambda self, email: \
self.account.get_decoded_part(email, None),
u"Not encoded single part")
test_get_decoded_part_encoded = \
make_test((True, False, False),
lambda self, email: \
self.account.get_decoded_part(email, None),
u"Encoded single part with 'iso-8859-15' charset (éàê)")
test_format_message_summary_not_encoded = \
make_test((False, False, True),
lambda self, email: \
self.account.format_message_summary(email),
(u"From : not encoded from\nSubject : not encoded subject\n\n",
u"not encoded from"))
test_format_message_summary_encoded = \
make_test((True, False, True),
lambda self, email: \
self.account.format_message_summary(email),
(u"From : encoded from (éàê)\nSubject : encoded subject " + \
u"(éàê)\n\n",
u"encoded from (éàê)"))
test_format_message_summary_partial_encoded = \
make_test((True, False, True),
lambda self, email: \
email.replace_header("Subject",
"\" " + str(email["Subject"]) \
+ " \" not encoded part") or \
email.replace_header("From",
"\" " + str(email["From"]) \
+ " \" not encoded part") or \
self.account.format_message_summary(email),
(u"From : \"encoded from (éàê)\" not encoded part\nSubject " + \
u": \"encoded subject (éàê)\" not encoded part\n\n",
u"\"encoded from (éàê)\" not encoded part"))
test_format_message_single_not_encoded = \
make_test((False, False, True),
lambda self, email: \
self.account.format_message(email),
(u"From : not encoded from\nSubject : not encoded subject" + \
u"\n\nNot encoded single part\n",
u"not encoded from"))
test_format_message_single_encoded = \
make_test((True, False, True),
lambda self, email: \
self.account.format_message(email),
(u"From : encoded from (éàê)\nSubject : encoded subject " + \
u"(éàê)\n\nEncoded single part with 'iso-8859-15' charset" + \
u" (éàê)\n",
u"encoded from (éàê)"))
test_format_message_multi_not_encoded = \
make_test((False, True, True),
lambda self, email: \
self.account.format_message(email),
(u"From : not encoded from\nSubject : not encoded subject" + \
u"\n\nNot encoded multipart1\nNot encoded multipart2\n",
u"not encoded from"))
test_format_message_multi_encoded = \
make_test((True, True, True),
lambda self, email: \
self.account.format_message(email),
(u"From : encoded from (éàê)\nSubject : encoded subject (éà" + \
u"ê)\n\nutf-8 multipart1 with no charset (éàê)" + \
u"\nEncoded multipart2 with 'iso-8859-15' charset (éàê)\n" + \
u"Encoded multipart3 with no charset (éàê)\n",
u"encoded from (éàê)"))
def test_get_default_status_msg(self):
"""
Get default status message for MailAccount.
Should raise NotImplementedError because get_type() method
is not implemented
"""
try:
self.account.get_default_status_msg(Lang.en)
except NotImplementedError:
return
fail("No NotImplementedError raised")
class POP3Account_TestCase(InheritableAccount_TestCase):
def setUp(self):
JCLTestCase.setUp(self, tables=[Account, PresenceAccount, User,
MailAccount, POP3Account])
self.pop3_account = POP3Account(user=User(jid="[email protected]"),
name="account1",
jid="[email protected]",
login="login")
self.pop3_account.password = "pass"
self.pop3_account.host = "localhost"
self.pop3_account.port = 1110
self.pop3_account.ssl = False
model.db_disconnect()
self.account_class = POP3Account
def make_test(responses=None, queries=None, core=None):
def inner(self):
self.server = server.DummyServer("localhost", 1110)
thread.start_new_thread(self.server.serve, ())
self.server.responses = ["+OK connected\r\n",
"+OK name is a valid mailbox\r\n",
"+OK pass\r\n"]
if responses:
self.server.responses += responses
self.server.queries = ["USER login\r\n",
"PASS pass\r\n"]
if queries:
self.server.queries += queries
self.server.queries += ["QUIT\r\n"]
self.pop3_account.connect()
self.failUnless(self.pop3_account.connection,
"Cannot establish connection")
if core:
model.db_connect()
core(self)
model.db_disconnect()
self.pop3_account.disconnect()
self.failUnless(self.server.verify_queries(),
"Sended queries does not match expected queries.")
return inner
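    # make_test builds each test method as a closure: it starts a scripted
    # DummyServer, prepends the POP3 login handshake to the expected
    # responses/queries, runs the optional `core` assertions against the live
    # connection, and finally verifies the server saw exactly the expected
    # query sequence.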
test_connection = make_test
test_get_mail_list_summary = \
make_test(["+OK 2 20\r\n",
"+OK 10 octets\r\n" + \
"From: [email protected]\r\n" + \
"Subject: mail subject 1\r\n.\r\n",
"+OK 10 octets\r\n" + \
"From: [email protected]\r\n" + \
"Subject: mail subject 2\r\n.\r\n",
"+OK\r\n"],
["STAT\r\n",
"TOP 1 0\r\n",
"TOP 2 0\r\n",
"RSET\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_mail_list_summary(),
[("1", "mail subject 1"),
("2", "mail subject 2")]))
test_get_mail_list_summary_start_index = \
make_test(["+OK 3 30\r\n",
"+OK 10 octets\r\n" + \
"From: [email protected]\r\n" + \
"Subject: mail subject 2\r\n.\r\n",
"+OK 10 octets\r\n" + \
"From: [email protected]\r\n" + \
"Subject: mail subject 3\r\n.\r\n",
"+OK\r\n"],
["STAT\r\n",
"TOP 2 0\r\n",
"TOP 3 0\r\n",
"RSET\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_mail_list_summary(start_index=2),
[("2", "mail subject 2"),
("3", "mail subject 3")]))
test_get_mail_list_summary_end_index = \
make_test(["+OK 3 30\r\n",
"+OK 10 octets\r\n" + \
"From: [email protected]\r\n" + \
"Subject: mail subject 1\r\n.\r\n",
"+OK 10 octets\r\n" + \
"From: [email protected]\r\n" + \
"Subject: mail subject 2\r\n.\r\n",
"+OK\r\n"],
["STAT\r\n",
"TOP 1 0\r\n",
"TOP 2 0\r\n",
"RSET\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_mail_list_summary(end_index=2),
[("1", "mail subject 1"),
("2", "mail subject 2")]))
test_get_new_mail_list = \
make_test(["+OK 2 20\r\n"],
["STAT\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_new_mail_list(),
["1", "2"]))
test_get_mail_summary = \
make_test(["+OK 10 octets\r\n" + \
"From: [email protected]\r\n" + \
"Subject: subject test\r\n\r\n" + \
"mymessage\r\n.\r\n",
"+OK\r\n"],
["RETR 1\r\n",
"RSET\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_mail_summary(1),
(u"From : [email protected]\n" + \
u"Subject : subject test\n\n",
u"[email protected]")))
test_get_mail = \
make_test(["+OK 10 octets\r\n" + \
"From: [email protected]\r\n" + \
"Subject: subject test\r\n\r\n" + \
"mymessage\r\n.\r\n",
"+OK\r\n"],
["RETR 1\r\n",
"RSET\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_mail(1),
(u"From : [email protected]\n" + \
u"Subject : subject test\n\n" + \
u"mymessage\n",
u"[email protected]")))
test_unsupported_reset_command_get_mail_summary = \
make_test(["+OK 10 octets\r\n" + \
"From: [email protected]\r\n" + \
"Subject: subject test\r\n\r\n" + \
"mymessage\r\n.\r\n",
"-ERR unknown command\r\n"],
["RETR 1\r\n",
"RSET\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_mail_summary(1),
(u"From : [email protected]\n" + \
u"Subject : subject test\n\n",
u"[email protected]")))
test_unsupported_reset_command_get_mail = \
make_test(["+OK 10 octets\r\n" + \
"From: [email protected]\r\n" + \
"Subject: subject test\r\n\r\n" + \
"mymessage\r\n.\r\n",
"-ERR unknown command\r\n"],
["RETR 1\r\n",
"RSET\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_mail(1),
(u"From : [email protected]\n" + \
u"Subject : subject test\n\n" + \
u"mymessage\n",
u"[email protected]")))
def test_get_next_mail_index_empty(self):
"""
Test get_next_mail_index with empty mail_list parameter.
"""
mail_list = []
self.pop3_account.nb_mail = 0
self.pop3_account.lastmail = 0
result = []
for elt in self.pop3_account.get_next_mail_index(mail_list):
result.append(elt)
self.assertEquals(result, [])
def test_get_next_mail_index(self):
"""
Test get_next_mail_index first check.
"""
mail_list = [1, 2, 3, 4]
self.pop3_account.nb_mail = 4
self.pop3_account.lastmail = 0
result = []
for elt in self.pop3_account.get_next_mail_index(mail_list):
result.append(elt)
self.assertEquals(result, [1, 2, 3, 4])
self.assertEquals(self.pop3_account.lastmail, 4)
def test_get_next_mail_index_second_check(self):
"""
Test get_next_mail_index second check (no parallel checking).
"""
mail_list = [1, 2, 3, 4, 5, 6, 7, 8]
self.pop3_account.nb_mail = 8
self.pop3_account.lastmail = 4
result = []
for elt in self.pop3_account.get_next_mail_index(mail_list):
result.append(elt)
self.assertEquals(result, [5, 6, 7, 8])
self.assertEquals(self.pop3_account.lastmail, 8)
def test_get_next_mail_index_second_check_parallel_check(self):
"""
Test get_next_mail_index second check (with parallel checking
        but with no more new emails than the last index at which jmc stopped:
        3 new emails after another client checked the mailbox).
"""
mail_list = [1, 2, 3]
self.pop3_account.nb_mail = 3
self.pop3_account.lastmail = 4
result = []
for elt in self.pop3_account.get_next_mail_index(mail_list):
result.append(elt)
self.assertEquals(result, [1, 2, 3])
self.assertEquals(self.pop3_account.lastmail, 3)
def test_get_next_mail_index_second_check_bug_parallel_check(self):
"""
Test get_next_mail_index second check (with parallel checking
        but with more new emails than the last index at which jmc stopped:
        5 new emails after another client checked the mailbox). This case
        cannot be distinguished from a single new email since jmc's last check!
"""
mail_list = [1, 2, 3, 4, 5]
self.pop3_account.nb_mail = 5
self.pop3_account.lastmail = 4
result = []
for elt in self.pop3_account.get_next_mail_index(mail_list):
result.append(elt)
# with no bug it should be:
# self.assertEquals(result, [1, 2, 3, 4, 5])
self.assertEquals(result, [5])
self.assertEquals(self.pop3_account.lastmail, 5)
def test_get_default_status_msg(self):
"""
Get default status message for POP3Account.
"""
status_msg = self.pop3_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "pop3://login@localhost:1110")
def test_get_default_status_msg_ssl(self):
"""
Get default status message for SSL POP3Account.
"""
self.pop3_account.ssl = True
status_msg = self.pop3_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "pop3s://login@localhost:1110")
class IMAPAccount_TestCase(InheritableAccount_TestCase):
def setUp(self):
JCLTestCase.setUp(self, tables=[Account, PresenceAccount, User,
MailAccount, IMAPAccount])
self.imap_account = IMAPAccount(user=User(jid="[email protected]"),
name="account1",
jid="[email protected]",
login="login")
self.imap_account.password = "pass"
self.imap_account.host = "localhost"
self.imap_account.port = 1143
self.imap_account.ssl = False
self.account_class = IMAPAccount
def make_test(self, responses=None, queries=None, core=None):
def inner():
self.server = server.DummyServer("localhost", 1143)
thread.start_new_thread(self.server.serve, ())
self.server.responses = ["* OK [CAPABILITY IMAP4 LOGIN-REFERRALS " + \
"AUTH=PLAIN]\r\n", \
lambda data: "* CAPABILITY IMAP4 " + \
"LOGIN-REFERRALS AUTH=PLAIN\r\n" + \
data.split()[0] + \
" OK CAPABILITY completed\r\n", \
lambda data: data.split()[0] + \
" OK LOGIN completed\r\n"]
if responses:
self.server.responses += responses
self.server.queries = ["^[^ ]* CAPABILITY", \
"^[^ ]* LOGIN login \"pass\""]
if queries:
self.server.queries += queries
self.server.queries += ["^[^ ]* LOGOUT"]
if not self.imap_account.connected:
self.imap_account.connect()
self.failUnless(self.imap_account.connection, \
"Cannot establish connection")
if core:
model.db_connect()
core(self)
model.db_disconnect()
if self.imap_account.connected:
self.imap_account.disconnect()
self.failUnless(self.server.verify_queries())
return inner
def test_connection(self):
test_func = self.make_test()
test_func()
def test_get_mail_list_summary(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" +\
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" +\
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 1\r\n\r\nbody text\r\n)\r\n" + \
"* 2 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 2\r\n\r\nbody text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE INBOX",
"^[^ ]* FETCH 1:20 RFC822.header"],
lambda self: \
self.assertEquals(self.imap_account.get_mail_list_summary(),
[('1', 'mail subject 1'),
('2', 'mail subject 2')]))
test_func()
def test_get_mail_list_summary_inbox_does_not_exist(self):
self.__test_select_inbox_does_not_exist(\
lambda: self.imap_account.get_mail_list_summary(), readonly=True)
def test_get_mail_list_summary_start_index(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" +\
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" +\
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 2 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 2\r\n\r\nbody text\r\n)\r\n" + \
"* 3 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 3\r\n\r\nbody text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE INBOX",
"^[^ ]* FETCH 2:20 RFC822.header"],
lambda self: \
self.assertEquals(self.imap_account.get_mail_list_summary(start_index=2),
[('2', 'mail subject 2'),
('3', 'mail subject 3')]))
test_func()
def test_get_mail_list_summary_end_index(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" +\
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" +\
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 1\r\n\r\nbody text\r\n)\r\n" + \
"* 2 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 2\r\n\r\nbody text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE INBOX",
"^[^ ]* FETCH 1:2 RFC822.header"],
lambda self: \
self.assertEquals(self.imap_account.get_mail_list_summary(end_index=2),
[('1', 'mail subject 1'),
('2', 'mail subject 2')]))
test_func()
<|fim▁hole|> [lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* SEARCH 9 10\r\n" + \
data.split()[0] + " OK SEARCH completed\r\n"],
["^[^ ]* SELECT INBOX",
"^[^ ]* SEARCH RECENT"],
lambda self: \
self.assertEquals(self.imap_account.get_new_mail_list(),
['9', '10']))
test_func()
def __test_select_inbox_does_not_exist(self, tested_func,
exception_message="Mailbox does not exist",
readonly=False):
def check_func(self):
try:
tested_func()
except Exception, e:
self.assertEquals(str(e), exception_message)
return
self.fail("No exception raised when selecting non existing mailbox")
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" NO Mailbox does not exist\r\n"],
["^[^ ]* " + (readonly and "EXAMINE" or "SELECT") + " INBOX"],
check_func)
test_func()
def test_get_new_mail_list_inbox_does_not_exist(self):
self.__test_select_inbox_does_not_exist(\
            lambda: self.imap_account.get_new_mail_list())
def test_get_new_mail_list_delimiter1(self):
self.imap_account.mailbox = "INBOX/dir1/subdir2"
self.imap_account.delimiter = "."
test_func = self.make_test( \
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* SEARCH 9 10\r\n" + \
data.split()[0] + " OK SEARCH completed\r\n"],
["^[^ ]* SELECT \"?INBOX\.dir1\.subdir2\"?",
"^[^ ]* SEARCH RECENT"],
lambda self: \
self.assertEquals(self.imap_account.get_new_mail_list(),
['9', '10']))
test_func()
def test_get_new_mail_list_delimiter2(self):
self.imap_account.mailbox = "INBOX/dir1/subdir2"
self.imap_account.delimiter = "/"
test_func = self.make_test( \
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* SEARCH 9 10\r\n" + \
data.split()[0] + " OK SEARCH completed\r\n"],
["^[^ ]* SELECT \"?INBOX/dir1/subdir2\"?",
"^[^ ]* SEARCH RECENT"],
lambda self: \
self.assertEquals(self.imap_account.get_new_mail_list(),
['9', '10']))
test_func()
def test_get_mail_summary(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" +\
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" +\
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822) {12}\r\nbody" + \
" text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE INBOX",
"^[^ ]* FETCH 1 \(RFC822.header\)"],
lambda self: \
self.assertEquals(self.imap_account.get_mail_summary(1),
(u"From : None\nSubject : None\n\n",
u"None")))
test_func()
def test_get_mail_summary_inbox_does_not_exist(self):
self.__test_select_inbox_does_not_exist(\
lambda: self.imap_account.get_mail_summary(1),
"Mailbox does not exist (email 1)", True)
def test_get_mail_summary_delimiter(self):
self.imap_account.mailbox = "INBOX/dir1/subdir2"
self.imap_account.delimiter = "."
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" +\
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" +\
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822) {12}\r\nbody" + \
" text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE \"?INBOX\.dir1\.subdir2\"?",
"^[^ ]* FETCH 1 \(RFC822.header\)"],
lambda self: \
self.assertEquals(self.imap_account.get_mail_summary(1),
(u"From : None\nSubject : None\n\n",
u"None")))
test_func()
def test_get_mail(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822) {11}\r\nbody" + \
" text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE INBOX",
"^[^ ]* FETCH 1 \(RFC822\)"],
lambda self: \
self.assertEquals(self.imap_account.get_mail(1),
(u"From : None\nSubject : None\n\nbody text\r\n\n",
u"None")))
test_func()
def test_get_mail_inbox_does_not_exist(self):
self.__test_select_inbox_does_not_exist(\
lambda: self.imap_account.get_mail(1),
"Mailbox does not exist (email 1)", True)
def test_get_mail_delimiter(self):
self.imap_account.mailbox = "INBOX/dir1/subdir2"
self.imap_account.delimiter = "."
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822) {11}\r\nbody" + \
" text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE \"?INBOX\.dir1\.subdir2\"?",
"^[^ ]* FETCH 1 \(RFC822\)"],
lambda self: \
self.assertEquals(self.imap_account.get_mail(1),
(u"From : None\nSubject : None\n\nbody text\r\n\n",
u"None")))
test_func()
def test_build_folder_cache(self):
test_func = self.make_test(\
[lambda data: '* LIST () "." "INBOX"\r\n' + \
'* LIST () "." "INBOX.dir1"\r\n' + \
'* LIST () "." "INBOX.dir1.subdir1"\r\n' + \
'* LIST () "." "INBOX.dir1.subdir2"\r\n' + \
'* LIST () "." "INBOX.dir2"\r\n' + \
data.split()[0] + ' OK LIST completed\r\n'],
["^[^ ]* LIST \"\" \*"],
lambda self: self.assertEquals(self.imap_account._build_folder_cache(),
{"INBOX":
{"dir1":
{"subdir1": {},
"subdir2": {}},
"dir2": {}}}))
test_func()
def test_ls_dir_base(self):
self.test_build_folder_cache()
self.assertEquals(self.imap_account.ls_dir(""),
["INBOX"])
def test_ls_dir_subdir(self):
self.test_build_folder_cache()
result = self.imap_account.ls_dir("INBOX")
result.sort()
self.assertEquals(result,
["dir1", "dir2"])
def test_ls_dir_subsubdir_delim1(self):
self.test_build_folder_cache()
self.imap_account.default_delimiter = "."
result = self.imap_account.ls_dir("INBOX/dir1")
result.sort()
self.assertEquals(result,
["subdir1", "subdir2"])
def test_ls_dir_subsubdir_delim2(self):
self.test_build_folder_cache()
result = self.imap_account.ls_dir("INBOX/dir1")
result.sort()
self.assertEquals(result,
["subdir1", "subdir2"])
def test_populate_handler(self):
self.assertEquals(".", self.imap_account.delimiter)
self.imap_account.mailbox = "INBOX/dir1/subdir2"
def call_func(self):
self.imap_account.populate_handler()
self.assertEquals("INBOX.dir1.subdir2", self.imap_account.mailbox)
test_func = self.make_test(\
[lambda data: '* LIST () "." "INBOX.dir1.subdir2"\r\n' + \
data.split()[0] + ' OK LIST completed\r\n'],
["^[^ ]* LIST \"?INBOX.dir1.subdir2\"? \*"],
call_func)
test_func()
def test_populate_handler_wrong_default_delimiter(self):
self.imap_account.delimiter = "/"
self.imap_account.mailbox = "INBOX/dir1/subdir2"
def call_func(self):
self.imap_account.populate_handler()
self.assertEquals("INBOX.dir1.subdir2", self.imap_account.mailbox)
self.assertEquals(".", self.imap_account.delimiter)
test_func = self.make_test(\
[lambda data: data.split()[0] + ' OK LIST completed\r\n',
lambda data: '* LIST () "." "INBOX.dir1.subdir2"\r\n' + \
data.split()[0] + ' OK LIST completed\r\n'],
["^[^ ]* LIST \"?INBOX/dir1/subdir2\"? \*",
"^[^ ]* LIST \"?INBOX.dir1.subdir2\"? \*"],
call_func)
test_func()
def test_populate_handler_wrong_mailbox(self):
self.assertEquals(".", self.imap_account.delimiter)
self.imap_account.mailbox = "INBOX.dir1.subdir2"
def call_func(self):
try:
self.imap_account.populate_handler()
except Exception, e:
return
self.fail("Exception should have been raised")
test_func = self.make_test(\
[lambda data: data.split()[0] + ' ERR LIST completed\r\n'],
["^[^ ]* LIST \"?INBOX.dir1.subdir2\"? \*"],
call_func)
test_func()
def check_get_next_mail_index(self, mail_list):
"""
Common tests for get_next_mail_index method.
"""
result = []
original_mail_list = [elt for elt in mail_list]
for elt in self.imap_account.get_next_mail_index(mail_list):
result.append(elt)
self.assertEquals(mail_list, [])
self.assertEquals(result, original_mail_list)
def test_get_next_mail_index_empty(self):
"""
Test get_next_mail_index with empty mail_list parameter.
"""
mail_list = []
self.check_get_next_mail_index(mail_list)
def test_get_next_mail_index(self):
"""
Test get_next_mail_index.
"""
mail_list = [1, 2, 3, 4]
self.check_get_next_mail_index(mail_list)
def test_get_default_status_msg(self):
"""
Get default status message for IMAPAccount.
"""
status_msg = self.imap_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "imap://login@localhost:1143")
def test_get_default_status_msg_ssl(self):
"""
Get default status message for SSL IMAPAccount.
"""
self.imap_account.ssl = True
status_msg = self.imap_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "imaps://login@localhost:1143")
class AbstractSMTPAccount_TestCase(Account_TestCase):
def setUp(self):
JCLTestCase.setUp(self, tables=[Account, ExampleAccount, User,
GlobalSMTPAccount, AbstractSMTPAccount])
self.account_class = AbstractSMTPAccount
def test_default_account_post_func_no_default_true(self):
user1 = User(jid="[email protected]")
account11 = AbstractSMTPAccount(user=user1,
name="account11",
jid="[email protected]")
account12 = AbstractSMTPAccount(user=user1,
name="account12",
jid="[email protected]")
(name, field_type, field_options, post_func, default_func) = \
AbstractSMTPAccount.get_register_fields()[0]
value = post_func("True", None, "[email protected]")
self.assertTrue(value)
def test_default_account_post_func_no_default_false(self):
user1 = User(jid="[email protected]")
account11 = AbstractSMTPAccount(user=user1,
name="account11",
jid="[email protected]")
account12 = AbstractSMTPAccount(user=user1,
name="account12",
jid="[email protected]")
(name, field_type, field_options, post_func, default_func) = \
AbstractSMTPAccount.get_register_fields()[0]
value = post_func("False", None, "[email protected]")
self.assertTrue(value)
def test_default_account_post_func_true(self):
user1 = User(jid="[email protected]")
account11 = AbstractSMTPAccount(user=user1,
name="account11",
jid="[email protected]")
account12 = AbstractSMTPAccount(user=user1,
name="account12",
jid="[email protected]")
account12.default_account = True
(name, field_type, field_options, post_func, default_func) = \
AbstractSMTPAccount.get_register_fields()[0]
value = post_func("True", None, "[email protected]")
self.assertTrue(value)
self.assertFalse(account12.default_account)
def test_default_account_post_func_false(self):
user1 = User(jid="[email protected]")
account11 = AbstractSMTPAccount(user=user1,
name="account11",
jid="[email protected]")
account12 = AbstractSMTPAccount(user=user1,
name="account12",
jid="[email protected]")
account12.default_account = True
(name, field_type, field_options, post_func, default_func) = \
AbstractSMTPAccount.get_register_fields()[0]
value = post_func("False", None, "[email protected]")
self.assertFalse(value)
self.assertTrue(account12.default_account)
def test_create_email(self):
account11 = AbstractSMTPAccount(user=User(jid="[email protected]"),
name="account11",
jid="[email protected]")
email = account11.create_email("[email protected]",
"[email protected]",
"subject",
"body")
self.assertEqual(email['From'], "[email protected]")
self.assertEqual(email['To'], "[email protected]")
self.assertEqual(email['Subject'], "subject")
self.assertEqual(email.get_payload(), "body")
def test_create_email_other_headers(self):
account11 = AbstractSMTPAccount(user=User(jid="[email protected]"),
name="account11",
jid="[email protected]")
email = account11.create_email("[email protected]",
"[email protected]",
"subject",
"body",
{"Bcc": "[email protected]",
"Cc": "[email protected]"})
self.assertEqual(email['From'], "[email protected]")
self.assertEqual(email['To'], "[email protected]")
self.assertEqual(email['Subject'], "subject")
self.assertEqual(email['Bcc'], "[email protected]")
self.assertEqual(email['Cc'], "[email protected]")
self.assertEqual(email.get_payload(), "body")
class SMTPAccount_TestCase(Account_TestCase):
def setUp(self):
JCLTestCase.setUp(self, tables=[Account, ExampleAccount, User,
GlobalSMTPAccount,
AbstractSMTPAccount, SMTPAccount])
self.account_class = SMTPAccount
def make_test(self, responses=None, queries=None, core=None):
def inner():
self.server = server.DummyServer("localhost", 1025)
thread.start_new_thread(self.server.serve, ())
self.server.responses = []
if responses:
self.server.responses += responses
self.server.responses += ["221 localhost closing connection\r\n"]
self.server.queries = []
if queries:
self.server.queries += queries
self.server.queries += ["quit\r\n"]
if core:
model.db_connect()
core(self)
model.db_disconnect()
self.failUnless(self.server.verify_queries())
return inner
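    # Note: in the response lists used by the tests below, None entries appear
    # to line up with message body lines sent during DATA, for which the dummy
    # server sends no reply (an inference, not documented in the original).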
def test_send_email_esmtp_no_auth(self):
model.db_connect()
smtp_account = SMTPAccount(user=User(jid="[email protected]"),
name="account11",
jid="[email protected]")
smtp_account.host = "localhost"
smtp_account.port = 1025
model.db_disconnect()
email = smtp_account.create_email("[email protected]",
"[email protected]",
"subject",
"body")
test_func = self.make_test(["220 localhost ESMTP\r\n",
"250-localhost Hello 127.0.0.1\r\n"
+ "250-SIZE 52428800\r\n"
+ "250-PIPELINING\r\n"
+ "250 HELP\r\n",
"250 OK\r\n",
"250 Accepted\r\n",
"354 Enter message\r\n",
None, None, None, None,
None, None, None, None,
"250 OK\r\n"],
["ehlo .*\r\n",
"mail FROM:<" + str(email['From']) + ">.*",
"rcpt TO:<" + str(email['To']) + ">\r\n",
"data\r\n"] +
email.as_string().split("\n") + [".\r\n"],
lambda self: \
smtp_account.send_email(email))
test_func()
def test_send_email_no_auth(self):
model.db_connect()
smtp_account = SMTPAccount(user=User(jid="[email protected]"),
name="account11",
jid="[email protected]")
smtp_account.host = "localhost"
smtp_account.port = 1025
model.db_disconnect()
email = smtp_account.create_email("[email protected]",
"[email protected]",
"subject",
"body")
test_func = self.make_test(["220 localhost SMTP\r\n",
"504 ESMTP not supported\r\n",
"250-localhost Hello 127.0.0.1\r\n"
+ "250-SIZE 52428800\r\n"
+ "250-PIPELINING\r\n"
+ "250 HELP\r\n",
"250 OK\r\n",
"250 Accepted\r\n",
"354 Enter message\r\n",
None, None, None, None,
None, None, None, None,
"250 OK\r\n"],
["ehlo .*\r\n",
"helo .*\r\n",
"mail FROM:<" + str(email['From']) + ">.*",
"rcpt TO:<" + str(email['To']) + ">\r\n",
"data\r\n"] +
email.as_string().split("\n") + [".\r\n"],
lambda self: \
smtp_account.send_email(email))
test_func()
def test_send_email_esmtp_auth(self):
model.db_connect()
smtp_account = SMTPAccount(user=User(jid="[email protected]"),
name="account11",
jid="[email protected]")
smtp_account.host = "localhost"
smtp_account.port = 1025
smtp_account.login = "user"
smtp_account.password = "pass"
model.db_disconnect()
email = smtp_account.create_email("[email protected]",
"[email protected]",
"subject",
"body")
test_func = self.make_test(["220 localhost ESMTP\r\n",
"250-localhost Hello 127.0.0.1\r\n"
+ "250-SIZE 52428800\r\n"
+ "250-AUTH PLAIN LOGIN CRAM-MD5\r\n"
+ "250-PIPELINING\r\n"
+ "250 HELP\r\n",
"334 ZGF4IDNmNDM2NzY0YzBhNjgyMTQ1MzhhZGNiMjE2YTYxZjRm\r\n",
"235 Authentication succeeded\r\n",
"250 OK\r\n",
"250 Accepted\r\n",
"354 Enter message\r\n",
None, None, None, None,
None, None, None, None,
"250 OK\r\n"],
["ehlo .*\r\n",
"AUTH CRAM-MD5\r\n",
".*\r\n",
"mail FROM:<" + str(email['From']) + ">.*",
"rcpt TO:<" + str(email['To']) + ">\r\n",
"data\r\n"] +
email.as_string().split("\n") + [".\r\n"],
lambda self: \
smtp_account.send_email(email))
test_func()
def test_send_email_esmtp_auth_method2(self):
model.db_connect()
smtp_account = SMTPAccount(user=User(jid="[email protected]"),
name="account11",
jid="[email protected]")
smtp_account.host = "localhost"
smtp_account.port = 1025
smtp_account.login = "user"
smtp_account.password = "pass"
model.db_disconnect()
email = smtp_account.create_email("[email protected]",
"[email protected]",
"subject",
"body")
test_func = self.make_test(["220 localhost ESMTP\r\n",
"250-localhost Hello 127.0.0.1\r\n"
+ "250-SIZE 52428800\r\n"
+ "250-AUTH PLAIN LOGIN CRAM-MD5\r\n"
+ "250-PIPELINING\r\n"
+ "250 HELP\r\n",
"334 ZGF4IDNmNDM2NzY0YzBhNjgyMTQ1MzhhZGNiMjE2YTYxZjRm\r\n",
"535 Incorrect Authentication data\r\n",
"334 asd235r4\r\n",
"235 Authentication succeeded\r\n",
"250 OK\r\n",
"250 Accepted\r\n",
"354 Enter message\r\n",
None, None, None, None,
None, None, None, None,
"250 OK\r\n"],
["ehlo .*\r\n",
"AUTH CRAM-MD5\r\n",
".*\r\n",
"AUTH LOGIN .*\r\n",
".*\r\n",
"mail FROM:<" + str(email['From']) + ">.*",
"rcpt TO:<" + str(email['To']) + ">\r\n",
"data\r\n"] +
email.as_string().split("\n") + [".\r\n"],
lambda self: \
smtp_account.send_email(email))
test_func()
def test_send_email_esmtp_auth_method_with_no_suitable_auth_method_error(self):
model.db_connect()
smtp_account = SMTPAccount(user=User(jid="[email protected]"),
name="account11",
jid="[email protected]")
smtp_account.host = "localhost"
smtp_account.port = 1025
smtp_account.login = "user"
smtp_account.password = "pass"
model.db_disconnect()
email = smtp_account.create_email("[email protected]",
"[email protected]",
"subject",
"body")
test_func = self.make_test(["220 localhost ESMTP\r\n",
"250-localhost Hello 127.0.0.1\r\n"
+ "250-SIZE 52428800\r\n"
+ "250-AUTH PLAIN LOGIN DIGEST-MD5\r\n"
+ "250-PIPELINING\r\n"
+ "250 HELP\r\n",
"334 asd235r4\r\n",
"235 Authentication succeeded\r\n",
"250 OK\r\n",
"250 Accepted\r\n",
"354 Enter message\r\n",
None, None, None, None,
None, None, None, None,
"250 OK\r\n"],
["ehlo .*\r\n",
"AUTH LOGIN .*\r\n",
".*\r\n",
"mail FROM:<" + str(email['From']) + ">.*",
"rcpt TO:<" + str(email['To']) + ">\r\n",
"data\r\n"] +
email.as_string().split("\n") + [".\r\n"],
lambda self: \
smtp_account.send_email(email))
test_func()
def test_get_default_status_msg(self):
"""
        Get default status message for SMTPAccount.
"""
smtp_account = SMTPAccount(user=User(jid="[email protected]"),
name="account11",
jid="[email protected]")
smtp_account.host = "localhost"
smtp_account.port = 1025
smtp_account.login = "user"
smtp_account.password = "pass"
status_msg = smtp_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "smtp://user@localhost:1025")
def test_get_default_status_msg_ssl(self):
"""
        Get default status message for TLS SMTPAccount.
"""
smtp_account = SMTPAccount(user=User(jid="[email protected]"),
name="account11",
jid="[email protected]")
smtp_account.host = "localhost"
smtp_account.port = 1025
smtp_account.login = "user"
smtp_account.password = "pass"
smtp_account.tls = True
status_msg = smtp_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "smtps://user@localhost:1025")
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(AccountModule_TestCase, 'test'))
suite.addTest(unittest.makeSuite(MailAccount_TestCase, 'test'))
suite.addTest(unittest.makeSuite(POP3Account_TestCase, 'test'))
suite.addTest(unittest.makeSuite(IMAPAccount_TestCase, 'test'))
suite.addTest(unittest.makeSuite(AbstractSMTPAccount_TestCase, 'test'))
suite.addTest(unittest.makeSuite(SMTPAccount_TestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')<|fim▁end|> | def test_get_new_mail_list(self):
test_func = self.make_test(\ |
<|file_name|>tasks.js<|end_file_name|><|fim▁begin|>/*
* taskapi.
*/
var input_taskname_text = '<input type="text" id="task_name_{0}" value="{1}" class="form-control">';
var input_taskdesc_text = '<input type="text" id="task_desc_{0}" value="{1}" class="form-control">';
var btn_update_task = '<a href="#" class="btn-update-task btn btn-sm btn-success" data-taskid="{0}" id="task_update_btn_{0}">Update</a> ';
var btn_remove_task = '<a href="#" class="{2} btn btn-sm btn-danger" data-taskid="{0}" id="taskbtn_{0}">{1}</a>';
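// The '{0}'-style templates above rely on a String.prototype.format helper
// that is presumably defined elsewhere in the project. A minimal guarded
// shim (an assumption, not part of the original file):
if (!String.prototype.format) {
    String.prototype.format = function () {
        var args = arguments;
        // swap each {n} placeholder for the matching positional argument
        return this.replace(/\{(\d+)\}/g, function (match, index) {
            return typeof args[index] !== 'undefined' ? args[index] : match;
        });
    };
}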
var taskilyTasks = (function (survey, table) {
var tsk = {};
var tableID = table;
var surveyID = survey;
tsk.loadTasks = function()
{
$.ajax({
url: '/api/tasks/all/' + surveyID,
type: 'GET',
success: function(data)
{
for (var i = 0; i < data.length; i++) {
var activeclass = '';
if (data[i].Active == false) {
activeclass = 'task-row-inactive';
}
var row = ['<tr id="taskrow_' + data[i].ID + '" class="task-row '+ activeclass + '">',
'<td>' + input_taskname_text.format(data[i].ID, data[i].Name) + '</td>',
'<td>' + input_taskdesc_text.format(data[i].ID, data[i].Description) + '</td>'] ;
if (data[i].Active == true) {
row.push('<td>'
+ btn_update_task.format(data[i].ID)
+ btn_remove_task.format(data[i].ID, 'Remove', 'btn-remove-task')
+ '</td>');
}
else {
row.push('<td>' + btn_remove_task.format(data[i].ID, 'Activate', 'btn-activate-task') + '</td>') ;
}
row.push('</tr>');
                $( row.join('') ).appendTo(tableID);
}
bindTaskButtons();
},
error: function(msg)
{
alert(msg);
}
});
}
function bindTaskButtons()
{
$(".btn-remove-task").click(function (e) {
e.stopPropagation();
e.preventDefault();
var taskID = $(this).data("taskid");
removeTask(taskID);
});
$(".btn-update-task").click(function (e) {
e.stopPropagation();
e.preventDefault();
var taskID = $(this).data("taskid");
updateTask(taskID);
});
$("#addTask").click(function (e) {
e.stopPropagation();
e.preventDefault();
var tName = $("#newTask").val();
var tDesc = $("#newDesc").val();
addTask(tName, tDesc);
});
$(".btn-activate-task").click(function (e) {
e.stopPropagation();
e.preventDefault();
var taskID = $(this).data("taskid");
activateTask(taskID);
});
}
function removeTask(taskId) {
$.ajax({
url: '/api/tasks/delete/' + taskId,
type: 'DELETE',
success: function (data) {
if (data.Active == false) {
                    toggleButton(data.ID);
                    // toggleButton only re-labels the button; re-select it and
                    // swap its click handler over to activate
                    var btn = $('#taskbtn_' + data.ID);
                    btn.unbind('click');
btn.click(function (e) {
e.stopPropagation();
var taskID = $(this).data("taskid");
activateTask(taskID);
});
}
else {
$("#taskrow_" + data.ID).fadeOut();
}
},
error: function (data) {
alert("Error removing task");
}
});
}
function updateTask(taskId)
{
var taskName = $("#task_name_" + taskId).val();
var taskDesc = $("#task_desc_" + taskId).val();
var task = {
'ID': taskId,
'SurveyID': surveyID,
'Name': taskName,
'Description': taskDesc
};
$.ajax({
url: '/api/tasks/update/' + surveyID,
type: 'POST',
contentType: 'application/json',
data: JSON.stringify(task),
success: function (data) {
var row = $('#taskrow_' + data.ID).addClass('task-done') ;
setTimeout(function () {
row.toggleClass('task-done');
}, 1000);
},
error: function (msg) {
alert('failed to update task');
}
});
}
function activateTask(taskId)
{
$.ajax({
url: '/api/tasks/activate/' + taskId,
type: 'PUT',
success: function (data) {
                toggleButton(data.ID);
                // re-select the re-labeled button and swap its handler back
                // to remove
                var btn = $('#taskbtn_' + data.ID);
                btn.unbind('click');
btn.click(function (e) {
e.stopPropagation();
var taskID = $(this).data("taskid");
removeTask(taskID);
});
},
error: function (data) {
alert("Error activating task");
}
});
}
function toggleButton(taskId)
{
var btn = $('#taskbtn_' + taskId);
var row = $('#taskrow_' + taskId).addClass('task-done');
if (btn.html() == 'Activate') {
btn.addClass("btn-remove-task");
btn.addClass("btn-danger");
btn.removeClass("btn-activate-task");
btn.removeClass("btn-warning");
row.removeClass("task-row-inactive");
btn.html("Remove");
}
else {
btn.removeClass("btn-remove-task");
btn.removeClass("btn-danger");
btn.addClass("btn-activate-task");
btn.addClass("btn-warning");
btn.html("Activate");<|fim▁hole|> setTimeout(function () {
row.toggleClass('task-done');
}, 1000);
}
function addTask(name, description) {
var task = {
'SurveyID': surveyID,
'Active': true,
'Name': name,
'Description': description
};
$.ajax({
url: '/api/tasks/add/' + surveyID,
type: 'POST',
contentType: 'application/json',
data: JSON.stringify(task),
success: function (data) {
$(
['<tr id="taskrow_' + data.ID + '" class="task-row">',
'<td>' + input_taskname_text.format(data.ID, data.Name) + '</td>',
'<td>' + input_taskdesc_text.format(data.ID, data.Description) + '</td>',
'<td>' + btn_update_task.format(data.ID)
+ btn_remove_task.format(data.ID, 'Remove', 'btn-remove-task') + '</td>',
'</tr>'
                ].join('')).appendTo(tableID);
},
error: function (msg) {
alert( "Failed to add " + msg)
}
});
}
return tsk;
});<|fim▁end|> | row.addClass("task-row-inactive");
}
|
<|file_name|>IRIS_DF_Controller.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''======================================================
Created by: D. Spencer Maughan
Last updated: May 2015
File name: IRIS_DF_Controller.py
Organization: RISC Lab, Utah State University
Notes:
======================================================'''
import roslib; roslib.load_manifest('risc_msgs')
import rospy
from math import *
import numpy as np
import time
#=======================#
# Messages Needed #
#=======================#
from risc_msgs.msg import *
from std_msgs.msg import Bool
from roscopter.msg import Status
#=====================#
# Gain Matrices #
#=====================#
K = np.matrix([[ 1.8, 0, 0, 1.4, 0, 0, 0],\
[ 0, 1.8, 0, 0, 1.4, 0, 0],\
[ 0, 0, 3, 0, 0, 5, 0],\
[ 0, 0, 0, 0, 0, 0,.5]])
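# 4x7 LQR-style gain: maps the state error [x y z xdot ydot zdot psi] to the
# four inputs (PD feedback on x/y, PD on z, proportional feedback on yaw).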
#========================#
# Globals #
#========================#
nominal_thrust = 0 # thrust necessary to maintain hover given battery level
phi_scale = 3.053261127645355
phi_trim = 0.0#0.058941904209906
theta_scale = 3.815398742249453
theta_trim = 0.0#-0.091216767651723
ctrl_status = False
states = Cortex()
states.Obj = [States()]*1
traj = Trajectories()
traj.Obj = [Trajectory()]*1
euler_max = 45*np.pi/180
max_yaw_rate = .3490659 #in radians/sec
rate = 45 # Hz
image = 0
start_time = 0
#==================#
# Publishers #
#==================#
pub_ctrl = rospy.Publisher('/controls', Controls, queue_size = 1)
#========================#
# Get Cortex States #
#========================#
def GetStates(S):
global states
states = S
#=====================#
# Get Trajectory #
#=====================#
def GetTraj(S):
global traj
traj = S
#=========================#
# Get Battery Status #
#=========================#
def GetBatt(S):
global nominal_thrust
B = S.battery_remaining
# coefficients for fourth order fit
# determined 11 May 2015 by Spencer Maughan and Ishmaal Erekson
c0 = 0.491674747062374
c1 = -0.024809293286468
c2 = 0.000662710609466
c3 = -0.000008160593348
c4 = 0.000000033699651
nominal_thrust = c0+c1*B+c2*B**2+c3*B**3+c4*B**4
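    # Sketch (not in the original node): the same quartic can be evaluated
    # with numpy, coefficients ordered highest degree first:
    #   nominal_thrust = np.polyval([c4, c3, c2, c1, c0], B)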
#============================#
# Get Controller Status #
#============================#
def GetStatus(S):
global ctrl_status
ctrl_status = S.data
#========================#
# Basic Controller #
#========================#
def Basic_Controller():
global states, euler_max, max_yaw_rate, pub_ctrl,K,traj
Ctrl = Controls()
Ctrl.Obj = [Control()]*1
Ctrl.header.stamp = states.header.stamp
g = 9.80665 # average value of earth's gravitational constant m/s^2
m = 1.282 # IRIS mass in kg
#===================================#
# Get State Trajectory Errors #
#===================================#
if states.Obj[0].visible:
X = np.asmatrix(np.zeros((7,1)))
X[0] = traj.Obj[0].x-states.Obj[0].x
X[1] = traj.Obj[0].y-states.Obj[0].y
X[2] = traj.Obj[0].z-states.Obj[0].z
X[3] = traj.Obj[0].xdot-states.Obj[0].u
X[4] = traj.Obj[0].ydot-states.Obj[0].v
X[5] = traj.Obj[0].zdot-states.Obj[0].w
X[6] = traj.Obj[0].psi-states.Obj[0].psi*np.pi/180
#============================================#
# Differential Flatness Control Input #
#============================================#
# LQR input
utilde = -K*X
# required input
u_r = np.asmatrix(np.zeros((4,1)))
u = utilde+u_r-np.matrix([[0],[0],[9.81],[0]])<|fim▁hole|> #==================================#
psi = states.Obj[0].psi*np.pi/180
rotZ = np.matrix([[cos(psi), sin(psi), 0],[-sin(psi), cos(psi), 0],[0, 0, 1]])
Cart = np.matrix([[1, 0, 0],[0, -1, 0],[0, 0, -1]])
u[:-1] = Cart*rotZ*u[:-1]
#===================================#
# Normalize given the Thrust #
#===================================#
T = sqrt(u[0:3].T*u[0:3])
u[:-1] = np.divide(u[:-1],-T)
#==================#
# Set Controls #
#==================#
# Controls for Ardrone
# -phi = right... +phi = left
# -theta = back... +theta = forward
# -psi = right... +psi = left
global phi_trim,theta_trim,phi_scale,theta_scale
phi_d = (asin(u[1,-1]))
theta_d = (-asin(u[0,-1]))
ctrl = Control()
ctrl.name = states.Obj[0].name
ctrl.phi = phi_trim + phi_scale*phi_d
ctrl.theta = theta_trim + theta_scale*theta_d
ctrl.psi = -u[3,-1]/max_yaw_rate
global nominal_thrust
T_d = nominal_thrust+(T-g)/g
ctrl.T = T_d
Ctrl.Obj[0] = ctrl
Ctrl.header = states.header
#rospy.loginfo("latency = %f",states.header.stamp.to_sec()-rospy.get_time())
pub_ctrl.publish(Ctrl)
#===================#
# Main #
#===================#
if __name__=='__main__':
import sys
rospy.init_node('IRIS_DF_Controller')
#=====================================#
# Set up Publish/Subscribe Loop #
#=====================================#
r = rospy.Rate(rate)
while not rospy.is_shutdown():
sub_cortex = rospy.Subscriber('/cortex_raw' , Cortex, GetStates, queue_size=1, buff_size=2**24)
sub_traj = rospy.Subscriber('/trajectory' , Trajectories, GetTraj, queue_size=1, buff_size=2**24)
sub_Batt = rospy.Subscriber('/apm/status' , Status, GetBatt)
sub_status = rospy.Subscriber('/controller_status' , Bool, GetStatus)
Basic_Controller()
r.sleep()<|fim▁end|> |
#==================================#
# Rotate to Vehicle 1 Frame # |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>/// <reference path="./typings/globals.d.ts"/>
/// <reference path="./typings/lib.d.ts"/>
var angular = require('angular');
var magics_scene_1 = require('./directives/magics-scene');
var magics_spy_1 = require('./directives/magics-spy');
var magics_stage_1 = require('./directives/magics-stage');
var constants_1 = require('./services/constants');
var magics_1 = require('./services/magics');
exports.__esModule = true;
exports["default"] = angular.module('ngMagics', [
magics_scene_1["default"],<|fim▁hole|>])
.name;<|fim▁end|> | magics_spy_1["default"],
magics_stage_1["default"],
constants_1["default"],
magics_1["default"] |
<|file_name|>button.component.d.ts<|end_file_name|><|fim▁begin|><|fim▁hole|> colored: string;
btnDisabled: boolean;
}<|fim▁end|> | export declare class ButtonDemo {
buttonType: string;
doRipple: boolean;
|
<|file_name|>underscore.js<|end_file_name|><|fim▁begin|>// Underscore.js 1.8.3
// http://underscorejs.org
// (c) 2009-2016 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
// Underscore may be freely distributed under the MIT license.
(function() {
// Baseline setup
// --------------
// Establish the root object, `window` (`self`) in the browser, `global`
// on the server, or `this` in some virtual machines. We use `self`
// instead of `window` for `WebWorker` support.
var root = typeof self == 'object' && self.self === self && self ||
typeof global == 'object' && global.global === global && global ||
this;
// Save the previous value of the `_` variable.
var previousUnderscore = root._;
// Save bytes in the minified (but not gzipped) version:
var ArrayProto = Array.prototype, ObjProto = Object.prototype;
// Create quick reference variables for speed access to core prototypes.
var push = ArrayProto.push,
slice = ArrayProto.slice,
toString = ObjProto.toString,
hasOwnProperty = ObjProto.hasOwnProperty;
// All **ECMAScript 5** native function implementations that we hope to use
// are declared here.
var nativeIsArray = Array.isArray,
nativeKeys = Object.keys,
nativeCreate = Object.create;
// Naked function reference for surrogate-prototype-swapping.
var Ctor = function(){};
// Create a safe reference to the Underscore object for use below.
var _ = function(obj) {
if (obj instanceof _) return obj;
if (!(this instanceof _)) return new _(obj);
this._wrapped = obj;
};
// Export the Underscore object for **Node.js**, with
// backwards-compatibility for their old module API. If we're in
// the browser, add `_` as a global object.
// (`nodeType` is checked to ensure that `module`
// and `exports` are not HTML elements.)
if (typeof exports != 'undefined' && !exports.nodeType) {
if (typeof module != 'undefined' && !module.nodeType && module.exports) {
exports = module.exports = _;
}
exports._ = _;
} else {
root._ = _;
}
// Current version.
_.VERSION = '1.8.3';
// Internal function that returns an efficient (for current engines) version
// of the passed-in callback, to be repeatedly applied in other Underscore
// functions.
var optimizeCb = function(func, context, argCount) {
if (context === void 0) return func;
switch (argCount == null ? 3 : argCount) {
case 1: return function(value) {
return func.call(context, value);
};
// The 2-parameter case has been omitted only because no current consumers
// made use of it.
case 3: return function(value, index, collection) {
return func.call(context, value, index, collection);<|fim▁hole|> };
case 4: return function(accumulator, value, index, collection) {
return func.call(context, accumulator, value, index, collection);
};
}
return function() {
return func.apply(context, arguments);
};
};
// An internal function to generate callbacks that can be applied to each
// element in a collection, returning the desired result — either `identity`,
// an arbitrary callback, a property matcher, or a property accessor.
var cb = function(value, context, argCount) {
if (value == null) return _.identity;
if (_.isFunction(value)) return optimizeCb(value, context, argCount);
if (_.isObject(value)) return _.matcher(value);
return _.property(value);
};
// An external wrapper for the internal callback generator
_.iteratee = function(value, context) {
return cb(value, context, Infinity);
};
// Similar to ES6's rest param (http://ariya.ofilabs.com/2013/03/es6-and-rest-parameter.html)
// This accumulates the arguments passed into an array, after a given index.
var restArgs = function(func, startIndex) {
startIndex = startIndex == null ? func.length - 1 : +startIndex;
return function() {
var length = Math.max(arguments.length - startIndex, 0);
var rest = Array(length);
for (var index = 0; index < length; index++) {
rest[index] = arguments[index + startIndex];
}
switch (startIndex) {
case 0: return func.call(this, rest);
case 1: return func.call(this, arguments[0], rest);
case 2: return func.call(this, arguments[0], arguments[1], rest);
}
var args = Array(startIndex + 1);
for (index = 0; index < startIndex; index++) {
args[index] = arguments[index];
}
args[startIndex] = rest;
return func.apply(this, args);
};
};
// An internal function for creating a new object that inherits from another.
var baseCreate = function(prototype) {
if (!_.isObject(prototype)) return {};
if (nativeCreate) return nativeCreate(prototype);
Ctor.prototype = prototype;
var result = new Ctor;
Ctor.prototype = null;
return result;
};
var property = function(key) {
return function(obj) {
return obj == null ? void 0 : obj[key];
};
};
// Helper for collection methods to determine whether a collection
// should be iterated as an array or as an object.
// Related: http://people.mozilla.org/~jorendorff/es6-draft.html#sec-tolength
// Avoids a very nasty iOS 8 JIT bug on ARM-64. #2094
var MAX_ARRAY_INDEX = Math.pow(2, 53) - 1;
var getLength = property('length');
var isArrayLike = function(collection) {
var length = getLength(collection);
return typeof length == 'number' && length >= 0 && length <= MAX_ARRAY_INDEX;
};
// Collection Functions
// --------------------
// The cornerstone, an `each` implementation, aka `forEach`.
// Handles raw objects in addition to array-likes. Treats all
// sparse array-likes as if they were dense.
_.each = _.forEach = function(obj, iteratee, context) {
iteratee = optimizeCb(iteratee, context);
var i, length;
if (isArrayLike(obj)) {
for (i = 0, length = obj.length; i < length; i++) {
iteratee(obj[i], i, obj);
}
} else {
var keys = _.keys(obj);
for (i = 0, length = keys.length; i < length; i++) {
iteratee(obj[keys[i]], keys[i], obj);
}
}
return obj;
};
// Return the results of applying the iteratee to each element.
_.map = _.collect = function(obj, iteratee, context) {
iteratee = cb(iteratee, context);
var keys = !isArrayLike(obj) && _.keys(obj),
length = (keys || obj).length,
results = Array(length);
for (var index = 0; index < length; index++) {
var currentKey = keys ? keys[index] : index;
results[index] = iteratee(obj[currentKey], currentKey, obj);
}
return results;
};
// Create a reducing function iterating left or right.
var createReduce = function(dir) {
// Wrap code that reassigns argument variables in a separate function than
// the one that accesses `arguments.length` to avoid a perf hit. (#1991)
var reducer = function(obj, iteratee, memo, initial) {
var keys = !isArrayLike(obj) && _.keys(obj),
length = (keys || obj).length,
index = dir > 0 ? 0 : length - 1;
if (!initial) {
memo = obj[keys ? keys[index] : index];
index += dir;
}
for (; index >= 0 && index < length; index += dir) {
var currentKey = keys ? keys[index] : index;
memo = iteratee(memo, obj[currentKey], currentKey, obj);
}
return memo;
};
return function(obj, iteratee, memo, context) {
var initial = arguments.length >= 3;
return reducer(obj, optimizeCb(iteratee, context, 4), memo, initial);
};
};
// **Reduce** builds up a single result from a list of values, aka `inject`,
// or `foldl`.
_.reduce = _.foldl = _.inject = createReduce(1);
// The right-associative version of reduce, also known as `foldr`.
_.reduceRight = _.foldr = createReduce(-1);
// Return the first value which passes a truth test. Aliased as `detect`.
_.find = _.detect = function(obj, predicate, context) {
var key;
if (isArrayLike(obj)) {
key = _.findIndex(obj, predicate, context);
} else {
key = _.findKey(obj, predicate, context);
}
if (key !== void 0 && key !== -1) return obj[key];
};
// Return all the elements that pass a truth test.
// Aliased as `select`.
_.filter = _.select = function(obj, predicate, context) {
var results = [];
predicate = cb(predicate, context);
_.each(obj, function(value, index, list) {
if (predicate(value, index, list)) results.push(value);
});
return results;
};
// Return all the elements for which a truth test fails.
_.reject = function(obj, predicate, context) {
return _.filter(obj, _.negate(cb(predicate)), context);
};
// Determine whether all of the elements match a truth test.
// Aliased as `all`.
_.every = _.all = function(obj, predicate, context) {
predicate = cb(predicate, context);
var keys = !isArrayLike(obj) && _.keys(obj),
length = (keys || obj).length;
for (var index = 0; index < length; index++) {
var currentKey = keys ? keys[index] : index;
if (!predicate(obj[currentKey], currentKey, obj)) return false;
}
return true;
};
// Determine if at least one element in the object matches a truth test.
// Aliased as `any`.
_.some = _.any = function(obj, predicate, context) {
predicate = cb(predicate, context);
var keys = !isArrayLike(obj) && _.keys(obj),
length = (keys || obj).length;
for (var index = 0; index < length; index++) {
var currentKey = keys ? keys[index] : index;
if (predicate(obj[currentKey], currentKey, obj)) return true;
}
return false;
};
// Determine if the array or object contains a given item (using `===`).
// Aliased as `includes` and `include`.
_.contains = _.includes = _.include = function(obj, item, fromIndex, guard) {
if (!isArrayLike(obj)) obj = _.values(obj);
if (typeof fromIndex != 'number' || guard) fromIndex = 0;
return _.indexOf(obj, item, fromIndex) >= 0;
};
// Invoke a method (with arguments) on every item in a collection.
_.invoke = restArgs(function(obj, method, args) {
var isFunc = _.isFunction(method);
return _.map(obj, function(value) {
var func = isFunc ? method : value[method];
return func == null ? func : func.apply(value, args);
});
});
// Convenience version of a common use case of `map`: fetching a property.
_.pluck = function(obj, key) {
return _.map(obj, _.property(key));
};
// Convenience version of a common use case of `filter`: selecting only objects
// containing specific `key:value` pairs.
_.where = function(obj, attrs) {
return _.filter(obj, _.matcher(attrs));
};
// Convenience version of a common use case of `find`: getting the first object
// containing specific `key:value` pairs.
_.findWhere = function(obj, attrs) {
return _.find(obj, _.matcher(attrs));
};
// Return the maximum element (or element-based computation).
_.max = function(obj, iteratee, context) {
var result = -Infinity, lastComputed = -Infinity,
value, computed;
if (iteratee == null || (typeof iteratee == 'number' && typeof obj[0] != 'object') && obj != null) {
obj = isArrayLike(obj) ? obj : _.values(obj);
for (var i = 0, length = obj.length; i < length; i++) {
value = obj[i];
if (value != null && value > result) {
result = value;
}
}
} else {
iteratee = cb(iteratee, context);
_.each(obj, function(v, index, list) {
computed = iteratee(v, index, list);
if (computed > lastComputed || computed === -Infinity && result === -Infinity) {
result = v;
lastComputed = computed;
}
});
}
return result;
};
// Return the minimum element (or element-based computation).
_.min = function(obj, iteratee, context) {
var result = Infinity, lastComputed = Infinity,
value, computed;
if (iteratee == null || (typeof iteratee == 'number' && typeof obj[0] != 'object') && obj != null) {
obj = isArrayLike(obj) ? obj : _.values(obj);
for (var i = 0, length = obj.length; i < length; i++) {
value = obj[i];
if (value != null && value < result) {
result = value;
}
}
} else {
iteratee = cb(iteratee, context);
_.each(obj, function(v, index, list) {
computed = iteratee(v, index, list);
if (computed < lastComputed || computed === Infinity && result === Infinity) {
result = v;
lastComputed = computed;
}
});
}
return result;
};
// Shuffle a collection.
_.shuffle = function(obj) {
return _.sample(obj, Infinity);
};
// Sample **n** random values from a collection using the modern version of the
// [Fisher-Yates shuffle](http://en.wikipedia.org/wiki/Fisher–Yates_shuffle).
// If **n** is not specified, returns a single random element.
// The internal `guard` argument allows it to work with `map`.
_.sample = function(obj, n, guard) {
if (n == null || guard) {
if (!isArrayLike(obj)) obj = _.values(obj);
return obj[_.random(obj.length - 1)];
}
var sample = isArrayLike(obj) ? _.clone(obj) : _.values(obj);
var length = getLength(sample);
n = Math.max(Math.min(n, length), 0);
var last = length - 1;
for (var index = 0; index < n; index++) {
var rand = _.random(index, last);
var temp = sample[index];
sample[index] = sample[rand];
sample[rand] = temp;
}
return sample.slice(0, n);
};
// Sort the object's values by a criterion produced by an iteratee.
_.sortBy = function(obj, iteratee, context) {
var index = 0;
iteratee = cb(iteratee, context);
return _.pluck(_.map(obj, function(value, key, list) {
return {
value: value,
index: index++,
criteria: iteratee(value, key, list)
};
}).sort(function(left, right) {
var a = left.criteria;
var b = right.criteria;
if (a !== b) {
if (a > b || a === void 0) return 1;
if (a < b || b === void 0) return -1;
}
return left.index - right.index;
}), 'value');
};
// An internal function used for aggregate "group by" operations.
var group = function(behavior, partition) {
return function(obj, iteratee, context) {
var result = partition ? [[], []] : {};
iteratee = cb(iteratee, context);
_.each(obj, function(value, index) {
var key = iteratee(value, index, obj);
behavior(result, value, key);
});
return result;
};
};
// Groups the object's values by a criterion. Pass either a string attribute
// to group by, or a function that returns the criterion.
_.groupBy = group(function(result, value, key) {
if (_.has(result, key)) result[key].push(value); else result[key] = [value];
});
// Indexes the object's values by a criterion, similar to `groupBy`, but for
// when you know that your index values will be unique.
_.indexBy = group(function(result, value, key) {
result[key] = value;
});
// Counts instances of an object that group by a certain criterion. Pass
// either a string attribute to count by, or a function that returns the
// criterion.
_.countBy = group(function(result, value, key) {
if (_.has(result, key)) result[key]++; else result[key] = 1;
});
var reStrSymbol = /[^\ud800-\udfff]|[\ud800-\udbff][\udc00-\udfff]|[\ud800-\udfff]/g;
// Safely create a real, live array from anything iterable.
_.toArray = function(obj) {
if (!obj) return [];
if (_.isArray(obj)) return slice.call(obj);
if (_.isString(obj)) {
// Keep surrogate pair characters together
return obj.match(reStrSymbol);
}
if (isArrayLike(obj)) return _.map(obj, _.identity);
return _.values(obj);
};
// Return the number of elements in an object.
_.size = function(obj) {
if (obj == null) return 0;
return isArrayLike(obj) ? obj.length : _.keys(obj).length;
};
// Split a collection into two arrays: one whose elements all satisfy the given
// predicate, and one whose elements all do not satisfy the predicate.
_.partition = group(function(result, value, pass) {
result[pass ? 0 : 1].push(value);
}, true);
// Array Functions
// ---------------
// Get the first element of an array. Passing **n** will return the first N
// values in the array. Aliased as `head` and `take`. The **guard** check
// allows it to work with `_.map`.
_.first = _.head = _.take = function(array, n, guard) {
if (array == null) return void 0;
if (n == null || guard) return array[0];
return _.initial(array, array.length - n);
};
// Returns everything but the last entry of the array. Especially useful on
// the arguments object. Passing **n** will return all the values in
// the array, excluding the last N.
_.initial = function(array, n, guard) {
return slice.call(array, 0, Math.max(0, array.length - (n == null || guard ? 1 : n)));
};
// Get the last element of an array. Passing **n** will return the last N
// values in the array.
_.last = function(array, n, guard) {
if (array == null) return void 0;
if (n == null || guard) return array[array.length - 1];
return _.rest(array, Math.max(0, array.length - n));
};
// Returns everything but the first entry of the array. Aliased as `tail` and `drop`.
// Especially useful on the arguments object. Passing an **n** will return
// the rest N values in the array.
_.rest = _.tail = _.drop = function(array, n, guard) {
return slice.call(array, n == null || guard ? 1 : n);
};
// Trim out all falsy values from an array.
_.compact = function(array) {
return _.filter(array, _.identity);
};
// Internal implementation of a recursive `flatten` function.
var flatten = function(input, shallow, strict, output) {
output = output || [];
var idx = output.length;
for (var i = 0, length = getLength(input); i < length; i++) {
var value = input[i];
if (isArrayLike(value) && (_.isArray(value) || _.isArguments(value))) {
// Flatten current level of array or arguments object
if (shallow) {
var j = 0, len = value.length;
while (j < len) output[idx++] = value[j++];
} else {
flatten(value, shallow, strict, output);
idx = output.length;
}
} else if (!strict) {
output[idx++] = value;
}
}
return output;
};
// Flatten out an array, either recursively (by default), or just one level.
_.flatten = function(array, shallow) {
return flatten(array, shallow, false);
};
// Return a version of the array that does not contain the specified value(s).
_.without = restArgs(function(array, otherArrays) {
return _.difference(array, otherArrays);
});
// Produce a duplicate-free version of the array. If the array has already
// been sorted, you have the option of using a faster algorithm.
// Aliased as `unique`.
_.uniq = _.unique = function(array, isSorted, iteratee, context) {
if (!_.isBoolean(isSorted)) {
context = iteratee;
iteratee = isSorted;
isSorted = false;
}
if (iteratee != null) iteratee = cb(iteratee, context);
var result = [];
var seen = [];
for (var i = 0, length = getLength(array); i < length; i++) {
var value = array[i],
computed = iteratee ? iteratee(value, i, array) : value;
if (isSorted) {
if (!i || seen !== computed) result.push(value);
seen = computed;
} else if (iteratee) {
if (!_.contains(seen, computed)) {
seen.push(computed);
result.push(value);
}
} else if (!_.contains(result, value)) {
result.push(value);
}
}
return result;
};
// Produce an array that contains the union: each distinct element from all of
// the passed-in arrays.
_.union = restArgs(function(arrays) {
return _.uniq(flatten(arrays, true, true));
});
// Produce an array that contains every item shared between all the
// passed-in arrays.
_.intersection = function(array) {
var result = [];
var argsLength = arguments.length;
for (var i = 0, length = getLength(array); i < length; i++) {
var item = array[i];
if (_.contains(result, item)) continue;
var j;
for (j = 1; j < argsLength; j++) {
if (!_.contains(arguments[j], item)) break;
}
if (j === argsLength) result.push(item);
}
return result;
};
// Take the difference between one array and a number of other arrays.
// Only the elements present in just the first array will remain.
_.difference = restArgs(function(array, rest) {
rest = flatten(rest, true, true);
return _.filter(array, function(value){
return !_.contains(rest, value);
});
});
// Complement of _.zip. Unzip accepts an array of arrays and groups
  // each array's elements on shared indices.
_.unzip = function(array) {
var length = array && _.max(array, getLength).length || 0;
var result = Array(length);
for (var index = 0; index < length; index++) {
result[index] = _.pluck(array, index);
}
return result;
};
// Zip together multiple lists into a single array -- elements that share
// an index go together.
_.zip = restArgs(_.unzip);
// Converts lists into objects. Pass either a single array of `[key, value]`
// pairs, or two parallel arrays of the same length -- one of keys, and one of
// the corresponding values.
_.object = function(list, values) {
var result = {};
for (var i = 0, length = getLength(list); i < length; i++) {
if (values) {
result[list[i]] = values[i];
} else {
result[list[i][0]] = list[i][1];
}
}
return result;
};
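  // Usage sketch (illustrative, not part of the library itself): both
  // calling conventions produce the same object.
  //
  //   _.object(['moe', 'larry'], [30, 40]);   // => {moe: 30, larry: 40}
  //   _.object([['moe', 30], ['larry', 40]]); // => {moe: 30, larry: 40}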
// Generator function to create the findIndex and findLastIndex functions
var createPredicateIndexFinder = function(dir) {
return function(array, predicate, context) {
predicate = cb(predicate, context);
var length = getLength(array);
var index = dir > 0 ? 0 : length - 1;
for (; index >= 0 && index < length; index += dir) {
if (predicate(array[index], index, array)) return index;
}
return -1;
};
};
// Returns the first index on an array-like that passes a predicate test
_.findIndex = createPredicateIndexFinder(1);
_.findLastIndex = createPredicateIndexFinder(-1);
// Use a comparator function to figure out the smallest index at which
// an object should be inserted so as to maintain order. Uses binary search.
_.sortedIndex = function(array, obj, iteratee, context) {
iteratee = cb(iteratee, context, 1);
var value = iteratee(obj);
var low = 0, high = getLength(array);
while (low < high) {
var mid = Math.floor((low + high) / 2);
if (iteratee(array[mid]) < value) low = mid + 1; else high = mid;
}
return low;
};
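  // Usage sketch (illustrative, not part of the library itself): the binary
  // search returns the insertion point that keeps the array sorted.
  //
  //   _.sortedIndex([10, 20, 30, 40, 50], 35);                 // => 3
  //   _.sortedIndex([{age: 20}, {age: 40}], {age: 30}, 'age'); // => 1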
// Generator function to create the indexOf and lastIndexOf functions
var createIndexFinder = function(dir, predicateFind, sortedIndex) {
return function(array, item, idx) {
var i = 0, length = getLength(array);
if (typeof idx == 'number') {
if (dir > 0) {
i = idx >= 0 ? idx : Math.max(idx + length, i);
} else {
length = idx >= 0 ? Math.min(idx + 1, length) : idx + length + 1;
}
} else if (sortedIndex && idx && length) {
idx = sortedIndex(array, item);
return array[idx] === item ? idx : -1;
}
if (item !== item) {
idx = predicateFind(slice.call(array, i, length), _.isNaN);
return idx >= 0 ? idx + i : -1;
}
for (idx = dir > 0 ? i : length - 1; idx >= 0 && idx < length; idx += dir) {
if (array[idx] === item) return idx;
}
return -1;
};
};
// Return the position of the first occurrence of an item in an array,
// or -1 if the item is not included in the array.
// If the array is large and already in sort order, pass `true`
// for **isSorted** to use binary search.
_.indexOf = createIndexFinder(1, _.findIndex, _.sortedIndex);
_.lastIndexOf = createIndexFinder(-1, _.findLastIndex);
// Generate an integer Array containing an arithmetic progression. A port of
// the native Python `range()` function. See
// [the Python documentation](http://docs.python.org/library/functions.html#range).
_.range = function(start, stop, step) {
if (stop == null) {
stop = start || 0;
start = 0;
}
if (!step) {
step = stop < start ? -1 : 1;
}
var length = Math.max(Math.ceil((stop - start) / step), 0);
var range = Array(length);
for (var idx = 0; idx < length; idx++, start += step) {
range[idx] = start;
}
return range;
};
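  // Usage sketch (illustrative, not part of the library itself):
  //
  //   _.range(5);          // => [0, 1, 2, 3, 4]
  //   _.range(0, 30, 5);   // => [0, 5, 10, 15, 20, 25]
  //   _.range(0, -5, -1);  // => [0, -1, -2, -3, -4]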
  // Split an **array** into several arrays containing **count** or fewer
  // elements of the initial array.
_.chunk = function(array, count) {
if (count == null || count < 1) return [];
var result = [];
var i = 0, length = array.length;
while (i < length) {
result.push(slice.call(array, i, i += count));
}
return result;
};
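  // Usage sketch (illustrative, not part of the library itself): the final
  // chunk holds whatever remains.
  //
  //   _.chunk([1, 2, 3, 4, 5], 2); // => [[1, 2], [3, 4], [5]]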
// Function (ahem) Functions
// ------------------
// Determines whether to execute a function as a constructor
// or a normal function with the provided arguments
var executeBound = function(sourceFunc, boundFunc, context, callingContext, args) {
if (!(callingContext instanceof boundFunc)) return sourceFunc.apply(context, args);
var self = baseCreate(sourceFunc.prototype);
var result = sourceFunc.apply(self, args);
if (_.isObject(result)) return result;
return self;
};
// Create a function bound to a given object (assigning `this`, and arguments,
// optionally). Delegates to **ECMAScript 5**'s native `Function.bind` if
// available.
_.bind = restArgs(function(func, context, args) {
if (!_.isFunction(func)) throw new TypeError('Bind must be called on a function');
var bound = restArgs(function(callArgs) {
return executeBound(func, bound, context, this, args.concat(callArgs));
});
return bound;
});
// Partially apply a function by creating a version that has had some of its
// arguments pre-filled, without changing its dynamic `this` context. _ acts
// as a placeholder by default, allowing any combination of arguments to be
// pre-filled. Set `_.partial.placeholder` for a custom placeholder argument.
_.partial = restArgs(function(func, boundArgs) {
var placeholder = _.partial.placeholder;
var bound = function() {
var position = 0, length = boundArgs.length;
var args = Array(length);
for (var i = 0; i < length; i++) {
args[i] = boundArgs[i] === placeholder ? arguments[position++] : boundArgs[i];
}
while (position < arguments.length) args.push(arguments[position++]);
return executeBound(func, bound, this, this, args);
};
return bound;
});
_.partial.placeholder = _;
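  // Usage sketch (illustrative, not part of the library itself): `_` as a
  // placeholder defers that argument position until call time.
  //
  //   var subtract = function(a, b) { return b - a; };
  //   var sub5 = _.partial(subtract, 5);
  //   sub5(20);                                   // => 15
  //   var subFrom20 = _.partial(subtract, _, 20);
  //   subFrom20(5);                               // => 15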
// Bind a number of an object's methods to that object. Remaining arguments
// are the method names to be bound. Useful for ensuring that all callbacks
// defined on an object belong to it.
_.bindAll = restArgs(function(obj, keys) {
keys = flatten(keys, false, false);
var index = keys.length;
if (index < 1) throw new Error('bindAll must be passed function names');
while (index--) {
var key = keys[index];
obj[key] = _.bind(obj[key], obj);
}
});
// Memoize an expensive function by storing its results.
_.memoize = function(func, hasher) {
var memoize = function(key) {
var cache = memoize.cache;
var address = '' + (hasher ? hasher.apply(this, arguments) : key);
if (!_.has(cache, address)) cache[address] = func.apply(this, arguments);
return cache[address];
};
memoize.cache = {};
return memoize;
};
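  // Usage sketch (illustrative, not part of the library itself): memoized
  // recursion; pass a `hasher` when the first argument is not a usable key.
  //
  //   var fibonacci = _.memoize(function(n) {
  //     return n < 2 ? n : fibonacci(n - 1) + fibonacci(n - 2);
  //   });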
// Delays a function for the given number of milliseconds, and then calls
// it with the arguments supplied.
_.delay = restArgs(function(func, wait, args) {
return setTimeout(function() {
return func.apply(null, args);
}, wait);
});
// Defers a function, scheduling it to run after the current call stack has
// cleared.
_.defer = _.partial(_.delay, _, 1);
// Returns a function, that, when invoked, will only be triggered at most once
// during a given window of time. Normally, the throttled function will run
// as much as it can, without ever going more than once per `wait` duration;
// but if you'd like to disable the execution on the leading edge, pass
  // `{leading: false}`. To disable execution on the trailing edge, pass
  // `{trailing: false}`.
_.throttle = function(func, wait, options) {
var timeout, context, args, result;
var previous = 0;
if (!options) options = {};
var later = function() {
previous = options.leading === false ? 0 : _.now();
timeout = null;
result = func.apply(context, args);
if (!timeout) context = args = null;
};
var throttled = function() {
var now = _.now();
if (!previous && options.leading === false) previous = now;
var remaining = wait - (now - previous);
context = this;
args = arguments;
if (remaining <= 0 || remaining > wait) {
if (timeout) {
clearTimeout(timeout);
timeout = null;
}
previous = now;
result = func.apply(context, args);
if (!timeout) context = args = null;
} else if (!timeout && options.trailing !== false) {
timeout = setTimeout(later, remaining);
}
return result;
};
throttled.cancel = function() {
clearTimeout(timeout);
previous = 0;
timeout = context = args = null;
};
return throttled;
};
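  // Usage sketch (illustrative, not part of the library itself; the handler
  // name is hypothetical):
  //
  //   var throttled = _.throttle(recalculateLayout, 100);
  //   window.addEventListener('scroll', throttled); // at most ~10 calls/sec
  //   _.throttle(recalculateLayout, 100, {trailing: false}); // no trailing call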
// Returns a function, that, as long as it continues to be invoked, will not
// be triggered. The function will be called after it stops being called for
// N milliseconds. If `immediate` is passed, trigger the function on the
// leading edge, instead of the trailing.
_.debounce = function(func, wait, immediate) {
var timeout, result;
var later = function(context, args) {
timeout = null;
if (args) result = func.apply(context, args);
};
var debounced = restArgs(function(args) {
var callNow = immediate && !timeout;
if (timeout) clearTimeout(timeout);
if (callNow) {
timeout = setTimeout(later, wait);
result = func.apply(this, args);
} else if (!immediate) {
timeout = _.delay(later, wait, this, args);
}
return result;
});
debounced.cancel = function() {
clearTimeout(timeout);
timeout = null;
};
return debounced;
};
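  // Usage sketch (illustrative, not part of the library itself; the handler
  // names are hypothetical):
  //
  //   var lazyLayout = _.debounce(calculateLayout, 300);
  //   window.addEventListener('resize', lazyLayout); // fires once resizing pauses
  //   _.debounce(save, 500, true); // `immediate`: fire on the leading edge instead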
  // Returns a version of `func` wrapped inside the `wrapper` function;
  // `func` is passed to the wrapper as its first argument, allowing you to
  // adjust arguments, run code before and after, and conditionally execute
  // the original function.
_.wrap = function(func, wrapper) {
return _.partial(wrapper, func);
};
// Returns a negated version of the passed-in predicate.
_.negate = function(predicate) {
return function() {
return !predicate.apply(this, arguments);
};
};
// Returns a function that is the composition of a list of functions, each
// consuming the return value of the function that follows.
_.compose = function() {
var args = arguments;
var start = args.length - 1;
return function() {
var i = start;
var result = args[start].apply(this, arguments);
while (i--) result = args[i].call(this, result);
return result;
};
};
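  // Usage sketch (illustrative, not part of the library itself): composition
  // applies right-to-left, so `exclaim` runs before `greet`.
  //
  //   var greet = function(name) { return 'hi: ' + name; };
  //   var exclaim = function(s) { return s.toUpperCase() + '!'; };
  //   _.compose(greet, exclaim)('moe'); // => 'hi: MOE!'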
// Returns a function that will only be executed on and after the Nth call.
_.after = function(times, func) {
return function() {
if (--times < 1) {
return func.apply(this, arguments);
}
};
};
// Returns a function that will only be executed up to (but not including) the Nth call.
_.before = function(times, func) {
var memo;
return function() {
if (--times > 0) {
memo = func.apply(this, arguments);
}
if (times <= 1) func = null;
return memo;
};
};
// Returns a function that will be executed at most one time, no matter how
// often you call it. Useful for lazy initialization.
_.once = _.partial(_.before, 2);
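  // Usage sketch (illustrative, not part of the library itself; the function
  // name is hypothetical):
  //
  //   var initialize = _.once(createApplication);
  //   initialize(); // runs createApplication
  //   initialize(); // no-op; returns the first call's result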
_.restArgs = restArgs;
// Object Functions
// ----------------
// Keys in IE < 9 that won't be iterated by `for key in ...` and thus missed.
var hasEnumBug = !{toString: null}.propertyIsEnumerable('toString');
var nonEnumerableProps = ['valueOf', 'isPrototypeOf', 'toString',
'propertyIsEnumerable', 'hasOwnProperty', 'toLocaleString'];
var collectNonEnumProps = function(obj, keys) {
var nonEnumIdx = nonEnumerableProps.length;
var constructor = obj.constructor;
var proto = _.isFunction(constructor) && constructor.prototype || ObjProto;
// Constructor is a special case.
var prop = 'constructor';
if (_.has(obj, prop) && !_.contains(keys, prop)) keys.push(prop);
while (nonEnumIdx--) {
prop = nonEnumerableProps[nonEnumIdx];
if (prop in obj && obj[prop] !== proto[prop] && !_.contains(keys, prop)) {
keys.push(prop);
}
}
};
// Retrieve the names of an object's own properties.
// Delegates to **ECMAScript 5**'s native `Object.keys`
_.keys = function(obj) {
if (!_.isObject(obj)) return [];
if (nativeKeys) return nativeKeys(obj);
var keys = [];
for (var key in obj) if (_.has(obj, key)) keys.push(key);
// Ahem, IE < 9.
if (hasEnumBug) collectNonEnumProps(obj, keys);
return keys;
};
// Retrieve all the property names of an object.
_.allKeys = function(obj) {
if (!_.isObject(obj)) return [];
var keys = [];
for (var key in obj) keys.push(key);
// Ahem, IE < 9.
if (hasEnumBug) collectNonEnumProps(obj, keys);
return keys;
};
// Retrieve the values of an object's properties.
_.values = function(obj) {
var keys = _.keys(obj);
var length = keys.length;
var values = Array(length);
for (var i = 0; i < length; i++) {
values[i] = obj[keys[i]];
}
return values;
};
  // Returns the results of applying the iteratee to each element of the object.
  // In contrast to _.map, it returns an object.
_.mapObject = function(obj, iteratee, context) {
iteratee = cb(iteratee, context);
var keys = _.keys(obj),
length = keys.length,
results = {};
for (var index = 0; index < length; index++) {
var currentKey = keys[index];
results[currentKey] = iteratee(obj[currentKey], currentKey, obj);
}
return results;
};
// Convert an object into a list of `[key, value]` pairs.
_.pairs = function(obj) {
var keys = _.keys(obj);
var length = keys.length;
var pairs = Array(length);
for (var i = 0; i < length; i++) {
pairs[i] = [keys[i], obj[keys[i]]];
}
return pairs;
};
// Invert the keys and values of an object. The values must be serializable.
_.invert = function(obj) {
var result = {};
var keys = _.keys(obj);
for (var i = 0, length = keys.length; i < length; i++) {
result[obj[keys[i]]] = keys[i];
}
return result;
};
// Return a sorted list of the function names available on the object.
// Aliased as `methods`
_.functions = _.methods = function(obj) {
var names = [];
for (var key in obj) {
if (_.isFunction(obj[key])) names.push(key);
}
return names.sort();
};
// An internal function for creating assigner functions.
var createAssigner = function(keysFunc, defaults) {
return function(obj) {
var length = arguments.length;
if (defaults) obj = Object(obj);
if (length < 2 || obj == null) return obj;
for (var index = 1; index < length; index++) {
var source = arguments[index],
keys = keysFunc(source),
l = keys.length;
for (var i = 0; i < l; i++) {
var key = keys[i];
if (!defaults || obj[key] === void 0) obj[key] = source[key];
}
}
return obj;
};
};
// Extend a given object with all the properties in passed-in object(s).
_.extend = createAssigner(_.allKeys);
// Assigns a given object with all the own properties in the passed-in object(s)
// (https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object/assign)
_.extendOwn = _.assign = createAssigner(_.keys);
// Returns the first key on an object that passes a predicate test
_.findKey = function(obj, predicate, context) {
predicate = cb(predicate, context);
var keys = _.keys(obj), key;
for (var i = 0, length = keys.length; i < length; i++) {
key = keys[i];
if (predicate(obj[key], key, obj)) return key;
}
};
// Internal pick helper function to determine if `obj` has key `key`.
var keyInObj = function(value, key, obj) {
return key in obj;
};
// Return a copy of the object only containing the whitelisted properties.
_.pick = restArgs(function(obj, keys) {
var result = {}, iteratee = keys[0];
if (obj == null) return result;
if (_.isFunction(iteratee)) {
if (keys.length > 1) iteratee = optimizeCb(iteratee, keys[1]);
keys = _.allKeys(obj);
} else {
iteratee = keyInObj;
keys = flatten(keys, false, false);
obj = Object(obj);
}
for (var i = 0, length = keys.length; i < length; i++) {
var key = keys[i];
var value = obj[key];
if (iteratee(value, key, obj)) result[key] = value;
}
return result;
});
// Return a copy of the object without the blacklisted properties.
_.omit = restArgs(function(obj, keys) {
var iteratee = keys[0], context;
if (_.isFunction(iteratee)) {
iteratee = _.negate(iteratee);
if (keys.length > 1) context = keys[1];
} else {
keys = _.map(flatten(keys, false, false), String);
iteratee = function(value, key) {
return !_.contains(keys, key);
};
}
return _.pick(obj, iteratee, context);
});
// Fill in a given object with default properties.
_.defaults = createAssigner(_.allKeys, true);
// Creates an object that inherits from the given prototype object.
// If additional properties are provided then they will be added to the
// created object.
_.create = function(prototype, props) {
var result = baseCreate(prototype);
if (props) _.extendOwn(result, props);
return result;
};
// Create a (shallow-cloned) duplicate of an object.
_.clone = function(obj) {
if (!_.isObject(obj)) return obj;
return _.isArray(obj) ? obj.slice() : _.extend({}, obj);
};
// Invokes interceptor with the obj, and then returns obj.
// The primary purpose of this method is to "tap into" a method chain, in
// order to perform operations on intermediate results within the chain.
_.tap = function(obj, interceptor) {
interceptor(obj);
return obj;
};
// Returns whether an object has a given set of `key:value` pairs.
_.isMatch = function(object, attrs) {
var keys = _.keys(attrs), length = keys.length;
if (object == null) return !length;
var obj = Object(object);
for (var i = 0; i < length; i++) {
var key = keys[i];
if (attrs[key] !== obj[key] || !(key in obj)) return false;
}
return true;
};
// Internal recursive comparison function for `isEqual`.
var eq, deepEq;
eq = function(a, b, aStack, bStack) {
// Identical objects are equal. `0 === -0`, but they aren't identical.
// See the [Harmony `egal` proposal](http://wiki.ecmascript.org/doku.php?id=harmony:egal).
if (a === b) return a !== 0 || 1 / a === 1 / b;
// A strict comparison is necessary because `null == undefined`.
if (a == null || b == null) return a === b;
// `NaN`s are equivalent, but non-reflexive.
if (a !== a) return b !== b;
// Exhaust primitive checks
var type = typeof a;
if (type !== 'function' && type !== 'object' && typeof b != 'object') return false;
return deepEq(a, b, aStack, bStack);
};
// Internal recursive comparison function for `isEqual`.
deepEq = function(a, b, aStack, bStack) {
// Unwrap any wrapped objects.
if (a instanceof _) a = a._wrapped;
if (b instanceof _) b = b._wrapped;
// Compare `[[Class]]` names.
var className = toString.call(a);
if (className !== toString.call(b)) return false;
switch (className) {
// Strings, numbers, regular expressions, dates, and booleans are compared by value.
case '[object RegExp]':
// RegExps are coerced to strings for comparison (Note: '' + /a/i === '/a/i')
case '[object String]':
// Primitives and their corresponding object wrappers are equivalent; thus, `"5"` is
// equivalent to `new String("5")`.
return '' + a === '' + b;
case '[object Number]':
// `NaN`s are equivalent, but non-reflexive.
// Object(NaN) is equivalent to NaN
if (+a !== +a) return +b !== +b;
// An `egal` comparison is performed for other numeric values.
return +a === 0 ? 1 / +a === 1 / b : +a === +b;
case '[object Date]':
case '[object Boolean]':
// Coerce dates and booleans to numeric primitive values. Dates are compared by their
// millisecond representations. Note that invalid dates with millisecond representations
// of `NaN` are not equivalent.
return +a === +b;
}
var areArrays = className === '[object Array]';
if (!areArrays) {
if (typeof a != 'object' || typeof b != 'object') return false;
// Objects with different constructors are not equivalent, but `Object`s or `Array`s
// from different frames are.
var aCtor = a.constructor, bCtor = b.constructor;
if (aCtor !== bCtor && !(_.isFunction(aCtor) && aCtor instanceof aCtor &&
_.isFunction(bCtor) && bCtor instanceof bCtor)
&& ('constructor' in a && 'constructor' in b)) {
return false;
}
}
// Assume equality for cyclic structures. The algorithm for detecting cyclic
// structures is adapted from ES 5.1 section 15.12.3, abstract operation `JO`.
// Initializing stack of traversed objects.
// It's done here since we only need them for objects and arrays comparison.
aStack = aStack || [];
bStack = bStack || [];
var length = aStack.length;
while (length--) {
// Linear search. Performance is inversely proportional to the number of
// unique nested structures.
if (aStack[length] === a) return bStack[length] === b;
}
// Add the first object to the stack of traversed objects.
aStack.push(a);
bStack.push(b);
// Recursively compare objects and arrays.
if (areArrays) {
// Compare array lengths to determine if a deep comparison is necessary.
length = a.length;
if (length !== b.length) return false;
// Deep compare the contents, ignoring non-numeric properties.
while (length--) {
if (!eq(a[length], b[length], aStack, bStack)) return false;
}
} else {
// Deep compare objects.
var keys = _.keys(a), key;
length = keys.length;
// Ensure that both objects contain the same number of properties before comparing deep equality.
if (_.keys(b).length !== length) return false;
while (length--) {
// Deep compare each member
key = keys[length];
if (!(_.has(b, key) && eq(a[key], b[key], aStack, bStack))) return false;
}
}
// Remove the first object from the stack of traversed objects.
aStack.pop();
bStack.pop();
return true;
};
// Perform a deep comparison to check if two objects are equal.
_.isEqual = function(a, b) {
return eq(a, b);
};
// Is a given array, string, or object empty?
// An "empty" object has no enumerable own-properties.
_.isEmpty = function(obj) {
if (obj == null) return true;
if (isArrayLike(obj) && (_.isArray(obj) || _.isString(obj) || _.isArguments(obj))) return obj.length === 0;
return _.keys(obj).length === 0;
};
// Is a given value a DOM element?
_.isElement = function(obj) {
return !!(obj && obj.nodeType === 1);
};
// Is a given value an array?
// Delegates to ECMA5's native Array.isArray
_.isArray = nativeIsArray || function(obj) {
return toString.call(obj) === '[object Array]';
};
// Is a given variable an object?
_.isObject = function(obj) {
var type = typeof obj;
return type === 'function' || type === 'object' && !!obj;
};
// Add some isType methods: isArguments, isFunction, isString, isNumber, isDate, isRegExp, isError.
_.each(['Arguments', 'Function', 'String', 'Number', 'Date', 'RegExp', 'Error', 'Symbol'], function(name) {
_['is' + name] = function(obj) {
return toString.call(obj) === '[object ' + name + ']';
};
});
// Define a fallback version of the method in browsers (ahem, IE < 9), where
// there isn't any inspectable "Arguments" type.
if (!_.isArguments(arguments)) {
_.isArguments = function(obj) {
return _.has(obj, 'callee');
};
}
// Optimize `isFunction` if appropriate. Work around some typeof bugs in old v8,
// IE 11 (#1621), Safari 8 (#1929), and PhantomJS (#2236).
var nodelist = root.document && root.document.childNodes;
if (typeof /./ != 'function' && typeof Int8Array != 'object' && typeof nodelist != 'function') {
_.isFunction = function(obj) {
return typeof obj == 'function' || false;
};
}
// Is a given object a finite number?
_.isFinite = function(obj) {
return !_.isSymbol(obj) && isFinite(obj) && !isNaN(parseFloat(obj));
};
// Is the given value `NaN`?
_.isNaN = function(obj) {
return _.isNumber(obj) && isNaN(obj);
};
// Is a given value a boolean?
_.isBoolean = function(obj) {
return obj === true || obj === false || toString.call(obj) === '[object Boolean]';
};
// Is a given value equal to null?
_.isNull = function(obj) {
return obj === null;
};
// Is a given variable undefined?
_.isUndefined = function(obj) {
return obj === void 0;
};
// Shortcut function for checking if an object has a given property directly
// on itself (in other words, not on a prototype).
_.has = function(obj, key) {
return obj != null && hasOwnProperty.call(obj, key);
};
// Utility Functions
// -----------------
// Run Underscore.js in *noConflict* mode, returning the `_` variable to its
// previous owner. Returns a reference to the Underscore object.
_.noConflict = function() {
root._ = previousUnderscore;
return this;
};
// Keep the identity function around for default iteratees.
_.identity = function(value) {
return value;
};
// Predicate-generating functions. Often useful outside of Underscore.
_.constant = function(value) {
return function() {
return value;
};
};
_.noop = function(){};
_.property = property;
// Generates a function for a given object that returns a given property.
_.propertyOf = function(obj) {
return obj == null ? function(){} : function(key) {
return obj[key];
};
};
// Returns a predicate for checking whether an object has a given set of
// `key:value` pairs.
_.matcher = _.matches = function(attrs) {
attrs = _.extendOwn({}, attrs);
return function(obj) {
return _.isMatch(obj, attrs);
};
};
// Run a function **n** times.
_.times = function(n, iteratee, context) {
var accum = Array(Math.max(0, n));
iteratee = optimizeCb(iteratee, context, 1);
for (var i = 0; i < n; i++) accum[i] = iteratee(i);
return accum;
};
// Return a random integer between min and max (inclusive).
_.random = function(min, max) {
if (max == null) {
max = min;
min = 0;
}
return min + Math.floor(Math.random() * (max - min + 1));
};
// A (possibly faster) way to get the current timestamp as an integer.
_.now = Date.now || function() {
return new Date().getTime();
};
// List of HTML entities for escaping.
  var escapeMap = {
    '&': '&amp;',
    '<': '&lt;',
    '>': '&gt;',
    '"': '&quot;',
    "'": '&#x27;',
    '`': '&#x60;'
  };
var unescapeMap = _.invert(escapeMap);
// Functions for escaping and unescaping strings to/from HTML interpolation.
var createEscaper = function(map) {
var escaper = function(match) {
return map[match];
};
// Regexes for identifying a key that needs to be escaped
var source = '(?:' + _.keys(map).join('|') + ')';
var testRegexp = RegExp(source);
var replaceRegexp = RegExp(source, 'g');
return function(string) {
string = string == null ? '' : '' + string;
return testRegexp.test(string) ? string.replace(replaceRegexp, escaper) : string;
};
};
_.escape = createEscaper(escapeMap);
_.unescape = createEscaper(unescapeMap);
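  // Usage sketch (illustrative, not part of the library itself): the two
  // escapers round-trip each other.
  //
  //   _.escape('Curly, Larry & Moe');       // => 'Curly, Larry &amp; Moe'
  //   _.unescape('Curly, Larry &amp; Moe'); // => 'Curly, Larry & Moe'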
// If the value of the named `property` is a function then invoke it with the
// `object` as context; otherwise, return it.
_.result = function(object, prop, fallback) {
var value = object == null ? void 0 : object[prop];
if (value === void 0) {
value = fallback;
}
return _.isFunction(value) ? value.call(object) : value;
};
// Generate a unique integer id (unique within the entire client session).
// Useful for temporary DOM ids.
var idCounter = 0;
_.uniqueId = function(prefix) {
var id = ++idCounter + '';
return prefix ? prefix + id : id;
};
// By default, Underscore uses ERB-style template delimiters, change the
// following template settings to use alternative delimiters.
_.templateSettings = {
evaluate: /<%([\s\S]+?)%>/g,
interpolate: /<%=([\s\S]+?)%>/g,
escape: /<%-([\s\S]+?)%>/g
};
// When customizing `templateSettings`, if you don't want to define an
// interpolation, evaluation or escaping regex, we need one that is
// guaranteed not to match.
var noMatch = /(.)^/;
// Certain characters need to be escaped so that they can be put into a
// string literal.
var escapes = {
"'": "'",
'\\': '\\',
'\r': 'r',
'\n': 'n',
'\u2028': 'u2028',
'\u2029': 'u2029'
};
var escapeRegExp = /\\|'|\r|\n|\u2028|\u2029/g;
var escapeChar = function(match) {
return '\\' + escapes[match];
};
// JavaScript micro-templating, similar to John Resig's implementation.
// Underscore templating handles arbitrary delimiters, preserves whitespace,
// and correctly escapes quotes within interpolated code.
// NB: `oldSettings` only exists for backwards compatibility.
_.template = function(text, settings, oldSettings) {
if (!settings && oldSettings) settings = oldSettings;
settings = _.defaults({}, settings, _.templateSettings);
// Combine delimiters into one regular expression via alternation.
var matcher = RegExp([
(settings.escape || noMatch).source,
(settings.interpolate || noMatch).source,
(settings.evaluate || noMatch).source
].join('|') + '|$', 'g');
// Compile the template source, escaping string literals appropriately.
var index = 0;
var source = "__p+='";
text.replace(matcher, function(match, escape, interpolate, evaluate, offset) {
source += text.slice(index, offset).replace(escapeRegExp, escapeChar);
index = offset + match.length;
if (escape) {
source += "'+\n((__t=(" + escape + "))==null?'':_.escape(__t))+\n'";
} else if (interpolate) {
source += "'+\n((__t=(" + interpolate + "))==null?'':__t)+\n'";
} else if (evaluate) {
source += "';\n" + evaluate + "\n__p+='";
}
// Adobe VMs need the match returned to produce the correct offset.
return match;
});
source += "';\n";
// If a variable is not specified, place data values in local scope.
if (!settings.variable) source = 'with(obj||{}){\n' + source + '}\n';
source = "var __t,__p='',__j=Array.prototype.join," +
"print=function(){__p+=__j.call(arguments,'');};\n" +
source + 'return __p;\n';
var render;
try {
render = new Function(settings.variable || 'obj', '_', source);
} catch (e) {
e.source = source;
throw e;
}
var template = function(data) {
return render.call(this, data, _);
};
// Provide the compiled source as a convenience for precompilation.
var argument = settings.variable || 'obj';
template.source = 'function(' + argument + '){\n' + source + '}';
return template;
};
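  // Usage sketch (illustrative, not part of the library itself): the three
  // delimiter kinds -- interpolate, escape, and evaluate.
  //
  //   var compiled = _.template('hello: <%= name %>');
  //   compiled({name: 'moe'});   // => 'hello: moe'
  //   var safe = _.template('<b><%- value %></b>');
  //   safe({value: '<script>'}); // => '<b>&lt;script&gt;</b>'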
// Add a "chain" function. Start chaining a wrapped Underscore object.
_.chain = function(obj) {
var instance = _(obj);
instance._chain = true;
return instance;
};
// OOP
// ---------------
// If Underscore is called as a function, it returns a wrapped object that
// can be used OO-style. This wrapper holds altered versions of all the
// underscore functions. Wrapped objects may be chained.
// Helper function to continue chaining intermediate results.
var chainResult = function(instance, obj) {
return instance._chain ? _(obj).chain() : obj;
};
// Add your own custom functions to the Underscore object.
_.mixin = function(obj) {
_.each(_.functions(obj), function(name) {
var func = _[name] = obj[name];
_.prototype[name] = function() {
var args = [this._wrapped];
push.apply(args, arguments);
return chainResult(this, func.apply(_, args));
};
});
};
// Add all of the Underscore functions to the wrapper object.
_.mixin(_);
// Add all mutator Array functions to the wrapper.
_.each(['pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift'], function(name) {
var method = ArrayProto[name];
_.prototype[name] = function() {
var obj = this._wrapped;
method.apply(obj, arguments);
if ((name === 'shift' || name === 'splice') && obj.length === 0) delete obj[0];
return chainResult(this, obj);
};
});
// Add all accessor Array functions to the wrapper.
_.each(['concat', 'join', 'slice'], function(name) {
var method = ArrayProto[name];
_.prototype[name] = function() {
return chainResult(this, method.apply(this._wrapped, arguments));
};
});
// Extracts the result from a wrapped and chained object.
_.prototype.value = function() {
return this._wrapped;
};
// Provide unwrapping proxy for some methods used in engine operations
// such as arithmetic and JSON stringification.
_.prototype.valueOf = _.prototype.toJSON = _.prototype.value;
_.prototype.toString = function() {
return '' + this._wrapped;
};
// AMD registration happens at the end for compatibility with AMD loaders
// that may not enforce next-turn semantics on modules. Even though general
// practice for AMD registration is to be anonymous, underscore registers
// as a named module because, like jQuery, it is a base library that is
// popular enough to be bundled in a third party lib, but not be part of
// an AMD load request. Those cases could generate an error when an
// anonymous define() is called outside of a loader request.
if (typeof define == 'function' && define.amd) {
define('underscore', [], function() {
return _;
});
}
}());<|fim▁end|> | |
<|file_name|>test_webserver.py<|end_file_name|><|fim▁begin|>from twisted.trial.unittest import TestCase
from mock import Mock
from twisted.web.test.test_web import DummyRequest<|fim▁hole|>from cryptosync.resources import make_site
def make_request(uri='', method='GET', args=None):
    args = {} if args is None else args  # avoid a shared mutable default argument
site = make_site(authenticator=Mock())
request = DummyRequest(uri.split('/'))
request.method = method
request.args = args
resource = site.getResourceFor(request)
request.render(resource)
request.data = "".join(request.written)
return request
class RootResourceResponseCodesTestCase(TestCase):
def test_root_resource_ok(self):
request = make_request()
self.assertEquals(request.responseCode, OK)
def test_root_resource_not_found_url(self):
request = make_request(uri='shouldneverfindthisthing')
self.assertEquals(request.responseCode, NOT_FOUND)
class AuthResourceTestCase(TestCase):
def _try_auth(self, credentials, expected):
request = make_request(uri='/auth/', method='POST', args=credentials)
self.assertEquals(request.responseCode, OK)
self.assertEquals(request.data, expected)
def test_auth_success_with_good_parameters(self):
credentials = {'username': 'myself', 'password': 'somethingawesome'}
self._try_auth(credentials, '{"status": "success"}')
def test_auth_failure_with_missing_parameters(self):
credentials = {'username': 'myself', 'password': 'somethingawesome'}
for (k, v) in credentials.items():
self._try_auth({k: v}, '{"status": "failure"}')<|fim▁end|> | from twisted.web.http import OK, NOT_FOUND
|
<|file_name|>search.ts<|end_file_name|><|fim▁begin|>import Route from '@ember/routing/route';
import AuthenticatedRouteMixin from 'ember-simple-auth/mixins/authenticated-route-mixin';
import { refreshModelForQueryParams } from '@datahub/utils/routes/refresh-model-for-query-params';
import { action, setProperties, set } from '@ember/object';
import { inject as service } from '@ember/service';
import SearchService from '@datahub/shared/services/search';
import SearchController from 'datahub-web/controllers/search';
import Transition from '@ember/routing/-private/transition';
import { DatasetEntity } from '@datahub/data-models/entity/dataset/dataset-entity';
import DataModelsService from '@datahub/data-models/services/data-models';
import { IEntityRenderCommonPropsSearch } from '@datahub/data-models/types/search/search-entity-render-prop';
import { ISearchEntityRenderProps } from '@datahub/data-models/types/search/search-entity-render-prop';
import Configurator from '@datahub/shared/services/configurator';
import { DataModelName } from '@datahub/data-models/constants/entity/index';
export default class SearchRoute extends Route.extend(AuthenticatedRouteMixin) {
/**
* Service to get the right class for the entity type
*/
@service('data-models')
dataModels!: DataModelsService;
/**
* Injects the service taht lets us access our application configurations
*/
@service
configurator!: Configurator;
/**
* Stores a reference to the application search service
* @type {SearchService}
* @memberof SearchRoute
*/
@service
  search!: SearchService;
/**
* For each listed query parameter, invoke a full transition if the query parameter changes in the url
* @type {Record<string, {
* refreshModel: true;
* }>}
* @memberof SearchRoute
*/
queryParams = refreshModelForQueryParams(['entity', 'page', 'facets', 'keyword']);
/**
   * Makes an API call and processes search entries
* @param {ISearchApiParams} queryParam
* @return {void}
* @memberof SearchRoute<|fim▁hole|> entity: DataModelName;
page: string;
facets: string;
keyword: string;
}): {
fields: Array<ISearchEntityRenderProps>;
showFacets: boolean;
searchConfig: IEntityRenderCommonPropsSearch;
} {
const { dataModels } = this;
const dataModelEntity = dataModels.getModel(queryParam.entity);
const searchProps = dataModelEntity.renderProps.search;
setProperties(this.search, {
entity: queryParam.entity || DatasetEntity.displayName,
keyword: queryParam.keyword
});
return {
searchConfig: searchProps,
fields: searchProps.attributes,
showFacets: typeof searchProps.showFacets === 'boolean' ? searchProps.showFacets : true
};
}
/**
* Will cleanup controller, so there are no variables leaking.
* @param controller
* @param isExiting
* @param transition
*/
resetController(controller: SearchController, isExiting: boolean, transition: Transition): void {
super.resetController(controller, isExiting, transition);
if (isExiting) {
controller.resetData();
}
}
/**
* In order to keep the service up date with the state. The router pass
* the keyword from the queryParams to the service.
* @param transition Ember transition
*/
@action
willTransition(transition: Transition): void {
if (transition.to.name !== 'search') {
set(this.search, 'keyword', '');
}
}
}<|fim▁end|> | */
model(queryParam: { |
<|file_name|>ArraySortService.java<|end_file_name|><|fim▁begin|>package com.orionplatform.data.data_structures.array;
import java.util.Arrays;
import java.util.Comparator;
import com.orionplatform.core.abstraction.OrionService;
public class ArraySortService<T> extends OrionService
{
public static synchronized void sort(byte[] array)
{
Arrays.sort(array);
}
public static synchronized void sort(short[] array)
{
Arrays.sort(array);
}
public static synchronized void sort(int[] array)
{
Arrays.sort(array);
}
public static synchronized void sort(long[] array)
{
Arrays.sort(array);
}
public static synchronized void sort(float[] array)
{
Arrays.sort(array);
}
public static synchronized void sort(double[] array)
{
Arrays.sort(array);
}
public static synchronized void sort(char[] array)
{
Arrays.sort(array);
}
public static synchronized <T> void sort(T[] array)
{
Arrays.sort(array);
}
public static synchronized <T> void sort(T[] array, Comparator<T> comparator)
{<|fim▁hole|><|fim▁end|> | Arrays.sort(array, comparator);
}
} |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>//! The command-line tool works in the following order:
//!
//! 1. [`RemoteMounts::load`](struct.RemoteMounts.html#method.load) is called to load the contents
//! of `/etc/mtab` into an internal struct.
//! 2. [`RemoteMounts::into_current_dir`](struct.RemoteMounts.html#method.into_current_dir) is
//! called to convert the above into a [`Location`](enum.Location.html).
//! 3. [`Location::into_env_args`](enum.Location.html#method.into_env_args) is called to convert the<|fim▁hole|>//! convert the above into
//! [`std::process::Command`](https://doc.rust-lang.org/nightly/std/process/struct.Command.html).
//!
//! For `rpwd`, only steps 1 and 2 are run, and the resulting `Location` is printed.
mod location;
mod program_args;
mod remote_location;
mod remote_mounts;
pub use location::*;
pub use program_args::*;
pub use remote_location::*;
pub use remote_mounts::*;<|fim▁end|> | //! above into [`ProgramArgs`](struct.ProgramArgs.html).
//! 3. [`ProgramArgs::into_command`](struct.ProgramArgs.html#method.into_command) is called to |
<|file_name|>docker.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
import sys
sys.path.insert(0, '/Users/neo/workspace/devops')
from netkiller.docker import *
# from environment.experiment import experiment
# from environment.development import development
# from environment.production import production
from compose.devops import devops
from compose.demo import demo
# from libexec.portainer import portainer
# print(test)<|fim▁hole|># exit()
if __name__ == "__main__":
try:
docker = Docker()
# docker.env({'DOCKER_HOST':'ssh://[email protected]','COMPOSE_PROJECT_NAME':'experiment'})
# docker.sysctl({"vm.max_map_count": "262144"})
# docker.environment(experiment)
# docker.environment(development)
# docker.environment(logging)
docker.environment(devops)
# docker.environment(portainer)
docker.environment(demo)
docker.main()
except KeyboardInterrupt:
print("Crtl+C Pressed. Shutting down.")<|fim▁end|> | |
<|file_name|>element.py<|end_file_name|><|fim▁begin|>import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"
PY3K = (sys.version_info[0] > 2)
whitespace_re = re.compile("\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
def alias(self):
return setattr(self, attr)
return alias
class NamespacedAttribute(unicode):
def __new__(cls, prefix, name, namespace=None):
if name is None:
obj = unicode.__new__(cls, prefix)
elif prefix is None:
# Not really namespaced.
obj = unicode.__new__(cls, name)
else:
obj = unicode.__new__(cls, prefix + ":" + name)
obj.prefix = prefix
obj.name = name
obj.namespace = namespace
return obj
class AttributeValueWithCharsetSubstitution(unicode):
"""A stand-in object for a character encoding specified in HTML."""
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'charset' attribute.
When Beautiful Soup parses the markup '<meta charset="utf8">', the
value of the 'charset' attribute will be one of these objects.
"""
def __new__(cls, original_value):
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'content' attribute.
When Beautiful Soup parses the markup:
<meta http-equiv="content-type" content="text/html; charset=utf8">
The value of the 'content' attribute will be one of these objects.
"""
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
def __new__(cls, original_value):
match = cls.CHARSET_RE.search(original_value)
if match is None:
# No substitution necessary.
return unicode.__new__(unicode, original_value)
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
def rewrite(match):
return match.group(1) + encoding
return self.CHARSET_RE.sub(rewrite, self.original_value)
class HTMLAwareEntitySubstitution(EntitySubstitution):
"""Entity substitution rules that are aware of some HTML quirks.
Specifically, the contents of <script> and <style> tags should not
undergo entity substitution.
Incoming NavigableString objects are checked to see if they're the
direct children of a <script> or <style> tag.
"""
cdata_containing_tags = set(["script", "style"])
preformatted_tags = set(["pre"])
@classmethod
def _substitute_if_appropriate(cls, ns, f):
if (isinstance(ns, NavigableString)
and ns.parent is not None
and ns.parent.name in cls.cdata_containing_tags):
# Do nothing.
return ns
# Substitute.
return f(ns)
@classmethod
def substitute_html(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_html)
@classmethod
def substitute_xml(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_xml)
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
# There are five possible values for the "formatter" argument passed in
# to methods like encode() and prettify():
#
# "html" - All Unicode characters with corresponding HTML entities
# are converted to those entities on output.
# "minimal" - Bare ampersands and angle brackets are converted to
# XML entities: & < >
# None - The null formatter. Unicode characters are never
# converted to entities. This is not recommended, but it's
# faster than "minimal".
# A function - This function will be called on every string that
# needs to undergo entity substitution.
#
# In an HTML document, the default "html" and "minimal" functions
# will leave the contents of <script> and <style> tags alone. For
# an XML document, all tags will be given the same treatment.
HTML_FORMATTERS = {
"html" : HTMLAwareEntitySubstitution.substitute_html,
"minimal" : HTMLAwareEntitySubstitution.substitute_xml,
None : None
}
XML_FORMATTERS = {
"html" : EntitySubstitution.substitute_html,
"minimal" : EntitySubstitution.substitute_xml,
None : None
}
def format_string(self, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
if formatter is None:
output = s
else:
output = formatter(s)
return output
@property
def _is_xml(self):
"""Is this element part of an XML tree or an HTML tree?
This is used when mapping a formatter name ("minimal") to an
appropriate function (one that performs entity-substitution on
the contents of <script> and <style> tags, or not). It's
inefficient, but it should be called very rarely.
"""
if self.parent is None:
# This is the top-level object. It should have .is_xml set
# from tree creation. If not, take a guess--BS is usually
# used on HTML markup.
return getattr(self, 'is_xml', False)
return self.parent._is_xml
def _formatter_for_name(self, name):
"Look up a formatter function based on its name and the tree."
if self._is_xml:
return self.XML_FORMATTERS.get(
name, EntitySubstitution.substitute_xml)
else:
return self.HTML_FORMATTERS.get(
name, HTMLAwareEntitySubstitution.substitute_xml)
def setup(self, parent=None, previous_element=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous_element = previous_element
if previous_element is not None:
self.previous_element.next_element = self
self.next_element = None
self.previous_sibling = None
self.next_sibling = None
if self.parent is not None and self.parent.contents:
self.previous_sibling = self.parent.contents[-1]
self.previous_sibling.next_sibling = self
nextSibling = _alias("next_sibling") # BS3
previousSibling = _alias("previous_sibling") # BS3
def replace_with(self, replace_with):
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent is not None:
del self.parent.contents[self.parent.index(self)]
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
last_child = self._last_descendant()
next_element = last_child.next_element
if self.previous_element is not None:
self.previous_element.next_element = next_element
if next_element is not None:
next_element.previous_element = self.previous_element
self.previous_element = None
last_child.next_element = None
self.parent = None
if self.previous_sibling is not None:
self.previous_sibling.next_sibling = self.next_sibling
if self.next_sibling is not None:
self.next_sibling.previous_sibling = self.previous_sibling
self.previous_sibling = self.next_sibling = None
return self
def _last_descendant(self):
"Finds the last element beneath this object to be parsed."
last_child = self
while hasattr(last_child, 'contents') and last_child.contents:
last_child = last_child.contents[-1]
return last_child
# BS3: Not part of the API!
_lastRecursiveChild = _last_descendant
def insert(self, position, new_child):
if new_child is self:
raise ValueError("Cannot insert a tag into itself.")
if (isinstance(new_child, basestring)
and not isinstance(new_child, NavigableString)):
new_child = NavigableString(new_child)
position = min(position, len(self.contents))
if hasattr(new_child, 'parent') and new_child.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if new_child.parent is self:
current_index = self.index(new_child)
if current_index < position:
# We're moving this element further down the list
# of this object's children. That means that when
# we extract this element, our target index will
# jump down one.
position -= 1
new_child.extract()
new_child.parent = self
previous_child = None
if position == 0:
new_child.previous_sibling = None
new_child.previous_element = self
else:
previous_child = self.contents[position - 1]
new_child.previous_sibling = previous_child
new_child.previous_sibling.next_sibling = new_child
new_child.previous_element = previous_child._last_descendant()
if new_child.previous_element is not None:
new_child.previous_element.next_element = new_child
new_childs_last_element = new_child._last_descendant()
if position >= len(self.contents):
new_child.next_sibling = None
parent = self
parents_next_sibling = None
while parents_next_sibling is None and parent is not None:
parents_next_sibling = parent.next_sibling
parent = parent.parent
if parents_next_sibling is not None:
# We found the element that comes next in the document.
break
if parents_next_sibling is not None:
new_childs_last_element.next_element = parents_next_sibling
else:
# The last element of this tag is the last element in
# the document.
new_childs_last_element.next_element = None
else:
next_child = self.contents[position]
new_child.next_sibling = next_child
if new_child.next_sibling is not None:
new_child.next_sibling.previous_sibling = new_child
new_childs_last_element.next_element = next_child
if new_childs_last_element.next_element is not None:
new_childs_last_element.next_element.previous_element = new_childs_last_element
self.contents.insert(position, new_child)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def insert_before(self, predecessor):
"""Makes the given element the immediate predecessor of this one.
The two elements will have the same parent, and the given element
will be immediately before this one.
"""
if self is predecessor:
raise ValueError("Can't insert an element before itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'before' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(predecessor, PageElement):
predecessor.extract()
index = parent.index(self)
parent.insert(index, predecessor)
def insert_after(self, successor):
"""Makes the given element the immediate successor of this one.
The two elements will have the same parent, and the given element
will be immediately after this one.
"""
if self is successor:
raise ValueError("Can't insert an element after itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1, successor)
def find_next(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
findNext = find_next # BS3
def find_all_next(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.next_elements,
**kwargs)
findAllNext = find_all_next # BS3
def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
findNextSibling = find_next_sibling # BS3
def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.next_siblings, **kwargs)
findNextSiblings = find_next_siblings # BS3
fetchNextSiblings = find_next_siblings # BS2
def find_previous(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._find_one(
self.find_all_previous, name, attrs, text, **kwargs)
findPrevious = find_previous # BS3
def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.previous_elements,
**kwargs)
findAllPrevious = find_all_previous # BS3
fetchPrevious = find_all_previous # BS2
def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._find_one(self.find_previous_siblings, name, attrs, text,
**kwargs)
findPreviousSibling = find_previous_sibling # BS3
def find_previous_siblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.previous_siblings, **kwargs)
findPreviousSiblings = find_previous_siblings # BS3
fetchPreviousSiblings = find_previous_siblings # BS2
def find_parent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _find_one because findParents takes a different
# set of arguments.
r = None
l = self.find_parents(name, attrs, 1, **kwargs)
if l:
r = l[0]
return r
findParent = find_parent # BS3
def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._find_all(name, attrs, None, limit, self.parents,
**kwargs)
findParents = find_parents # BS3
fetchParents = find_parents # BS2
@property
def next(self):
return self.next_element
@property
def previous(self):
return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _find_all(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
elif text is None and not limit and not attrs and not kwargs:
# Optimization to find all tags.
if name is True or name is None:
return [element for element in generator
if isinstance(element, Tag)]
# Optimization to find all tags with a given name.
elif isinstance(name, basestring):
return [element for element in generator
if isinstance(element, Tag) and element.name == name]
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
else:
# Build a SoupStrainer
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
while True:
try:
i = next(generator)
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
@property
def next_elements(self):
i = self.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
i = self.parent
while i is not None:
yield i
i = i.parent
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-z0-9]+$')
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
attribselect_re = re.compile(
r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
def _attr_value_as_string(self, value, default=None):
"""Force an attribute value into a string representation.
A multi-valued attribute will be converted into a
space-separated stirng.
"""
value = self.get(value, default)
if isinstance(value, list) or isinstance(value, tuple):
value =" ".join(value)
return value
def _tag_name_matches_and(self, function, tag_name):
if not tag_name:
return function
else:
def _match(tag):
return tag.name == tag_name and function(tag)
return _match
def _attribute_checker(self, operator, attribute, value=''):
"""Create a function that performs a CSS selector operation.
Takes an operator, attribute and optional value. Returns a
function that will return True for elements that match that
combination.
"""
if operator == '=':
# string representation of `attribute` is equal to `value`
return lambda el: el._attr_value_as_string(attribute) == value
elif operator == '~':
# space-separated list representation of `attribute`
# contains `value`
def _includes_value(element):
attribute_value = element.get(attribute, [])
if not isinstance(attribute_value, list):
attribute_value = attribute_value.split()
return value in attribute_value
return _includes_value
elif operator == '^':
# string representation of `attribute` starts with `value`
return lambda el: el._attr_value_as_string(
attribute, '').startswith(value)
elif operator == '$':
# string represenation of `attribute` ends with `value`
return lambda el: el._attr_value_as_string(
attribute, '').endswith(value)
elif operator == '*':
# string representation of `attribute` contains `value`
return lambda el: value in el._attr_value_as_string(attribute, '')
elif operator == '|':
# string representation of `attribute` is either exactly
# `value` or starts with `value` and then a dash.
def _is_or_starts_with_dash(element):
attribute_value = element._attr_value_as_string(attribute, '')
return (attribute_value == value or attribute_value.startswith(
value + '-'))
return _is_or_starts_with_dash
else:
return lambda el: el.has_attr(attribute)
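    # Illustrative mapping from CSS attribute selectors to the checkers built
    # above (selector strings assumed, not from the original source):
    #   a[href^="http"]    -> self._attribute_checker('^', 'href', 'http')
    #   a[rel~="nofollow"] -> self._attribute_checker('~', 'rel', 'nofollow')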
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
return self.next_elements
def nextSiblingGenerator(self):
return self.next_siblings
def previousGenerator(self):
return self.previous_elements
def previousSiblingGenerator(self):
return self.previous_siblings
def parentGenerator(self):
return self.parents
class NavigableString(unicode, PageElement):
PREFIX = ''
SUFFIX = ''
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, unicode):
return unicode.__new__(cls, value)
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __copy__(self):
return self
def __getnewargs__(self):
return (unicode(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def output_ready(self, formatter="minimal"):
output = self.format_string(self, formatter)
return self.PREFIX + output + self.SUFFIX
class PreformattedString(NavigableString):
"""A NavigableString not subject to the normal formatting rules.
The string will be passed into the formatter (to trigger side effects),
but the return value will be ignored.
"""
def output_ready(self, formatter="minimal"):
"""CData strings are passed into the formatter.
But the return value is ignored."""
self.format_string(self, formatter)
return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
PREFIX = u'<![CDATA['
SUFFIX = u']]>'
class ProcessingInstruction(PreformattedString):
PREFIX = u'<?'
SUFFIX = u'?>'
class Comment(PreformattedString):
PREFIX = u'<!--'
SUFFIX = u'-->'
class Declaration(PreformattedString):
PREFIX = u'<!'
SUFFIX = u'!>'
class Doctype(PreformattedString):
@classmethod
def for_name_and_ids(cls, name, pub_id, system_id):
value = name or ''
if pub_id is not None:
value += ' PUBLIC "%s"' % pub_id
            if system_id is not None:
                value += ' "%s"' % system_id
elif system_id is not None:
value += ' SYSTEM "%s"' % system_id
return Doctype(value)
PREFIX = u'<!DOCTYPE '
SUFFIX = u'>\n'
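# Illustrative rendering (doctype values assumed):
#   Doctype.for_name_and_ids('html', '-//W3C//DTD XHTML 1.0 Strict//EN',
#       'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd')
# outputs:
#   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">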
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
elif builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
self.can_be_empty_element = False
parserClass = _alias("parser_class") # BS3
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
@string.setter
def string(self, string):
self.clear()
self.append(string.__class__(string))
def _all_strings(self, strip=False, types=(NavigableString, CData)):
"""Yield all strings of certain classes, possibly stripping them.
By default, yields only NavigableString and CData objects. So
no comments, processing instructions, etc.
"""
for descendant in self.descendants:
if (
(types is None and not isinstance(descendant, NavigableString))
or
(types is not None and type(descendant) not in types)):
continue
if strip:
descendant = descendant.strip()
if len(descendant) == 0:
continue
yield descendant
strings = property(_all_strings)
@property
def stripped_strings(self):
for string in self._all_strings(True):
yield string
def get_text(self, separator=u"", strip=False,
types=(NavigableString, CData)):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(
strip, types=types)])
getText = get_text
text = property(get_text)
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
i = self
while i is not None:
next = i.next_element
i.__dict__.clear()
i.contents = []
i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self.attrs.get(key, default)
def has_attr(self, key):
return key in self.attrs
def __hash__(self):
return str(self).__hash__()
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self.attrs[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self.attrs[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
self.attrs.pop(key, None)
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
find_all() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return self.find_all(*args, **kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.endswith('Tag'):
# BS3: soup.aTag -> "soup.find("a")
tag_name = tag[:-3]
warnings.warn(
'.%sTag is deprecated, use .find("%s") instead.' % (
tag_name, tag_name))
return self.find(tag_name)
# We special case contents to avoid recursion.
elif not tag.startswith("__") and not tag=="contents":
return self.find(tag)
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.encode(encoding)
def __unicode__(self):
return self.decode()
def __str__(self):
return self.encode()
if PY3K:
__str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, formatter="minimal",
errors="xmlcharrefreplace"):
# Turn the data structure into Unicode, then encode the
# Unicode.
u = self.decode(indent_level, encoding, formatter)
return u.encode(encoding, errors)
def _should_pretty_print(self, indent_level):
"""Should this tag be pretty-printed?"""
return (
indent_level is not None and
(self.name not in HTMLAwareEntitySubstitution.preformatted_tags
or self._is_xml))
def decode(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a Unicode representation of this tag and its contents.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
attrs = []<|fim▁hole|> if self.attrs:
for key, val in sorted(self.attrs.items()):
if val is None:
decoded = key
else:
if isinstance(val, list) or isinstance(val, tuple):
val = ' '.join(val)
elif not isinstance(val, basestring):
val = unicode(val)
elif (
isinstance(val, AttributeValueWithCharsetSubstitution)
and eventual_encoding is not None):
val = val.encode(eventual_encoding)
text = self.format_string(val, formatter)
decoded = (
unicode(key) + '='
+ EntitySubstitution.quoted_attribute_value(text))
attrs.append(decoded)
close = ''
closeTag = ''
prefix = ''
if self.prefix:
prefix = self.prefix + ":"
if self.is_empty_element:
close = '/'
else:
closeTag = '</%s%s>' % (prefix, self.name)
pretty_print = self._should_pretty_print(indent_level)
space = ''
indent_space = ''
if indent_level is not None:
indent_space = (' ' * (indent_level - 1))
if pretty_print:
space = indent_space
indent_contents = indent_level + 1
else:
indent_contents = None
contents = self.decode_contents(
indent_contents, eventual_encoding, formatter)
if self.hidden:
# This is the 'document root' object.
s = contents
else:
s = []
attribute_string = ''
if attrs:
attribute_string = ' ' + ' '.join(attrs)
if indent_level is not None:
# Even if this particular tag is not pretty-printed,
# we should indent up to the start of the tag.
s.append(indent_space)
s.append('<%s%s%s%s>' % (
prefix, self.name, attribute_string, close))
if pretty_print:
s.append("\n")
s.append(contents)
if pretty_print and contents and contents[-1] != "\n":
s.append("\n")
if pretty_print and closeTag:
s.append(space)
s.append(closeTag)
if indent_level is not None and closeTag and self.next_sibling:
# Even if this particular tag is not pretty-printed,
# we're now done with the tag, and we should add a
# newline if appropriate.
s.append("\n")
s = ''.join(s)
return s
def prettify(self, encoding=None, formatter="minimal"):
if encoding is None:
return self.decode(True, formatter=formatter)
else:
return self.encode(encoding, True, formatter=formatter)
def decode_contents(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a Unicode string.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
pretty_print = (indent_level is not None)
s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.output_ready(formatter)
elif isinstance(c, Tag):
s.append(c.decode(indent_level, eventual_encoding,
formatter))
if text and indent_level and not self.name == 'pre':
text = text.strip()
if text:
if pretty_print and not self.name == 'pre':
s.append(" " * (indent_level - 1))
s.append(text)
if pretty_print and not self.name == 'pre':
s.append("\n")
return ''.join(s)
def encode_contents(
self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a bytestring."""
contents = self.decode_contents(indent_level, encoding, formatter)
return contents.encode(encoding)
# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
if not prettyPrint:
indentLevel = None
return self.encode_contents(
indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
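        # Hedged usage examples (assume `soup` is a parsed document; `re` is
        # the stdlib module already imported by this file):
        #   soup.find_all('b')                        # every <b> tag
        #   soup.find_all(['a', 'b'])                 # <a> and <b> tags
        #   soup.find_all(id='link2')                 # keyword args match attributes
        #   soup.find_all(text=re.compile('sisters')) # match NavigableStrings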
generator = self.descendants
if not recursive:
generator = self.children
return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
#Generator methods
@property
def children(self):
# return iter() to make the purpose of the method clear
return iter(self.contents) # XXX This seems to be untested.
@property
def descendants(self):
if not len(self.contents):
return
stopNode = self._last_descendant().next_element
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next_element
# CSS selector code
_selector_combinators = ['>', '+', '~']
_select_debug = False
def select(self, selector, _candidate_generator=None):
"""Perform a CSS selection operation on the current element."""
tokens = selector.split()
current_context = [self]
if tokens[-1] in self._selector_combinators:
raise ValueError(
'Final combinator "%s" is missing an argument.' % tokens[-1])
if self._select_debug:
print 'Running CSS selector "%s"' % selector
for index, token in enumerate(tokens):
if self._select_debug:
print ' Considering token "%s"' % token
recursive_candidate_generator = None
tag_name = None
if tokens[index-1] in self._selector_combinators:
# This token was consumed by the previous combinator. Skip it.
if self._select_debug:
print ' Token was consumed by the previous combinator.'
continue
# Each operation corresponds to a checker function, a rule
# for determining whether a candidate matches the
# selector. Candidates are generated by the active
# iterator.
checker = None
m = self.attribselect_re.match(token)
if m is not None:
# Attribute selector
tag_name, attribute, operator, value = m.groups()
checker = self._attribute_checker(operator, attribute, value)
elif '#' in token:
# ID selector
tag_name, tag_id = token.split('#', 1)
def id_matches(tag):
return tag.get('id', None) == tag_id
checker = id_matches
elif '.' in token:
# Class selector
tag_name, klass = token.split('.', 1)
classes = set(klass.split('.'))
def classes_match(candidate):
return classes.issubset(candidate.get('class', []))
checker = classes_match
elif ':' in token:
# Pseudo-class
tag_name, pseudo = token.split(':', 1)
if tag_name == '':
                    raise ValueError(
                        "A pseudo-class must be prefixed with a tag name.")
pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
found = []
if pseudo_attributes is not None:
pseudo_type, pseudo_value = pseudo_attributes.groups()
if pseudo_type == 'nth-of-type':
try:
pseudo_value = int(pseudo_value)
except:
                            raise NotImplementedError(
                                'Only numeric values are currently supported for the nth-of-type pseudo-class.')
if pseudo_value < 1:
                            raise ValueError(
                                'nth-of-type pseudo-class value must be at least 1.')
class Counter(object):
def __init__(self, destination):
self.count = 0
self.destination = destination
def nth_child_of_type(self, tag):
self.count += 1
if self.count == self.destination:
return True
if self.count > self.destination:
# Stop the generator that's sending us
# these things.
raise StopIteration()
return False
checker = Counter(pseudo_value).nth_child_of_type
else:
                        raise NotImplementedError(
                            'Only the following pseudo-classes are implemented: nth-of-type.')
elif token == '*':
# Star selector -- matches everything
pass
elif token == '>':
# Run the next token as a CSS selector against the
# direct children of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.children
elif token == '~':
# Run the next token as a CSS selector against the
# siblings of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.next_siblings
elif token == '+':
# For each tag in the current context, run the next
# token as a CSS selector against the tag's next
# sibling that's a tag.
def next_tag_sibling(tag):
yield tag.find_next_sibling(True)
recursive_candidate_generator = next_tag_sibling
elif self.tag_name_re.match(token):
# Just a tag name.
tag_name = token
else:
raise ValueError(
'Unsupported or invalid CSS selector: "%s"' % token)
if recursive_candidate_generator:
# This happens when the selector looks like "> foo".
#
# The generator calls select() recursively on every
# member of the current context, passing in a different
# candidate generator and a different selector.
#
# In the case of "> foo", the candidate generator is
# one that yields a tag's direct children (">"), and
# the selector is "foo".
next_token = tokens[index+1]
def recursive_select(tag):
if self._select_debug:
print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)
print '-' * 40
for i in tag.select(next_token, recursive_candidate_generator):
if self._select_debug:
print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)
yield i
if self._select_debug:
print '-' * 40
_use_candidate_generator = recursive_select
elif _candidate_generator is None:
# By default, a tag's candidates are all of its
# children. If tag_name is defined, only yield tags
# with that name.
if self._select_debug:
                    if tag_name:
                        check = tag_name
                    else:
                        check = "[any]"
print ' Default candidate generator, tag name="%s"' % check
if self._select_debug:
# This is redundant with later code, but it stops
# a bunch of bogus tags from cluttering up the
# debug log.
def default_candidate_generator(tag):
for child in tag.descendants:
if not isinstance(child, Tag):
continue
if tag_name and not child.name == tag_name:
continue
yield child
_use_candidate_generator = default_candidate_generator
else:
_use_candidate_generator = lambda tag: tag.descendants
else:
_use_candidate_generator = _candidate_generator
new_context = []
new_context_ids = set([])
for tag in current_context:
if self._select_debug:
print " Running candidate generator on %s %s" % (
tag.name, repr(tag.attrs))
for candidate in _use_candidate_generator(tag):
if not isinstance(candidate, Tag):
continue
if tag_name and candidate.name != tag_name:
continue
if checker is not None:
try:
result = checker(candidate)
except StopIteration:
# The checker has decided we should no longer
# run the generator.
break
if checker is None or result:
if self._select_debug:
print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))
if id(candidate) not in new_context_ids:
# If a tag matches a selector more than once,
# don't include it in the context more than once.
new_context.append(candidate)
new_context_ids.add(id(candidate))
elif self._select_debug:
print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs))
current_context = new_context
if self._select_debug:
print "Final verdict:"
for i in current_context:
print " %s %s" % (i.name, i.attrs)
return current_context
# Old names for backwards compatibility
def childGenerator(self):
return self.children
def recursiveChildGenerator(self):
return self.descendants
def has_key(self, key):
"""This was kind of misleading because has_key() (attributes)
was different from __in__ (contents). has_key() is gone in
Python 3, anyway."""
warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
key))
return self.has_attr(key)
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = self._normalize_search_value(name)
if not isinstance(attrs, dict):
# Treat a non-dict value for attrs as a search for the 'class'
# attribute.
kwargs['class'] = attrs
attrs = None
if 'class_' in kwargs:
# Treat class_="foo" as a search for the 'class'
# attribute, overriding any non-dict value for attrs.
kwargs['class'] = kwargs['class_']
del kwargs['class_']
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
normalized_attrs = {}
for key, value in attrs.items():
normalized_attrs[key] = self._normalize_search_value(value)
self.attrs = normalized_attrs
self.text = self._normalize_search_value(text)
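        # Illustrative usage sketch: SoupStrainer('a', href=True) matches <a>
        # tags that carry an href attribute, and SoupStrainer(attrs='header')
        # is shorthand for a class_='header' search.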
def _normalize_search_value(self, value):
# Leave it alone if it's a Unicode string, a callable, a
# regular expression, a boolean, or None.
if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match')
or isinstance(value, bool) or value is None):
return value
# If it's a bytestring, convert it to Unicode, treating it as UTF-8.
if isinstance(value, bytes):
return value.decode("utf8")
# If it's listlike, convert it into a list of strings.
if hasattr(value, '__iter__'):
new_value = []
for v in value:
if (hasattr(v, '__iter__') and not isinstance(v, bytes)
and not isinstance(v, unicode)):
# This is almost certainly the user's mistake. In the
# interests of avoiding infinite loops, we'll let
# it through as-is rather than doing a recursive call.
new_value.append(v)
else:
new_value.append(self._normalize_search_value(v))
return new_value
# Otherwise, convert it into a Unicode string.
# The unicode(str()) thing is so this will do the same thing on Python 2
# and Python 3.
return unicode(str(value))
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def search_tag(self, markup_name=None, markup_attrs={}):
found = None
markup = None
if isinstance(markup_name, Tag):
markup = markup_name
markup_attrs = markup
call_function_with_tag_data = (
isinstance(self.name, collections.Callable)
and not isinstance(markup_name, Tag))
if ((not self.name)
or call_function_with_tag_data
or (markup and self._matches(markup, self.name))
or (not markup and self._matches(markup_name, self.name))):
if call_function_with_tag_data:
match = self.name(markup_name, markup_attrs)
else:
match = True
markup_attr_map = None
for attr, match_against in list(self.attrs.items()):
if not markup_attr_map:
if hasattr(markup_attrs, 'get'):
markup_attr_map = markup_attrs
else:
markup_attr_map = {}
for k, v in markup_attrs:
markup_attr_map[k] = v
attr_value = markup_attr_map.get(attr)
if not self._matches(attr_value, match_against):
match = False
break
if match:
if markup:
found = markup
else:
found = markup_name
if found and self.text and not self._matches(found.string, self.text):
found = None
return found
searchTag = search_tag
def search(self, markup):
# print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text or self.name or self.attrs:
found = self.search_tag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, basestring):
if not self.name and not self.attrs and self._matches(markup, self.text):
found = markup
else:
raise Exception(
"I don't know how to match against a %s" % markup.__class__)
return found
def _matches(self, markup, match_against):
# print u"Matching %s against %s" % (markup, match_against)
result = False
if isinstance(markup, list) or isinstance(markup, tuple):
# This should only happen when searching a multi-valued attribute
# like 'class'.
if (isinstance(match_against, unicode)
and ' ' in match_against):
# A bit of a special case. If they try to match "foo
# bar" on a multivalue attribute's value, only accept
# the literal value "foo bar"
#
# XXX This is going to be pretty slow because we keep
# splitting match_against. But it shouldn't come up
# too often.
return (whitespace_re.split(match_against) == markup)
else:
for item in markup:
if self._matches(item, match_against):
return True
return False
if match_against is True:
# True matches any non-None value.
return markup is not None
if isinstance(match_against, collections.Callable):
return match_against(markup)
# Custom callables take the tag as an argument, but all
# other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
# Ensure that `markup` is either a Unicode string, or None.
markup = self._normalize_search_value(markup)
if markup is None:
# None matches None, False, an empty string, an empty list, and so on.
return not match_against
if isinstance(match_against, unicode):
# Exact string match
return markup == match_against
if hasattr(match_against, 'match'):
# Regexp match
return match_against.search(markup)
if hasattr(match_against, '__iter__'):
# The markup must be an exact match against something
# in the iterable.
return markup in match_against
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source):
        list.__init__(self)
self.source = source<|fim▁end|> | |
<|file_name|>imp.go<|end_file_name|><|fim▁begin|>package openrtb_ext
// ExtImp defines the contract for bidrequest.imp[i].ext
type ExtImp struct {
Prebid *ExtImpPrebid `json:"prebid"`
Appnexus *ExtImpAppnexus `json:"appnexus"`
Rubicon *ExtImpRubicon `json:"rubicon"`
}
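// Illustrative bidrequest.imp[i].ext JSON that unmarshals into ExtImp
// (field values assumed, not taken from the spec):
//   {"prebid": {"storedrequest": {"id": "imp-stored-req-1"}}}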
// ExtImpPrebid defines the contract for bidrequest.imp[i].ext.prebid
type ExtImpPrebid struct {
StoredRequest *ExtStoredRequest `json:"storedrequest"`
}<|fim▁hole|>
// ExtStoredRequest defines the contract for bidrequest.imp[i].ext.prebid.storedrequest
type ExtStoredRequest struct {
ID string `json:"id"`
}<|fim▁end|> | |
<|file_name|>makepath.cpp<|end_file_name|><|fim▁begin|>// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
/***
*makepath.c - create path name from components
*
*
*Purpose:
* To provide support for creation of full path names from components
*
*******************************************************************************/
#include "stdafx.h"
#include "winwrap.h"
#include "utilcode.h"
#include "ex.h"
/***
*void Makepath() - build path name from components
*
*Purpose:
* create a path name from its individual components
*
*Entry:
* CQuickWSTR &szPath - Buffer for constructed path
* WCHAR *drive - pointer to drive component, may or may not contain
* trailing ':'
* WCHAR *dir - pointer to subdirectory component, may or may not include
* leading and/or trailing '/' or '\' characters
* WCHAR *fname - pointer to file base name component
* WCHAR *ext - pointer to extension component, may or may not contain
* a leading '.'.
*
*Exit:
* path - pointer to constructed path name
*
*Exceptions:
*
*******************************************************************************/
void MakePath (
__out CQuickWSTR &szPath,
__in LPCWSTR drive,
__in LPCWSTR dir,
__in LPCWSTR fname,
__in LPCWSTR ext
)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END
SIZE_T maxCount = 4 // Possible separators between components, plus null terminator
+ (drive != nullptr ? 2 : 0)
+ (dir != nullptr ? wcslen(dir) : 0)
+ (fname != nullptr ? wcslen(fname) : 0)
+ (ext != nullptr ? wcslen(ext) : 0);
LPWSTR path = szPath.AllocNoThrow(maxCount);
const WCHAR *p;
DWORD count = 0;
/* we assume that the arguments are in the following form (although we
* do not diagnose invalid arguments or illegal filenames (such as
* names longer than 8.3 or with illegal characters in them)
*
* drive:
* A ; or
* A:
* dir:
* \top\next\last\ ; or
* /top/next/last/ ; or
* either of the above forms with either/both the leading
* and trailing / or \ removed. Mixed use of '/' and '\' is
* also tolerated
* fname:
* any valid file name
* ext:
* any valid extension (none if empty or null )
*/
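    /* Illustrative call (wide-string literals assumed; not part of the
     * original source):
     *   MakePath(path, W("C"), W("\\top\\next"), W("file"), W("ext"));
     * produces "C:\top\next\file.ext".
     */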
/* copy drive */
if (drive && *drive) {
*path++ = *drive;
*path++ = _T(':');
count += 2;
}
/* copy dir */
if ((p = dir)) {
while (*p) {
*path++ = *p++;
count++;
_ASSERTE(count < maxCount);
}
#ifdef _MBCS
if (*(p=_mbsdec(dir,p)) != _T('/') && *p != _T('\\')) {
#else /* _MBCS */
// suppress warning for the following line; this is safe but would require significant code
// delta for prefast to understand.
#ifdef _PREFAST_
#pragma warning( suppress: 26001 )
#endif
if (*(p-1) != _T('/') && *(p-1) != _T('\\')) {
#endif /* _MBCS */
*path++ = _T('\\');
count++;
_ASSERTE(count < maxCount);
}
}
/* copy fname */
if ((p = fname)) {
while (*p) {
*path++ = *p++;
count++;
_ASSERTE(count < maxCount);
}
}
/* copy ext, including 0-terminator - check to see if a '.' needs
* to be inserted.
*/
if ((p = ext)) {
if (*p && *p != _T('.')) {
*path++ = _T('.');
count++;
_ASSERTE(count < maxCount);
}
while ((*path++ = *p++)) {
count++;
_ASSERTE(count < maxCount);<|fim▁hole|> }
}
else {
/* better add the 0-terminator */
*path = _T('\0');
}
szPath.Shrink(count + 1);
}
// Returns the directory for HMODULE. So, if HMODULE was for "C:\Dir1\Dir2\Filename.DLL",
// then this would return "C:\Dir1\Dir2\" (note the trailing backslash).
HRESULT GetHModuleDirectory(
__in HMODULE hMod,
SString& wszPath)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
DWORD dwRet = WszGetModuleFileName(hMod, wszPath);
if (dwRet == 0)
{ // Some other error.
return HRESULT_FROM_GetLastError();
}
CopySystemDirectory(wszPath, wszPath);
return S_OK;
}
//
// Returns path name from a file name.
// Example: For input "C:\Windows\System.dll" returns "C:\Windows\".
// Warning: The input file name string might be destroyed.
//
// Arguments:
// pPathString - [in] SString with file name
//
// pBuffer - [out] SString .
//
// Return Value:
// S_OK - Output buffer contains path name.
// other errors - If Sstring throws.
//
HRESULT CopySystemDirectory(const SString& pPathString,
SString& pbuffer)
{
HRESULT hr = S_OK;
EX_TRY
{
pbuffer.Set(pPathString);
SString::Iterator iter = pbuffer.End();
if (pbuffer.FindBack(iter,DIRECTORY_SEPARATOR_CHAR_W))
{
iter++;
pbuffer.Truncate(iter);
}
else
{
hr = E_UNEXPECTED;
}
}
EX_CATCH_HRESULT(hr);
return hr;
}<|fim▁end|> | |
<|file_name|>foldtest.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from builtins import input
from builtins import range
from past.utils import old_div
import sys
import numpy
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pylab
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
def main():
"""
NAME
foldtest.py
DESCRIPTION
does a fold test (Tauxe, 2010) on data
INPUT FORMAT
dec inc dip_direction dip
SYNTAX
foldtest.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE file with input data
-F FILE for confidence bounds on fold test
-u ANGLE (circular standard deviation) for uncertainty on bedding poles
-b MIN MAX bounds for quick search of percent untilting [default is -10 to 150%]
-n NB number of bootstrap samples [default is 1000]
-fmt FMT, specify format - default is svg
-sav save figures and quit
INPUT FILE
Dec Inc Dip_Direction Dip in space delimited file
OUTPUT PLOTS
Geographic: is an equal area projection of the input data in
original coordinates
Stratigraphic: is an equal area projection of the input data in
tilt adjusted coordinates
% Untilting: The dashed (red) curves are representative plots of
maximum eigenvalue (tau_1) as a function of untilting
The solid line is the cumulative distribution of the
% Untilting required to maximize tau for all the
bootstrapped data sets. The dashed vertical lines
are 95% confidence bounds on the % untilting that yields
the most clustered result (maximum tau_1).
Command line: prints out the bootstrapped iterations and
finally the confidence bounds on optimum untilting.
If the 95% conf bounds include 0, then a post-tilt magnetization is indicated
If the 95% conf bounds include 100, then a pre-tilt magnetization is indicated
If the 95% conf bounds exclude both 0 and 100, syn-tilt magnetization is
possible as is vertical axis rotation or other pathologies
OPTIONAL OUTPUT FILE:
The output file has the % untilting within the 95% confidence bounds
        and the number of bootstrap samples
"""
kappa=0
fmt,plot='svg',0
nb=1000 # number of bootstraps
min,max=-10,150
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-F' in sys.argv:
ind=sys.argv.index('-F')
outfile=open(sys.argv[ind+1],'w')
else:
outfile=""
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
DIDDs=numpy.loadtxt(file)
else:
print(main.__doc__)
sys.exit()
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-sav' in sys.argv:plot=1
if '-b' in sys.argv:
ind=sys.argv.index('-b')
min=int(sys.argv[ind+1])
max=int(sys.argv[ind+2])
if '-n' in sys.argv:
ind=sys.argv.index('-n')
nb=int(sys.argv[ind+1])
if '-u' in sys.argv:
ind=sys.argv.index('-u')
csd=float(sys.argv[ind+1])
kappa=(old_div(81.,csd))**2
#
# get to work
#
PLTS={'geo':1,'strat':2,'taus':3} # make plot dictionary
pmagplotlib.plot_init(PLTS['geo'],5,5)
pmagplotlib.plot_init(PLTS['strat'],5,5)
pmagplotlib.plot_init(PLTS['taus'],5,5)
pmagplotlib.plotEQ(PLTS['geo'],DIDDs,'Geographic')
D,I=pmag.dotilt_V(DIDDs)
TCs=numpy.array([D,I]).transpose()
pmagplotlib.plotEQ(PLTS['strat'],TCs,'Stratigraphic')<|fim▁hole|> if plot==0:pmagplotlib.drawFIGS(PLTS)
Percs=list(range(min,max))
Cdf,Untilt=[],[]
pylab.figure(num=PLTS['taus'])
print('doing ',nb,' iterations...please be patient.....')
for n in range(nb): # do bootstrap data sets - plot first 25 as dashed red line
if n%50==0:print(n)
Taus=[] # set up lists for taus
PDs=pmag.pseudo(DIDDs)
if kappa!=0:
for k in range(len(PDs)):
d,i=pmag.fshdev(kappa)
dipdir,dip=pmag.dodirot(d,i,PDs[k][2],PDs[k][3])
PDs[k][2]=dipdir
PDs[k][3]=dip
for perc in Percs:
tilt=numpy.array([1.,1.,1.,0.01*perc])
D,I=pmag.dotilt_V(PDs*tilt)
TCs=numpy.array([D,I]).transpose()
ppars=pmag.doprinc(TCs) # get principal directions
Taus.append(ppars['tau1'])
if n<25:pylab.plot(Percs,Taus,'r--')
Untilt.append(Percs[Taus.index(numpy.max(Taus))]) # tilt that gives maximum tau
Cdf.append(old_div(float(n),float(nb)))
pylab.plot(Percs,Taus,'k')
pylab.xlabel('% Untilting')
pylab.ylabel('tau_1 (red), CDF (green)')
Untilt.sort() # now for CDF of tilt of maximum tau
pylab.plot(Untilt,Cdf,'g')
lower=int(.025*nb)
upper=int(.975*nb)
pylab.axvline(x=Untilt[lower],ymin=0,ymax=1,linewidth=1,linestyle='--')
pylab.axvline(x=Untilt[upper],ymin=0,ymax=1,linewidth=1,linestyle='--')
tit= '%i - %i %s'%(Untilt[lower],Untilt[upper],'Percent Unfolding')
print(tit)
print('range of all bootstrap samples: ', Untilt[0], ' - ', Untilt[-1])
pylab.title(tit)
outstring= '%i - %i; %i\n'%(Untilt[lower],Untilt[upper],nb)
if outfile!="":outfile.write(outstring)
files={}
for key in list(PLTS.keys()):
files[key]=('foldtest_'+'%s'%(key.strip()[:2])+'.'+fmt)
if plot==0:
pmagplotlib.drawFIGS(PLTS)
ans= input('S[a]ve all figures, <Return> to quit ')
if ans!='a':
print("Good bye")
sys.exit()
pmagplotlib.saveP(PLTS,files)
if __name__ == "__main__":
    main()
<|file_name|>root.ts<|end_file_name|><|fim▁begin|>import { AnyAction, combineReducers } from 'redux';
import { CleanUp, cleanUpAction } from '../actions/cleanUp';
import sharedReducers from 'app/core/reducers';
import alertingReducers from 'app/features/alerting/state/reducers';
import teamsReducers from 'app/features/teams/state/reducers';
import apiKeysReducers from 'app/features/api-keys/state/reducers';
import foldersReducers from 'app/features/folders/state/reducers';
import dashboardReducers from 'app/features/dashboard/state/reducers';
import exploreReducers from 'app/features/explore/state/main';
import { reducer as pluginsReducer } from 'app/features/plugins/admin/state/reducer';
import dataSourcesReducers from 'app/features/datasources/state/reducers';
import usersReducers from 'app/features/users/state/reducers';
import invitesReducers from 'app/features/invites/state/reducers';
import userReducers from 'app/features/profile/state/reducers';
import organizationReducers from 'app/features/org/state/reducers';
import ldapReducers from 'app/features/admin/state/reducers';
import importDashboardReducers from 'app/features/manage-dashboards/state/reducers';
import panelEditorReducers from 'app/features/dashboard/components/PanelEditor/state/reducers';
import panelsReducers from 'app/features/panel/state/reducers';
import serviceAccountsReducer from 'app/features/serviceaccounts/state/reducers';
import templatingReducers from 'app/features/variables/state/keyedVariablesReducer';
import searchPageReducers from 'app/features/search/page/state/reducers';
const rootReducers = {
...sharedReducers,
...alertingReducers,
...teamsReducers,
...apiKeysReducers,
...foldersReducers,
...dashboardReducers,
...exploreReducers,
...dataSourcesReducers,
...usersReducers,
...serviceAccountsReducer,
...userReducers,
...invitesReducers,
...organizationReducers,
...ldapReducers,
...importDashboardReducers,
...panelEditorReducers,
...panelsReducers,
...templatingReducers,
...searchPageReducers,
plugins: pluginsReducer,
};
const addedReducers = {};
export const addReducer = (newReducers: any) => {
Object.assign(addedReducers, newReducers);
};
export const createRootReducer = () => {
const appReducer = combineReducers({
...rootReducers,
...addedReducers,
});<|fim▁hole|> return appReducer(state, action);
}
const { stateSelector } = action.payload as CleanUp<any>;
const stateSlice = stateSelector(state);
recursiveCleanState(state, stateSlice);
return appReducer(state, action);
};
};
export const recursiveCleanState = (state: any, stateSlice: any): boolean => {
for (const stateKey in state) {
if (!state.hasOwnProperty(stateKey)) {
continue;
}
const slice = state[stateKey];
if (slice === stateSlice) {
state[stateKey] = undefined;
return true;
}
if (typeof slice === 'object') {
const cleaned = recursiveCleanState(slice, stateSlice);
if (cleaned) {
return true;
}
}
}
return false;
};<|fim▁end|> |
return (state: any, action: AnyAction) => {
if (action.type !== cleanUpAction.type) { |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
with open('cli-requirements.txt') as f:
cli_requirements = f.read().splitlines()
setuptools.setup(
name="uwg",
use_scm_version=True,
setup_requires=['setuptools_scm'],
author="Ladybug Tools",
author_email="[email protected]",
description="Python application for modeling the urban heat island effect.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ladybug-tools/uwg",
packages=setuptools.find_packages(exclude=["tests*", "resources*"]),
include_package_data=True,
install_requires=requirements,
extras_require={
'cli': cli_requirements
},
entry_points={<|fim▁hole|> "console_scripts": ["uwg = uwg.cli:main"]
},
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: OS Independent"
],
)<|fim▁end|> | |
<|file_name|>factory.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from robotics.robots.aizek_robot import AizekRobot
from robotics.sensors.redbot_wheel_encoder_sensor import RedbotWheelEncoderSensor
from robotics.sensors.sharp_ir_distance_sensor import SharpIrDistanceSensor
class RobotFactory(object):
@staticmethod
def createAizekRobot():
gpio.setmode(gpio.BOARD)
lmotor = RedbotMotorActor(gpio, 8, 10, 12)
rmotor = RedbotMotorActor(gpio, 11, 13, 15)
spi = MCP3008SpiInterface(0)
wencoder = RedbotWheelEncoderSensor(spi)
lsensor = SharpIrDistanceSensor(spi, 5)
fsensor = SharpIrDistanceSensor(spi, 4)
rsensor = SharpIrDistanceSensor(spi, 3)
wheel_radius = 0.032
wheel_distance = 0.1
robot = AizekRobot(
left_motor=lmotor,
right_motor=rmotor,
wheel_encoder=wencoder,
left_distance_sensor=lsensor,
front_distance_sensor=fsensor,
right_distance_sensor=rsensor,
wheel_radius=wheel_radius,
wheel_distance=wheel_distance,
)
return robot<|fim▁end|> | from RPi import GPIO as gpio
from robotics.actors.redbot_motor_actor import RedbotMotorActor
from robotics.interfaces.spi.mcp3008_spi_interface import MCP3008SpiInterface |
<|file_name|>notosanssylotinagri_regular.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
class Charset(object):<|fim▁hole|> native_name = ''
def glyphs(self):
glyphs = []
glyphs.append(0x0039) #glyph00057
glyphs.append(0x0034) #uniA82A
glyphs.append(0x0035) #uniA82B
glyphs.append(0x0036) #glyph00054
glyphs.append(0x0040) #glyph00064
glyphs.append(0x0053) #uni09EE
glyphs.append(0x0038) #glyph00056
glyphs.append(0x0015) #uniA80B
glyphs.append(0x0016) #uniA80C
glyphs.append(0x003D) #glyph00061
glyphs.append(0x0014) #uniA80A
glyphs.append(0x0019) #uniA80F
glyphs.append(0x0037) #glyph00055
glyphs.append(0x0017) #uniA80D
glyphs.append(0x0018) #uniA80E
glyphs.append(0x0032) #uniA828
glyphs.append(0x0001) #uniFEFF
glyphs.append(0x004D) #uni09E8
glyphs.append(0x0054) #uni09EF
glyphs.append(0x0048) #uni2055
glyphs.append(0x0050) #uni09EB
glyphs.append(0x0002) #uni000D
glyphs.append(0x0051) #uni09EC
glyphs.append(0x0052) #uni09ED
glyphs.append(0x002C) #uniA822
glyphs.append(0x0049) #uni0964
glyphs.append(0x004A) #uni0965
glyphs.append(0x003E) #glyph00062
glyphs.append(0x0042) #glyph00066
glyphs.append(0x002D) #uniA823
glyphs.append(0x0023) #uniA819
glyphs.append(0x0022) #uniA818
glyphs.append(0x0033) #uniA829
glyphs.append(0x0043) #glyph00067
glyphs.append(0x001F) #uniA815
glyphs.append(0x001E) #uniA814
glyphs.append(0x0021) #uniA817
glyphs.append(0x0020) #uniA816
glyphs.append(0x001B) #uniA811
glyphs.append(0x001A) #uniA810
glyphs.append(0x001D) #uniA813
glyphs.append(0x001C) #uniA812
glyphs.append(0x0047) #glyph00071
glyphs.append(0x0041) #glyph00065
glyphs.append(0x004C) #uni09E7
glyphs.append(0x0044) #glyph00068
glyphs.append(0x0045) #glyph00069
glyphs.append(0x0028) #uniA81E
glyphs.append(0x0027) #uniA81D
glyphs.append(0x0003) #uni00A0
glyphs.append(0x0029) #uniA81F
glyphs.append(0x0024) #uniA81A
glyphs.append(0x003F) #glyph00063
glyphs.append(0x0026) #uniA81C
glyphs.append(0x0025) #uniA81B
glyphs.append(0x0005) #uni200C
glyphs.append(0x0004) #uni200B
glyphs.append(0x003B) #glyph00059
glyphs.append(0x0006) #uni200D
glyphs.append(0x003A) #glyph00058
glyphs.append(0x004E) #uni09E9
glyphs.append(0x002F) #uniA825
glyphs.append(0x0007) #uni2010
glyphs.append(0x0008) #uni2011
glyphs.append(0x004B) #uni09E6
glyphs.append(0x0009) #uni25CC
glyphs.append(0x004F) #uni09EA
glyphs.append(0x003C) #glyph00060
glyphs.append(0x0046) #glyph00070
glyphs.append(0x002A) #uniA820
glyphs.append(0x002B) #uniA821
glyphs.append(0x0012) #uniA808
glyphs.append(0x0013) #uniA809
glyphs.append(0x002E) #uniA824
glyphs.append(0x0000) #.notdef
glyphs.append(0x0030) #uniA826
glyphs.append(0x0031) #uniA827
glyphs.append(0x000C) #uniA802
glyphs.append(0x000D) #uniA803
glyphs.append(0x000A) #uniA800
glyphs.append(0x000B) #uniA801
glyphs.append(0x0010) #uniA806
glyphs.append(0x0011) #uniA807
glyphs.append(0x000E) #uniA804
glyphs.append(0x000F) #uniA805
return glyphs<|fim▁end|> | common_name = 'NotoSansSylotiNagri-Regular' |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from setuptools import setup, find_packages
setup(name='MODEL1310110042',
version=20140916,
description='MODEL1310110042 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/MODEL1310110042',
maintainer='Stanley Gu',
maintainer_url='[email protected]',<|fim▁hole|> package_data={'': ['*.xml', 'README.md']},
)<|fim▁end|> | packages=find_packages(), |
<|file_name|>DefaultOkHttpResourceExtractor.java<|end_file_name|><|fim▁begin|>/*
* Copyright 1999-2020 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,<|fim▁hole|> * See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.csp.sentinel.adapter.okhttp.extractor;
import okhttp3.Connection;
import okhttp3.Request;
/**
* @author zhaoyuguang
*/
public class DefaultOkHttpResourceExtractor implements OkHttpResourceExtractor {
@Override
public String extract(Request request, Connection connection) {
return request.method() + ":" + request.url().toString();
}
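    // Illustrative result (request assumed): a GET to
    // https://example.com/api/list is reported as the resource name
    // "GET:https://example.com/api/list".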
}<|fim▁end|> | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
<|file_name|>types.ts<|end_file_name|><|fim▁begin|>import * as EventLogger from './'
const log = new EventLogger()
log.info('Basic information')
log.information('Basic information')
log.warning('Watch out!')
log.warn('Watch out!')
log.error('Something went wrong.')
log.auditFailure('Audit Failure')
log.auditSuccess('Audit Success')
// Configurations
new EventLogger('FooApplication')<|fim▁hole|> source: 'FooApplication',
logPath: '/var/usr/local/log',
eventLog: 'APPLICATION'
})<|fim▁end|> | new EventLogger({ |
<|file_name|>types.py<|end_file_name|><|fim▁begin|># Lint as: python3
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The core data types ctexplain manipulates."""
from typing import Mapping
from typing import Optional
from typing import Tuple
# Do not edit this line. Copybara replaces it with PY2 migration helper.
from dataclasses import dataclass
from dataclasses import field
from frozendict import frozendict
@dataclass(frozen=True)
class Configuration():
"""Stores a build configuration as a collection of fragments and options."""
# Mapping of each BuildConfiguration.Fragment in this configuration to the
# FragmentOptions it requires.
#
# All names are qualified up to the base file name, without package prefixes.
# For example, foo.bar.BazConfiguration appears as "BazConfiguration".
# foo.bar.BazConfiguration$Options appears as "BazeConfiguration$Options".
fragments: Mapping[str, Tuple[str, ...]]
# Mapping of FragmentOptions to option key/value pairs. For example:
# {"CoreOptions": {"action_env": "[]", "cpu": "x86", ...}, ...}.
#
# Option values are stored as strings of whatever "bazel config" outputs.
#
# Note that Fragment and FragmentOptions aren't the same thing.
options: Mapping[str, Mapping[str, str]]
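# Illustrative construction (all values assumed, not from a real Bazel build):
#   Configuration(
#       fragments=frozendict({"PlatformConfiguration": ("PlatformOptions",)}),
#       options=frozendict({"PlatformOptions": frozendict({"cpu": "x86"})}))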
@dataclass(frozen=True)
class ConfiguredTarget():
"""Encapsulates a target + configuration + required fragments."""
# Label of the target this represents.
label: str
# Configuration this target is applied to. May be None.
config: Optional[Configuration]
# The hash of this configuration as reported by Bazel.
config_hash: str
# Fragments required by this configured target and its transitive
# dependencies. Stored as base names without packages. For example:
# "PlatformOptions" or "FooConfiguration$Options".
transitive_fragments: Tuple[str, ...]
@dataclass(frozen=True)
class HostConfiguration(Configuration):
"""Special marker for the host configuration.
There's exactly one host configuration per build, so we shouldn't suggest
merging it with other configurations.<|fim▁hole|> TODO(gregce): suggest host configuration trimming once we figure out the right
criteria. Even if Bazel's not technically equipped to do the trimming, it's
still theoretically valuable information. Note that moving from host to exec
configurations make this all a little less relevant, since exec configurations
aren't "special" compared to normal configurations.
"""
# We don't currently read the host config's fragments or option values.
fragments: Tuple[str, ...] = ()
options: Mapping[str,
Mapping[str,
str]] = field(default_factory=lambda: frozendict({}))
@dataclass(frozen=True)
class NullConfiguration(Configuration):
"""Special marker for the null configuration.
By definition this has no fragments or options.
"""
fragments: Tuple[str, ...] = ()
options: Mapping[str,
Mapping[str,
str]] = field(default_factory=lambda: frozendict({}))<|fim▁end|> | |
<|file_name|>people.js<|end_file_name|><|fim▁begin|>angular.module('emp').service('PeopleService', function($http) {
var service = {
getAllPeople: function() {
return $http.get('core/data/people.json', { cache: true }).then(function(resp) {
return resp.data;<|fim▁hole|>
getPerson: function(id) {
function personMatchesParam(person) {
return person.id === id;
}
return service.getAllPeople().then(function (people) {
return people.find(personMatchesParam)
});
}
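        // Usage sketch (caller names assumed):
        //   PeopleService.getPerson(2).then(function (person) { /* ... */ });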
}
return service;
})<|fim▁end|> | });
}, |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>TESTS = {
"Level_1": [
{
"input": [1, 2, 3],
"answer": 2,
"explanation": "3-1=2"
},
{
"input": [5, -5],
"answer": 10,
"explanation": "5-(-5)=10"
},
{
"input": [10.2, -2.2, 0, 1.1, 0.5],
"answer": 12.4,
"explanation": "10.2-(-2.2)=12.4"
},
{
"input": [],
"answer": 0,
"explanation": "Empty"
},
{"input": [-99.9, 99.9],
"answer": 199.8,
"explanation": "99.9-(-99.9)"},
{"input": [1, 1],
"answer": 0,
"explanation": "1-1"},<|fim▁hole|>
{"input": [36.0, -26.0, -7.5, 0.9, 0.53, -6.6, -71.0, 0.53, -48.0, 57.0, 69.0, 0.063, -4.7, 0.01, 9.2],
"answer": 140.0,
"explanation": "69.0-(-71.0)"},
{"input": [-0.035, 0.0, -0.1, 83.0, 0.28, 60.0],
"answer": 83.1,
"explanation": "83.0-(-0.1)"},
{"input": [0.02, 0.93, 0.066, -94.0, -0.91, -21.0, -7.2, -0.018, 26.0],
"answer": 120.0,
"explanation": "26.0-(-94.0)"},
{"input": [89.0, 0.014, 2.9, -1.2, 5.8],
"answer": 90.2,
"explanation": "89.0-(-1.2)"},
{"input": [-69.0, 0.0, 0.0, -0.051, -0.021, -0.81],
"answer": 69.0,
"explanation": "0.0-(-69.0)"},
{"input": [-0.07],
"answer": 0.0,
"explanation": "-0.07-(-0.07)"},
{"input": [0.074, 0.12, -0.4, 4.0, -1.7, 3.0, -5.1, 0.57, -54.0, -41.0, -5.2, -5.6, 3.8, 0.054, -35.0, -5.0,
-0.005, 0.034],
"answer": 58.0,
"explanation": "4.0-(-54.0)"},
{"input": [29.0, 0.47, -4.5, -6.7, -0.051, -0.82, -0.074, -4.0, -0.015, -0.015, -8.0, -0.43],
"answer": 37.0,
"explanation": "29.0-(-8.0)"},
{"input": [-0.036, -0.11, -0.55, -64.0],
"answer": 63.964,
"explanation": "-0.036-(-64.0)"},
{"input": [-0.092, -0.079, -0.31, -0.87, -28.0, -6.2, -0.097, -5.8, -0.025, -28.0, -4.7, -2.9, -8.0, -0.093,
-13.0, -73.0],
"answer": 72.975,
"explanation": "-0.025-(-73.0)"},
{"input": [-0.015, 7.6],
"answer": 7.615,
"explanation": "7.6-(-0.015)"},
{"input": [-46.0, 0.19, -0.08, -4.0, 4.4, 0.071, -0.029, -0.034, 28.0, 0.043, -97.0],
"answer": 125.0,
"explanation": "28.0-(-97.0)"},
{"input": [32.0, -0.07, -0.056, -6.4, 0.084],
"answer": 38.4,
"explanation": "32.0-(-6.4)"},
{"input": [0.017, 0.015, 0.69, 0.78],
"answer": 0.765,
"explanation": "0.78-0.015"},
]
}<|fim▁end|> | {"input": [0, 0, 0, 0],
"answer": 0,
"explanation": "0-0"},
|
<|file_name|>routes.js<|end_file_name|><|fim▁begin|>/* eslint-disable global-require */
import React from 'react';
import { Router, Route, IndexRoute } from 'react-router';
import App from './modules/App/App';
import Landing from './modules/App/Landing';
import TalentInput from './modules/App/TalentInput';
import Performer from './modules/App/Performer';
// require.ensure polyfill for node
if (typeof require.ensure !== 'function') {
require.ensure = function requireModule(deps, callback) {
callback(require);
};
}
/* Workaround for async react routes to work with react-hot-reloader till
https://github.com/reactjs/react-router/issues/2182 and
https://github.com/gaearon/react-hot-loader/issues/288 is fixed.
*/
if (process.env.NODE_ENV !== 'production') {
// Require async routes only in development for react-hot-reloader to work.
// require('./modules/Post/pages/PostListPage/PostListPage');
// require('./modules/Post/pages/PostDetailPage/PostDetailPage');
}
// react-router setup with code-splitting
// More info: http://blog.mxstbr.com/2016/01/react-apps-with-pages/
export default (
<Router>
<Route path="/" component={Landing} />
{/*} <IndexRoute
getComponent={(nextState, cb) => {
require.ensure([], require => {
cb(null, require('./modules/Post/pages/PostDetailPage/PostDetailPage').default);
});
}}<|fim▁hole|> path="/index"
getComponent={(nextState, cb) => {
require.ensure([], require => {
cb(null, require('./modules/Post/pages/PostDetailPage/PostDetailPage').default);
});
}}
/>*/}
{/*</Route>*/}
<Route path="/talent" component={TalentInput} />
</Router>
);<|fim▁end|> | />*/}
<Route path="/room" component={Performer} />
{/* <Route |
<|file_name|>map.view.clicked.examples.js<|end_file_name|><|fim▁begin|>cmapi.channel["map.view.clicked"].examples = [<|fim▁hole|> "title": "Map Clicked",
"description": "Report that the map was clicked at a location",
"valid": true,
"payload": {
"lat": 40.195659093364654,
"lon": -74.28955078125,
"button": "right",
"type": "single",
"keys": ["shift", "ctrl"]
}
},
{
"title": "Map Clicked",
"description": "Report that the map was clicked at a location",
"valid": true,
"payload": {
"lat": 40.195659093364654,
"lon": -74.28955078125,
"button": "middle",
"type": "double",
"keys": ["none"]
}
},
{
"title": "Map Clicked",
"description": "Report that the map was clicked at a location",
"valid": false,
"payload": {
"lat": 40.195659093364654,
"lon": -74.28955078125,
"button": "taco",
"type": "single",
"keys": ["shift", "ctrl"]
}
},
{
"title": "Map Clicked",
"description": "Report that the map was clicked at a location",
"valid": false,
"payload": {
"lat": 40.195659093364654,
"lon": -74.28955078125,
"type": "single",
"keys": ["shift", "ctrl"]
}
}
];<|fim▁end|> | { |
<|file_name|>rgb.py<|end_file_name|><|fim▁begin|>"""
RGB Colourspace & Transformations
=================================
Defines the following *RGB* colourspace transformations:
- :func:`colour_hdri.camera_space_to_RGB`
- :func:`colour_hdri.camera_space_to_sRGB`
"""
from __future__ import annotations
import numpy as np
from colour.algebra import matrix_dot, vector_dot
from colour.hints import ArrayLike, NDArray
from colour.models import RGB_COLOURSPACES
__author__ = "Colour Developers"
__copyright__ = "Copyright 2015 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = [
"camera_space_to_RGB",
"camera_space_to_sRGB",
]
def camera_space_to_RGB(
RGB: ArrayLike,
M_XYZ_to_camera_space: ArrayLike,
matrix_RGB_to_XYZ: ArrayLike,
) -> NDArray:
"""
Convert given *RGB* array from *camera space* to given *RGB* colourspace.
Parameters
----------
RGB
Camera space *RGB* colourspace array.
M_XYZ_to_camera_space
Matrix converting from *CIE XYZ* tristimulus values to *camera space*.
matrix_RGB_to_XYZ
Matrix converting from *RGB* colourspace to *CIE XYZ* tristimulus
values.
Returns
-------
:class:`numpy.ndarray`
*RGB* colourspace array.
Examples
--------
>>> RGB = np.array([0.80660, 0.81638, 0.65885])
>>> M_XYZ_to_camera_space = np.array([
... [0.47160000, 0.06030000, -0.08300000],
... [-0.77980000, 1.54740000, 0.24800000],
... [-0.14960000, 0.19370000, 0.66510000]])
>>> matrix_RGB_to_XYZ = np.array([
... [0.41238656, 0.35759149, 0.18045049],
... [0.21263682, 0.71518298, 0.07218020],
... [0.01933062, 0.11919716, 0.95037259]])
>>> camera_space_to_RGB(<|fim▁hole|> ... matrix_RGB_to_XYZ) # doctest: +ELLIPSIS
array([ 0.7564180..., 0.8683192..., 0.6044589...])
"""
M_RGB_camera = matrix_dot(M_XYZ_to_camera_space, matrix_RGB_to_XYZ)
M_RGB_camera /= np.transpose(np.sum(M_RGB_camera, axis=1)[np.newaxis])
RGB_f = vector_dot(np.linalg.inv(M_RGB_camera), RGB)
return RGB_f
def camera_space_to_sRGB(
RGB: ArrayLike, M_XYZ_to_camera_space: ArrayLike
) -> NDArray:
"""
Convert given *RGB* array from *camera space* to *sRGB* colourspace.
Parameters
----------
RGB
Camera space *RGB* colourspace array.
M_XYZ_to_camera_space
Matrix converting from *CIE XYZ* tristimulus values to *camera space*.
Returns
-------
:class:`numpy.ndarray`
*sRGB* colourspace array.
Examples
--------
>>> RGB = np.array([0.80660, 0.81638, 0.65885])
>>> M_XYZ_to_camera_space = np.array([
... [0.47160000, 0.06030000, -0.08300000],
... [-0.77980000, 1.54740000, 0.24800000],
... [-0.14960000, 0.19370000, 0.66510000]])
>>> camera_space_to_sRGB(RGB, M_XYZ_to_camera_space) # doctest: +ELLIPSIS
array([ 0.7564350..., 0.8683155..., 0.6044706...])
"""
return camera_space_to_RGB(
RGB, M_XYZ_to_camera_space, RGB_COLOURSPACES["sRGB"].matrix_RGB_to_XYZ
)<|fim▁end|> | ... RGB,
... M_XYZ_to_camera_space, |
<|file_name|>index.d.ts<|end_file_name|><|fim▁begin|>export {toMarkdown} from './lib/index.js'
export type SafeOptions = import('./lib/types.js').SafeOptions
export type Context = import('./lib/types.js').Context
export type Handle = import('./lib/types.js').Handle<|fim▁hole|>export type Map = import('./lib/util/indent-lines.js').Map<|fim▁end|> | export type Handlers = import('./lib/types.js').Handlers
export type Join = import('./lib/types.js').Join
export type Unsafe = import('./lib/types.js').Unsafe
export type Options = import('./lib/types.js').Options |
<|file_name|>Event.ts<|end_file_name|><|fim▁begin|>module X3Map {
export class Event {
private handlers: Array<EventHandler>;
constructor() {
this.handlers = [];
}
public Add(handler: EventHandler) {
this.handlers.push(handler);
}<|fim▁hole|> this.handlers.splice(a, 1);
}
public Fire(sender: Object, params?: Object) {
this.handlers.forEach((eh) => {
eh.Handle(sender, params);
})
}
}
}<|fim▁end|> |
public Remove(handler: EventHandler) {
var a = this.handlers.indexOf(handler); |
<|file_name|>base.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals
import re
from functools import partial
from importlib import import_module
from inspect import getargspec, getcallargs
import warnings
from django.apps import apps
from django.conf import settings
from django.template.context import (BaseContext, Context, RequestContext, # NOQA: imported for backwards compatibility
ContextPopException)
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.itercompat import is_iterable
from django.utils.text import (smart_split, unescape_string_literal,
get_text_list)
from django.utils.encoding import force_str, force_text
from django.utils.translation import ugettext_lazy, pgettext_lazy
from django.utils.safestring import (SafeData, EscapeData, mark_safe,
mark_for_escaping)
from django.utils.formats import localize
from django.utils.html import escape
from django.utils.module_loading import module_has_submodule
from django.utils import six
from django.utils.timezone import template_localtime
from django.utils.encoding import python_2_unicode_compatible
TOKEN_TEXT = 0
TOKEN_VAR = 1
TOKEN_BLOCK = 2
TOKEN_COMMENT = 3
TOKEN_MAPPING = {
TOKEN_TEXT: 'Text',
TOKEN_VAR: 'Var',
TOKEN_BLOCK: 'Block',
TOKEN_COMMENT: 'Comment',
}
# template syntax constants
FILTER_SEPARATOR = '|'
FILTER_ARGUMENT_SEPARATOR = ':'
VARIABLE_ATTRIBUTE_SEPARATOR = '.'
BLOCK_TAG_START = '{%'
BLOCK_TAG_END = '%}'
VARIABLE_TAG_START = '{{'
VARIABLE_TAG_END = '}}'
COMMENT_TAG_START = '{#'
COMMENT_TAG_END = '#}'
TRANSLATOR_COMMENT_MARK = 'Translators'
SINGLE_BRACE_START = '{'
SINGLE_BRACE_END = '}'
ALLOWED_VARIABLE_CHARS = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.')
# what to report as the origin for templates that come from non-loader sources
# (e.g. strings)
UNKNOWN_SOURCE = '<unknown source>'
# match a variable or block tag and capture the entire tag, including start/end
# delimiters
tag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' %
(re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),
re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END))))
# global dictionary of libraries that have been loaded using get_library
libraries = {}
# global list of libraries to load by default for a new parser
builtins = []
# True if TEMPLATE_STRING_IF_INVALID contains a format string (%s). None means
# uninitialized.
invalid_var_format_string = None
class TemplateSyntaxError(Exception):
pass
class TemplateDoesNotExist(Exception):
pass
class TemplateEncodingError(Exception):
pass
@python_2_unicode_compatible
class VariableDoesNotExist(Exception):
def __init__(self, msg, params=()):
self.msg = msg
self.params = params
def __str__(self):
return self.msg % tuple(force_text(p, errors='replace') for p in self.params)
class InvalidTemplateLibrary(Exception):
pass
class Origin(object):
def __init__(self, name):
self.name = name
def reload(self):
raise NotImplementedError('subclasses of Origin must provide a reload() method')
def __str__(self):
return self.name
class StringOrigin(Origin):
def __init__(self, source):
super(StringOrigin, self).__init__(UNKNOWN_SOURCE)
self.source = source
def reload(self):
return self.source
class Template(object):
def __init__(self, template_string, origin=None, name=None):
try:
template_string = force_text(template_string)
except UnicodeDecodeError:
raise TemplateEncodingError("Templates can only be constructed "
"from unicode or UTF-8 strings.")
if settings.TEMPLATE_DEBUG and origin is None:
origin = StringOrigin(template_string)
self.nodelist = compile_string(template_string, origin)
self.name = name
self.origin = origin
def __iter__(self):
for node in self.nodelist:
for subnode in node:
yield subnode
def _render(self, context):
return self.nodelist.render(context)
def render(self, context):
"Display stage -- can be called many times"
context.render_context.push()
try:
return self._render(context)
finally:
context.render_context.pop()
def compile_string(template_string, origin):
"Compiles template_string into NodeList ready for rendering"
if settings.TEMPLATE_DEBUG:
from django.template.debug import DebugLexer, DebugParser
lexer_class, parser_class = DebugLexer, DebugParser
else:
lexer_class, parser_class = Lexer, Parser
lexer = lexer_class(template_string, origin)
parser = parser_class(lexer.tokenize())
return parser.parse()
class Token(object):
def __init__(self, token_type, contents):
# token_type must be TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK or
# TOKEN_COMMENT.
self.token_type, self.contents = token_type, contents
self.lineno = None
def __str__(self):
token_name = TOKEN_MAPPING[self.token_type]
return ('<%s token: "%s...">' %
(token_name, self.contents[:20].replace('\n', '')))
def split_contents(self):
split = []
bits = iter(smart_split(self.contents))
for bit in bits:
# Handle translation-marked template pieces
if bit.startswith('_("') or bit.startswith("_('"):
sentinel = bit[2] + ')'
trans_bit = [bit]
while not bit.endswith(sentinel):
bit = next(bits)
trans_bit.append(bit)
bit = ' '.join(trans_bit)
split.append(bit)
return split
class Lexer(object):
def __init__(self, template_string, origin):
self.template_string = template_string
self.origin = origin
self.lineno = 1
self.verbatim = False
def tokenize(self):
"""
Return a list of tokens from a given template_string.
"""
in_tag = False
result = []
for bit in tag_re.split(self.template_string):
if bit:
result.append(self.create_token(bit, in_tag))
in_tag = not in_tag
return result
def create_token(self, token_string, in_tag):
"""
Convert the given token string into a new Token object and return it.
If in_tag is True, we are processing something that matched a tag,
otherwise it should be treated as a literal string.
"""
if in_tag and token_string.startswith(BLOCK_TAG_START):
# The [2:-2] ranges below strip off *_TAG_START and *_TAG_END.
# We could do len(BLOCK_TAG_START) to be more "correct", but we've
# hard-coded the 2s here for performance. And it's not like
# the TAG_START values are going to change anytime, anyway.
block_content = token_string[2:-2].strip()
if self.verbatim and block_content == self.verbatim:
self.verbatim = False
if in_tag and not self.verbatim:
if token_string.startswith(VARIABLE_TAG_START):
token = Token(TOKEN_VAR, token_string[2:-2].strip())
elif token_string.startswith(BLOCK_TAG_START):
if block_content[:9] in ('verbatim', 'verbatim '):
self.verbatim = 'end%s' % block_content
token = Token(TOKEN_BLOCK, block_content)
elif token_string.startswith(COMMENT_TAG_START):
content = ''
if token_string.find(TRANSLATOR_COMMENT_MARK) != -1:
content = token_string[2:-2].strip()
token = Token(TOKEN_COMMENT, content)
else:
token = Token(TOKEN_TEXT, token_string)
token.lineno = self.lineno
self.lineno += token_string.count('\n')
return token
class Parser(object):
def __init__(self, tokens):
self.tokens = tokens
self.tags = {}
self.filters = {}
for lib in builtins:
self.add_library(lib)
def parse(self, parse_until=None):
if parse_until is None:
parse_until = []
nodelist = self.create_nodelist()
while self.tokens:
token = self.next_token()
# Use the raw values here for TOKEN_* for a tiny performance boost.
if token.token_type == 0: # TOKEN_TEXT
self.extend_nodelist(nodelist, TextNode(token.contents), token)
elif token.token_type == 1: # TOKEN_VAR
if not token.contents:
self.empty_variable(token)
try:
filter_expression = self.compile_filter(token.contents)
except TemplateSyntaxError as e:
if not self.compile_filter_error(token, e):
raise
var_node = self.create_variable_node(filter_expression)
self.extend_nodelist(nodelist, var_node, token)
elif token.token_type == 2: # TOKEN_BLOCK
try:
command = token.contents.split()[0]
except IndexError:
self.empty_block_tag(token)
if command in parse_until:
# put token back on token list so calling
# code knows why it terminated
self.prepend_token(token)
return nodelist
# execute callback function for this tag and append
# resulting node
self.enter_command(command, token)
try:
compile_func = self.tags[command]
except KeyError:
self.invalid_block_tag(token, command, parse_until)
try:
compiled_result = compile_func(self, token)
except TemplateSyntaxError as e:
if not self.compile_function_error(token, e):
raise
self.extend_nodelist(nodelist, compiled_result, token)
self.exit_command()
if parse_until:
self.unclosed_block_tag(parse_until)
return nodelist
def skip_past(self, endtag):
while self.tokens:
token = self.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == endtag:
return
self.unclosed_block_tag([endtag])
def create_variable_node(self, filter_expression):
return VariableNode(filter_expression)
def create_nodelist(self):
return NodeList()
def extend_nodelist(self, nodelist, node, token):
if node.must_be_first and nodelist:
try:
if nodelist.contains_nontext:
raise AttributeError
except AttributeError:
raise TemplateSyntaxError("%r must be the first tag "
"in the template." % node)
if isinstance(nodelist, NodeList) and not isinstance(node, TextNode):
nodelist.contains_nontext = True
nodelist.append(node)
def enter_command(self, command, token):
pass
def exit_command(self):
pass
def error(self, token, msg):
return TemplateSyntaxError(msg)
def empty_variable(self, token):
raise self.error(token, "Empty variable tag")
def empty_block_tag(self, token):
raise self.error(token, "Empty block tag")
def invalid_block_tag(self, token, command, parse_until=None):
if parse_until:
raise self.error(token, "Invalid block tag: '%s', expected %s" %
(command, get_text_list(["'%s'" % p for p in parse_until])))
raise self.error(token, "Invalid block tag: '%s'" % command)
def unclosed_block_tag(self, parse_until):
raise self.error(None, "Unclosed tags: %s " % ', '.join(parse_until))
def compile_filter_error(self, token, e):
pass
def compile_function_error(self, token, e):
pass
def next_token(self):
return self.tokens.pop(0)
def prepend_token(self, token):
self.tokens.insert(0, token)
def delete_first_token(self):
del self.tokens[0]
def add_library(self, lib):
self.tags.update(lib.tags)
self.filters.update(lib.filters)
def compile_filter(self, token):
"""
Convenient wrapper for FilterExpression
"""
return FilterExpression(token, self)
def find_filter(self, filter_name):
if filter_name in self.filters:
return self.filters[filter_name]
else:
raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name)
class TokenParser(object):
"""
Subclass this and implement the top() method to parse a template line.
When instantiating the parser, pass in the line from the Django template
parser.
The parser's "tagname" instance-variable stores the name of the tag that
the filter was called with.
"""
def __init__(self, subject):
self.subject = subject
self.pointer = 0
self.backout = []
self.tagname = self.tag()
def top(self):
"""
Overload this method to do the actual parsing and return the result.
"""
raise NotImplementedError('subclasses of TokenParser must provide a top() method')
def more(self):
"""
Returns True if there is more stuff in the tag.
"""
return self.pointer < len(self.subject)
def back(self):
"""
Undoes the last microparser. Use this for lookahead and backtracking.
"""
if not len(self.backout):
raise TemplateSyntaxError("back called without some previous "
"parsing")
self.pointer = self.backout.pop()
def tag(self):
"""
A microparser that just returns the next tag from the line.
"""
subject = self.subject
i = self.pointer
if i >= len(subject):
raise TemplateSyntaxError("expected another tag, found "
"end of string: %s" % subject)
p = i
while i < len(subject) and subject[i] not in (' ', '\t'):
i += 1
s = subject[p:i]
while i < len(subject) and subject[i] in (' ', '\t'):
i += 1
self.backout.append(self.pointer)
self.pointer = i
return s
def value(self):
"""
A microparser that parses for a value: some string constant or
variable name.
"""
subject = self.subject
i = self.pointer
def next_space_index(subject, i):
"""
Increment pointer until a real space (i.e. a space not within
quotes) is encountered
"""
while i < len(subject) and subject[i] not in (' ', '\t'):
if subject[i] in ('"', "'"):
c = subject[i]
i += 1
while i < len(subject) and subject[i] != c:
i += 1
if i >= len(subject):
raise TemplateSyntaxError("Searching for value. "
"Unexpected end of string in column %d: %s" %
(i, subject))
i += 1
return i
if i >= len(subject):
raise TemplateSyntaxError("Searching for value. Expected another "
"value but found end of string: %s" %
subject)
if subject[i] in ('"', "'"):
p = i
i += 1
while i < len(subject) and subject[i] != subject[p]:
i += 1
if i >= len(subject):
raise TemplateSyntaxError("Searching for value. Unexpected "
"end of string in column %d: %s" %
(i, subject))
i += 1
# Continue parsing until next "real" space,
# so that filters are also included
i = next_space_index(subject, i)
res = subject[p:i]
while i < len(subject) and subject[i] in (' ', '\t'):
i += 1
self.backout.append(self.pointer)
self.pointer = i
return res
else:
p = i
i = next_space_index(subject, i)
s = subject[p:i]
while i < len(subject) and subject[i] in (' ', '\t'):
i += 1
self.backout.append(self.pointer)
self.pointer = i
return s
# This only matches constant *strings* (things in quotes or marked for
# translation). Numbers are treated as variables for implementation reasons
# (so that they retain their type when passed to filters).
constant_string = r"""
(?:%(i18n_open)s%(strdq)s%(i18n_close)s|
%(i18n_open)s%(strsq)s%(i18n_close)s|
%(strdq)s|
%(strsq)s)
""" % {
'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"', # double-quoted string
'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'", # single-quoted string
'i18n_open': re.escape("_("),
'i18n_close': re.escape(")"),
}
constant_string = constant_string.replace("\n", "")
filter_raw_string = r"""
^(?P<constant>%(constant)s)|
^(?P<var>[%(var_chars)s]+|%(num)s)|
(?:\s*%(filter_sep)s\s*
(?P<filter_name>\w+)
(?:%(arg_sep)s
(?:
(?P<constant_arg>%(constant)s)|
(?P<var_arg>[%(var_chars)s]+|%(num)s)
)
)?
)""" % {
'constant': constant_string,
'num': r'[-+\.]?\d[\d\.e]*',
'var_chars': "\w\.",
'filter_sep': re.escape(FILTER_SEPARATOR),
'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR),
}
filter_re = re.compile(filter_raw_string, re.UNICODE | re.VERBOSE)
class FilterExpression(object):
"""
Parses a variable token and its optional filters (all as a single string),
and return a list of tuples of the filter name and arguments.
Sample::
>>> token = 'variable|default:"Default value"|date:"Y-m-d"'
>>> p = Parser('')
>>> fe = FilterExpression(token, p)
>>> len(fe.filters)
2
>>> fe.var
<Variable: 'variable'>
"""
def __init__(self, token, parser):
self.token = token
matches = filter_re.finditer(token)
var_obj = None
filters = []
upto = 0
for match in matches:
start = match.start()
if upto != start:
raise TemplateSyntaxError("Could not parse some characters: "
"%s|%s|%s" %
(token[:upto], token[upto:start],
token[start:]))
if var_obj is None:
var, constant = match.group("var", "constant")
if constant:
try:
var_obj = Variable(constant).resolve({})
except VariableDoesNotExist:
var_obj = None
elif var is None:
raise TemplateSyntaxError("Could not find variable at "
"start of %s." % token)
else:
var_obj = Variable(var)
else:
filter_name = match.group("filter_name")
args = []
constant_arg, var_arg = match.group("constant_arg", "var_arg")
if constant_arg:
args.append((False, Variable(constant_arg).resolve({})))
elif var_arg:
args.append((True, Variable(var_arg)))
filter_func = parser.find_filter(filter_name)
self.args_check(filter_name, filter_func, args)
filters.append((filter_func, args))
upto = match.end()
if upto != len(token):
raise TemplateSyntaxError("Could not parse the remainder: '%s' "
"from '%s'" % (token[upto:], token))
self.filters = filters
self.var = var_obj
def resolve(self, context, ignore_failures=False):
if isinstance(self.var, Variable):
try:
obj = self.var.resolve(context)
except VariableDoesNotExist:
if ignore_failures:
obj = None
else:
if settings.TEMPLATE_STRING_IF_INVALID:
global invalid_var_format_string
if invalid_var_format_string is None:
invalid_var_format_string = '%s' in settings.TEMPLATE_STRING_IF_INVALID
if invalid_var_format_string:
return settings.TEMPLATE_STRING_IF_INVALID % self.var
return settings.TEMPLATE_STRING_IF_INVALID
else:
obj = settings.TEMPLATE_STRING_IF_INVALID
else:
obj = self.var
for func, args in self.filters:
arg_vals = []
for lookup, arg in args:
if not lookup:
arg_vals.append(mark_safe(arg))
else:<|fim▁hole|> if getattr(func, 'needs_autoescape', False):
new_obj = func(obj, autoescape=context.autoescape, *arg_vals)
else:
new_obj = func(obj, *arg_vals)
if getattr(func, 'is_safe', False) and isinstance(obj, SafeData):
obj = mark_safe(new_obj)
elif isinstance(obj, EscapeData):
obj = mark_for_escaping(new_obj)
else:
obj = new_obj
return obj
def args_check(name, func, provided):
provided = list(provided)
# First argument, filter input, is implied.
plen = len(provided) + 1
# Check to see if a decorator is providing the real function.
func = getattr(func, '_decorated_function', func)
args, varargs, varkw, defaults = getargspec(func)
alen = len(args)
dlen = len(defaults or [])
# Not enough OR Too many
if plen < (alen - dlen) or plen > alen:
raise TemplateSyntaxError("%s requires %d arguments, %d provided" %
(name, alen - dlen, plen))
return True
args_check = staticmethod(args_check)
def __str__(self):
return self.token
def resolve_variable(path, context):
"""
Returns the resolved variable, which may contain attribute syntax, within
the given context.
Deprecated; use the Variable class instead.
"""
warnings.warn("resolve_variable() is deprecated. Use django.template."
"Variable(path).resolve(context) instead",
RemovedInDjango20Warning, stacklevel=2)
return Variable(path).resolve(context)
class Variable(object):
"""
A template variable, resolvable against a given context. The variable may
be a hard-coded string (if it begins and ends with single or double quote
marks)::
>>> c = {'article': {'section':u'News'}}
>>> Variable('article.section').resolve(c)
u'News'
>>> Variable('article').resolve(c)
{'section': u'News'}
>>> class AClass: pass
>>> c = AClass()
>>> c.article = AClass()
>>> c.article.section = u'News'
>>> Variable('article.section').resolve(c)
u'News'
(The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.')
"""
def __init__(self, var):
self.var = var
self.literal = None
self.lookups = None
self.translate = False
self.message_context = None
if not isinstance(var, six.string_types):
raise TypeError(
"Variable must be a string or number, got %s" % type(var))
try:
# First try to treat this variable as a number.
#
# Note that this could cause an OverflowError here that we're not
# catching. Since this should only happen at compile time, that's
# probably OK.
self.literal = float(var)
# So it's a float... is it an int? If the original value contained a
# dot or an "e" then it was a float, not an int.
if '.' not in var and 'e' not in var.lower():
self.literal = int(self.literal)
# "2." is invalid
if var.endswith('.'):
raise ValueError
except ValueError:
# A ValueError means that the variable isn't a number.
if var.startswith('_(') and var.endswith(')'):
# The result of the lookup should be translated at rendering
# time.
self.translate = True
var = var[2:-1]
# If it's wrapped with quotes (single or double), then
# we're also dealing with a literal.
try:
self.literal = mark_safe(unescape_string_literal(var))
except ValueError:
# Otherwise we'll set self.lookups so that resolve() knows we're
# dealing with a bonafide variable
if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_':
raise TemplateSyntaxError("Variables and attributes may "
"not begin with underscores: '%s'" %
var)
self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR))
def resolve(self, context):
"""Resolve this variable against a given context."""
if self.lookups is not None:
# We're dealing with a variable that needs to be resolved
value = self._resolve_lookup(context)
else:
# We're dealing with a literal, so it's already been "resolved"
value = self.literal
if self.translate:
if self.message_context:
return pgettext_lazy(self.message_context, value)
else:
return ugettext_lazy(value)
return value
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.var)
def __str__(self):
return self.var
def _resolve_lookup(self, context):
"""
Performs resolution of a real variable (i.e. not a literal) against the
given context.
As indicated by the method's name, this method is an implementation
detail and shouldn't be called by external code. Use Variable.resolve()
instead.
"""
current = context
try: # catch-all for silent variable failures
for bit in self.lookups:
try: # dictionary lookup
current = current[bit]
except (TypeError, AttributeError, KeyError, ValueError):
try: # attribute lookup
# Don't return class attributes if the class is the context:
if isinstance(current, BaseContext) and getattr(type(current), bit):
raise AttributeError
current = getattr(current, bit)
except (TypeError, AttributeError) as e:
# Reraise an AttributeError raised by a @property
if (isinstance(e, AttributeError) and
not isinstance(current, BaseContext) and bit in dir(current)):
raise
try: # list-index lookup
current = current[int(bit)]
except (IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # current is a dict without `int(bit)` key
TypeError): # unsubscriptable object
raise VariableDoesNotExist("Failed lookup for key "
"[%s] in %r",
(bit, current)) # missing attribute
if callable(current):
if getattr(current, 'do_not_call_in_templates', False):
pass
elif getattr(current, 'alters_data', False):
current = settings.TEMPLATE_STRING_IF_INVALID
else:
try: # method call (assuming no args required)
current = current()
except TypeError:
try:
getcallargs(current)
except TypeError: # arguments *were* required
current = settings.TEMPLATE_STRING_IF_INVALID # invalid method call
else:
raise
except Exception as e:
if getattr(e, 'silent_variable_failure', False):
current = settings.TEMPLATE_STRING_IF_INVALID
else:
raise
return current
class Node(object):
# Set this to True for nodes that must be first in the template (although
# they can be preceded by text nodes).
must_be_first = False
child_nodelists = ('nodelist',)
def render(self, context):
"""
Return the node rendered as a string.
"""
pass
def __iter__(self):
yield self
def get_nodes_by_type(self, nodetype):
"""
Return a list of all nodes (within this node and its nodelist)
of the given type
"""
nodes = []
if isinstance(self, nodetype):
nodes.append(self)
for attr in self.child_nodelists:
nodelist = getattr(self, attr, None)
if nodelist:
nodes.extend(nodelist.get_nodes_by_type(nodetype))
return nodes
class NodeList(list):
# Set to True the first time a non-TextNode is inserted by
# extend_nodelist().
contains_nontext = False
def render(self, context):
bits = []
for node in self:
if isinstance(node, Node):
bit = self.render_node(node, context)
else:
bit = node
bits.append(force_text(bit))
return mark_safe(''.join(bits))
def get_nodes_by_type(self, nodetype):
"Return a list of all nodes of the given type"
nodes = []
for node in self:
nodes.extend(node.get_nodes_by_type(nodetype))
return nodes
def render_node(self, node, context):
return node.render(context)
class TextNode(Node):
def __init__(self, s):
self.s = s
def __repr__(self):
return force_str("<Text Node: '%s'>" % self.s[:25], 'ascii',
errors='replace')
def render(self, context):
return self.s
def render_value_in_context(value, context):
"""
Converts any value to a string to become part of a rendered template. This
means escaping, if required, and conversion to a unicode object. If value
is a string, it is expected to have already been translated.
"""
value = template_localtime(value, use_tz=context.use_tz)
value = localize(value, use_l10n=context.use_l10n)
value = force_text(value)
if ((context.autoescape and not isinstance(value, SafeData)) or
isinstance(value, EscapeData)):
return escape(value)
else:
return value
class VariableNode(Node):
def __init__(self, filter_expression):
self.filter_expression = filter_expression
def __repr__(self):
return "<Variable Node: %s>" % self.filter_expression
def render(self, context):
try:
output = self.filter_expression.resolve(context)
except UnicodeDecodeError:
# Unicode conversion can fail sometimes for reasons out of our
# control (e.g. exception rendering). In that case, we fail
# quietly.
return ''
return render_value_in_context(output, context)
# Regex for token keyword arguments
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
def token_kwargs(bits, parser, support_legacy=False):
"""
A utility method for parsing token keyword arguments.
:param bits: A list containing remainder of the token (split by spaces)
that is to be checked for arguments. Valid arguments will be removed
from this list.
:param support_legacy: If set to ``True``, the legacy format
``1 as foo`` will be accepted. Otherwise, only the standard ``foo=1``
format is allowed.
:returns: A dictionary of the arguments retrieved from the ``bits`` token
list.
There is no requirement for all remaining token ``bits`` to be keyword
arguments, so the dictionary will be returned as soon as an invalid
argument format is reached.
"""
if not bits:
return {}
match = kwarg_re.match(bits[0])
kwarg_format = match and match.group(1)
if not kwarg_format:
if not support_legacy:
return {}
if len(bits) < 3 or bits[1] != 'as':
return {}
kwargs = {}
while bits:
if kwarg_format:
match = kwarg_re.match(bits[0])
if not match or not match.group(1):
return kwargs
key, value = match.groups()
del bits[:1]
else:
if len(bits) < 3 or bits[1] != 'as':
return kwargs
key, value = bits[2], bits[0]
del bits[:3]
kwargs[key] = parser.compile_filter(value)
if bits and not kwarg_format:
if bits[0] != 'and':
return kwargs
del bits[:1]
return kwargs
def parse_bits(parser, bits, params, varargs, varkw, defaults,
takes_context, name):
"""
Parses bits for template tag helpers (simple_tag, inclusion_tag and
assignment_tag), in particular by detecting syntax errors and by
extracting positional and keyword arguments.
"""
if takes_context:
if params[0] == 'context':
params = params[1:]
else:
raise TemplateSyntaxError(
"'%s' is decorated with takes_context=True so it must "
"have a first argument of 'context'" % name)
args = []
kwargs = {}
unhandled_params = list(params)
for bit in bits:
# First we try to extract a potential kwarg from the bit
kwarg = token_kwargs([bit], parser)
if kwarg:
# The kwarg was successfully extracted
param, value = list(six.iteritems(kwarg))[0]
if param not in params and varkw is None:
# An unexpected keyword argument was supplied
raise TemplateSyntaxError(
"'%s' received unexpected keyword argument '%s'" %
(name, param))
elif param in kwargs:
# The keyword argument has already been supplied once
raise TemplateSyntaxError(
"'%s' received multiple values for keyword argument '%s'" %
(name, param))
else:
# All good, record the keyword argument
kwargs[str(param)] = value
if param in unhandled_params:
# If using the keyword syntax for a positional arg, then
# consume it.
unhandled_params.remove(param)
else:
if kwargs:
raise TemplateSyntaxError(
"'%s' received some positional argument(s) after some "
"keyword argument(s)" % name)
else:
# Record the positional argument
args.append(parser.compile_filter(bit))
try:
# Consume from the list of expected positional arguments
unhandled_params.pop(0)
except IndexError:
if varargs is None:
raise TemplateSyntaxError(
"'%s' received too many positional arguments" %
name)
if defaults is not None:
# Consider the last n params handled, where n is the
# number of defaults.
unhandled_params = unhandled_params[:-len(defaults)]
if unhandled_params:
# Some positional arguments were not supplied
raise TemplateSyntaxError(
"'%s' did not receive value(s) for the argument(s): %s" %
(name, ", ".join("'%s'" % p for p in unhandled_params)))
return args, kwargs
def generic_tag_compiler(parser, token, params, varargs, varkw, defaults,
name, takes_context, node_class):
"""
Returns a template.Node subclass.
"""
bits = token.split_contents()[1:]
args, kwargs = parse_bits(parser, bits, params, varargs, varkw,
defaults, takes_context, name)
return node_class(takes_context, args, kwargs)
class TagHelperNode(Node):
"""
Base class for tag helper nodes such as SimpleNode, InclusionNode and
AssignmentNode. Manages the positional and keyword arguments to be passed
to the decorated function.
"""
def __init__(self, takes_context, args, kwargs):
self.takes_context = takes_context
self.args = args
self.kwargs = kwargs
def get_resolved_arguments(self, context):
resolved_args = [var.resolve(context) for var in self.args]
if self.takes_context:
resolved_args = [context] + resolved_args
resolved_kwargs = dict((k, v.resolve(context)) for k, v in self.kwargs.items())
return resolved_args, resolved_kwargs
class Library(object):
def __init__(self):
self.filters = {}
self.tags = {}
def tag(self, name=None, compile_function=None):
if name is None and compile_function is None:
# @register.tag()
return self.tag_function
elif name is not None and compile_function is None:
if callable(name):
# @register.tag
return self.tag_function(name)
else:
# @register.tag('somename') or @register.tag(name='somename')
def dec(func):
return self.tag(name, func)
return dec
elif name is not None and compile_function is not None:
# register.tag('somename', somefunc)
self.tags[name] = compile_function
return compile_function
else:
raise InvalidTemplateLibrary("Unsupported arguments to "
"Library.tag: (%r, %r)", (name, compile_function))
def tag_function(self, func):
self.tags[getattr(func, "_decorated_function", func).__name__] = func
return func
def filter(self, name=None, filter_func=None, **flags):
if name is None and filter_func is None:
# @register.filter()
def dec(func):
return self.filter_function(func, **flags)
return dec
elif name is not None and filter_func is None:
if callable(name):
# @register.filter
return self.filter_function(name, **flags)
else:
# @register.filter('somename') or @register.filter(name='somename')
def dec(func):
return self.filter(name, func, **flags)
return dec
elif name is not None and filter_func is not None:
# register.filter('somename', somefunc)
self.filters[name] = filter_func
for attr in ('expects_localtime', 'is_safe', 'needs_autoescape'):
if attr in flags:
value = flags[attr]
# set the flag on the filter for FilterExpression.resolve
setattr(filter_func, attr, value)
# set the flag on the innermost decorated function
# for decorators that need it e.g. stringfilter
if hasattr(filter_func, "_decorated_function"):
setattr(filter_func._decorated_function, attr, value)
filter_func._filter_name = name
return filter_func
else:
raise InvalidTemplateLibrary("Unsupported arguments to "
"Library.filter: (%r, %r)", (name, filter_func))
def filter_function(self, func, **flags):
name = getattr(func, "_decorated_function", func).__name__
return self.filter(name, func, **flags)
def simple_tag(self, func=None, takes_context=None, name=None):
def dec(func):
params, varargs, varkw, defaults = getargspec(func)
class SimpleNode(TagHelperNode):
def render(self, context):
resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
return func(*resolved_args, **resolved_kwargs)
function_name = (name or
getattr(func, '_decorated_function', func).__name__)
compile_func = partial(generic_tag_compiler,
params=params, varargs=varargs, varkw=varkw,
defaults=defaults, name=function_name,
takes_context=takes_context, node_class=SimpleNode)
compile_func.__doc__ = func.__doc__
self.tag(function_name, compile_func)
return func
if func is None:
# @register.simple_tag(...)
return dec
elif callable(func):
# @register.simple_tag
return dec(func)
else:
raise TemplateSyntaxError("Invalid arguments provided to simple_tag")
def assignment_tag(self, func=None, takes_context=None, name=None):
def dec(func):
params, varargs, varkw, defaults = getargspec(func)
class AssignmentNode(TagHelperNode):
def __init__(self, takes_context, args, kwargs, target_var):
super(AssignmentNode, self).__init__(takes_context, args, kwargs)
self.target_var = target_var
def render(self, context):
resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
context[self.target_var] = func(*resolved_args, **resolved_kwargs)
return ''
function_name = (name or
getattr(func, '_decorated_function', func).__name__)
def compile_func(parser, token):
bits = token.split_contents()[1:]
if len(bits) < 2 or bits[-2] != 'as':
raise TemplateSyntaxError(
"'%s' tag takes at least 2 arguments and the "
"second last argument must be 'as'" % function_name)
target_var = bits[-1]
bits = bits[:-2]
args, kwargs = parse_bits(parser, bits, params,
varargs, varkw, defaults, takes_context, function_name)
return AssignmentNode(takes_context, args, kwargs, target_var)
compile_func.__doc__ = func.__doc__
self.tag(function_name, compile_func)
return func
if func is None:
# @register.assignment_tag(...)
return dec
elif callable(func):
# @register.assignment_tag
return dec(func)
else:
raise TemplateSyntaxError("Invalid arguments provided to assignment_tag")
def inclusion_tag(self, file_name, context_class=Context, takes_context=False, name=None):
def dec(func):
params, varargs, varkw, defaults = getargspec(func)
class InclusionNode(TagHelperNode):
def render(self, context):
resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
_dict = func(*resolved_args, **resolved_kwargs)
if not getattr(self, 'nodelist', False):
from django.template.loader import get_template, select_template
if isinstance(file_name, Template):
t = file_name
elif not isinstance(file_name, six.string_types) and is_iterable(file_name):
t = select_template(file_name)
else:
t = get_template(file_name)
self.nodelist = t.nodelist
new_context = context_class(_dict, **{
'autoescape': context.autoescape,
'current_app': context.current_app,
'use_l10n': context.use_l10n,
'use_tz': context.use_tz,
})
# Copy across the CSRF token, if present, because
# inclusion tags are often used for forms, and we need
# instructions for using CSRF protection to be as simple
# as possible.
csrf_token = context.get('csrf_token', None)
if csrf_token is not None:
new_context['csrf_token'] = csrf_token
return self.nodelist.render(new_context)
function_name = (name or
getattr(func, '_decorated_function', func).__name__)
compile_func = partial(generic_tag_compiler,
params=params, varargs=varargs, varkw=varkw,
defaults=defaults, name=function_name,
takes_context=takes_context, node_class=InclusionNode)
compile_func.__doc__ = func.__doc__
self.tag(function_name, compile_func)
return func
return dec
def is_library_missing(name):
"""Check if library that failed to load cannot be found under any
templatetags directory or does exist but fails to import.
Non-existing condition is checked recursively for each subpackage in cases
like <appdir>/templatetags/subpackage/package/module.py.
"""
# Don't bother to check if '.' is in name since any name will be prefixed
# with some template root.
path, module = name.rsplit('.', 1)
try:
package = import_module(path)
return not module_has_submodule(package, module)
except ImportError:
return is_library_missing(path)
def import_library(taglib_module):
"""
Load a template tag library module.
Verifies that the library contains a 'register' attribute, and
returns that attribute as the representation of the library
"""
try:
mod = import_module(taglib_module)
except ImportError as e:
# If the ImportError is because the taglib submodule does not exist,
# that's not an error that should be raised. If the submodule exists
# and raised an ImportError on the attempt to load it, that we want
# to raise.
if is_library_missing(taglib_module):
return None
else:
raise InvalidTemplateLibrary("ImportError raised loading %s: %s" %
(taglib_module, e))
try:
return mod.register
except AttributeError:
raise InvalidTemplateLibrary("Template library %s does not have "
"a variable named 'register'" %
taglib_module)
templatetags_modules = []
def get_templatetags_modules():
"""
Return the list of all available template tag modules.
Caches the result for faster access.
"""
global templatetags_modules
if not templatetags_modules:
_templatetags_modules = []
# Populate list once per process. Mutate the local list first, and
# then assign it to the global name to ensure there are no cases where
# two threads try to populate it simultaneously.
templatetags_modules_candidates = ['django.templatetags']
templatetags_modules_candidates += ['%s.templatetags' % app_config.name
for app_config in apps.get_app_configs()]
for templatetag_module in templatetags_modules_candidates:
try:
import_module(templatetag_module)
_templatetags_modules.append(templatetag_module)
except ImportError:
continue
templatetags_modules = _templatetags_modules
return templatetags_modules
def get_library(library_name):
"""
Load the template library module with the given name.
If the library is not already loaded, loop over all templatetags modules
to locate it.
{% load somelib %} and {% load someotherlib %} will each trigger this loop.
Subsequent loads eg. {% load somelib %} in the same process will grab
the cached module from libraries.
"""
lib = libraries.get(library_name, None)
if not lib:
templatetags_modules = get_templatetags_modules()
tried_modules = []
for module in templatetags_modules:
taglib_module = '%s.%s' % (module, library_name)
tried_modules.append(taglib_module)
lib = import_library(taglib_module)
if lib:
libraries[library_name] = lib
break
if not lib:
raise InvalidTemplateLibrary("Template library %s not found, "
"tried %s" %
(library_name,
','.join(tried_modules)))
return lib
def add_to_builtins(module):
builtins.append(import_library(module))
add_to_builtins('django.template.defaulttags')
add_to_builtins('django.template.defaultfilters')
add_to_builtins('django.template.loader_tags')<|fim▁end|> | arg_vals.append(arg.resolve(context))
if getattr(func, 'expects_localtime', False):
obj = template_localtime(obj, context.use_tz) |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>const fs = require("fs");
module.exports = function (config) {
var mod = require("./mod.js")(config);
config[mod.name] = mod;
function tryPatchComponent(componentName) {
if (config[componentName] && fs.existsSync(`${__dirname}/${componentName}.js`)) {
require(`./${componentName}`)(config, mod);
}
}
tryPatchComponent("engine");
tryPatchComponent("backend");
tryPatchComponent("driver");
tryPatchComponent("cli");
tryPatchComponent("cronjobs");<|fim▁hole|><|fim▁end|> | }; |
<|file_name|>count_visits_to_subdomains.hpp<|end_file_name|><|fim▁begin|>#ifndef COUNT_VISITS_TO_SUBDOMAINS_HPP
#define COUNT_VISITS_TO_SUBDOMAINS_HPP
/*
https://leetcode.com/problems/subdomain-visit-count/
A website domain like "discuss.leetcode.com" consists of various subdomains. At the top level,
we have "com", at the next level, we have "leetcode.com", and at the lowest level,
"discuss.leetcode.com". When we visit a domain like "discuss.leetcode.com", we will also visit
the parent domains "leetcode.com" and "com" implicitly.
Now, call a "count-paired domain" to be a count (representing the number of visits this domain
received), followed by a space, followed by the address. An example of a count-paired domain might
be "9001 discuss.leetcode.com".
We are given a list cpdomains of count-paired domains. We would like a list of count-paired domains,
(in the same format as the input, and in any order), that explicitly counts the number of visits
to each subdomain.
Example 1:
Input:
["9001 discuss.leetcode.com"]
Output:
["9001 discuss.leetcode.com", "9001 leetcode.com", "9001 com"]
Explanation:
We only have one website domain: "discuss.leetcode.com". As discussed above, the subdomain
"leetcode.com" and "com" will also be visited. So they will all be visited 9001 times.
Example 2:
Input:
["900 google.mail.com", "50 yahoo.com", "1 intel.mail.com", "5 wiki.org"]
Output:
["901 mail.com","50 yahoo.com","900 google.mail.com","5 wiki.org","5 org","1 intel.mail.com","951 com"]
Explanation:
We will visit "google.mail.com" 900 times, "yahoo.com" 50 times, "intel.mail.com" once and
"wiki.org" 5 times. For the subdomains, we will visit "mail.com" 900 + 1 = 901 times,
"com" 900 + 50 + 1 = 951 times, and "org" 5 times.
Notes:
The length of cpdomains will not exceed 100.
The length of each domain name will not exceed 100.
Each address will have either 1 or 2 "." characters.
The input count in any count-paired domain will not exceed 10000.
The answer output can be returned in any order.
*/
#include <string>
#include <string_view>
#include <vector>
#include <unordered_map>
#include <limits>
namespace Algo::DS::HashMap {
class CountVisitsToSubdomains {
static bool convert_to_num(const std::string_view& str, size_t& num) {
char** end = nullptr;
const auto result = std::strtoull(str.data(), end, 10);
if (result == std::numeric_limits<unsigned long long>::max()) {
return false;
}
num = result;
return true;
}
static std::tuple<bool, size_t, std::vector<std::string_view>> split_count_domains_string(
const std::string_view& str) {
const auto space_pos = str.find(' ');
if (space_pos == std::string::npos) { return {false, 0, {}}; }
size_t count = 0;
if (!convert_to_num(str.substr(0, space_pos), count) || count == 0) {
return {false, 0, {}};
}
std::vector<std::string_view> domains;
for (size_t i = space_pos + 1; i < str.size(); ++i) {
const auto pos = str.find('.', i);
const auto dot_pos = pos == std::string_view::npos ? str.size() : pos;
domains.push_back(str.substr(i));
i = dot_pos;
}
if (domains.empty()) { return {false, 0, {}}; }
return {true, count, domains};
}
public:
static std::vector<std::string> count(const std::vector<std::string>& count_domains) {
std::unordered_map<std::string_view, size_t> count_map;
for (const auto& str : count_domains) {
const auto [is_ok, count, domains] = split_count_domains_string(str);
if (!is_ok) { continue; }
<|fim▁hole|> }
}
std::vector<std::string> result;
for (const auto& [str, count] : count_map) {
result.push_back(std::to_string(count) + ' ' + std::string(str));
}
return result;
}
};
}
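// Usage sketch (illustrative only; assumes this header is included from a
// translation unit with a main() elsewhere):
//   const auto counts = Algo::DS::HashMap::CountVisitsToSubdomains::count(
//       {"9001 discuss.leetcode.com"});
//   // counts holds "9001 discuss.leetcode.com", "9001 leetcode.com",
//   // "9001 com", in unspecified order.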
#endif //COUNT_VISITS_TO_SUBDOMAINS_HPP<|fim▁end|> | for (const auto& domain : domains) {
count_map[domain] += count; |
<|file_name|>parse_metamap.py<|end_file_name|><|fim▁begin|>#!/bin/python
# The purpose of this script is to take the *machine-readable* output of UMLS
# MetaMap and convert it to something that looks like a sentence of UMLS CUIs,
# if possible. Ideally there would be an option in MetaMap to do this, assuming
# it is sensible.
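#
# Illustrative sketch of the intended transformation (values are made up,
# not taken from a real MetaMap run): an input sentence such as
#   "patient denies chest pain"
# should come out the other side looking roughly like
#   "patient NOT_C0008031"
# where C0008031 stands for the concept MetaMap mapped "chest pain" to and
# the NOT_ prefix marks a negated concept (see parse_negline below).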
import re
import sys
#INTERACTIVE = True
INTERACTIVE = False
# "hacks" to fix metamap weirdness
POSTPROC = True
if POSTPROC:
print 'WARNING: Performing dataset-specific postprocessing.'
# --- some regexes --- #
utterance_re = re.compile('^utterance\(')
phrase_re = re.compile('^phrase\(')
mappings_re = re.compile('^mappings\(')
candidates_re = re.compile('^candidates\(')
EOU_re = re.compile('^\'EOU')
# this is a file of sentences, fed into metamap
raw_data_path = ''
# --- grab in paths --- #
# this is the metamap output. YMMV
# created by the command:
# metamap14 -q -Q 3 --word_sense_disambiguation raw_data_path metamap_output_path
# must provide an input path
assert len(sys.argv) >= 2
metamap_output_path = sys.argv[1]
# optionally provide output path
# (this is the processed data path, the output of this script)
try:
proc_data_path = sys.argv[2]
# do not write over the input, please
assert not proc_data_path == metamap_output_path
except IndexError:
# not provided
proc_data_path = metamap_output_path + '.reform'
# --- open files --- #
metamap_output = open(metamap_output_path, 'r')
proc_data = open(proc_data_path, 'w')
# --- the first line is 'args', pop that --- #
args_line = metamap_output.readline()
# not sure what second line is but pop it too
unknown_line = metamap_output.readline()
# --- the relevant and important functions --- #
def parse_phrase(line, neg_dict={}):
"""
Takes a phrase from machine-readable format, parses its mappings, returns
a string of mapped terms (into CUIs, when possible).
"""
wordmap = dict()
# list of words in the phrase
# (note: the phrase looks like phrase('PHRASEHERE', [sometext(... )
phrase = re.sub('[\'\.]','',re.split(',\[[a-zA-Z]+\(', re.sub('phrase\(','', line))[0])
# get the candidates (and most importantly, their numbers)
candidates = metamap_output.readline()
if candidates == '' or not candidates_re.match(candidates):
parsed_phrase = phrase + ' '
return parsed_phrase
TotalCandidateCount = int(re.sub('candidates\(','',candidates).split(',')[0])
# get the mappings
mappings = metamap_output.readline()
if mappings == '' or not mappings_re.match(mappings):
parsed_phrase = phrase + ' '
return parsed_phrase
if TotalCandidateCount == 0:
# there were no mappings for this phrase
parsed_phrase = phrase + ' '
else:
# accounted for by other words
delwords = []
parsed_phrase = ''
# split the mappings up into 'ev's
split_mappings = mappings.split('ev(')
outstring = ''
for mapping in split_mappings[1:]:
CUI = mapping.split(',')[1].strip('\'')
try:
words = re.split('[\[\]]',','.join(mapping.split(',')[4:]))[1].split(',')
except IndexError:
# ugh, mapping is messed up
print 'WARNING: input is messed up'
return parsed_phrase
umls_strings = mapping.split(',')[2:4]
# CPI is the final [] in this mapping, I think/believe
ConceptPositionalInfo = mapping.split('[')[-1].split(']')[0]
if ConceptPositionalInfo in neg_dict:
# this concept has been negated!
# make sure it's the same one...
assert CUI in neg_dict[ConceptPositionalInfo]
# need to make sure it's ONE of the CUIs which was negated at this location
CUI = 'NOT_' + CUI
if INTERACTIVE:
outstring += '\n\tAssociation between '+ CUI + ' and ' + ', '.join(map(lambda x: '"'+x+'"',words))
if len(words) > 1:
outstring += ' (subsuming ' + ' '.join(map(lambda x: '"'+x+'"', words[1:])) + ')'
outstring += '\n\tbased on UMLS strings ' + ', '.join(umls_strings) +'\n'
wordmap[words[0]] = CUI
# if multiple words mapped to this CUI, remember to delete the rest
# that is: when we consume the sentence later we will 'replace' the
# first word in this list with the CUI, then delete the rest
# brittleness: delwords may appear elsewhere in the sentence
delwords += words[1:]
# split on spaces, commas
for word in re.split(', | ', phrase):
try:
# lowercase word, cause it is represented in the prolog that way
parsed_phrase += wordmap[word.lower()] + ' '
except KeyError:
if word.lower() in delwords:
continue
else:
parsed_phrase += word + ' '
if INTERACTIVE:
if len(wordmap) > 0:<|fim▁hole|> print outstring
print 'Mapped:', phrase, '--->',
print parsed_phrase
print ''
eh = raw_input('')
return parsed_phrase
def postproc_utterance(parsed_utterance):
"""
HACKS!
Do some 'manual' post-processing to make up for MetaMap peculiarity.
WARNING: dataset specific.
"""
# _ S__ DEID --> _S__DEID
parsed_utterance = re.sub('_ S__ DEID', '_S__DEID', parsed_utterance)
# _ S__ C2825141 --> _S__FINDING (FINDING...)
parsed_utterance = re.sub('_ S__ C2825141', '_S__FINDING', parsed_utterance)
return parsed_utterance
def parse_utterance(neg_dict={}):
"""
Suck in an utterance from the machine-readable format, parse its mapping
and then return a string of mapped terms (into CUIs).
May not be the same length as the input sentence.
"""
phrases = ''
line = metamap_output.readline()
while not EOU_re.match(line):
if phrase_re.match(line):
parsed_phrase = parse_phrase(line, neg_dict)
phrases += parsed_phrase
elif line == '':
# EOF I guess...
return phrases
elif not EOU_re.match(line):
print 'ERROR: utterance not followed by EOU line, followed by:'
print line
sys.exit('ERROR: missing EOU')
line = metamap_output.readline()
return phrases
def parse_negline(neg_line):
"""
Parse the THIRD line of the .mmo file, where the negations are stored.
Why does it not do this per-phrase? Mystery.
We connect the negated-CUI to its appearance in the text using the
ConceptPositionalInfo which _appears_ to correspond to the PosInfo field
which appears in the ev found in a mapping.
The output is neg_dict which maps these ConceptPositionalInfos into the
associated CUIs: we use this for sanity checking while parsing the mappings;
the position should be enough to identify it, but for extra-safety we assert
that the CUIs are matching.
"""
assert 'neg_list([' in neg_line
neg_dict = dict()
# strip things out
# (removing "neg_list(["... and ..."]).\n")
l_stripped = neg_line[10:][:-5]
# split into separate 'negations'...
# split on ( and then remove the trailing ", negation(" at the end, first entry is useless
negations = map(lambda x: x.rstrip(')')[:-10] if 'negation' in x else x.rstrip(')'), l_stripped.split('('))[1:]
# for each negation, grab its location and CUI
for neg in negations:
# strip the string part of the CUI: we know it's between the SECOND pair of [], and before a :
NegatedConcept = neg.split('[')[2].split(':')[0].strip('\'')
# now get the concept... we know it's in the THIRD set of []... and there may be several separated by ,
ConceptPositionalInfo = neg.split('[')[3].rstrip(']')
try:
neg_dict[ConceptPositionalInfo].add(NegatedConcept)
except KeyError:
neg_dict[ConceptPositionalInfo] = set([NegatedConcept])
return neg_dict
# --- run through the file --- #
# --- get the neglist --- #
neg_line = metamap_output.readline()
neg_dict = parse_negline(neg_line)
# the first line
n = 0
while True:
line = metamap_output.readline()
if not line: break
if utterance_re.match(line):
# we are now in an utterance!
parsed_utterance = parse_utterance(neg_dict)
if POSTPROC:
# hacky post-processing
parsed_utterance = postproc_utterance(parsed_utterance)
print 'Parsed utterance:'
print '\t','"'.join(line.split('"')[1:2]).strip('[]')
print '=====>'
print '\t',parsed_utterance
proc_data.write(parsed_utterance+'\n')
n += 1
else:
# not interested in this line
continue
proc_data.close()
print '\nWrote', n, 'sentences to', proc_data_path<|fim▁end|> | # yolo
print '\nMapping phrase:',
print phrase, '...' |
<|file_name|>rest_api_server.py<|end_file_name|><|fim▁begin|>import time
import json
import random
from flask import Flask, request, current_app, abort
from functools import wraps
from cloudbrain.utils.metadata_info import (map_metric_name_to_num_channels,
get_supported_devices,
get_metrics_names)
from cloudbrain.settings import WEBSERVER_PORT
_API_VERSION = "v1.0"
app = Flask(__name__)<|fim▁hole|>
dao = CassandraDAO()
dao.connect()
def support_jsonp(f):
"""Wraps JSONified output for JSONP"""
@wraps(f)
def decorated_function(*args, **kwargs):
callback = request.args.get('callback', False)
if callback:
content = str(callback) + '(' + str(f()) + ')'
return current_app.response_class(content,
mimetype='application/json')
else:
return f(*args, **kwargs)
return decorated_function
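# Illustrative use (assumed client behaviour, not part of this module):
# GET /metadata/devices?callback=showDevices returns
#   showDevices([...device names...])
# instead of the bare JSON list, so a browser can load it via a <script>
# tag (JSONP). Without ?callback=, the plain JSON response is returned.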
@app.route('/data', methods=['GET'])
@support_jsonp
def data():
"""
GET metric data
:return:
"""
# default to roughly 5 microseconds before now when start is not specified.
default_start_timestamp = int(time.time() * 1000000 - 5)
device_id = request.args.get('device_id', None)
device_name = request.args.get('device_name', None)
metric = request.args.get('metric', None)
start = int(request.args.get('start', default_start_timestamp))
if not device_name:
return "missing param: device_name", 500
if not metric:
return "missing param: metric", 500
if not device_id:
return "missing param: device_id", 500
# data_records = _get_mock_data(device_name, metric)
data_records = dao.get_data(device_name, device_id, metric, start)
return json.dumps(data_records)
def _get_mock_data(device_name, metric):
metric_to_num_channels = map_metric_name_to_num_channels(device_name)
num_channels = metric_to_num_channels[metric]
now = int(time.time() * 1000000 - 5) # micro seconds
data_records = []
for i in xrange(5):
record = {'timestamp': now + i}
for j in xrange(num_channels):
channel_name = 'channel_%s' % j
record[channel_name] = random.random() * 10
data_records.append(record)
return data_records
@app.route('/metadata/devices', methods=['GET'])
@support_jsonp
def get_device_names():
""" Returns the device names from the metadata file """
return json.dumps(get_supported_devices())
@app.route('/registered_devices', methods=['GET'])
@support_jsonp
def get_registered_devices():
""" Get the registered devices IDs """
registered_devices = dao.get_registered_devices()
return json.dumps(registered_devices)
""" Tags """
def _generate_mock_tags(user_id, tag_name):
if tag_name is None:
tag_names = ["Facebook", "Netflix", "TechCrunch"]
else:
tag_names = [tag_name]
tags = []
for tag_name in tag_names:
tags.append(
{"tag_id": "c1f6e1f2-c964-48c0-8cdd-fafe8336190b",
"user_id": user_id,
"tag_name": tag_name,
"metadata": {},
"start": int(time.time() * 1000) - 10,
"end": int(time.time() * 1000)
})
return tags
def generate_mock_tag(user_id, tag_id):
tag = {"tag_id": tag_id,
"user_id": user_id,
"tag_name": "label_1",
"metadata": {},
"start": int(time.time() * 1000) - 10,
"end": int(time.time() * 1000)
}
return tag
@app.route('/api/%s/users/<string:user_id>/tags' % _API_VERSION,
methods=['GET'])
@support_jsonp
def get_tags(user_id):
"""Retrieve all tags for a specific user """
tag_name = request.args.get('tag_name', None)
#tags = _generate_mock_tags(user_id, tag_name)
tags = dao.get_tags(user_id, tag_name)
return json.dumps(tags), 200
@app.route('/api/%s/users/<string:user_id>/tags/<string:tag_id>' % _API_VERSION,
methods=['GET'])
@support_jsonp
def get_tag(user_id, tag_id):
"""Retrieve a specific tag for a specific user """
    #tag = generate_mock_tag(user_id, tag_id)
tag = dao.get_tag(user_id, tag_id)
return json.dumps(tag), 200
@app.route('/api/%s/users/<string:user_id>/tags' % _API_VERSION,
methods=['POST'])
@support_jsonp
def create_tag(user_id):
if (not request.json
or not 'tag_name' in request.json
or not 'start' in request.json):
abort(400)
tag_name = request.json.get("tag_name")
metadata = request.json.get("metadata")
start = request.json.get("start")
end = request.json.get("end")
#tag_id = "c1f6e1f2-c964-48c0-8cdd-fafe8336190b"
tag_id = dao.create_tag(user_id, tag_name, metadata, start, end)
    return json.dumps({"tag_id": tag_id}), 201
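# Hedged example payload for the endpoint above (values are illustrative):
#   POST /api/v1.0/users/<user_id>/tags
#   {"tag_name": "Netflix", "metadata": {}, "start": 1437508000000, "end": 1437508010000}
# A successful call answers with {"tag_id": "<uuid>"}.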
""" Tag aggregates"""
def _generate_mock_tag_aggregates(user_id, tag_id, device_type, metrics):
aggregates = []
for metric in metrics:
aggregates.append(
{
"aggregate_id": "c1f6e1f2-c964-48c0-8cdd-fafe83361977",
"user_id": user_id,
"tag_id": tag_id,
"aggregate_type": "avg",
"device_type": device_type,
"aggregate_value": random.random() * 10,
"metric": metric,
"start": int(time.time() * 1000) - 10,
"end": int(time.time() * 1000)
})
return aggregates
@app.route(('/api/%s/users/<string:user_id>/tags/<string:tag_id>/aggregates'
% _API_VERSION), methods=['GET'])
@support_jsonp
def get_tag_aggregate(user_id, tag_id):
"""Retrieve all aggregates for a specific tag and user"""
device_type = request.args.get('device_type', None)
metrics = request.args.getlist('metrics', None)
if device_type is None and len(metrics) == 0:
device_types = get_supported_devices()
for device_type in device_types:
metrics.extend(get_metrics_names(device_type))
elif len(metrics) == 0 and device_type is not None:
metrics = get_metrics_names(device_type)
elif len(metrics) > 0 and device_type is None:
return "parameter 'device_type' is required to filter on `metrics`", 500
#aggregates = _generate_mock_tag_aggregates(user_id, tag_id, device_type, metrics)
aggregates = dao.get_aggregates(user_id, tag_id, device_type, metrics)
return json.dumps(aggregates), 200
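# Hedged examples of the filtering rules above (paths and values illustrative):
#   GET .../tags/<tag_id>/aggregates                     -> all devices, all metrics
#   GET .../tags/<tag_id>/aggregates?device_type=muse    -> all muse metrics
#   GET .../tags/<tag_id>/aggregates?device_type=muse&metrics=eeg
#   GET .../tags/<tag_id>/aggregates?metrics=eeg         -> error, device_type required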
if __name__ == "__main__":
app.run(host="0.0.0.0", port=WEBSERVER_PORT)<|fim▁end|> | app.config['PROPAGATE_EXCEPTIONS'] = True
from cloudbrain.datastore.CassandraDAO import CassandraDAO |
<|file_name|>Server.cpp<|end_file_name|><|fim▁begin|>// **********************************************************************
//
// Copyright (c) 2003-2014 ZeroC, Inc. All rights reserved.
//
// This copy of Ice is licensed to you under the terms described in the
// ICE_LICENSE file included in this distribution.
//
// **********************************************************************
#include <Ice/Ice.h>
#include <ThroughputI.h>
using namespace std;
class ThroughputServer : public Ice::Application
{
public:<|fim▁hole|>
int
main(int argc, char* argv[])
{
ThroughputServer app;
return app.main(argc, argv, "config.server");
}
int
ThroughputServer::run(int argc, char*[])
{
if(argc > 1)
{
cerr << appName() << ": too many arguments" << endl;
return EXIT_FAILURE;
}
Ice::ObjectAdapterPtr adapter = communicator()->createObjectAdapter("Throughput");
Demo::ThroughputPtr servant = new ThroughputI;
adapter->add(servant, communicator()->stringToIdentity("throughput"));
adapter->activate();
communicator()->waitForShutdown();
return EXIT_SUCCESS;
}<|fim▁end|> |
virtual int run(int, char*[]);
}; |
<|file_name|>passive_cable.py<|end_file_name|><|fim▁begin|>"""
Implements compartmental model of a passive cable. See Neuronal Dynamics
`Chapter 3 Section 2 <http://neuronaldynamics.epfl.ch/online/Ch3.S2.html>`_
"""
# This file is part of the exercise code repository accompanying
# the book: Neuronal Dynamics (see http://neuronaldynamics.epfl.ch)
# located at http://github.com/EPFL-LCN/neuronaldynamics-exercises.
# This free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License 2.0 as published by the
# Free Software Foundation. You should have received a copy of the
# GNU General Public License along with the repository. If not,
# see http://www.gnu.org/licenses/.
# Should you reuse and publish the code for your own purposes,
# please cite the book or point to the webpage http://neuronaldynamics.epfl.ch.
# Wulfram Gerstner, Werner M. Kistler, Richard Naud, and Liam Paninski.
# Neuronal Dynamics: From Single Neurons to Networks and Models of Cognition.
# Cambridge University Press, 2014.
import brian2 as b2
from neurodynex3.tools import input_factory
import matplotlib.pyplot as plt
import numpy as np
<|fim▁hole|>CABLE_LENGTH = 500. * b2.um # length of dendrite
CABLE_DIAMETER = 2. * b2.um # diameter of dendrite
R_LONGITUDINAL = 0.5 * b2.kohm * b2.mm # Intracellular medium resistance
R_TRANSVERSAL = 1.25 * b2.Mohm * b2.mm ** 2 # cell membrane resistance (->leak current)
E_LEAK = -70. * b2.mV # reversal potential of the leak current (-> resting potential)
CAPACITANCE = 0.8 * b2.uF / b2.cm ** 2 # membrane capacitance
DEFAULT_INPUT_CURRENT = input_factory.get_step_current(2000, 3000, unit_time=b2.us, amplitude=0.2 * b2.namp)
DEFAULT_INPUT_LOCATION = [CABLE_LENGTH / 3] # provide an array of locations
# print("Membrane Timescale = {}".format(R_TRANSVERSAL*CAPACITANCE))
def simulate_passive_cable(current_injection_location=DEFAULT_INPUT_LOCATION, input_current=DEFAULT_INPUT_CURRENT,
length=CABLE_LENGTH, diameter=CABLE_DIAMETER,
r_longitudinal=R_LONGITUDINAL,
r_transversal=R_TRANSVERSAL, e_leak=E_LEAK, initial_voltage=E_LEAK,
capacitance=CAPACITANCE, nr_compartments=200, simulation_time=5 * b2.ms):
"""Builds a multicompartment cable and numerically approximates the cable equation.
Args:
current_injection_location (list): List [] of input locations (Quantity, Length): [123.*b2.um]
input_current (TimedArray): TimedArray of current amplitudes. One column per current_injection_location.
length (Quantity): Length of the cable: 0.8*b2.mm
diameter (Quantity): Diameter of the cable: 0.2*b2.um
r_longitudinal (Quantity): The longitudinal (axial) resistance of the cable: 0.5*b2.kohm*b2.mm
r_transversal (Quantity): The transversal resistance (=membrane resistance): 1.25*b2.Mohm*b2.mm**2
e_leak (Quantity): The reversal potential of the leak current (=resting potential): -70.*b2.mV
initial_voltage (Quantity): Value of the potential at t=0: -70.*b2.mV
capacitance (Quantity): Membrane capacitance: 0.8*b2.uF/b2.cm**2
nr_compartments (int): Number of compartments. Spatial discretization: 200
simulation_time (Quantity): Time for which the dynamics are simulated: 5*b2.ms
Returns:
(StateMonitor, SpatialNeuron): The state monitor contains the membrane voltage in a
Time x Location matrix. The SpatialNeuron object specifies the simulated neuron model
and gives access to the morphology. You may want to use those objects for
spatial indexing: myVoltageStateMonitor[mySpatialNeuron.morphology[0.123*b2.um]].v
"""
assert isinstance(input_current, b2.TimedArray), "input_current is not of type TimedArray"
assert input_current.values.shape[1] == len(current_injection_location),\
"number of injection_locations does not match nr of input currents"
cable_morphology = b2.Cylinder(diameter=diameter, length=length, n=nr_compartments)
# Im is transmembrane current
# Iext is injected current at a specific position on dendrite
EL = e_leak
RT = r_transversal
eqs = """
Iext = current(t, location_index): amp (point current)
location_index : integer (constant)
Im = (EL-v)/RT : amp/meter**2
"""
cable_model = b2.SpatialNeuron(morphology=cable_morphology, model=eqs, Cm=capacitance, Ri=r_longitudinal)
monitor_v = b2.StateMonitor(cable_model, "v", record=True)
# inject all input currents at the specified location:
nr_input_locations = len(current_injection_location)
input_current_0 = np.insert(input_current.values, 0, 0., axis=1) * b2.amp # insert default current: 0. [amp]
current = b2.TimedArray(input_current_0, dt=input_current.dt * b2.second)
for current_index in range(nr_input_locations):
insert_location = current_injection_location[current_index]
compartment_index = int(np.floor(insert_location / (length / nr_compartments)))
# next line: current_index+1 because 0 is the default current 0Amp
cable_model.location_index[compartment_index] = current_index + 1
    # set initial values and run for the requested simulation_time
cable_model.v = initial_voltage
b2.run(simulation_time)
return monitor_v, cable_model
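# Hedged sketch (not executed here; amplitudes and positions are made up):
# injecting two step currents at two sites. The TimedArray must carry one
# column per entry of current_injection_location.
#
#   two_currents = b2.TimedArray(
#       np.array([[0., 0.], [0.2e-9, 0.1e-9], [0., 0.]]) * b2.amp, dt=1. * b2.ms)
#   monitor, cable = simulate_passive_cable(
#       current_injection_location=[100. * b2.um, 300. * b2.um],
#       input_current=two_currents)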
def getting_started():
"""A simple code example to get started.
"""
current = input_factory.get_step_current(500, 510, unit_time=b2.us, amplitude=3. * b2.namp)
voltage_monitor, cable_model = simulate_passive_cable(
length=0.5 * b2.mm, current_injection_location=[0.1 * b2.mm], input_current=current,
nr_compartments=100, simulation_time=2 * b2.ms)
# provide a minimal plot
plt.figure()
plt.imshow(voltage_monitor.v / b2.volt)
plt.colorbar(label="voltage")
plt.xlabel("time index")
plt.ylabel("location index")
plt.title("vm at (t,x), raw data voltage_monitor.v")
plt.show()
if __name__ == "__main__":
getting_started()<|fim▁end|> | # integration time step in milliseconds
b2.defaultclock.dt = 0.01 * b2.ms
# DEFAULT morphological and electrical parameters |
<|file_name|>sawlive.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
'''<|fim▁hole|> it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,base64
from resources.lib.libraries import client
from resources.lib.libraries import jsunpack
def resolve(url):
try:
page = re.compile('//.+?/(?:embed|v)/([0-9a-zA-Z-_]+)').findall(url)[0]
page = 'http://sawlive.tv/embed/%s' % page
try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
except: referer = page
result = client.request(page, referer=referer)
unpacked = ''
packed = result.split('\n')
for i in packed:
try: unpacked += jsunpack.unpack(i)
except: pass
result += unpacked
result = urllib.unquote_plus(result)
result = re.compile('<iframe(.+?)</iframe>').findall(result)[-1]
url = re.compile('src\s*=\s*[\'|\"](.+?)[\'|\"].+?[\'|\"](.+?)[\'|\"]').findall(result)[0]
url = '/'.join(url)
result = client.request(url, referer=referer)
strm = re.compile("'streamer'.+?'(.+?)'").findall(result)[0]
file = re.compile("'file'.+?'(.+?)'").findall(result)[0]
swf = re.compile("SWFObject\('(.+?)'").findall(result)[0]
url = '%s playpath=%s swfUrl=%s pageUrl=%s live=1 timeout=30' % (strm, file, swf, url)
return url
except:
return<|fim▁end|> | FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify |
<|file_name|>model.py<|end_file_name|><|fim▁begin|># This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import re
import heapq
from sql import Null
from sql.aggregate import Max
from sql.conditionals import Case
from collections import defaultdict
try:
import simplejson as json
except ImportError:
import json
from ..model import ModelView, ModelSQL, fields, Unique
from ..report import Report
from ..wizard import Wizard, StateView, StateAction, Button
from ..transaction import Transaction
from ..cache import Cache
from ..pool import Pool
from ..pyson import Bool, Eval
from ..rpc import RPC
from .. import backend
from ..protocols.jsonrpc import JSONDecoder, JSONEncoder
from ..tools import is_instance_method
try:
from ..tools.StringMatcher import StringMatcher
except ImportError:
from difflib import SequenceMatcher as StringMatcher
__all__ = [
'Model', 'ModelField', 'ModelAccess', 'ModelFieldAccess', 'ModelButton',
'ModelData', 'PrintModelGraphStart', 'PrintModelGraph', 'ModelGraph',
]
IDENTIFIER = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
class Model(ModelSQL, ModelView):
"Model"
__name__ = 'ir.model'
_order_name = 'model'
name = fields.Char('Model Description', translate=True, loading='lazy',
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
model = fields.Char('Model Name', required=True,
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
info = fields.Text('Information',
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
module = fields.Char('Module',
help="Module in which this model is defined", readonly=True)
global_search_p = fields.Boolean('Global Search')
fields = fields.One2Many('ir.model.field', 'model', 'Fields',
required=True)
@classmethod
def __setup__(cls):
super(Model, cls).__setup__()
table = cls.__table__()
cls._sql_constraints += [
('model_uniq', Unique(table, table.model),
'The model must be unique!'),
]
cls._error_messages.update({
'invalid_module': ('Module name "%s" is not a valid python '
'identifier.'),
})
cls._order.insert(0, ('model', 'ASC'))
cls.__rpc__.update({
'list_models': RPC(),
'list_history': RPC(),
'global_search': RPC(),
})
@classmethod
def register(cls, model, module_name):
pool = Pool()
Property = pool.get('ir.property')
cursor = Transaction().cursor
ir_model = cls.__table__()
cursor.execute(*ir_model.select(ir_model.id,
where=ir_model.model == model.__name__))
model_id = None
if cursor.rowcount == -1 or cursor.rowcount is None:
data = cursor.fetchone()
if data:
model_id, = data
elif cursor.rowcount != 0:
model_id, = cursor.fetchone()
if not model_id:
cursor.execute(*ir_model.insert(
[ir_model.model, ir_model.name, ir_model.info,
ir_model.module],
[[model.__name__, model._get_name(), model.__doc__,
module_name]]))
Property._models_get_cache.clear()
cursor.execute(*ir_model.select(ir_model.id,
where=ir_model.model == model.__name__))
(model_id,) = cursor.fetchone()
elif model.__doc__:
cursor.execute(*ir_model.update(
[ir_model.name, ir_model.info],
[model._get_name(), model.__doc__],
where=ir_model.id == model_id))
return model_id
@classmethod
def validate(cls, models):
super(Model, cls).validate(models)
cls.check_module(models)
@classmethod
def check_module(cls, models):
'''
Check module
'''
for model in models:
if model.module and not IDENTIFIER.match(model.module):
cls.raise_user_error('invalid_module', (model.rec_name,))
@classmethod
def list_models(cls):
'Return a list of all models names'
models = cls.search([], order=[
('module', 'ASC'), # Optimization assumption
('model', 'ASC'),
('id', 'ASC'),
])
return [m.model for m in models]
@classmethod
def list_history(cls):
'Return a list of all models with history'
return [name for name, model in Pool().iterobject()
if getattr(model, '_history', False)]
@classmethod
def create(cls, vlist):
pool = Pool()
Property = pool.get('ir.property')
res = super(Model, cls).create(vlist)
# Restart the cache of models_get
Property._models_get_cache.clear()
return res
@classmethod
def write(cls, models, values, *args):
pool = Pool()
Property = pool.get('ir.property')
super(Model, cls).write(models, values, *args)
# Restart the cache of models_get
Property._models_get_cache.clear()
@classmethod
def delete(cls, models):
pool = Pool()
Property = pool.get('ir.property')
super(Model, cls).delete(models)
# Restart the cache of models_get
Property._models_get_cache.clear()
@classmethod
def global_search(cls, text, limit, menu='ir.ui.menu'):
"""
Search on models for text including menu
Returns a list of tuple (ratio, model, model_name, id, name, icon)
The size of the list is limited to limit
"""
pool = Pool()
ModelAccess = pool.get('ir.model.access')
if not limit > 0:
raise ValueError('limit must be > 0: %r' % (limit,))
models = cls.search(['OR',
('global_search_p', '=', True),
('model', '=', menu),
])
access = ModelAccess.get_access([m.model for m in models])
s = StringMatcher()
if isinstance(text, str):
text = text.decode('utf-8')
s.set_seq2(text)
def generate():
for model in models:
if not access[model.model]['read']:
continue
Model = pool.get(model.model)
if not hasattr(Model, 'search_global'):
continue
for record, name, icon in Model.search_global(text):
if isinstance(name, str):
name = name.decode('utf-8')
s.set_seq1(name)
yield (s.ratio(), model.model, model.rec_name,
record.id, name, icon)
return heapq.nlargest(int(limit), generate())
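# Hedged illustration of global_search output (models, ids and ratios made up):
#   Model.global_search(u'invoice', 5)
#   -> [(0.9, 'account.invoice', u'Invoice', 42, u'Invoice 42', 'tryton-invoice'),
#       (0.4, 'ir.ui.menu', u'Menu', 7, u'Invoices', 'tryton-list')]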
class ModelField(ModelSQL, ModelView):
"Model field"
__name__ = 'ir.model.field'
name = fields.Char('Name', required=True,
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
relation = fields.Char('Model Relation',
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
model = fields.Many2One('ir.model', 'Model', required=True,
select=True, ondelete='CASCADE',
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
field_description = fields.Char('Field Description', translate=True,
loading='lazy',
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
ttype = fields.Char('Field Type',
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
groups = fields.Many2Many('ir.model.field-res.group', 'field',
'group', 'Groups')
help = fields.Text('Help', translate=True, loading='lazy',
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
module = fields.Char('Module',
help="Module in which this field is defined")
@classmethod
def __setup__(cls):
super(ModelField, cls).__setup__()
table = cls.__table__()
cls._sql_constraints += [
('name_model_uniq', Unique(table, table.name, table.model),
'The field name in model must be unique!'),
]
cls._error_messages.update({
'invalid_name': ('Model Field name "%s" is not a valid python '
'identifier.'),
})
cls._order.insert(0, ('name', 'ASC'))
@classmethod
def register(cls, model, module_name, model_id):
pool = Pool()
Model = pool.get('ir.model')
cursor = Transaction().cursor
ir_model_field = cls.__table__()
ir_model = Model.__table__()
cursor.execute(*ir_model_field.join(ir_model,
condition=ir_model_field.model == ir_model.id
).select(ir_model_field.id.as_('id'),
ir_model_field.name.as_('name'),
ir_model_field.field_description.as_('field_description'),
ir_model_field.ttype.as_('ttype'),
ir_model_field.relation.as_('relation'),
ir_model_field.module.as_('module'),
ir_model_field.help.as_('help'),
where=ir_model.model == model.__name__))
model_fields = dict((f['name'], f) for f in cursor.dictfetchall())
for field_name, field in model._fields.iteritems():
if hasattr(field, 'model_name'):
relation = field.model_name
elif hasattr(field, 'relation_name'):
relation = field.relation_name
else:
relation = None
if field_name not in model_fields:
cursor.execute(*ir_model_field.insert(
[ir_model_field.model, ir_model_field.name,
ir_model_field.field_description,
ir_model_field.ttype, ir_model_field.relation,
ir_model_field.help, ir_model_field.module],
[[model_id, field_name, field.string, field._type,
relation, field.help, module_name]]))
elif (model_fields[field_name]['field_description'] != field.string
or model_fields[field_name]['ttype'] != field._type
or model_fields[field_name]['relation'] != relation
or model_fields[field_name]['help'] != field.help):
cursor.execute(*ir_model_field.update(
[ir_model_field.field_description,
ir_model_field.ttype, ir_model_field.relation,
ir_model_field.help],
[field.string, field._type, relation, field.help],
where=ir_model_field.id ==
model_fields[field_name]['id']))
# Clean ir_model_field from field that are no more existing.
for field_name in model_fields:
if model_fields[field_name]['module'] == module_name \
and field_name not in model._fields:
                # XXX This deletes the field even when it is defined later
# in the module
cursor.execute(*ir_model_field.delete(
where=ir_model_field.id ==
model_fields[field_name]['id']))
@staticmethod
def default_name():
return 'No Name'
@staticmethod
def default_field_description():
return 'No description available'
@classmethod
def validate(cls, fields):
super(ModelField, cls).validate(fields)
cls.check_name(fields)
@classmethod
def check_name(cls, fields):
'''
Check name
'''
for field in fields:
if not IDENTIFIER.match(field.name):
cls.raise_user_error('invalid_name', (field.name,))
@classmethod
def read(cls, ids, fields_names=None):
pool = Pool()
Translation = pool.get('ir.translation')
Model = pool.get('ir.model')
to_delete = []
if Transaction().context.get('language'):
if fields_names is None:
fields_names = cls._fields.keys()
if 'field_description' in fields_names \
or 'help' in fields_names:
if 'model' not in fields_names:
fields_names.append('model')
to_delete.append('model')
if 'name' not in fields_names:
fields_names.append('name')
to_delete.append('name')
res = super(ModelField, cls).read(ids, fields_names=fields_names)
if (Transaction().context.get('language')
and ('field_description' in fields_names
or 'help' in fields_names)):
model_ids = set()
for rec in res:
if isinstance(rec['model'], (list, tuple)):
model_ids.add(rec['model'][0])
else:
model_ids.add(rec['model'])
model_ids = list(model_ids)
cursor = Transaction().cursor
model = Model.__table__()
cursor.execute(*model.select(model.id, model.model,
where=model.id.in_(model_ids)))
id2model = dict(cursor.fetchall())
trans_args = []
for rec in res:
if isinstance(rec['model'], (list, tuple)):
model_id = rec['model'][0]
else:
model_id = rec['model']
if 'field_description' in fields_names:
trans_args.append((id2model[model_id] + ',' + rec['name'],
'field', Transaction().language, None))
if 'help' in fields_names:
trans_args.append((id2model[model_id] + ',' + rec['name'],
'help', Transaction().language, None))
Translation.get_sources(trans_args)
for rec in res:
if isinstance(rec['model'], (list, tuple)):
model_id = rec['model'][0]
else:
model_id = rec['model']
if 'field_description' in fields_names:
res_trans = Translation.get_source(
id2model[model_id] + ',' + rec['name'],
'field', Transaction().language)
if res_trans:
rec['field_description'] = res_trans
if 'help' in fields_names:
res_trans = Translation.get_source(
id2model[model_id] + ',' + rec['name'],
'help', Transaction().language)
if res_trans:
rec['help'] = res_trans
if to_delete:
for rec in res:
for field in to_delete:
del rec[field]
return res
class ModelAccess(ModelSQL, ModelView):
"Model access"
__name__ = 'ir.model.access'
_rec_name = 'model'
model = fields.Many2One('ir.model', 'Model', required=True,
ondelete="CASCADE")
group = fields.Many2One('res.group', 'Group',
ondelete="CASCADE")
perm_read = fields.Boolean('Read Access')
perm_write = fields.Boolean('Write Access')
perm_create = fields.Boolean('Create Access')
perm_delete = fields.Boolean('Delete Access')
description = fields.Text('Description')
_get_access_cache = Cache('ir_model_access.get_access', context=False)
@classmethod
def __setup__(cls):
super(ModelAccess, cls).__setup__()
cls._error_messages.update({
'read': 'You can not read this document! (%s)',
'write': 'You can not write in this document! (%s)',
'create': 'You can not create this kind of document! (%s)',
'delete': 'You can not delete this document! (%s)',
})
cls.__rpc__.update({
'get_access': RPC(),
})
@classmethod
def __register__(cls, module_name):
TableHandler = backend.get('TableHandler')
cursor = Transaction().cursor
super(ModelAccess, cls).__register__(module_name)
table = TableHandler(cursor, cls, module_name)
# Migration from 2.6 (model, group) no more unique
table.drop_constraint('model_group_uniq')
@staticmethod
def check_xml_record(accesses, values):
return True
@staticmethod
def default_perm_read():
return False
@staticmethod
def default_perm_write():
return False
<|fim▁hole|> return False
@staticmethod
def default_perm_delete():
return False
@classmethod
def get_access(cls, models):
'Return access for models'
# root user above constraint
if Transaction().user == 0:
return defaultdict(lambda: defaultdict(lambda: True))
pool = Pool()
Model = pool.get('ir.model')
UserGroup = pool.get('res.user-res.group')
cursor = Transaction().cursor
user = Transaction().user
model_access = cls.__table__()
ir_model = Model.__table__()
user_group = UserGroup.__table__()
access = {}
for model in models:
maccess = cls._get_access_cache.get((user, model), default=-1)
if maccess == -1:
break
access[model] = maccess
else:
return access
default = {'read': True, 'write': True, 'create': True, 'delete': True}
access = dict((m, default) for m in models)
cursor.execute(*model_access.join(ir_model, 'LEFT',
condition=model_access.model == ir_model.id
).join(user_group, 'LEFT',
condition=user_group.group == model_access.group
).select(
ir_model.model,
Max(Case((model_access.perm_read == True, 1), else_=0)),
Max(Case((model_access.perm_write == True, 1), else_=0)),
Max(Case((model_access.perm_create == True, 1), else_=0)),
Max(Case((model_access.perm_delete == True, 1), else_=0)),
where=ir_model.model.in_(models)
& ((user_group.user == user) | (model_access.group == Null)),
group_by=ir_model.model))
access.update(dict(
(m, {'read': r, 'write': w, 'create': c, 'delete': d})
for m, r, w, c, d in cursor.fetchall()))
for model, maccess in access.iteritems():
cls._get_access_cache.set((user, model), maccess)
return access
@classmethod
def check(cls, model_name, mode='read', raise_exception=True):
'Check access for model_name and mode'
assert mode in ['read', 'write', 'create', 'delete'], \
'Invalid access mode for security'
if ((Transaction().user == 0)
or (raise_exception
and not Transaction().context.get('_check_access'))):
return True
access = cls.get_access([model_name])[model_name][mode]
if not access and access is not None:
if raise_exception:
cls.raise_user_error(mode, model_name)
else:
return False
return True
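    # Hedged usage sketch (the model name is illustrative):
    #   ModelAccess.check('party.party', mode='write')  # raises when denied
    #   readable = ModelAccess.check('party.party', 'read', raise_exception=False)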
@classmethod
def check_relation(cls, model_name, field_name, mode='read'):
'Check access to relation field for model_name and mode'
pool = Pool()
Model = pool.get(model_name)
field = getattr(Model, field_name)
if field._type in ('one2many', 'many2one'):
return cls.check(field.model_name, mode=mode,
raise_exception=False)
elif field._type in ('many2many', 'one2one'):
if (field.target
and not cls.check(field.target, mode=mode,
raise_exception=False)):
return False
elif (field.relation_name
and not cls.check(field.relation_name, mode=mode,
raise_exception=False)):
return False
else:
return True
elif field._type == 'reference':
selection = field.selection
if isinstance(selection, basestring):
sel_func = getattr(Model, field.selection)
if not is_instance_method(Model, field.selection):
selection = sel_func()
else:
# XXX Can not check access right on instance method
selection = []
for model_name, _ in selection:
if not cls.check(model_name, mode=mode,
raise_exception=False):
return False
return True
else:
return True
@classmethod
def write(cls, accesses, values, *args):
super(ModelAccess, cls).write(accesses, values, *args)
# Restart the cache
cls._get_access_cache.clear()
ModelView._fields_view_get_cache.clear()
@classmethod
def create(cls, vlist):
res = super(ModelAccess, cls).create(vlist)
# Restart the cache
cls._get_access_cache.clear()
ModelView._fields_view_get_cache.clear()
return res
@classmethod
def delete(cls, accesses):
super(ModelAccess, cls).delete(accesses)
# Restart the cache
cls._get_access_cache.clear()
ModelView._fields_view_get_cache.clear()
class ModelFieldAccess(ModelSQL, ModelView):
"Model Field Access"
__name__ = 'ir.model.field.access'
_rec_name = 'field'
field = fields.Many2One('ir.model.field', 'Field', required=True,
ondelete='CASCADE')
group = fields.Many2One('res.group', 'Group', ondelete='CASCADE')
perm_read = fields.Boolean('Read Access')
perm_write = fields.Boolean('Write Access')
perm_create = fields.Boolean('Create Access')
perm_delete = fields.Boolean('Delete Access')
description = fields.Text('Description')
_get_access_cache = Cache('ir_model_field_access.check', context=False)
@classmethod
def __setup__(cls):
super(ModelFieldAccess, cls).__setup__()
cls._error_messages.update({
'read': 'You can not read the field! (%s.%s)',
'write': 'You can not write on the field! (%s.%s)',
})
@classmethod
def __register__(cls, module_name):
TableHandler = backend.get('TableHandler')
cursor = Transaction().cursor
super(ModelFieldAccess, cls).__register__(module_name)
table = TableHandler(cursor, cls, module_name)
# Migration from 2.6 (field, group) no more unique
table.drop_constraint('field_group_uniq')
@staticmethod
def check_xml_record(field_accesses, values):
return True
@staticmethod
def default_perm_read():
return False
@staticmethod
def default_perm_write():
return False
@staticmethod
def default_perm_create():
return True
@staticmethod
def default_perm_delete():
return True
@classmethod
def get_access(cls, models):
'Return fields access for models'
# root user above constraint
if Transaction().user == 0:
return defaultdict(lambda: defaultdict(
lambda: defaultdict(lambda: True)))
pool = Pool()
Model = pool.get('ir.model')
ModelField = pool.get('ir.model.field')
UserGroup = pool.get('res.user-res.group')
cursor = Transaction().cursor
user = Transaction().user
field_access = cls.__table__()
ir_model = Model.__table__()
model_field = ModelField.__table__()
user_group = UserGroup.__table__()
accesses = {}
for model in models:
maccesses = cls._get_access_cache.get((user, model))
if maccesses is None:
break
accesses[model] = maccesses
else:
return accesses
default = {}
accesses = dict((m, default) for m in models)
cursor.execute(*field_access.join(model_field,
condition=field_access.field == model_field.id
).join(ir_model,
condition=model_field.model == ir_model.id
).join(user_group, 'LEFT',
condition=user_group.group == field_access.group
).select(
ir_model.model,
model_field.name,
                Max(Case((field_access.perm_read == True, 1), else_=0)),
Max(Case((field_access.perm_write == True, 1), else_=0)),
Max(Case((field_access.perm_create == True, 1), else_=0)),
Max(Case((field_access.perm_delete == True, 1), else_=0)),
where=ir_model.model.in_(models)
& ((user_group.user == user) | (field_access.group == Null)),
group_by=[ir_model.model, model_field.name]))
for m, f, r, w, c, d in cursor.fetchall():
accesses[m][f] = {'read': r, 'write': w, 'create': c, 'delete': d}
for model, maccesses in accesses.iteritems():
cls._get_access_cache.set((user, model), maccesses)
return accesses
@classmethod
def check(cls, model_name, fields, mode='read', raise_exception=True,
access=False):
'''
Check access for fields on model_name.
'''
assert mode in ('read', 'write', 'create', 'delete'), \
'Invalid access mode'
if ((Transaction().user == 0)
or (raise_exception
and not Transaction().context.get('_check_access'))):
if access:
return dict((x, True) for x in fields)
return True
accesses = dict((f, a[mode])
for f, a in cls.get_access([model_name])[model_name].iteritems())
if access:
return accesses
for field in fields:
if not accesses.get(field, True):
if raise_exception:
cls.raise_user_error(mode, (model_name, field))
else:
return False
return True
@classmethod
def write(cls, field_accesses, values, *args):
super(ModelFieldAccess, cls).write(field_accesses, values, *args)
# Restart the cache
cls._get_access_cache.clear()
ModelView._fields_view_get_cache.clear()
@classmethod
def create(cls, vlist):
res = super(ModelFieldAccess, cls).create(vlist)
# Restart the cache
cls._get_access_cache.clear()
ModelView._fields_view_get_cache.clear()
return res
@classmethod
def delete(cls, field_accesses):
super(ModelFieldAccess, cls).delete(field_accesses)
# Restart the cache
cls._get_access_cache.clear()
ModelView._fields_view_get_cache.clear()
class ModelButton(ModelSQL, ModelView):
"Model Button"
__name__ = 'ir.model.button'
name = fields.Char('Name', required=True, readonly=True)
model = fields.Many2One('ir.model', 'Model', required=True, readonly=True,
ondelete='CASCADE', select=True)
groups = fields.Many2Many('ir.model.button-res.group', 'button', 'group',
'Groups')
_groups_cache = Cache('ir.model.button.groups')
@classmethod
def __setup__(cls):
super(ModelButton, cls).__setup__()
table = cls.__table__()
cls._sql_constraints += [
('name_model_uniq', Unique(table, table.name, table.model),
'The button name in model must be unique!'),
]
cls._order.insert(0, ('model', 'ASC'))
@classmethod
def create(cls, vlist):
result = super(ModelButton, cls).create(vlist)
# Restart the cache for get_groups
cls._groups_cache.clear()
return result
@classmethod
def write(cls, buttons, values, *args):
super(ModelButton, cls).write(buttons, values, *args)
# Restart the cache for get_groups
cls._groups_cache.clear()
@classmethod
def delete(cls, buttons):
super(ModelButton, cls).delete(buttons)
# Restart the cache for get_groups
cls._groups_cache.clear()
@classmethod
def get_groups(cls, model, name):
'''
Return a set of group ids for the named button on the model.
'''
key = (model, name)
groups = cls._groups_cache.get(key)
if groups is not None:
return groups
buttons = cls.search([
('model.model', '=', model),
('name', '=', name),
])
if not buttons:
groups = set()
else:
button, = buttons
groups = set(g.id for g in button.groups)
cls._groups_cache.set(key, groups)
return groups
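    # Hedged sketch (button and model names are illustrative):
    #   groups = ModelButton.get_groups('account.invoice', 'post')
    #   allowed = not groups or bool(groups & user_group_ids)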
class ModelData(ModelSQL, ModelView):
"Model data"
__name__ = 'ir.model.data'
fs_id = fields.Char('Identifier on File System', required=True,
help="The id of the record as known on the file system.",
select=True)
model = fields.Char('Model', required=True, select=True)
module = fields.Char('Module', required=True, select=True)
db_id = fields.Integer('Resource ID',
help="The id of the record in the database.", select=True,
required=True)
values = fields.Text('Values')
fs_values = fields.Text('Values on File System')
noupdate = fields.Boolean('No Update')
out_of_sync = fields.Function(fields.Boolean('Out of Sync'),
'get_out_of_sync', searcher='search_out_of_sync')
_get_id_cache = Cache('ir_model_data.get_id', context=False)
@classmethod
def __setup__(cls):
super(ModelData, cls).__setup__()
table = cls.__table__()
cls._sql_constraints = [
('fs_id_module_model_uniq',
Unique(table, table.fs_id, table.module, table.model),
'The triple (fs_id, module, model) must be unique!'),
]
cls._buttons.update({
'sync': {
'invisible': ~Eval('out_of_sync'),
},
})
@classmethod
def __register__(cls, module_name):
TableHandler = backend.get('TableHandler')
cursor = Transaction().cursor
model_data = cls.__table__()
super(ModelData, cls).__register__(module_name)
table = TableHandler(cursor, cls, module_name)
# Migration from 2.6: remove inherit
if table.column_exist('inherit'):
cursor.execute(*model_data.delete(
where=model_data.inherit == True))
table.drop_column('inherit', True)
@staticmethod
def default_noupdate():
return False
def get_out_of_sync(self, name):
return self.values != self.fs_values and self.fs_values is not None
@classmethod
def search_out_of_sync(cls, name, clause):
table = cls.__table__()
name, operator, value = clause
Operator = fields.SQL_OPERATORS[operator]
query = table.select(table.id,
where=Operator(
(table.fs_values != table.values) & (table.fs_values != Null),
value))
return [('id', 'in', query)]
@classmethod
def write(cls, data, values, *args):
super(ModelData, cls).write(data, values, *args)
# Restart the cache for get_id
cls._get_id_cache.clear()
@classmethod
def get_id(cls, module, fs_id):
"""
Return for an fs_id the corresponding db_id.
"""
key = (module, fs_id)
id_ = cls._get_id_cache.get(key)
if id_ is not None:
return id_
data = cls.search([
('module', '=', module),
('fs_id', '=', fs_id),
], limit=1)
if not data:
raise Exception("Reference to %s not found"
% ".".join([module, fs_id]))
id_ = cls.read([d.id for d in data], ['db_id'])[0]['db_id']
cls._get_id_cache.set(key, id_)
return id_
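    # Hedged example (the module/fs_id pair is illustrative):
    #   admin_group_id = ModelData.get_id('res', 'group_admin')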
@classmethod
def dump_values(cls, values):
return json.dumps(sorted(values.iteritems()), cls=JSONEncoder)
@classmethod
def load_values(cls, values):
try:
return dict(json.loads(values, object_hook=JSONDecoder()))
except ValueError:
# Migration from 3.2
from decimal import Decimal
import datetime
return eval(values, {
'Decimal': Decimal,
'datetime': datetime,
})
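    # Hedged round-trip sketch:
    #   ModelData.dump_values({'name': u'Foo'})     -> '[["name", "Foo"]]'
    #   ModelData.load_values('[["name", "Foo"]]')  -> {u'name': u'Foo'}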
@classmethod
@ModelView.button
def sync(cls, records):
pool = Pool()
to_write = []
for data in records:
Model = pool.get(data.model)
values = cls.load_values(data.values)
fs_values = cls.load_values(data.fs_values)
# values could be the same once loaded
# if they come from version < 3.2
if values != fs_values:
record = Model(data.db_id)
Model.write([record], fs_values)
values = fs_values
to_write.extend([[data], {
'values': cls.dump_values(values),
}])
if to_write:
cls.write(*to_write)
class PrintModelGraphStart(ModelView):
'Print Model Graph'
__name__ = 'ir.model.print_model_graph.start'
level = fields.Integer('Level', required=True)
filter = fields.Text('Filter', help="Entering a Python "
"Regular Expression will exclude matching models from the graph.")
@staticmethod
def default_level():
return 1
class PrintModelGraph(Wizard):
__name__ = 'ir.model.print_model_graph'
start = StateView('ir.model.print_model_graph.start',
'ir.print_model_graph_start_view_form', [
Button('Cancel', 'end', 'tryton-cancel'),
Button('Print', 'print_', 'tryton-ok', default=True),
])
print_ = StateAction('ir.report_model_graph')
def transition_print_(self):
return 'end'
def do_print_(self, action):
return action, {
'id': Transaction().context.get('active_id'),
'ids': Transaction().context.get('active_ids'),
'level': self.start.level,
'filter': self.start.filter,
}
class ModelGraph(Report):
__name__ = 'ir.model.graph'
@classmethod
def execute(cls, ids, data):
import pydot
pool = Pool()
Model = pool.get('ir.model')
ActionReport = pool.get('ir.action.report')
if not data['filter']:
filter = None
else:
filter = re.compile(data['filter'], re.VERBOSE)
action_report_ids = ActionReport.search([
('report_name', '=', cls.__name__)
])
if not action_report_ids:
            raise Exception('Error', 'Report (%s) not found!' % cls.__name__)
action_report = ActionReport(action_report_ids[0])
models = Model.browse(ids)
graph = pydot.Dot(fontsize="8")
graph.set('center', '1')
graph.set('ratio', 'auto')
cls.fill_graph(models, graph, level=data['level'], filter=filter)
data = graph.create(prog='dot', format='png')
return ('png', fields.Binary.cast(data), False, action_report.name)
@classmethod
def fill_graph(cls, models, graph, level=1, filter=None):
'''
Fills a pydot graph with a models structure.
'''
import pydot
pool = Pool()
Model = pool.get('ir.model')
sub_models = set()
if level > 0:
for model in models:
for field in model.fields:
if field.name in ('create_uid', 'write_uid'):
continue
if field.relation and not graph.get_node(field.relation):
sub_models.add(field.relation)
if sub_models:
model_ids = Model.search([
('model', 'in', list(sub_models)),
])
sub_models = Model.browse(model_ids)
if set(sub_models) != set(models):
cls.fill_graph(sub_models, graph, level=level - 1,
filter=filter)
for model in models:
if filter and re.search(filter, model.model):
continue
label = '"{' + model.model + '\\n'
if model.fields:
label += '|'
for field in model.fields:
if field.name in ('create_uid', 'write_uid',
'create_date', 'write_date', 'id'):
continue
label += '+ ' + field.name + ': ' + field.ttype
if field.relation:
label += ' ' + field.relation
label += '\l'
label += '}"'
node_name = '"%s"' % model.model
node = pydot.Node(node_name, shape='record', label=label)
graph.add_node(node)
for field in model.fields:
if field.name in ('create_uid', 'write_uid'):
continue
if field.relation:
node_name = '"%s"' % field.relation
if not graph.get_node(node_name):
continue
args = {}
tail = model.model
head = field.relation
edge_model_name = '"%s"' % model.model
edge_relation_name = '"%s"' % field.relation
if field.ttype == 'many2one':
edge = graph.get_edge(edge_model_name,
edge_relation_name)
if edge:
continue
args['arrowhead'] = "normal"
elif field.ttype == 'one2many':
edge = graph.get_edge(edge_relation_name,
edge_model_name)
if edge:
continue
args['arrowhead'] = "normal"
tail = field.relation
head = model.model
elif field.ttype == 'many2many':
if graph.get_edge(edge_model_name, edge_relation_name):
continue
if graph.get_edge(edge_relation_name, edge_model_name):
continue
args['arrowtail'] = "inv"
args['arrowhead'] = "inv"
edge = pydot.Edge(str(tail), str(head), **args)
graph.add_edge(edge)<|fim▁end|> | @staticmethod
def default_perm_create(): |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.