prompt (large_string, lengths 70 – 991k) | completion (large_string, lengths 0 – 1.02k)
---|---
<|file_name|>fetchMessages.test.ts<|end_file_name|><|fim▁begin|>import fetchMock from 'fetch-mock-jest';
import { fetchMessages } from '../src/fetchMessages';
import { setupStorage } from './helpers';
describe('fetchMessages()', () => {
beforeEach(() => {
setupStorage();
fetchMock.mock('*', { groups: [], subgroups: [] });
});
afterEach(() => {
fetchMock.reset();
});
it('triggers a fetch', async () => {<|fim▁hole|> expect(fetchMock).toHaveBeenCalledWith(
'https://cdn.jsdelivr.net/npm/emojibase-data@latest/de/messages.json',
{
credentials: 'omit',
mode: 'cors',
redirect: 'error',
},
);
});
});<|fim▁end|> | await fetchMessages('de');
|
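
For context, a minimal sketch of the `fetchMessages` function this row exercises — the CDN URL pattern and the fetch options are taken directly from the assertion above, while the module layout and return handling are assumptions, not the actual emojibase source:

```ts
// Hypothetical implementation inferred from the test above.
export async function fetchMessages(locale: string): Promise<unknown> {
  const response = await fetch(
    `https://cdn.jsdelivr.net/npm/emojibase-data@latest/${locale}/messages.json`,
    { credentials: 'omit', mode: 'cors', redirect: 'error' },
  );

  if (!response.ok) {
    throw new Error(`Failed to load messages for "${locale}"`);
  }

  return response.json();
}
```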
<|file_name|>website_class.py<|end_file_name|><|fim▁begin|>"""
The singleton class that allows metadata and other attachables to be
attached to the entire website.
<|fim▁hole|>
from django.conf import settings
from django.contrib.sites.models import Site
from metadata.models import PackageEntry, ImageMetadata, TextMetadata
from metadata.mixins import MetadataSubjectMixin
class Website(MetadataSubjectMixin):
"""
Class representing the website itself.
This does not hold any data on its own, so in order to acquire a
website object for running metadata queries, just instantiate Website(request).
"""
def __init__(self, request):
"""
Initialises a Website object.
:param request: The HttpRequest object of the current page.
:type request: HttpRequest
:rtype: Website
"""
self.request = request
self.pk = 1 # Needed for the metadata system
def metadata_strands(self):
return {
"text": WebsiteTextMetadata.objects,
"image": WebsiteImageMetadata.objects,
}
def packages(self):
return WebsitePackageEntry.objects
## Template-exposed API ##
def root(self):
"""
Returns the URI of the root of the website, for concatenating
things like STATIC_URL onto it.
Please try decoupling-friendly features such as 'get_absolute_url'
and 'build_absolute_uri' before resorting to this.
"""
return self.request.build_absolute_uri('/').rstrip('/')
def site(self):
"""
Returns the current Django Sites Framework site.
"""
try:
site = Site.objects.get_current()
except Site.DoesNotExist:
site = None
return site
WebsiteTextMetadata = TextMetadata.make_model(
Website,
'website',
table=getattr(
settings,
'WEBSITE_TEXT_METADATA_DB_TABLE',
None
),
id_column=getattr(
settings,
'WEBSITE_TEXT_METADATA_DB_ID_COLUMN',
None
),
fkey=None,
)
WebsiteImageMetadata = ImageMetadata.make_model(
Website,
'website',
table=getattr(
settings,
'WEBSITE_IMAGE_METADATA_DB_TABLE',
None
),
id_column=getattr(
settings,
'WEBSITE_IMAGE_METADATA_DB_ID_COLUMN',
None
),
fkey=None,
)
WebsitePackageEntry = PackageEntry.make_model(
Website,
'website',
table=getattr(
settings,
'WEBSITE_PACKAGE_ENTRY_DB_TABLE',
None
),
id_column=getattr(
settings,
'WEBSITE_PACKAGE_ENTRY_DB_ID_COLUMN',
None
),
fkey=None,
)<|fim▁end|> | As the website at this level is one item of data rather than an entire
model, we have to use a singleton class to attach metadata to it.
""" |
<|file_name|>htmlanchorelement.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this<|fim▁hole|> * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::activation::Activatable;
use dom::attr::AttrValue;
use dom::bindings::codegen::Bindings::AttrBinding::AttrMethods;
use dom::bindings::codegen::Bindings::HTMLAnchorElementBinding;
use dom::bindings::codegen::Bindings::HTMLAnchorElementBinding::HTMLAnchorElementMethods;
use dom::bindings::codegen::Bindings::MouseEventBinding::MouseEventMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{JS, MutNullableHeap, Root};
use dom::document::Document;
use dom::domtokenlist::DOMTokenList;
use dom::element::Element;
use dom::event::Event;
use dom::eventtarget::EventTarget;
use dom::htmlelement::HTMLElement;
use dom::htmlimageelement::HTMLImageElement;
use dom::mouseevent::MouseEvent;
use dom::node::{Node, document_from_node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use num::ToPrimitive;
use std::default::Default;
use string_cache::Atom;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLAnchorElement {
htmlelement: HTMLElement,
rel_list: MutNullableHeap<JS<DOMTokenList>>,
}
impl HTMLAnchorElement {
fn new_inherited(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> HTMLAnchorElement {
HTMLAnchorElement {
htmlelement:
HTMLElement::new_inherited(localName, prefix, document),
rel_list: Default::default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLAnchorElement> {
let element = HTMLAnchorElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLAnchorElementBinding::Wrap)
}
}
impl VirtualMethods for HTMLAnchorElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("rel") => AttrValue::from_serialized_tokenlist(value),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
impl HTMLAnchorElementMethods for HTMLAnchorElement {
// https://html.spec.whatwg.org/multipage/#dom-a-text
fn Text(&self) -> DOMString {
self.upcast::<Node>().GetTextContent().unwrap()
}
// https://html.spec.whatwg.org/multipage/#dom-a-text
fn SetText(&self, value: DOMString) {
self.upcast::<Node>().SetTextContent(Some(value))
}
// https://html.spec.whatwg.org/multipage/#dom-a-rellist
fn RelList(&self) -> Root<DOMTokenList> {
self.rel_list.or_init(|| {
DOMTokenList::new(self.upcast(), &atom!("rel"))
})
}
// https://html.spec.whatwg.org/multipage/#dom-a-coords
make_getter!(Coords, "coords");
// https://html.spec.whatwg.org/multipage/#dom-a-coords
make_setter!(SetCoords, "coords");
// https://html.spec.whatwg.org/multipage/#dom-a-name
make_getter!(Name, "name");
// https://html.spec.whatwg.org/multipage/#dom-a-name
make_setter!(SetName, "name");
// https://html.spec.whatwg.org/multipage/#dom-a-rev
make_getter!(Rev, "rev");
// https://html.spec.whatwg.org/multipage/#dom-a-rev
make_setter!(SetRev, "rev");
// https://html.spec.whatwg.org/multipage/#dom-a-shape
make_getter!(Shape, "shape");
// https://html.spec.whatwg.org/multipage/#dom-a-shape
make_setter!(SetShape, "shape");
}
impl Activatable for HTMLAnchorElement {
fn as_element(&self) -> &Element {
self.upcast::<Element>()
}
fn is_instance_activatable(&self) -> bool {
// https://html.spec.whatwg.org/multipage/#hyperlink
// "a [...] element[s] with an href attribute [...] must [..] create a
// hyperlink"
// https://html.spec.whatwg.org/multipage/#the-a-element
// "The activation behaviour of a elements *that create hyperlinks*"
self.upcast::<Element>().has_attribute(&atom!("href"))
}
//TODO:https://html.spec.whatwg.org/multipage/#the-a-element
fn pre_click_activation(&self) {
}
//TODO:https://html.spec.whatwg.org/multipage/#the-a-element
// https://html.spec.whatwg.org/multipage/#run-canceled-activation-steps
fn canceled_activation(&self) {
}
//https://html.spec.whatwg.org/multipage/#the-a-element:activation-behaviour
fn activation_behavior(&self, event: &Event, target: &EventTarget) {
//Step 1. If the node document is not fully active, abort.
let doc = document_from_node(self);
if !doc.is_fully_active() {
return;
}
//TODO: Step 2. Check if browsing context is specified and act accordingly.
//Step 3. Handle <img ismap/>.
let element = self.upcast::<Element>();
let mouse_event = event.downcast::<MouseEvent>().unwrap();
let mut ismap_suffix = None;
if let Some(element) = target.downcast::<Element>() {
if target.is::<HTMLImageElement>() && element.has_attribute(&atom!("ismap")) {
let target_node = element.upcast::<Node>();
let rect = window_from_node(target_node).content_box_query(
target_node.to_trusted_node_address());
ismap_suffix = Some(
format!("?{},{}", mouse_event.ClientX().to_f32().unwrap() - rect.origin.x.to_f32_px(),
mouse_event.ClientY().to_f32().unwrap() - rect.origin.y.to_f32_px())
)
}
}
// Step 4.
//TODO: Download the link if the `download` attribute is set.
follow_hyperlink(element, ismap_suffix);
}
//TODO:https://html.spec.whatwg.org/multipage/#the-a-element
fn implicit_submission(&self, _ctrlKey: bool, _shiftKey: bool, _altKey: bool, _metaKey: bool) {
}
}
/// https://html.spec.whatwg.org/multipage/#following-hyperlinks-2
fn follow_hyperlink(subject: &Element, hyperlink_suffix: Option<String>) {
// Step 1: replace.
// Step 2: source browsing context.
// Step 3: target browsing context.
// Step 4.
let attribute = subject.get_attribute(&ns!(), &atom!("href")).unwrap();
let mut href = attribute.Value();
// Step 6.
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=28925
if let Some(suffix) = hyperlink_suffix {
href.push_str(&suffix);
}
// Step 4-5.
let document = document_from_node(subject);
let url = match document.url().join(&href) {
Ok(url) => url,
Err(_) => return,
};
// Step 7.
debug!("following hyperlink to {}", url.serialize());
let window = document.window();
window.load_url(url);
}<|fim▁end|> | |
<|file_name|>manage.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from os.path import dirname
import os
import sys
<|fim▁hole|> sample_dir = dirname(os.path.abspath(__file__))
root = dirname(dirname(sample_dir))
sys.path.append(root)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sampleproj.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)<|fim▁end|> | if __name__ == "__main__": |
<|file_name|>int_macros.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![macro_escape]
#![doc(hidden)]
macro_rules! int_module (($T:ty, $bits:expr) => (
// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of
// calling the `mem::size_of` function.
pub static BITS : uint = $bits;
// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of
// calling the `mem::size_of` function.
pub static BYTES : uint = ($bits / 8);
// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of
// calling the `Bounded::min_value` function.
pub static MIN: $T = (-1 as $T) << (BITS - 1);
// FIXME(#9837): Compute MIN like this so the high bits that shouldn't exist are 0.
// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of
// calling the `Bounded::max_value` function.
pub static MAX: $T = !MIN;
impl CheckedDiv for $T {
#[inline]
fn checked_div(&self, v: &$T) -> Option<$T> {
if *v == 0 || (*self == MIN && *v == -1) {
None
} else {
Some(self / *v)
}
}
}
impl Num for $T {}
#[cfg(not(test))]
impl Ord for $T {
#[inline]
fn lt(&self, other: &$T) -> bool { return (*self) < (*other); }
}
#[cfg(not(test))]
impl Eq for $T {
#[inline]
fn eq(&self, other: &$T) -> bool { return (*self) == (*other); }
}
impl Default for $T {
#[inline]
fn default() -> $T { 0 }
}
impl Zero for $T {
#[inline]
fn zero() -> $T { 0 }
#[inline]
fn is_zero(&self) -> bool { *self == 0 }
}
impl One for $T {
#[inline]
fn one() -> $T { 1 }
}
#[cfg(not(test))]
impl Add<$T,$T> for $T {
#[inline]
fn add(&self, other: &$T) -> $T { *self + *other }
}
#[cfg(not(test))]
impl Sub<$T,$T> for $T {
#[inline]
fn sub(&self, other: &$T) -> $T { *self - *other }
}
#[cfg(not(test))]
impl Mul<$T,$T> for $T {
#[inline]
fn mul(&self, other: &$T) -> $T { *self * *other }
}
#[cfg(not(test))]
impl Div<$T,$T> for $T {
/// Integer division, truncated towards 0.
///
/// # Examples
///
/// ~~~
/// assert!( 8 / 3 == 2);
/// assert!( 8 / -3 == -2);
/// assert!(-8 / 3 == -2);
/// assert!(-8 / -3 == 2);
///
/// assert!( 1 / 2 == 0);
/// assert!( 1 / -2 == 0);
/// assert!(-1 / 2 == 0);
/// assert!(-1 / -2 == 0);
/// ~~~
#[inline]
fn div(&self, other: &$T) -> $T { *self / *other }
}
#[cfg(not(test))]
impl Rem<$T,$T> for $T {
/// Returns the integer remainder after division, satisfying:
///
/// ~~~
/// # let n = 1;
/// # let d = 2;
/// assert!((n / d) * d + (n % d) == n)
/// ~~~
///
/// # Examples
///
/// ~~~
/// assert!( 8 % 3 == 2);
/// assert!( 8 % -3 == 2);
/// assert!(-8 % 3 == -2);
/// assert!(-8 % -3 == -2);
///
/// assert!( 1 % 2 == 1);
/// assert!( 1 % -2 == 1);
/// assert!(-1 % 2 == -1);
/// assert!(-1 % -2 == -1);
/// ~~~
#[inline]
fn rem(&self, other: &$T) -> $T { *self % *other }
}
#[cfg(not(test))]
impl Neg<$T> for $T {
#[inline]
fn neg(&self) -> $T { -*self }
}
impl Signed for $T {
/// Computes the absolute value
#[inline]
fn abs(&self) -> $T {
if self.is_negative() { -*self } else { *self }
}
///
/// The positive difference of two numbers. Returns `0` if the number is less than or
/// equal to `other`; otherwise the difference between `self` and `other` is returned.
///
#[inline]
fn abs_sub(&self, other: &$T) -> $T {
if *self <= *other { 0 } else { *self - *other }
}
///
/// # Returns
///
/// - `0` if the number is zero
/// - `1` if the number is positive
/// - `-1` if the number is negative
///
#[inline]
fn signum(&self) -> $T {
match *self {
n if n > 0 => 1,
0 => 0,
_ => -1,
}
}
/// Returns true if the number is positive
#[inline]
fn is_positive(&self) -> bool { *self > 0 }
/// Returns true if the number is negative
#[inline]
fn is_negative(&self) -> bool { *self < 0 }
}
#[cfg(not(test))]
impl BitOr<$T,$T> for $T {
#[inline]
fn bitor(&self, other: &$T) -> $T { *self | *other }
}
#[cfg(not(test))]
impl BitAnd<$T,$T> for $T {
#[inline]
fn bitand(&self, other: &$T) -> $T { *self & *other }
}
#[cfg(not(test))]
impl BitXor<$T,$T> for $T {
#[inline]
fn bitxor(&self, other: &$T) -> $T { *self ^ *other }
}
#[cfg(not(test))]
impl Shl<$T,$T> for $T {
#[inline]
fn shl(&self, other: &$T) -> $T { *self << *other }
}
#[cfg(not(test))]
impl Shr<$T,$T> for $T {
#[inline]
fn shr(&self, other: &$T) -> $T { *self >> *other }
}
#[cfg(not(test))]
impl Not<$T> for $T {
#[inline]
fn not(&self) -> $T { !*self }
}
impl Bounded for $T {
#[inline]
fn min_value() -> $T { MIN }
#[inline]
fn max_value() -> $T { MAX }
}
impl Int for $T {}
impl Primitive for $T {}
// String conversion functions and impl str -> num
/// Parse a byte slice as a number in the given base.
#[inline]
pub fn parse_bytes(buf: &[u8], radix: uint) -> Option<$T> {
strconv::from_str_bytes_common(buf, radix, true, false, false,
strconv::ExpNone, false, false)
}
impl FromStr for $T {
#[inline]
fn from_str(s: &str) -> Option<$T> {
strconv::from_str_common(s, 10u, true, false, false,
strconv::ExpNone, false, false)
}
}
impl FromStrRadix for $T {
#[inline]
fn from_str_radix(s: &str, radix: uint) -> Option<$T> {
strconv::from_str_common(s, radix, true, false, false,
strconv::ExpNone, false, false)
}
}
// String conversion functions and impl num -> str
/// Convert to a string as a byte slice in a given base.
#[inline]
pub fn to_str_bytes<U>(n: $T, radix: uint, f: |v: &[u8]| -> U) -> U {
// The radix can be as low as 2, so we need at least 64 characters for a
// base 2 number, and then we need another for a possible '-' character.
let mut buf = [0u8, ..65];
let mut cur = 0;
strconv::int_to_str_bytes_common(n, radix, strconv::SignNeg, |i| {
buf[cur] = i;
cur += 1;
});
f(buf.slice(0, cur))
}
impl ToStrRadix for $T {
/// Convert to a string in a given base.
#[inline]
fn to_str_radix(&self, radix: uint) -> ~str {
let mut buf: ~[u8] = ~[];
strconv::int_to_str_bytes_common(*self, radix, strconv::SignNeg, |i| {
buf.push(i);
});
// We know we generated valid utf-8, so we don't need to go through that
// check.
unsafe { str::raw::from_utf8_owned(buf) }
}
}
#[cfg(test)]
mod tests {
use prelude::*;
use super::*;
use int;<|fim▁hole|> use num;
use num::Bitwise;
use num::CheckedDiv;
use num::ToStrRadix;
#[test]
fn test_overflows() {
assert!(MAX > 0);
assert!(MIN <= 0);
assert_eq!(MIN + MAX + 1, 0);
}
#[test]
fn test_num() {
num::test_num(10 as $T, 2 as $T);
}
#[test]
pub fn test_abs() {
assert_eq!((1 as $T).abs(), 1 as $T);
assert_eq!((0 as $T).abs(), 0 as $T);
assert_eq!((-1 as $T).abs(), 1 as $T);
}
#[test]
fn test_abs_sub() {
assert_eq!((-1 as $T).abs_sub(&(1 as $T)), 0 as $T);
assert_eq!((1 as $T).abs_sub(&(1 as $T)), 0 as $T);
assert_eq!((1 as $T).abs_sub(&(0 as $T)), 1 as $T);
assert_eq!((1 as $T).abs_sub(&(-1 as $T)), 2 as $T);
}
#[test]
fn test_signum() {
assert_eq!((1 as $T).signum(), 1 as $T);
assert_eq!((0 as $T).signum(), 0 as $T);
assert_eq!((-0 as $T).signum(), 0 as $T);
assert_eq!((-1 as $T).signum(), -1 as $T);
}
#[test]
fn test_is_positive() {
assert!((1 as $T).is_positive());
assert!(!(0 as $T).is_positive());
assert!(!(-0 as $T).is_positive());
assert!(!(-1 as $T).is_positive());
}
#[test]
fn test_is_negative() {
assert!(!(1 as $T).is_negative());
assert!(!(0 as $T).is_negative());
assert!(!(-0 as $T).is_negative());
assert!((-1 as $T).is_negative());
}
#[test]
fn test_bitwise() {
assert_eq!(0b1110 as $T, (0b1100 as $T).bitor(&(0b1010 as $T)));
assert_eq!(0b1000 as $T, (0b1100 as $T).bitand(&(0b1010 as $T)));
assert_eq!(0b0110 as $T, (0b1100 as $T).bitxor(&(0b1010 as $T)));
assert_eq!(0b1110 as $T, (0b0111 as $T).shl(&(1 as $T)));
assert_eq!(0b0111 as $T, (0b1110 as $T).shr(&(1 as $T)));
assert_eq!(-(0b11 as $T) - (1 as $T), (0b11 as $T).not());
}
#[test]
fn test_count_ones() {
assert_eq!((0b0101100 as $T).count_ones(), 3);
assert_eq!((0b0100001 as $T).count_ones(), 2);
assert_eq!((0b1111001 as $T).count_ones(), 5);
}
#[test]
fn test_count_zeros() {
assert_eq!((0b0101100 as $T).count_zeros(), BITS as $T - 3);
assert_eq!((0b0100001 as $T).count_zeros(), BITS as $T - 2);
assert_eq!((0b1111001 as $T).count_zeros(), BITS as $T - 5);
}
#[test]
fn test_from_str() {
assert_eq!(from_str::<$T>("0"), Some(0 as $T));
assert_eq!(from_str::<$T>("3"), Some(3 as $T));
assert_eq!(from_str::<$T>("10"), Some(10 as $T));
assert_eq!(from_str::<i32>("123456789"), Some(123456789 as i32));
assert_eq!(from_str::<$T>("00100"), Some(100 as $T));
assert_eq!(from_str::<$T>("-1"), Some(-1 as $T));
assert_eq!(from_str::<$T>("-3"), Some(-3 as $T));
assert_eq!(from_str::<$T>("-10"), Some(-10 as $T));
assert_eq!(from_str::<i32>("-123456789"), Some(-123456789 as i32));
assert_eq!(from_str::<$T>("-00100"), Some(-100 as $T));
assert!(from_str::<$T>(" ").is_none());
assert!(from_str::<$T>("x").is_none());
}
#[test]
fn test_parse_bytes() {
use str::StrSlice;
assert_eq!(parse_bytes("123".as_bytes(), 10u), Some(123 as $T));
assert_eq!(parse_bytes("1001".as_bytes(), 2u), Some(9 as $T));
assert_eq!(parse_bytes("123".as_bytes(), 8u), Some(83 as $T));
assert_eq!(i32::parse_bytes("123".as_bytes(), 16u), Some(291 as i32));
assert_eq!(i32::parse_bytes("ffff".as_bytes(), 16u), Some(65535 as i32));
assert_eq!(i32::parse_bytes("FFFF".as_bytes(), 16u), Some(65535 as i32));
assert_eq!(parse_bytes("z".as_bytes(), 36u), Some(35 as $T));
assert_eq!(parse_bytes("Z".as_bytes(), 36u), Some(35 as $T));
assert_eq!(parse_bytes("-123".as_bytes(), 10u), Some(-123 as $T));
assert_eq!(parse_bytes("-1001".as_bytes(), 2u), Some(-9 as $T));
assert_eq!(parse_bytes("-123".as_bytes(), 8u), Some(-83 as $T));
assert_eq!(i32::parse_bytes("-123".as_bytes(), 16u), Some(-291 as i32));
assert_eq!(i32::parse_bytes("-ffff".as_bytes(), 16u), Some(-65535 as i32));
assert_eq!(i32::parse_bytes("-FFFF".as_bytes(), 16u), Some(-65535 as i32));
assert_eq!(parse_bytes("-z".as_bytes(), 36u), Some(-35 as $T));
assert_eq!(parse_bytes("-Z".as_bytes(), 36u), Some(-35 as $T));
assert!(parse_bytes("Z".as_bytes(), 35u).is_none());
assert!(parse_bytes("-9".as_bytes(), 2u).is_none());
}
#[test]
fn test_to_str() {
assert_eq!((0 as $T).to_str_radix(10u), ~"0");
assert_eq!((1 as $T).to_str_radix(10u), ~"1");
assert_eq!((-1 as $T).to_str_radix(10u), ~"-1");
assert_eq!((127 as $T).to_str_radix(16u), ~"7f");
assert_eq!((100 as $T).to_str_radix(10u), ~"100");
}
#[test]
fn test_int_to_str_overflow() {
let mut i8_val: i8 = 127_i8;
assert_eq!(i8_val.to_str(), ~"127");
i8_val += 1 as i8;
assert_eq!(i8_val.to_str(), ~"-128");
let mut i16_val: i16 = 32_767_i16;
assert_eq!(i16_val.to_str(), ~"32767");
i16_val += 1 as i16;
assert_eq!(i16_val.to_str(), ~"-32768");
let mut i32_val: i32 = 2_147_483_647_i32;
assert_eq!(i32_val.to_str(), ~"2147483647");
i32_val += 1 as i32;
assert_eq!(i32_val.to_str(), ~"-2147483648");
let mut i64_val: i64 = 9_223_372_036_854_775_807_i64;
assert_eq!(i64_val.to_str(), ~"9223372036854775807");
i64_val += 1 as i64;
assert_eq!(i64_val.to_str(), ~"-9223372036854775808");
}
#[test]
fn test_int_from_str_overflow() {
let mut i8_val: i8 = 127_i8;
assert_eq!(from_str::<i8>("127"), Some(i8_val));
assert!(from_str::<i8>("128").is_none());
i8_val += 1 as i8;
assert_eq!(from_str::<i8>("-128"), Some(i8_val));
assert!(from_str::<i8>("-129").is_none());
let mut i16_val: i16 = 32_767_i16;
assert_eq!(from_str::<i16>("32767"), Some(i16_val));
assert!(from_str::<i16>("32768").is_none());
i16_val += 1 as i16;
assert_eq!(from_str::<i16>("-32768"), Some(i16_val));
assert!(from_str::<i16>("-32769").is_none());
let mut i32_val: i32 = 2_147_483_647_i32;
assert_eq!(from_str::<i32>("2147483647"), Some(i32_val));
assert!(from_str::<i32>("2147483648").is_none());
i32_val += 1 as i32;
assert_eq!(from_str::<i32>("-2147483648"), Some(i32_val));
assert!(from_str::<i32>("-2147483649").is_none());
let mut i64_val: i64 = 9_223_372_036_854_775_807_i64;
assert_eq!(from_str::<i64>("9223372036854775807"), Some(i64_val));
assert!(from_str::<i64>("9223372036854775808").is_none());
i64_val += 1 as i64;
assert_eq!(from_str::<i64>("-9223372036854775808"), Some(i64_val));
assert!(from_str::<i64>("-9223372036854775809").is_none());
}
#[test]
fn test_signed_checked_div() {
assert_eq!(10i.checked_div(&2), Some(5));
assert_eq!(5i.checked_div(&0), None);
assert_eq!(int::MIN.checked_div(&-1), None);
}
}
))<|fim▁end|> | use i32; |
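
For orientation, the macro above was instantiated once per signed integer type in the pre-1.0 standard library — a sketch of the call site, assuming the era's per-type module layout:

```rust
// e.g. in the i16 module (hypothetical reconstruction):
int_module!(i16, 16)  // expands to BITS, BYTES, MIN, MAX and the trait impls above
```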
<|file_name|>read_file.rs<|end_file_name|><|fim▁begin|>/*
* How to read a file.
* Future work: as a variant, we may use the C bindings to call mmap/munmap
*/
use std::io;
use std::result;
/* read the file path by calling the read_whole_file_str function */
fn read_file_whole(path: ~str) -> ~str {
let res = io::read_whole_file_str(&Path(path));
if result::is_err(&res) {
fail!(~"file_reader error: " + result::get_err(&res));
}
res.get()
}
/* read the file path line by line */
fn read_file_lines(path: ~str) -> ~str {<|fim▁hole|> let res = io::file_reader(&Path(path));
if result::is_err(&res) {
fail!(~"file_reader error: " + result::get_err(&res));
}
let mut content = ~"";
let reader = res.get();
loop {
let line = reader.read_line();
if reader.eof() {
break;
}
// read_line does not return the '\n', so we add it
content = content + line + "\n";
}
content
}
fn main() {
let filename = ~"read_file.rs";
//let content = read_file_whole(copy filename);
let content = read_file_lines(copy filename);
io::println("the content of " + filename + " is [\n" + content + "]");
}<|fim▁end|> | |
<|file_name|>svp_cal.js<|end_file_name|><|fim▁begin|>var scp;
var cal_color;
$(document).ready(function(){
scp = angular.element('.main').scope();
$("#div_point").toggle();
//Set default values
cal_color = defaults.cal_color;
//Setup plugins
$("#cal_color").spectrum({
preferredFormat: "hex",
showInput: true,
color: cal_color,
change: setColor,
showButtons: false
});
//Show modal
$('.reveal-modal').css('max-height', $('html').height() - 110 + 'px');
$('#config_modal').reveal();
});
// Reset max-height after window resize
$(window).resize(function() {
$('.reveal-modal').css('max-height', $('html').height() - 110 + 'px');
});
function setColor(color){
//Color picker callback
if(this.id === "cal_color") cal_color = color;
}
function setup(){
//Setup environment before start
$('body').css("background-color", cal_color);
//Update model
$('#cal_color').trigger('input');
//Animate description and target
setTimeout(function() {
$("#div_text" ).fadeOut( "slow", function() {
$( "#div_point" ).fadeIn( "slow", startCalibration);
});
}, 2000);
}
function closeCallback(){
//Configuration modal close callback
$(".main").css("cursor","none");
setup();
}
function calibrationFinished(){
$(".main").css("cursor","pointer");
$("#text").html("Calibration completed");
$( "#div_point" ).fadeOut( "slow", function(){<|fim▁hole|> setTimeout(function() {
window.history.back();
}, 2500);
});
});
}
function startCalibration(){
scp.makeRequest();
}<|fim▁end|> | $("#div_text" ).fadeIn( "slow", function() { |
<|file_name|>import-request.js<|end_file_name|><|fim▁begin|>/**
* Import Request event
* @module tracker/events/import-request
*/
const NError = require('nerror');
const Base = require('./base');
/**
* Import Request event class
*/
class ImportRequest extends Base {
/**
* Create service
* @param {App} app The application
* @param {object} config Configuration
* @param {Logger} logger Logger service
* @param {Registry} registry Registry service
* @param {UserRepository} userRepo User repository
* @param {DaemonRepository} daemonRepo Daemon repository
* @param {PathRepository} pathRepo Path repository
* @param {ConnectionRepository} connectionRepo Connection repository
*/
constructor(app, config, logger, registry, userRepo, daemonRepo, pathRepo, connectionRepo) {
super(app);
this._config = config;
this._logger = logger;
this._registry = registry;
this._userRepo = userRepo;
this._daemonRepo = daemonRepo;
this._pathRepo = pathRepo;
this._connectionRepo = connectionRepo;
}
/**
* Service name is 'tracker.events.importRequest'
* @type {string}
*/
static get provides() {
return 'tracker.events.importRequest';
}
/**
* Dependencies as constructor arguments
* @type {string[]}
*/
static get requires() {
return [
'app',
'config',
'logger',
'registry',
'repositories.user',
'repositories.daemon',
'repositories.path',
'repositories.connection'<|fim▁hole|> /**
* Event name
* @type {string}
*/
get name() {
return 'import_request';
}
/**
* Event handler
* @param {string} id ID of the client
* @param {object} message The message
*/
async handle(id, message) {
let client = this._registry.clients.get(id);
if (!client)
return;
this._logger.debug('import-request', `Got IMPORT REQUEST from ${id}`);
try {
let daemons = [];
if (client.daemonId)
daemons = await this._daemonRepo.find(client.daemonId);
let daemon = daemons.length && daemons[0];
if (!daemon) {
let response = this.tracker.ImportResponse.create({
response: this.tracker.ImportResponse.Result.REJECTED,
});
let reply = this.tracker.ServerMessage.create({
type: this.tracker.ServerMessage.Type.IMPORT_RESPONSE,
messageId: message.messageId,
importResponse: response,
});
let data = this.tracker.ServerMessage.encode(reply).finish();
this._logger.debug('import-request', `Sending REJECTED IMPORT RESPONSE to ${id}`);
return this.tracker.send(id, data);
}
let [paths, connections] = await Promise.all([
this._pathRepo.findByToken(message.importRequest.token),
this._connectionRepo.findByToken(message.importRequest.token)
]);
let path = paths.length && paths[0];
let connection = connections.length && connections[0];
let userId, actingAs;
if (path) {
actingAs = 'client';
userId = path.userId;
} else if (connection) {
actingAs = 'server';
userId = connection.userId;
} else {
let response = this.tracker.ImportResponse.create({
response: this.tracker.ImportResponse.Result.REJECTED,
});
let reply = this.tracker.ServerMessage.create({
type: this.tracker.ServerMessage.Type.IMPORT_RESPONSE,
messageId: message.messageId,
importResponse: response,
});
let data = this.tracker.ServerMessage.encode(reply).finish();
this._logger.debug('import-request', `Sending REJECTED IMPORT RESPONSE to ${id}`);
return this.tracker.send(id, data);
}
let loadConnections = async path => {
let result = [];
let connections = await this._connectionRepo.findByPath(path);
let connection = connections.length && connections[0];
if (connection)
result.push(connection);
let paths = await this._pathRepo.findByParent(path);
let promises = [];
for (let subPath of paths)
promises.push(loadConnections(subPath));
let loaded = await Promise.all(promises);
for (let subConnections of loaded)
result = result.concat(subConnections);
return result;
};
if (actingAs === 'server')
connections = [connection];
else
connections = await loadConnections(path);
let serverConnections = [];
let clientConnections = [];
let value;
let users = await this._userRepo.find(userId);
let user = users.length && users[0];
if (!user) {
value = this.tracker.ImportResponse.Result.REJECTED;
} else {
value = this.tracker.ImportResponse.Result.ACCEPTED;
if (actingAs === 'server') {
let connection = connections.length && connections[0];
if (connection) {
let paths = await this._pathRepo.find(connection.pathId);
let path = paths.length && paths[0];
if (path) {
let clients = [];
for (let clientDaemon of await this._daemonRepo.findByConnection(connection)) {
if (clientDaemon.actingAs !== 'client')
continue;
let clientUsers = await this._userRepo.find(clientDaemon.userId);
let clientUser = clientUsers.length && clientUsers[0];
if (clientUser)
clients.push(clientUser.email + '?' + clientDaemon.name);
}
let {address, port} = this._registry.addressOverride(
connection.connectAddress,
connection.connectPort,
connection.addressOverride,
connection.portOverride
);
serverConnections.push(this.tracker.ServerConnection.create({
name: user.email + path.path,
connectAddress: address,
connectPort: port,
encrypted: connection.encrypted,
fixed: connection.fixed,
clients: clients,
}));
}
}
} else {
for (let connection of connections) {
let paths = await this._pathRepo.find(connection.pathId);
let path = paths.length && paths[0];
if (path) {
let serverDaemons = await this._daemonRepo.findServerByConnection(connection);
let serverDaemon = serverDaemons.length && serverDaemons[0];
let serverUsers = [];
if (serverDaemon)
serverUsers = await this._userRepo.find(serverDaemon.userId);
let serverUser = serverUsers.length && serverUsers[0];
let {address, port} = this._registry.addressOverride(
connection.listenAddress,
connection.listenPort,
connection.addressOverride,
connection.portOverride
);
clientConnections.push(this.tracker.ClientConnection.create({
name: user.email + path.path,
listenAddress: address,
listenPort: port,
encrypted: connection.encrypted,
fixed: connection.fixed,
server: (serverDaemon && serverUser) ? serverUser.email + '?' + serverDaemon.name : '',
}));
}
}
}
}
let list = this.tracker.ConnectionsList.create({
serverConnections: serverConnections,
clientConnections: clientConnections,
});
let response = this.tracker.ImportResponse.create({
response: value,
updates: list,
});
let reply = this.tracker.ServerMessage.create({
type: this.tracker.ServerMessage.Type.IMPORT_RESPONSE,
messageId: message.messageId,
importResponse: response,
});
let data = this.tracker.ServerMessage.encode(reply).finish();
this._logger.debug('import-request', `Sending RESULTING IMPORT RESPONSE to ${id}`);
this.tracker.send(id, data);
} catch (error) {
this._logger.error(new NError(error, 'ImportRequest.handle()'));
}
}
}
module.exports = ImportRequest;<|fim▁end|> | ];
}
|
<|file_name|>SpiderFileUtils.py<|end_file_name|><|fim▁begin|># -*- coding:utf-8 -*-
'''
Created on March 12, 2015
@author: wanhao01
<|fim▁hole|>
import os
class SpiderFileUtils(object):
'''
deal with file related operations.
'''
def __save_page(self, data, url, outputdir):
'''
save the page content with the specific url to the local path.
'''
if(not os.path.exists(outputdir)):
os.makedirs(outputdir)
filename = self.__validate_name(url)
f = open(outputdir + os.sep + filename, 'w')
f.writelines(data)
f.close()
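    def __validate_name(self, url):
        '''
        hypothetical helper inferred from the call in __save_page (it is not
        defined in the original snippet): turn a URL into a string that is
        safe to use as a local filename.
        '''
        return re.sub(r'[^0-9a-zA-Z_.-]', '_', url)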
if __name__ == '__main__':
pass<|fim▁end|> | '''
|
<|file_name|>concat_roles.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
import glob
import os
import shutil
import subprocess
import sys
import yaml
def create_role(role):
ret = subprocess.check_output(
'ansible-galaxy init {}'.format(role).split())
if not ret.strip().endswith('created successfully'):
raise Exception('could not create role "{}"'.format(role))
def get_metadata(role):
try:
main = open(os.path.join(role, 'meta/main.yml'))
return yaml.safe_load(main)
except IOError:
return {}
def ensure_meta(role):
"""Ensure the role has a meta directory"""
try:
os.makedirs(os.path.join(role, 'meta'))
except OSError:
pass
<|fim▁hole|> orig_main = os.path.join(role, 'meta/main.yml')
with open(new_main, 'w') as out:
yaml.dump(metadata, out, default_flow_style=False, explicit_start=True)
os.rename(new_main, orig_main)
def add_dependency(src_role, target_role):
"""Add metadata saying that 'target_role' depends on 'src_role'"""
md = get_metadata(target_role)
deps = md.setdefault('dependencies', [])
deps.append(os.path.join(target_role, 'roles', src_role))
set_metadata(target_role, md)
def sub_roles(role):
try:
return glob.glob(os.path.join(role, 'roles/*'))
except OSError:
return []
def fix_dependency(role, for_destination):
"""Fix the sub-role dependency.
Dependency on a sub-role has to be changed once we move the base
role.
"""
metadata = get_metadata(role)
deps = metadata.setdefault('dependencies', [])
def f(dep):
if dep.startswith(role):
return os.path.join(for_destination, 'roles', dep)
else:
return dep
metadata['dependencies'] = [f(dep) for dep in deps]
set_metadata(role, metadata)
def fix_dependencies(src_role, for_destination):
for role in sub_roles(src_role):
fix_dependencies(role, for_destination)
fix_dependency(src_role, for_destination)
def move(src_role, target_role, copy=False):
op = shutil.copytree if copy else shutil.move
try:
os.makedirs(os.path.join(target_role, 'roles'))
except OSError:
pass
fix_dependencies(src_role, for_destination=target_role)
op(src_role, os.path.join(target_role, 'roles', src_role))
add_dependency(src_role, target_role)
def concat(roles, into, copy=False):
create_role(into)
for role in roles:
move(role, target_role=into, copy=copy)
def test():
roles = ['foo', 'bar', 'spam']
try:
for role in roles:
create_role(role)
move('foo', 'bar')
assert get_metadata('bar')['dependencies'] == ['bar/roles/foo']
move('bar', 'spam')
assert get_metadata('spam')['dependencies'] == ['spam/roles/bar']
assert get_metadata('spam/roles/bar')['dependencies'] == ['spam/roles/bar/roles/foo']
finally:
for role in roles:
shutil.rmtree(role, ignore_errors=True)
def main():
roles_path = None
if roles_path is not None:
os.chdir(roles_path)
concat([sys.argv[1], sys.argv[2]], into=sys.argv[3])
if __name__ == '__main__':
main()<|fim▁end|> |
def set_metadata(role, metadata):
ensure_meta(role)
new_main = os.path.join(role, 'meta/main.yml.new') |
<|file_name|>pubsub.rs<|end_file_name|><|fim▁begin|>use futures_lite::stream::StreamExt;
use lapin::{
options::*, publisher_confirm::Confirmation, types::FieldTable, BasicProperties, Connection,
ConnectionProperties, Result,
};
use tracing::info;
fn main() -> Result<()> {
if std::env::var("RUST_LOG").is_err() {
std::env::set_var("RUST_LOG", "info");
}
tracing_subscriber::fmt::init();
let addr = std::env::var("AMQP_ADDR").unwrap_or_else(|_| "amqp://127.0.0.1:5672/%2f".into());
async_global_executor::block_on(async {
let conn = Connection::connect(
&addr,
ConnectionProperties::default().with_connection_name("pubsub-example".into()),
)
.await?;
info!("CONNECTED");
let channel_a = conn.create_channel().await?;
let channel_b = conn.create_channel().await?;
let queue = channel_a
.queue_declare(
"hello",
QueueDeclareOptions::default(),
FieldTable::default(),
)
.await?;
info!(?queue, "Declared queue");
let mut consumer = channel_b
.basic_consume(
"hello",
"my_consumer",
BasicConsumeOptions::default(),
FieldTable::default(),
)
.await?;
async_global_executor::spawn(async move {
info!("will consume");
while let Some(delivery) = consumer.next().await {
let delivery = delivery.expect("error in consumer");<|fim▁hole|>
let payload = b"Hello world!";
loop {
let confirm = channel_a
.basic_publish(
"",
"hello",
BasicPublishOptions::default(),
payload,
BasicProperties::default(),
)
.await?
.await?;
assert_eq!(confirm, Confirmation::NotRequested);
}
})
}<|fim▁end|> | delivery.ack(BasicAckOptions::default()).await.expect("ack");
}
})
.detach(); |
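
Worth noting about the example above: it multiplexes a single connection across two channels — `channel_a` publishes in the endless `loop` while `channel_b`'s consumer acks deliveries on a detached background task — and it is configured entirely through the `AMQP_ADDR` and `RUST_LOG` environment variables read at the top of `main`.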
<|file_name|>gamemenu.py<|end_file_name|><|fim▁begin|>import pygame
from vec_2d import Vec2d
import time
from types import *
from graphics_helper import Plotter
#------------------------------------------------------------------------
#
# This file is part of Conquer.
#
# Conquer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Conquer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Conquer. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright Conquer Development Team (http://code.google.com/p/pyconquer/)
#
#------------------------------------------------------------------------
class TGameMenu:
def __init__(self, screen, bg_image, logo1, menu_items, start_vec, spacing=50):
# Currently selected menuitem's index
self.pos = 0
# List of menuitems
self.menu_items = menu_items
# Pointer to pygame screen
self.screen = screen
# Coordinates where to render the menu
self.start_vec = start_vec
# Space between menuitems
self.spacing = spacing
# Background picture is oddly here as well the top logo
self.bg = bg_image
self.logo = logo1
self.plotter = Plotter(self.screen)
# Font to be used with the menu
self.used_font = pygame.font.Font("yanone_regular.otf", 24)
self.plotter.cur_font = self.used_font
def draw_items(self, text=None):
# If images and/or text are supplied, draw them
if self.bg:
self.screen.blit(self.bg, (0, 0))
if self.logo:
self.screen.blit(self.logo, (263, 0))
if text:
self.plotter.text_at(text[0], Vec2d(text[1], text[2]),
font=self.used_font, wipe_background=True, color=(255, 255, 255))
# Iterate through menu items
for i, item_i in enumerate(self.menu_items):
# FIXME: skinnable colors
# Unselected menu item color is black
cur_color = (0, 0, 0)
shadow = True
if i == self.pos:
# Selected menu item is red
shadow = False
cur_color = (255, 0, 0)
# Text to be rendered
text = item_i[0]
# Check if menu items are value editors
if len(item_i[2]) >= 2:
if item_i[2][0] == "value_int_editor":
text = "%s (%d)" % (text, item_i[2][1])
if item_i[2][0] == "value_bool_editor":
if item_i[2][1]:
text = "%s (%s)" % (text, "on")
else:
text = "%s (%s)" % (text, "off")
# Draw the menu item text
self.plotter.text_at(text,
self.start_vec + Vec2d(0, self.spacing) * i,
color=cur_color,
wipe_background=False,
drop_shadow=shadow
)
# Caption Text
if self.menu_items[self.pos][3]:
# It has caption text, draw it
self.plotter.text_at(self.menu_items[self.pos][3],
Vec2d(400, 75))
# Some info :)
tmp_color = (50, 185, 10)
self.plotter.text_at("Contact:", Vec2d(400, 520),
color=tmp_color,
wipe_background=False)
self.plotter.text_at("Conquer Dev Team http://pyconquer.googlecode.com/",
Vec2d(400, 545),
color=tmp_color,
wipe_background=False)
def scroll(self, dy):
# Change the selected menu item
self.pos += dy
if self.pos < 0:
self.pos = len(self.menu_items) - 1
if self.pos == len(self.menu_items):
self.pos = 0
def edit_value(self, dv):
# This is totally unreadable :D
# Well it edits values in their border values
if len(self.menu_items[self.pos][2]) >= 2:
if self.menu_items[self.pos][2][0] == "value_int_editor":
self.menu_items[self.pos][2][1] += dv
if len(self.menu_items[self.pos][2]) >= 3:
if self.menu_items[self.pos][2][1] < self.menu_items[self.pos][2][2][0]:
self.menu_items[self.pos][2][1] = self.menu_items[self.pos][2][2][0]
if self.menu_items[self.pos][2][1] > self.menu_items[self.pos][2][2][1]:
self.menu_items[self.pos][2][1] = self.menu_items[self.pos][2][2][1]
if self.menu_items[self.pos][2][0] == "value_bool_editor":
self.menu_items[self.pos][2][1] = not self.menu_items[self.pos][2][1]
def get_selection(self, text=None):
"""
Render the menu as long as user selects a menuitem
text -> optional text to be rendered
"""
# Draw the items
self.draw_items(text)
# Create instance of pygame Clock
clock = pygame.time.Clock()<|fim▁hole|> # Limit fps to 30
clock.tick(30)
# Iterate through events
for e in pygame.event.get():
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_DOWN:
self.scroll(1)
self.draw_items(text)
if e.key == pygame.K_UP:
self.scroll(-1)
self.draw_items(text)
if e.key == pygame.K_RETURN:
choice = self.select()
return choice
if e.key == pygame.K_LEFT:
self.edit_value(-1)
self.draw_items(text)
if e.key == pygame.K_RIGHT:
self.edit_value(1)
self.draw_items(text)
pygame.display.flip()
def select(self):
# User selects a menu item
return self.menu_items[self.pos][1]
# end of class TGameMenu
###################################################################
def text_input(plotter, caption, corner_1, span_vec, fonts, only_numbers=False):
# Make an input-box and prompt it for input
assert isinstance(corner_1, Vec2d)
x1 = corner_1.x
y1 = corner_1.y
assert isinstance(span_vec, Vec2d)
w1 = span_vec.x
h1 = span_vec.y
cur_str = []
pygame.draw.rect(plotter.screen, (30, 30, 30), (x1, y1, w1, h1))
plotter.text_at(caption, Vec2d(x1 + w1 / 4, y1), font=fonts.font_2, wipe_background=False)
pygame.display.flip()
done = False
while not done:
for e in pygame.event.get():
key = None
#e = pygame.event.poll()
if e.type == pygame.NOEVENT:
# event queue is empty
time.sleep(0.1)
continue
if e.type == pygame.KEYDOWN:
key = e.key
else:
continue
if key == pygame.K_BACKSPACE:
if cur_str:
del cur_str[len(cur_str) - 1]
done = True
elif key == pygame.K_RETURN:
done = True
if (key <= 127) and (key != pygame.K_BACKSPACE):
if only_numbers:
if chr(key) in ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0"]:
cur_str.append(chr(key))
else:
cur_str.append(chr(key))
cur_text_pos = Vec2d(x1 + (w1 / 2) - (len(cur_str) * 4), y1 + 15)
cur_font = fonts.font_4
plotter.text_at("".join(cur_str),
cur_text_pos,
wipe_background=False,
font=cur_font)
pygame.display.flip()
return "".join(cur_str)
def load_image_files_but_not_interface_image_files(image_handler, graphics_path):
tmp = pygame.image.load(graphics_path + "skull7.png").convert_alpha()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "skull")
tmp = pygame.image.load(graphics_path + "soldier.png").convert_alpha()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "soldier")
tmp = pygame.image.load(graphics_path + "armytent.png").convert_alpha()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "town")
tmp = pygame.image.load(graphics_path + "hextile2_.png").convert()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "cell_1")
tmp = pygame.image.load(graphics_path + "hextile_.png").convert()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "cell_2")
tmp = pygame.image.load(graphics_path + "hextile3_.png").convert()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "cell_3")
tmp = pygame.image.load(graphics_path + "hextile4_.png").convert()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "cell_4")
tmp = pygame.image.load(graphics_path + "hextile5_.png").convert()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "cell_5")
tmp = pygame.image.load(graphics_path + "hextile6_.png").convert()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "cell_6")
image_handler.add_image(pygame.image.load(graphics_path + "teksti.png").convert(), "logo")
image_handler.add_image(pygame.image.load(graphics_path + "mapedit.png").convert(), "mapedit")
def get_human_and_cpu_count(screen, fonts):
# This is very ugly piece of code.
# It ask for scenario editing and random generated map,
# how many human and cpu players will participate.
max_player = 6
text_pos = Vec2d(800 / 2 - 110, 300)
span_vec = Vec2d(240, 45)
# get number of human players
nr_of_h = 0
while True:
#input_raw = text_input(screen, 'How many human players (1-6)?',
# text_pos, span_vec, fonts, only_numbers=True)
# DEBUG:
input_raw = '2'
try:
nr_of_h = int(input_raw)
except:
continue
if 1 <= nr_of_h <= max_player:
break
# get number of ai players
nr_of_c = 0
min_nr_of_ai = 0
if nr_of_h < max_player:
if nr_of_h == 1:
min_nr_of_ai = 1
while True:
#input_raw = text_input(screen,
# 'How many cpu players (%d-%d)?' % (min_nr_of_ai, max_player - nr_of_h),
# text_pos, span_vec, fonts, only_numbers=True)
# DEBUG:
input_raw = '2'
try:
nr_of_c = int(input_raw)
except:
continue
if min_nr_of_ai <= nr_of_c <= (max_player - nr_of_h):
break
return nr_of_h, nr_of_c<|fim▁end|> |
# Endless loop
while True: |
<|file_name|>test_exception.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from twisted.trial.unittest import TestCase
from txaws.exception import AWSError
from txaws.exception import AWSResponseParseError
from txaws.util import XML
REQUEST_ID = "0ef9fc37-6230-4d81-b2e6-1b36277d4247"
class AWSErrorTestCase(TestCase):
def test_creation(self):
error = AWSError("<dummy1 />", 500, "Server Error", "<dummy2 />")
self.assertEquals(error.status, 500)
self.assertEquals(error.response, "<dummy2 />")
self.assertEquals(error.original, "<dummy1 />")
self.assertEquals(error.errors, [])
self.assertEquals(error.request_id, "")
def test_node_to_dict(self):
xml = "<parent><child1>text1</child1><child2>text2</child2></parent>"
error = AWSError("<dummy />", 400)
data = error._node_to_dict(XML(xml))
self.assertEquals(data, {"child1": "text1", "child2": "text2"})
def test_set_request_id(self):
xml = "<a><b /><RequestID>%s</RequestID></a>" % REQUEST_ID
error = AWSError("<dummy />", 400)
error._set_request_id(XML(xml))
self.assertEquals(error.request_id, REQUEST_ID)
def test_set_host_id(self):
host_id = "ASD@#FDG$E%FG"
xml = "<a><b /><HostID>%s</HostID></a>" % host_id
error = AWSError("<dummy />", 400)
error._set_host_id(XML(xml))
self.assertEquals(error.host_id, host_id)
def test_set_empty_errors(self):
xml = "<a><Errors /><b /></a>"
error = AWSError("<dummy />", 500)
error._set_500_error(XML(xml))
self.assertEquals(error.errors, [])
def test_set_empty_error(self):
xml = "<a><Errors><Error /><Error /></Errors><b /></a>"
error = AWSError("<dummy />", 500)
error._set_500_error(XML(xml))
self.assertEquals(error.errors, [])
def test_parse_without_xml(self):
xml = "<dummy />"
error = AWSError(xml, 400)
error.parse()
self.assertEquals(error.original, xml)
def test_parse_with_xml(self):
xml1 = "<dummy1 />"
xml2 = "<dummy2 />"
error = AWSError(xml1, 400)
error.parse(xml2)
self.assertEquals(error.original, xml2)
def test_parse_html(self):
xml = "<html><body>a page</body></html>"
self.assertRaises(AWSResponseParseError, AWSError, xml, 400)
def test_empty_xml(self):
self.assertRaises(ValueError, AWSError, "", 400)
def test_no_request_id(self):
errors = "<Errors><Error><Code /><Message /></Error></Errors>"
xml = "<Response>%s<RequestID /></Response>" % errors
error = AWSError(xml, 400)
self.assertEquals(error.request_id, "")
def test_no_request_id_node(self):
errors = "<Errors><Error><Code /><Message /></Error></Errors>"
xml = "<Response>%s</Response>" % errors
error = AWSError(xml, 400)
self.assertEquals(error.request_id, "")
def test_no_errors_node(self):
xml = "<Response><RequestID /></Response>"
error = AWSError(xml, 400)
self.assertEquals(error.errors, [])
def test_no_error_node(self):
xml = "<Response><Errors /><RequestID /></Response>"
error = AWSError(xml, 400)
self.assertEquals(error.errors, [])
def test_no_error_code_node(self):
errors = "<Errors><Error><Message /></Error></Errors>"
xml = "<Response>%s<RequestID /></Response>" % errors
error = AWSError(xml, 400)
self.assertEquals(error.errors, [])
def test_no_error_message_node(self):
errors = "<Errors><Error><Code /></Error></Errors>"
xml = "<Response>%s<RequestID /></Response>" % errors
error = AWSError(xml, 400)
self.assertEquals(error.errors, [])
def test_set_500_error(self):
xml = "<Error><Code>500</Code><Message>Oops</Message></Error>"
error = AWSError("<dummy />", 500)
error._set_500_error(XML(xml))
self.assertEquals(error.errors[0]["Code"], "500")
self.assertEquals(error.errors[0]["Message"], "Oops")<|fim▁end|> | # Copyright (c) 2009 Canonical Ltd <[email protected]>
# Licenced under the txaws licence available at /LICENSE in the txaws source.
|
<|file_name|>AbstractCappedContainerHandlerTestHelper.java<|end_file_name|><|fim▁begin|>/**
* #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
* This file is part of the Smart Developer Hub Project:
* http://www.smartdeveloperhub.org/
*
* Center for Open Middleware
* http://www.centeropenmiddleware.com/
* #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
* Copyright (C) 2015-2016 Center for Open Middleware.
* #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
* Artifact : org.smartdeveloperhub.harvesters.scm:scm-harvester-frontend:0.3.0
* Bundle : scm-harvester.war
* #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
*/
package org.smartdeveloperhub.harvesters.scm.frontend.core.util;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.sameInstance;
import static org.junit.Assert.fail;
import java.io.Serializable;
import java.util.Collection;
import java.util.Iterator;
import java.util.Set;
import mockit.Expectations;
import mockit.Mocked;
import org.ldp4j.application.data.DataSet;
import org.ldp4j.application.data.Individual;
import org.ldp4j.application.data.Name;
import org.ldp4j.application.data.NamingScheme;
import org.ldp4j.application.ext.ApplicationRuntimeException;
import org.ldp4j.application.session.ContainerSnapshot;
import org.ldp4j.application.session.ResourceSnapshot;
import org.ldp4j.application.session.WriteSession;
public abstract class AbstractCappedContainerHandlerTestHelper {
private final class CustomDataSet implements DataSet {
@Override
public Iterator<Individual<?, ?>> iterator() {
throw new UnsupportedOperationException("Method should not be invoked");
}
@Override
public Name<?> name() {
throw new UnsupportedOperationException("Method should not be invoked");
}
@Override
public int numberOfIndividuals() {
throw new UnsupportedOperationException("Method should not be invoked");
}
@Override
public boolean hasIndividuals() {
throw new UnsupportedOperationException("Method should not be invoked");
}
@Override
public Collection<? extends Individual<?, ?>> individuals() {
throw new UnsupportedOperationException("Method should not be invoked");
}
@Override
public Set<Serializable> individualIds() {
throw new UnsupportedOperationException("Method should not be invoked");
}
@Override
public boolean hasIndividual(final Object id) {
throw new UnsupportedOperationException("Method should not be invoked");
}
@Override
public <T extends Serializable, S extends Individual<T, S>> S individualOfId(final T id) {
throw new UnsupportedOperationException("Method should not be invoked");
}
@Override
public <T extends Serializable, S extends Individual<T, S>> S individual(final T id, final Class<? extends S> clazz) {
throw new UnsupportedOperationException("Method should not be invoked");
}
@Override
public boolean isEmpty() {
throw new UnsupportedOperationException("Method should not be invoked");
}
@Override
public void remove(final Individual<?, ?> src) {
throw new UnsupportedOperationException("Method should not be invoked");
}
@Override
public String toString() {
return "DATASET";
}
}
@Mocked private ContainerSnapshot container;
@Mocked private WriteSession session;
@Mocked private ResourceSnapshot snapshot;
protected final void verifyGetReturnsEmptyDataset(final AbstractCappedContainerHandler sut) throws Exception {
final Name<String> name=NamingScheme.getDefault().name("id");
new Expectations() {{
AbstractCappedContainerHandlerTestHelper.this.snapshot.name();this.result=name;
}};
final DataSet result = sut.get(this.snapshot);
assertThat((Object)result.name(),sameInstance((Object)name));
assertThat(result.hasIndividuals(),equalTo(false));<|fim▁hole|> sut.create(this.container, new CustomDataSet(), this.session);
fail("Factory method should be disabled");
} catch (final ApplicationRuntimeException e) {
assertThat(e.getMessage().toLowerCase(),equalTo(name+" creation is not supported"));
}
}
}<|fim▁end|> | }
protected final void verifyFactoryMethodIsDisabled(final String name, final AbstractCappedContainerHandler sut) {
try { |
<|file_name|>std_pf_readonly.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals
def execute():
"""Make standard print formats readonly for system manager"""
import webnotes.model.doc
new_perms = [
{
'parent': 'Print Format',
'parentfield': 'permissions',
'parenttype': 'DocType',
'role': 'System Manager', <|fim▁hole|> 'read': 1,
},
{
'parent': 'Print Format',
'parentfield': 'permissions',
'parenttype': 'DocType',
'role': 'Administrator',
'permlevel': 1,
'read': 1,
'write': 1
},
]
for perms in new_perms:
doc = webnotes.model.doc.Document('DocPerm')
doc.fields.update(perms)
doc.save()
webnotes.conn.commit()
webnotes.conn.begin()
webnotes.reload_doc('core', 'doctype', 'print_format')<|fim▁end|> | 'permlevel': 1, |
<|file_name|>ssl_validation_hostname_error.go<|end_file_name|><|fim▁begin|>package pluginerror<|fim▁hole|>// SSLValidationHostnameError replaces x509.HostnameError when the server has
// SSL certificate that does not match the hostname.
type SSLValidationHostnameError struct {
Message string
}
func (e SSLValidationHostnameError) Error() string {
return fmt.Sprintf("Hostname does not match SSL Certificate (%s)", e.Message)
}<|fim▁end|> |
import "fmt"
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from frasco.ext import *
from frasco.assets import expose_package, register_assets_builder
from frasco.utils import join_url_rule
from flask import render_template
import os
import json
import re
import htmlmin
import codecs
class FrascoAngular(Extension):
name = "frasco_angular"
defaults = {"static_dir": None, # defaults to app.static_folder
"static_url_path": None, # defaults to app.static_url_path
"angular_template": "angular_layout.html",
"app_dir": "app",
"services_module": "services",
"services_name": "%s",
"templates_file": None,
"templates_module": "templatesCache",
"templates_search_paths": [],
"disable_templates_cache": None, # app.debug
"templates_matcher": r".*\.html$",
"add_app_dir_in_babel_extract": True}
def _init_app(self, app, state):
require_extension('frasco_assets', app)
expose_package(app, "frasco_angular", __name__)
if not state.options["static_dir"]:
state.options["static_dir"] = app.static_folder
if not state.options["static_url_path"]:
state.options["static_url_path"] = app.static_url_path
state.options['templates_search_paths'].append(
(os.path.join(state.options["static_dir"], state.options['app_dir']), state.options["static_url_path"] + '/' + state.options['app_dir'])
)
if state.options['templates_file']:
register_assets_builder(self.build_templates)
if has_extension('frasco_babel', app) and state.options['add_app_dir_in_babel_extract']:
app.extensions.frasco_babel.add_extract_dir(os.path.join(state.options['static_dir'], state.options['app_dir']),<|fim▁hole|>
@ext_stateful_method
def add_route(self, state, endpoint, rule, decorators=None, **options):
rules = rule if isinstance(rule, (list, tuple)) else [rule]
def func(*args, **kwargs):
return self.angular_view_response()
if decorators:
for decorator in reversed(decorators):
func = decorator(func)
for rule in rules:
self.get_app().add_url_rule(rule, endpoint, func, **options)
def angular_view_response(self):
return render_template(get_extension_state('frasco_angular').options['angular_template'])
@ext_stateful_method
def register_service_builder(self, state, api_version, filename):
def builder():
module = ("/* This file is auto-generated by frasco-angular. DO NOT MODIFY. */\n'use strict';\n"
"\n(function() {\n\nvar services = angular.module('%s', ['frasco']);\n") % state.options["services_module"]
for service in api_version.iter_services():
endpoints = {}
for rule, endpoint, func, options in service.iter_endpoints():
args = []
if hasattr(func, 'request_params'):
for p in reversed(func.request_params):
args.extend(p.names)
endpoints[endpoint] = [_convert_url_args(join_url_rule(service.url_prefix, rule)), args]
module += ("\nservices.factory('%s', ['frascoServiceFactory', function(frascoServiceFactory) {\n"
"return frascoServiceFactory.make('%s', '%s', [], %s);\n}]);\n") % \
(state.options['services_name'] % service.name, service.name, api_version.url_prefix,
json.dumps(endpoints, indent=2))
module += "\n})();"
_write_file(os.path.join(state.options["static_dir"], state.options["app_dir"], filename), module)
register_assets_builder(builder)
@ext_stateful_method
def build_templates(self, state):
module = [("/* This file is auto-generated by frasco-angular. DO NOT MODIFY. */\n'use strict';\n"
"\nangular.module('%s', []).run(['$templateCache', function($templateCache) {") % state.options["templates_module"]]
matcher = re.compile(state.options["templates_matcher"], re.I)
done = set()
def process_file(filename, path, relpath, url_path):
pathname = os.path.join(path, filename)
relname = "/".join([p for p in [url_path, os.path.relpath(path, relpath), filename] if p])
if pathname not in done and matcher.match(relname):
with codecs.open(pathname, 'r', 'utf-8') as f:
content = f.read()
module.append(" $templateCache.put('%s', %s);" % (relname, json.dumps(htmlmin.minify(content))))
done.add(pathname)
disable = state.options["disable_templates_cache"]
if (disable is None and not self.get_app().debug) or disable is False:
for templates_dir, url_path in state.options['templates_search_paths']:
for path, dirnames, filenames in os.walk(templates_dir):
for filename in filenames:
process_file(filename, path, templates_dir, url_path)
module = "\n".join(module) + "\n}]);"
filename = os.path.join(state.options["static_dir"], state.options["app_dir"], state.options['templates_file'])
_write_file(filename, module)
_url_arg_re = re.compile(r"<([a-z]+:)?([a-z0-9_]+)>")
def _convert_url_args(url):
return _url_arg_re.sub(r":\2", url)
def _write_file(filename, source):
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with codecs.open(filename, "w", "utf-8") as f:
f.write(source)<|fim▁end|> | '.', ['frasco.angular.babel.AngularCompatExtension'], [('javascript:**.js', {})]) |
<|file_name|>pam_basic_test.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import time
import shutil
import signal
import subprocess
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
from lib.util.mysql_methods import execute_cmd
server_requirements = [[]]
servers = []
server_manager = None
test_executor = None
pamcfg = '/etc/pam.d/mysqld'
class basicTest(mysqlBaseTestCase):
def test_pam_basic(self):
percent_string = '%'
opt_matrix_req = ['pam_plugin_dir']
self.servers = servers
logging = test_executor.logging
master_server = servers[0]
output_path = os.path.join(master_server.vardir, 'pam.out')
test_executor.matrix_manager.matrix_check_req(opt_matrix_req)
        # Determine which PAM user to test with
if (test_executor.matrix_manager.option_matrix['pam_user']):
pam_user = test_executor.matrix_manager.option_matrix['pam_user']
else:
pam_user = 'pamuser'
# Create UNIX system account
if (test_executor.system_manager.user_exists(pam_user)):
pass
else:
subprocess.call(["useradd", pam_user])
# Create PAM config
if (os.path.isfile(pamcfg)):
os.remove(pamcfg)
pamcfg_fh = open("/etc/pam.d/mysqld", "wb")
pamcfg_fh.write("auth\trequired\tpam_permit.so\n")
        pamcfg_fh.close()
# Stop server
master_server.stop()
# Specify mysql plugin dir
master_server.server_options.append('--plugin-dir=%s' %(test_executor.matrix_manager.option_matrix['pam_plugin_dir']))
# Start server with new options
master_server.start()
self.assertEqual( master_server.status, 1, msg = 'Server failed to restart')<|fim▁hole|> cmd = "%s --protocol=tcp --port=%d -uroot -e \"%s\"" %(master_server.mysql_client
, master_server.master_port
, query )
retcode, output = execute_cmd(cmd, output_path, None, True)
self.assertEqual(retcode, 0, msg = cmd)
self.assertEqual(output, expected_result, msg = "%s || %s" %(output, expected_result))
# Create user
query = "CREATE USER \'%s\'@\'%s\' IDENTIFIED WITH auth_pam;" %(pam_user, percent_string)
expected_result = ''
cmd = "%s --protocol=tcp --port=%d -uroot -e \"%s\"" %(master_server.mysql_client
, master_server.master_port
, query )
retcode, output = execute_cmd(cmd, output_path, None, True)
self.assertEqual(retcode, 0, msg = output)
self.assertEqual(output, expected_result, msg = "%s || %s" %(output, expected_result))
# Grant permissions
query = "GRANT ALL ON test.* TO \'%s\'@\'%s\';" %(pam_user, percent_string)
expected_result = ''
cmd = "%s --protocol=tcp --port=%d --user=root -e \"%s\"" %(master_server.mysql_client
, master_server.master_port
, query )
retcode, output = execute_cmd(cmd, output_path, None, True)
self.assertEqual(retcode, 0, msg = output)
self.assertEqual(output, expected_result, msg = "%s || %s" %(output, expected_result))
# Test user login
query = "SHOW TABLES;"
expected_result = ''
cmd = "%s --plugin-dir=/usr/lib/mysql/plugin/ --protocol=tcp --port=%d --user=%s --password=\'\' -e \"%s\" test" %(master_server.mysql_client
, master_server.master_port
, pam_user
, query )
retcode, output = execute_cmd(cmd, output_path, None, True)
self.assertEqual(retcode, 0, msg = output)
self.assertEqual(output, expected_result, msg = "%s || %s" %(output, expected_result))<|fim▁end|> | # Install plugin
query = "INSTALL PLUGIN auth_pam SONAME \'auth_pam.so\'"
expected_result = '' |
<|file_name|>bans_gline.py<|end_file_name|><|fim▁begin|>from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.config import ConfigValidationError
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.modules.xlinebase import XLineBase
from txircd.utils import durationToSeconds, ircLower, now
from zope.interface import implements
from fnmatch import fnmatchcase
class GLine(ModuleData, XLineBase):
implements(IPlugin, IModuleData)
name = "GLine"
core = True
lineType = "G"
def actions(self):
return [ ("register", 10, self.checkLines),
("changeident", 10, self.checkIdentChange),
("changehost", 10, self.checkHostChange),
("commandpermission-GLINE", 10, self.restrictToOper),
("statsruntype-glines", 10, self.generateInfo),
("burst", 10, self.burstLines) ]
def userCommands(self):
return [ ("GLINE", 1, UserGLine(self)) ]
def serverCommands(self):
return [ ("ADDLINE", 1, ServerAddGLine(self)),
("DELLINE", 1, ServerDelGLine(self)) ]
def load(self):
self.initializeLineStorage()
def verifyConfig(self, config):
if "client_ban_msg" in config and not isinstance(config["client_ban_msg"], basestring):
raise ConfigValidationError("client_ban_msg", "value must be a string")
def checkUserMatch(self, user, mask, data):
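		# A user matches when the normalized ban mask matches any of the user's
		# displayed host, real host, or IP address.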
banMask = self.normalizeMask(mask)
userMask = ircLower("{}@{}".format(user.ident, user.host()))
if fnmatchcase(userMask, banMask):
return True
userMask = ircLower("{}@{}".format(user.ident, user.realHost))
if fnmatchcase(userMask, banMask):
return True
userMask = ircLower("{}@{}".format(user.ident, user.ip))
if fnmatchcase(userMask, banMask):
return True
return False
def killUser(self, user, reason):
self.ircd.log.info("Matched user {user.uuid} ({user.ident}@{user.host()}) against a g:line: {reason}", user=user, reason=reason)
user.sendMessage(irc.ERR_YOUREBANNEDCREEP, self.ircd.config.get("client_ban_msg", "You're banned! Email [email protected] for assistance."))
user.disconnect("G:Lined: {}".format(reason))
def checkLines(self, user):
banReason = self.matchUser(user)
if banReason is not None:
self.killUser(user, banReason)
return False
return True
def checkIdentChange(self, user, oldIdent, fromServer):
self.checkLines(user)
def checkHostChange(self, user, hostType, oldHost, fromServer):
if user.uuid[:3] == self.ircd.serverID:
self.checkLines(user)
def restrictToOper(self, user, data):
if not self.ircd.runActionUntilValue("userhasoperpermission", user, "command-gline", users=[user]):
user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the correct operator privileges")
return False
return None
class UserGLine(Command):
implements(ICommand)
def __init__(self, module):
self.module = module
def parseParams(self, user, params, prefix, tags):
if len(params) < 1 or len(params) == 2:
user.sendSingleError("GLineParams", irc.ERR_NEEDMOREPARAMS, "GLINE", "Not enough parameters")
return None
banmask = params[0]
if banmask in self.module.ircd.userNicks:
targetUser = self.module.ircd.users[self.module.ircd.userNicks[banmask]]
banmask = "{}@{}".format(targetUser.ident, targetUser.realHost)
else:
if "@" not in banmask:
banmask = "*@{}".format(banmask)
if len(params) == 1:
return {
"mask": banmask
}
return {
"mask": banmask,
"duration": durationToSeconds(params[1]),
"reason": " ".join(params[2:])
}
def execute(self, user, data):
banmask = data["mask"]
if "reason" in data:
if not self.module.addLine(banmask, now(), data["duration"], user.hostmask(), data["reason"]):
user.sendMessage("NOTICE", "*** G:Line for {} is already set.".format(banmask))
return True
badUsers = []
for checkUser in self.module.ircd.users.itervalues():
reason = self.module.matchUser(checkUser)
if reason is not None:
badUsers.append((checkUser, reason))
for badUser in badUsers:
self.module.killUser(*badUser)
if data["duration"] > 0:
user.sendMessage("NOTICE", "*** Timed g:line for {} has been set, to expire in {} seconds.".format(banmask, data["duration"]))
else:
user.sendMessage("NOTICE", "*** Permanent g:line for {} has been set.".format(banmask))
return True
if not self.module.delLine(banmask):
user.sendMessage("NOTICE", "*** G:Line for {} doesn't exist.".format(banmask))
return True
user.sendMessage("NOTICE", "*** G:Line for {} has been removed.".format(banmask))
return True
class ServerAddGLine(Command):
implements(ICommand)
def __init__(self, module):
self.module = module
def parseParams(self, server, params, prefix, tags):
return self.module.handleServerAddParams(server, params, prefix, tags)
def execute(self, server, data):
if self.module.executeServerAddCommand(server, data):
badUsers = []
for user in self.module.ircd.users.itervalues():
reason = self.module.matchUser(user)
if reason is not None:
badUsers.append((user, reason))
for user in badUsers:
self.module.killUser(*user)
return True
return None
class ServerDelGLine(Command):
implements(ICommand)
def __init__(self, module):
self.module = module
def parseParams(self, server, params, prefix, tags):<|fim▁hole|> return self.module.executeServerDelCommand(server, data)
glineModule = GLine()<|fim▁end|> | return self.module.handleServerDelParams(server, params, prefix, tags)
def execute(self, server, data): |
<|file_name|>modal.ts<|end_file_name|><|fim▁begin|>import { Component, Input, Output, EventEmitter, ViewChild } from '@angular/core';
import { ModalDirective } from 'ngx-bootstrap/modal';
@Component({
selector: 'modal-message',
templateUrl: './modal.html',
styleUrls: ['./modal.scss'],
})
export default class Modal {
@Input() private title: string;
@Input() private message: string;
@Input() private cancelText: string;
@Input() private denyText: string;
@Input() private agreeText: string;
@Output() private onHideCallback = new EventEmitter<any>();
@Output() private denyAction = new EventEmitter<any>();
@Output() private agreeAction = new EventEmitter<any>();
@ViewChild('childModal') private childModal: ModalDirective;
public onHideClb = () => {
if (this.onHideCallback) {
this.onHideCallback.emit();
}
}
private closeModal() {
this.childModal.hide();
}
private agree() {
this.agreeAction.emit();<|fim▁hole|><|fim▁end|> | }
} |
<|file_name|>publish.js<|end_file_name|><|fim▁begin|>Meteor.publish('card-vocoder-vocoders',function(simulatorId){
return Flint.collection('vocoders').find({simulatorId:simulatorId});<|fim▁hole|><|fim▁end|> | }); |
<|file_name|>utils.go<|end_file_name|><|fim▁begin|>/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3utils
import (
"bytes"
"encoding/hex"
"errors"
"net"
"net/url"
"regexp"
"sort"
"strings"
"unicode/utf8"
)
// Sentinel URL is the default url value which is invalid.
var sentinelURL = url.URL{}
// IsValidDomain validates if input string is a valid domain name.
func IsValidDomain(host string) bool {
// See RFC 1035, RFC 3696.
host = strings.TrimSpace(host)
if len(host) == 0 || len(host) > 255 {
return false
}
// host cannot start or end with "-"
if host[len(host)-1:] == "-" || host[:1] == "-" {
return false
}
// host cannot start or end with "_"
if host[len(host)-1:] == "_" || host[:1] == "_" {
return false
}
// host cannot start or end with a "."
if host[len(host)-1:] == "." || host[:1] == "." {
return false
}
// All non alphanumeric characters are invalid.
if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
return false
}
// No need to regexp match, since the list is non-exhaustive.
// We let it valid and fail later.
return true
}
// IsValidIP parses input string for ip address validity.
func IsValidIP(ip string) bool {
return net.ParseIP(ip) != nil
}
// IsVirtualHostSupported - verifies if bucketName can be part of
// virtual host. Currently only Amazon S3 and Google Cloud Storage
// would support this.
func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
if endpointURL == sentinelURL {
return false
}
// bucketName can be valid but '.' in the hostname will fail SSL
// certificate validation. So do not use host-style for such buckets.
if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
return false
}
// Return true for all other cases
return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL)
}
// AmazonS3Host - regular expression used to determine if an arg is s3 host.
var AmazonS3Host = regexp.MustCompile("^s3[.-]?(.*?)\\.amazonaws\\.com$")
// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
func IsAmazonEndpoint(endpointURL url.URL) bool {
if IsAmazonChinaEndpoint(endpointURL) {
return true
}
if IsAmazonGovCloudEndpoint(endpointURL) {
return true
}
return AmazonS3Host.MatchString(endpointURL.Host)
}
// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
return false
}
return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
IsAmazonFIPSGovCloudEndpoint(endpointURL))
}
// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint.
func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
return false
}
return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com"
}
// IsAmazonChinaEndpoint - Match if it is exactly Amazon S3 China endpoint.
// Customers who wish to use the new Beijing Region are required
// to sign up for a separate set of account credentials unique to
// the China (Beijing) Region. Customers with existing AWS credentials
// will not be able to access resources in the new Region, and vice versa.
// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
func IsAmazonChinaEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
return false
}
return endpointURL.Host == "s3.cn-north-1.amazonaws.com.cn"
}
// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
func IsGoogleEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
return false
}
return endpointURL.Host == "storage.googleapis.com"
}
// Expects ascii encoded strings - from output of urlEncodePath
func percentEncodeSlash(s string) string {
return strings.Replace(s, "/", "%2F", -1)
}
// QueryEncode - encodes query values in their URL encoded form. In
// addition to the percent encoding performed by urlEncodePath() used
// here, it also percent encodes '/' (forward slash)
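//
// For example, url.Values{"prefix": {"a/b"}} encodes to "prefix=a%2Fb".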
func QueryEncode(v url.Values) string {
if v == nil {
return ""
}
var buf bytes.Buffer
keys := make([]string, 0, len(v))
for k := range v {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
vs := v[k]
prefix := percentEncodeSlash(EncodePath(k)) + "="
for _, v := range vs {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(prefix)
buf.WriteString(percentEncodeSlash(EncodePath(v)))
}
}
return buf.String()
}
// if object matches reserved string, no need to encode them
var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
//
// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
// non english characters cannot be parsed due to the nature in which url.Encode() is written
//
// This function on the other hand is a direct replacement for url.Encode() technique to support
// pretty much every UTF-8 character.
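//
// For example, EncodePath("photos/été.jpg") yields "photos/%C3%A9t%C3%A9.jpg".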
func EncodePath(pathName string) string {
if reservedObjectNames.MatchString(pathName) {
return pathName
}
var encodedPathname string
for _, s := range pathName {
if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
}
switch s {
case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
default:
			runeLen := utf8.RuneLen(s)
			if runeLen < 0 {
				// if utf8 cannot convert return the same string as is
				return pathName
			}
			u := make([]byte, runeLen)
			utf8.EncodeRune(u, s)
for _, r := range u {
hex := hex.EncodeToString([]byte{r})
encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
}
}
}
return encodedPathname
}
// We support '.' with bucket names but we fallback to using path
// style requests instead for such buckets.<|fim▁hole|>var (
validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`)
validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
)
// Common checker for both stricter and basic validation.
func checkBucketNameCommon(bucketName string, strict bool) (err error) {
if strings.TrimSpace(bucketName) == "" {
return errors.New("Bucket name cannot be empty")
}
if len(bucketName) < 3 {
return errors.New("Bucket name cannot be smaller than 3 characters")
}
if len(bucketName) > 63 {
return errors.New("Bucket name cannot be greater than 63 characters")
}
if ipAddress.MatchString(bucketName) {
return errors.New("Bucket name cannot be an ip address")
}
if strings.Contains(bucketName, "..") {
return errors.New("Bucket name contains invalid characters")
}
if strict {
if !validBucketNameStrict.MatchString(bucketName) {
err = errors.New("Bucket name contains invalid characters")
}
return err
}
if !validBucketName.MatchString(bucketName) {
err = errors.New("Bucket name contains invalid characters")
}
return err
}
// CheckValidBucketName - checks if we have a valid input bucket name.
func CheckValidBucketName(bucketName string) (err error) {
return checkBucketNameCommon(bucketName, false)
}
// CheckValidBucketNameStrict - checks if we have a valid input bucket name.
// This is a stricter version.
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func CheckValidBucketNameStrict(bucketName string) (err error) {
return checkBucketNameCommon(bucketName, true)
}
// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix.
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func CheckValidObjectNamePrefix(objectName string) error {
if len(objectName) > 1024 {
return errors.New("Object name cannot be greater than 1024 characters")
}
if !utf8.ValidString(objectName) {
return errors.New("Object name with non UTF-8 strings are not supported")
}
return nil
}
// CheckValidObjectName - checks if we have a valid input object name.
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func CheckValidObjectName(objectName string) error {
if strings.TrimSpace(objectName) == "" {
return errors.New("Object name cannot be empty")
}
return CheckValidObjectNamePrefix(objectName)
}<|fim▁end|> | |
<|file_name|>cfg.py<|end_file_name|><|fim▁begin|>""" Computes the Control Flow Graph of a function. """
from pythran.passmanager import FunctionAnalysis
import ast
import networkx as nx
class CFG(FunctionAnalysis):
"""
Computes the Control Flow Graph of a function.
The processing of a node yields a pair containing
* the OUT nodes, to be linked with the IN nodes of the successor
* the RAISE nodes, nodes that stop the control flow (exception/break/...)
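
    A minimal usage sketch (the exact PassManager invocation may differ
    across pythran versions):

        from pythran.passmanager import PassManager
        cfg_graph = PassManager("module").gather(CFG, module_node)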
"""
def __init__(self):
self.result = nx.DiGraph()
super(CFG, self).__init__()
def visit_FunctionDef(self, node):
# the function itself is the entry point
self.result.add_node(node)
currs = (node,)
for n in node.body:
self.result.add_node(n)
for curr in currs:
self.result.add_edge(curr, n)
currs, _ = self.visit(n)
# add an edge to None for nodes that end the control flow
# without a return
self.result.add_node(None)
for curr in currs:
self.result.add_edge(curr, None)
def visit_Pass(self, node):
"""OUT = node, RAISES = ()"""
return (node,), ()
# All these nodes have the same behavior as pass
visit_Assign = visit_AugAssign = visit_Import = visit_Pass
visit_Expr = visit_Print = visit_ImportFrom = visit_Pass
visit_Yield = visit_Delete = visit_Pass
def visit_Return(self, node):
"""OUT = (), RAISES = ()"""
return (), ()
def visit_For(self, node):
"""
OUT = (node,) + last body statements
RAISES = body's that are not break or continue
"""
currs = (node,)
break_currs = (node,)
raises = ()
# handle body
for n in node.body:
self.result.add_node(n)
for curr in currs:
self.result.add_edge(curr, n)
currs, nraises = self.visit(n)
for nraise in nraises:
if type(nraise) is ast.Break:
break_currs += (nraise,)
elif type(nraise) is ast.Continue:
self.result.add_edge(nraise, node)
else:
raises += (nraise,)
# add the backward loop
for curr in currs:
self.result.add_edge(curr, node)
# the else statement if needed
if node.orelse:
for n in node.orelse:
self.result.add_node(n)
for curr in currs:
self.result.add_edge(curr, n)
currs, nraises = self.visit(n)
return break_currs + currs, raises
visit_While = visit_For
def visit_If(self, node):
"""
OUT = true branch U false branch
RAISES = true branch U false branch
"""
currs = (node,)
raises = ()
# true branch
for n in node.body:
self.result.add_node(n)
for curr in currs:
self.result.add_edge(curr, n)
currs, nraises = self.visit(n)
raises += nraises
tcurrs = currs
# false branch
currs = (node,)
for n in node.orelse:
self.result.add_node(n)
for curr in currs:
self.result.add_edge(curr, n)
currs, nraises = self.visit(n)
raises += nraises
return tcurrs + currs, raises
def visit_Raise(self, node):
"""OUT = (), RAISES = (node)"""
return (), (node,)
visit_Break = visit_Continue = visit_Raise
def visit_Assert(self, node):
"""OUT = RAISES = (node)"""
return (node,), (node,)
def visit_TryExcept(self, node):
"""
OUT = body's U handler's
RAISES = handler's
        this equation is not as good as it could be...
but we need type information to be more accurate
"""
currs = (node,)
raises = ()
for handler in node.handlers:
self.result.add_node(handler)
for n in node.body:
self.result.add_node(n)
for curr in currs:
self.result.add_edge(curr, n)
currs, nraises = self.visit(n)
for nraise in nraises:
if type(nraise) is ast.Raise:
for handler in node.handlers:
self.result.add_edge(nraise, handler)
else:
raises += (nraise,)
for handler in node.handlers:
ncurrs, nraises = self.visit(handler)
currs += ncurrs
raises += nraises
return currs, raises
<|fim▁hole|> raises = ()
for n in node.body:
self.result.add_node(n)
for curr in currs:
self.result.add_edge(curr, n)
currs, nraises = self.visit(n)
raises += nraises
return currs, raises<|fim▁end|> | def visit_ExceptHandler(self, node):
"""OUT = body's, RAISES = body's"""
currs = (node,) |
<|file_name|>geometric.py<|end_file_name|><|fim▁begin|># Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Geometric distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op<|fim▁hole|>from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class Geometric(distribution.Distribution):
"""Geometric distribution.
The Geometric distribution is parameterized by p, the probability of a
positive event. It represents the probability that in k + 1 Bernoulli trials,
the first k trials failed, before seeing a success.
  #### Mathematical Details

  The pmf of this distribution is:
```none
pmf(k; p) = (1 - p)**k * p
```
where:
* `p` is the success probability, `0 < p <= 1`, and,
* `k` is a non-negative integer.
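
  #### Examples

  ```python
  # Illustrative usage only: a batch of two Geometric distributions.
  dist = Geometric(probs=[0.1, 0.5])
  dist.prob(3.)     # pmf at k = 3 for each batch member
  dist.sample(10)   # ten draws per batch member
  ```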
"""
def __init__(self,
logits=None,
probs=None,
               validate_args=False,
               allow_nan_stats=True,
name="Geometric"):
"""Construct Geometric distributions.
Args:
logits: Floating-point `Tensor` with shape `[B1, ..., Bb]` where `b >= 0`
indicates the number of batch dimensions. Each entry represents logits
for the probability of success for independent Geometric distributions
and must be in the range `(-inf, inf]`. Only one of `logits` or `probs`
should be specified.
probs: Positive floating-point `Tensor` with shape `[B1, ..., Bb]`
where `b >= 0` indicates the number of batch dimensions. Each entry
represents the probability of success for independent Geometric
distributions and must be in the range `(0, 1]`. Only one of `logits`
or `probs` should be specified.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
with ops.name_scope(name, values=[logits, probs]):
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits, probs, validate_args=validate_args, name=name)
with ops.control_dependencies(
[check_ops.assert_positive(self._probs)] if validate_args else []):
self._probs = array_ops.identity(self._probs, name="probs")
super(Geometric, self).__init__(
dtype=self._probs.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._probs, self._logits],
name=name)
@property
def logits(self):
"""Log-odds of a `1` outcome (vs `0`)."""
return self._logits
@property
def probs(self):
"""Probability of a `1` outcome (vs `0`)."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self._probs)
def _batch_shape(self):
return self.probs.get_shape()
def _event_shape_tensor(self):
return array_ops.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
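    #
    # Inverse-CDF sampling: if U ~ Uniform(0, 1), then
    # floor(log(U) / log(1 - p)) is distributed as Geometric(p).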
sampled = random_ops.random_uniform(
array_ops.concat([[n], array_ops.shape(self._probs)], 0),
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
seed=seed,
dtype=self.dtype)
return math_ops.floor(
math_ops.log(sampled) / math_ops.log1p(-self.probs))
def _cdf(self, counts):
if self.validate_args:
# We set `check_integer=False` since the CDF is defined on whole real
# line.
counts = math_ops.floor(
distribution_util.embed_check_nonnegative_discrete(
counts, check_integer=False))
counts *= array_ops.ones_like(self.probs)
return array_ops.where(
counts < 0.,
array_ops.zeros_like(counts),
-math_ops.expm1(
(counts + 1) * math_ops.log1p(-self.probs)))
def _log_prob(self, counts):
if self.validate_args:
counts = distribution_util.embed_check_nonnegative_discrete(
counts, check_integer=True)
counts *= array_ops.ones_like(self.probs)
probs = self.probs * array_ops.ones_like(counts)
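    # Where counts == 0, the counts * log1p(-probs) term must vanish, so swap
    # in a safe probability there to avoid 0 * (-inf) = NaN when probs == 1.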
safe_domain = array_ops.where(
math_ops.equal(counts, 0.),
array_ops.zeros_like(probs),
probs)
return counts * math_ops.log1p(-safe_domain) + math_ops.log(probs)
def _entropy(self):
probs = self._probs
if self.validate_args:
probs = control_flow_ops.with_dependencies(
[check_ops.assert_less(
probs,
constant_op.constant(1., probs.dtype),
message="Entropy is undefined when logits = inf or probs = 1.")],
probs)
# Claim: entropy(p) = softplus(s)/p - s
# where s=logits and p=probs.
#
# Proof:
#
# entropy(p)
# := -[(1-p)log(1-p) + plog(p)]/p
# = -[log(1-p) + plog(p/(1-p))]/p
# = -[-softplus(s) + ps]/p
# = softplus(s)/p - s
#
# since,
# log[1-sigmoid(s)]
# = log[1/(1+exp(s)]
# = -log[1+exp(s)]
# = -softplus(s)
#
# using the fact that,
# 1-sigmoid(s) = sigmoid(-s) = 1/(1+exp(s))
return nn.softplus(self.logits) / probs - self.logits
def _mean(self):
return math_ops.exp(-self.logits)
def _variance(self):
return self._mean() / self.probs
def _mode(self):
return array_ops.zeros(self.batch_shape_tensor(), dtype=self.dtype)<|fim▁end|> | |
<|file_name|>generate_flows.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# -*- Mode: python; py-indent-offset: 4; tab-width: 8; indent-tabs-mode: t; -*-
#
# A script for generating a number of flows.
#
# The output of the script should be saved to a file, and the flows from
# that file should be added by the following command:
#
# web/add_flow.py -f filename
#
# NOTE: Currently, some of the parameters fo the flows are hard-coded,
# and all flows are between same source and destination DPID and ports
# (differentiated by different matchSrcMac and matchDstMac).
#
import copy
import pprint
import os
import sys
import subprocess
import json
import argparse
import io
import time
## Global Var ##
DEBUG=0
pp = pprint.PrettyPrinter(indent=4)
## Worker Functions ##
def log_error(txt):
print '%s' % (txt)
def debug(txt):
if DEBUG:
print '%s' % (txt)
if __name__ == "__main__":
usage_msg = "Generate a number of flows by using a pre-defined template.\n"
usage_msg = usage_msg + "\n"
usage_msg = usage_msg + "NOTE: This script is work-in-progress. Currently all flows are within same\n"
usage_msg = usage_msg + "pair of switch ports and contain auto-generated MAC-based matching conditions.\n"
usage_msg = usage_msg + "\n"
usage_msg = usage_msg + "Usage: %s <begin-flow-id> <end-flow-id>\n" % (sys.argv[0])
usage_msg = usage_msg + "\n"
usage_msg = usage_msg + " The output should be saved to a file, and the flows should be installed\n"
usage_msg = usage_msg + " by using the command './add_flow.py -f filename'\n"
# app.debug = False;
# Usage info
if len(sys.argv) > 1 and (sys.argv[1] == "-h" or sys.argv[1] == "--help"):
print(usage_msg)
exit(0)
# Check arguments
if len(sys.argv) < 3:
log_error(usage_msg)
exit(1)
# Extract the arguments
begin_flow_id = int(sys.argv[1], 0)
end_flow_id = int(sys.argv[2], 0)
if begin_flow_id > end_flow_id:
log_error(usage_msg)
exit(1)
#
# Do the work
#
# NOTE: Currently, up to 65536 flows are supported.
# More flows can be supported by iterating by, say, iterating over some of<|fim▁hole|> #
flow_id = begin_flow_id
idx = 0
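    # Example output line for begin_flow_id == 1 (illustrative):
    #   1 FOOBAR 00:00:00:00:00:00:00:01 1 00:00:00:00:00:00:00:01 2 matchSrcMac 00:00:00:00:00:00 matchDstMac 00:01:00:00:00:00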
while flow_id <= end_flow_id:
mac3 = idx / 255
mac4 = idx % 255
str_mac3 = "%0.2x" % mac3
str_mac4 = "%0.2x" % mac4
src_mac = "00:00:" + str_mac3 + ":" + str_mac4 + ":00:00";
dst_mac = "00:01:" + str_mac3 + ":" + str_mac4 + ":00:00";
print "%s FOOBAR 00:00:00:00:00:00:00:01 1 00:00:00:00:00:00:00:01 2 matchSrcMac %s matchDstMac %s" % (flow_id, src_mac, dst_mac)
flow_id = flow_id + 1
idx = idx + 1<|fim▁end|> | # the other bytes of the autogenereated source/destination MAC addresses. |
<|file_name|>length.py<|end_file_name|><|fim▁begin|># Copyright (C) 2011 Mark Burnett
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#<|fim▁hole|># This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from base_classes import EndCondition as _EndCondition
class MinLength(_EndCondition):
    'End the simulation once any filament is shorter than the given length.'
    __slots__ = ['value']
def __init__(self, value=None, label=None):
self.value = int(value)
_EndCondition.__init__(self, label=label)
def reset(self):
pass
def __call__(self, time, filaments, concentrations):
for f in filaments:
if len(f) < self.value:
return True
return False<|fim▁end|> | |
<|file_name|>CoreRender.java<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2013, Jens Hohmuth
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.lessvoid.coregl;
import com.lessvoid.coregl.spi.CoreGL;
/**
* Simple helper methods to render vertex arrays.
*
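 * <p>A minimal usage sketch (assumes a {@link CoreGL} instance named
 * {@code gl} and a currently bound VAO):</p>
 *
 * <pre>
 * CoreRender render = CoreRender.createCoreRender(gl);
 * render.clearColorBuffer();
 * render.renderTriangles(vertexCount);
 * </pre>
 *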
* @author void
*/
public class CoreRender {
private final CoreGL gl;
CoreRender(final CoreGL gl) {
this.gl = gl;
}
public static CoreRender createCoreRender(final CoreGL gl) {
return new CoreRender(gl);
}
// Lines
/**
* Render lines.
*
* @param count
* number of vertices
*/
public void renderLines(final int count) {
gl.glDrawArrays(gl.GL_LINE_STRIP(), 0, count);
gl.checkGLError("glDrawArrays");
}
/**
* Render adjacent lines.
*
* @param count
* number of vertices
*/
public void renderLinesAdjacent(final int count) {
gl.glDrawArrays(gl.GL_LINE_STRIP_ADJACENCY(), 0, count);
gl.checkGLError("glDrawArrays");
}
// Triangle Strip
/**
* Render the currently active VAO using triangle strips with the given number
* of vertices.
*
* @param count
* number of vertices to render as triangle strips
*/
public void renderTriangleStrip(final int count) {
gl.glDrawArrays(gl.GL_TRIANGLE_STRIP(), 0, count);
gl.checkGLError("glDrawArrays");
}
/**
* Render the currently active VAO using triangle strips, sending the given
   * number of indices.
*
* @param count
   *          number of indices to render as triangle strips
*/
public void renderTriangleStripIndexed(final int count) {
gl.glDrawElements(gl.GL_TRIANGLE_STRIP(), count, gl.GL_UNSIGNED_INT(), 0);
gl.checkGLError("glDrawElements(GL_TRIANGLE_STRIP)");<|fim▁hole|> * of vertices AND do that primCount times.
*
* @param count
   *          number of vertices to render as triangle strips per primitive
* @param primCount
* number of primitives to render
*/
public void renderTriangleStripInstances(final int count, final int primCount) {
gl.glDrawArraysInstanced(gl.GL_TRIANGLE_STRIP(), 0, count, primCount);
gl.checkGLError("glDrawArraysInstanced(GL_TRIANGLE_STRIP)");
}
// Triangle Fan
/**
* Render the currently active VAO using triangle fan with the given number of
* vertices.
*
* @param count
* number of vertices to render as triangle fan
*/
public void renderTriangleFan(final int count) {
gl.glDrawArrays(gl.GL_TRIANGLE_FAN(), 0, count);
gl.checkGLError("glDrawArrays");
}
/**
* Render the currently active VAO using triangle fans, sending the given
   * number of indices.
*
* @param count
   *          number of indices to render as triangle fans.
*/
public void renderTriangleFanIndexed(final int count) {
gl.glDrawElements(gl.GL_TRIANGLE_FAN(), count, gl.GL_UNSIGNED_INT(), 0);
gl.checkGLError("glDrawElements(GL_TRIANGLE_FAN)");
}
// Individual Triangles
/**
* Render the currently active VAO using triangles with the given number of
* vertices.
*
* @param vertexCount
* number of vertices to render as triangle strips
*/
public void renderTriangles(final int vertexCount) {
gl.glDrawArrays(gl.GL_TRIANGLES(), 0, vertexCount);
gl.checkGLError("glDrawArrays");
}
/**
* Render the currently active VAO using triangles with the given number of
* vertices starting at the given offset.
*
* @param offset
* offset to start sending vertices
* @param vertexCount
* number of vertices to render as triangle strips
*/
public void renderTrianglesOffset(final int offset, final int vertexCount) {
gl.glDrawArrays(gl.GL_TRIANGLES(), offset, vertexCount);
gl.checkGLError("glDrawArrays");
}
/**
* Render the currently active VAO using triangles with the given number of
* vertices.
*
* @param count
* number of vertices to render as triangles
*/
public void renderTrianglesIndexed(final int count) {
gl.glDrawElements(gl.GL_TRIANGLES(), count, gl.GL_UNSIGNED_INT(), 0);
gl.checkGLError("glDrawElements");
}
/**
* Render the currently active VAO using triangles with the given number of
* vertices AND do that primCount times.
*
* @param count
   *          number of vertices to render as triangles per primitive
* @param primCount
* number of primitives to render
*/
public void renderTrianglesInstances(final int count, final int primCount) {
gl.glDrawArraysInstanced(gl.GL_TRIANGLES(), 0, count, primCount);
gl.checkGLError("glDrawArraysInstanced(GL_TRIANGLES)");
}
// Points
/**
* Render the currently active VAO using points with the given number of
* vertices.
*
* @param count
* number of vertices to render as points
*/
public void renderPoints(final int count) {
gl.glDrawArrays(gl.GL_POINTS(), 0, count);
gl.checkGLError("glDrawArrays(GL_POINTS)");
}
/**
* Render the currently active VAO using points with the given number of
* vertices AND do that primCount times.
*
* @param count
* number of vertices to render as points per primitive
* @param primCount
* number of primitives to render
*/
public void renderPointsInstances(final int count, final int primCount) {
gl.glDrawArraysInstanced(gl.GL_POINTS(), 0, count, primCount);
gl.checkGLError("glDrawArraysInstanced(GL_POINTS)");
}
// Utils
/**
* Set the clear color.
*
* @param r
* red
* @param g
* green
* @param b
* blue
* @param a
* alpha
*/
public void clearColor(final float r, final float g, final float b, final float a) {
gl.glClearColor(r, g, b, a);
}
/**
* Clear the color buffer.
*/
public void clearColorBuffer() {
gl.glClear(gl.GL_COLOR_BUFFER_BIT());
}
}<|fim▁end|> | }
/**
* Render the currently active VAO using triangle strips with the given number |
<|file_name|>Simulation.py<|end_file_name|><|fim▁begin|>###################################################################################################
#
# PySpice - A Spice Package for Python
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
import logging
####################################################################################################
from ..Tools.StringTools import join_list, join_dict
from .NgSpice.Shared import NgSpiceShared
from .Server import SpiceServer
####################################################################################################
_module_logger = logging.getLogger(__name__)
####################################################################################################
class CircuitSimulation:
"""Define and generate the spice instruction to perform a circuit simulation.
.. warning:: In some cases NgSpice can perform several analyses one after the other. This case
is partially supported.
"""
_logger = _module_logger.getChild('CircuitSimulation')
##############################################
def __init__(self, circuit,
temperature=27,
nominal_temperature=27,
pipe=True,
):
self._circuit = circuit
self._options = {} # .options
self._initial_condition = {} # .ic
self._saved_nodes = ()
self._analysis_parameters = {}
self.temperature = temperature
self.nominal_temperature = nominal_temperature
if pipe:
self.options('NOINIT')
self.options(filetype='binary')
##############################################
@property
def circuit(self):
return self._circuit
##############################################
def options(self, *args, **kwargs):
for item in args:
self._options[str(item)] = None
for key, value in kwargs.items():
self._options[str(key)] = str(value)
##############################################
@property
def temperature(self):
return self._options['TEMP']
@temperature.setter
def temperature(self, value):
self._options['TEMP'] = value
##############################################
@property
def nominal_temperature(self):
return self._options['TNOM']
@nominal_temperature.setter
def nominal_temperature(self, value):
self._options['TNOM'] = value
##############################################
def initial_condition(self, **kwargs):
""" Set initial condition for voltage nodes.
Usage: initial_condition(node_name1=value, ...)
"""
for key, value in kwargs.items():
self._initial_condition['V({})'.format(str(key))] = str(value)
# Fixme: .nodeset
##############################################
def save(self, *args):
# Fixme: pass Node for voltage node, Element for source branch current, ...
"""Set the list of saved vectors.
If no *.save* line is given, then the default set of vectors is saved (node voltages and
voltage source branch currents). If *.save* lines are given, only those vectors specified
are saved.
Node voltages may be saved by giving the node_name or *v(node_name)*. Currents through an
independent voltage source (including inductor) are given by *i(source_name)* or
*source_name#branch*. Internal device data are accepted as *@dev[param]*.
If you want to save internal data in addition to the default vector set, add the parameter
*all* to the additional vectors to be saved.
"""
self._saved_nodes = list(args)
##############################################
@property
def save_currents(self):
""" Save all currents. """
return self._options.get('SAVECURRENTS', False)
@save_currents.setter
def save_currents(self, value):
if value:
self._options['SAVECURRENTS'] = True
else:
del self._options['SAVECURRENTS']
##############################################
def reset_analysis(self):
self._analysis_parameters.clear()
##############################################
def operating_point(self):
"""Compute the operating point of the circuit with capacitors open and inductors shorted."""
self._analysis_parameters['op'] = ''
##############################################
def dc_sensitivity(self, output_variable):
"""Compute the sensitivity of the DC operating point of a node voltage or voltage-source branch
current to all non-zero device parameters.
General form:
.. code::
.sens outvar
Examples:
.. code::
.SENS V(1, OUT)
.SENS I(VTEST)
"""
self._analysis_parameters['sens'] = (output_variable,)
##############################################
def ac_sensitivity(self, output_variable,
start_frequency, stop_frequency, number_of_points, variation):
"""Compute the sensitivity of the AC values of a node voltage or voltage-source branch
current to all non-zero device parameters.
General form:
.. code::
.sens outvar ac dec nd fstart fstop
.sens outvar ac oct no fstart fstop
.sens outvar ac lin np fstart fstop
Examples:
.. code::
            .SENS V(OUT) AC DEC 10 100 100k
"""
if variation not in ('dec', 'oct', 'lin'):
raise ValueError("Incorrect variation type")
self._analysis_parameters['sens'] = (output_variable,
variation, number_of_points, start_frequency, stop_frequency)
##############################################
def dc(self, **kwargs):
"""Compute the DC transfer fonction of the circuit with capacitors open and inductors shorted.
General form:
.. code::
.dc srcnam vstart vstop vincr [ src2 start2 stop2 incr2 ]
*srcnam* is the name of an independent voltage or current source, a resistor or the circuit
temperature. *vstart*, *vstop*, and *vincr* are the starting, final, and incrementing values
respectively.
A second source (*src2*) may optionally be specified with associated sweep parameters. In
this case, the first source is swept over its range for each value of the second source.
Examples:
.. code::
            .dc VIN 0.25 5.0 0.25
.dc VDS 0 10 .5 VGS 0 5 1
            .dc VCE 0 10 .25 IB 0 10U 1U
.dc RLoad 1k 2k 100
.dc TEMP -15 75 5
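
        From Python, sweep ranges are passed as ``slice`` objects; for example
        (the source name ``Vin`` is illustrative):

        .. code::

            simulation.dc(Vin=slice(0, 5, .25))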
"""
parameters = []
for variable, value_slice in kwargs.items():
variable_lower = variable.lower()
if variable_lower[0] in ('v', 'i', 'r') or variable_lower == 'temp':
parameters += [variable, value_slice.start, value_slice.stop, value_slice.step]
else:
raise NameError('Sweep variable must be a voltage/current source, '
'a resistor or the circuit temperature')
self._analysis_parameters['dc'] = parameters
##############################################
def ac(self, start_frequency, stop_frequency, number_of_points, variation):
# fixme: concise keyword ?
"""Perform a small-signal AC analysis of the circuit where all non-linear devices are linearized
around their actual DC operating point.
Note that in order for this analysis to be meaningful, at least one independent source must
have been specified with an AC value. Typically it does not make much sense to specify more
than one AC source. If you do, the result will be a superposition of all sources, thus
difficult to interpret.
Examples:
.. code::
.ac dec nd fstart fstop
.ac oct no fstart fstop
.ac lin np fstart fstop
The parameter *variation* must be either `dec`, `oct` or `lin`.
"""
if variation not in ('dec', 'oct', 'lin'):
raise ValueError("Incorrect variation type")
self._analysis_parameters['ac'] = (variation, number_of_points, start_frequency, stop_frequency)
##############################################
def transient(self, step_time, end_time, start_time=None, max_time=None,
use_initial_condition=False):
"""Perform a transient analysis of the circuit.
General Form:
.. code::
.tran tstep tstop <tstart <tmax>> <uic>
"""
if use_initial_condition:
uic = 'uic'
else:
uic = None
self._analysis_parameters['tran'] = (step_time, end_time, start_time, max_time, uic)
##############################################
def __str__(self):
netlist = str(self._circuit)
        if self._options:
for key, value in self._options.items():
if value is not None:<|fim▁hole|> else:
netlist += '.options {}\n'.format(key)
        if self._initial_condition:
netlist += '.ic ' + join_dict(self._initial_condition) + '\n'
if self._saved_nodes:
netlist += '.save ' + join_list(self._saved_nodes) + '\n'
for analysis, analysis_parameters in self._analysis_parameters.items():
netlist += '.' + analysis + ' ' + join_list(analysis_parameters) + '\n'
netlist += '.end\n'
return netlist
####################################################################################################
class CircuitSimulator(CircuitSimulation):
""" This class implements a circuit simulator. Each analysis mode is performed by a method that
return the measured probes.
For *ac* and *transient* analyses, the user must specify a list of nodes using the *probes* key
argument.
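
    A minimal usage sketch (assuming a concrete subclass such as
    SubprocessCircuitSimulator):

        simulator = SubprocessCircuitSimulator(circuit)
        analysis = simulator.transient(step_time=1e-6, end_time=1e-3,
                                       probes=['V(out)'])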
"""
_logger = _module_logger.getChild('CircuitSimulator')
##############################################
def _run(self, analysis_method, *args, **kwargs):
self.reset_analysis()
if 'probes' in kwargs:
self.save(* kwargs.pop('probes'))
method = getattr(CircuitSimulation, analysis_method)
method(self, *args, **kwargs)
self._logger.debug('desk\n' + str(self))
##############################################
def operating_point(self, *args, **kwargs):
return self._run('operating_point', *args, **kwargs)
##############################################
def dc(self, *args, **kwargs):
return self._run('dc', *args, **kwargs)
##############################################
def dc_sensitivity(self, *args, **kwargs):
return self._run('dc_sensitivity', *args, **kwargs)
##############################################
def ac(self, *args, **kwargs):
return self._run('ac', *args, **kwargs)
##############################################
def transient(self, *args, **kwargs):
return self._run('transient', *args, **kwargs)
####################################################################################################
class SubprocessCircuitSimulator(CircuitSimulator):
_logger = _module_logger.getChild('SubprocessCircuitSimulator')
##############################################
def __init__(self, circuit,
temperature=27,
nominal_temperature=27,
spice_command='ngspice',
):
# Fixme: kwargs
super().__init__(circuit, temperature, nominal_temperature, pipe=True)
self._spice_server = SpiceServer()
##############################################
def _run(self, analysis_method, *args, **kwargs):
super()._run(analysis_method, *args, **kwargs)
raw_file = self._spice_server(str(self))
self.reset_analysis()
# for field in raw_file.variables:
# print field
return raw_file.to_analysis(self._circuit)
####################################################################################################
class NgSpiceSharedCircuitSimulator(CircuitSimulator):
_logger = _module_logger.getChild('NgSpiceSharedCircuitSimulator')
##############################################
def __init__(self, circuit,
temperature=27,
nominal_temperature=27,
ngspice_shared=None,
):
# Fixme: kwargs
super().__init__(circuit, temperature, nominal_temperature, pipe=False)
if ngspice_shared is None:
self._ngspice_shared = NgSpiceShared(send_data=False)
else:
self._ngspice_shared = ngspice_shared
##############################################
def _run(self, analysis_method, *args, **kwargs):
super()._run(analysis_method, *args, **kwargs)
self._ngspice_shared.load_circuit(str(self))
self._ngspice_shared.run()
self._logger.debug(str(self._ngspice_shared.plot_names))
self.reset_analysis()
if analysis_method == 'dc':
plot_name = 'dc1'
elif analysis_method == 'ac':
plot_name = 'ac1'
elif analysis_method == 'transient':
plot_name = 'tran1'
else:
raise NotImplementedError
return self._ngspice_shared.plot(plot_name).to_analysis()
####################################################################################################
#
# End
#
####################################################################################################<|fim▁end|> | netlist += '.options {} = {}\n'.format(key, value) |
<|file_name|>datacatalog_v1_generated_data_catalog_list_tags_sync.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListTags
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-datacatalog
# [START datacatalog_v1_generated_DataCatalog_ListTags_sync]
from google.cloud import datacatalog_v1
def sample_list_tags():
# Create a client
client = datacatalog_v1.DataCatalogClient()
# Initialize request argument(s)
request = datacatalog_v1.ListTagsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_tags(request=request)<|fim▁hole|> # Handle the response
for response in page_result:
print(response)
# [END datacatalog_v1_generated_DataCatalog_ListTags_sync]<|fim▁end|> | |
<|file_name|>XmlTagValueImpl.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.psi.impl.source.xml;
import com.intellij.lang.ASTNode;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.TextRange;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.psi.PsiElement;
import com.intellij.psi.XmlElementFactory;
import com.intellij.psi.impl.source.xml.behavior.DefaultXmlPsiPolicy;
import com.intellij.psi.search.PsiElementProcessor;
import com.intellij.psi.xml.*;
import com.intellij.util.IncorrectOperationException;
import org.jetbrains.annotations.NotNull;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class XmlTagValueImpl implements XmlTagValue{
private static final Logger LOG = Logger.getInstance("#com.intellij.psi.impl.source.xml.XmlTagValueImpl");
private final XmlTag myTag;
private final XmlTagChild[] myElements;
private volatile XmlText[] myTextElements;
private volatile String myText;
private volatile String myTrimmedText;
public XmlTagValueImpl(@NotNull XmlTagChild[] bodyElements, @NotNull XmlTag tag) {
myTag = tag;
myElements = bodyElements;
}
@Override
@NotNull
public XmlTagChild[] getChildren() {
return myElements;
}
@Override
@NotNull
public XmlText[] getTextElements() {
XmlText[] textElements = myTextElements;
if (textElements == null) {
textElements = Arrays.stream(myElements)
.filter(element -> element instanceof XmlText)
.map(element -> (XmlText)element).toArray(XmlText[]::new);
myTextElements = textElements = textElements.length == 0 ? XmlText.EMPTY_ARRAY : textElements;
}
return textElements;
}
@Override
@NotNull
public String getText() {
String text = myText;
if (text == null) {
final StringBuilder consolidatedText = new StringBuilder();
for (final XmlTagChild element : myElements) {
consolidatedText.append(element.getText());
}
myText = text = consolidatedText.toString();
}
return text;
}
@Override
@NotNull
public TextRange getTextRange() {
if(myElements.length == 0){
final ASTNode child = XmlChildRole.START_TAG_END_FINDER.findChild( (ASTNode)myTag);
if(child != null)
return new TextRange(child.getStartOffset() + 1, child.getStartOffset() + 1);
return new TextRange(myTag.getTextRange().getEndOffset(), myTag.getTextRange().getEndOffset());
}
return new TextRange(myElements[0].getTextRange().getStartOffset(), myElements[myElements.length - 1].getTextRange().getEndOffset());
}
@Override
@NotNull
public String getTrimmedText() {
String trimmedText = myTrimmedText;
if (trimmedText == null) {
final StringBuilder consolidatedText = new StringBuilder();
final XmlText[] textElements = getTextElements();
for (final XmlText textElement : textElements) {
consolidatedText.append(textElement.getValue());
}
myTrimmedText = trimmedText = consolidatedText.toString().trim();
}
return trimmedText;
}
@Override
public void setText(String value) {
setText(value, false);
}
@Override
public void setEscapedText(String value) {
setText(value, true);
}
private void setText(String value, boolean defaultPolicy) {
try {
XmlText text = null;
if (value != null) {
final XmlText[] texts = getTextElements();
if (texts.length == 0) {
text = (XmlText)myTag.add(XmlElementFactory.getInstance(myTag.getProject()).createDisplayText("x"));
} else {
text = texts[0];
}
if (StringUtil.isEmpty(value)) {
text.delete();
}
else {
if (defaultPolicy && text instanceof XmlTextImpl) {
((XmlTextImpl)text).doSetValue(value, new DefaultXmlPsiPolicy());
} else {
text.setValue(value);
}
}
}
if(myElements.length > 0){
for (final XmlTagChild child : myElements) {
if (child != text) {
child.delete();
}<|fim▁hole|> }
catch (IncorrectOperationException e) {
LOG.error(e);
}
}
@Override
public boolean hasCDATA() {
for (XmlText xmlText : getTextElements()) {
PsiElement[] children = xmlText.getChildren();
for (PsiElement child : children) {
if (child.getNode().getElementType() == XmlElementType.XML_CDATA) {
return true;
}
}
}
return false;
}
public static XmlTagValue createXmlTagValue(XmlTag tag) {
final List<XmlTagChild> bodyElements = new ArrayList<>();
tag.processElements(new PsiElementProcessor() {
boolean insideBody;
@Override
public boolean execute(@NotNull PsiElement element) {
final ASTNode treeElement = element.getNode();
if (insideBody) {
if (treeElement != null && treeElement.getElementType() == XmlTokenType.XML_END_TAG_START) return false;
if (!(element instanceof XmlTagChild)) return true;
bodyElements.add((XmlTagChild)element);
}
else if (treeElement != null && treeElement.getElementType() == XmlTokenType.XML_TAG_END) insideBody = true;
return true;
}
}, tag);
XmlTagChild[] tagChildren = bodyElements.toArray(XmlTagChild.EMPTY_ARRAY);
return new XmlTagValueImpl(tagChildren, tag);
}
}<|fim▁end|> | }
} |
<|file_name|>seq.hpp<|end_file_name|><|fim▁begin|>#ifndef __SEQ_HPP__
#define __SEQ_HPP__
#include <string>
#include <vector>
#include <memory>
#include <iostream>
class SeqNode;
typedef std::shared_ptr<SeqNode> SeqNodeSP;
class SeqNode
{
public:
enum E_CallType {
E_UNKNOWN,
E_SYNC,
E_ASYNC,
E_ASYNC_WAIT
};
SeqNode();
void printTree(std::ostream &out,int tabcnt=0);
void setCallType(E_CallType calltype);
void setTaskSrc(const std::string& src);
void setTaskDst(const std::string& dst);
void setFunction(const std::string& func);
void setReturn(const std::string& ret);
void appendFuncParam(const std::string& param);
void appendFuncExtra(const std::string& extra);
void appendFuncMemo(const std::string& memo);
friend void AppendChild(SeqNodeSP parent, SeqNodeSP child);
protected:
std::string toString();
private:
E_CallType m_call_type;
std::string m_task_src;
std::string m_task_dst;
std::string m_func_name;
std::string m_func_ret;
std::vector<std::string> m_func_param;<|fim▁hole|> std::vector<std::string> m_func_memo;
std::vector< SeqNodeSP > m_child;
std::weak_ptr<SeqNode> m_parent;
};
SeqNodeSP CreateNode();
void AppendChild(SeqNodeSP parent, SeqNodeSP child);
void SetMember(SeqNodeSP node,const std::string& key, const std::string& value);
#endif<|fim▁end|> | std::vector<std::string> m_func_extra; |
<|file_name|>PathFilter.java<|end_file_name|><|fim▁begin|>/**
* This file is part of muCommander, http://www.mucommander.com
* Copyright (C) 2002-2010 Maxence Bernard
*
* muCommander is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* muCommander is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
<|fim▁hole|>
package com.mucommander.commons.file.filter;
/**
* <code>PathFilter</code> is a {@link FileFilter} that operates on absolute file paths.
*
* @see AbstractPathFilter
* @author Maxence Bernard
*/
public interface PathFilter extends StringCriterionFilter {
}<|fim▁end|> | |
<|file_name|>checkerboard.py<|end_file_name|><|fim▁begin|>"""Two dimensional checkerboard lattice with real hoppings"""
import pybinding as pb<|fim▁hole|>
def checkerboard(d=0.2, delta=1.1, t=0.6):
lat = pb.Lattice(a1=[d, 0], a2=[0, d])
lat.add_sublattices(
('A', [0, 0], -delta),
('B', [d/2, d/2], delta)
)
lat.add_hoppings(
([ 0, 0], 'A', 'B', t),
([ 0, -1], 'A', 'B', t),
([-1, 0], 'A', 'B', t),
([-1, -1], 'A', 'B', t)
)
return lat
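# Note added for illustration: sublattice A sits at onsite energy -delta and
# B at +delta, so the band structure computed below should show two bands
# separated by a gap of roughly 2*delta.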
lattice = checkerboard()
lattice.plot()
plt.show()
lattice.plot_brillouin_zone()
plt.show()
model = pb.Model(checkerboard(), pb.translational_symmetry())
solver = pb.solver.lapack(model)
bands = solver.calc_bands([0, 0], [0, 5*pi], [5*pi, 5*pi], [0, 0])
bands.plot()
plt.show()<|fim▁end|> | import matplotlib.pyplot as plt
from math import pi
pb.pltutils.use_style() |
<|file_name|>tags.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
export enum TagContentType {
RAW_TEXT,
ESCAPABLE_RAW_TEXT,
PARSABLE_DATA
}
export interface TagDefinition {
closedByParent: boolean;
implicitNamespacePrefix: string|null;
contentType: TagContentType;
isVoid: boolean;
ignoreFirstLf: boolean;
canSelfClose: boolean;
isClosedByChild(name: string): boolean;
}
export function splitNsName(elementName: string): [string | null, string] {
if (elementName[0] != ':') {
return [null, elementName];
}
const colonIndex = elementName.indexOf(':', 1);
if (colonIndex == -1) {
throw new Error(`Unsupported format "${elementName}" expecting ":namespace:name"`);
}
return [elementName.slice(1, colonIndex), elementName.slice(colonIndex + 1)];
}
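// Illustrative examples (added for clarity; not in the original source):
//   splitNsName('div')       -> [null, 'div']
//   splitNsName(':svg:rect') -> ['svg', 'rect']
// getNsPrefix and mergeNsAndName below round-trip the same ':ns:name' form.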
// `<ng-container>` tags work the same regardless the namespace
export function isNgContainer(tagName: string): boolean {
return splitNsName(tagName)[1] === 'ng-container';
}
// `<ng-content>` tags work the same regardless the namespace
export function isNgContent(tagName: string): boolean {
return splitNsName(tagName)[1] === 'ng-content';
}
// `<ng-template>` tags work the same regardless the namespace
export function isNgTemplate(tagName: string): boolean {
return splitNsName(tagName)[1] === 'ng-template';
}<|fim▁hole|>export function getNsPrefix(fullName: string): string;
export function getNsPrefix(fullName: null): null;
export function getNsPrefix(fullName: string | null): string|null {
return fullName === null ? null : splitNsName(fullName)[0];
}
export function mergeNsAndName(prefix: string, localName: string): string {
return prefix ? `:${prefix}:${localName}` : localName;
}
// see http://www.w3.org/TR/html51/syntax.html#named-character-references
// see https://html.spec.whatwg.org/multipage/entities.json
// This list is not exhaustive to keep the compiler footprint low.
// The `&#123;` / `&#x1ab;` syntax should be used when the named character reference does not
// exist.
export const NAMED_ENTITIES: {[k: string]: string} = {
'Aacute': '\u00C1',
'aacute': '\u00E1',
'Acirc': '\u00C2',
'acirc': '\u00E2',
'acute': '\u00B4',
'AElig': '\u00C6',
'aelig': '\u00E6',
'Agrave': '\u00C0',
'agrave': '\u00E0',
'alefsym': '\u2135',
'Alpha': '\u0391',
'alpha': '\u03B1',
'amp': '&',
'and': '\u2227',
'ang': '\u2220',
'apos': '\u0027',
'Aring': '\u00C5',
'aring': '\u00E5',
'asymp': '\u2248',
'Atilde': '\u00C3',
'atilde': '\u00E3',
'Auml': '\u00C4',
'auml': '\u00E4',
'bdquo': '\u201E',
'Beta': '\u0392',
'beta': '\u03B2',
'brvbar': '\u00A6',
'bull': '\u2022',
'cap': '\u2229',
'Ccedil': '\u00C7',
'ccedil': '\u00E7',
'cedil': '\u00B8',
'cent': '\u00A2',
'Chi': '\u03A7',
'chi': '\u03C7',
'circ': '\u02C6',
'clubs': '\u2663',
'cong': '\u2245',
'copy': '\u00A9',
'crarr': '\u21B5',
'cup': '\u222A',
'curren': '\u00A4',
'dagger': '\u2020',
'Dagger': '\u2021',
'darr': '\u2193',
'dArr': '\u21D3',
'deg': '\u00B0',
'Delta': '\u0394',
'delta': '\u03B4',
'diams': '\u2666',
'divide': '\u00F7',
'Eacute': '\u00C9',
'eacute': '\u00E9',
'Ecirc': '\u00CA',
'ecirc': '\u00EA',
'Egrave': '\u00C8',
'egrave': '\u00E8',
'empty': '\u2205',
'emsp': '\u2003',
'ensp': '\u2002',
'Epsilon': '\u0395',
'epsilon': '\u03B5',
'equiv': '\u2261',
'Eta': '\u0397',
'eta': '\u03B7',
'ETH': '\u00D0',
'eth': '\u00F0',
'Euml': '\u00CB',
'euml': '\u00EB',
'euro': '\u20AC',
'exist': '\u2203',
'fnof': '\u0192',
'forall': '\u2200',
'frac12': '\u00BD',
'frac14': '\u00BC',
'frac34': '\u00BE',
'frasl': '\u2044',
'Gamma': '\u0393',
'gamma': '\u03B3',
'ge': '\u2265',
'gt': '>',
'harr': '\u2194',
'hArr': '\u21D4',
'hearts': '\u2665',
'hellip': '\u2026',
'Iacute': '\u00CD',
'iacute': '\u00ED',
'Icirc': '\u00CE',
'icirc': '\u00EE',
'iexcl': '\u00A1',
'Igrave': '\u00CC',
'igrave': '\u00EC',
'image': '\u2111',
'infin': '\u221E',
'int': '\u222B',
'Iota': '\u0399',
'iota': '\u03B9',
'iquest': '\u00BF',
'isin': '\u2208',
'Iuml': '\u00CF',
'iuml': '\u00EF',
'Kappa': '\u039A',
'kappa': '\u03BA',
'Lambda': '\u039B',
'lambda': '\u03BB',
'lang': '\u27E8',
'laquo': '\u00AB',
'larr': '\u2190',
'lArr': '\u21D0',
'lceil': '\u2308',
'ldquo': '\u201C',
'le': '\u2264',
'lfloor': '\u230A',
'lowast': '\u2217',
'loz': '\u25CA',
'lrm': '\u200E',
'lsaquo': '\u2039',
'lsquo': '\u2018',
'lt': '<',
'macr': '\u00AF',
'mdash': '\u2014',
'micro': '\u00B5',
'middot': '\u00B7',
'minus': '\u2212',
'Mu': '\u039C',
'mu': '\u03BC',
'nabla': '\u2207',
'nbsp': '\u00A0',
'ndash': '\u2013',
'ne': '\u2260',
'ni': '\u220B',
'not': '\u00AC',
'notin': '\u2209',
'nsub': '\u2284',
'Ntilde': '\u00D1',
'ntilde': '\u00F1',
'Nu': '\u039D',
'nu': '\u03BD',
'Oacute': '\u00D3',
'oacute': '\u00F3',
'Ocirc': '\u00D4',
'ocirc': '\u00F4',
'OElig': '\u0152',
'oelig': '\u0153',
'Ograve': '\u00D2',
'ograve': '\u00F2',
'oline': '\u203E',
'Omega': '\u03A9',
'omega': '\u03C9',
'Omicron': '\u039F',
'omicron': '\u03BF',
'oplus': '\u2295',
'or': '\u2228',
'ordf': '\u00AA',
'ordm': '\u00BA',
'Oslash': '\u00D8',
'oslash': '\u00F8',
'Otilde': '\u00D5',
'otilde': '\u00F5',
'otimes': '\u2297',
'Ouml': '\u00D6',
'ouml': '\u00F6',
'para': '\u00B6',
'permil': '\u2030',
'perp': '\u22A5',
'Phi': '\u03A6',
'phi': '\u03C6',
'Pi': '\u03A0',
'pi': '\u03C0',
'piv': '\u03D6',
'plusmn': '\u00B1',
'pound': '\u00A3',
'prime': '\u2032',
'Prime': '\u2033',
'prod': '\u220F',
'prop': '\u221D',
'Psi': '\u03A8',
'psi': '\u03C8',
'quot': '\u0022',
'radic': '\u221A',
'rang': '\u27E9',
'raquo': '\u00BB',
'rarr': '\u2192',
'rArr': '\u21D2',
'rceil': '\u2309',
'rdquo': '\u201D',
'real': '\u211C',
'reg': '\u00AE',
'rfloor': '\u230B',
'Rho': '\u03A1',
'rho': '\u03C1',
'rlm': '\u200F',
'rsaquo': '\u203A',
'rsquo': '\u2019',
'sbquo': '\u201A',
'Scaron': '\u0160',
'scaron': '\u0161',
'sdot': '\u22C5',
'sect': '\u00A7',
'shy': '\u00AD',
'Sigma': '\u03A3',
'sigma': '\u03C3',
'sigmaf': '\u03C2',
'sim': '\u223C',
'spades': '\u2660',
'sub': '\u2282',
'sube': '\u2286',
'sum': '\u2211',
'sup': '\u2283',
'sup1': '\u00B9',
'sup2': '\u00B2',
'sup3': '\u00B3',
'supe': '\u2287',
'szlig': '\u00DF',
'Tau': '\u03A4',
'tau': '\u03C4',
'there4': '\u2234',
'Theta': '\u0398',
'theta': '\u03B8',
'thetasym': '\u03D1',
'thinsp': '\u2009',
'THORN': '\u00DE',
'thorn': '\u00FE',
'tilde': '\u02DC',
'times': '\u00D7',
'trade': '\u2122',
'Uacute': '\u00DA',
'uacute': '\u00FA',
'uarr': '\u2191',
'uArr': '\u21D1',
'Ucirc': '\u00DB',
'ucirc': '\u00FB',
'Ugrave': '\u00D9',
'ugrave': '\u00F9',
'uml': '\u00A8',
'upsih': '\u03D2',
'Upsilon': '\u03A5',
'upsilon': '\u03C5',
'Uuml': '\u00DC',
'uuml': '\u00FC',
'weierp': '\u2118',
'Xi': '\u039E',
'xi': '\u03BE',
'Yacute': '\u00DD',
'yacute': '\u00FD',
'yen': '\u00A5',
'yuml': '\u00FF',
'Yuml': '\u0178',
'Zeta': '\u0396',
'zeta': '\u03B6',
'zwj': '\u200D',
'zwnj': '\u200C',
};
// The &ngsp; pseudo-entity denotes a space. see:
// https://github.com/dart-lang/angular/blob/0bb611387d29d65b5af7f9d2515ab571fd3fbee4/_tests/test/compiler/preserve_whitespace_test.dart
export const NGSP_UNICODE = '\uE500';
NAMED_ENTITIES['ngsp'] = NGSP_UNICODE;<|fim▁end|> | |
<|file_name|>extern-call-scrub.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This time we're testing repeatedly going up and down both stacks to
// make sure the stack pointers are maintained properly in both
// directions
extern crate libc;
use std::task;
mod rustrt {
extern crate libc;
#[link(name = "rust_test_helpers")]
extern {
pub fn rust_dbg_call(cb: extern "C" fn(libc::uintptr_t) -> libc::uintptr_t,
data: libc::uintptr_t)
-> libc::uintptr_t;<|fim▁hole|> }
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1 {
data
} else {
count(data - 1) + count(data - 1)
}
}
fn count(n: libc::uintptr_t) -> libc::uintptr_t {
unsafe {
println!("n = {}", n);
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() {
// Make sure we're on a task with small Rust stacks (main currently
// has a large stack)
task::spawn(proc() {
let result = count(12);
println!("result = {}", result);
assert_eq!(result, 2048);
});
}<|fim▁end|> | |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>//! Internal library for data-encoding-macro
//!
//! Do **not** use this library. Use [data-encoding-macro] instead.
//!
//! This library is for internal use by data-encoding-macro because procedural
//! macros require a separate crate.
//!
//! [data-encoding-macro]: https://crates.io/crates/data-encoding-macro
#![warn(unused_results)]
use proc_macro::token_stream::IntoIter;
use proc_macro::{TokenStream, TokenTree};
use std::collections::HashMap;
use data_encoding::{BitOrder, Encoding, Specification, Translate, Wrap};
fn parse_op(tokens: &mut IntoIter, op: char, key: &str) {
match tokens.next() {
Some(TokenTree::Punct(ref x)) if x.as_char() == op => (),
_ => panic!("expected {:?} after {}", op, key),
}
}
fn parse_map(mut tokens: IntoIter) -> HashMap<String, TokenTree> {
let mut map = HashMap::new();
while let Some(key) = tokens.next() {
let key = match key {
TokenTree::Ident(ident) => format!("{}", ident),
_ => panic!("expected key got {}", key),
};
parse_op(&mut tokens, ':', &key);
let value = match tokens.next() {
None => panic!("expected value for {}", key),
Some(value) => value,<|fim▁hole|> parse_op(&mut tokens, ',', &key);
let _ = map.insert(key, value);
}
map
}
fn get_string(map: &mut HashMap<String, TokenTree>, key: &str) -> String {
let node = match map.remove(key) {
None => return String::new(),
Some(node) => node,
};
match syn::parse::<syn::LitStr>(node.into()) {
Ok(result) => result.value(),
_ => panic!("expected string for {}", key),
}
}
fn get_usize(map: &mut HashMap<String, TokenTree>, key: &str) -> usize {
let node = match map.remove(key) {
None => return 0,
Some(node) => node,
};
let literal = match node {
TokenTree::Literal(literal) => literal,
_ => panic!("expected literal for {}", key),
};
match literal.to_string().parse() {
Ok(result) => result,
Err(error) => panic!("expected usize for {}: {}", key, error),
}
}
fn get_padding(map: &mut HashMap<String, TokenTree>) -> Option<char> {
let node = match map.remove("padding") {
None => return None,
Some(node) => node,
};
if let Ok(result) = syn::parse::<syn::LitChar>(node.clone().into()) {
return Some(result.value());
}
match syn::parse::<syn::Ident>(node.into()) {
Ok(ref result) if result == "None" => None,
_ => panic!("expected None or char for padding"),
}
}
fn get_bool(map: &mut HashMap<String, TokenTree>, key: &str) -> Option<bool> {
let node = match map.remove(key) {
None => return None,
Some(node) => node,
};
match syn::parse::<syn::LitBool>(node.into()) {
Ok(result) => Some(result.value),
_ => panic!("expected bool for padding"),
}
}
fn get_bit_order(map: &mut HashMap<String, TokenTree>) -> BitOrder {
let node = match map.remove("bit_order") {
None => return BitOrder::MostSignificantFirst,
Some(node) => node,
};
let msb = "MostSignificantFirst";
let lsb = "LeastSignificantFirst";
match node {
TokenTree::Ident(ref ident) if format!("{}", ident) == msb => {
BitOrder::MostSignificantFirst
}
TokenTree::Ident(ref ident) if format!("{}", ident) == lsb => {
BitOrder::LeastSignificantFirst
}
_ => panic!("expected {} or {} for bit_order", msb, lsb),
}
}
fn check_present<T>(hash_map: &HashMap<String, T>, key: &str) {
assert!(hash_map.contains_key(key), "{} is required", key);
}
fn get_encoding(hash_map: &mut HashMap<String, TokenTree>) -> Encoding {
check_present(hash_map, "symbols");
let spec = Specification {
symbols: get_string(hash_map, "symbols"),
bit_order: get_bit_order(hash_map),
check_trailing_bits: get_bool(hash_map, "check_trailing_bits").unwrap_or(true),
padding: get_padding(hash_map),
ignore: get_string(hash_map, "ignore"),
wrap: Wrap {
width: get_usize(hash_map, "wrap_width"),
separator: get_string(hash_map, "wrap_separator"),
},
translate: Translate {
from: get_string(hash_map, "translate_from"),
to: get_string(hash_map, "translate_to"),
},
};
spec.encoding().unwrap()
}
fn check_empty<T>(hash_map: HashMap<String, T>) {
assert!(hash_map.is_empty(), "Unexpected keys {:?}", hash_map.keys());
}
#[proc_macro]
#[doc(hidden)]
pub fn internal_new_encoding(input: TokenStream) -> TokenStream {
let mut hash_map = parse_map(input.into_iter());
let encoding = get_encoding(&mut hash_map);
check_empty(hash_map);
format!("{:?}", encoding.internal_implementation()).parse().unwrap()
}
#[proc_macro]
#[doc(hidden)]
pub fn internal_decode_array(input: TokenStream) -> TokenStream {
let mut hash_map = parse_map(input.into_iter());
let encoding = get_encoding(&mut hash_map);
check_present(&hash_map, "name");
let name = get_string(&mut hash_map, "name");
check_present(&hash_map, "input");
let input = get_string(&mut hash_map, "input");
check_empty(hash_map);
let output = encoding.decode(input.as_bytes()).unwrap();
format!("{}: [u8; {}] = {:?};", name, output.len(), output).parse().unwrap()
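    // Illustrative expansion (added; the argument values are hypothetical):
    // with `name: "DATA", symbols: "01", input: "01000001"`, the emitted
    // item would read `DATA: [u8; 1] = [65];` (a base-2 decode of the input).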
}
#[proc_macro]
#[doc(hidden)]
pub fn internal_decode_slice(input: TokenStream) -> TokenStream {
let mut hash_map = parse_map(input.into_iter());
let encoding = get_encoding(&mut hash_map);
check_present(&hash_map, "input");
let input = get_string(&mut hash_map, "input");
check_empty(hash_map);
format!("{:?}", encoding.decode(input.as_bytes()).unwrap()).parse().unwrap()
}<|fim▁end|> | }; |
<|file_name|>gecode_solverfactory.cpp<|end_file_name|><|fim▁begin|>#include <minizinc/solvers/gecode_solverfactory.hh>
#include <minizinc/solvers/gecode_solverinstance.hh><|fim▁hole|>
namespace MiniZinc {
namespace {
void get_wrapper() { static GecodeSolverFactory _gecode_solverfactory; }
} // namespace
GecodeSolverFactoryInitialiser::GecodeSolverFactoryInitialiser() { get_wrapper(); }
} // namespace MiniZinc<|fim▁end|> | |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from dragnet.dll.models import File, Comment
class FileForm(forms.ModelForm):
"""Using a model form to expedite the creation of DLL records"""
class Meta:
model = File
exclude = ('date_created', 'date_modified', 'created_by',
'modified_by', )
class CommentForm(forms.ModelForm):
"""Comment form for DLL comments"""
class Meta:
model = Comment<|fim▁hole|>
class SearchForm(forms.Form):
term = forms.CharField()<|fim▁end|> | exclude = ('user', 'date', 'dll') |
<|file_name|>KerberosFlags.go<|end_file_name|><|fim▁begin|>package types
// Reference: https://www.ietf.org/rfc/rfc4120.txt
// Section: 5.2.8
import (
"github.com/jcmturner/gofork/encoding/asn1"
)
/*
KerberosFlags
For several message types, a specific constrained bit string type,
KerberosFlags, is used.
KerberosFlags ::= BIT STRING (SIZE (32..MAX))
-- minimum number of bits shall be sent,
-- but no fewer than 32
Compatibility note: The following paragraphs describe a change from
the RFC 1510 description of bit strings that would result in
incompatibility in the case of an implementation that strictly
conformed to ASN.1 DER and RFC 1510.
ASN.1 bit strings have multiple uses. The simplest use of a bit
string is to contain a vector of bits, with no particular meaning
attached to individual bits. This vector of bits is not necessarily
a multiple of eight bits long. The use in Kerberos of a bit string
as a compact boolean vector wherein each element has a distinct
meaning poses some problems. The natural notation for a compact
boolean vector is the ASN.1 "NamedBit" notation, and the DER require
that encodings of a bit string using "NamedBit" notation exclude any
trailing zero bits. This truncation is easy to neglect, especially
given C language implementations that naturally choose to store
boolean vectors as 32-bit integers.
For example, if the notation for KDCOptions were to include the
"NamedBit" notation, as in RFC 1510, and a KDCOptions value to be
encoded had only the "forwardable" (bit number one) bit set, the DER
encoding MUST include only two bits: the first reserved bit
("reserved", bit number zero, value zero) and the one-valued bit (bit
number one) for "forwardable".
Most existing implementations of Kerberos unconditionally send 32
bits on the wire when encoding bit strings used as boolean vectors.
This behavior violates the ASN.1 syntax used for flag values in RFC
1510, but it occurs on such a widely installed base that the protocol
description is being modified to accommodate it.
Consequently, this document removes the "NamedBit" notations for
individual bits, relegating them to comments. The size constraint on
the KerberosFlags type requires that at least 32 bits be encoded at
all times, though a lenient implementation MAY choose to accept fewer
than 32 bits and to treat the missing bits as set to zero.
Currently, no uses of KerberosFlags specify more than 32 bits' worth
of flags, although future revisions of this document may do so. When
more than 32 bits are to be transmitted in a KerberosFlags value,
future revisions to this document will likely specify that the
smallest number of bits needed to encode the highest-numbered one-
valued bit should be sent. This is somewhat similar to the DER
encoding of a bit string that is declared with the "NamedBit"
notation.
*/
// NewKrbFlags returns an ASN1 BitString struct of the right size for KrbFlags.
func NewKrbFlags() asn1.BitString {
f := asn1.BitString{}
f.Bytes = make([]byte, 4)
f.BitLength = len(f.Bytes) * 8
return f
}
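// Illustrative usage (added; not part of the original file): bit numbering is
// MSB-first within each byte, so setting the "forwardable" flag (bit 1 in the
// RFC text above) touches byte 0 at bit position 7-1 = 6, i.e. mask 0x40:
//
//	f := NewKrbFlags()
//	SetFlag(&f, 1)       // f.Bytes[0] == 0x40
//	_ = IsFlagSet(&f, 1) // true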
// SetFlags sets the flags of an ASN1 BitString.
func SetFlags(f *asn1.BitString, j []int) {
for _, i := range j {
SetFlag(f, i)
}
}
// SetFlag sets a flag in an ASN1 BitString.
func SetFlag(f *asn1.BitString, i int) {
for l := len(f.Bytes); l < 4; l++ {
(*f).Bytes = append((*f).Bytes, byte(0))
(*f).BitLength = len((*f).Bytes) * 8<|fim▁hole|> p := uint(7 - (i - 8*b))
(*f).Bytes[b] = (*f).Bytes[b] | (1 << p)
}
// UnsetFlags unsets flags in an ASN1 BitString.
func UnsetFlags(f *asn1.BitString, j []int) {
for _, i := range j {
UnsetFlag(f, i)
}
}
// UnsetFlag unsets a flag in an ASN1 BitString.
func UnsetFlag(f *asn1.BitString, i int) {
for l := len(f.Bytes); l < 4; l++ {
(*f).Bytes = append((*f).Bytes, byte(0))
(*f).BitLength = len((*f).Bytes) * 8
}
//Which byte?
b := i / 8
//Which bit in byte
p := uint(7 - (i - 8*b))
(*f).Bytes[b] = (*f).Bytes[b] &^ (1 << p)
}
// IsFlagSet tests if a flag is set in the ASN1 BitString.
func IsFlagSet(f *asn1.BitString, i int) bool {
//Which byte?
b := i / 8
//Which bit in byte
p := uint(7 - (i - 8*b))
if (*f).Bytes[b]&(1<<p) != 0 {
return true
}
return false
}<|fim▁end|> | }
//Which byte?
b := i / 8
//Which bit in byte |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>//! A fast, low-level IO library for Rust focusing on non-blocking APIs, event
//! notification, and other useful utilities for building high performance IO
//! apps.
//!
//! # Goals
//!
//! * Fast - minimal overhead over the equivalent OS facilities (epoll, kqueue, etc...)
//! * Zero allocations
//! * A scalable readiness-based API, similar to epoll on Linux
//! * Designed to allow for stack allocated buffers when possible (avoid double buffering).
//! * Provide utilities such as a timer, a notification channel, buffer abstractions, and a slab.
//!
//! # Usage
//!
//! Using mio starts by creating a [Poll](struct.Poll.html), which reads events from the OS and
//! puts them into [Events](struct.Events.html). You can handle IO events from the OS with it.
//!
//! # Example
//!
//! ```
//! use mio::*;
//! use mio::tcp::{TcpListener, TcpStream};
//!
//! // Setup some tokens to allow us to identify which event is
//! // for which socket.
//! const SERVER: Token = Token(0);
//! const CLIENT: Token = Token(1);
//!
//! let addr = "127.0.0.1:13265".parse().unwrap();
//!
//! // Setup the server socket
//! let server = TcpListener::bind(&addr).unwrap();
//!
//! // Create an poll instance
//! let poll = Poll::new().unwrap();
//!
//! // Start listening for incoming connections
//! poll.register(&server, SERVER, Ready::readable(),
//! PollOpt::edge()).unwrap();
//!
//! // Setup the client socket
//! let sock = TcpStream::connect(&addr).unwrap();
//!
//! // Register the socket
//! poll.register(&sock, CLIENT, Ready::readable(),
//! PollOpt::edge()).unwrap();
//!
//! // Create storage for events
//! let mut events = Events::with_capacity(1024);
//!
//! loop {
//! poll.poll(&mut events, None).unwrap();
//!
//! for event in events.iter() {
//! match event.token() {
//! SERVER => {
//! // Accept and drop the socket immediately, this will close
//! // the socket and notify the client of the EOF.
//! let _ = server.accept();
//! }
//! CLIENT => {
//! // The server just shuts down the socket, let's just exit
//! // from our event loop.
//! return;
//! }
//! _ => unreachable!(),
//! }
//! }
//! }
//!
//! ```
#![doc(html_root_url = "https://docs.rs/mio/0.6.1")]
#![crate_name = "mio"]
#![cfg_attr(unix, deny(warnings))]
extern crate lazycell;
extern crate net2;
extern crate slab;
#[cfg(unix)]
extern crate libc;
#[cfg(windows)]
extern crate miow;
#[cfg(windows)]
extern crate winapi;
#[cfg(windows)]
extern crate kernel32;
#[macro_use]
extern crate log;
#[cfg(test)]
extern crate env_logger;
mod event;
mod io;
mod iovec;
mod net;
mod poll;
mod sys;<|fim▁hole|>pub mod channel;
pub mod timer;
/// EventLoop and other deprecated types
pub mod deprecated;
pub use event::{
PollOpt,
Ready,
Event,
};
pub use io::{
Evented,
would_block,
};
pub use iovec::IoVec;
pub use net::{
tcp,
udp,
};
pub use poll::{
Poll,
Events,
EventsIter,
Registration,
SetReadiness,
};
pub use token::{
Token,
};
#[cfg(unix)]
pub mod unix {
//! Unix only extensions
pub use sys::{
EventedFd,
};
}
/// Windows-only extensions to the mio crate.
///
/// Mio on windows is currently implemented with IOCP for a high-performance
/// implementation of asynchronous I/O. Mio then provides TCP and UDP as sample
/// bindings for the system to connect networking types to asynchronous I/O. On
/// Unix this scheme is then also extensible to all other file descriptors with
/// the `EventedFd` type, but on Windows no such analog is available. The
/// purpose of this module, however, is to similarly provide a mechanism for
/// foreign I/O types to get hooked up into the IOCP event loop.
///
/// This module provides two types for interfacing with a custom IOCP handle:
///
/// * `Binding` - this type is intended to govern binding with mio's `Poll`
/// type. Each I/O object should contain an instance of `Binding` that's
/// interfaced with for the implementation of the `Evented` trait. The
/// `register`, `reregister`, and `deregister` methods for the `Evented` trait
/// all have rough analogs with `Binding`.
///
/// Note that this type **does not handle readiness**. That is, this type does
/// not handle whether sockets are readable/writable/etc. It's intended that
/// IOCP types will internally manage this state with a `SetReadiness` type
/// from the `poll` module. The `SetReadiness` is typically lazily created on
/// the first time that `Evented::register` is called and then stored in the
/// I/O object.
///
/// Also note that for types which represent streams of bytes the mio
/// interface of *readiness* doesn't map directly to the Windows model of
/// *completion*. This means that types will have to perform internal
/// buffering to ensure that a readiness interface can be provided. For a
/// sample implementation see the TCP/UDP modules in mio itself.
///
/// * `Overlapped` - this type is intended to be used as the concrete instances
/// of the `OVERLAPPED` type that most win32 methods expect. It's crucial, for
/// safety, that all asynchronous operations are initiated with an instance of
/// `Overlapped` and not another instantiation of `OVERLAPPED`.
///
/// Mio's `Overlapped` type is created with a function pointer that receives
/// a `OVERLAPPED_ENTRY` type when called. This `OVERLAPPED_ENTRY` type is
/// defined in the `winapi` crate. Whenever a completion is posted to an IOCP
/// object the `OVERLAPPED` that was signaled will be interpreted as
/// `Overlapped` in the mio crate and this function pointer will be invoked.
/// Through this function pointer, and through the `OVERLAPPED` pointer,
/// implementations can handle management of I/O events.
///
/// When put together these two types enable custom Windows handles to be
/// registered with mio's event loops. The `Binding` type is used to associate
/// handles and the `Overlapped` type is used to execute I/O operations. When
/// the I/O operations are completed a custom function pointer is called which
/// typically modifies a `SetReadiness` set by `Evented` methods which will get
/// later hooked into the mio event loop.
#[cfg(windows)]
pub mod windows {
pub use sys::{Overlapped, Binding};
}
// Conversion utilities
mod convert {
use std::time::Duration;
const NANOS_PER_MILLI: u32 = 1_000_000;
const MILLIS_PER_SEC: u64 = 1_000;
/// Convert a `Duration` to milliseconds, rounding up and saturating at
/// `u64::MAX`.
///
/// The saturating is fine because `u64::MAX` milliseconds are still many
/// million years.
pub fn millis(duration: Duration) -> u64 {
// Round up.
let millis = (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI;
duration.as_secs().saturating_mul(MILLIS_PER_SEC).saturating_add(millis as u64)
}
}<|fim▁end|> | mod token;
|
<|file_name|>reduction.py<|end_file_name|><|fim▁begin|>#
# Module to allow connection and socket objects to be transferred
# between processes
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
__all__ = []
import os
import sys
import socket
import threading
from . import current_process
from ._ext import _billiard, win32
from .forking import Popen, duplicate, close, ForkingPickler
from .util import register_after_fork, debug, sub_debug
from .connection import Client, Listener
if not(sys.platform == 'win32' or hasattr(_billiard, 'recvfd')):
raise ImportError('pickling of connections not supported')
# globals set later
_listener = None
_lock = None
_cache = set()
#
# Platform specific definitions
#
if sys.platform == 'win32':
# XXX Should this subprocess import be here?
import _subprocess # noqa
def send_handle(conn, handle, destination_pid):
process_handle = win32.OpenProcess(
win32.PROCESS_ALL_ACCESS, False, destination_pid
)
try:
new_handle = duplicate(handle, process_handle)
conn.send(new_handle)
finally:
close(process_handle)
def recv_handle(conn):
return conn.recv()
else:
def send_handle(conn, handle, destination_pid): # noqa
_billiard.sendfd(conn.fileno(), handle)
def recv_handle(conn): # noqa
return _billiard.recvfd(conn.fileno())
#
# Support for a per-process server thread which caches pickled handles
#
def _reset(obj):
global _lock, _listener, _cache
for h in _cache:
close(h)
_cache.clear()
_lock = threading.Lock()
_listener = None
_reset(None)
register_after_fork(_reset, _reset)
def _get_listener():
global _listener
if _listener is None:
_lock.acquire()
try:
if _listener is None:
debug('starting listener and thread for sending handles')
_listener = Listener(authkey=current_process().authkey)
t = threading.Thread(target=_serve)
t.daemon = True
t.start()
finally:
_lock.release()
return _listener
def _serve():
from .util import is_exiting, sub_warning
while 1:
try:
conn = _listener.accept()
handle_wanted, destination_pid = conn.recv()
_cache.remove(handle_wanted)
send_handle(conn, handle_wanted, destination_pid)
close(handle_wanted)
conn.close()
except:
if not is_exiting():
sub_warning('thread for sharing handles raised exception',
exc_info=True)
#
# Functions to be used for pickling/unpickling objects with handles
#
def reduce_handle(handle):
if Popen.thread_is_spawning():
return (None, Popen.duplicate_for_child(handle), True)
dup_handle = duplicate(handle)
_cache.add(dup_handle)
sub_debug('reducing handle %d', handle)
return (_get_listener().address, dup_handle, False)
def rebuild_handle(pickled_data):
address, handle, inherited = pickled_data
if inherited:
return handle
sub_debug('rebuilding handle %d', handle)
conn = Client(address, authkey=current_process().authkey)
conn.send((handle, os.getpid()))
new_handle = recv_handle(conn)
conn.close()
return new_handle
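# Illustrative flow (comment added; not in the original module): pickling a
# connection in one process goes through reduce_handle(), which caches a
# duplicated handle and publishes this process's listener address; unpickling
# in the receiving process goes through rebuild_handle() above, which connects
# back with Client() and receives the handle via send_handle()/recv_handle().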
#
# Register `_billiard.Connection` with `ForkingPickler`
#
def reduce_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_connection, (rh, conn.readable, conn.writable)
def rebuild_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _billiard.Connection(
handle, readable=readable, writable=writable
)
ForkingPickler.register(_billiard.Connection, reduce_connection)
#
# Register `socket.socket` with `ForkingPickler`
#
<|fim▁hole|> s = socket.fromfd(fd, family, type_, proto)
if s.__class__ is not socket.socket:
s = socket.socket(_sock=s)
return s
def reduce_socket(s):
reduced_handle = reduce_handle(s.fileno())
return rebuild_socket, (reduced_handle, s.family, s.type, s.proto)
def rebuild_socket(reduced_handle, family, type_, proto):
fd = rebuild_handle(reduced_handle)
_sock = fromfd(fd, family, type_, proto)
close(fd)
return _sock
ForkingPickler.register(socket.socket, reduce_socket)
#
# Register `_billiard.PipeConnection` with `ForkingPickler`
#
if sys.platform == 'win32':
def reduce_pipe_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
def rebuild_pipe_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _billiard.PipeConnection(
handle, readable=readable, writable=writable
)
ForkingPickler.register(_billiard.PipeConnection, reduce_pipe_connection)<|fim▁end|> |
def fromfd(fd, family, type_, proto=0): |
<|file_name|>I18nCSVTemplateLoader.java<|end_file_name|><|fim▁begin|>/*
* Copyright © 2013-2020, The SeedStack authors <http://seedstack.org>
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package org.seedstack.i18n.rest.internal.infrastructure.csv;
import com.google.common.collect.Sets;
import org.seedstack.i18n.rest.internal.locale.LocaleFinder;
import org.seedstack.i18n.rest.internal.locale.LocaleRepresentation;
import org.seedstack.io.spi.Template;
import org.seedstack.io.spi.TemplateLoader;
import org.seedstack.io.supercsv.Column;
import org.seedstack.io.supercsv.SuperCsvTemplate;
import org.seedstack.jpa.JpaUnit;
import org.seedstack.seed.transaction.Transactional;
import org.supercsv.cellprocessor.Optional;
import javax.inject.Inject;
import java.util.List;
import java.util.Set;
/**
* @author [email protected]
*/
public class I18nCSVTemplateLoader implements TemplateLoader {
public static final String I18N_CSV_TEMPLATE = "i18nTranslations";
public static final String KEY = "key";
@Inject
private LocaleFinder localeFinder;
@JpaUnit("seed-i18n-domain")
@Transactional
@Override
public Template load(String name) {
List<LocaleRepresentation> availableLocales = localeFinder.findAvailableLocales();
SuperCsvTemplate superCsvTemplate = new SuperCsvTemplate(name);
superCsvTemplate.addColumn(new Column(KEY, KEY, new Optional(), new Optional()));
for (LocaleRepresentation availableLocale : availableLocales) {
superCsvTemplate.addColumn(new Column(availableLocale.getCode(), availableLocale.getCode(), new Optional(), new Optional()));
}
return superCsvTemplate;
}
@Override
public Set<String> names() {
return Sets.newHashSet(I18N_CSV_TEMPLATE);
}
@Override
public boolean contains(String name) {
return names().contains(name);
}
@Override
public String templateRenderer() {
return I18nCSVRenderer.I18N_RENDERER;
}
<|fim▁hole|> public String templateParser() {
return CSVParser.I18N_PARSER;
}
}<|fim▁end|> | @Override |
<|file_name|>tml.py<|end_file_name|><|fim▁begin|>"""Functions for TML layout that are used in the grammar to construct DOM-like<|fim▁hole|>
def createNode(name, attributes=None, children=None):
"""Creates a DOM-like node object, using the 164 representation so that
the node can be processed by the 164 layout engine.
"""
    node = dict(attributes) if attributes else {}  # tolerate the None default
node['name'] = name
# Represent the list of child nodes as a dict with numeric keys.
node['children'] = dict(enumerate(children)) if children else {}
return node
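# Example (added for illustration):
#   createNode('Paragraph', {}, createWordNodes('hi there'))
# returns {'name': 'Paragraph', 'children': {0: w0, 1: w1}}, where w0 and w1
# are the Word node dicts {'word': 'hi ', ...} and {'word': 'there ', ...}.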
def createWordNodes(text):
"""Returns a Python list of DOM-like nodes, one for each word in the given
text.
"""
return [createNode('Word', {'word': word + ' '}) for word in text.split()]<|fim▁end|> | node objects used in the 164 layout engine.
""" |
<|file_name|>karma.config.js<|end_file_name|><|fim▁begin|>module.exports = function(config) {
config.set({
files: [
// Each file acts as entry point for the webpack configuration
{ pattern: 'test/*.test.js', watched: false },
{ pattern: 'test/**/*.test.js', watched: false }
],
preprocessors: {
// Add webpack as preprocessor
'test/*.test.js': ['webpack'],
'test/**/*.test.js': ['webpack']
},
webpack: {
// Karma watches the test entry points
// (you don't need to specify the entry option)
// webpack watches dependencies
},
webpackMiddleware: {
stats: 'errors-only'
},
// Which frameworks to use for testing
frameworks: ['jasmine'],
// Reporting strategy
reporters: ['progress'],
// Which browser to use for running tests
browsers: ['Chrome']<|fim▁hole|> });
};<|fim▁end|> | |
<|file_name|>block-must-not-have-result-do.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//<|fim▁hole|>// except according to those terms.
fn main() {
loop {
true //~ ERROR mismatched types
//~| expected ()
//~| found bool
//~| expected ()
//~| found bool
}
}<|fim▁end|> | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed |
<|file_name|>core.py<|end_file_name|><|fim▁begin|>import os
import sys
import json
import time
import numpy
import dendropy
from collections import defaultdict
import pdb
def parse_site_rates(rate_file, correction = 1, test = False, count = 0):
"""Parse the site rate file returned from hyphy to a vector of rates"""
# for whatever reason, when run in a virtualenv (and perhaps in other
# cases, the file does not seem to be written quite before we try
# to read it. so, pause and try to re-read up to three-times.
try:
data = json.load(open(rate_file, 'r'))
except IOError as e:
if count <= 3:
count += 1
time.sleep(0.1)
            return parse_site_rates(rate_file, correction, test, count)
else:
raise IOError("Cannot open {0}: {1}".format(rate_file, e))
rates = numpy.array([line["rate"] for line in data["sites"]["rates"]])
corrected = rates/correction
if not test:
data["sites"]["corrected_rates"] = [{"site":k + 1,"rate":v} \
for k,v in enumerate(corrected)]
json.dump(data, open(rate_file,'w'), indent = 4)
return corrected
def correct_branch_lengths(tree_file, format, output_dir):
"""Scale branch lengths to values shorter than 100"""
tree = dendropy.Tree.get_from_path(tree_file, format)
depth = tree.seed_node.distance_from_tip()
mean_branch_length = tree.length()/(2 * len(tree.leaf_nodes()) - 3)
string_len = len(str(int(mean_branch_length + 0.5)))
if string_len > 1:
correction_factor = 10 ** string_len
else:
correction_factor = 1
for edge in tree.preorder_edge_iter():
if edge.length:
edge.length /= correction_factor
pth = os.path.join(output_dir, '{0}.corrected.newick'.format(
os.path.basename(tree_file)
))
tree.write_to_path(pth, 'newick')
return pth, correction_factor
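# Worked example (comment added): a mean branch length of 123.4 gives
# str(int(123.4 + 0.5)) == '123', so string_len == 3 and every edge length is
# divided by 10 ** 3 = 1000, which brings values comfortably under 100.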
def get_net_pi_for_periods(pi, times):
"""Sum across the PI values for the requested times"""<|fim▁hole|>
def get_informative_sites(alignment, threshold=4):
"""Returns a list, where True indicates a site which was over the threshold
for informativeness.
"""
taxa = dendropy.DnaCharacterMatrix.get_from_path(alignment, 'nexus')
results = defaultdict(int)
for cells in taxa.vectors():
assert len(cells) == taxa.vector_size # should all have equal lengths
for idx, cell in enumerate(cells):
results[idx] += 1 if str(cell).upper() in "ATGC" else 0
return numpy.array([1 if results[x] >= threshold else numpy.nan for x in sorted(results)])
def cull_uninformative_rates(rates, inform):
"""Zeroes out rates which are uninformative"""
return rates * inform<|fim▁end|> | sums = numpy.nansum(pi, axis=1)[times]
return dict(zip(times, sums)) |
<|file_name|>Resize.js<|end_file_name|><|fim▁begin|><|fim▁hole|>
export class Resize {
constructor () {
this.ticking = false
// Create an instance of DistributeHeight for each element
$('[data-distribute-height]').each((index, element) => {
element.distributeHeight = new DistributeHeight($(element))
})
this.calculate = () => {
$('[data-distribute-height]').each((index, element) => {
element.distributeHeight.setHeights()
})
this.ticking = false
}
}
resize () {
$(window).on('load resize', () => {
this.tick()
})
}
tick () {
if (!this.ticking) {
requestAnimationFrame(this.calculate)
this.ticking = true
}
}
}<|fim▁end|> | import requestAnimationFrame from 'requestanimationframe'
import { DistributeHeight } from './DistributeHeight' |
<|file_name|>main.go<|end_file_name|><|fim▁begin|>package main
import (
"flag"
"fmt"
"go/ast"
"go/parser"
"go/token"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/fatih/camelcase"
"github.com/fatih/structtag"
)
func main() {
args := flag.Args()
if len(args) == 0 {
// Default: process the file
args = []string{os.Getenv("GOFILE")}
}
fname := args[0]
absFilePath, err := filepath.Abs(fname)
if err != nil {
panic(err)
}
paths := strings.SplitAfter(absFilePath, "packer"+string(os.PathSeparator))
packerDir := paths[0]
builderName, _ := filepath.Split(paths[1])<|fim▁hole|> builderName = strings.Trim(builderName, string(os.PathSeparator))
b, err := ioutil.ReadFile(fname)
if err != nil {
fmt.Printf("ReadFile: %+v", err)
os.Exit(1)
}
fset := token.NewFileSet()
f, err := parser.ParseFile(fset, fname, b, parser.ParseComments)
if err != nil {
fmt.Printf("ParseFile: %+v", err)
os.Exit(1)
}
for _, decl := range f.Decls {
typeDecl, ok := decl.(*ast.GenDecl)
if !ok {
continue
}
typeSpec, ok := typeDecl.Specs[0].(*ast.TypeSpec)
if !ok {
continue
}
structDecl, ok := typeSpec.Type.(*ast.StructType)
if !ok {
continue
}
fields := structDecl.Fields.List
sourcePath := filepath.ToSlash(paths[1])
header := Struct{
SourcePath: sourcePath,
Name: typeSpec.Name.Name,
Filename: typeSpec.Name.Name + ".mdx",
Header: typeDecl.Doc.Text(),
}
required := Struct{
SourcePath: sourcePath,
Name: typeSpec.Name.Name,
Filename: typeSpec.Name.Name + "-required.mdx",
}
notRequired := Struct{
SourcePath: sourcePath,
Name: typeSpec.Name.Name,
Filename: typeSpec.Name.Name + "-not-required.mdx",
}
for _, field := range fields {
if len(field.Names) == 0 || field.Tag == nil {
continue
}
tag := field.Tag.Value[1:]
tag = tag[:len(tag)-1]
tags, err := structtag.Parse(tag)
if err != nil {
fmt.Printf("structtag.Parse(%s): err: %v", field.Tag.Value, err)
os.Exit(1)
}
mstr, err := tags.Get("mapstructure")
if err != nil {
continue
}
name := mstr.Name
if name == "" {
continue
}
var docs string
if field.Doc != nil {
docs = field.Doc.Text()
} else {
docs = strings.Join(camelcase.Split(field.Names[0].Name), " ")
}
if strings.Contains(docs, "TODO") {
continue
}
fieldType := string(b[field.Type.Pos()-1 : field.Type.End()-1])
fieldType = strings.ReplaceAll(fieldType, "*", `\*`)
switch fieldType {
case "time.Duration":
fieldType = `duration string | ex: "1h5m2s"`
case "config.Trilean":
fieldType = `boolean`
case "hcl2template.NameValues":
fieldType = `[]{name string, value string}`
}
field := Field{
Name: name,
Type: fieldType,
Docs: docs,
}
if req, err := tags.Get("required"); err == nil && req.Value() == "true" {
required.Fields = append(required.Fields, field)
} else {
notRequired.Fields = append(notRequired.Fields, field)
}
}
dir := filepath.Join(packerDir, "website", "pages", "partials", builderName)
os.MkdirAll(dir, 0755)
for _, str := range []Struct{header, required, notRequired} {
if len(str.Fields) == 0 && len(str.Header) == 0 {
continue
}
outputPath := filepath.Join(dir, str.Filename)
outputFile, err := os.Create(outputPath)
if err != nil {
panic(err)
}
defer outputFile.Close()
err = structDocsTemplate.Execute(outputFile, str)
if err != nil {
fmt.Printf("%v", err)
os.Exit(1)
}
}
}
}<|fim▁end|> | |
<|file_name|>glue.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
use app_units::Au;
use data::{NUM_THREADS, PerDocumentStyleData};
use env_logger;
use euclid::Size2D;
use gecko_bindings::bindings::{RawGeckoDocument, RawGeckoElement, RawGeckoNode};
use gecko_bindings::bindings::{RawServoStyleSet, RawServoStyleSheet, ServoComputedValues};
use gecko_bindings::bindings::{ServoDeclarationBlock, ServoNodeData, ThreadSafePrincipalHolder};
use gecko_bindings::bindings::{ThreadSafeURIHolder, nsHTMLCSSStyleSheet};
use gecko_bindings::ptr::{GeckoArcPrincipal, GeckoArcURI};
use gecko_bindings::structs::ServoElementSnapshot;
use gecko_bindings::structs::nsRestyleHint;
use gecko_bindings::structs::{SheetParsingMode, nsIAtom};
use gecko_string_cache::Atom;
use snapshot::GeckoElementSnapshot;
use std::mem::transmute;
use std::ptr;
use std::slice;
use std::str::from_utf8_unchecked;
use std::sync::{Arc, Mutex};
use style::arc_ptr_eq;
use style::context::{LocalStyleContextCreationInfo, ReflowGoal, SharedStyleContext};
use style::dom::{TDocument, TElement, TNode};
use style::error_reporting::StdoutErrorReporter;
use style::gecko_glue::ArcHelpers;
use style::gecko_selector_impl::{GeckoSelectorImpl, PseudoElement};
use style::parallel;
use style::parser::ParserContextExtraData;
use style::properties::{ComputedValues, PropertyDeclarationBlock, parse_one_declaration};
use style::selector_impl::PseudoElementCascadeType;
use style::sequential;
use style::stylesheets::{Stylesheet, Origin};
use style::timer::Timer;
use traversal::RecalcStyleOnly;
use url::Url;
use wrapper::{DUMMY_BASE_URL, GeckoDocument, GeckoElement, GeckoNode, NonOpaqueStyleData};
/*
* For Gecko->Servo function calls, we need to redeclare the same signature that was declared in
* the C header in Gecko. In order to catch accidental mismatches, we run rust-bindgen against
* those signatures as well, giving us a second declaration of all the Servo_* functions in this
* crate. If there's a mismatch, LLVM will assert and abort, which is a rather awful thing to
* depend on but good enough for our purposes.
*/
#[no_mangle]
pub extern "C" fn Servo_Initialize() -> () {
// Enable standard Rust logging.
//
// See https://doc.rust-lang.org/log/env_logger/index.html for instructions.
env_logger::init().unwrap();
// Allocate our default computed values.
unsafe { ComputedValues::initialize(); }
}
#[no_mangle]
pub extern "C" fn Servo_Shutdown() -> () {
// Destroy our default computed values.
unsafe { ComputedValues::shutdown(); }
}
fn restyle_subtree(node: GeckoNode, raw_data: *mut RawServoStyleSet) {
debug_assert!(node.is_element() || node.is_text_node());
// Force the creation of our lazily-constructed initial computed values on
// the main thread, since it's not safe to call elsewhere.
//
// FIXME(bholley): this should move into Servo_Initialize as soon as we get
// rid of the HackilyFindSomeDeviceContext stuff that happens during
// initial_values computation, since that stuff needs to be called further
// along in startup than the sensible place to call Servo_Initialize.
ComputedValues::initial_values();
// The stylist consumes stylesheets lazily.
let per_doc_data = unsafe { &mut *(raw_data as *mut PerDocumentStyleData) };
per_doc_data.flush_stylesheets();
let local_context_data =
LocalStyleContextCreationInfo::new(per_doc_data.new_animations_sender.clone());
let shared_style_context = SharedStyleContext {
viewport_size: Size2D::new(Au(0), Au(0)),
screen_size_changed: false,
generation: 0,
goal: ReflowGoal::ForScriptQuery,
stylist: per_doc_data.stylist.clone(),
running_animations: per_doc_data.running_animations.clone(),
expired_animations: per_doc_data.expired_animations.clone(),
error_reporter: Box::new(StdoutErrorReporter),
local_context_creation_data: Mutex::new(local_context_data),
timer: Timer::new(),
};
// We ensure this is true before calling Servo_RestyleSubtree()
debug_assert!(node.is_dirty() || node.has_dirty_descendants());
if per_doc_data.num_threads == 1 {
sequential::traverse_dom::<GeckoNode, RecalcStyleOnly>(node, &shared_style_context);
} else {
parallel::traverse_dom::<GeckoNode, RecalcStyleOnly>(node, &shared_style_context,
&mut per_doc_data.work_queue);
}
}
#[no_mangle]
pub extern "C" fn Servo_RestyleSubtree(node: *mut RawGeckoNode,
raw_data: *mut RawServoStyleSet) -> () {
let node = unsafe { GeckoNode::from_raw(node) };
restyle_subtree(node, raw_data);
}
#[no_mangle]
pub extern "C" fn Servo_RestyleDocument(doc: *mut RawGeckoDocument, raw_data: *mut RawServoStyleSet) -> () {
let document = unsafe { GeckoDocument::from_raw(doc) };
let node = match document.root_node() {
Some(x) => x,
None => return,
};
restyle_subtree(node, raw_data);
}
#[no_mangle]
pub extern "C" fn Servo_StyleWorkerThreadCount() -> u32 {
*NUM_THREADS as u32
}
#[no_mangle]
pub extern "C" fn Servo_DropNodeData(data: *mut ServoNodeData) -> () {
unsafe {
let _ = Box::<NonOpaqueStyleData>::from_raw(data as *mut NonOpaqueStyleData);
}
}
#[no_mangle]
pub extern "C" fn Servo_StylesheetFromUTF8Bytes(bytes: *const u8,
length: u32,
mode: SheetParsingMode,
base_bytes: *const u8,
base_length: u32,
base: *mut ThreadSafeURIHolder,
referrer: *mut ThreadSafeURIHolder,
principal: *mut ThreadSafePrincipalHolder)
-> *mut RawServoStyleSheet {
let input = unsafe { from_utf8_unchecked(slice::from_raw_parts(bytes, length as usize)) };
let origin = match mode {
SheetParsingMode::eAuthorSheetFeatures => Origin::Author,
SheetParsingMode::eUserSheetFeatures => Origin::User,
SheetParsingMode::eAgentSheetFeatures => Origin::UserAgent,
};
let base_str = unsafe { from_utf8_unchecked(slice::from_raw_parts(base_bytes, base_length as usize)) };
let url = Url::parse(base_str).unwrap();
let extra_data = ParserContextExtraData {
base: Some(GeckoArcURI::new(base)),
referrer: Some(GeckoArcURI::new(referrer)),
principal: Some(GeckoArcPrincipal::new(principal)),
};
let sheet = Arc::new(Stylesheet::from_str(input, url, origin, Box::new(StdoutErrorReporter),
extra_data));
unsafe {
transmute(sheet)
}
}
#[no_mangle]
pub extern "C" fn Servo_AppendStyleSheet(raw_sheet: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
data.stylesheets.retain(|x| !arc_ptr_eq(x, sheet));
data.stylesheets.push(sheet.clone());
data.stylesheets_changed = true;
});
}
#[no_mangle]
pub extern "C" fn Servo_PrependStyleSheet(raw_sheet: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
data.stylesheets.retain(|x| !arc_ptr_eq(x, sheet));
data.stylesheets.insert(0, sheet.clone());
data.stylesheets_changed = true;
})
}
#[no_mangle]
pub extern "C" fn Servo_InsertStyleSheetBefore(raw_sheet: *mut RawServoStyleSheet,
raw_reference: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
Helpers::with(raw_reference, |reference| {
data.stylesheets.retain(|x| !arc_ptr_eq(x, sheet));
let index = data.stylesheets.iter().position(|x| arc_ptr_eq(x, reference)).unwrap();
data.stylesheets.insert(index, sheet.clone());
data.stylesheets_changed = true;
})
})
}
#[no_mangle]
pub extern "C" fn Servo_RemoveStyleSheet(raw_sheet: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
data.stylesheets.retain(|x| !arc_ptr_eq(x, sheet));
data.stylesheets_changed = true;
});
}
#[no_mangle]
pub extern "C" fn Servo_StyleSheetHasRules(raw_sheet: *mut RawServoStyleSheet) -> bool {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
Helpers::with(raw_sheet, |sheet| !sheet.rules.is_empty())
}
#[no_mangle]
pub extern "C" fn Servo_AddRefStyleSheet(sheet: *mut RawServoStyleSheet) -> () {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
unsafe { Helpers::addref(sheet) };
}
#[no_mangle]
pub extern "C" fn Servo_ReleaseStyleSheet(sheet: *mut RawServoStyleSheet) -> () {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
unsafe { Helpers::release(sheet) };
}
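// Computed-value accessors. Arc<ComputedValues> crosses the FFI boundary as
// an opaque pointer; refcounts are balanced via Servo_AddRefComputedValues
// and Servo_ReleaseComputedValues.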
#[no_mangle]
pub extern "C" fn Servo_GetComputedValues(node: *mut RawGeckoNode)
-> *mut ServoComputedValues {
let node = unsafe { GeckoNode::from_raw(node) };
    let arc_cv = match node.borrow_data().and_then(|data| data.style.clone()) {
Some(style) => style,
None => {
// FIXME(bholley): This case subverts the intended semantics of this
// function, and exists only to make stylo builds more robust corner-
// cases where Gecko wants the style for a node that Servo never
// traversed. We should remove this as soon as possible.
error!("stylo: encountered unstyled node, substituting default values.");
Arc::new(ComputedValues::initial_values().clone())
},
};
unsafe { transmute(arc_cv) }
}
#[no_mangle]
pub extern "C" fn Servo_GetComputedValuesForAnonymousBox(parent_style_or_null: *mut ServoComputedValues,
pseudo_tag: *mut nsIAtom,
raw_data: *mut RawServoStyleSet)
-> *mut ServoComputedValues {
// The stylist consumes stylesheets lazily.
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
data.flush_stylesheets();
let atom = Atom::from(pseudo_tag);
let pseudo = PseudoElement::from_atom_unchecked(atom, /* anon_box = */ true);
type Helpers = ArcHelpers<ServoComputedValues, ComputedValues>;
Helpers::maybe_with(parent_style_or_null, |maybe_parent| {
let new_computed = data.stylist.precomputed_values_for_pseudo(&pseudo, maybe_parent);
new_computed.map_or(ptr::null_mut(), |c| Helpers::from(c))
})
}
#[no_mangle]
pub extern "C" fn Servo_GetComputedValuesForPseudoElement(parent_style: *mut ServoComputedValues,
match_element: *mut RawGeckoElement,
pseudo_tag: *mut nsIAtom,
raw_data: *mut RawServoStyleSet,
is_probe: bool)
-> *mut ServoComputedValues {
debug_assert!(!match_element.is_null());
let parent_or_null = || {
if is_probe {
ptr::null_mut()
} else {
Servo_AddRefComputedValues(parent_style);
parent_style
}
};
let atom = Atom::from(pseudo_tag);
let pseudo = PseudoElement::from_atom_unchecked(atom, /* anon_box = */ false);
// The stylist consumes stylesheets lazily.
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
data.flush_stylesheets();
let element = unsafe { GeckoElement::from_raw(match_element) };
type Helpers = ArcHelpers<ServoComputedValues, ComputedValues>;
match GeckoSelectorImpl::pseudo_element_cascade_type(&pseudo) {
PseudoElementCascadeType::Eager => {
let node = element.as_node();
let maybe_computed = node.borrow_data()
.and_then(|data| {
data.per_pseudo.get(&pseudo).map(|c| c.clone())
});
maybe_computed.map_or_else(parent_or_null, Helpers::from)
}
PseudoElementCascadeType::Lazy => {
Helpers::with(parent_style, |parent| {
data.stylist
.lazily_compute_pseudo_element_style(&element, &pseudo, parent)
.map_or_else(parent_or_null, Helpers::from)
})
}
PseudoElementCascadeType::Precomputed => {
unreachable!("Anonymous pseudo found in \
Servo_GetComputedValuesForPseudoElement");
}
}
}
#[no_mangle]
pub extern "C" fn Servo_InheritComputedValues(parent_style: *mut ServoComputedValues)
-> *mut ServoComputedValues {
type Helpers = ArcHelpers<ServoComputedValues, ComputedValues>;
let style = if parent_style.is_null() {
Arc::new(ComputedValues::initial_values().clone())
} else {
Helpers::with(parent_style, ComputedValues::inherit_from)
};
Helpers::from(style)
}
#[no_mangle]
pub extern "C" fn Servo_AddRefComputedValues(ptr: *mut ServoComputedValues) -> () {
type Helpers = ArcHelpers<ServoComputedValues, ComputedValues>;
unsafe { Helpers::addref(ptr) };
}
#[no_mangle]
pub extern "C" fn Servo_ReleaseComputedValues(ptr: *mut ServoComputedValues) -> () {
type Helpers = ArcHelpers<ServoComputedValues, ComputedValues>;
unsafe { Helpers::release(ptr) };
}
#[no_mangle]
pub extern "C" fn Servo_InitStyleSet() -> *mut RawServoStyleSet {
let data = Box::new(PerDocumentStyleData::new());
Box::into_raw(data) as *mut RawServoStyleSet
}
#[no_mangle]
pub extern "C" fn Servo_DropStyleSet(data: *mut RawServoStyleSet) -> () {
unsafe {
let _ = Box::<PerDocumentStyleData>::from_raw(data as *mut PerDocumentStyleData);
}
}
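// A parsed style attribute as shared with Gecko: the declaration block
// itself, a pointer back to the owning attribute cache, and a flag marking
// the block immutable.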
pub struct GeckoDeclarationBlock {
pub declarations: Option<PropertyDeclarationBlock>,
pub cache: *mut nsHTMLCSSStyleSheet,
pub immutable: bool,
}
#[no_mangle]
pub extern "C" fn Servo_ParseStyleAttribute(bytes: *const u8, length: u32,
cache: *mut nsHTMLCSSStyleSheet)
-> *mut ServoDeclarationBlock {
let value = unsafe { from_utf8_unchecked(slice::from_raw_parts(bytes, length as usize)) };
let declarations = Box::new(GeckoDeclarationBlock {
declarations: GeckoElement::parse_style_attribute(value),
cache: cache,
immutable: false,
});
Box::into_raw(declarations) as *mut ServoDeclarationBlock<|fim▁hole|>pub extern "C" fn Servo_DropDeclarationBlock(declarations: *mut ServoDeclarationBlock) {
unsafe {
let _ = Box::<GeckoDeclarationBlock>::from_raw(declarations as *mut GeckoDeclarationBlock);
}
}
#[no_mangle]
pub extern "C" fn Servo_GetDeclarationBlockCache(declarations: *mut ServoDeclarationBlock)
-> *mut nsHTMLCSSStyleSheet {
let declarations = unsafe { (declarations as *const GeckoDeclarationBlock).as_ref().unwrap() };
declarations.cache
}
#[no_mangle]
pub extern "C" fn Servo_SetDeclarationBlockImmutable(declarations: *mut ServoDeclarationBlock) {
let declarations = unsafe { (declarations as *mut GeckoDeclarationBlock).as_mut().unwrap() };
declarations.immutable = true;
}
#[no_mangle]
pub extern "C" fn Servo_ClearDeclarationBlockCachePointer(declarations: *mut ServoDeclarationBlock) {
let declarations = unsafe { (declarations as *mut GeckoDeclarationBlock).as_mut().unwrap() };
declarations.cache = ptr::null_mut();
}
#[no_mangle]
pub extern "C" fn Servo_CSSSupports(property: *const u8, property_length: u32,
value: *const u8, value_length: u32) -> bool {
let property = unsafe { from_utf8_unchecked(slice::from_raw_parts(property, property_length as usize)) };
let value = unsafe { from_utf8_unchecked(slice::from_raw_parts(value, value_length as usize)) };
let base_url = &*DUMMY_BASE_URL;
let extra_data = ParserContextExtraData::default();
match parse_one_declaration(&property, &value, &base_url, Box::new(StdoutErrorReporter), extra_data) {
Ok(decls) => !decls.is_empty(),
Err(()) => false,
}
}
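// Computes the restyle hint for an element given a snapshot of its previous
// state; the resulting bits map directly onto Gecko's nsRestyleHint.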
#[no_mangle]
pub extern "C" fn Servo_ComputeRestyleHint(element: *mut RawGeckoElement,
snapshot: *mut ServoElementSnapshot,
raw_data: *mut RawServoStyleSet) -> nsRestyleHint {
let per_doc_data = unsafe { &mut *(raw_data as *mut PerDocumentStyleData) };
let snapshot = unsafe { GeckoElementSnapshot::from_raw(snapshot) };
let element = unsafe { GeckoElement::from_raw(element) };
// NB: This involves an FFI call, we can get rid of it easily if needed.
let current_state = element.get_state();
let hint = per_doc_data.stylist
.compute_restyle_hint(&element, &snapshot,
current_state);
// NB: Binary representations match.
unsafe { transmute(hint.bits() as u32) }
}<|fim▁end|> | }
#[no_mangle] |
<|file_name|>store.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import io
import os
import six
import pytest
from pytest_pootle.factories import (
LanguageDBFactory, ProjectDBFactory, StoreDBFactory,
TranslationProjectFactory)
from pytest_pootle.utils import update_store
from translate.storage.factory import getclass
from django.db.models import Max
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
from pootle.core.delegate import (
config, format_classes, format_diffs, formats)
from pootle.core.models import Revision
from pootle.core.delegate import deserializers, serializers
from pootle.core.url_helpers import to_tp_relative_path
from pootle.core.plugin import provider
from pootle.core.serializers import Serializer, Deserializer
from pootle_app.models import Directory
from pootle_config.exceptions import ConfigurationError
from pootle_format.exceptions import UnrecognizedFiletype
from pootle_format.formats.po import PoStoreSyncer
from pootle_format.models import Format
from pootle_language.models import Language
from pootle_project.models import Project
from pootle_statistics.models import (
SubmissionFields, SubmissionTypes)
from pootle_store.constants import (
NEW, OBSOLETE, PARSED, POOTLE_WINS, TRANSLATED)
from pootle_store.diff import DiffableStore, StoreDiff
from pootle_store.models import Store
from pootle_store.util import parse_pootle_revision
from pootle_translationproject.models import TranslationProject
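# Helper: wraps an on-disk file in a SimpleUploadedFile and pushes it through
# Store.update(), mimicking an upload from the web UI.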
def _update_from_upload_file(store, update_file,
content_type="text/x-gettext-translation",
user=None, submission_type=None):
with open(update_file, "r") as f:
upload = SimpleUploadedFile(os.path.basename(update_file),
f.read(),
content_type)
test_store = getclass(upload)(upload.read())
store_revision = parse_pootle_revision(test_store)
store.update(test_store, store_revision=store_revision,
user=user, submission_type=submission_type)
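# Helper: serializes a store through its syncer, stamping the Pootle path and
# max unit revision into the headers when the format supports metadata.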
def _store_as_string(store):
ttk = store.syncer.convert(store.syncer.file_class)
if hasattr(ttk, "updateheader"):
# FIXME We need those headers on import
# However some formats just don't support setting metadata
ttk.updateheader(
add=True, X_Pootle_Path=store.pootle_path)
ttk.updateheader(
add=True, X_Pootle_Revision=store.get_max_unit_revision())
return str(ttk)
@pytest.mark.django_db
def test_delete_mark_obsolete(project0_nongnu, project0, store0):
"""Tests that the in-DB Store and Directory are marked as obsolete
after the on-disk file ceased to exist.
Refs. #269.
"""
tp = TranslationProjectFactory(
project=project0, language=LanguageDBFactory())
store = StoreDBFactory(
translation_project=tp,
parent=tp.directory)
store.update(store.deserialize(store0.serialize()))
store.sync()
pootle_path = store.pootle_path
# Remove on-disk file
os.remove(store.file.path)
# Update stores by rescanning TP
tp.scan_files()
# Now files that ceased to exist should be marked as obsolete
updated_store = Store.objects.get(pootle_path=pootle_path)
assert updated_store.obsolete
# The units they contained are obsolete too
assert not updated_store.units.exists()
assert updated_store.unit_set.filter(state=OBSOLETE).exists()
obs_unit = updated_store.unit_set.filter(state=OBSOLETE).first()
    assert obs_unit.submission_set.count() == 0
@pytest.mark.django_db
def test_sync(project0_nongnu, project0, store0):
"""Tests that the new on-disk file is created after sync for existing
in-DB Store if the corresponding on-disk file ceased to exist.
"""
tp = TranslationProjectFactory(
project=project0, language=LanguageDBFactory())
store = StoreDBFactory(
translation_project=tp,
parent=tp.directory)
store.update(store.deserialize(store0.serialize()))
assert not store.file.exists()
store.sync()
assert store.file.exists()
os.remove(store.file.path)
assert not store.file.exists()
store.sync()
assert store.file.exists()
@pytest.mark.django_db
def test_update_from_ts(store0, test_fs, member):
    store0.state = PARSED
orig_units = store0.units.count()
existing_created_at = store0.units.aggregate(
Max("creation_time"))["creation_time__max"]
existing_mtime = store0.units.aggregate(
Max("mtime"))["mtime__max"]
old_revision = store0.data.max_unit_revision
with test_fs.open(['data', 'ts', 'tutorial', 'en', 'tutorial.ts']) as f:
store = getclass(f)(f.read())
store0.update(
store,
submission_type=SubmissionTypes.UPLOAD,
user=member)
assert not store0.units[orig_units].hasplural()
unit = store0.units[orig_units + 1]
assert unit.submission_set.count() == 0
assert unit.hasplural()
assert unit.creation_time >= existing_created_at
assert unit.creation_time >= existing_mtime
unit_source = unit.unit_source
assert unit_source.created_with == SubmissionTypes.UPLOAD
assert unit_source.created_by == member
assert unit.change.changed_with == SubmissionTypes.UPLOAD
assert unit.change.submitted_by == member
assert unit.change.submitted_on >= unit.creation_time
assert unit.change.reviewed_by is None
assert unit.change.reviewed_on is None
assert unit.revision > old_revision
@pytest.mark.django_db
def test_update_ts_plurals(store_po, test_fs, ts):
project = store_po.translation_project.project
filetype_tool = project.filetype_tool
project.filetypes.add(ts)
filetype_tool.set_store_filetype(store_po, ts)
with test_fs.open(['data', 'ts', 'add_plurals.ts']) as f:
file_store = getclass(f)(f.read())
store_po.update(file_store)
unit = store_po.units[0]
assert unit.hasplural()
assert unit.submission_set.count() == 0
with test_fs.open(['data', 'ts', 'update_plurals.ts']) as f:
file_store = getclass(f)(f.read())
store_po.update(file_store)
unit = store_po.units[0]
assert unit.hasplural()
assert unit.submission_set.count() == 1
update_sub = unit.submission_set.first()
assert update_sub.revision == unit.revision
assert update_sub.creation_time == unit.change.submitted_on
assert update_sub.submitter == unit.change.submitted_by
assert update_sub.new_value == unit.target
assert update_sub.type == unit.change.changed_with
assert update_sub.field == SubmissionFields.TARGET
# this fails 8(
# from pootle.core.utils.multistring import unparse_multistring
# assert (
# unparse_multistring(update_sub.new_value)
# == unparse_multistring(unit.target))
@pytest.mark.django_db
def test_update_with_non_ascii(store0, test_fs):
store0.state = PARSED
orig_units = store0.units.count()
path = 'data', 'po', 'tutorial', 'en', 'tutorial_non_ascii.po'
with test_fs.open(path) as f:
store = getclass(f)(f.read())
store0.update(store)
last_unit = store0.units[orig_units]
updated_target = "Hèḽḽě, ŵôrḽḓ"
assert last_unit.target == updated_target
assert last_unit.submission_set.count() == 0
# last_unit.target = "foo"
# last_unit.save()
# this should now have a submission with the old target
# but it fails
# assert last_unit.submission_set.count() == 1
# update_sub = last_unit.submission_set.first()
# assert update_sub.old_value == updated_target
# assert update_sub.new_value == "foo"
@pytest.mark.django_db
def test_update_unit_order(project0_nongnu, ordered_po, ordered_update_ttk):
"""Tests unit order after a specific update.
"""
# Set last sync revision
ordered_po.sync()
assert ordered_po.file.exists()
expected_unit_list = ['1->2', '2->4', '3->3', '4->5']
updated_unit_list = [unit.unitid for unit in ordered_po.units]
assert expected_unit_list == updated_unit_list
original_revision = ordered_po.get_max_unit_revision()
ordered_po.update(
ordered_update_ttk,
store_revision=original_revision)
expected_unit_list = [
'X->1', '1->2', '3->3', '2->4',
'4->5', 'X->6', 'X->7', 'X->8']
updated_unit_list = [unit.unitid for unit in ordered_po.units]
assert expected_unit_list == updated_unit_list
unit = ordered_po.units.first()
assert unit.revision > original_revision
assert unit.submission_set.count() == 0
@pytest.mark.django_db
def test_update_save_changed_units(project0_nongnu, store0, member, system):
"""Tests that any update saves changed units only.
"""
# not sure if this is testing anything
store = store0
# Set last sync revision
store.sync()
store.update(store.file.store)
unit_list = list(store.units)
store.file = 'tutorial/ru/update_save_changed_units_updated.po'
store.update(store.file.store, user=member)
updated_unit_list = list(store.units)
# nothing changed
for index in range(0, len(unit_list)):
unit = unit_list[index]
updated_unit = updated_unit_list[index]
assert unit.revision == updated_unit.revision
assert unit.mtime == updated_unit.mtime
assert unit.target == updated_unit.target
@pytest.mark.django_db
def test_update_set_last_sync_revision(project0_nongnu, tp0, store0, test_fs):
"""Tests setting last_sync_revision after store creation.
"""
unit = store0.units.first()
unit.target = "UPDATED TARGET"
unit.save()
store0.sync()
# Store is already parsed and store.last_sync_revision should be equal to
# max unit revision
assert store0.last_sync_revision == store0.get_max_unit_revision()
# store.last_sync_revision is not changed after empty update
saved_last_sync_revision = store0.last_sync_revision
store0.updater.update_from_disk()
assert store0.last_sync_revision == saved_last_sync_revision
orig = str(store0)
update_file = test_fs.open(
"data/po/tutorial/ru/update_set_last_sync_revision_updated.po",
"r")
with update_file as sourcef:
with open(store0.file.path, "wb") as targetf:
targetf.write(sourcef.read())
store0 = Store.objects.get(pk=store0.pk)
# any non-empty update sets last_sync_revision to next global revision
next_revision = Revision.get() + 1
store0.updater.update_from_disk()
assert store0.last_sync_revision == next_revision
# store.last_sync_revision is not changed after empty update (even if it
# has unsynced units)
item_index = 0
next_unit_revision = Revision.get() + 1
dbunit = store0.units.first()
dbunit.target = "ANOTHER DB TARGET UPDATE"
dbunit.save()
assert dbunit.revision == next_unit_revision
store0.updater.update_from_disk()
assert store0.last_sync_revision == next_revision
# Non-empty update sets store.last_sync_revision to next global revision
# (even the store has unsynced units). There is only one unsynced unit in
# this case so its revision should be set next to store.last_sync_revision
next_revision = Revision.get() + 1
with open(store0.file.path, "wb") as targetf:
targetf.write(orig)
store0 = Store.objects.get(pk=store0.pk)
store0.updater.update_from_disk()
assert store0.last_sync_revision == next_revision
# Get unsynced unit in DB. Its revision should be greater
# than store.last_sync_revision to allow to keep this change during
# update from a file
dbunit = store0.units[item_index]
assert dbunit.revision == store0.last_sync_revision + 1
@pytest.mark.django_db
def test_update_upload_defaults(store0, system):
store0.state = PARSED
unit = store0.units.first()
original_revision = unit.revision
last_sub_pk = unit.submission_set.order_by(
"id").values_list("id", flat=True).last() or 0
update_store(
store0,
[(unit.source, "%s UPDATED" % unit.source, False)],
store_revision=Revision.get() + 1)
unit = store0.units[0]
    assert unit.change.submitted_by == system
    assert unit.change.submitted_on >= unit.creation_time
assert (
unit.submission_set.last().type
== SubmissionTypes.SYSTEM)
assert unit.revision > original_revision
    # there should be 2 new subs - state_change and target_change
    new_subs = unit.submission_set.filter(id__gt=last_sub_pk).order_by("id")
assert new_subs.count() == 2
target_sub = new_subs[0]
assert target_sub.old_value == ""
assert target_sub.new_value == unit.target
assert target_sub.field == SubmissionFields.TARGET
assert target_sub.type == SubmissionTypes.SYSTEM
assert target_sub.submitter == system
assert target_sub.revision == unit.revision
assert target_sub.creation_time == unit.change.submitted_on
state_sub = new_subs[1]
assert state_sub.old_value == "0"
assert state_sub.new_value == "200"
assert state_sub.field == SubmissionFields.STATE
assert state_sub.type == SubmissionTypes.SYSTEM
assert state_sub.submitter == system
assert state_sub.revision == unit.revision
assert state_sub.creation_time == unit.change.submitted_on
@pytest.mark.django_db
def test_update_upload_member_user(store0, system, member):
store0.state = PARSED
original_unit = store0.units.first()
original_revision = original_unit.revision
last_sub_pk = original_unit.submission_set.order_by(
"id").values_list("id", flat=True).last() or 0
update_store(
store0,
[(original_unit.source, "%s UPDATED" % original_unit.source, False)],
user=member,
store_revision=Revision.get() + 1,
submission_type=SubmissionTypes.UPLOAD)
unit = store0.units[0]
assert unit.change.submitted_by == member
assert unit.change.changed_with == SubmissionTypes.UPLOAD
assert unit.change.submitted_on >= unit.creation_time
assert unit.change.reviewed_on is None
assert unit.revision > original_revision
unit_source = unit.unit_source
    assert unit_source.created_by == system
    assert unit_source.created_with == SubmissionTypes.SYSTEM
# there should be 2 new subs - state_change and target_change
new_subs = unit.submission_set.filter(id__gt=last_sub_pk).order_by("id")
assert new_subs.count() == 2
target_sub = new_subs[0]
assert target_sub.old_value == ""
assert target_sub.new_value == unit.target
assert target_sub.field == SubmissionFields.TARGET
assert target_sub.type == SubmissionTypes.UPLOAD
assert target_sub.submitter == member
assert target_sub.revision == unit.revision
assert target_sub.creation_time == unit.change.submitted_on
state_sub = new_subs[1]
assert state_sub.old_value == "0"
assert state_sub.new_value == "200"
assert state_sub.field == SubmissionFields.STATE
assert state_sub.type == SubmissionTypes.UPLOAD
assert state_sub.submitter == member
assert state_sub.revision == unit.revision
assert state_sub.creation_time == unit.change.submitted_on
@pytest.mark.django_db
def test_update_upload_submission_type(store0):
store0.state = PARSED
unit = store0.units.first()
last_sub_pk = unit.submission_set.order_by(
"id").values_list("id", flat=True).last() or 0
update_store(
store0,
[(unit.source, "%s UPDATED" % unit.source, False)],
submission_type=SubmissionTypes.UPLOAD,
store_revision=Revision.get() + 1)
unit_source = store0.units[0].unit_source
unit_change = store0.units[0].change
assert unit_source.created_with == SubmissionTypes.SYSTEM
assert unit_change.changed_with == SubmissionTypes.UPLOAD
# there should be 2 new subs - state_change and target_change
# and both should show as by UPLOAD
new_subs = unit.submission_set.filter(id__gt=last_sub_pk)
assert (
list(new_subs.values_list("type", flat=True))
== [SubmissionTypes.UPLOAD] * 2)
@pytest.mark.django_db
def test_update_upload_new_revision(store0, member):
original_revision = store0.data.max_unit_revision
old_unit = store0.units.first()
update_store(
store0,
[("Hello, world", "Hello, world UPDATED", False)],
submission_type=SubmissionTypes.UPLOAD,
store_revision=Revision.get() + 1,
user=member)
old_unit.refresh_from_db()
assert old_unit.state == OBSOLETE
assert len(store0.units) == 1
unit = store0.units[0]
unit_source = unit.unit_source
assert unit.revision > original_revision
assert unit_source.created_by == member
assert unit.change.submitted_by == member
assert unit.creation_time == unit.change.submitted_on
assert unit.change.reviewed_by is None
assert unit.change.reviewed_on is None
assert unit.target == "Hello, world UPDATED"
assert unit.submission_set.count() == 0
@pytest.mark.django_db
def test_update_upload_again_new_revision(store0, member, member2):
store = store0
assert store.state == NEW
original_unit = store0.units[0]
update_store(
store,
[("Hello, world", "Hello, world UPDATED", False)],
submission_type=SubmissionTypes.UPLOAD,
store_revision=Revision.get() + 1,
user=member)
original_unit.refresh_from_db()
assert original_unit.state == OBSOLETE
store = Store.objects.get(pk=store0.pk)
assert store.state == PARSED
created_unit = store.units[0]
assert created_unit.target == "Hello, world UPDATED"
assert created_unit.state == TRANSLATED
assert created_unit.submission_set.count() == 0
old_unit_revision = store.data.max_unit_revision
update_store(
store0,
[("Hello, world", "Hello, world UPDATED AGAIN", False)],
submission_type=SubmissionTypes.WEB,
user=member2,
store_revision=Revision.get() + 1)
assert created_unit.submission_set.count() == 1
update_sub = created_unit.submission_set.first()
store = Store.objects.get(pk=store0.pk)
assert store.state == PARSED
unit = store.units[0]
unit_source = unit.unit_source
assert unit.revision > old_unit_revision
assert unit.target == "Hello, world UPDATED AGAIN"
assert unit_source.created_by == member
assert unit_source.created_with == SubmissionTypes.UPLOAD
assert unit.change.submitted_by == member2
assert unit.change.submitted_on >= unit.creation_time
assert unit.change.reviewed_by is None
assert unit.change.reviewed_on is None
assert unit.change.changed_with == SubmissionTypes.WEB
assert update_sub.creation_time == unit.change.submitted_on
assert update_sub.type == unit.change.changed_with
assert update_sub.field == SubmissionFields.TARGET
assert update_sub.submitter == unit.change.submitted_by
assert update_sub.old_value == created_unit.target
assert update_sub.new_value == unit.target
assert update_sub.revision == unit.revision
@pytest.mark.django_db
def test_update_upload_old_revision_unit_conflict(store0, admin, member):
original_revision = Revision.get()
original_unit = store0.units[0]
update_store(
store0,
[("Hello, world", "Hello, world UPDATED", False)],
submission_type=SubmissionTypes.UPLOAD,
store_revision=original_revision + 1,
user=admin)
unit = store0.units[0]
unit_source = unit.unit_source
assert unit_source.created_by == admin
updated_revision = unit.revision
assert (
unit_source.created_with
== SubmissionTypes.UPLOAD)
assert unit.change.submitted_by == admin
assert (
unit.change.changed_with
== SubmissionTypes.UPLOAD)
last_submit_time = unit.change.submitted_on
assert last_submit_time >= unit.creation_time
# load update with expired revision and conflicting unit
update_store(
store0,
[("Hello, world", "Hello, world CONFLICT", False)],
submission_type=SubmissionTypes.WEB,
store_revision=original_revision,
user=member)
unit = store0.units[0]
assert unit.submission_set.count() == 0
unit_source = unit.unit_source
# unit target is not updated and revision remains the same
assert store0.units[0].target == "Hello, world UPDATED"
assert unit.revision == updated_revision
unit_source = original_unit.unit_source
unit_source.created_by == admin
assert unit_source.created_with == SubmissionTypes.SYSTEM
    assert unit.change.changed_with == SubmissionTypes.UPLOAD
    assert unit.change.submitted_by == admin
    assert unit.change.submitted_on == last_submit_time
    assert unit.change.reviewed_by is None
    assert unit.change.reviewed_on is None
# but suggestion is added
suggestion = store0.units[0].get_suggestions()[0]
assert suggestion.target == "Hello, world CONFLICT"
assert suggestion.user == member
@pytest.mark.django_db
def test_update_upload_new_revision_new_unit(store0, member):
file_name = "pytest_pootle/data/po/tutorial/en/tutorial_update_new_unit.po"
store0.state = PARSED
old_unit_revision = store0.data.max_unit_revision
_update_from_upload_file(
store0,
file_name,
user=member,
submission_type=SubmissionTypes.WEB)
unit = store0.units.last()
unit_source = unit.unit_source
# the new unit has been added
assert unit.submission_set.count() == 0
assert unit.revision > old_unit_revision
assert unit.target == 'Goodbye, world'
assert unit_source.created_by == member
assert unit_source.created_with == SubmissionTypes.WEB
assert unit.change.submitted_by == member
assert unit.change.changed_with == SubmissionTypes.WEB
@pytest.mark.django_db
def test_update_upload_old_revision_new_unit(store0, member2):
store0.units.delete()
store0.state = PARSED
old_unit_revision = store0.data.max_unit_revision
# load initial update
_update_from_upload_file(
store0,
"pytest_pootle/data/po/tutorial/en/tutorial_update.po")
# load old revision with new unit
file_name = "pytest_pootle/data/po/tutorial/en/tutorial_update_old_unit.po"
_update_from_upload_file(
store0,
file_name,
user=member2,
submission_type=SubmissionTypes.WEB)
    # the unit has been added because it's not already obsoleted
assert store0.units.count() == 2
unit = store0.units.last()
unit_source = unit.unit_source
# the new unit has been added
assert unit.submission_set.count() == 0
assert unit.revision > old_unit_revision
assert unit.target == 'Goodbye, world'
assert unit_source.created_by == member2
assert unit_source.created_with == SubmissionTypes.WEB
assert unit.change.submitted_by == member2
assert unit.change.changed_with == SubmissionTypes.WEB
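# The _test_store_update_* helpers below are driven by the parametrized
# test_store_update test; each one verifies a different aspect of the
# post-update store state.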
def _test_store_update_indexes(store, *test_args):
    # make sure indexes are not messed up - indexes only have to be unique
indexes = [x.index for x in store.units]
assert len(indexes) == len(set(indexes))
def _test_store_update_units_before(*test_args):
# test what has happened to the units that were present before the update
(store, units_update, store_revision, resolve_conflict,
units_before, member_, member2) = test_args
updates = {unit[0]: unit[1] for unit in units_update}
for unit, change in units_before:
updated_unit = store.unit_set.get(unitid=unit.unitid)
if unit.source not in updates:
# unit is not in update, target should be left unchanged
assert updated_unit.target == unit.target
assert updated_unit.change.submitted_by == change.submitted_by
            # depending on unit/store_revision it may or may not be obsoleted
if unit.isobsolete() or store_revision >= unit.revision:
assert updated_unit.isobsolete()
else:
assert not updated_unit.isobsolete()
else:
# unit is in update
if store_revision >= unit.revision:
assert not updated_unit.isobsolete()
elif unit.isobsolete():
# the unit has been obsoleted since store_revision
assert updated_unit.isobsolete()
else:
assert not updated_unit.isobsolete()
if not updated_unit.isobsolete():
if store_revision >= unit.revision:
# file store wins outright
assert updated_unit.target == updates[unit.source]
if unit.target != updates[unit.source]:
# unit has changed, or was resurrected
assert updated_unit.change.submitted_by == member2
# damn mysql microsecond precision
if change.submitted_on.time().microsecond != 0:
assert (
updated_unit.change.submitted_on
!= change.submitted_on)
elif unit.isobsolete():
# unit has changed, or was resurrected
assert updated_unit.change.reviewed_by == member2
# damn mysql microsecond precision
if change.reviewed_on.time().microsecond != 0:
assert (
updated_unit.change.reviewed_on
!= change.reviewed_on)
else:
assert (
updated_unit.change.submitted_by
== change.submitted_by)
assert (
updated_unit.change.submitted_on
== change.submitted_on)
assert updated_unit.get_suggestions().count() == 0
else:
# conflict found
suggestion = updated_unit.get_suggestions()[0]
if resolve_conflict == POOTLE_WINS:
assert updated_unit.target == unit.target
assert (
updated_unit.change.submitted_by
== change.submitted_by)
assert suggestion.target == updates[unit.source]
assert suggestion.user == member2
else:
assert updated_unit.target == updates[unit.source]
assert updated_unit.change.submitted_by == member2
assert suggestion.target == unit.target
assert suggestion.user == change.submitted_by
def _test_store_update_ordering(*test_args):
(store, units_update, store_revision, resolve_conflict_,
units_before, member_, member2_) = test_args
updates = {unit[0]: unit[1] for unit in units_update}
old_units = {unit.source: unit for unit, change in units_before}
# test ordering
new_unit_list = []
for unit, change_ in units_before:
add_unit = (not unit.isobsolete()
and unit.source not in updates
and unit.revision > store_revision)
if add_unit:
new_unit_list.append(unit.source)
for source, target_, is_fuzzy_ in units_update:
if source in old_units:
old_unit = old_units[source]
should_add = (not old_unit.isobsolete()
or old_unit.revision <= store_revision)
if should_add:
new_unit_list.append(source)
else:
new_unit_list.append(source)
assert new_unit_list == [x.source for x in store.units]
def _test_store_update_units_now(*test_args):
(store, units_update, store_revision, resolve_conflict_,
units_before, member_, member2_) = test_args
    # test that all the units currently in the store are expected to be there
updates = {unit[0]: unit[1] for unit in units_update}
old_units = {unit.source: unit for unit, change in units_before}
for unit in store.units:
assert (
unit.source in updates
or (old_units[unit.source].revision > store_revision
and not old_units[unit.source].isobsolete()))
@pytest.mark.django_db
def test_store_update(param_update_store_test):
_test_store_update_indexes(*param_update_store_test)
_test_store_update_units_before(*param_update_store_test)
_test_store_update_units_now(*param_update_store_test)
_test_store_update_ordering(*param_update_store_test)
@pytest.mark.django_db
def test_store_file_diff(store_diff_tests):
diff, store, update_units, store_revision = store_diff_tests
assert diff.target_store == store
assert diff.source_revision == store_revision
assert (
update_units
== [(x.source, x.target, x.isfuzzy())
for x in diff.source_store.units[1:]]
== [(v['source'], v['target'], v['state'] == 50)
for v in diff.source_units.values()])
assert diff.active_target_units == [x.source for x in store.units]
assert diff.target_revision == store.get_max_unit_revision()
assert (
diff.target_units
== {unit["source_f"]: unit
for unit
in store.unit_set.values("source_f", "index", "target_f",
"state", "unitid", "id", "revision",
"developer_comment", "translator_comment",
"locations", "context")})
diff_diff = diff.diff()
if diff_diff is not None:
assert (
sorted(diff_diff.keys())
== ["add", "index", "obsolete", "update"])
    # obsoleted units have no index - so just check that they all match
obsoleted = (store.unit_set.filter(state=OBSOLETE)
.filter(revision__gt=store_revision)
.values_list("source_f", flat=True))
assert len(diff.obsoleted_target_units) == obsoleted.count()
assert all(x in diff.obsoleted_target_units for x in obsoleted)
assert (
diff.updated_target_units
== list(store.units.filter(revision__gt=store_revision)
.values_list("source_f", flat=True)))
@pytest.mark.django_db
def test_store_repr():
store = Store.objects.first()
assert str(store) == str(store.syncer.convert(store.syncer.file_class))
assert repr(store) == u"<Store: %s>" % store.pootle_path
@pytest.mark.django_db
def test_store_po_deserializer(test_fs, store_po):
with test_fs.open("data/po/complex.po") as test_file:
test_string = test_file.read()
ttk_po = getclass(test_file)(test_string)
store_po.update(store_po.deserialize(test_string))
assert len(ttk_po.units) - 1 == store_po.units.count()
@pytest.mark.django_db
def test_store_po_serializer(test_fs, store_po):
with test_fs.open("data/po/complex.po") as test_file:
test_string = test_file.read()
ttk_po = getclass(test_file)(test_string)
store_po.update(store_po.deserialize(test_string))
store_io = io.BytesIO(store_po.serialize())
store_ttk = getclass(store_io)(store_io.read())
assert len(store_ttk.units) == len(ttk_po.units)
@pytest.mark.django_db
def test_store_po_serializer_custom(test_fs, store_po):
class SerializerCheck(object):
original_data = None
context = None
checker = SerializerCheck()
class EGSerializer(Serializer):
@property
def output(self):
checker.original_data = self.original_data
checker.context = self.context
@provider(serializers, sender=Project)
def provide_serializers(**kwargs):
return dict(eg_serializer=EGSerializer)
with test_fs.open("data/po/complex.po") as test_file:
test_string = test_file.read()
# ttk_po = getclass(test_file)(test_string)
store_po.update(store_po.deserialize(test_string))
# add config to the project
project = store_po.translation_project.project
config.get(project.__class__, instance=project).set_config(
"pootle.core.serializers",
["eg_serializer"])
store_po.serialize()
assert checker.context == store_po
assert (
not isinstance(checker.original_data, six.text_type)
and isinstance(checker.original_data, str))
assert checker.original_data == _store_as_string(store_po)
@pytest.mark.django_db
def test_store_po_deserializer_custom(test_fs, store_po):
class DeserializerCheck(object):
original_data = None
context = None
checker = DeserializerCheck()
class EGDeserializer(Deserializer):
@property
def output(self):
checker.context = self.context
checker.original_data = self.original_data
return self.original_data
@provider(deserializers, sender=Project)
def provide_deserializers(**kwargs):
return dict(eg_deserializer=EGDeserializer)
with test_fs.open("data/po/complex.po") as test_file:
test_string = test_file.read()
# add config to the project
project = store_po.translation_project.project
config.get().set_config(
"pootle.core.deserializers",
["eg_deserializer"],
project)
store_po.deserialize(test_string)
assert checker.original_data == test_string
assert checker.context == store_po
@pytest.mark.django_db
def test_store_base_serializer(store_po):
original_data = "SOME DATA"
serializer = Serializer(store_po, original_data)
assert serializer.context == store_po
assert serializer.data == original_data
@pytest.mark.django_db
def test_store_base_deserializer(store_po):
original_data = "SOME DATA"
deserializer = Deserializer(store_po, original_data)
assert deserializer.context == store_po
assert deserializer.data == original_data
@pytest.mark.django_db
def test_store_set_bad_deserializers(store_po):
project = store_po.translation_project.project
with pytest.raises(ConfigurationError):
config.get(project.__class__, instance=project).set_config(
"pootle.core.deserializers",
["DESERIALIZER_DOES_NOT_EXIST"])
class EGDeserializer(object):
pass
@provider(deserializers)
def provide_deserializers(**kwargs):
return dict(eg_deserializer=EGDeserializer)
# must be list
with pytest.raises(ConfigurationError):
config.get(project.__class__, instance=project).set_config(
"pootle.core.deserializers",
"eg_deserializer")
with pytest.raises(ConfigurationError):
config.get(project.__class__, instance=project).set_config(
"pootle.core.deserializers",
dict(serializer="eg_deserializer"))
config.get(project.__class__, instance=project).set_config(
"pootle.core.deserializers",
["eg_deserializer"])
@pytest.mark.django_db
def test_store_set_bad_serializers(store_po):
project = store_po.translation_project.project
with pytest.raises(ConfigurationError):
config.get(project.__class__, instance=project).set_config(
"pootle.core.serializers",
["SERIALIZER_DOES_NOT_EXIST"])
class EGSerializer(Serializer):
pass
@provider(serializers)
def provide_serializers(**kwargs):
return dict(eg_serializer=EGSerializer)
# must be list
with pytest.raises(ConfigurationError):
config.get(project.__class__, instance=project).set_config(
"pootle.core.serializers",
"eg_serializer")
with pytest.raises(ConfigurationError):
config.get(project.__class__, instance=project).set_config(
"pootle.core.serializers",
dict(serializer="eg_serializer"))
config.get(project.__class__, instance=project).set_config(
"pootle.core.serializers",
["eg_serializer"])
@pytest.mark.django_db
def test_store_create_by_bad_path(project0):
# bad project name
with pytest.raises(Project.DoesNotExist):
Store.objects.create_by_path(
"/language0/does/not/exist.po")
# bad language code
with pytest.raises(Language.DoesNotExist):
Store.objects.create_by_path(
"/does/project0/not/exist.po")
    # project and project code don't match
with pytest.raises(ValueError):
Store.objects.create_by_path(
"/language0/project1/store.po",
project=project0)
# bad store.ext
with pytest.raises(ValueError):
Store.objects.create_by_path(
"/language0/project0/store_by_path.foo")
    # subdir doesn't exist
path = '/language0/project0/path/to/subdir.po'
with pytest.raises(Directory.DoesNotExist):
Store.objects.create_by_path(
path, create_directory=False)
path = '/%s/project0/notp.po' % LanguageDBFactory().code
with pytest.raises(TranslationProject.DoesNotExist):
Store.objects.create_by_path(
path, create_tp=False)
@pytest.mark.django_db
def test_store_create_by_path(po_directory):
# create in tp
path = '/language0/project0/path.po'
store = Store.objects.create_by_path(path)
assert store.pootle_path == path
# "create" in tp again - get existing store
store = Store.objects.create_by_path(path)
assert store.pootle_path == path
# create in existing subdir
path = '/language0/project0/subdir0/exists.po'
store = Store.objects.create_by_path(path)
assert store.pootle_path == path
# create in new subdir
path = '/language0/project0/path/to/subdir.po'
store = Store.objects.create_by_path(path)
assert store.pootle_path == path
@pytest.mark.django_db
def test_store_create_by_path_with_project(project0):
# create in tp with project
path = '/language0/project0/path2.po'
store = Store.objects.create_by_path(
path, project=project0)
assert store.pootle_path == path
# create in existing subdir with project
path = '/language0/project0/subdir0/exists2.po'
store = Store.objects.create_by_path(
path, project=project0)
assert store.pootle_path == path
# create in new subdir with project
path = '/language0/project0/path/to/subdir2.po'
store = Store.objects.create_by_path(
path, project=project0)
assert store.pootle_path == path
@pytest.mark.django_db
def test_store_create_by_new_tp_path(po_directory):
language = LanguageDBFactory()
path = '/%s/project0/tp.po' % language.code
store = Store.objects.create_by_path(path)
assert store.pootle_path == path
assert store.translation_project.language == language
language = LanguageDBFactory()
path = '/%s/project0/with/subdir/tp.po' % language.code
store = Store.objects.create_by_path(path)
assert store.pootle_path == path
assert store.translation_project.language == language
@pytest.mark.django_db
def test_store_create(tp0):
tp = tp0
project = tp.project
registry = formats.get()
po = Format.objects.get(name="po")
po2 = registry.register("special_po_2", "po")
po3 = registry.register("special_po_3", "po")
xliff = Format.objects.get(name="xliff")
project.filetypes.add(xliff)
project.filetypes.add(po2)
project.filetypes.add(po3)
store = Store.objects.create(
name="store.po",
parent=tp.directory,
translation_project=tp)
assert store.filetype == po
assert not store.is_template
store = Store.objects.create(
name="store.pot",
parent=tp.directory,
translation_project=tp)
# not in source_language folder
assert not store.is_template
assert store.filetype == po
store = Store.objects.create(
name="store.xliff",
parent=tp.directory,
translation_project=tp)
assert store.filetype == xliff
# push po to the back of the queue
project.filetypes.remove(po)
project.filetypes.add(po)
store = Store.objects.create(
name="another_store.po",
parent=tp.directory,
translation_project=tp)
assert store.filetype == po2
store = Store.objects.create(
name="another_store.pot",
parent=tp.directory,
translation_project=tp)
assert store.filetype == po
store = Store.objects.create(
name="another_store.xliff",
parent=tp.directory,
translation_project=tp)
with pytest.raises(UnrecognizedFiletype):
store = Store.objects.create(
name="another_store.foo",
parent=tp.directory,
translation_project=tp)
@pytest.mark.django_db
def test_store_create_name_with_slashes_or_backslashes(tp0):
"""Test Stores are not created with (back)slashes on their name."""
with pytest.raises(ValidationError):
Store.objects.create(name="slashed/name.po", parent=tp0.directory,
translation_project=tp0)
with pytest.raises(ValidationError):
Store.objects.create(name="backslashed\\name.po", parent=tp0.directory,
translation_project=tp0)
@pytest.mark.django_db
def test_store_get_file_class():
store = Store.objects.filter(
translation_project__project__code="project0",
translation_project__language__code="language0").first()
# this matches because po is recognised by ttk
assert store.syncer.file_class == getclass(store)
# file_class is cached so lets delete it
del store.syncer.__dict__["file_class"]
class CustomFormatClass(object):
pass
@provider(format_classes)
def format_class_provider(**kwargs):
return dict(po=CustomFormatClass)
    # we get the CustomFormatClass as it was registered
    assert store.syncer.file_class is CustomFormatClass
    # the Store.filetype is used in this case, not the name
store.name = "new_store_name.foo"
del store.syncer.__dict__["file_class"]
assert store.syncer.file_class is CustomFormatClass
# lets register a foo filetype
format_registry = formats.get()
foo_filetype = format_registry.register("foo", "foo")
store.filetype = foo_filetype
store.save()
# oh no! not recognised by ttk
del store.syncer.__dict__["file_class"]
with pytest.raises(ValueError):
store.syncer.file_class
@provider(format_classes)
def another_format_class_provider(**kwargs):
return dict(foo=CustomFormatClass)
# works now
assert store.syncer.file_class is CustomFormatClass
format_classes.disconnect(format_class_provider)
format_classes.disconnect(another_format_class_provider)
@pytest.mark.django_db
def test_store_get_template_file_class(po_directory, templates):
project = ProjectDBFactory(source_language=templates)
tp = TranslationProjectFactory(language=templates, project=project)
format_registry = formats.get()
foo_filetype = format_registry.register("foo", "foo", template_extension="bar")
tp.project.filetypes.add(foo_filetype)
store = Store.objects.create(
name="mystore.bar",
translation_project=tp,
parent=tp.directory)
# oh no! not recognised by ttk
with pytest.raises(ValueError):
store.syncer.file_class
class CustomFormatClass(object):
pass
@provider(format_classes)
def format_class_provider(**kwargs):
return dict(foo=CustomFormatClass)
assert store.syncer.file_class == CustomFormatClass
format_classes.disconnect(format_class_provider)
@pytest.mark.django_db
def test_store_create_templates(po_directory, templates):
project = ProjectDBFactory(source_language=templates)
tp = TranslationProjectFactory(language=templates, project=project)
po = Format.objects.get(name="po")
store = Store.objects.create(
name="mystore.pot",
translation_project=tp,
parent=tp.directory)
assert store.filetype == po
assert store.is_template
@pytest.mark.django_db
def test_store_get_or_create_templates(po_directory, templates):
project = ProjectDBFactory(source_language=templates)
tp = TranslationProjectFactory(language=templates, project=project)
po = Format.objects.get(name="po")
store = Store.objects.get_or_create(
name="mystore.pot",
translation_project=tp,
parent=tp.directory)[0]
assert store.filetype == po
assert store.is_template
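# StoreDiff tests: diff a target store against a source store at a given
# source revision, covering deletes, obsoletions and updates on either side.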
@pytest.mark.django_db
def test_store_diff(diffable_stores):
target_store, source_store = diffable_stores
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() + 1)
# no changes
assert not differ.diff()
assert differ.target_store == target_store
assert differ.source_store == source_store
@pytest.mark.django_db
def test_store_diff_delete_target_unit(diffable_stores):
target_store, source_store = diffable_stores
# delete a unit in the target store
remove_unit = target_store.units.first()
remove_unit.delete()
    # the unit will always be re-added (as it's not obsolete)
# with source_revision to the max
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision())
result = differ.diff()
assert result["add"][0][0].source_f == remove_unit.source_f
assert len(result["add"]) == 1
assert len(result["index"]) == 0
assert len(result["obsolete"]) == 0
assert result['update'] == (set(), {})
# and source_revision to 0
differ = StoreDiff(
target_store,
source_store,
0)
result = differ.diff()
assert result["add"][0][0].source_f == remove_unit.source_f
assert len(result["add"]) == 1
assert len(result["index"]) == 0
assert len(result["obsolete"]) == 0
assert result['update'] == (set(), {})
@pytest.mark.django_db
def test_store_diff_delete_source_unit(diffable_stores):
target_store, source_store = diffable_stores
# delete a unit in the source store
remove_unit = source_store.units.first()
remove_unit.delete()
# set the source_revision to max and the unit will be obsoleted
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision())
result = differ.diff()
to_remove = target_store.units.get(unitid=remove_unit.unitid)
assert result["obsolete"] == [to_remove.pk]
assert len(result["obsolete"]) == 1
assert len(result["add"]) == 0
assert len(result["index"]) == 0
    # set the source_revision to less than the target store's max_revision
    # and the unit will be ignored, as it's assumed to have been previously
# deleted
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() - 1)
assert not differ.diff()
@pytest.mark.django_db
def test_store_diff_delete_obsoleted_target_unit(diffable_stores):
target_store, source_store = diffable_stores
# delete a unit in the source store
remove_unit = source_store.units.first()
remove_unit.delete()
# and obsolete the same unit in the target
obsolete_unit = target_store.units.get(unitid=remove_unit.unitid)
obsolete_unit.makeobsolete()
obsolete_unit.save()
# as the unit is already obsolete - nothing
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() + 1)
assert not differ.diff()
@pytest.mark.django_db
def test_store_diff_obsoleted_target_unit(diffable_stores):
target_store, source_store = diffable_stores
# obsolete a unit in target
obsolete_unit = target_store.units.first()
obsolete_unit.makeobsolete()
obsolete_unit.save()
# as the revision is higher it gets unobsoleted
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() + 1)
result = differ.diff()
assert result["update"][0] == set([obsolete_unit.pk])
assert len(result["update"][1]) == 1
assert result["update"][1][obsolete_unit.unitid]["dbid"] == obsolete_unit.pk
# if the revision is less - no change
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() - 1)
assert not differ.diff()
@pytest.mark.django_db
def test_store_diff_update_target_unit(diffable_stores):
target_store, source_store = diffable_stores
# update a unit in target
update_unit = target_store.units.first()
update_unit.target_f = "Some other string"
update_unit.save()
# the unit is always marked for update
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() + 1)
result = differ.diff()
assert result["update"][0] == set([update_unit.pk])
assert result["update"][1] == {}
assert len(result["add"]) == 0
assert len(result["index"]) == 0
differ = StoreDiff(
target_store,
source_store,
0)
result = differ.diff()
assert result["update"][0] == set([update_unit.pk])
assert result["update"][1] == {}
assert len(result["add"]) == 0
assert len(result["index"]) == 0
@pytest.mark.django_db
def test_store_diff_update_source_unit(diffable_stores):
target_store, source_store = diffable_stores
# update a unit in source
update_unit = source_store.units.first()
update_unit.target_f = "Some other string"
update_unit.save()
target_unit = target_store.units.get(
unitid=update_unit.unitid)
# the unit is always marked for update
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() + 1)
result = differ.diff()
assert result["update"][0] == set([target_unit.pk])
assert result["update"][1] == {}
assert len(result["add"]) == 0
assert len(result["index"]) == 0
differ = StoreDiff(
target_store,
source_store,
0)
result = differ.diff()
assert result["update"][0] == set([target_unit.pk])
assert result["update"][1] == {}
assert len(result["add"]) == 0
assert len(result["index"]) == 0
@pytest.mark.django_db
def test_store_diff_custom(diffable_stores):
target_store, source_store = diffable_stores
class CustomDiffableStore(DiffableStore):
pass
@provider(format_diffs)
def format_diff_provider(**kwargs):
return {
target_store.filetype.name: CustomDiffableStore}
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() + 1)
assert isinstance(
differ.diffable, CustomDiffableStore)
@pytest.mark.django_db
def test_store_diff_delete_obsoleted_source_unit(diffable_stores):
target_store, source_store = diffable_stores
# delete a unit in the target store
remove_unit = target_store.units.first()
remove_unit.delete()
# and obsolete the same unit in the target
obsolete_unit = source_store.units.get(unitid=remove_unit.unitid)
obsolete_unit.makeobsolete()
obsolete_unit.save()
# as the unit is already obsolete - nothing
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() + 1)
assert not differ.diff()
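# Syncer tests: verify how Store.syncer maps DB state back onto on-disk
# files, including obsoletion and structural updates.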
@pytest.mark.django_db
def test_store_syncer(tp0):
store = tp0.stores.live().first()
assert isinstance(store.syncer, PoStoreSyncer)
assert store.syncer.file_class == getclass(store)
assert store.syncer.translation_project == store.translation_project
assert (
store.syncer.language
== store.translation_project.language)
assert (
store.syncer.project
== store.translation_project.project)
assert (
store.syncer.source_language
== store.translation_project.project.source_language)
@pytest.mark.django_db
def test_store_syncer_obsolete_unit(tp0):
store = tp0.stores.live().first()
unit = store.units.filter(state=TRANSLATED).first()
unit_syncer = store.syncer.unit_sync_class(unit)
newunit = unit_syncer.create_unit(store.syncer.file_class.UnitClass)
    # unit is untranslated, it's always just deleted
obsolete, deleted = store.syncer.obsolete_unit(newunit, True)
assert not obsolete
assert deleted
obsolete, deleted = store.syncer.obsolete_unit(newunit, False)
assert not obsolete
assert deleted
# set unit to translated
newunit.target = unit.target
    # if conservative, nothing changes
obsolete, deleted = store.syncer.obsolete_unit(newunit, True)
assert not obsolete
assert not deleted
    # not conservative - the unit is obsoleted, not deleted
obsolete, deleted = store.syncer.obsolete_unit(newunit, False)
assert obsolete
assert not deleted
@pytest.mark.django_db
def test_store_syncer_sync_store(tp0, dummy_store_syncer):
store = tp0.stores.live().first()
DummyStoreSyncer, __, expected = dummy_store_syncer
disk_store = store.syncer.convert()
dummy_syncer = DummyStoreSyncer(store, expected=expected)
result = dummy_syncer.sync(
disk_store,
expected["last_revision"],
update_structure=expected["update_structure"],
conservative=expected["conservative"])
assert result[0] is True
assert result[1]["updated"] == expected["changes"]
    # conservative makes no difference here
expected["conservative"] = False
dummy_syncer = DummyStoreSyncer(store, expected=expected)
result = dummy_syncer.sync(
disk_store,
expected["last_revision"],
update_structure=expected["update_structure"],
conservative=expected["conservative"])
assert result[0] is True
assert result[1]["updated"] == expected["changes"]
@pytest.mark.django_db
def test_store_syncer_sync_store_no_changes(tp0, dummy_store_syncer):
store = tp0.stores.live().first()
DummyStoreSyncer, __, expected = dummy_store_syncer
disk_store = store.syncer.convert()
# no changes
expected["changes"] = []
expected["conservative"] = True
dummy_syncer = DummyStoreSyncer(store, expected=expected)
result = dummy_syncer.sync(
disk_store,
expected["last_revision"],
expected["update_structure"],
expected["conservative"])
assert result[0] is False
assert not result[1].get("updated")
    # conservative makes no difference here
expected["conservative"] = False
dummy_syncer = DummyStoreSyncer(store, expected=expected)
result = dummy_syncer.sync(
disk_store,
expected["last_revision"],
expected["update_structure"],
expected["conservative"])
assert result[0] is False
assert not result[1].get("updated")
@pytest.mark.django_db
def test_store_syncer_sync_store_structure(tp0, dummy_store_syncer):
store = tp0.stores.live().first()
DummyStoreSyncer, DummyDiskStore, expected = dummy_store_syncer
<|fim▁hole|> expected["update_structure"] = True
expected["changes"] = []
dummy_syncer = DummyStoreSyncer(store, expected=expected)
result = dummy_syncer.sync(
disk_store,
expected["last_revision"],
expected["update_structure"],
expected["conservative"])
assert result[0] is True
assert result[1]["updated"] == []
assert result[1]["obsolete"] == 8
assert result[1]["deleted"] == 9
assert result[1]["added"] == 10
expected["obsolete_units"] = []
expected["new_units"] = []
expected["changes"] = []
dummy_syncer = DummyStoreSyncer(store, expected=expected)
result = dummy_syncer.sync(
disk_store,
expected["last_revision"],
expected["update_structure"],
expected["conservative"])
assert result[0] is False
@pytest.mark.django_db
def test_store_syncer_sync_update_structure(dummy_store_structure_syncer, tp0):
store = tp0.stores.live().first()
DummyStoreSyncer, DummyDiskStore, DummyUnit = dummy_store_structure_syncer
expected = dict(
unit_class="FOO",
conservative=True,
obsolete_delete=(True, True),
obsolete_units=["a", "b", "c"])
expected["new_units"] = [
DummyUnit(unit, expected=expected)
for unit in ["5", "6", "7"]]
syncer = DummyStoreSyncer(store, expected=expected)
disk_store = DummyDiskStore(expected)
result = syncer.update_structure(
disk_store,
expected["obsolete_units"],
expected["new_units"],
expected["conservative"])
obsolete_units = (
len(expected["obsolete_units"])
if expected["obsolete_delete"][0]
else 0)
deleted_units = (
len(expected["obsolete_units"])
if expected["obsolete_delete"][1]
else 0)
new_units = len(expected["new_units"])
assert result == (obsolete_units, deleted_units, new_units)
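# Shared helpers: "new" units are those whose ids are in new_ids but not
# old_ids; "obsolete" candidates are the reverse set, skipping units that
# are already obsolete on disk.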
def _test_get_new(results, syncer, old_ids, new_ids):
assert list(results) == list(
syncer.store.findid_bulk(
[syncer.dbid_index.get(uid)
for uid
in new_ids - old_ids]))
def _test_get_obsolete(results, disk_store, syncer, old_ids, new_ids):
assert list(results) == list(
disk_store.findid(uid)
for uid
in old_ids - new_ids
if (disk_store.findid(uid)
and not disk_store.findid(uid).isobsolete()))
@pytest.mark.django_db
def test_store_syncer_obsolete_units(dummy_store_syncer_units, tp0):
store = tp0.stores.live().first()
disk_store = store.syncer.convert()
expected = dict(
old_ids=set(),
new_ids=set(),
disk_ids={})
syncer = dummy_store_syncer_units(store, expected=expected)
results = syncer.get_units_to_obsolete(
disk_store, expected["old_ids"], expected["new_ids"])
_test_get_obsolete(
results, disk_store, syncer,
expected["old_ids"], expected["new_ids"])
expected = dict(
old_ids=set(["2", "3", "4"]),
new_ids=set(["3", "4", "5"]),
disk_ids={"3": "foo", "4": "bar", "5": "baz"})
results = syncer.get_units_to_obsolete(
disk_store, expected["old_ids"], expected["new_ids"])
_test_get_obsolete(
results, disk_store, syncer, expected["old_ids"], expected["new_ids"])
@pytest.mark.django_db
def test_store_syncer_new_units(dummy_store_syncer_units, tp0):
store = tp0.stores.live().first()
expected = dict(
old_ids=set(),
new_ids=set(),
disk_ids={},
db_ids={})
syncer = dummy_store_syncer_units(store, expected=expected)
results = syncer.get_new_units(
expected["old_ids"], expected["new_ids"])
_test_get_new(
results, syncer, expected["old_ids"], expected["new_ids"])
expected = dict(
old_ids=set(["2", "3", "4"]),
new_ids=set(["3", "4", "5"]),
db_ids={"3": "foo", "4": "bar", "5": "baz"})
syncer = dummy_store_syncer_units(store, expected=expected)
results = syncer.get_new_units(
expected["old_ids"], expected["new_ids"])
_test_get_new(
results, syncer, expected["old_ids"], expected["new_ids"])
@pytest.mark.django_db
def test_store_path(store0):
assert store0.path == to_tp_relative_path(store0.pootle_path)
@pytest.mark.django_db
def test_store_sync_empty(project0_nongnu, tp0, caplog):
store = StoreDBFactory(
name="empty.po",
translation_project=tp0,
parent=tp0.directory)
store.sync()
assert os.path.exists(store.file.path)
modified = os.stat(store.file.path).st_mtime
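    # a second sync with no changes must not rewrite the file, so mtime is stable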
store.sync()
assert modified == os.stat(store.file.path).st_mtime
# warning message - nothing changes
store.sync(conservative=True, only_newer=False)
assert "nothing changed" in caplog.records[-1].message
assert modified == os.stat(store.file.path).st_mtime
@pytest.mark.django_db
def test_store_sync_template(project0_nongnu, templates_project0, caplog):
template = templates_project0.stores.first()
template.sync()
modified = os.stat(template.file.path).st_mtime
unit = template.units.first()
unit.target = "NEW TARGET"
unit.save()
template.sync(conservative=True)
assert modified == os.stat(template.file.path).st_mtime
template.sync(conservative=False)
assert not modified == os.stat(template.file.path).st_mtime
@pytest.mark.django_db
def test_store_update_with_state_change(store0, admin):
units = dict([(x.id, (x.source, x.target, not x.isfuzzy()))
for x in store0.units])
update_store(
store0,
units=units.values(),
store_revision=store0.data.max_unit_revision,
user=admin)
for unit_id, unit in units.items():
assert unit[2] == store0.units.get(id=unit_id).isfuzzy()
@pytest.mark.django_db
def test_update_xliff(store_po, test_fs, xliff):
project = store_po.translation_project.project
filetype_tool = project.filetype_tool
project.filetypes.add(xliff)
filetype_tool.set_store_filetype(store_po, xliff)
with test_fs.open(['data', 'xliff', 'welcome.xliff']) as f:
file_store = getclass(f)(f.read())
store_po.update(file_store)
unit = store_po.units[0]
assert unit.istranslated()
with test_fs.open(['data', 'xliff', 'updated_welcome.xliff']) as f:
file_store = getclass(f)(f.read())
store_po.update(file_store)
updated_unit = store_po.units.get(id=unit.id)
assert unit.source != updated_unit.source
@pytest.mark.django_db
def test_update_resurrect(store_po, test_fs):
with test_fs.open(['data', 'po', 'obsolete.po']) as f:
file_store = getclass(f)(f.read())
store_po.update(file_store)
obsolete_units = store_po.unit_set.filter(state=OBSOLETE)
obsolete_ids = list(obsolete_units.values_list('id', flat=True))
assert len(obsolete_ids) > 0
with test_fs.open(['data', 'po', 'resurrected.po']) as f:
file_store = getclass(f)(f.read())
store_revision = store_po.data.max_unit_revision
# set store_revision as we do in update_stores cli command
store_po.update(file_store, store_revision=store_revision - 1)
obsolete_units = store_po.unit_set.filter(state=OBSOLETE)
assert obsolete_units.count() == len(obsolete_ids)
for unit in obsolete_units.filter(id__in=obsolete_ids):
assert unit.isobsolete()
# set store_revision as we do in update_stores cli command
store_po.update(file_store, store_revision=store_revision)
units = store_po.units.filter(id__in=obsolete_ids)
assert units.count() == len(obsolete_ids)
for unit in units:
assert not unit.isobsolete()
@pytest.mark.django_db
def test_store_comment_update(store0, member):
ttk = store0.deserialize(store0.serialize())
fileunit = ttk.units[-1]
fileunit.removenotes()
fileunit.addnote("A new comment")
unit = store0.findid(fileunit.getid())
last_sub_pk = unit.submission_set.order_by(
"id").values_list("id", flat=True).last() or 0
store0.update(
ttk, store_revision=store0.data.max_unit_revision + 1,
user=member
)
assert ttk.units[-1].getnotes("translator") == "A new comment"
unit = store0.units.get(id=unit.id)
assert unit.translator_comment == "A new comment"
assert unit.change.commented_by == member
new_subs = unit.submission_set.filter(id__gt=last_sub_pk).order_by("id")
assert new_subs.count() == 1
comment_sub = new_subs[0]
assert comment_sub.old_value == ""
assert comment_sub.new_value == "A new comment"
assert comment_sub.field == SubmissionFields.COMMENT
assert comment_sub.type == SubmissionTypes.SYSTEM
assert comment_sub.submitter == member
assert comment_sub.revision == unit.revision
assert comment_sub.creation_time == unit.change.commented_on<|fim▁end|> | disk_store = DummyDiskStore(expected) |
<|file_name|>wordProbability.py<|end_file_name|><|fim▁begin|># -*- coding:utf8 -*-
from __future__ import division
import codecs
import re
def calWordProbability(infile, outfile):
    '''
    Compute word translation probabilities from source-language words to
    target-language words. A source word may map to several target words;
    each mapping is assigned the uniform average probability.
    infile: input file, one pair per line: source word \t target word
    outfile: source word \t target word \t probability
    '''
with codecs.open(infile, 'r', 'utf8') as fin:
        # mapping: source word -> {target word: 1}
wordDic = {}
line = fin.readline()
linNum = 1
while line:
linNum += 1
if linNum % 10001 == 1:
print(linNum, line.encode('utf8'))
            line = line.strip()  # strip surrounding whitespace
wArr = re.split('[ |\t]', line)
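            # note: the character class [ |\t] splits on spaces, tabs, and also a literal '|'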
if len(wArr) >= 2:
                key = wArr[0]  # source-language word
                val = wArr[1]  # target-language word
if key in wordDic:
wordDic[key][val] = 1
else:
valMap = dict()
valMap[val] = 1
wordDic[key] = valMap
line = fin.readline()
with codecs.open(outfile, 'w', 'utf8') as fout:
print('start write')
wCount = 0
for key in wordDic.keys():
wCount += 1
            if wCount % 1001 == 0:
print('writing', wCount)
if len(key.split(' ')) > 1:
continue
valMap = wordDic[key]
valLen = len(valMap)
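            # uniform model: every target word of this source key gets probability 1/valLen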
for val in valMap.keys():
fout.write(key)
fout.write('\t')<|fim▁hole|> fout.write(val)
fout.write('\t')
fout.write(str(1/valLen))
fout.write('\n')<|fim▁end|> | |
<|file_name|>userdata.py<|end_file_name|><|fim▁begin|>#########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software<|fim▁hole|># * limitations under the License.
import requests
from cloudify import compute
from cloudify import exceptions
from cloudify import ctx
def handle_userdata(server):
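    # Merge user-supplied userdata with the agent init script; dict-typed
    # userdata is first resolved through userdata_handlers (e.g. the 'http'
    # type fetches the script body from a URL).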
existing_userdata = server.get('userdata')
install_agent_userdata = ctx.agent.init_script()
if not (existing_userdata or install_agent_userdata):
return
if isinstance(existing_userdata, dict):
ud_type = existing_userdata['type']
if ud_type not in userdata_handlers:
raise exceptions.NonRecoverableError(
"Invalid type '{0}' for server userdata)".format(ud_type))
existing_userdata = userdata_handlers[ud_type](existing_userdata)
if not existing_userdata:
final_userdata = install_agent_userdata
elif not install_agent_userdata:
final_userdata = existing_userdata
else:
final_userdata = compute.create_multi_mimetype_userdata(
[existing_userdata, install_agent_userdata])
server['userdata'] = final_userdata
userdata_handlers = {
'http': lambda params: requests.get(params['url']).text
}<|fim▁end|> | # distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and |
<|file_name|>_elementpath.py<|end_file_name|><|fim▁begin|># cython: language_level=2
#
# ElementTree
# $Id: ElementPath.py 3375 2008-02-13 08:05:08Z fredrik $
#
# limited xpath support for element trees
#
# history:
# 2003-05-23 fl created
# 2003-05-28 fl added support for // etc
# 2003-08-27 fl fixed parsing of periods in element names
# 2007-09-10 fl new selection engine
# 2007-09-12 fl fixed parent selector
# 2007-09-13 fl added iterfind; changed findall to return a list
# 2007-11-30 fl added namespaces support
# 2009-10-30 fl added child element value filter
#
# Copyright (c) 2003-2009 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2009 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
##
# Implementation module for XPath support. There's usually no reason
# to import this module directly; the <b>ElementTree</b> does this for
# you, if needed.
##
from __future__ import absolute_import
import re
xpath_tokenizer_re = re.compile(
"("
"'[^']*'|\"[^\"]*\"|"
"::|"
"//?|"
r"\.\.|"
r"\(\)|"
r"[/.*:\[\]\(\)@=])|"
r"((?:\{[^}]+\})?[^/\[\]\(\)@=\s]+)|"
r"\s+"
)
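# The tokenizer yields (operator, tag) pairs; e.g. "a/b[1]" produces
# ('', 'a'), ('/', ''), ('', 'b'), ('[', ''), ('', '1'), (']', '') --
# an illustration of the regex above, not upstream documentation.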
def xpath_tokenizer(pattern, namespaces=None):
# ElementTree uses '', lxml used None originally.
default_namespace = (namespaces.get(None) or namespaces.get('')) if namespaces else None
parsing_attribute = False
for token in xpath_tokenizer_re.findall(pattern):
ttype, tag = token
if tag and tag[0] != "{":
if ":" in tag:
prefix, uri = tag.split(":", 1)
try:
if not namespaces:
raise KeyError
yield ttype, "{%s}%s" % (namespaces[prefix], uri)
except KeyError:
raise SyntaxError("prefix %r not found in prefix map" % prefix)
elif default_namespace and not parsing_attribute:
yield ttype, "{%s}%s" % (default_namespace, tag)
else:
yield token
parsing_attribute = False
else:
yield token
parsing_attribute = ttype == '@'
def prepare_child(next, token):
tag = token[1]
def select(result):
for elem in result:
for e in elem.iterchildren(tag):
yield e
return select
def prepare_star(next, token):
def select(result):
for elem in result:
for e in elem.iterchildren('*'):
yield e
return select
def prepare_self(next, token):
def select(result):
return result
return select
def prepare_descendant(next, token):
token = next()
if token[0] == "*":
tag = "*"
elif not token[0]:
tag = token[1]
else:
raise SyntaxError("invalid descendant")
def select(result):
for elem in result:
for e in elem.iterdescendants(tag):
yield e
return select
def prepare_parent(next, token):
def select(result):
for elem in result:
parent = elem.getparent()
if parent is not None:
yield parent
return select
def prepare_predicate(next, token):
# FIXME: replace with real parser!!! refs:
# http://effbot.org/zone/simple-iterator-parser.htm
# http://javascript.crockford.com/tdop/tdop.html
signature = ''
predicate = []
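    # each token appends one signature char ('@', '=', "'", '()' or '-' for a
    # name token); the finished signature string selects the predicate type below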
while 1:
token = next()
if token[0] == "]":
break
if token == ('', ''):
# ignore whitespace
continue
if token[0] and token[0][:1] in "'\"":
token = "'", token[0][1:-1]
signature += token[0] or "-"<|fim▁hole|> # use signature to determine predicate type
if signature == "@-":
# [@attribute] predicate
key = predicate[1]
def select(result):
for elem in result:
if elem.get(key) is not None:
yield elem
return select
if signature == "@-='":
# [@attribute='value']
key = predicate[1]
value = predicate[-1]
def select(result):
for elem in result:
if elem.get(key) == value:
yield elem
return select
if signature == "-" and not re.match(r"-?\d+$", predicate[0]):
# [tag]
tag = predicate[0]
def select(result):
for elem in result:
for _ in elem.iterchildren(tag):
yield elem
break
return select
if signature == ".='" or (signature == "-='" and not re.match(r"-?\d+$", predicate[0])):
# [.='value'] or [tag='value']
tag = predicate[0]
value = predicate[-1]
if tag:
def select(result):
for elem in result:
for e in elem.iterchildren(tag):
if "".join(e.itertext()) == value:
yield elem
break
else:
def select(result):
for elem in result:
if "".join(elem.itertext()) == value:
yield elem
return select
if signature == "-" or signature == "-()" or signature == "-()-":
# [index] or [last()] or [last()-index]
if signature == "-":
# [index]
index = int(predicate[0]) - 1
if index < 0:
if index == -1:
raise SyntaxError(
"indices in path predicates are 1-based, not 0-based")
else:
raise SyntaxError("path index >= 1 expected")
else:
if predicate[0] != "last":
raise SyntaxError("unsupported function")
if signature == "-()-":
try:
index = int(predicate[2]) - 1
except ValueError:
raise SyntaxError("unsupported expression")
else:
index = -1
def select(result):
for elem in result:
parent = elem.getparent()
if parent is None:
continue
try:
# FIXME: what if the selector is "*" ?
elems = list(parent.iterchildren(elem.tag))
if elems[index] is elem:
yield elem
except IndexError:
pass
return select
raise SyntaxError("invalid predicate")
ops = {
"": prepare_child,
"*": prepare_star,
".": prepare_self,
"..": prepare_parent,
"//": prepare_descendant,
"[": prepare_predicate,
}
# --------------------------------------------------------------------
_cache = {}
def _build_path_iterator(path, namespaces):
"""compile selector pattern"""
if path[-1:] == "/":
path += "*" # implicit all (FIXME: keep this?)
cache_key = (path,)
if namespaces:
# lxml originally used None for the default namespace but ElementTree uses the
# more convenient (all-strings-dict) empty string, so we support both here,
# preferring the more convenient '', as long as they aren't ambiguous.
if None in namespaces:
if '' in namespaces and namespaces[None] != namespaces['']:
raise ValueError("Ambiguous default namespace provided: %r versus %r" % (
namespaces[None], namespaces['']))
cache_key += (namespaces[None],) + tuple(sorted(
item for item in namespaces.items() if item[0] is not None))
else:
cache_key += tuple(sorted(namespaces.items()))
try:
return _cache[cache_key]
except KeyError:
pass
if len(_cache) > 100:
_cache.clear()
if path[:1] == "/":
raise SyntaxError("cannot use absolute path on element")
stream = iter(xpath_tokenizer(path, namespaces))
try:
_next = stream.next
except AttributeError:
# Python 3
_next = stream.__next__
try:
token = _next()
except StopIteration:
raise SyntaxError("empty path expression")
selector = []
while 1:
try:
selector.append(ops[token[0]](_next, token))
except StopIteration:
raise SyntaxError("invalid path")
try:
token = _next()
if token[0] == "/":
token = _next()
except StopIteration:
break
_cache[cache_key] = selector
return selector
##
# Iterate over the matching nodes
def iterfind(elem, path, namespaces=None):
selector = _build_path_iterator(path, namespaces)
result = iter((elem,))
for select in selector:
result = select(result)
return result
##
# Find first matching object.
def find(elem, path, namespaces=None):
it = iterfind(elem, path, namespaces)
try:
return next(it)
except StopIteration:
return None
##
# Find all matching objects.
def findall(elem, path, namespaces=None):
return list(iterfind(elem, path, namespaces))
##
# Find text for first matching object.
def findtext(elem, path, default=None, namespaces=None):
el = find(elem, path, namespaces)
if el is None:
return default
else:
return el.text or ''<|fim▁end|> | predicate.append(token[1])
|
<|file_name|>config.go<|end_file_name|><|fim▁begin|>package config
import (
"bufio"
"io"
"io/ioutil"
"os"
"strings"
"github.com/craigmonson/colonize/util"
"gopkg.in/yaml.v2"
)
type ConfigFile struct {
Environments_Dir string "environments_dir"
Base_Environment_Ext string "base_environment_ext"
Autogenerate_Comment string "autogenerate_comment"
Combined_Vals_File string "combined_vals_file"
Combined_Vars_File string "combined_vars_file"
Combined_Derived_Vals_File string "combined_derived_vals_file"
Combined_Derived_Vars_File string "combined_derived_vars_file"
Combined_Tf_File string "combined_tf_file"
Combined_Remote_Config_File string "combined_remote_config_file"
Remote_Config_File string "remote_config_file"
Derived_File string "derived_file"
Vals_File_Env_Post_String string "vals_file_env_post_string"
Branch_Order_File string "branch_order_file"
}
var ConfigFileDefaults = ConfigFile{
"env",
"default",
"This file generated by colonize",
"_combined.tfvars",
"_combined_variables.tf",
"_combined_derived.tfvars",
"_combined_derived.tf",
"_combined.tf",
"_remote_setup.sh",
"remote_setup.sh",
"derived.tfvars",
".tfvars",
"build_order.txt",
}
type Config struct {
// Inputs
Environment string
OriginPath string
TmplName string
TmplPath string
CfgPath string
RootPath string
// Generated
TmplRelPaths []string
WalkablePaths []string
WalkableValPaths []string
CombinedValsFilePath string
CombinedVarsFilePath string
WalkableTfPaths []string
CombinedTfFilePath string
WalkableDerivedPaths []string
CombinedDerivedValsFilePath string
CombinedDerivedVarsFilePath string
CombinedRemoteFilePath string
RemoteFilePath string
// Read in from config
ConfigFile ConfigFile
}
type LoadConfigInput struct {
// The environment
Environment string
// origin path where the command is run (typically cwd)
OriginPath string
// name for this template ie: vpc
TmplName string
// the difference between the cfg path and the root path.
TmplPath string
// path to config file
CfgPath string
// the root of the project (dir where config.yaml is)
RootPath string
}
const ConfigFileComment = "## Generated by Colonize init\n---\n"
func LoadConfig(input *LoadConfigInput) (*Config, error) {
conf := Config{
Environment: input.Environment,
OriginPath: input.OriginPath,
TmplName: input.TmplName,
TmplPath: input.TmplPath,
CfgPath: input.CfgPath,
RootPath: input.RootPath,
}
contents, err := ioutil.ReadFile(input.CfgPath)
if err != nil {
return &conf, err
}
err = yaml.Unmarshal(contents, &conf.ConfigFile)
conf.initialize()
return &conf, err
}
func LoadConfigInTree(path string, env string) (*Config, error) {
cfgPath, err := util.FindCfgPath(path)
if err != nil {
return &Config{}, err
}
tmplName := util.GetBasename(path)
rootPath := util.GetDir(cfgPath)
tmplPath := util.GetTmplRelPath(path, rootPath)
return LoadConfig(&LoadConfigInput{
Environment: env,
OriginPath: path,
TmplName: tmplName,
TmplPath: tmplPath,
CfgPath: cfgPath,
RootPath: rootPath,
})
}
func (c *Config) GetEnvValPath() string {
return util.PathJoin(
c.ConfigFile.Environments_Dir,
c.Environment+c.ConfigFile.Vals_File_Env_Post_String,
)
}
func (c *Config) GetEnvTfPath() string {
return c.ConfigFile.Environments_Dir
}
func (c *Config) GetEnvDerivedPath() string {
return util.PathJoin(c.ConfigFile.Environments_Dir, c.ConfigFile.Derived_File)
}
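// GetBuildOrderPaths reads this branch's build-order file (one entry per
// line) and returns the listed entries as paths rooted at OriginPath.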
func (c *Config) GetBuildOrderPaths() ([]string, error) {
orderPath := util.PathJoin(c.OriginPath, c.ConfigFile.Branch_Order_File)
content, err := ioutil.ReadFile(orderPath)
if err != nil {
return nil, err
}
orders := cleanEmpties(strings.Split(string(content), "\n"))
paths := util.PrependPathToPaths(orders, c.OriginPath)
return paths, nil
}
func cleanEmpties(list []string) (newList []string) {
for i, v := range list {
if v != "" {
newList = append(newList, list[i])
}
}
return newList
}
func (c *Config) IsBranch() bool {
// if build_order.txt exists, then it's a branch, if not, we expect it to be
// a leaf.
orderPath := util.PathJoin(c.OriginPath, c.ConfigFile.Branch_Order_File)
if _, err := os.Stat(orderPath); os.IsNotExist(err) {
return false
}
return true
}
func (c *Config) IsNotBranch() bool {
return !c.IsBranch()
}
func (c *Config) IsLeaf() bool {
return !c.IsBranch()
}
func (c *Config) IsNotLeaf() bool {
return !c.IsLeaf()
}
func (c *Config) initialize() {
c.TmplRelPaths = util.GetTreePaths(c.TmplPath)
// this will represent the root path in our relative paths.
c.WalkablePaths = util.PrependPathToPaths(
append([]string{""}, c.TmplRelPaths...),
c.RootPath,
)
c.WalkableValPaths = util.AppendPathToPaths(
c.WalkablePaths,
c.GetEnvValPath(),
)
c.CombinedValsFilePath = util.PathJoin(c.OriginPath, c.ConfigFile.Combined_Vals_File)
c.CombinedVarsFilePath = util.PathJoin(c.OriginPath, c.ConfigFile.Combined_Vars_File)
c.WalkableTfPaths = util.AppendPathToPaths(
c.WalkablePaths,
c.GetEnvTfPath(),
)
c.CombinedTfFilePath = util.PathJoin(c.OriginPath, c.ConfigFile.Combined_Tf_File)
c.WalkableDerivedPaths = util.AppendPathToPaths(
c.WalkablePaths,
c.GetEnvDerivedPath(),
)
c.CombinedDerivedValsFilePath = util.PathJoin(c.OriginPath, c.ConfigFile.Combined_Derived_Vals_File)
c.CombinedDerivedVarsFilePath = util.PathJoin(c.OriginPath, c.ConfigFile.Combined_Derived_Vars_File)
c.CombinedRemoteFilePath = util.PathJoin(c.OriginPath, c.ConfigFile.Combined_Remote_Config_File)
c.RemoteFilePath = util.PathJoin(
util.PathJoin(c.RootPath, c.ConfigFile.Environments_Dir),
c.ConfigFile.Remote_Config_File,
)
}
func (c *ConfigFile) ToYaml(buf io.Writer) error {
	output, err := yaml.Marshal(c)
	if err != nil {
		return err
	}
	w := bufio.NewWriter(buf)
	_, err = w.WriteString(ConfigFileComment)
if err != nil {
return err
}
_, err = w.Write(output)
if err != nil {
return err
}
	return w.Flush()
}<|fim▁hole|>func (c *ConfigFile) WriteToFile(filename string) error {
// create/open file
f, err := os.Create(filename)
if err != nil {
return err
}
defer f.Close()
return c.ToYaml(f)
}<|fim▁end|> | |
<|file_name|>setup_payload.py<|end_file_name|><|fim▁begin|>#
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from chip.native import GetLibraryHandle, NativeLibraryHandleMethodArguments
from chip.exceptions import ChipStackError
from ctypes import CFUNCTYPE, c_char_p, c_int32, c_uint8, c_uint16, c_uint32
class SetupPayload:
# AttributeVisitor: void(const char* name, const char* value)
AttributeVisitor = CFUNCTYPE(None, c_char_p, c_char_p)
# VendorAttributeVisitor: void(uint8_t tag, const char* value)
VendorAttributeVisitor = CFUNCTYPE(None, c_uint8, c_char_p)
def __init__(self):
self.chipLib = GetLibraryHandle()
self.__InitNativeFunctions(self.chipLib)
self.attributes = {}
self.vendor_attributes = {}
def AddAttribute(name, value):
self.attributes[name.decode()] = value.decode()
def AddVendorAttribute(tag, value):
self.vendor_attributes[tag] = value.decode()
self.attribute_visitor = SetupPayload.AttributeVisitor(AddAttribute)
self.vendor_attribute_visitor = SetupPayload.VendorAttributeVisitor(
AddVendorAttribute)
def ParseQrCode(self, qrCode: str):
self.Clear()
err = self.chipLib.pychip_SetupPayload_ParseQrCode(qrCode.upper().encode(),
self.attribute_visitor,
self.vendor_attribute_visitor)
if err != 0:
raise ChipStackError(err)
return self
def ParseManualPairingCode(self, manualPairingCode: str):
self.Clear()
err = self.chipLib.pychip_SetupPayload_ParseManualPairingCode(manualPairingCode.encode(),
self.attribute_visitor,
self.vendor_attribute_visitor)
if err != 0:
raise ChipStackError(err)
return self
def PrintOnboardingCodes(self, passcode, vendorId, productId, discriminator, customFlow, capabilities, version):
self.Clear()
err = self.chipLib.pychip_SetupPayload_PrintOnboardingCodes(
passcode, vendorId, productId, discriminator, customFlow, capabilities, version)
if err != 0:
raise ChipStackError(err)
def Print(self):
for name, value in self.attributes.items():
decorated_value = self.__DecorateValue(name, value)
decorated_value = f" [{decorated_value}]" if decorated_value else ""
print(f"{name}: {value}{decorated_value}")
for tag in self.vendor_attributes:
print(
f"Vendor attribute '{tag:>3}': {self.vendor_attributes[tag]}")
def Clear(self):
self.attributes.clear()
self.vendor_attributes.clear()
def __DecorateValue(self, name, value):
if name == "RendezvousInformation":
rendezvous_methods = []
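            # RendezvousInformation is a bitmask; bits 0/1/2 map to SoftAP/BLE/OnNetwork per the checks below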
if int(value) & 0b001:
rendezvous_methods += ["SoftAP"]<|fim▁hole|> return ', '.join(rendezvous_methods)
return None
def __InitNativeFunctions(self, chipLib):
if chipLib.pychip_SetupPayload_ParseQrCode is not None:
return
setter = NativeLibraryHandleMethodArguments(chipLib)
setter.Set("pychip_SetupPayload_ParseQrCode",
c_int32,
[c_char_p, SetupPayload.AttributeVisitor, SetupPayload.VendorAttributeVisitor])
setter.Set("pychip_SetupPayload_ParseManualPairingCode",
c_int32,
[c_char_p, SetupPayload.AttributeVisitor, SetupPayload.VendorAttributeVisitor])
setter.Set("pychip_SetupPayload_PrintOnboardingCodes",
c_int32,
                   [c_uint32, c_uint16, c_uint16, c_uint16, c_uint8, c_uint8, c_uint8])<|fim▁end|> | if int(value) & 0b010:
rendezvous_methods += ["BLE"]
if int(value) & 0b100:
rendezvous_methods += ["OnNetwork"] |
<|file_name|>language.py<|end_file_name|><|fim▁begin|># Copyright 2016-2021 Peppy Player [email protected]
#
# This file is part of Peppy Player.
#
# Peppy Player is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Peppy Player is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Peppy Player. If not, see <http://www.gnu.org/licenses/>.
from ui.navigator.language import LanguageNavigator
from ui.screen.screen import Screen, PERCENT_TOP_HEIGHT
from ui.menu.languagemenu import LanguageMenu
from util.config import LABELS, LANGUAGE
class LanguageScreen(Screen):
""" Genre Screen. Extends base Screen class """
def __init__(self, util, change_language, listeners, voice_assistant):
""" Initializer
:param util: utility object<|fim▁hole|> Screen.__init__(self, util, "", PERCENT_TOP_HEIGHT, voice_assistant)
self.language_menu = LanguageMenu(util, None, self.layout.CENTER)
self.language_menu.add_listener(change_language)
self.add_menu(self.language_menu)
self.label = util.config[LABELS][LANGUAGE]
l_name = util.get_current_language_translation()
txt = self.label + ": " + l_name
self.screen_title.set_text(txt)
self.navigator = LanguageNavigator(util, self.layout.BOTTOM, listeners)
self.add_navigator(self.navigator)
self.link_borders()
def add_screen_observers(self, update_observer, redraw_observer):
""" Add screen observers
:param update_observer: observer for updating the screen
:param redraw_observer: observer to redraw the whole screen
"""
Screen.add_screen_observers(self, update_observer, redraw_observer)
self.language_menu.add_menu_observers(update_observer, redraw_observer, release=False)
        self.navigator.add_observers(update_observer, redraw_observer)<|fim▁end|> | :param listeners: screen menu event listeners
""" |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import url<|fim▁hole|>
local_urlpatterns = [
url(r'^$', views.index, name='index'),
    url(r'^filter/$', views.filter, name='filter'),
    url(r'^job_display/(?P<job_num>\w+)/$', views.job_display, name='job_display'),
]
urlpatterns = local_urlpatterns<|fim▁end|> | import logging
logger = logging.getLogger(__name__)
from argo import views |
<|file_name|>__main__.py<|end_file_name|><|fim▁begin|># This file is part of pyplink.
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Louis-Philippe Lemieux Perreault
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
from . import test_suite
<|fim▁hole|>
__author__ = "Louis-Philippe Lemieux Perreault"
__copyright__ = "Copyright 2014 Louis-Philippe Lemieux Perreault"
__license__ = "MIT"
unittest.TextTestRunner(verbosity=1).run(test_suite)<|fim▁end|> | |
<|file_name|>register.go<|end_file_name|><|fim▁begin|>/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.<|fim▁hole|>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"github.com/FlorianOtel/client-go/pkg/runtime"
"github.com/FlorianOtel/client-go/pkg/runtime/schema"
)
// SchemeGroupVersion is group version used to register these objects
// TODO this should be in the "kubeconfig" group
var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal}
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
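// addKnownTypes registers the internal kubeconfig Config type with the scheme.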
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Config{},
)
return nil
}
func (obj *Config) GetObjectKind() schema.ObjectKind { return obj }
func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) {
obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
}
func (obj *Config) GroupVersionKind() schema.GroupVersionKind {
return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
}<|fim▁end|> | You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 |
<|file_name|>GetAddRequestProcessor.java<|end_file_name|><|fim▁begin|>package com.snail.webgame.game.protocal.relation.getRequest;
import org.epilot.ccf.config.Resource;
import org.epilot.ccf.core.processor.ProtocolProcessor;
import org.epilot.ccf.core.processor.Request;
import org.epilot.ccf.core.processor.Response;
import org.epilot.ccf.core.protocol.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.snail.webgame.game.common.GameMessageHead;
import com.snail.webgame.game.common.util.Command;
import com.snail.webgame.game.protocal.relation.service.RoleRelationMgtService;
public class GetAddRequestProcessor extends ProtocolProcessor{
private static Logger logger = LoggerFactory.getLogger("logs");
private RoleRelationMgtService roleRelationMgtService;
public void setRoleRelationMgtService(RoleRelationMgtService roleRelationMgtService) {
this.roleRelationMgtService = roleRelationMgtService;
}
@Override
public void execute(Request request, Response response) {
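        // Look up this role's pending friend-add requests and write them back as the response.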
Message msg = request.getMessage();
GameMessageHead header = (GameMessageHead) msg.getHeader();
header.setMsgType(Command.GET_ADD_REQUEST_RESP);
int roleId = header.getUserID0();
GetAddRequestReq req = (GetAddRequestReq) msg.getBody();<|fim▁hole|> msg.setHeader(header);
msg.setBody(resp);
response.write(msg);
if(logger.isInfoEnabled()){
logger.info(Resource.getMessage("game", "GAME_ROLE_RELATION_INFO_4") + ": result=" + resp.getResult() + ",roleId="
+ roleId);
}
}
}<|fim▁end|> |
GetAddRequestResp resp = roleRelationMgtService.getAddFriendRequestList(roleId, req);
|
<|file_name|>tree.rs<|end_file_name|><|fim▁begin|>/*!
rctree is a "DOM-like" tree implemented using reference counting.
*/
// This is a copy of the https://github.com/RazrFalcon/rctree
//
// Changes:
// - Node::new marked as private
// - Node::borrow marked as private
// - Node::borrow_mut marked as private
// - Node::make_copy removed
// - Node::make_deep_copy removed
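//
// A minimal usage sketch (illustrative only, since `Node::new` is
// crate-private in this copy and callers go through the crate's wrappers):
//
//     let mut root = Node::new("root");
//     root.append(Node::new("child"));
//     for node in root.descendants() {
//         // visits "root", then "child"
//     }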
use std::fmt;
use std::cell::{RefCell, Ref, RefMut};
use std::rc::{Rc, Weak};
type Link<T> = Rc<RefCell<NodeData<T>>>;
type WeakLink<T> = Weak<RefCell<NodeData<T>>>;
/// A reference to a node holding a value of type `T`. Nodes form a tree.
///
/// Internally, this uses reference counting for lifetime tracking
/// and `std::cell::RefCell` for interior mutability.
///
/// **Note:** Cloning a `Node` only increments a reference count. It does not copy the data.
pub struct Node<T>(Link<T>);
struct NodeData<T> {
root: Option<WeakLink<T>>,
parent: Option<WeakLink<T>>,
first_child: Option<Link<T>>,
last_child: Option<WeakLink<T>>,
previous_sibling: Option<WeakLink<T>>,
next_sibling: Option<Link<T>>,
data: T,
}
/// Cloning a `Node` only increments a reference count. It does not copy the data.
impl<T> Clone for Node<T> {
fn clone(&self) -> Self {
Node(Rc::clone(&self.0))
}
}
impl<T> PartialEq for Node<T> {
fn eq(&self, other: &Node<T>) -> bool {
Rc::ptr_eq(&self.0, &other.0)
}
}
impl<T: fmt::Debug> fmt::Debug for Node<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&*self.borrow(), f)
}
}
impl<T: fmt::Display> fmt::Display for Node<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&*self.borrow(), f)
}
}
macro_rules! try_opt {
($expr: expr) => {
match $expr {
Some(value) => value,
None => return None
}
}
}
impl<T> Node<T> {
/// Creates a new node from its associated data.
pub(crate) fn new(data: T) -> Node<T> {
Node(Rc::new(RefCell::new(NodeData {
root: None,
parent: None,
first_child: None,
last_child: None,
previous_sibling: None,
next_sibling: None,
data,
})))
}
/// Returns a root node.
///
/// If the current node is the root node - will return itself.
///
/// # Panics
///
/// Panics if the node is currently mutably borrowed.
pub fn root(&self) -> Node<T> {
match self.0.borrow().root.as_ref() {
Some(v) => Node(v.upgrade().unwrap()),
None => self.clone(),
}
}
/// Returns a parent node, unless this node is the root of the tree.
///
/// # Panics
///
/// Panics if the node is currently mutably borrowed.
pub fn parent(&self) -> Option<Node<T>> {
Some(Node(try_opt!(try_opt!(self.0.borrow().parent.as_ref()).upgrade())))
}
/// Returns a first child of this node, unless it has no child.
///
/// # Panics
///
/// Panics if the node is currently mutably borrowed.
pub fn first_child(&self) -> Option<Node<T>> {
Some(Node(try_opt!(self.0.borrow().first_child.as_ref()).clone()))
}
/// Returns a last child of this node, unless it has no child.
///
/// # Panics
///
/// Panics if the node is currently mutably borrowed.
pub fn last_child(&self) -> Option<Node<T>> {
Some(Node(try_opt!(try_opt!(self.0.borrow().last_child.as_ref()).upgrade())))
}
/// Returns the previous sibling of this node, unless it is a first child.
///
/// # Panics
///
/// Panics if the node is currently mutably borrowed.
pub fn previous_sibling(&self) -> Option<Node<T>> {
Some(Node(try_opt!(try_opt!(self.0.borrow().previous_sibling.as_ref()).upgrade())))
}
/// Returns the next sibling of this node, unless it is a last child.
///
/// # Panics
///
/// Panics if the node is currently mutably borrowed.
pub fn next_sibling(&self) -> Option<Node<T>> {
Some(Node(try_opt!(self.0.borrow().next_sibling.as_ref()).clone()))
}
/// Returns a shared reference to this node's data
///
/// # Panics
///
/// Panics if the node is currently mutably borrowed.
pub(crate) fn borrow(&self) -> Ref<T> {
Ref::map(self.0.borrow(), |v| &v.data)
}
/// Returns a unique/mutable reference to this node's data
///
/// # Panics
///
/// Panics if the node is currently borrowed.
pub(crate) fn borrow_mut(&mut self) -> RefMut<T> {
RefMut::map(self.0.borrow_mut(), |v| &mut v.data)
}
/// Returns an iterator of nodes to this node and its ancestors.
///
/// Includes the current node.
pub fn ancestors(&self) -> Ancestors<T> {
Ancestors(Some(self.clone()))
}
/// Returns an iterator of nodes to this node and the siblings before it.
///
/// Includes the current node.
pub fn preceding_siblings(&self) -> PrecedingSiblings<T> {
PrecedingSiblings(Some(self.clone()))
}
/// Returns an iterator of nodes to this node and the siblings after it.
///
/// Includes the current node.
pub fn following_siblings(&self) -> FollowingSiblings<T> {
FollowingSiblings(Some(self.clone()))
}
/// Returns an iterator of nodes to this node's children.
///
/// # Panics
///
/// Panics if the node is currently mutably borrowed.
pub fn children(&self) -> Children<T> {
Children {
next: self.first_child(),
next_back: self.last_child(),
}
}
/// Returns `true` if this node has children nodes.
///
/// # Panics
///
/// Panics if the node is currently mutably borrowed.
pub fn has_children(&self) -> bool {
self.first_child().is_some()
}
/// Returns an iterator of nodes to this node and its descendants, in tree order.
///
/// Includes the current node.
pub fn descendants(&self) -> Descendants<T> {
Descendants(self.traverse())
}
/// Returns an iterator of nodes to this node and its descendants, in tree order.
pub fn traverse(&self) -> Traverse<T> {
Traverse {
root: self.clone(),
next: Some(NodeEdge::Start(self.clone())),
next_back: Some(NodeEdge::End(self.clone())),
}
}
/// Detaches a node from its parent and siblings. Children are not affected.
///
/// # Panics
///
/// Panics if the node or one of its adjoining nodes is currently borrowed.
pub fn detach(&mut self) {
self.0.borrow_mut().detach();
}
/// Appends a new child to this node, after existing children.
///
/// # Panics
///
/// Panics if the node, the new child, or one of their adjoining nodes is currently borrowed.
pub fn append(&mut self, new_child: Node<T>) {
assert!(*self != new_child, "a node cannot be appended to itself");
let mut self_borrow = self.0.borrow_mut();
let mut last_child_opt = None;
{
let mut new_child_borrow = new_child.0.borrow_mut();
new_child_borrow.detach();
new_child_borrow.root = Some(self_borrow.root.clone().unwrap_or(Rc::downgrade(&self.0)));
new_child_borrow.parent = Some(Rc::downgrade(&self.0));
if let Some(last_child_weak) = self_borrow.last_child.take() {
if let Some(last_child_strong) = last_child_weak.upgrade() {
new_child_borrow.previous_sibling = Some(last_child_weak);
last_child_opt = Some(last_child_strong);
}
}
self_borrow.last_child = Some(Rc::downgrade(&new_child.0));
}
if let Some(last_child_strong) = last_child_opt {
let mut last_child_borrow = last_child_strong.borrow_mut();
debug_assert!(last_child_borrow.next_sibling.is_none());
last_child_borrow.next_sibling = Some(new_child.0);
} else {
// No last child
debug_assert!(self_borrow.first_child.is_none());
self_borrow.first_child = Some(new_child.0);
}
}
/// Prepends a new child to this node, before existing children.
///
/// # Panics
///
/// Panics if the node, the new child, or one of their adjoining nodes is currently borrowed.
pub fn prepend(&mut self, new_child: Node<T>) {
assert!(*self != new_child, "a node cannot be prepended to itself");
let mut self_borrow = self.0.borrow_mut();
{
let mut new_child_borrow = new_child.0.borrow_mut();
new_child_borrow.detach();
new_child_borrow.root = Some(self_borrow.root.clone().unwrap_or(Rc::downgrade(&self.0)));
new_child_borrow.parent = Some(Rc::downgrade(&self.0));
match self_borrow.first_child.take() {
Some(first_child_strong) => {
{
let mut first_child_borrow = first_child_strong.borrow_mut();
debug_assert!(first_child_borrow.previous_sibling.is_none());
first_child_borrow.previous_sibling = Some(Rc::downgrade(&new_child.0));
}
new_child_borrow.next_sibling = Some(first_child_strong);
}
None => {
debug_assert!(self_borrow.first_child.is_none());
self_borrow.last_child = Some(Rc::downgrade(&new_child.0));
}
}
}
self_borrow.first_child = Some(new_child.0);
}
/// Inserts a new sibling after this node.
///
/// # Panics
///
/// Panics if the node, the new sibling, or one of their adjoining nodes is currently borrowed.
pub fn insert_after(&mut self, new_sibling: Node<T>) {
assert!(*self != new_sibling, "a node cannot be inserted after itself");
let mut self_borrow = self.0.borrow_mut();
{
let mut new_sibling_borrow = new_sibling.0.borrow_mut();
new_sibling_borrow.detach();
new_sibling_borrow.root = self_borrow.root.clone();
new_sibling_borrow.parent = self_borrow.parent.clone();
new_sibling_borrow.previous_sibling = Some(Rc::downgrade(&self.0));
match self_borrow.next_sibling.take() {
Some(next_sibling_strong) => {
{
let mut next_sibling_borrow = next_sibling_strong.borrow_mut();
debug_assert!({
let weak = next_sibling_borrow.previous_sibling.as_ref().unwrap();
Rc::ptr_eq(&weak.upgrade().unwrap(), &self.0)
});
next_sibling_borrow.previous_sibling = Some(Rc::downgrade(&new_sibling.0));
}
new_sibling_borrow.next_sibling = Some(next_sibling_strong);
}
None => {
if let Some(parent_ref) = self_borrow.parent.as_ref() {
if let Some(parent_strong) = parent_ref.upgrade() {
let mut parent_borrow = parent_strong.borrow_mut();
parent_borrow.last_child = Some(Rc::downgrade(&new_sibling.0));
}
}
}
}
}
self_borrow.next_sibling = Some(new_sibling.0);
}
/// Inserts a new sibling before this node.
///
/// # Panics
///
/// Panics if the node, the new sibling, or one of their adjoining nodes is currently borrowed.
pub fn insert_before(&mut self, new_sibling: Node<T>) {
assert!(*self != new_sibling, "a node cannot be inserted before itself");
let mut self_borrow = self.0.borrow_mut();
let mut previous_sibling_opt = None;
{
let mut new_sibling_borrow = new_sibling.0.borrow_mut();
new_sibling_borrow.detach();
new_sibling_borrow.root = self_borrow.root.clone();
new_sibling_borrow.parent = self_borrow.parent.clone();
new_sibling_borrow.next_sibling = Some(self.0.clone());
if let Some(previous_sibling_weak) = self_borrow.previous_sibling.take() {
if let Some(previous_sibling_strong) = previous_sibling_weak.upgrade() {
new_sibling_borrow.previous_sibling = Some(previous_sibling_weak);
previous_sibling_opt = Some(previous_sibling_strong);
}
}
self_borrow.previous_sibling = Some(Rc::downgrade(&new_sibling.0));
}
if let Some(previous_sibling_strong) = previous_sibling_opt {
let mut previous_sibling_borrow = previous_sibling_strong.borrow_mut();
debug_assert!({
let rc = previous_sibling_borrow.next_sibling.as_ref().unwrap();
Rc::ptr_eq(rc, &self.0)
});
previous_sibling_borrow.next_sibling = Some(new_sibling.0);
} else {
// No previous sibling.
if let Some(parent_ref) = self_borrow.parent.as_ref() {
if let Some(parent_strong) = parent_ref.upgrade() {
let mut parent_borrow = parent_strong.borrow_mut();
parent_borrow.first_child = Some(new_sibling.0);
}
}
}
}
}
impl<T> NodeData<T> {
/// Detaches a node from its parent and siblings. Children are not affected.
fn detach(&mut self) {
let parent_weak = self.parent.take();
let previous_sibling_weak = self.previous_sibling.take();
let next_sibling_strong = self.next_sibling.take();
let previous_sibling_opt = previous_sibling_weak.as_ref().and_then(|weak| weak.upgrade());
if let Some(next_sibling_ref) = next_sibling_strong.as_ref() {
let mut next_sibling_borrow = next_sibling_ref.borrow_mut();
next_sibling_borrow.previous_sibling = previous_sibling_weak;
} else if let Some(parent_ref) = parent_weak.as_ref() {
if let Some(parent_strong) = parent_ref.upgrade() {
let mut parent_borrow = parent_strong.borrow_mut();
parent_borrow.last_child = previous_sibling_weak;
}
}
if let Some(previous_sibling_strong) = previous_sibling_opt {
let mut previous_sibling_borrow = previous_sibling_strong.borrow_mut();
previous_sibling_borrow.next_sibling = next_sibling_strong;
} else if let Some(parent_ref) = parent_weak.as_ref() {
if let Some(parent_strong) = parent_ref.upgrade() {<|fim▁hole|> parent_borrow.first_child = next_sibling_strong;
}
}
}
}
/// Iterators prelude.
pub mod iterator {
pub use super::Ancestors;
pub use super::PrecedingSiblings;
pub use super::FollowingSiblings;
pub use super::Children;
pub use super::Descendants;
pub use super::Traverse;
pub use super::NodeEdge;
}
macro_rules! impl_node_iterator {
($name: ident, $next: expr) => {
impl<T> Iterator for $name<T> {
type Item = Node<T>;
/// # Panics
///
/// Panics if the node about to be yielded is currently mutably borrowed.
fn next(&mut self) -> Option<Self::Item> {
match self.0.take() {
Some(node) => {
self.0 = $next(&node);
Some(node)
}
None => None
}
}
}
}
}
/// An iterator of nodes to the ancestors a given node.
pub struct Ancestors<T>(Option<Node<T>>);
impl_node_iterator!(Ancestors, |node: &Node<T>| node.parent());
/// An iterator of nodes to the siblings before a given node.
pub struct PrecedingSiblings<T>(Option<Node<T>>);
impl_node_iterator!(PrecedingSiblings, |node: &Node<T>| node.previous_sibling());
/// An iterator of nodes to the siblings after a given node.
pub struct FollowingSiblings<T>(Option<Node<T>>);
impl_node_iterator!(FollowingSiblings, |node: &Node<T>| node.next_sibling());
/// A double ended iterator of nodes to the children of a given node.
pub struct Children<T> {
next: Option<Node<T>>,
next_back: Option<Node<T>>,
}
impl<T> Children<T> {
// true if self.next_back's next sibling is self.next
fn finished(&self) -> bool {
match self.next_back {
Some(ref next_back) => next_back.next_sibling() == self.next,
_ => true,
}
}
}
impl<T> Iterator for Children<T> {
type Item = Node<T>;
/// # Panics
///
/// Panics if the node about to be yielded is currently mutably borrowed.
fn next(&mut self) -> Option<Self::Item> {
if self.finished() {
return None;
}
match self.next.take() {
Some(node) => {
self.next = node.next_sibling();
Some(node)
}
None => None
}
}
}
impl<T> DoubleEndedIterator for Children<T> {
/// # Panics
///
/// Panics if the node about to be yielded is currently mutably borrowed.
fn next_back(&mut self) -> Option<Self::Item> {
if self.finished() {
return None;
}
match self.next_back.take() {
Some(node) => {
self.next_back = node.previous_sibling();
Some(node)
}
None => None
}
}
}
/// An iterator of nodes to a given node and its descendants, in tree order.
pub struct Descendants<T>(Traverse<T>);
impl<T> Iterator for Descendants<T> {
type Item = Node<T>;
/// # Panics
///
/// Panics if the node about to be yielded is currently mutably borrowed.
fn next(&mut self) -> Option<Self::Item> {
loop {
match self.0.next() {
Some(NodeEdge::Start(node)) => return Some(node),
Some(NodeEdge::End(_)) => {}
None => return None
}
}
}
}
/// A node type during traverse.
#[derive(Clone, Debug)]
pub enum NodeEdge<T> {
    /// Indicates the start of a node that has children.
/// Yielded by `Traverse::next` before the node's descendants.
/// In HTML or XML, this corresponds to an opening tag like `<div>`
Start(Node<T>),
    /// Indicates the end of a node that has children.
/// Yielded by `Traverse::next` after the node's descendants.
/// In HTML or XML, this corresponds to a closing tag like `</div>`
End(Node<T>),
}
// Implement PartialEq manually, because we do not need to require T: PartialEq
impl<T> PartialEq for NodeEdge<T> {
fn eq(&self, other: &NodeEdge<T>) -> bool {
match (&*self, &*other) {
(&NodeEdge::Start(ref n1), &NodeEdge::Start(ref n2)) => *n1 == *n2,
(&NodeEdge::End(ref n1), &NodeEdge::End(ref n2)) => *n1 == *n2,
_ => false,
}
}
}
impl<T> NodeEdge<T> {
fn next_item(&self, root: &Node<T>) -> Option<NodeEdge<T>> {
match *self {
NodeEdge::Start(ref node) => match node.first_child() {
Some(first_child) => Some(NodeEdge::Start(first_child)),
None => Some(NodeEdge::End(node.clone())),
},
NodeEdge::End(ref node) => {
if *node == *root {
None
} else {
match node.next_sibling() {
Some(next_sibling) => Some(NodeEdge::Start(next_sibling)),
None => match node.parent() {
Some(parent) => Some(NodeEdge::End(parent)),
// `node.parent()` here can only be `None`
// if the tree has been modified during iteration,
// but silently stoping iteration
// seems a more sensible behavior than panicking.
None => None,
},
}
}
}
}
}
fn previous_item(&self, root: &Node<T>) -> Option<NodeEdge<T>> {
match *self {
NodeEdge::End(ref node) => match node.last_child() {
Some(last_child) => Some(NodeEdge::End(last_child)),
None => Some(NodeEdge::Start(node.clone())),
},
NodeEdge::Start(ref node) => {
if *node == *root {
None
} else {
match node.previous_sibling() {
Some(previous_sibling) => Some(NodeEdge::End(previous_sibling)),
None => match node.parent() {
Some(parent) => Some(NodeEdge::Start(parent)),
// `node.parent()` here can only be `None`
// if the tree has been modified during iteration,
// but silently stoping iteration
// seems a more sensible behavior than panicking.
None => None
}
}
}
}
}
}
}
/// A double ended iterator of nodes to a given node and its descendants,
/// in tree order.
pub struct Traverse<T> {
root: Node<T>,
next: Option<NodeEdge<T>>,
next_back: Option<NodeEdge<T>>,
}
impl<T> Traverse<T> {
// true if self.next_back's next item is self.next
fn finished(&self) -> bool {
match self.next_back {
Some(ref next_back) => next_back.next_item(&self.root) == self.next,
_ => true,
}
}
}
impl<T> Iterator for Traverse<T> {
type Item = NodeEdge<T>;
/// # Panics
///
/// Panics if the node about to be yielded is currently mutably borrowed.
fn next(&mut self) -> Option<Self::Item> {
if self.finished() {
return None;
}
match self.next.take() {
Some(item) => {
self.next = item.next_item(&self.root);
Some(item)
}
None => None
}
}
}
impl<T> DoubleEndedIterator for Traverse<T> {
/// # Panics
///
/// Panics if the node about to be yielded is currently mutably borrowed.
fn next_back(&mut self) -> Option<Self::Item> {
if self.finished() {
return None;
}
match self.next_back.take() {
Some(item) => {
self.next_back = item.previous_item(&self.root);
Some(item)
}
None => None
}
}
}<|fim▁end|> | let mut parent_borrow = parent_strong.borrow_mut(); |
<|file_name|>q4.py<|end_file_name|><|fim▁begin|>import sys
def solve(B):
ans = [0]*B
guess = 1
for _ in range(10):
print(guess)
sys.stdout.flush()
n = int(input().strip())
ans[guess-1] = n
guess += 1
print("".join(map(str, ans)))
sys.stdout.flush()
result = input()
if result == "N":
sys.exit()<|fim▁hole|>for case in range(1, T+1):
solve(B)<|fim▁end|> | return
T, B = map(int, input().split())
|
<|file_name|>parse.rs<|end_file_name|><|fim▁begin|>use std::iter;
use lex;
#[derive(Debug)]
pub struct ParseError;
#[derive(Debug)]
pub enum Node {
Number(f64),
Operation{operator: char, children: Vec<Node>},
}
pub fn parse(input: &str) -> Result<Node, ParseError> {
let mut parser = Parser{
tokenizer: lex::Tokenizer::new(input).peekable(),
};
parser.parse_node()
}
struct Parser<'a> {
tokenizer: iter::Peekable<lex::Tokenizer<'a>>,
}
impl<'a> Parser<'a> {
fn parse_node(&mut self) -> Result<Node, ParseError> {
let token = match self.tokenizer.next() {
None => return Err(ParseError),
Some(t) => t,
};
match token {
lex::Token::Number(c) => {
match c.parse::<f64>() {
Ok(f) => Ok(Node::Number(f)),
Err(_) => Err(ParseError)
}
},
lex::Token::OpenParen => self.parse_operation(),
_ => Err(ParseError),
}
}
fn parse_operation(&mut self) -> Result<Node, ParseError> {
let op = match self.tokenizer.next() {
Some(lex::Token::Operator(t)) => t,
_ => return Err(ParseError),
};
let mut children = vec![];
<|fim▁hole|> self.tokenizer.next();
break;
},
None => return Err(ParseError),
_ => {
let child = try!(self.parse_node());
children.push(child);
},
};
}
Ok(Node::Operation{operator: op, children: children})
}
}<|fim▁end|> | loop {
match self.tokenizer.peek() {
Some(&lex::Token::CloseParen) => { |
<|file_name|>avgscore_test.go<|end_file_name|><|fim▁begin|>package main
import (
"testing"
)
func TestAvgScore(t *testing.T) {
scores := []int{30, 60, 80, 100}
exp := 67
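	// integer average: (30+60+80+100)/4 = 270/4 truncates to 67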
act := avgScore(scores)<|fim▁hole|> if exp != act {
t.Error("Expected", exp, "got", act)
}
}<|fim▁end|> | |
<|file_name|>models.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import inspect
import logging
import os
import re
import textwrap
import time
import unittest
import urllib
from tempfile import NamedTemporaryFile, mkdtemp
import pendulum
import six
from mock import ANY, Mock, mock_open, patch
from parameterized import parameterized
from airflow import AirflowException, configuration, models, settings
from airflow.exceptions import AirflowDagCycleException, AirflowSkipException
from airflow.jobs import BackfillJob
from airflow.models import Connection
from airflow.models import DAG, TaskInstance as TI
from airflow.models import DagModel, DagRun, DagStat
from airflow.models import KubeResourceVersion, KubeWorkerIdentifier
from airflow.models import SkipMixin
from airflow.models import State as ST
from airflow.models import XCom
from airflow.models import clear_task_instances
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import ShortCircuitOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.utils import timezone
from airflow.utils.dag_processing import SimpleTaskInstance
from airflow.utils.db import create_session
from airflow.utils.state import State
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.weight_rule import WeightRule
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
class DagTest(unittest.TestCase):
def test_params_not_passed_is_empty_dict(self):
"""
        Test that when 'params' is _not_ passed to a new Dag, the params
attribute is set to an empty dictionary.
"""
dag = models.DAG('test-dag')
self.assertEqual(dict, type(dag.params))
self.assertEqual(0, len(dag.params))
def test_params_passed_and_params_in_default_args_no_override(self):
"""
Test that when 'params' exists as a key passed to the default_args dict
in addition to params being passed explicitly as an argument to the
        dag, the 'params' key of the default_args dict is merged with the
dict of the params argument.
"""
params1 = {'parameter1': 1}
params2 = {'parameter2': 2}
dag = models.DAG('test-dag',
default_args={'params': params1},
params=params2)
params_combined = params1.copy()
params_combined.update(params2)
self.assertEqual(params_combined, dag.params)
def test_dag_as_context_manager(self):
"""
Test DAG as a context manager.
When used as a context manager, Operators are automatically added to
the DAG (unless they specify a different DAG)
"""
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
dag2 = DAG(
'dag2',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner2'})
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2', dag=dag2)
self.assertIs(op1.dag, dag)
self.assertEqual(op1.owner, 'owner1')
self.assertIs(op2.dag, dag2)
self.assertEqual(op2.owner, 'owner2')
with dag2:
op3 = DummyOperator(task_id='op3')
self.assertIs(op3.dag, dag2)
self.assertEqual(op3.owner, 'owner2')
with dag:
with dag2:
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
self.assertIs(op4.dag, dag2)
self.assertIs(op5.dag, dag)
self.assertEqual(op4.owner, 'owner2')
self.assertEqual(op5.owner, 'owner1')
with DAG('creating_dag_in_cm', start_date=DEFAULT_DATE) as dag:
DummyOperator(task_id='op6')
self.assertEqual(dag.dag_id, 'creating_dag_in_cm')
self.assertEqual(dag.tasks[0].task_id, 'op6')
with dag:
with dag:
op7 = DummyOperator(task_id='op7')
op8 = DummyOperator(task_id='op8')
op9 = DummyOperator(task_id='op8')
op9.dag = dag2
self.assertEqual(op7.dag, dag)
self.assertEqual(op8.dag, dag)
self.assertEqual(op9.dag, dag2)
def test_dag_topological_sort(self):
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B
# A -> C -> D
# ordered: B, D, C, A or D, B, C, A or D, C, B, A
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op1.set_upstream([op2, op3])
op3.set_upstream(op4)
topological_list = dag.topological_sort()
logging.info(topological_list)
tasks = [op2, op3, op4]
self.assertTrue(topological_list[0] in tasks)
tasks.remove(topological_list[0])
self.assertTrue(topological_list[1] in tasks)
tasks.remove(topological_list[1])
self.assertTrue(topological_list[2] in tasks)
tasks.remove(topological_list[2])
self.assertTrue(topological_list[3] == op1)
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# C -> (A u B) -> D
# C -> E
# ordered: E | D, A | B, C
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op5 = DummyOperator(task_id='E')
op1.set_downstream(op3)
op2.set_downstream(op3)
op1.set_upstream(op4)
op2.set_upstream(op4)
op5.set_downstream(op3)
topological_list = dag.topological_sort()
logging.info(topological_list)
set1 = [op4, op5]
self.assertTrue(topological_list[0] in set1)
set1.remove(topological_list[0])
set2 = [op1, op2]
set2.extend(set1)
self.assertTrue(topological_list[1] in set2)
set2.remove(topological_list[1])
self.assertTrue(topological_list[2] in set2)
set2.remove(topological_list[2])
self.assertTrue(topological_list[3] in set2)
self.assertTrue(topological_list[4] == op3)
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
self.assertEquals(tuple(), dag.topological_sort())
def test_dag_naive_default_args_start_date(self):
dag = DAG('DAG', default_args={'start_date': datetime.datetime(2018, 1, 1)})
self.assertEqual(dag.timezone, settings.TIMEZONE)
dag = DAG('DAG', start_date=datetime.datetime(2018, 1, 1))
self.assertEqual(dag.timezone, settings.TIMEZONE)
def test_dag_none_default_args_start_date(self):
"""
Tests if a start_date of None in default_args
works.
"""
dag = DAG('DAG', default_args={'start_date': None})
self.assertEqual(dag.timezone, settings.TIMEZONE)
def test_dag_task_priority_weight_total(self):
width = 5
depth = 5
weight = 5
pattern = re.compile('stage(\\d*).(\\d*)')
        # Fully connected parallel tasks, i.e. every task at each parallel
# stage is dependent on every task in the previous stage.
# Default weight should be calculated using downstream descendants
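        # e.g. with width=5, depth=5, weight=5 a stage-0 task has 4 downstream
        # stages of 5 tasks each plus itself: ((5 - 1) * 5 + 1) * 5 = 105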
with DAG('dag', start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'}) as dag:
pipeline = [
[DummyOperator(
task_id='stage{}.{}'.format(i, j), priority_weight=weight)
for j in range(0, width)] for i in range(0, depth)
]
for d, stage in enumerate(pipeline):
if d == 0:
continue
for current_task in stage:
for prev_task in pipeline[d - 1]:
current_task.set_upstream(prev_task)
for task in six.itervalues(dag.task_dict):
match = pattern.match(task.task_id)
task_depth = int(match.group(1))
                # the sum of the stages after this task + itself
correct_weight = ((depth - (task_depth + 1)) * width + 1) * weight
calculated_weight = task.priority_weight_total
self.assertEquals(calculated_weight, correct_weight)
# Same test as above except use 'upstream' for weight calculation
weight = 3
with DAG('dag', start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'}) as dag:
pipeline = [
[DummyOperator(
task_id='stage{}.{}'.format(i, j), priority_weight=weight,
weight_rule=WeightRule.UPSTREAM)
for j in range(0, width)] for i in range(0, depth)
]
for d, stage in enumerate(pipeline):
if d == 0:
continue
for current_task in stage:
for prev_task in pipeline[d - 1]:
current_task.set_upstream(prev_task)
for task in six.itervalues(dag.task_dict):
match = pattern.match(task.task_id)
task_depth = int(match.group(1))
                # the sum of the stages before this task + itself
correct_weight = (task_depth * width + 1) * weight
calculated_weight = task.priority_weight_total
self.assertEquals(calculated_weight, correct_weight)
# Same test as above except use 'absolute' for weight calculation
weight = 10
with DAG('dag', start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'}) as dag:
pipeline = [
[DummyOperator(
task_id='stage{}.{}'.format(i, j), priority_weight=weight,
weight_rule=WeightRule.ABSOLUTE)
for j in range(0, width)] for i in range(0, depth)
]
for d, stage in enumerate(pipeline):
if d == 0:
continue
for current_task in stage:
for prev_task in pipeline[d - 1]:
current_task.set_upstream(prev_task)
for task in six.itervalues(dag.task_dict):
match = pattern.match(task.task_id)
task_depth = int(match.group(1))
                # absolute rule: just the task's own weight
correct_weight = weight
calculated_weight = task.priority_weight_total
self.assertEquals(calculated_weight, correct_weight)
# Test if we enter an invalid weight rule
with DAG('dag', start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'}) as dag:
with self.assertRaises(AirflowException):
DummyOperator(task_id='should_fail', weight_rule='no rule')
def test_get_num_task_instances(self):
test_dag_id = 'test_get_num_task_instances_dag'
test_task_id = 'task_1'
test_dag = DAG(dag_id=test_dag_id, start_date=DEFAULT_DATE)
test_task = DummyOperator(task_id=test_task_id, dag=test_dag)
ti1 = TI(task=test_task, execution_date=DEFAULT_DATE)
ti1.state = None
ti2 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti2.state = State.RUNNING
ti3 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=2))
ti3.state = State.QUEUED
ti4 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=3))
ti4.state = State.RUNNING
session = settings.Session()
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(
0,
DAG.get_num_task_instances(test_dag_id, ['fakename'], session=session)
)
self.assertEqual(
4,
DAG.get_num_task_instances(test_dag_id, [test_task_id], session=session)
)
self.assertEqual(
4,
DAG.get_num_task_instances(
test_dag_id, ['fakename', test_task_id], session=session)
)
self.assertEqual(
1,
DAG.get_num_task_instances(
test_dag_id, [test_task_id], states=[None], session=session)
)
self.assertEqual(
2,
DAG.get_num_task_instances(
test_dag_id, [test_task_id], states=[State.RUNNING], session=session)
)
self.assertEqual(
3,
DAG.get_num_task_instances(
test_dag_id, [test_task_id],
states=[None, State.RUNNING], session=session)
)
self.assertEqual(
4,
DAG.get_num_task_instances(
test_dag_id, [test_task_id],
states=[None, State.QUEUED, State.RUNNING], session=session)
)
session.close()
def test_render_template_field(self):
"""Tests if render_template from a field works"""
dag = DAG('test-dag',
start_date=DEFAULT_DATE)
with dag:
task = DummyOperator(task_id='op1')
result = task.render_template('', '{{ foo }}', dict(foo='bar'))
self.assertEqual(result, 'bar')
def test_render_template_field_macro(self):
""" Tests if render_template from a field works,
if a custom filter was defined"""
dag = DAG('test-dag',
start_date=DEFAULT_DATE,
user_defined_macros=dict(foo='bar'))
with dag:
task = DummyOperator(task_id='op1')
result = task.render_template('', '{{ foo }}', dict())
self.assertEqual(result, 'bar')
def test_render_template_numeric_field(self):
""" Tests if render_template from a field works,
if a custom filter was defined"""
dag = DAG('test-dag',
start_date=DEFAULT_DATE,
user_defined_macros=dict(foo='bar'))
with dag:
task = DummyOperator(task_id='op1')
result = task.render_template('', 1, dict())
self.assertEqual(result, 1)
def test_user_defined_filters(self):
def jinja_udf(name):
return 'Hello %s' % name
dag = models.DAG('test-dag',
start_date=DEFAULT_DATE,
user_defined_filters=dict(hello=jinja_udf))
jinja_env = dag.get_template_env()
self.assertIn('hello', jinja_env.filters)
self.assertEqual(jinja_env.filters['hello'], jinja_udf)
def test_render_template_field_filter(self):
""" Tests if render_template from a field works,
if a custom filter was defined"""
def jinja_udf(name):
return 'Hello %s' % name
dag = DAG('test-dag',
start_date=DEFAULT_DATE,
user_defined_filters=dict(hello=jinja_udf))
with dag:
task = DummyOperator(task_id='op1')
result = task.render_template('', "{{ 'world' | hello}}", dict())
self.assertEqual(result, 'Hello world')
def test_resolve_template_files_value(self):
with NamedTemporaryFile(suffix='.template') as f:
f.write('{{ ds }}'.encode('utf8'))
f.flush()
template_dir = os.path.dirname(f.name)
template_file = os.path.basename(f.name)
dag = DAG('test-dag',
start_date=DEFAULT_DATE,
template_searchpath=template_dir)
with dag:
task = DummyOperator(task_id='op1')
task.test_field = template_file
task.template_fields = ('test_field',)
task.template_ext = ('.template',)
task.resolve_template_files()
self.assertEqual(task.test_field, '{{ ds }}')
def test_resolve_template_files_list(self):
        with NamedTemporaryFile(suffix='.template') as f:
f.write('{{ ds }}'.encode('utf8'))
f.flush()
template_dir = os.path.dirname(f.name)
template_file = os.path.basename(f.name)
dag = DAG('test-dag',
start_date=DEFAULT_DATE,
template_searchpath=template_dir)
with dag:
task = DummyOperator(task_id='op1')
task.test_field = [template_file, 'some_string']
task.template_fields = ('test_field',)
task.template_ext = ('.template',)
task.resolve_template_files()
self.assertEqual(task.test_field, ['{{ ds }}', 'some_string'])
def test_cycle(self):
# test empty
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
self.assertFalse(dag.test_cycle())
# test single task
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
opA = DummyOperator(task_id='A')
self.assertFalse(dag.test_cycle())
# test no cycle
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B -> C
# B -> D
# E -> F
with dag:
opA = DummyOperator(task_id='A')
opB = DummyOperator(task_id='B')
opC = DummyOperator(task_id='C')
opD = DummyOperator(task_id='D')
opE = DummyOperator(task_id='E')
opF = DummyOperator(task_id='F')
opA.set_downstream(opB)
opB.set_downstream(opC)
opB.set_downstream(opD)
opE.set_downstream(opF)
self.assertFalse(dag.test_cycle())
# test self loop
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> A
with dag:
opA = DummyOperator(task_id='A')
opA.set_downstream(opA)
with self.assertRaises(AirflowDagCycleException):
dag.test_cycle()
# test downstream self loop
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B -> C -> D -> E -> E
with dag:
opA = DummyOperator(task_id='A')
opB = DummyOperator(task_id='B')
opC = DummyOperator(task_id='C')
opD = DummyOperator(task_id='D')
opE = DummyOperator(task_id='E')
opA.set_downstream(opB)
opB.set_downstream(opC)
opC.set_downstream(opD)
opD.set_downstream(opE)
opE.set_downstream(opE)
with self.assertRaises(AirflowDagCycleException):
dag.test_cycle()
# large loop
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B -> C -> D -> E -> A
with dag:
opA = DummyOperator(task_id='A')
opB = DummyOperator(task_id='B')
opC = DummyOperator(task_id='C')
opD = DummyOperator(task_id='D')
opE = DummyOperator(task_id='E')
opA.set_downstream(opB)
opB.set_downstream(opC)
opC.set_downstream(opD)
opD.set_downstream(opE)
opE.set_downstream(opA)
with self.assertRaises(AirflowDagCycleException):
dag.test_cycle()
# test arbitrary loop
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# E-> A -> B -> F -> A
# -> C -> F
with dag:
opA = DummyOperator(task_id='A')
opB = DummyOperator(task_id='B')
opC = DummyOperator(task_id='C')
opD = DummyOperator(task_id='D')
opE = DummyOperator(task_id='E')
opF = DummyOperator(task_id='F')
opA.set_downstream(opB)
opA.set_downstream(opC)
opE.set_downstream(opA)
opC.set_downstream(opF)
opB.set_downstream(opF)
opF.set_downstream(opA)
with self.assertRaises(AirflowDagCycleException):
dag.test_cycle()
def test_following_previous_schedule(self):
"""
Make sure DST transitions are properly observed
"""
local_tz = pendulum.timezone('Europe/Zurich')
start = local_tz.convert(datetime.datetime(2018, 10, 28, 2, 55),
dst_rule=pendulum.PRE_TRANSITION)
self.assertEqual(start.isoformat(), "2018-10-28T02:55:00+02:00",
"Pre-condition: start date is in DST")
utc = timezone.convert_to_utc(start)
dag = DAG('tz_dag', start_date=start, schedule_interval='*/5 * * * *')
_next = dag.following_schedule(utc)
next_local = local_tz.convert(_next)
self.assertEqual(_next.isoformat(), "2018-10-28T01:00:00+00:00")
self.assertEqual(next_local.isoformat(), "2018-10-28T02:00:00+01:00")
prev = dag.previous_schedule(utc)
prev_local = local_tz.convert(prev)
self.assertEqual(prev_local.isoformat(), "2018-10-28T02:50:00+02:00")
prev = dag.previous_schedule(_next)
prev_local = local_tz.convert(prev)
self.assertEqual(prev_local.isoformat(), "2018-10-28T02:55:00+02:00")
self.assertEqual(prev, utc)
def test_following_previous_schedule_daily_dag_CEST_to_CET(self):
"""
Make sure DST transitions are properly observed
"""
local_tz = pendulum.timezone('Europe/Zurich')
start = local_tz.convert(datetime.datetime(2018, 10, 27, 3),
dst_rule=pendulum.PRE_TRANSITION)
utc = timezone.convert_to_utc(start)
dag = DAG('tz_dag', start_date=start, schedule_interval='0 3 * * *')
prev = dag.previous_schedule(utc)
prev_local = local_tz.convert(prev)
self.assertEqual(prev_local.isoformat(), "2018-10-26T03:00:00+02:00")
self.assertEqual(prev.isoformat(), "2018-10-26T01:00:00+00:00")
_next = dag.following_schedule(utc)
next_local = local_tz.convert(_next)
self.assertEqual(next_local.isoformat(), "2018-10-28T03:00:00+01:00")
self.assertEqual(_next.isoformat(), "2018-10-28T02:00:00+00:00")
prev = dag.previous_schedule(_next)
prev_local = local_tz.convert(prev)
self.assertEqual(prev_local.isoformat(), "2018-10-27T03:00:00+02:00")
self.assertEqual(prev.isoformat(), "2018-10-27T01:00:00+00:00")
def test_following_previous_schedule_daily_dag_CET_to_CEST(self):
"""
Make sure DST transitions are properly observed
"""
local_tz = pendulum.timezone('Europe/Zurich')
start = local_tz.convert(datetime.datetime(2018, 3, 25, 2),
dst_rule=pendulum.PRE_TRANSITION)
utc = timezone.convert_to_utc(start)
dag = DAG('tz_dag', start_date=start, schedule_interval='0 3 * * *')
prev = dag.previous_schedule(utc)
prev_local = local_tz.convert(prev)
self.assertEqual(prev_local.isoformat(), "2018-03-24T03:00:00+01:00")
self.assertEqual(prev.isoformat(), "2018-03-24T02:00:00+00:00")
_next = dag.following_schedule(utc)
next_local = local_tz.convert(_next)
self.assertEqual(next_local.isoformat(), "2018-03-25T03:00:00+02:00")
self.assertEqual(_next.isoformat(), "2018-03-25T01:00:00+00:00")
prev = dag.previous_schedule(_next)
prev_local = local_tz.convert(prev)
self.assertEqual(prev_local.isoformat(), "2018-03-24T03:00:00+01:00")
self.assertEqual(prev.isoformat(), "2018-03-24T02:00:00+00:00")
@patch('airflow.models.timezone.utcnow')
def test_sync_to_db(self, mock_now):
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
)
with dag:
DummyOperator(task_id='task', owner='owner1')
SubDagOperator(
task_id='subtask',
owner='owner2',
subdag=DAG(
'dag.subtask',
start_date=DEFAULT_DATE,
)
)
now = datetime.datetime.utcnow().replace(tzinfo=pendulum.timezone('UTC'))
mock_now.return_value = now
session = settings.Session()
dag.sync_to_db(session=session)
orm_dag = session.query(DagModel).filter(DagModel.dag_id == 'dag').one()
self.assertEqual(set(orm_dag.owners.split(', ')), {'owner1', 'owner2'})
self.assertEqual(orm_dag.last_scheduler_run, now)
self.assertTrue(orm_dag.is_active)
orm_subdag = session.query(DagModel).filter(
DagModel.dag_id == 'dag.subtask').one()
self.assertEqual(set(orm_subdag.owners.split(', ')), {'owner1', 'owner2'})
self.assertEqual(orm_subdag.last_scheduler_run, now)
self.assertTrue(orm_subdag.is_active)
class DagStatTest(unittest.TestCase):
def test_dagstats_crud(self):
DagStat.create(dag_id='test_dagstats_crud')
session = settings.Session()
qry = session.query(DagStat).filter(DagStat.dag_id == 'test_dagstats_crud')
self.assertEqual(len(qry.all()), len(State.dag_states))
DagStat.set_dirty(dag_id='test_dagstats_crud')
res = qry.all()
for stat in res:
self.assertTrue(stat.dirty)
# create missing
DagStat.set_dirty(dag_id='test_dagstats_crud_2')
qry2 = session.query(DagStat).filter(DagStat.dag_id == 'test_dagstats_crud_2')
self.assertEqual(len(qry2.all()), len(State.dag_states))
dag = DAG(
'test_dagstats_crud',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
DummyOperator(task_id='A')
now = timezone.utcnow()
dag.create_dagrun(
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.FAILED,
external_trigger=False,
)
DagStat.update(dag_ids=['test_dagstats_crud'])
res = qry.all()
for stat in res:
if stat.state == State.FAILED:
self.assertEqual(stat.count, 1)
else:
self.assertEqual(stat.count, 0)
DagStat.update()
res = qry2.all()
for stat in res:
self.assertFalse(stat.dirty)
def test_update_exception(self):
session = Mock()
(session.query.return_value
.filter.return_value
.with_for_update.return_value
.all.side_effect) = RuntimeError('it broke')
DagStat.update(session=session)
session.rollback.assert_called()
def test_set_dirty_exception(self):
session = Mock()
session.query.return_value.filter.return_value.all.return_value = []
(session.query.return_value
.filter.return_value
.with_for_update.return_value
.all.side_effect) = RuntimeError('it broke')
DagStat.set_dirty('dag', session)
session.rollback.assert_called()
class DagRunTest(unittest.TestCase):
def create_dag_run(self, dag,
state=State.RUNNING,
task_states=None,
execution_date=None,
is_backfill=False,
):
now = timezone.utcnow()
if execution_date is None:
execution_date = now
if is_backfill:
run_id = BackfillJob.ID_PREFIX + now.isoformat()
else:
run_id = 'manual__' + now.isoformat()
dag_run = dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
start_date=now,
state=state,
external_trigger=False,
)
if task_states is not None:
session = settings.Session()
for task_id, state in task_states.items():
ti = dag_run.get_task_instance(task_id)
ti.set_state(state, session)
session.close()
return dag_run
def test_clear_task_instances_for_backfill_dagrun(self):
now = timezone.utcnow()
session = settings.Session()
dag_id = 'test_clear_task_instances_for_backfill_dagrun'
dag = DAG(dag_id=dag_id, start_date=now)
self.create_dag_run(dag, execution_date=now, is_backfill=True)
task0 = DummyOperator(task_id='backfill_task_0', owner='test', dag=dag)
ti0 = TI(task=task0, execution_date=now)
ti0.run()
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session)
session.commit()
ti0.refresh_from_db()
dr0 = session.query(DagRun).filter(
DagRun.dag_id == dag_id,
DagRun.execution_date == now
).first()
self.assertEquals(dr0.state, State.RUNNING)
def test_id_for_date(self):
run_id = models.DagRun.id_for_date(
timezone.datetime(2015, 1, 2, 3, 4, 5, 6))
self.assertEqual(
'scheduled__2015-01-02T03:04:05', run_id,
'Generated run_id did not match expectations: {0}'.format(run_id))
def test_dagrun_find(self):
session = settings.Session()
now = timezone.utcnow()
dag_id1 = "test_dagrun_find_externally_triggered"
dag_run = models.DagRun(
dag_id=dag_id1,
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=True,
)
session.add(dag_run)
dag_id2 = "test_dagrun_find_not_externally_triggered"
dag_run = models.DagRun(
dag_id=dag_id2,
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=False,
)
session.add(dag_run)
session.commit()
self.assertEqual(1,
len(models.DagRun.find(dag_id=dag_id1, external_trigger=True)))
self.assertEqual(0,
len(models.DagRun.find(dag_id=dag_id1, external_trigger=False)))
self.assertEqual(0,
len(models.DagRun.find(dag_id=dag_id2, external_trigger=True)))
self.assertEqual(1,
len(models.DagRun.find(dag_id=dag_id2, external_trigger=False)))
def test_dagrun_success_when_all_skipped(self):
"""
Tests that a DAG run succeeds when all tasks are skipped
"""
dag = DAG(
dag_id='test_dagrun_success_when_all_skipped',
start_date=timezone.datetime(2017, 1, 1)
)
dag_task1 = ShortCircuitOperator(
task_id='test_short_circuit_false',
dag=dag,
python_callable=lambda: False)
dag_task2 = DummyOperator(
task_id='test_state_skipped1',
dag=dag)
dag_task3 = DummyOperator(
task_id='test_state_skipped2',
dag=dag)
dag_task1.set_downstream(dag_task2)
dag_task2.set_downstream(dag_task3)
initial_task_states = {
'test_short_circuit_false': State.SUCCESS,
'test_state_skipped1': State.SKIPPED,
'test_state_skipped2': State.SKIPPED,
}
dag_run = self.create_dag_run(dag=dag,
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.SUCCESS, updated_dag_state)
def test_dagrun_success_conditions(self):
session = settings.Session()
dag = DAG(
'test_dagrun_success_conditions',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B
# A -> C -> D
# ordered: B, D, C, A or D, B, C, A or D, C, B, A
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op1.set_upstream([op2, op3])
op3.set_upstream(op4)
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(run_id='test_dagrun_success_conditions',
state=State.RUNNING,
execution_date=now,
start_date=now)
# op1 = root
ti_op1 = dr.get_task_instance(task_id=op1.task_id)
ti_op1.set_state(state=State.SUCCESS, session=session)
ti_op2 = dr.get_task_instance(task_id=op2.task_id)
ti_op3 = dr.get_task_instance(task_id=op3.task_id)
ti_op4 = dr.get_task_instance(task_id=op4.task_id)
# root is successful, but unfinished tasks
state = dr.update_state()
self.assertEqual(State.RUNNING, state)
# one has failed, but root is successful
ti_op2.set_state(state=State.FAILED, session=session)
ti_op3.set_state(state=State.SUCCESS, session=session)
ti_op4.set_state(state=State.SUCCESS, session=session)
state = dr.update_state()
self.assertEqual(State.SUCCESS, state)
def test_dagrun_deadlock(self):
session = settings.Session()
dag = DAG(
            'test_dagrun_deadlock',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op2.trigger_rule = TriggerRule.ONE_FAILED
op2.set_upstream(op1)
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(run_id='test_dagrun_deadlock',
state=State.RUNNING,
execution_date=now,
start_date=now)
ti_op1 = dr.get_task_instance(task_id=op1.task_id)
ti_op1.set_state(state=State.SUCCESS, session=session)
ti_op2 = dr.get_task_instance(task_id=op2.task_id)
ti_op2.set_state(state=State.NONE, session=session)
dr.update_state()
self.assertEqual(dr.state, State.RUNNING)
ti_op2.set_state(state=State.NONE, session=session)
op2.trigger_rule = 'invalid'
dr.update_state()
self.assertEqual(dr.state, State.FAILED)
def test_dagrun_no_deadlock_with_shutdown(self):
session = settings.Session()
dag = DAG('test_dagrun_no_deadlock_with_shutdown',
start_date=DEFAULT_DATE)
with dag:
op1 = DummyOperator(task_id='upstream_task')
op2 = DummyOperator(task_id='downstream_task')
op2.set_upstream(op1)
dr = dag.create_dagrun(run_id='test_dagrun_no_deadlock_with_shutdown',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
upstream_ti = dr.get_task_instance(task_id='upstream_task')
upstream_ti.set_state(State.SHUTDOWN, session=session)
dr.update_state()
self.assertEqual(dr.state, State.RUNNING)
def test_dagrun_no_deadlock_with_depends_on_past(self):
session = settings.Session()
dag = DAG('test_dagrun_no_deadlock',
start_date=DEFAULT_DATE)
with dag:
DummyOperator(task_id='dop', depends_on_past=True)
DummyOperator(task_id='tc', task_concurrency=1)
dag.clear()
dr = dag.create_dagrun(run_id='test_dagrun_no_deadlock_1',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
dr2 = dag.create_dagrun(run_id='test_dagrun_no_deadlock_2',
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(days=1),
start_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti1_op1 = dr.get_task_instance(task_id='dop')
dr2.get_task_instance(task_id='dop')
ti2_op1 = dr.get_task_instance(task_id='tc')
dr.get_task_instance(task_id='tc')
ti1_op1.set_state(state=State.RUNNING, session=session)
dr.update_state()
dr2.update_state()
self.assertEqual(dr.state, State.RUNNING)
self.assertEqual(dr2.state, State.RUNNING)
ti2_op1.set_state(state=State.RUNNING, session=session)
dr.update_state()
dr2.update_state()
self.assertEqual(dr.state, State.RUNNING)
self.assertEqual(dr2.state, State.RUNNING)
def test_dagrun_success_callback(self):
def on_success_callable(context):
self.assertEqual(
context['dag_run'].dag_id,
'test_dagrun_success_callback'
)
dag = DAG(
dag_id='test_dagrun_success_callback',
start_date=datetime.datetime(2017, 1, 1),
on_success_callback=on_success_callable,
)
dag_task1 = DummyOperator(
task_id='test_state_succeeded1',
dag=dag)
dag_task2 = DummyOperator(
task_id='test_state_succeeded2',
dag=dag)
dag_task1.set_downstream(dag_task2)
initial_task_states = {
'test_state_succeeded1': State.SUCCESS,
'test_state_succeeded2': State.SUCCESS,
}
dag_run = self.create_dag_run(dag=dag,
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.SUCCESS, updated_dag_state)
def test_dagrun_failure_callback(self):
def on_failure_callable(context):
self.assertEqual(
context['dag_run'].dag_id,
'test_dagrun_failure_callback'
)
dag = DAG(
dag_id='test_dagrun_failure_callback',
start_date=datetime.datetime(2017, 1, 1),
on_failure_callback=on_failure_callable,
)
dag_task1 = DummyOperator(
task_id='test_state_succeeded1',
dag=dag)
dag_task2 = DummyOperator(
task_id='test_state_failed2',
dag=dag)
initial_task_states = {
'test_state_succeeded1': State.SUCCESS,
'test_state_failed2': State.FAILED,
}
dag_task1.set_downstream(dag_task2)
dag_run = self.create_dag_run(dag=dag,
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.FAILED, updated_dag_state)
def test_dagrun_set_state_end_date(self):
session = settings.Session()
dag = DAG(
'test_dagrun_set_state_end_date',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(run_id='test_dagrun_set_state_end_date',
state=State.RUNNING,
execution_date=now,
start_date=now)
# Initial end_date should be NULL
# State.SUCCESS and State.FAILED are all ending state and should set end_date
# State.RUNNING set end_date back to NULL
session.add(dr)
session.commit()
self.assertIsNone(dr.end_date)
dr.set_state(State.SUCCESS)
session.merge(dr)
session.commit()
dr_database = session.query(DagRun).filter(
DagRun.run_id == 'test_dagrun_set_state_end_date'
).one()
self.assertIsNotNone(dr_database.end_date)
self.assertEqual(dr.end_date, dr_database.end_date)
dr.set_state(State.RUNNING)
session.merge(dr)
session.commit()
dr_database = session.query(DagRun).filter(
DagRun.run_id == 'test_dagrun_set_state_end_date'
).one()
self.assertIsNone(dr_database.end_date)
dr.set_state(State.FAILED)
session.merge(dr)
session.commit()
dr_database = session.query(DagRun).filter(
DagRun.run_id == 'test_dagrun_set_state_end_date'
).one()
self.assertIsNotNone(dr_database.end_date)
self.assertEqual(dr.end_date, dr_database.end_date)
def test_dagrun_update_state_end_date(self):
session = settings.Session()
dag = DAG(
'test_dagrun_update_state_end_date',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op1.set_upstream(op2)
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(run_id='test_dagrun_update_state_end_date',
state=State.RUNNING,
execution_date=now,
start_date=now)
# Initial end_date should be NULL
# State.SUCCESS and State.FAILED are all ending state and should set end_date
# State.RUNNING set end_date back to NULL
session.merge(dr)
session.commit()
self.assertIsNone(dr.end_date)
ti_op1 = dr.get_task_instance(task_id=op1.task_id)
ti_op1.set_state(state=State.SUCCESS, session=session)
ti_op2 = dr.get_task_instance(task_id=op2.task_id)
ti_op2.set_state(state=State.SUCCESS, session=session)
dr.update_state()
dr_database = session.query(DagRun).filter(
DagRun.run_id == 'test_dagrun_update_state_end_date'
).one()
self.assertIsNotNone(dr_database.end_date)
self.assertEqual(dr.end_date, dr_database.end_date)
ti_op1.set_state(state=State.RUNNING, session=session)
ti_op2.set_state(state=State.RUNNING, session=session)
dr.update_state()
<|fim▁hole|>
self.assertEqual(dr._state, State.RUNNING)
self.assertIsNone(dr.end_date)
self.assertIsNone(dr_database.end_date)
ti_op1.set_state(state=State.FAILED, session=session)
ti_op2.set_state(state=State.FAILED, session=session)
dr.update_state()
dr_database = session.query(DagRun).filter(
DagRun.run_id == 'test_dagrun_update_state_end_date'
).one()
self.assertIsNotNone(dr_database.end_date)
self.assertEqual(dr.end_date, dr_database.end_date)
def test_get_task_instance_on_empty_dagrun(self):
"""
Make sure that a proper value is returned when a dagrun has no task instances
"""
dag = DAG(
dag_id='test_get_task_instance_on_empty_dagrun',
start_date=timezone.datetime(2017, 1, 1)
)
ShortCircuitOperator(
task_id='test_short_circuit_false',
dag=dag,
python_callable=lambda: False)
session = settings.Session()
now = timezone.utcnow()
# Don't use create_dagrun since it will create the task instances too which we
# don't want
dag_run = models.DagRun(
dag_id=dag.dag_id,
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=False,
)
session.add(dag_run)
session.commit()
ti = dag_run.get_task_instance('test_short_circuit_false')
self.assertEqual(None, ti)
def test_get_latest_runs(self):
session = settings.Session()
dag = DAG(
dag_id='test_latest_runs_1',
start_date=DEFAULT_DATE)
self.create_dag_run(dag, execution_date=timezone.datetime(2015, 1, 1))
self.create_dag_run(dag, execution_date=timezone.datetime(2015, 1, 2))
dagruns = models.DagRun.get_latest_runs(session)
session.close()
for dagrun in dagruns:
if dagrun.dag_id == 'test_latest_runs_1':
self.assertEqual(dagrun.execution_date, timezone.datetime(2015, 1, 2))
def test_is_backfill(self):
dag = DAG(dag_id='test_is_backfill', start_date=DEFAULT_DATE)
dagrun = self.create_dag_run(dag, execution_date=DEFAULT_DATE)
dagrun.run_id = BackfillJob.ID_PREFIX + '_sfddsffds'
dagrun2 = self.create_dag_run(
dag, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
dagrun3 = self.create_dag_run(
dag, execution_date=DEFAULT_DATE + datetime.timedelta(days=2))
dagrun3.run_id = None
self.assertTrue(dagrun.is_backfill)
self.assertFalse(dagrun2.is_backfill)
self.assertFalse(dagrun3.is_backfill)
def test_removed_task_instances_can_be_restored(self):
def with_all_tasks_removed(dag):
return DAG(dag_id=dag.dag_id, start_date=dag.start_date)
dag = DAG('test_task_restoration', start_date=DEFAULT_DATE)
dag.add_task(DummyOperator(task_id='flaky_task', owner='test'))
dagrun = self.create_dag_run(dag)
flaky_ti = dagrun.get_task_instances()[0]
self.assertEquals('flaky_task', flaky_ti.task_id)
self.assertEquals(State.NONE, flaky_ti.state)
dagrun.dag = with_all_tasks_removed(dag)
dagrun.verify_integrity()
flaky_ti.refresh_from_db()
self.assertEquals(State.NONE, flaky_ti.state)
dagrun.dag.add_task(DummyOperator(task_id='flaky_task', owner='test'))
dagrun.verify_integrity()
flaky_ti.refresh_from_db()
self.assertEquals(State.NONE, flaky_ti.state)
class DagBagTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.empty_dir = mkdtemp()
@classmethod
def tearDownClass(cls):
os.rmdir(cls.empty_dir)
def test_get_existing_dag(self):
"""
        test that we're able to parse some example DAGs and retrieve them
"""
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=True)
some_expected_dag_ids = ["example_bash_operator",
"example_branch_operator"]
for dag_id in some_expected_dag_ids:
dag = dagbag.get_dag(dag_id)
self.assertIsNotNone(dag)
self.assertEqual(dag_id, dag.dag_id)
self.assertGreaterEqual(dagbag.size(), 7)
def test_get_non_existing_dag(self):
"""
        test that retrieving a non-existing dag id returns None without crashing
"""
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
non_existing_dag_id = "non_existing_dag_id"
self.assertIsNone(dagbag.get_dag(non_existing_dag_id))
def test_process_file_that_contains_multi_bytes_char(self):
"""
        test that we're able to parse a file that contains a multi-byte char
"""
f = NamedTemporaryFile()
f.write('\u3042'.encode('utf8')) # write multi-byte char (hiragana)
f.flush()
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
self.assertEqual([], dagbag.process_file(f.name))
def test_zip_skip_log(self):
"""
        test that loading DAGs from a zip file skips another file inside the
        zip because it contains neither "airflow" nor "DAG"
"""
with patch('airflow.models.DagBag.log') as log_mock:
log_mock.info = Mock()
test_zip_path = os.path.join(TEST_DAGS_FOLDER, "test_zip.zip")
dagbag = models.DagBag(dag_folder=test_zip_path, include_examples=False)
self.assertTrue(dagbag.has_logged)
log_mock.info.assert_any_call("File %s assumed to contain no DAGs. Skipping.",
test_zip_path)
def test_zip(self):
"""
test the loading of a DAG within a zip file that includes dependencies
"""
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
dagbag.process_file(os.path.join(TEST_DAGS_FOLDER, "test_zip.zip"))
self.assertTrue(dagbag.get_dag("test_zip_dag"))
def test_process_file_cron_validity_check(self):
"""
        test that an invalid cron expression used as a
        schedule interval is identified
"""
invalid_dag_files = ["test_invalid_cron.py", "test_zip_invalid_cron.zip"]
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
self.assertEqual(len(dagbag.import_errors), 0)
for d in invalid_dag_files:
dagbag.process_file(os.path.join(TEST_DAGS_FOLDER, d))
self.assertEqual(len(dagbag.import_errors), len(invalid_dag_files))
@patch.object(DagModel, 'get_current')
def test_get_dag_without_refresh(self, mock_dagmodel):
"""
Test that, once a DAG is loaded, it doesn't get refreshed again if it
hasn't been expired.
"""
dag_id = 'example_bash_operator'
mock_dagmodel.return_value = DagModel()
mock_dagmodel.return_value.last_expired = None
mock_dagmodel.return_value.fileloc = 'foo'
class TestDagBag(models.DagBag):
process_file_calls = 0
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
if 'example_bash_operator.py' == os.path.basename(filepath):
TestDagBag.process_file_calls += 1
super(TestDagBag, self).process_file(filepath, only_if_updated, safe_mode)
dagbag = TestDagBag(include_examples=True)
# Should not call process_file again, since it's already loaded during init.
self.assertEqual(1, dagbag.process_file_calls)
self.assertIsNotNone(dagbag.get_dag(dag_id))
self.assertEqual(1, dagbag.process_file_calls)
def test_get_dag_fileloc(self):
"""
Test that fileloc is correctly set when we load example DAGs,
specifically SubDAGs.
"""
dagbag = models.DagBag(include_examples=True)
expected = {
'example_bash_operator': 'example_bash_operator.py',
'example_subdag_operator': 'example_subdag_operator.py',
'example_subdag_operator.section-1': 'subdags/subdag.py'
}
for dag_id, path in expected.items():
dag = dagbag.get_dag(dag_id)
self.assertTrue(
dag.fileloc.endswith('airflow/example_dags/' + path))
def process_dag(self, create_dag):
"""
Helper method to process a file generated from the input create_dag function.
"""
# write source to file
source = textwrap.dedent(''.join(
inspect.getsource(create_dag).splitlines(True)[1:-1]))
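        # getsource includes the enclosing `def ...():` line and the trailing
        # `return dag`; slicing [1:-1] strips both so the function body runs
        # at module level once written to the temp file.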
f = NamedTemporaryFile()
f.write(source.encode('utf8'))
f.flush()
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
found_dags = dagbag.process_file(f.name)
return dagbag, found_dags, f.name
def validate_dags(self, expected_parent_dag, actual_found_dags, actual_dagbag,
should_be_found=True):
expected_dag_ids = list(map(lambda dag: dag.dag_id, expected_parent_dag.subdags))
expected_dag_ids.append(expected_parent_dag.dag_id)
actual_found_dag_ids = list(map(lambda dag: dag.dag_id, actual_found_dags))
for dag_id in expected_dag_ids:
actual_dagbag.log.info('validating %s' % dag_id)
self.assertEquals(
dag_id in actual_found_dag_ids, should_be_found,
'dag "%s" should %shave been found after processing dag "%s"' %
(dag_id, '' if should_be_found else 'not ', expected_parent_dag.dag_id)
)
self.assertEquals(
dag_id in actual_dagbag.dags, should_be_found,
'dag "%s" should %sbe in dagbag.dags after processing dag "%s"' %
(dag_id, '' if should_be_found else 'not ', expected_parent_dag.dag_id)
)
def test_load_subdags(self):
# Define Dag to load
def standard_subdag():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
import datetime
DAG_NAME = 'master'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# master:
# A -> opSubDag_0
# master.opsubdag_0:
# -> subdag_0.task
# A -> opSubDag_1
# master.opsubdag_1:
# -> subdag_1.task
with dag:
def subdag_0():
subdag_0 = DAG('master.opSubdag_0', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_0.task', dag=subdag_0)
return subdag_0
def subdag_1():
subdag_1 = DAG('master.opSubdag_1', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_1.task', dag=subdag_1)
return subdag_1
opSubdag_0 = SubDagOperator(
task_id='opSubdag_0', dag=dag, subdag=subdag_0())
opSubdag_1 = SubDagOperator(
task_id='opSubdag_1', dag=dag, subdag=subdag_1())
opA = DummyOperator(task_id='A')
opA.set_downstream(opSubdag_0)
opA.set_downstream(opSubdag_1)
return dag
testDag = standard_subdag()
# sanity check to make sure DAG.subdag is still functioning properly
self.assertEqual(len(testDag.subdags), 2)
# Perform processing dag
dagbag, found_dags, _ = self.process_dag(standard_subdag)
# Validate correctness
# all dags from testDag should be listed
self.validate_dags(testDag, found_dags, dagbag)
# Define Dag to load
def nested_subdags():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
import datetime
DAG_NAME = 'master'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# master:
# A -> opSubdag_0
# master.opSubdag_0:
# -> opSubDag_A
# master.opSubdag_0.opSubdag_A:
# -> subdag_A.task
# -> opSubdag_B
# master.opSubdag_0.opSubdag_B:
# -> subdag_B.task
# A -> opSubdag_1
# master.opSubdag_1:
# -> opSubdag_C
# master.opSubdag_1.opSubdag_C:
# -> subdag_C.task
# -> opSubDag_D
# master.opSubdag_1.opSubdag_D:
# -> subdag_D.task
with dag:
def subdag_A():
subdag_A = DAG(
'master.opSubdag_0.opSubdag_A', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_A.task', dag=subdag_A)
return subdag_A
def subdag_B():
subdag_B = DAG(
'master.opSubdag_0.opSubdag_B', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_B.task', dag=subdag_B)
return subdag_B
def subdag_C():
subdag_C = DAG(
'master.opSubdag_1.opSubdag_C', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_C.task', dag=subdag_C)
return subdag_C
def subdag_D():
subdag_D = DAG(
'master.opSubdag_1.opSubdag_D', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_D.task', dag=subdag_D)
return subdag_D
def subdag_0():
subdag_0 = DAG('master.opSubdag_0', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_A', dag=subdag_0, subdag=subdag_A())
SubDagOperator(task_id='opSubdag_B', dag=subdag_0, subdag=subdag_B())
return subdag_0
def subdag_1():
subdag_1 = DAG('master.opSubdag_1', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_C', dag=subdag_1, subdag=subdag_C())
SubDagOperator(task_id='opSubdag_D', dag=subdag_1, subdag=subdag_D())
return subdag_1
opSubdag_0 = SubDagOperator(
task_id='opSubdag_0', dag=dag, subdag=subdag_0())
opSubdag_1 = SubDagOperator(
task_id='opSubdag_1', dag=dag, subdag=subdag_1())
opA = DummyOperator(task_id='A')
opA.set_downstream(opSubdag_0)
opA.set_downstream(opSubdag_1)
return dag
testDag = nested_subdags()
# sanity check to make sure DAG.subdag is still functioning properly
self.assertEqual(len(testDag.subdags), 6)
# Perform processing dag
dagbag, found_dags, _ = self.process_dag(nested_subdags)
# Validate correctness
# all dags from testDag should be listed
self.validate_dags(testDag, found_dags, dagbag)
def test_skip_cycle_dags(self):
"""
        Don't crash when loading an invalid DAG file (one that contains a cycle),
        and don't load the dag into the DagBag either
"""
# Define Dag to load
def basic_cycle():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
import datetime
DAG_NAME = 'cycle_dag'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# A -> A
with dag:
opA = DummyOperator(task_id='A')
opA.set_downstream(opA)
return dag
testDag = basic_cycle()
# sanity check to make sure DAG.subdag is still functioning properly
self.assertEqual(len(testDag.subdags), 0)
# Perform processing dag
dagbag, found_dags, file_path = self.process_dag(basic_cycle)
        # Validate correctness
# None of the dags should be found
self.validate_dags(testDag, found_dags, dagbag, should_be_found=False)
self.assertIn(file_path, dagbag.import_errors)
# Define Dag to load
def nested_subdag_cycle():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
import datetime
DAG_NAME = 'nested_cycle'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# cycle:
# A -> opSubdag_0
# cycle.opSubdag_0:
# -> opSubDag_A
# cycle.opSubdag_0.opSubdag_A:
# -> subdag_A.task
# -> opSubdag_B
# cycle.opSubdag_0.opSubdag_B:
# -> subdag_B.task
# A -> opSubdag_1
# cycle.opSubdag_1:
# -> opSubdag_C
# cycle.opSubdag_1.opSubdag_C:
# -> subdag_C.task -> subdag_C.task >Invalid Loop<
# -> opSubDag_D
# cycle.opSubdag_1.opSubdag_D:
# -> subdag_D.task
with dag:
def subdag_A():
subdag_A = DAG(
'nested_cycle.opSubdag_0.opSubdag_A', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_A.task', dag=subdag_A)
return subdag_A
def subdag_B():
subdag_B = DAG(
'nested_cycle.opSubdag_0.opSubdag_B', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_B.task', dag=subdag_B)
return subdag_B
def subdag_C():
subdag_C = DAG(
'nested_cycle.opSubdag_1.opSubdag_C', default_args=DEFAULT_ARGS)
opSubdag_C_task = DummyOperator(
task_id='subdag_C.task', dag=subdag_C)
# introduce a loop in opSubdag_C
opSubdag_C_task.set_downstream(opSubdag_C_task)
return subdag_C
def subdag_D():
subdag_D = DAG(
'nested_cycle.opSubdag_1.opSubdag_D', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_D.task', dag=subdag_D)
return subdag_D
def subdag_0():
subdag_0 = DAG('nested_cycle.opSubdag_0', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_A', dag=subdag_0, subdag=subdag_A())
SubDagOperator(task_id='opSubdag_B', dag=subdag_0, subdag=subdag_B())
return subdag_0
def subdag_1():
subdag_1 = DAG('nested_cycle.opSubdag_1', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_C', dag=subdag_1, subdag=subdag_C())
SubDagOperator(task_id='opSubdag_D', dag=subdag_1, subdag=subdag_D())
return subdag_1
opSubdag_0 = SubDagOperator(
task_id='opSubdag_0', dag=dag, subdag=subdag_0())
opSubdag_1 = SubDagOperator(
task_id='opSubdag_1', dag=dag, subdag=subdag_1())
opA = DummyOperator(task_id='A')
opA.set_downstream(opSubdag_0)
opA.set_downstream(opSubdag_1)
return dag
testDag = nested_subdag_cycle()
# sanity check to make sure DAG.subdag is still functioning properly
self.assertEqual(len(testDag.subdags), 6)
# Perform processing dag
dagbag, found_dags, file_path = self.process_dag(nested_subdag_cycle)
# Validate correctness
# None of the dags should be found
self.validate_dags(testDag, found_dags, dagbag, should_be_found=False)
self.assertIn(file_path, dagbag.import_errors)
def test_process_file_with_none(self):
"""
test that process_file can handle Nones
"""
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
self.assertEqual([], dagbag.process_file(None))
@patch.object(TI, 'handle_failure')
def test_kill_zombies(self, mock_ti_handle_failure):
"""
        Test that kill_zombies calls the TI's failure handler with the proper context
"""
dagbag = models.DagBag()
with create_session() as session:
session.query(TI).delete()
dag = dagbag.get_dag('example_branch_operator')
task = dag.get_task(task_id='run_this_first')
ti = TI(task, DEFAULT_DATE, State.RUNNING)
session.add(ti)
session.commit()
zombies = [SimpleTaskInstance(ti)]
dagbag.kill_zombies(zombies)
mock_ti_handle_failure \
.assert_called_with(ANY,
configuration.getboolean('core',
'unit_test_mode'),
ANY)
def test_deactivate_unknown_dags(self):
"""
        Test that DAGs whose dag_ids are not passed into deactivate_unknown_dags
        are deactivated when the function is invoked
"""
dagbag = models.DagBag(include_examples=True)
expected_active_dags = dagbag.dags.keys()
        session = settings.Session()
session.add(DagModel(dag_id='test_deactivate_unknown_dags', is_active=True))
session.commit()
models.DAG.deactivate_unknown_dags(expected_active_dags)
for dag in session.query(DagModel).all():
if dag.dag_id in expected_active_dags:
self.assertTrue(dag.is_active)
else:
self.assertEquals(dag.dag_id, 'test_deactivate_unknown_dags')
self.assertFalse(dag.is_active)
# clean up
session.query(DagModel).filter(DagModel.dag_id == 'test_deactivate_unknown_dags').delete()
session.commit()
class TaskInstanceTest(unittest.TestCase):
def test_set_task_dates(self):
"""
Test that tasks properly take start/end dates from DAGs
"""
dag = DAG('dag', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
op1 = DummyOperator(task_id='op_1', owner='test')
self.assertTrue(op1.start_date is None and op1.end_date is None)
# dag should assign its dates to op1 because op1 has no dates
dag.add_task(op1)
self.assertTrue(
op1.start_date == dag.start_date and op1.end_date == dag.end_date)
op2 = DummyOperator(
task_id='op_2',
owner='test',
start_date=DEFAULT_DATE - datetime.timedelta(days=1),
end_date=DEFAULT_DATE + datetime.timedelta(days=11))
# dag should assign its dates to op2 because they are more restrictive
dag.add_task(op2)
self.assertTrue(
op2.start_date == dag.start_date and op2.end_date == dag.end_date)
op3 = DummyOperator(
task_id='op_3',
owner='test',
start_date=DEFAULT_DATE + datetime.timedelta(days=1),
end_date=DEFAULT_DATE + datetime.timedelta(days=9))
# op3 should keep its dates because they are more restrictive
dag.add_task(op3)
self.assertTrue(
op3.start_date == DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(
op3.end_date == DEFAULT_DATE + datetime.timedelta(days=9))
def test_timezone_awareness(self):
NAIVE_DATETIME = DEFAULT_DATE.replace(tzinfo=None)
# check ti without dag (just for bw compat)
op_no_dag = DummyOperator(task_id='op_no_dag')
ti = TI(task=op_no_dag, execution_date=NAIVE_DATETIME)
self.assertEquals(ti.execution_date, DEFAULT_DATE)
# check with dag without localized execution_date
dag = DAG('dag', start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id='op_1')
dag.add_task(op1)
ti = TI(task=op1, execution_date=NAIVE_DATETIME)
self.assertEquals(ti.execution_date, DEFAULT_DATE)
# with dag and localized execution_date
tz = pendulum.timezone("Europe/Amsterdam")
execution_date = timezone.datetime(2016, 1, 1, 1, 0, 0, tzinfo=tz)
utc_date = timezone.convert_to_utc(execution_date)
ti = TI(task=op1, execution_date=execution_date)
self.assertEquals(ti.execution_date, utc_date)
def test_task_naive_datetime(self):
NAIVE_DATETIME = DEFAULT_DATE.replace(tzinfo=None)
op_no_dag = DummyOperator(task_id='test_task_naive_datetime',
start_date=NAIVE_DATETIME,
end_date=NAIVE_DATETIME)
self.assertTrue(op_no_dag.start_date.tzinfo)
self.assertTrue(op_no_dag.end_date.tzinfo)
def test_set_dag(self):
"""
Test assigning Operators to Dags, including deferred assignment
"""
dag = DAG('dag', start_date=DEFAULT_DATE)
dag2 = DAG('dag2', start_date=DEFAULT_DATE)
op = DummyOperator(task_id='op_1', owner='test')
# no dag assigned
self.assertFalse(op.has_dag())
self.assertRaises(AirflowException, getattr, op, 'dag')
# no improper assignment
with self.assertRaises(TypeError):
op.dag = 1
op.dag = dag
# no reassignment
with self.assertRaises(AirflowException):
op.dag = dag2
# but assigning the same dag is ok
op.dag = dag
self.assertIs(op.dag, dag)
self.assertIn(op, dag.tasks)
def test_infer_dag(self):
dag = DAG('dag', start_date=DEFAULT_DATE)
dag2 = DAG('dag2', start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id='test_op_1', owner='test')
op2 = DummyOperator(task_id='test_op_2', owner='test')
op3 = DummyOperator(task_id='test_op_3', owner='test', dag=dag)
op4 = DummyOperator(task_id='test_op_4', owner='test', dag=dag2)
# double check dags
self.assertEqual(
[i.has_dag() for i in [op1, op2, op3, op4]],
[False, False, True, True])
# can't combine operators with no dags
self.assertRaises(AirflowException, op1.set_downstream, op2)
# op2 should infer dag from op1
op1.dag = dag
op1.set_downstream(op2)
self.assertIs(op2.dag, dag)
# can't assign across multiple DAGs
self.assertRaises(AirflowException, op1.set_downstream, op4)
self.assertRaises(AirflowException, op1.set_downstream, [op3, op4])
def test_bitshift_compose_operators(self):
dag = DAG('dag', start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id='test_op_1', owner='test')
op2 = DummyOperator(task_id='test_op_2', owner='test')
op3 = DummyOperator(task_id='test_op_3', owner='test')
op4 = DummyOperator(task_id='test_op_4', owner='test')
op5 = DummyOperator(task_id='test_op_5', owner='test')
# can't compose operators without dags
with self.assertRaises(AirflowException):
op1 >> op2
dag >> op1 >> op2 << op3
# make sure dag assignment carries through
# using __rrshift__
self.assertIs(op1.dag, dag)
self.assertIs(op2.dag, dag)
self.assertIs(op3.dag, dag)
# op2 should be downstream of both
self.assertIn(op2, op1.downstream_list)
self.assertIn(op2, op3.downstream_list)
# test dag assignment with __rlshift__
dag << op4
self.assertIs(op4.dag, dag)
# dag assignment with __rrshift__
dag >> op5
self.assertIs(op5.dag, dag)
@patch.object(DAG, 'concurrency_reached')
def test_requeue_over_concurrency(self, mock_concurrency_reached):
mock_concurrency_reached.return_value = True
dag = DAG(dag_id='test_requeue_over_concurrency', start_date=DEFAULT_DATE,
max_active_runs=1, concurrency=2)
task = DummyOperator(task_id='test_requeue_over_concurrency_op', dag=dag)
ti = TI(task=task, execution_date=timezone.utcnow())
ti.run()
self.assertEqual(ti.state, models.State.NONE)
@patch.object(TI, 'pool_full')
def test_run_pooling_task(self, mock_pool_full):
"""
        test that running a task whose pool is full still updates the task state
        (no dependency check in ti_deps anymore, so it also ends up SUCCESS)
"""
# Mock the pool out with a full pool because the pool doesn't actually exist
mock_pool_full.return_value = True
dag = models.DAG(dag_id='test_run_pooling_task')
task = DummyOperator(task_id='test_run_pooling_task_op', dag=dag,
pool='test_run_pooling_task_pool', owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=timezone.utcnow())
ti.run()
self.assertEqual(ti.state, models.State.SUCCESS)
@patch.object(TI, 'pool_full')
def test_run_pooling_task_with_mark_success(self, mock_pool_full):
"""
        test that running a task with the mark_success param updates the task
        state to SUCCESS without actually running the task.
"""
# Mock the pool out with a full pool because the pool doesn't actually exist
mock_pool_full.return_value = True
dag = models.DAG(dag_id='test_run_pooling_task_with_mark_success')
task = DummyOperator(
task_id='test_run_pooling_task_with_mark_success_op',
dag=dag,
pool='test_run_pooling_task_with_mark_success_pool',
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=timezone.utcnow())
ti.run(mark_success=True)
self.assertEqual(ti.state, models.State.SUCCESS)
def test_run_pooling_task_with_skip(self):
"""
        test that running a task which raises AirflowSkipException ends
        up in a SKIPPED state.
"""
def raise_skip_exception():
raise AirflowSkipException
dag = models.DAG(dag_id='test_run_pooling_task_with_skip')
task = PythonOperator(
task_id='test_run_pooling_task_with_skip',
dag=dag,
python_callable=raise_skip_exception,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=timezone.utcnow())
ti.run()
self.assertEqual(models.State.SKIPPED, ti.state)
def test_retry_delay(self):
"""
Test that retry delays are respected
"""
dag = models.DAG(dag_id='test_retry_handling')
task = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
retry_delay=datetime.timedelta(seconds=3),
dag=dag,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
def run_with_error(ti):
try:
ti.run()
except AirflowException:
pass
ti = TI(
task=task, execution_date=timezone.utcnow())
self.assertEqual(ti.try_number, 1)
# first run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
# second run -- still up for retry because retry_delay hasn't expired
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
# third run -- failed
time.sleep(3)
run_with_error(ti)
self.assertEqual(ti.state, State.FAILED)
@patch.object(TI, 'pool_full')
def test_retry_handling(self, mock_pool_full):
"""
Test that task retries are handled properly
"""
        # Mock the pool as having open slots since the pool doesn't actually exist
mock_pool_full.return_value = False
dag = models.DAG(dag_id='test_retry_handling')
task = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
retry_delay=datetime.timedelta(seconds=0),
dag=dag,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
def run_with_error(ti):
try:
ti.run()
except AirflowException:
pass
ti = TI(
task=task, execution_date=timezone.utcnow())
self.assertEqual(ti.try_number, 1)
# first run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti._try_number, 1)
self.assertEqual(ti.try_number, 2)
# second run -- fail
run_with_error(ti)
self.assertEqual(ti.state, State.FAILED)
self.assertEqual(ti._try_number, 2)
self.assertEqual(ti.try_number, 3)
# Clear the TI state since you can't run a task with a FAILED state without
# clearing it first
dag.clear()
# third run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti._try_number, 3)
self.assertEqual(ti.try_number, 4)
# fourth run -- fail
run_with_error(ti)
ti.refresh_from_db()
self.assertEqual(ti.state, State.FAILED)
self.assertEqual(ti._try_number, 4)
self.assertEqual(ti.try_number, 5)
def test_next_retry_datetime(self):
delay = datetime.timedelta(seconds=30)
max_delay = datetime.timedelta(minutes=60)
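# Exponential backoff roughly doubles the delay each retry, capped at max_retry_delay.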
dag = models.DAG(dag_id='fail_dag')
task = BashOperator(
task_id='task_with_exp_backoff_and_max_delay',
bash_command='exit 1',
retries=3,
retry_delay=delay,
retry_exponential_backoff=True,
max_retry_delay=max_delay,
dag=dag,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=DEFAULT_DATE)
ti.end_date = pendulum.instance(timezone.utcnow())
dt = ti.next_retry_datetime()
# between 30 * 2^-1 and 30 * 2^0 (15 and 30)
period = ti.end_date.add(seconds=30) - ti.end_date.add(seconds=15)
self.assertTrue(dt in period)
ti.try_number = 3
dt = ti.next_retry_datetime()
# between 30 * 2^2 and 30 * 2^3 (120 and 240)
period = ti.end_date.add(seconds=240) - ti.end_date.add(seconds=120)
self.assertTrue(dt in period)
ti.try_number = 5
dt = ti.next_retry_datetime()
# between 30 * 2^4 and 30 * 2^5 (480 and 960)
period = ti.end_date.add(seconds=960) - ti.end_date.add(seconds=480)
self.assertTrue(dt in period)
ti.try_number = 9
dt = ti.next_retry_datetime()
self.assertEqual(dt, ti.end_date + max_delay)
ti.try_number = 50
dt = ti.next_retry_datetime()
self.assertEqual(dt, ti.end_date + max_delay)
def test_depends_on_past(self):
dagbag = models.DagBag()
dag = dagbag.get_dag('test_depends_on_past')
dag.clear()
task = dag.tasks[0]
run_date = task.start_date + datetime.timedelta(days=5)
ti = TI(task, run_date)
# depends_on_past prevents the run
task.run(start_date=run_date, end_date=run_date)
ti.refresh_from_db()
self.assertIs(ti.state, None)
# ignore first depends_on_past to allow the run
task.run(
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
# Parameterized tests to check for the correct firing
# of the trigger_rule under various circumstances
# Numeric fields are in order:
# successes, skipped, failed, upstream_failed, done
@parameterized.expand([
#
# Tests for all_success
#
['all_success', 5, 0, 0, 0, 0, True, None, True],
['all_success', 2, 0, 0, 0, 0, True, None, False],
['all_success', 2, 0, 1, 0, 0, True, ST.UPSTREAM_FAILED, False],
['all_success', 2, 1, 0, 0, 0, True, ST.SKIPPED, False],
#
# Tests for one_success
#
['one_success', 5, 0, 0, 0, 5, True, None, True],
['one_success', 2, 0, 0, 0, 2, True, None, True],
['one_success', 2, 0, 1, 0, 3, True, None, True],
['one_success', 2, 1, 0, 0, 3, True, None, True],
#
# Tests for all_failed
#
['all_failed', 5, 0, 0, 0, 5, True, ST.SKIPPED, False],
['all_failed', 0, 0, 5, 0, 5, True, None, True],
['all_failed', 2, 0, 0, 0, 2, True, ST.SKIPPED, False],
['all_failed', 2, 0, 1, 0, 3, True, ST.SKIPPED, False],
['all_failed', 2, 1, 0, 0, 3, True, ST.SKIPPED, False],
#
# Tests for one_failed
#
['one_failed', 5, 0, 0, 0, 0, True, None, False],
['one_failed', 2, 0, 0, 0, 0, True, None, False],
['one_failed', 2, 0, 1, 0, 0, True, None, True],
['one_failed', 2, 1, 0, 0, 3, True, None, False],
['one_failed', 2, 3, 0, 0, 5, True, ST.SKIPPED, False],
#
# Tests for done
#
['all_done', 5, 0, 0, 0, 5, True, None, True],
['all_done', 2, 0, 0, 0, 2, True, None, False],
['all_done', 2, 0, 1, 0, 3, True, None, False],
['all_done', 2, 1, 0, 0, 3, True, None, False]
])
def test_check_task_dependencies(self, trigger_rule, successes, skipped,
failed, upstream_failed, done,
flag_upstream_failed,
expect_state, expect_completed):
start_date = timezone.datetime(2016, 2, 1, 0, 0, 0)
dag = models.DAG('test-dag', start_date=start_date)
downstream = DummyOperator(task_id='downstream',
dag=dag, owner='airflow',
trigger_rule=trigger_rule)
for i in range(5):
task = DummyOperator(task_id='runme_{}'.format(i),
dag=dag, owner='airflow')
task.set_downstream(downstream)
run_date = task.start_date + datetime.timedelta(days=5)
ti = TI(downstream, run_date)
dep_results = TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=successes,
skipped=skipped,
failed=failed,
upstream_failed=upstream_failed,
done=done,
flag_upstream_failed=flag_upstream_failed)
completed = all([dep.passed for dep in dep_results])
self.assertEqual(completed, expect_completed)
self.assertEqual(ti.state, expect_state)
def test_xcom_pull(self):
"""
Test xcom_pull, using different filtering methods.
"""
dag = models.DAG(
dag_id='test_xcom', schedule_interval='@monthly',
start_date=timezone.datetime(2016, 6, 1, 0, 0, 0))
exec_date = timezone.utcnow()
# Push a value
task1 = DummyOperator(task_id='test_xcom_1', dag=dag, owner='airflow')
ti1 = TI(task=task1, execution_date=exec_date)
ti1.xcom_push(key='foo', value='bar')
# Push another value with the same key (but by a different task)
task2 = DummyOperator(task_id='test_xcom_2', dag=dag, owner='airflow')
ti2 = TI(task=task2, execution_date=exec_date)
ti2.xcom_push(key='foo', value='baz')
# Pull with no arguments
result = ti1.xcom_pull()
self.assertEqual(result, None)
# Pull the value pushed most recently by any task.
result = ti1.xcom_pull(key='foo')
self.assertEqual(result, 'baz')
# Pull the value pushed by the first task
result = ti1.xcom_pull(task_ids='test_xcom_1', key='foo')
self.assertEqual(result, 'bar')
# Pull the value pushed by the second task
result = ti1.xcom_pull(task_ids='test_xcom_2', key='foo')
self.assertEqual(result, 'baz')
# Pull the values pushed by both tasks
result = ti1.xcom_pull(
task_ids=['test_xcom_1', 'test_xcom_2'], key='foo')
self.assertEqual(result, ('bar', 'baz'))
def test_xcom_pull_after_success(self):
"""
tests xcom set/clear relative to a task in a 'success' rerun scenario
"""
key = 'xcom_key'
value = 'xcom_value'
dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly')
task = DummyOperator(
task_id='test_xcom',
dag=dag,
pool='test_xcom',
owner='airflow',
start_date=timezone.datetime(2016, 6, 2, 0, 0, 0))
exec_date = timezone.utcnow()
ti = TI(
task=task, execution_date=exec_date)
ti.run(mark_success=True)
ti.xcom_push(key=key, value=value)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
ti.run()
# The second run and assert is to handle AIRFLOW-131 (don't clear on
# prior success)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
# Test AIRFLOW-703: Xcom shouldn't be cleared if the task doesn't
# execute, even if dependencies are ignored
ti.run(ignore_all_deps=True, mark_success=True)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
# Xcom IS finally cleared once task has executed
ti.run(ignore_all_deps=True)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None)
def test_xcom_pull_different_execution_date(self):
"""
tests xcom fetch behavior with different execution dates, using
both xcom_pull with "include_prior_dates" and without
"""
key = 'xcom_key'
value = 'xcom_value'
dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly')
task = DummyOperator(
task_id='test_xcom',
dag=dag,
pool='test_xcom',
owner='airflow',
start_date=timezone.datetime(2016, 6, 2, 0, 0, 0))
exec_date = timezone.utcnow()
ti = TI(
task=task, execution_date=exec_date)
ti.run(mark_success=True)
ti.xcom_push(key=key, value=value)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
ti.run()
exec_date += datetime.timedelta(days=1)
ti = TI(
task=task, execution_date=exec_date)
ti.run()
# We have set a new execution date (and did not pass in
# 'include_prior_dates'), which means this task should now have a cleared
# xcom value
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None)
# We *should* get a value using 'include_prior_dates'
self.assertEqual(ti.xcom_pull(task_ids='test_xcom',
key=key,
include_prior_dates=True),
value)
def test_post_execute_hook(self):
"""
Test that post_execute hook is called with the Operator's result.
The result ('error') will cause an error to be raised and trapped.
"""
class TestError(Exception):
pass
class TestOperator(PythonOperator):
def post_execute(self, context, result):
if result == 'error':
raise TestError('expected error.')
dag = models.DAG(dag_id='test_post_execute_dag')
task = TestOperator(
task_id='test_operator',
dag=dag,
python_callable=lambda: 'error',
owner='airflow',
start_date=timezone.datetime(2017, 2, 1))
ti = TI(task=task, execution_date=timezone.utcnow())
with self.assertRaises(TestError):
ti.run()
def test_check_and_change_state_before_execution(self):
dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
ti = TI(
task=task, execution_date=timezone.utcnow())
self.assertEqual(ti._try_number, 0)
self.assertTrue(ti._check_and_change_state_before_execution())
# State should be running, and try_number column should be incremented
self.assertEqual(ti.state, State.RUNNING)
self.assertEqual(ti._try_number, 1)
def test_check_and_change_state_before_execution_dep_not_met(self):
dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
task2 = DummyOperator(task_id='task2', dag=dag, start_date=DEFAULT_DATE)
task >> task2
ti = TI(
task=task2, execution_date=timezone.utcnow())
self.assertFalse(ti._check_and_change_state_before_execution())
def test_try_number(self):
"""
Test the try_number accessor behaves in various running states
"""
dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
ti = TI(task=task, execution_date=timezone.utcnow())
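# try_number reports the attempt in progress while RUNNING, and the next attempt to run otherwise.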
self.assertEqual(1, ti.try_number)
ti.try_number = 2
ti.state = State.RUNNING
self.assertEqual(2, ti.try_number)
ti.state = State.SUCCESS
self.assertEqual(3, ti.try_number)
def test_get_num_running_task_instances(self):
session = settings.Session()
dag = models.DAG(dag_id='test_get_num_running_task_instances')
dag2 = models.DAG(dag_id='test_get_num_running_task_instances_dummy')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
task2 = DummyOperator(task_id='task', dag=dag2, start_date=DEFAULT_DATE)
ti1 = TI(task=task, execution_date=DEFAULT_DATE)
ti2 = TI(task=task, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti3 = TI(task=task2, execution_date=DEFAULT_DATE)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.RUNNING
session.add(ti1)
session.add(ti2)
session.add(ti3)
session.commit()
self.assertEqual(1, ti1.get_num_running_task_instances(session=session))
self.assertEqual(1, ti2.get_num_running_task_instances(session=session))
self.assertEqual(1, ti3.get_num_running_task_instances(session=session))
def test_log_url(self):
now = pendulum.now('Europe/Brussels')
dag = DAG('dag', start_date=DEFAULT_DATE)
task = DummyOperator(task_id='op', dag=dag)
ti = TI(task=task, execution_date=now)
d = urllib.parse.parse_qs(
urllib.parse.urlparse(ti.log_url).query,
keep_blank_values=True, strict_parsing=True)
self.assertEqual(d['dag_id'][0], 'dag')
self.assertEqual(d['task_id'][0], 'op')
self.assertEqual(pendulum.parse(d['execution_date'][0]), now)
@patch('airflow.settings.RBAC', True)
def test_log_url_rbac(self):
dag = DAG('dag', start_date=DEFAULT_DATE)
task = DummyOperator(task_id='op', dag=dag)
ti = TI(task=task, execution_date=datetime.datetime(2018, 1, 1))
expected_url = (
'http://localhost:8080/log?'
'execution_date=2018-01-01T00%3A00%3A00%2B00%3A00'
'&task_id=op'
'&dag_id=dag'
)
self.assertEqual(ti.log_url, expected_url)
def test_mark_success_url(self):
now = pendulum.now('Europe/Brussels')
dag = DAG('dag', start_date=DEFAULT_DATE)
task = DummyOperator(task_id='op', dag=dag)
ti = TI(task=task, execution_date=now)
d = urllib.parse.parse_qs(
urllib.parse.urlparse(ti.mark_success_url).query,
keep_blank_values=True, strict_parsing=True)
self.assertEqual(d['dag_id'][0], 'dag')
self.assertEqual(d['task_id'][0], 'op')
self.assertEqual(pendulum.parse(d['execution_date'][0]), now)
def test_overwrite_params_with_dag_run_conf(self):
task = DummyOperator(task_id='op')
ti = TI(task=task, execution_date=datetime.datetime.now())
dag_run = DagRun()
dag_run.conf = {"override": True}
params = {"override": False}
ti.overwrite_params_with_dag_run_conf(params, dag_run)
self.assertEqual(True, params["override"])
def test_overwrite_params_with_dag_run_none(self):
task = DummyOperator(task_id='op')
ti = TI(task=task, execution_date=datetime.datetime.now())
params = {"override": False}
ti.overwrite_params_with_dag_run_conf(params, None)
self.assertEqual(False, params["override"])
def test_overwrite_params_with_dag_run_conf_none(self):
task = DummyOperator(task_id='op')
ti = TI(task=task, execution_date=datetime.datetime.now())
params = {"override": False}
dag_run = DagRun()
ti.overwrite_params_with_dag_run_conf(params, dag_run)
self.assertEqual(False, params["override"])
@patch('airflow.models.send_email')
def test_email_alert(self, mock_send_email):
dag = models.DAG(dag_id='test_failure_email')
task = BashOperator(
task_id='test_email_alert',
dag=dag,
bash_command='exit 1',
start_date=DEFAULT_DATE,
email='to')
ti = TI(task=task, execution_date=datetime.datetime.now())
try:
ti.run()
except AirflowException:
pass
(email, title, body), _ = mock_send_email.call_args
self.assertEqual(email, 'to')
self.assertIn('test_email_alert', title)
self.assertIn('test_email_alert', body)
@patch('airflow.models.send_email')
def test_email_alert_with_config(self, mock_send_email):
dag = models.DAG(dag_id='test_failure_email')
task = BashOperator(
task_id='test_email_alert_with_config',
dag=dag,
bash_command='exit 1',
start_date=DEFAULT_DATE,
email='to')
ti = TI(
task=task, execution_date=datetime.datetime.now())
configuration.set('email', 'SUBJECT_TEMPLATE', '/subject/path')
configuration.set('email', 'HTML_CONTENT_TEMPLATE', '/html_content/path')
opener = mock_open(read_data='template: {{ti.task_id}}')
with patch('airflow.models.open', opener, create=True):
try:
ti.run()
except AirflowException:
pass
(email, title, body), _ = mock_send_email.call_args
self.assertEqual(email, 'to')
self.assertEqual('template: test_email_alert_with_config', title)
self.assertEqual('template: test_email_alert_with_config', body)
def test_set_duration(self):
task = DummyOperator(task_id='op', email='[email protected]')
ti = TI(
task=task,
execution_date=datetime.datetime.now(),
)
ti.start_date = datetime.datetime(2018, 10, 1, 1)
ti.end_date = datetime.datetime(2018, 10, 1, 2)
ti.set_duration()
self.assertEqual(ti.duration, 3600)
def test_set_duration_empty_dates(self):
task = DummyOperator(task_id='op', email='[email protected]')
ti = TI(task=task, execution_date=datetime.datetime.now())
ti.set_duration()
self.assertIsNone(ti.duration)
def test_success_callback_no_race_condition(self):
class CallbackWrapper(object):
def wrap_task_instance(self, ti):
self.task_id = ti.task_id
self.dag_id = ti.dag_id
self.execution_date = ti.execution_date
self.task_state_in_callback = ""
self.callback_ran = False
def success_handler(self, context):
self.callback_ran = True
session = settings.Session()
temp_instance = session.query(TI).filter(
TI.task_id == self.task_id).filter(
TI.dag_id == self.dag_id).filter(
TI.execution_date == self.execution_date).one()
self.task_state_in_callback = temp_instance.state
cw = CallbackWrapper()
dag = DAG('test_success_callback_no_race_condition', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task = DummyOperator(task_id='op', email='[email protected]',
on_success_callback=cw.success_handler, dag=dag)
ti = TI(task=task, execution_date=datetime.datetime.now())
ti.state = State.RUNNING
session = settings.Session()
session.merge(ti)
session.commit()
cw.wrap_task_instance(ti)
ti._run_raw_task()
self.assertTrue(cw.callback_ran)
self.assertEqual(cw.task_state_in_callback, State.RUNNING)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
class ClearTasksTest(unittest.TestCase):
def test_clear_task_instances(self):
dag = DAG('test_clear_task_instances', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='0', owner='test', dag=dag)
task1 = DummyOperator(task_id='1', owner='test', dag=dag, retries=2)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
ti0.run()
ti1.run()
session = settings.Session()
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session, dag=dag)
session.commit()
ti0.refresh_from_db()
ti1.refresh_from_db()
# Next try to run will be try 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
self.assertEqual(ti1.try_number, 2)
self.assertEqual(ti1.max_tries, 3)
def test_clear_task_instances_without_task(self):
dag = DAG('test_clear_task_instances_without_task', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='task0', owner='test', dag=dag)
task1 = DummyOperator(task_id='task1', owner='test', dag=dag, retries=2)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
ti0.run()
ti1.run()
# Remove the task from dag.
dag.task_dict = {}
self.assertFalse(dag.has_task(task0.task_id))
self.assertFalse(dag.has_task(task1.task_id))
session = settings.Session()
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session)
session.commit()
# When dag is None, max_tries will be maximum of original max_tries or try_number.
ti0.refresh_from_db()
ti1.refresh_from_db()
# Next try to run will be try 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
self.assertEqual(ti1.try_number, 2)
self.assertEqual(ti1.max_tries, 2)
def test_clear_task_instances_without_dag(self):
dag = DAG('test_clear_task_instances_without_dag', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='task_0', owner='test', dag=dag)
task1 = DummyOperator(task_id='task_1', owner='test', dag=dag, retries=2)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
ti0.run()
ti1.run()
session = settings.Session()
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session)
session.commit()
# When dag is None, max_tries will be maximum of original max_tries or try_number.
ti0.refresh_from_db()
ti1.refresh_from_db()
# Next try to run will be try 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
self.assertEqual(ti1.try_number, 2)
self.assertEqual(ti1.max_tries, 2)
def test_dag_clear(self):
dag = DAG('test_dag_clear', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='test_dag_clear_task_0', owner='test', dag=dag)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
# Next try to run will be try 1
self.assertEqual(ti0.try_number, 1)
ti0.run()
self.assertEqual(ti0.try_number, 2)
dag.clear()
ti0.refresh_from_db()
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.state, State.NONE)
self.assertEqual(ti0.max_tries, 1)
task1 = DummyOperator(task_id='test_dag_clear_task_1', owner='test',
dag=dag, retries=2)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
self.assertEqual(ti1.max_tries, 2)
ti1.try_number = 1
# Next try will be 2
ti1.run()
self.assertEqual(ti1.try_number, 3)
self.assertEqual(ti1.max_tries, 2)
dag.clear()
ti0.refresh_from_db()
ti1.refresh_from_db()
# after clear dag, ti1 should show attempt 3 of 5
self.assertEqual(ti1.max_tries, 4)
self.assertEqual(ti1.try_number, 3)
# after clear dag, ti0 should show attempt 2 of 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
def test_dags_clear(self):
# setup
session = settings.Session()
dags, tis = [], []
num_of_dags = 5
for i in range(num_of_dags):
dag = DAG('test_dag_clear_' + str(i), start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
ti = TI(task=DummyOperator(task_id='test_task_clear_' + str(i), owner='test',
dag=dag),
execution_date=DEFAULT_DATE)
dags.append(dag)
tis.append(ti)
# test clear all dags
for i in range(num_of_dags):
tis[i].run()
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 2)
self.assertEqual(tis[i].max_tries, 0)
DAG.clear_dags(dags)
for i in range(num_of_dags):
tis[i].refresh_from_db()
self.assertEqual(tis[i].state, State.NONE)
self.assertEqual(tis[i].try_number, 2)
self.assertEqual(tis[i].max_tries, 1)
# test dry_run
for i in range(num_of_dags):
tis[i].run()
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 1)
DAG.clear_dags(dags, dry_run=True)
for i in range(num_of_dags):
tis[i].refresh_from_db()
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 1)
# test only_failed
from random import randint
failed_dag_idx = randint(0, len(tis) - 1)
tis[failed_dag_idx].state = State.FAILED
session.merge(tis[failed_dag_idx])
session.commit()
DAG.clear_dags(dags, only_failed=True)
for i in range(num_of_dags):
tis[i].refresh_from_db()
if i != failed_dag_idx:
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 1)
else:
self.assertEqual(tis[i].state, State.NONE)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 2)
def test_operator_clear(self):
dag = DAG('test_operator_clear', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
t1 = DummyOperator(task_id='bash_op', owner='test', dag=dag)
t2 = DummyOperator(task_id='dummy_op', owner='test', dag=dag, retries=1)
t2.set_upstream(t1)
ti1 = TI(task=t1, execution_date=DEFAULT_DATE)
ti2 = TI(task=t2, execution_date=DEFAULT_DATE)
ti2.run()
# Dependency not met
self.assertEqual(ti2.try_number, 1)
self.assertEqual(ti2.max_tries, 1)
t2.clear(upstream=True)
ti1.run()
ti2.run()
self.assertEqual(ti1.try_number, 2)
# max_tries is 0 because there is no task instance in db for ti1
# so clear won't change the max_tries.
self.assertEqual(ti1.max_tries, 0)
self.assertEqual(ti2.try_number, 2)
# try_number (0) + retries(1)
self.assertEqual(ti2.max_tries, 1)
def test_xcom_disable_pickle_type(self):
configuration.load_test_config()
json_obj = {"key": "value"}
execution_date = timezone.utcnow()
key = "xcom_test1"
dag_id = "test_dag1"
task_id = "test_task1"
configuration.set("core", "enable_xcom_pickling", "False")
XCom.set(key=key,
value=json_obj,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date)
ret_value = XCom.get_one(key=key,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date)
self.assertEqual(ret_value, json_obj)
session = settings.Session()
ret_value = session.query(XCom).filter(XCom.key == key, XCom.dag_id == dag_id,
XCom.task_id == task_id,
XCom.execution_date == execution_date
).first().value
self.assertEqual(ret_value, json_obj)
def test_xcom_enable_pickle_type(self):
json_obj = {"key": "value"}
execution_date = timezone.utcnow()
key = "xcom_test2"
dag_id = "test_dag2"
task_id = "test_task2"
configuration.set("core", "enable_xcom_pickling", "True")
XCom.set(key=key,
value=json_obj,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date)
ret_value = XCom.get_one(key=key,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date)
self.assertEqual(ret_value, json_obj)
session = settings.Session()
ret_value = session.query(XCom).filter(XCom.key == key, XCom.dag_id == dag_id,
XCom.task_id == task_id,
XCom.execution_date == execution_date
).first().value
self.assertEqual(ret_value, json_obj)
def test_xcom_disable_pickle_type_fail_on_non_json(self):
class PickleRce(object):
def __reduce__(self):
return os.system, ("ls -alt",)
configuration.set("core", "xcom_enable_pickling", "False")
self.assertRaises(TypeError, XCom.set,
key="xcom_test3",
value=PickleRce(),
dag_id="test_dag3",
task_id="test_task3",
execution_date=timezone.utcnow())
def test_xcom_get_many(self):
json_obj = {"key": "value"}
execution_date = timezone.utcnow()
key = "xcom_test4"
dag_id1 = "test_dag4"
task_id1 = "test_task4"
dag_id2 = "test_dag5"
task_id2 = "test_task5"
configuration.set("core", "xcom_enable_pickling", "True")
XCom.set(key=key,
value=json_obj,
dag_id=dag_id1,
task_id=task_id1,
execution_date=execution_date)
XCom.set(key=key,
value=json_obj,
dag_id=dag_id2,
task_id=task_id2,
execution_date=execution_date)
results = XCom.get_many(key=key,
execution_date=execution_date)
for result in results:
self.assertEqual(result.value, json_obj)
class ConnectionTest(unittest.TestCase):
@patch.object(configuration, 'get')
def test_connection_extra_no_encryption(self, mock_get):
"""
Tests extras on a new connection without encryption. The fernet key
is set to a non-base64-encoded string and the extra is stored without
encryption.
"""
test_connection = Connection(extra='testextra')
self.assertEqual(test_connection.extra, 'testextra')
@patch.object(configuration, 'get')
def test_connection_extra_with_encryption(self, mock_get):
"""
Tests extras on a new connection with encryption. The fernet key
is set to a base64 encoded string and the extra is encrypted.
"""
# 'dGVzdA==' is base64 encoded 'test'
mock_get.return_value = 'dGVzdA=='
test_connection = Connection(extra='testextra')
self.assertEqual(test_connection.extra, 'testextra')
def test_connection_from_uri_without_extras(self):
uri = 'scheme://user:password@host%2flocation:1234/schema'
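# %2f in the host portion decodes to '/', letting a connection URI carry slashes in the hostname.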
connection = Connection(uri=uri)
self.assertEqual(connection.conn_type, 'scheme')
self.assertEqual(connection.host, 'host/location')
self.assertEqual(connection.schema, 'schema')
self.assertEqual(connection.login, 'user')
self.assertEqual(connection.password, 'password')
self.assertEqual(connection.port, 1234)
self.assertIsNone(connection.extra)
def test_connection_from_uri_with_extras(self):
uri = 'scheme://user:password@host%2flocation:1234/schema?' \
'extra1=a%20value&extra2=%2fpath%2f'
connection = Connection(uri=uri)
self.assertEqual(connection.conn_type, 'scheme')
self.assertEqual(connection.host, 'host/location')
self.assertEqual(connection.schema, 'schema')
self.assertEqual(connection.login, 'user')
self.assertEqual(connection.password, 'password')
self.assertEqual(connection.port, 1234)
self.assertDictEqual(connection.extra_dejson, {'extra1': 'a value',
'extra2': '/path/'})
def test_connection_from_uri_with_colon_in_hostname(self):
uri = 'scheme://user:password@host%2flocation%3ax%3ay:1234/schema?' \
'extra1=a%20value&extra2=%2fpath%2f'
connection = Connection(uri=uri)
self.assertEqual(connection.conn_type, 'scheme')
self.assertEqual(connection.host, 'host/location:x:y')
self.assertEqual(connection.schema, 'schema')
self.assertEqual(connection.login, 'user')
self.assertEqual(connection.password, 'password')
self.assertEqual(connection.port, 1234)
self.assertDictEqual(connection.extra_dejson, {'extra1': 'a value',
'extra2': '/path/'})
def test_connection_from_uri_with_encoded_password(self):
uri = 'scheme://user:password%20with%20space@host%2flocation%3ax%3ay:1234/schema'
connection = Connection(uri=uri)
self.assertEqual(connection.conn_type, 'scheme')
self.assertEqual(connection.host, 'host/location:x:y')
self.assertEqual(connection.schema, 'schema')
self.assertEqual(connection.login, 'user')
self.assertEqual(connection.password, 'password with space')
self.assertEqual(connection.port, 1234)
def test_connection_from_uri_with_encoded_user(self):
uri = 'scheme://domain%2fuser:password@host%2flocation%3ax%3ay:1234/schema'
connection = Connection(uri=uri)
self.assertEqual(connection.conn_type, 'scheme')
self.assertEqual(connection.host, 'host/location:x:y')
self.assertEqual(connection.schema, 'schema')
self.assertEqual(connection.login, 'domain/user')
self.assertEqual(connection.password, 'password')
self.assertEqual(connection.port, 1234)
def test_connection_from_uri_with_encoded_schema(self):
uri = 'scheme://user:password%20with%20space@host:1234/schema%2ftest'
connection = Connection(uri=uri)
self.assertEqual(connection.conn_type, 'scheme')
self.assertEqual(connection.host, 'host')
self.assertEqual(connection.schema, 'schema/test')
self.assertEqual(connection.login, 'user')
self.assertEqual(connection.password, 'password with space')
self.assertEqual(connection.port, 1234)
def test_connection_from_uri_no_schema(self):
uri = 'scheme://user:password%20with%20space@host:1234'
connection = Connection(uri=uri)
self.assertEqual(connection.conn_type, 'scheme')
self.assertEqual(connection.host, 'host')
self.assertEqual(connection.schema, '')
self.assertEqual(connection.login, 'user')
self.assertEqual(connection.password, 'password with space')
self.assertEqual(connection.port, 1234)
class TestSkipMixin(unittest.TestCase):
@patch('airflow.models.timezone.utcnow')
def test_skip(self, mock_now):
session = settings.Session()
now = datetime.datetime.utcnow().replace(tzinfo=pendulum.timezone('UTC'))
mock_now.return_value = now
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
)
with dag:
tasks = [DummyOperator(task_id='task')]
dag_run = dag.create_dagrun(
run_id='manual__' + now.isoformat(),
state=State.FAILED,
)
SkipMixin().skip(
dag_run=dag_run,
execution_date=now,
tasks=tasks,
session=session)
session.query(TI).filter(
TI.dag_id == 'dag',
TI.task_id == 'task',
TI.state == State.SKIPPED,
TI.start_date == now,
TI.end_date == now,
).one()
@patch('airflow.models.timezone.utcnow')
def test_skip_none_dagrun(self, mock_now):
session = settings.Session()
now = datetime.datetime.utcnow().replace(tzinfo=pendulum.timezone('UTC'))
mock_now.return_value = now
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
)
with dag:
tasks = [DummyOperator(task_id='task')]
SkipMixin().skip(
dag_run=None,
execution_date=now,
tasks=tasks,
session=session)
session.query(TI).filter(
TI.dag_id == 'dag',
TI.task_id == 'task',
TI.state == State.SKIPPED,
TI.start_date == now,
TI.end_date == now,
).one()
def test_skip_none_tasks(self):
session = Mock()
SkipMixin().skip(dag_run=None, execution_date=None, tasks=[], session=session)
self.assertFalse(session.query.called)
self.assertFalse(session.commit.called)
class TestKubeResourceVersion(unittest.TestCase):
def test_checkpoint_resource_version(self):
session = settings.Session()
KubeResourceVersion.checkpoint_resource_version('7', session)
self.assertEqual(KubeResourceVersion.get_current_resource_version(session), '7')
def test_reset_resource_version(self):
session = settings.Session()
version = KubeResourceVersion.reset_resource_version(session)
self.assertEqual(version, '0')
self.assertEqual(KubeResourceVersion.get_current_resource_version(session), '0')
class TestKubeWorkerIdentifier(unittest.TestCase):
@patch('airflow.models.uuid.uuid4')
def test_get_or_create_not_exist(self, mock_uuid):
session = settings.Session()
session.query(KubeWorkerIdentifier).update({
KubeWorkerIdentifier.worker_uuid: ''
})
mock_uuid.return_value = 'abcde'
worker_uuid = KubeWorkerIdentifier.get_or_create_current_kube_worker_uuid(session)
self.assertEqual(worker_uuid, 'abcde')
def test_get_or_create_exist(self):
session = settings.Session()
KubeWorkerIdentifier.checkpoint_kube_worker_uuid('fghij', session)
worker_uuid = KubeWorkerIdentifier.get_or_create_current_kube_worker_uuid(session)
self.assertEqual(worker_uuid, 'fghij')
<|file_name|>newmemoryview.py<|end_file_name|>
"""
A pretty lame implementation of a memoryview object for Python 2.6.
"""
from collections import Iterable
from collections import Iterable
from numbers import Integral
import string
from future.utils import istext, isbytes, PY3, with_metaclass
from future.types import no, issubset
# class BaseNewBytes(type):
# def __instancecheck__(cls, instance):
# return isinstance(instance, _builtin_bytes)
class newmemoryview(object): # with_metaclass(BaseNewBytes, _builtin_bytes)):
"""
A pretty lame backport of the Python 2.7 and Python 3.x
memoryview object to Py2.6.
"""
def __init__(self, obj):
# __init__ must return None, so store the wrapped object instead of returning it.
self.obj = obj
__all__ = ['newmemoryview']
<|file_name|>PerformanceMonitor.js<|end_file_name|>
/*
* Atari Arcade SDK
* Developed by gskinner.com in partnership with Atari
* Visit http://atari.com/arcade/developers for documentation, updates and examples.
*
* Copyright (c) Atari Interactive, Inc. All Rights Reserved. Atari and the Atari logo are trademarks owned by Atari Interactive, Inc.
*
* Distributed under the terms of the MIT license.
* http://www.opensource.org/licenses/mit-license.html
*
* This notice shall be included in all copies or substantial portions of the Software.
*/
/** @module GameLibs */
(function(scope) {
/**
* A component which monitors the performance of the game, and toggles low quality
* mode if the framerate stays below the minimum for too long.
* @class PerformanceMonitor
* @param {Function} callback A function to fire when the performance is deemed to be unacceptable.
* @param {Number} minFPS The minimum acceptable frames per second.
* @param {Number} threshold The amount of time in milliseconds that the game is allowed to have poor FPS
* before it toggles low quality.
* @constructor
*/
var PerformanceMonitor = function(callback, minFPS, threshold) {
this.initialize(callback, minFPS, threshold);
}
var s = PerformanceMonitor;
/**
* The default threshold value
* @property DEFAULT_THRESHOLD
* @type {Number}
* @default 3000
* @static
*/
s.DEFAULT_THRESHOLD = 3000;
var p = PerformanceMonitor.prototype = {
maxMs: null,
/**
* The minimum FPS allowed.
* @property minFPS
* @type Number
* @default 30
*/
minFPS: 30,
/**
* The number of milliseconds that can pass before the low quality mode is toggled.
* @property threshold
* @type Number
* @default 3000
*/
threshold:s.DEFAULT_THRESHOLD,
/**
* The method to call when the game enters low quality mode. It is recommended to use a proxy
* method to maintain scope. The callback takes a single argument, which indicates if the game
* is in low quality mode.
* @property callback
* @type Function
*/
callback: null,
/**
* If the game is currently in low quality mode.
* @property lowQualityMode
* @type Boolean
* @default false
*/
lowQualityMode: false,
/**
* The amount of time that has elapsed since the framerate has been acceptable.
* @property timeOnLow
* @type Number
* @default 0
*/
timeOnLow: 0,
initialize: function(callback, minFPS, threshold) {
this.callback = callback;
if(!isNaN(threshold)) { this.threshold = threshold; }
if(!isNaN(minFPS)){ this.minFPS = minFPS; }
this.maxMs = 1000 / minFPS;
this.prevTime = createjs.Ticker.getTime();
createjs.Ticker.addListener(this);
},
/**
* Reset the PerformanceMonitor. This happens whenever a game is restarted or continued.
* Note: Currently NOT implemented in any games.
* @method reset
*/
reset: function() {
this.timeOnLow = 0;
this.lowQualityMode = false;
this.prevTime = createjs.Ticker.getTime();
createjs.Ticker.setFPS(60); //TODO: This should lookup the actual FPS we need.
createjs.Ticker.addListener(this);
this.callback(false);
},
tick: function(){
var deltaT = createjs.Ticker.getTime() - this.prevTime;
this.prevTime = createjs.Ticker.getTime();
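// Count a frame as slow only when it exceeds maxMs; deltas of 200ms or more are
// treated as outliers (e.g. a backgrounded tab) rather than slow rendering.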
if(deltaT < 200 && deltaT > this.maxMs){
this.timeOnLow += deltaT;
if(this.timeOnLow > this.threshold) {
/*
Atari.trace("*** Low Quality Mode toggled.")
*/
this.lowQualityMode = true;
this.callback(true);
createjs.Ticker.setFPS(30);
createjs.Ticker.removeListener(this);
}
} else {
this.timeOnLow = 0;
}
}
}
scope.PerformanceMonitor = PerformanceMonitor;
}(window.GameLibs))
<|file_name|>DeviceStore.js<|end_file_name|>
(function() {
var devices = {};
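// Tracks which device GUIDs have been seen so each zone is only published once.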
$.subscribe('ninja.data', function(topic, d) {
console.log("Got some data", d);
if (!devices[d.G]) {
$.publish('mappu.zone', d.G);
devices[d.G] = true;
}
var age = new Date().getTime() - d.DA.timestamp;
$.publish('mappu.alarm.'+d.G, d.DA.Alarm1, age, d.DA.timestamp);
$.publish('mappu.battery.'+d.G, d.DA.Battery, age, d.DA.timestamp);
$.publish('mappu.tamper.'+d.G, d.DA.Tamper, age, d.DA.timestamp);
});
})();
<|file_name|>OurAppsAdapter.java<|end_file_name|>
package ru.mos.polls.ourapps.ui.adapter;
import java.util.ArrayList;
import java.util.List;
import ru.mos.polls.base.BaseRecyclerAdapter;
import ru.mos.polls.base.RecyclerBaseViewModel;
import ru.mos.polls.ourapps.model.OurApplication;
import ru.mos.polls.ourapps.vm.item.OurApplicationVM;
public class OurAppsAdapter extends BaseRecyclerAdapter<RecyclerBaseViewModel> {
public void add(List<OurApplication> list) {
List<RecyclerBaseViewModel> rbvm = new ArrayList<>();
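// Wrap each application in a view-model the recycler adapter knows how to bind.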
for (OurApplication ourApp : list) {
rbvm.add(new OurApplicationVM(ourApp));
}
addData(rbvm);
}
}
<|file_name|>enums.go<|end_file_name|>
package types
// ApiVersion custom ENUM for SDK forward compatibility
type ApiVersion int
const (
ApiV1 ApiVersion = iota
)
// EnvironmentType
// https://docs.coinapi.io/#endpoints-2
type EnvironmentType int
const (
ProdEncrypted EnvironmentType = iota
ProdInsecure
TestEncrypted
TestInsecure
)
// MessageType replicates the official incoming message types as (kinda) string enum.
type MessageType string
const (
TRADE MessageType = "trade"
QUOTE MessageType = "quote"
BOOK_L2_FULL MessageType = "book" // Orderbook L2 (Full)
BOOK_L2_TOP_5 MessageType = "book5" // Orderbook L2 (5 best Bid / Ask)
BOOK_L2_TOP_20 MessageType = "book20" // Orderbook L2 (20 best Bid / Ask)
BOOK_L2_TOP_50 MessageType = "book50" // Orderbook L2 (50 best Bid / Ask)
BOOK_L3_FULL MessageType = "book_l3" // Orderbook L3 (Full) https://docs.coinapi.io/#orderbook-l3-full-in
OHLCV MessageType = "ohlcv"
VOLUME MessageType = "volume"
HEARTBEAT MessageType = "hearbeat" // DO NOT FIX! it's a typo in the official msg spec!
ERROR MessageType = "error" // Otherwise processMessage(.) fails to handle heartbeat messages!
EXCHANGERATE MessageType = "exrate"
RECONNECT MessageType = "reconnect"
)
// ReconnectType defines the reconnect behavior upon receiving a reconnect message
// https://docs.coinapi.io/#reconnect-in
type ReconnectType int
const (
OnConnectionClose ReconnectType = iota
OnReconnectMessage
)
<|file_name|>GoodExample3.py<|end_file_name|>
from __future__ import print_function
num = 17
test = 2
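# Trial division: try every candidate divisor from 2 up to num - 1.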
while test < num:
if num % test == 0 and num != test:
print(num,'equals',test, '*', num/test)
print(num,'is not a prime number')
break
test = test + 1
else:
print(num,'is a prime number!')
<|file_name|>alg_shortest_game.py<|end_file_name|><|fim▁begin|>"""Shortest game.
When we play games, we always bet in one of two ways in each game:
- betting one chip
- betting all-in
Wins are paid equal to the wager, so if we bet C chips and wins, <|fim▁hole|>
Suppose yesterday was a lucky day for us, we won every game we played.
Starting with 1 chip and leaving the game with N chips. And we played
all-in no more than K times.
Given the integers N and K, return the minimum number of games that
are necessary for us to play.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def shortest_game(N, K):
# Apply top-down recursion, which is efficient with no repetition.
if N <= 2 or K == 0:
# Base cases: If N is 1 or 2, or K is 0, bet N-1 times of 1 chip.
return N - 1
if N % 2 == 0:
# If N is even, bet 1 all-in, and
# continue playing game for N//2 with K-1 all-in opportunities.
return 1 + shortest_game(N // 2, K - 1)
else:
# If N is odd, bet 1 chip, and
# continue playing game for N-1 with K all-in opportunities.
return 1 + shortest_game(N - 1, K)
def main():
# Output: 7
N = 8
K = 0
print(shortest_game(N, K))
# Output: 6
N = 18
K = 2
print(shortest_game(N, K))
# Output: 4
N = 10
K = 10
print(shortest_game(N, K))
# Output: 0
N = 1
K = 0
print(shortest_game(N, K))
# Output: 8
N = 100
K = 5
print(shortest_game(N, K))
if __name__ == '__main__':
main()<|fim▁end|> | we get 2C chips back. |
<|file_name|>aarch64_unknown_netbsd.rs<|end_file_name|><|fim▁begin|>// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your<|fim▁hole|>use spec::{LinkerFlavor, Target, TargetResult};
pub fn target() -> TargetResult {
let mut base = super::netbsd_base::opts();
base.max_atomic_width = Some(128);
base.abi_blacklist = super::arm_base::abi_blacklist();
Ok(Target {
llvm_target: "aarch64-unknown-netbsd".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_c_int_width: "32".to_string(),
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "netbsd".to_string(),
target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
})
}<|fim▁end|> | // option. This file may not be copied, modified, or distributed
// except according to those terms.
|
<|file_name|>flip.js<|end_file_name|>
module.exports = {
attach: function attach(app) {
app.actions.flip = (image, metadata, config, callback) => {
const axis = config.axis || 'y';
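// axis 'y' (the default) mirrors vertically via image.flip(); anything else falls through to the horizontal image.flop().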
if (axis === 'y') {
return callback(undefined, image.flip());
}
return callback(undefined, image.flop());
};
},
};
#include <boost/test/unit_test.hpp>
#include <ostream>
#include "segmentize.hpp"
#include "bg_operators.hpp"
using namespace std;
BOOST_AUTO_TEST_SUITE(segmentize_tests)
void print_result(const vector<std::pair<linestring_type_fp, bool>>& result) {
cout << result;
}
BOOST_AUTO_TEST_CASE(abuts) {
vector<pair<linestring_type_fp, bool>> ms = {
{{{0,0}, {2,2}}, true},
{{{1,1}, {2,0}}, true},
};
const auto& result = segmentize::segmentize_paths(ms);
BOOST_CHECK_EQUAL(result.size(), 3UL);
}
BOOST_AUTO_TEST_CASE(x_shape) {
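// Two segments crossing like an X are split at the intersection into four pieces.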
vector<pair<linestring_type_fp, bool>> ms = {
{{{0,10000}, {10000,9000}}, true},
{{{10000,10000}, {0,0}}, true},
};
const auto& result = segmentize::segmentize_paths(ms);
}
BOOST_AUTO_TEST_CASE(plus_shape) {
vector<pair<linestring_type_fp, bool>> ms = {
{{{1,2}, {3,2}}, true},
{{{2,1}, {2,3}}, true},
};
const auto& result = segmentize::segmentize_paths(ms);
BOOST_CHECK_EQUAL(result.size(), 4UL);
}
BOOST_AUTO_TEST_CASE(touching_no_overlap) {
vector<pair<linestring_type_fp, bool>> ms = {
{{{1,20}, {40,50}}, true},
{{{40,50}, {80,90}}, true},
};
const auto& result = segmentize::segmentize_paths(ms);
BOOST_CHECK_EQUAL(result.size(), 2UL);
}
BOOST_AUTO_TEST_CASE(parallel_with_overlap) {
vector<pair<linestring_type_fp, bool>> ms = {
{{{10,10}, {0,0}}, false},
{{{9,9}, {20,20}}, true},
{{{30,30}, {15,15}}, true},
};
const auto& result = segmentize::segmentize_paths(ms);
BOOST_CHECK_EQUAL(result.size(), 7UL);
//print_result(result);
}
BOOST_AUTO_TEST_CASE(parallel_with_overlap_directed) {
vector<pair<linestring_type_fp, bool>> ms = {
{{{10,10}, {0,0}}, true},
{{{9,9}, {20,20}}, false},
{{{30,30}, {15,15}}, false},
};
const auto& result = segmentize::segmentize_paths(ms);
BOOST_CHECK_EQUAL(result.size(), 7UL);
//print_result(result);
}
BOOST_AUTO_TEST_CASE(sort_segments) {
vector<pair<linestring_type_fp, bool>> ms = {
{{{10,10}, {13,-4}}, true},
{{{13,-4}, {10,10}}, true},
{{{13,-4}, {10,10}}, true},
{{{10, 10}, {13, -4}}, true},
{{{10, 10}, {13, -4}}, true},
};
const auto& result = segmentize::segmentize_paths(ms);
BOOST_CHECK_EQUAL(result.size(), 5UL);
//print_result(result);
}
BOOST_AUTO_TEST_SUITE_END()
<|file_name|>ui.rs<|end_file_name|>
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Computed values for UI properties
use values::{Auto, Either};
use values::computed::Number;
use values::computed::color::Color;
use values::computed::url::ComputedImageUrl;
use values::generics::ui as generics;
pub use values::specified::ui::MozForceBrokenImageIcon;
/// auto | <color>
pub type ColorOrAuto = Either<Color, Auto>;
/// A computed value for the `cursor` property.
pub type Cursor = generics::Cursor<CursorImage>;
/// A computed value for an item of `image cursors`.
pub type CursorImage = generics::CursorImage<ComputedImageUrl, Number>;
<|file_name|>imports.py<|end_file_name|>
from __future__ import absolute_import
import pkgutil
import six
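# Maps legacy dotted model paths to their tagstore replacements so old references keep resolving.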
MODEL_MOVES = {
"sentry.models.tagkey.TagKey": "sentry.tagstore.legacy.models.tagkey.TagKey",<|fim▁hole|> "sentry.models.grouptagvalue.GroupTagValue": "sentry.tagstore.legacy.models.grouptagvalue.GroupTagValue",
"sentry.models.eventtag.EventTag": "sentry.tagstore.legacy.models.eventtag.EventTag",
}
class ModuleProxyCache(dict):
def __missing__(self, key):
if "." not in key:
return __import__(key)
module_name, class_name = key.rsplit(".", 1)
module = __import__(module_name, {}, {}, [class_name])
handler = getattr(module, class_name)
# We cache a NoneType for missing imports to avoid repeated lookups
self[key] = handler
return handler
_cache = ModuleProxyCache()
def import_string(path):
"""
Path must be module.path.ClassName
>>> cls = import_string('sentry.models.Group')
"""
path = MODEL_MOVES.get(path, path)
result = _cache[path]
return result
def import_submodules(context, root_module, path):
"""
Import all submodules and register them in the ``context`` namespace.
>>> import_submodules(locals(), __name__, __path__)
"""
for loader, module_name, is_pkg in pkgutil.walk_packages(path, root_module + "."):
# this causes a Runtime error with model conflicts
# module = loader.find_module(module_name).load_module(module_name)
module = __import__(module_name, globals(), locals(), ["__name__"])
for k, v in six.iteritems(vars(module)):
if not k.startswith("_"):
context[k] = v
context[module_name] = module
<|file_name|>angular-resource.js<|end_file_name|>
version https://git-lfs.github.com/spec/v1
oid sha256:9e6f79a284bf0e25e4a049856c97549792145e4af30916b5044b69d4779caae2
size 23494
<|file_name|>migration.py<|end_file_name|>
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import abc
import logging
from datetime import datetime
from typing import Any, ClassVar, Dict
import tinydb
logger = logging.getLogger(__name__)
class Migration(metaclass=abc.ABCMeta):
"""Migrates schema from <SCHEMA_VERSION-1> to <SCHEMA_VERSION>."""
SCHEMA_VERSION: ClassVar[int] = 0
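# Concrete migrations override SCHEMA_VERSION with the version they upgrade the store to.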
def __init__(self, *, db: tinydb.TinyDB, snapcraft_version: str) -> None:
self.db = db
self._snapcraft_version = snapcraft_version
def _query_control_record(self) -> Dict[str, Any]:
"""Query control record (single document in 'control' table)."""
control_table = self.db.table("control")
control_records = control_table.all()
if len(control_records) == 0:
return dict(schema_version=0)
elif len(control_records) == 1:
return control_records[0]
raise RuntimeError(f"Invalid control records: {control_records!r}")
def _update_control_schema_version(self) -> None:
"""Update 'control' table record to SCHEMA_VERSION."""
control_record = self._query_control_record()
control_record["schema_version"] = self.SCHEMA_VERSION
control_table = self.db.table("control")
control_table.truncate()
control_table.insert(control_record)
def _record_migration(self) -> None:
"""Record migration in 'migration' table."""
migration_table = self.db.table("migration")
migration_table.insert(
{
"schema_version": self.SCHEMA_VERSION,
"snapcraft_version": self._snapcraft_version,
"timestamp": datetime.utcnow().isoformat() + "Z",
}
)
@abc.abstractmethod
def _migrate(self) -> None:
"""Per-migration implementation."""
...
def apply(self) -> int:
"""Apply migration, if determined to be necessary.
Returns current schema version."""
control_record = self._query_control_record()
current_schema_version = control_record["schema_version"]
if self.SCHEMA_VERSION <= current_schema_version:
logger.debug(
f"Migration apply: migration {self.SCHEMA_VERSION} already applied, ignoring..."
)
return current_schema_version
logger.debug(
f"Migration apply: applying migration for {self.SCHEMA_VERSION} for {control_record}"
)
self._migrate()
self._record_migration()
self._update_control_schema_version()
return self.SCHEMA_VERSION
class MigrationV1(Migration):
"""Default (Initial) Migration to v1."""
SCHEMA_VERSION: ClassVar[int] = 1
def _migrate_control(self) -> None:
control_table = self.db.table("control")
control_table.insert(
{
"created_with_snapcraft_version": self._snapcraft_version,
"schema_version": self.SCHEMA_VERSION,
}
)
def _migrate(self) -> None:
"""Per-migration implementation."""
self._migrate_control()
<|file_name|>tests.py<|end_file_name|>
# This is where the tests go.
<|file_name|>wmcounter.js<|end_file_name|>
// These values are updated every 5 minutes
// using site_stats table from all wikis replicated in
// WMF Labs databases.
// Families updated include ["wikibooks", "wikipedia", "wiktionary", "wikimedia", "wikiquote", "wikisource", "wikinews", "wikiversity", "commons", "wikispecies", "wikidata", "wikivoyage"]
// More questions? emijrp AT gmail DOT com
var timenow = new Date().getTime();
var period = 20; // update period in milliseconds
var spliter = ",";
var spliter_r = new RegExp(/(^|\s)(\d+)(\d{3})/);
function init() {
adjustSizes();
var lang = "";
var header = "";
var donate = "";
var f11 = "";
var author = "";
if (navigator.systemLanguage) {
lang = navigator.systemLanguage;
}else if (navigator.userLanguage) {
lang = navigator.userLanguage;
}else if(navigator.language) {
lang = navigator.language;
}else {
lang = "en";
}
if (lang.length>2) { lang=lang.substring(0,2); }
switch(lang){
case "example":
header='<a href="http://www.wikimedia.org"></a>:';
spliter=' ';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia"></a>';
f11='';
author='<a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (<a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "af":
header='Totale wysigings in alle <a href="http://www.wikimedia.org">Wikimedia-projekte</a>:';
spliter=' ';
donate="<a href='http://wikimediafoundation.org/wiki/Skenk'>Skenk 'n donasie aan die Wikimedia-stigting</a>"; //be careful with 'n
f11='Druk op F11 vir volskerm';
author='Ontwikkel deur <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (inspirasie deur <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "als":
header='Gsamtaazahl Bearbeitige uff de <a href="http://www.wikimedia.org">Wikimedia-Brojäkt:</a>';
spliter=' ';
donate="<a href='http://wikimediafoundation.org/wiki/Finanzielli_Hilf'>Understütz d'Wikimedia Foundation</a>"; //be careful with d'
f11='Vollbild: F11';
author='Gschribe vum <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (uff Basis vu <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "an":
header='Edicions totals en <a href="http://www.wikimedia.org">prochectos Wikimedia</a>:';
spliter='.';
donate="<a href='http://wikimediafoundation.org/wiki/Support_Wikipedia'>Fer una donación t'a Fundación Wikimedia</a>"; //be careful with t'
f11='Pretar F11 ta veyer en pantalla completa';
author='Desembolicau por <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Inspirau por <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "ar":
header='مجموع التعديلات في <a href="http://www.wikimedia.org">مشاريع ويكيميديا</a>:';
spliter=',';
donate='<a href="http://wikimediafoundation.org/wiki/جمع_تبرعات">تبرع لمؤسسة ويكيميديا</a>';
f11='للشاشة الكاملة اضغط F11';
break;
case "az":
header='<a href="http://www.wikimedia.org">Wikimedia layihəsində </a> redaktələrin ümumi sayı:';
spliter=',';
donate='<a href="http://wikimediafoundation.org/wiki/Bağışlar">Wikimedia Foundation təşkilatına ianələrin göndərilməsi</a>';
f11='Ekranın tam açılması üçün F11 düyməsini basın';
author='<a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> tərəfindən (<a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a> dəstəyi ilə) işlənmişdir';
break;
case "be":
header='Агулам правак у <a href="http://www.wikimedia.org">праектах Фундацыі «Вікімэдыя»</a>:';
spliter=' ';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">Ахвяруйце Фундацыі Вікімэдыя</a>';
f11='Націсьніце F11 для поўнаэкраннага прагляду';
author='Распрацаваў <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (ідэя <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "bg":
header='Общ брой редакции в <a href="http://www.wikimedia.org">проектите на Уикимедия</a>:';
spliter=' ';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">Подкрепете с дарение Фондация Уикимедия</a>';
f11='Натиснете F11 за показване на голям екран';
author='Разработено от <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (вдъхновено от <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "bn":
header='<a href="http://www.wikimedia.org">উইকিমিডিয়ার বিভিন্ন প্রকল্পে</a> সর্বমোট সম্পাদনার সংখ্যা:';
spliter=',';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">উইকিমিডিয়া ফাউন্ডেশনে দান করুন</a>';
f11='সম্পূর্ন স্ক্রিন জুড়ে দেখতে হলে F11 চাপুন';
author='এই কাউন্টারটি তৈরী করেছেন <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (<a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a> এর অনুপ্রেরণায়)';
break;
case "br":
header='Niver hollek a gemmoù er <a href="http://www.wikimedia.org">raktresoù Wikimedia</a> :';
spliter=' ';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">Donezoniñ da Ziazezadur Wikimedia</a>';
f11='Pouezit war F11 evit ar mod skramm leun';
author='Diorroet gant <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Awenet gant <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "bs":
header='Ukupne izmjene u svim <a href="http://www.wikimedia.org">Wikimedia projektima</a>:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">Donirajte Wikimedia Fondaciji</a>';
f11='Pritisnite F11 za prikaz preko cijelog ekrana';
author='Razvio korisnik <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Inspiriran od strane <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "ca":
header='Edicions entre tots els <a href="http://www.wikimedia.org">projectes de Wikimedia</a>:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Donatius">Dona a la Fundació Wikimedia</a>';
f11='Pantalla completa pulsant F11';
author='Desarrollat per <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Inspirat en <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "ceb":
header='Mga tibuok kausaban sa <a href="http://www.wikimedia.org">mga proyekto sa Wikimedya</a>:';
spliter=',';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">Idonar sa Wikimedia Foundation</a>';
f11='Tuploka ang F11 aron mapuno sa tabil';
author='Gipalambo ni <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Nadasig sa <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "cs":
header='Celkový počet editací v <a href="http://www.wikimedia.org">projektech nadace Wikimedia</a>:';
spliter=' ';
donate='<a href="http://wikimediafoundation.org/wiki/Sponzorství">Podpořte Wikimedia Foundation</a>';
f11='Stisknutím klávesy F11 zobrazíte stránku na celou obrazovku';
author='Vyvinul <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (inspirováno stránkami <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "cy":
header='Cyfanswm yr holl olygiadau ym <a href="http://www.wikimedia.org">mhrosiectau Wikimedia</a>:';
spliter=' ';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">Cyfrannwch at Sefydliad Wikimedia</a>';
f11='Gwasgwch F11 am sgrîn lawn';
author='Datblygwyd gan <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Ysbrydolwyd gan <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "da":
header='Samlet antal rettelser på tværs af alle <a href="http://www.wikimedia.org">Wikimedia-projekter</a>:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Indsamling">Giv et bidrag til Wikimedia Foundation</a>';
f11='Tryk F11 for fuldskærmsvisning';
author='Udviklet af <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Inspireret af <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "de":
header='Gesamtzahl der Bearbeitungen in <a href="http://www.wikimedia.org">den Wikimedia-Projekten</a>:';
spliter=' ';
donate='<a href="http://wikimediafoundation.org/wiki/Spenden">Spende an die Wikimedia Foundation</a>';
f11='Drücke F11 für die Vollbild-Anzeige';
author='Entwickelt von <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Inspiriert durch <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "el":
header='Συνολικές επεξεργασίες στα <a href="http://www.wikimedia.org">εγχειρήματα του Wikimedia</a>:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">Κάντε δωρεά στο Ίδρυμα Wikimedia</a>';
f11='Πατήστε F11 για πλήρη οθόνη';
author='Αναπτύχθηκε από τον <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Εμπνευσμένο από το <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "eo":
header='Totala nombro de redaktoj en <a href="http://www.wikimedia.org">Vikimediaj projektoj</a>:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Monkolektado">Donaci al Fondaĵo Vikimedio</a>';
f11='Premu F11 por plenekrana modo';
author='Kreita de <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Inspirita de <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "es":
header='Ediciones entre todos los <a href="http://www.wikimedia.org">proyectos Wikimedia</a>:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Donaciones">Dona a la Fundación Wikimedia</a>';
f11='Pantalla completa pulsando F11';
author='Desarrollado por <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Inspirado en <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "et":
header='<a href="http://www.wikimedia.org">Wikimedia projektides</a> tehtud redigeerimiste koguarv:';
spliter=' ';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">Anneta Wikimedia sihtasutusele</a>';
f11='Täisekraani jaoks vajuta F11';
author='Kasutajalt <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (<a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a> eeskujul)';
break;
case "eu":
header='<a href="http://www.wikimedia.org">Wikimedia proiektuetan</a> egindako eguneraketak guztira:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Dohaintzak">Wikimedia Foundazioari dohaintza egin</a>';
f11='F11 sakatu pantaila osoan erakusteko';
author='<a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a>-ek garatua (<a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>-ek inspiratuta)';
break;
case "fa":
header='مجموع ویرایشها در <a href="http://www.wikimedia.org">پروژه ویکیمدیا</a>:';
spliter=',';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">کمک مالی به بنیاد ویکیمدیا</a>';
f11='را برای نمایش تمام صفحه فشار دهید F11کلید';
author='گسترشیافته بوسیله <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (با الهام از <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "fr":
header="Nombre total d'éditions dans les <a href='http://www.wikimedia.org'>projets Wikimedia</a>:"; // be careful with d'éditions
spliter=' ';
donate='<a href="http://wikimediafoundation.org/wiki/Faire_un_don">Donner à la Wikimedia Foundation</a>';
f11='Appuyez sur F11 pour passer en plein écran';
author='Développé par <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Inspiré par <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "gu":
header='<a href="http://www.wikimedia.org">વિકિમીડિયા પરિયોજના </a> માં કુલ સંપાદનો';
spliter=' ';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">વિકિમીડિયા ફાઉન્ડેશનને દાન આપો</a>';
f11='ફુલ સ્ક્રીન માટે F11 દબાવો';
author='<a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp ધ્વારા વિકસિત </a> (<a href="http://www.7is7.com/software/firefox/partycounter.html">7is7 ધ્વરા પ્રેરિત </a>)';
break;
case "hi":
header='<a href="http://www.wikimedia.org">विकिमीडिया परियोजना</a> में कुल संपादन:';
spliter=' ';
donate='<a href="http://wikimediafoundation.org/wiki/Donate/hi">विकिमीडिया फ़ौंडेशन को दान करें। </a>';
f11='पूर्ण स्क्रीन के लिए ऍफ़११ [F11] दबाएँ।';
author='<a href="https://en.wikipedia.org/wiki/User:Emijrp">एमिजआरपी [emijrp]</a> द्वारा विकसित (<a href="http://www.7is7.com/software/firefox/partycounter.html">७इस७ [7is7]</a> द्वारा प्रेरित।)';
break;
case "hu":
header='<a href="http://www.wikimedia.org">A Wikimédia projektek</a> együttes szerkesztésszáma:';
spliter=' ';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia/hu">Támogasd a Wikimédia Alapítványt</a>';
f11='Teljes képernyős mód: F11';
author='Készítette: <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (<a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a> ötlete alapján)';
break;
case "id":
header='Jumlah suntingan di <a href="http://www.wikimedia.org">proyek Wikimedia</a>:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Penggalangan_dana">Menyumbang untuk Yayasan Wikimedia</a>';
f11='Tekan F11 untuk tampilan layar penuh';
author='Dikembangkan oleh <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Terinspirasi dari <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "it":
header='Modifiche totali nei <a href="http://www.wikimedia.org">progetti Wikimedia</a>:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Donazioni">Fai una donazione a Wikimedia Foundation</a>';
f11='Premi F11 per passare a schermo intero';
author='Sviluppato da <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (ispirato da <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "ja":
header='<a href="http://www.wikimedia.org">ウィキメディア・プロジェクト</a>の総編集回数';
spliter=',';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">ウィキメディア財団に寄付</a>';
f11='F11キーでフルスクリーン表示';
author='開発:<a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (原案:<a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "kl":
header='Tamakkiisumik amerlassutsit aaqqissuussinerni <a href="http://www.wikimedia.org">Wikimedia suliniutaani</a>:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">Wikimedia suliniutaani tunissuteqarit</a>';
f11='F11 tooruk tamaat saqqummissagukku';
author='Siuarsaasuuvoq <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Peqatigalugu <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "ko":
header='<a href="http://www.wikimedia.org">위키미디어 재단에서 운영하는 프로젝트</a>의 총 편집 횟수:';
spliter=',';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">위키미디어 재단에 기부하기</a>';
f11='F11 키를 누르면 전체 화면 모드로 전환합니다';
author='<a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a>이 만듬 (<a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>에서 영감을 얻음)';
break;
case "nl":
header='Totaal aantal bewerkingen in <a href="http://www.wikimedia.org">Wikimediaprojecten</a>:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">Doneer aan de Wikimedia Foundation</a>';
f11='Druk op F11 voor volledig scherm';
author='Ontwikkeld door <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Geïnspireerd door <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "oc":
header='Edicions totalas dins <a href="http://www.wikimedia.org">los projèctes de Wikimedia</a>:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">Far una donacion a la fondacion Wikimedia</a>';
f11="Quichar sus F11 per un afichatge sus tot l'ecran";
author='Desvolopat per <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Inspirat de <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "pl":
header='Ogólna liczba edycji w <a href="http://www.wikimedia.org">projektach Wikimedia</a>:';
spliter=' ';
donate='<a href="http://wikimediafoundation.org/wiki/Dary_pieniężne">Wesprzyj Wikimedia Foundation</a>';
f11='Naciśnij F11, aby włączyć tryb pełnoekranowy';
author='Stworzony przez <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (zainspirowany przez <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "pt":
header='Total de edições nos <a href="http://www.wikimedia.org">projetos Wikimedia</a>:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Coleta_de_fundos">Doe para a Fundação Wikimedia</a>';
f11='Pressione F11 para tela cheia';
author='Desenvolvido por <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Inspirado em <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "ro":
header='Numărul total de modificări în <a href="http://www.wikimedia.org">proiectele Wikimedia</a>:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Donaţii">Donaţi pentru Wikimedia</a>';
f11='Apăsați F11 pentru afișarea pe tot ecranul';
author='Dezvoltat de <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (inspirat de la <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "ru":
header='Всего правок в <a href="http://www.wikimedia.org">проектах Викимедиа</a>:';
spliter=' ';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia/ru">Пожертвуйте «Фонду Викимедиа»</a>';
f11='Нажмите F11 для показа на весь экран';
author='Разработал <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Основано на <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "sv":
header='Antal redigeringar i <a href="http://www.wikimedia.org">Wikimediaprojekten</a>:';
spliter=' ';
donate='<a href="http://wikimediafoundation.org/wiki/Insamling">Donera till Wikimedia Foundation</a>';
f11='Tryck F11 för helskärm';
author='Utvecklad av <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Inspirerad av <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "te":
header='<a href="http://www.wikimedia.org">వికీమీడియా ప్రాజెక్టుల</a>లో మొత్తం దిద్దుబాట్లు:';
spliter=',';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">వికీమీడియా ఫౌండేషనుకి విరాళమివ్వండి</a>';
f11='నిండుతెర కొరకు F11 నొక్కండి';
author='రూపొందించినది <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (<a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a> ప్రేరణతో)';
break;
case "tr":
header='<a href="http://www.wikimedia.org">Wikimedia projelerindeki</a> toplam düzenleme sayısı:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">Wikimedia Vakfına bağışta bulunun</a>';
f11='Tam ekran görüntülemek için F11 tuşuna basın';
author='<a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> tarafından geliştirilmiştir (Esin kaynağı <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
case "ur":
header=' جملہ ترامیم در <a href="http://www.wikimedia.org">ویکیمیڈیا منصوبہ جات</a>:';
spliter='.';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">مؤسسہ ویکیمیڈیا کو عطیہ دیں</a>';
f11='مکمل سکرین دیکھنے کے لیے کلک رکیں F11';
author='ترقی دہندہ <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (متاثر از <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
break;
default:
header='Total edits in <a href="http://www.wikimedia.org">Wikimedia projects</a>:';
spliter=',';
donate='<a href="http://wikimediafoundation.org/wiki/Support_Wikipedia">Donate to Wikimedia Foundation</a>';
f11='Press F11 for fullscreen';
author='Developed by <a href="https://en.wikipedia.org/wiki/User:Emijrp">emijrp</a> (Inspired by <a href="http://www.7is7.com/software/firefox/partycounter.html">7is7</a>)';
}
document.getElementById('header').innerHTML = header;
document.getElementById('donate').innerHTML = donate;
document.getElementById('f11').innerHTML = f11;
document.getElementById('author').innerHTML = author;
window.setTimeout(update, period);
}
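// update() extrapolates the live total linearly from editinit/timeinit/editrate
// (sampled earlier in the page script) and re-renders it every `period` ms,
// inserting the locale's thousands separator chosen in init().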
function update() {
timenow2 = new Date().getTime();
if (Math.round(((timenow2-timenow)/1000)+1) % 300 == 0) { window.setTimeout(function() { window.location.reload(); }, 1100); } //refresh page (wrap reload in a function so it is deferred, not invoked immediately)
editnow = editinit + (timenow2-timeinit) * editrate;
editnowtext = ""+Math.round(editnow);
for(var i=3; i<editnowtext.length; i+=3) {
editnowtext = editnowtext.replace(spliter_r,'$2'+spliter+'$3');
}
document.getElementById('counter').innerHTML = editnowtext;
window.setTimeout(update, period);
}
function adjustSizes(){
var width=800;
var height=600;
if (self.innerWidth) {
width=self.innerWidth;
height=self.innerHeight;
} else if (document.documentElement && document.documentElement.clientWidth) {
width=document.documentElement.clientWidth;
height=document.documentElement.clientHeight;
} else if (document.body) {
width=document.body.clientWidth;
height=document.body.clientHeight;
}
document.getElementById('wrapper').style.height=(height-10)+'px';
document.getElementById('header').style.fontSize=width/45+'pt';
document.getElementById('footer').style.fontSize=width/45+'pt';
document.getElementById('counter').style.fontSize=width/12+'pt';
}
window.onload = init;
window.onresize = adjustSizes;<|fim▁end|> | |
<|file_name|>Event.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.conf import settings
from django.utils import timezone
from django.core.validators import MinValueValidator
from django.urls import reverse
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from .Committee import Committee
from .Registration import Registration
from lib.CommaSeparatedStringsField import CommaSeparatedStringsField
class Event(models.Model):
name = models.CharField(max_length=25)
description = models.TextField(max_length=255)
long_description = models.TextField(blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
published_at = models.DateTimeField(default=timezone.now, blank=True)
deadline_at = models.DateTimeField()
start_at = models.DateTimeField()
end_at = models.DateTimeField()
note_field = models.CharField(max_length=100, default='', blank=True)
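    # Optional preset choices for note_field, stored comma-separated; when left
    # empty the note field is free-form (see get_note_field_options and clean).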
note_field_options = CommaSeparatedStringsField(max_length=255, default='', blank=True)
note_field_required = models.BooleanField()
note_field_public = models.BooleanField()
location = models.CharField(max_length=25)
price = models.DecimalField(max_digits=5, decimal_places=2, default=0)
calendar_url = models.CharField(max_length=255, blank=True)
committee = models.ForeignKey(Committee, on_delete=models.PROTECT)
participants = models.ManyToManyField(settings.AUTH_USER_MODEL, through=Registration)
places = models.PositiveIntegerField(default=None, null=True, blank=True, validators=[MinValueValidator(1)])
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('event-detail', args=[self.pk])
def is_published(self):
"""
Return true if the event is published (past published date and not past end date)<|fim▁hole|> """
return self.published_at < timezone.now() < self.end_at
def is_expired(self):
"""
Return true if deadline is expired.
"""
return self.deadline_at is not None and self.deadline_at < timezone.now()
def is_full(self):
"""
Return true if there are no free places left.
"""
return self.get_free_places() is not None and self.get_free_places() <= 0
def get_free_places(self):
"""
Return the number of free places left.
"""
if self.places is None:
# If the event doesn't have a places limit, the value of this function is not defined
return None
else:
return self.places - Registration.objects.filter(event=self, withdrawn_at__isnull=True).count()
def get_active_registrations_count(self):
"""
Return the number of non-withdrawn registrations
"""
return self.registration_set.filter(withdrawn_at__isnull=True).count()
def is_almost_expired(self):
"""
Return true if the deadline is closer than a day.
"""
return self.deadline_at - timezone.now() < timezone.timedelta(days=1) and not self.is_expired()
def get_note_field_options(self):
"""
Return list of tuples from list of options
"""
return [('', self.note_field + ':')] + [(x, x) for x in self.note_field_options]
def clean(self):
if self.start_at > self.end_at:
raise ValidationError(_("Begindatum is later dan de einddatum!"))
if self.start_at < timezone.now():
raise ValidationError({'start_at': _("Startdatum is in het verleden!")})
if self.end_at < timezone.now():
raise ValidationError({'end_at': _("Einddatum is in het verleden!")})
if self.note_field_options and len(self.note_field_options) < 2:
raise ValidationError({'note_field_options': _("Geef minstens twee opties op.")})
class Meta:
ordering = ['created_at']<|fim▁end|> | |
<|file_name|>testUtils.py<|end_file_name|><|fim▁begin|>import unittest
from pyicoteolib.utils import DualSortedReader
from pyicoteolib.core import BED
class TestUtils(unittest.TestCase):<|fim▁hole|>
def test_dual_reader(self):
reader = DualSortedReader("test_files/mini_sorted.bed", "test_files/mini_sorted2.bed", BED, False, False)
merged_file = open("test_files/mini_sorted_merged.bed")
for line in reader:
if line:
self.assertEqual(line, merged_file.next())
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestUtils))
return suite<|fim▁end|> | |
<|file_name|>rotate.go<|end_file_name|><|fim▁begin|>package cmd
import (
"github.com/spf13/cobra"
)
// rotateCmd represents the shuffletips command
var rotateCmd = &cobra.Command{
Use: "rotate",
Short: "Rotates children of internal nodes",
Long: `Rotates children of internal nodes by different means.
Either randomly with "rand" subcommand, either sorting by number of tips
with "sort" subcommand.
It does not change the topology, but just the order of neighbors
of all node and thus the newick representation.
------C ------A
x |z x |z
A---------*ROOT => B---------*ROOT
|t |t
------B ------C
<|fim▁hole|>Example of usage:
gotree rotate rand -i t.nw
gotree rotate sort -i t.nw
`,
}
func init() {
RootCmd.AddCommand(rotateCmd)
rotateCmd.PersistentFlags().StringVarP(&intreefile, "input", "i", "stdin", "Input tree")
rotateCmd.PersistentFlags().StringVarP(&outtreefile, "output", "o", "stdout", "Rotated tree output file")
}<|fim▁end|> | |
<|file_name|>test_benchmarks.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import sys
import time
import pytest
try:
import yajl
except ImportError:
yajl = None
try:
import simplejson
except ImportError:
simplejson = None
try:
import json
except ImportError:
json = None
try:
import rapidjson
except ImportError:
rapidjson = None
try:
import ujson
except ImportError:
ujson = None
default_data = {
'words': """
Lorem ipsum dolor sit amet, consectetur adipiscing
elit. Mauris adipiscing adipiscing placerat.
Vestibulum augue augue,
pellentesque quis sollicitudin id, adipiscing.
""",
'list': list(range(200)),
'dict': dict((str(i), 'a') for i in list(range(200))),
'int': 100100100,
'float': 100999.123456
}
user = {
"userId": 3381293,
"age": 213,
"username": "johndoe",
"fullname": u"John Doe the Second",
"isAuthorized": True,
"liked": 31231.31231202,
"approval": 31.1471,
"jobs": [1, 2],
"currJob": None
}
friends = [user, user, user, user, user, user, user, user]
def time_func(func, data, iterations):
start = time.time()
while iterations:
iterations -= 1
func(data)
return time.time() - start
def run_client_test(
name, serialize, deserialize, iterations=100 * 1000, data=default_data
):
squashed_data = serialize(data)
serialize_profile = time_func(serialize, data, iterations)
deserialize_profile = time_func(deserialize, squashed_data, iterations)
return serialize_profile, deserialize_profile
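# Only the JSON libraries that imported successfully above are registered as
# benchmark contenders.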
contenders = []
if yajl:
contenders.append(('yajl', yajl.Encoder().encode, yajl.Decoder().decode))
if simplejson:
contenders.append(('simplejson', simplejson.dumps, simplejson.loads))
if json:
contenders.append(('stdlib json', json.dumps, json.loads))
if rapidjson:
contenders.append(
('rapidjson', rapidjson.dumps, rapidjson.loads)
)
if ujson:
contenders.append(
('ujson', ujson.dumps, ujson.loads)
)
doubles = []
unicode_strings = []
strings = []
booleans = []
list_dicts = []
dict_lists = {}
medium_complex = [
[user, friends], [user, friends], [user, friends],
[user, friends], [user, friends], [user, friends]
]
for x in range(256):
doubles.append(sys.maxsize * random.random())
unicode_strings.append(
"نظام الحكم سلطاني وراثي في الذكور من ذرية السيد تركي بن سعيد بن سلطان ويشترط فيمن يختار لولاية الحكم من بينهم ان يكون مسلما رشيدا عاقلا ًوابنا شرعيا لابوين عمانيين ")
strings.append("A pretty long string which is in a list")
booleans.append(True)
for y in range(100):
arrays = []
list_dicts.append({str(random.random() * 20): int(random.random() * 1000000)})
for x in range(100):
arrays.append({str(random.random() * 20): int(random.random() * 1000000)})
dict_lists[str(random.random() * 20)] = arrays
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_serialization(name, serialize, deserialize, benchmark):
ser_data, des_data = benchmark(run_client_test, name, serialize, deserialize)
msg = "\n%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_unicode_strings(name, serialize, deserialize, benchmark):
print("\nArray with 256 unicode strings:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=unicode_strings,
iterations=5000,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_ascii_strings(name, serialize, deserialize, benchmark):
print("\nArray with 256 ascii strings:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=strings,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_booleans(name, serialize, deserialize, benchmark):
print("\nArray with 256 True's:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=booleans,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_list_of_dictionaries(name, serialize, deserialize, benchmark):
print("\nArray of 100 dictionaries:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=list_dicts,
iterations=5,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_dictionary_of_lists(name, serialize, deserialize, benchmark):
print("\nDictionary of 100 Arrays:")<|fim▁hole|> ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=dict_lists,
iterations=5,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_medium_complex_objects(name, serialize, deserialize, benchmark):
print("\n256 Medium Complex objects:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=medium_complex,
iterations=50000,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
def test_double_performance_float_precision(benchmark):
print("\nArray with 256 doubles:")
name = 'rapidjson (precise)'
serialize = rapidjson.dumps
deserialize = rapidjson.loads
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=doubles,
iterations=50000,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)<|fim▁end|> | |
<|file_name|>gml.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Copyright (C) 2004 Mark H. Lyon <[email protected]>
#
# This file is the Mbox & Maildir to Gmail Loader (GML).
#
# Mbox & Maildir to Gmail Loader (GML) is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# GML is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
<|fim▁hole|>
# Origional development thread at Ars Technica:
# http://episteme.arstechnica.com/eve/ubb.x?a=tpc&s=50009562&f=6330927813&m=108000474631
#
# Version 0.1 - 15 Jun 04 16:28 Supports Mbox
# Version 0.2 - 15 Jun 04 18:48 Implementing Magus` suggestion for Maildir
# Version 0.3 - 16 Jun 04 16:17 Implement Rold Gold suggestion for counters
# Version 0.4 - 17 Jun 04 13:15 Add support for changing SMTP server at command line
# Version 0.5 - 05 Oct 09 redo exception handling to see what Google's
# complaints are on failure, update to use TLS
import mailbox, smtplib, sys, time, string
def main ():
print "\nMbox & Maildir to Gmail Loader (GML) by Mark Lyon <[email protected]>\n"
if len(sys.argv) in (5, 6) :
boxtype_in = sys.argv[1]
mailboxname_in = sys.argv[2]
emailname_in = sys.argv[3]
password_in = sys.argv[4]
else:
usage()
try:
smtpserver_in = sys.argv[5]
except:
smtpserver_in = 'smtp.gmail.com'
print "Using smtpserver %s\n" % smtpserver_in
count = [0,0,0]
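    # count = [sent, errors, skipped]; the errors slot is no longer incremented
    # since the v0.5 exception-handling rewrite.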
try:
if boxtype_in == "maildir":
mb = mailbox.Maildir(mailboxname_in)
else:
mb = mailbox.UnixMailbox (file(mailboxname_in,'r'))
msg = mb.next()
except:
print "*** Can't open file or directory. Is the path correct? ***\n"
usage()
while msg is not None:
try:
document = msg.fp.read()
        except:
            count[2] = count[2] + 1
            print "*** %d MESSAGE READ FAILED, SKIPPED" % (count[2])
            msg = mb.next()
            continue  # don't fall through and resend the previous message's body
if document is not None:
fullmsg = msg.__str__( ) + '\x0a' + document
server = smtplib.SMTP(smtpserver_in)
#server.set_debuglevel(1)
server.ehlo()
server.starttls()
# smtplib won't send auth info without this second ehlo after
# starttls -- thanks to
# http://bytes.com/topic/python/answers/475531-smtplib-authentication-required-error
# for the tip
server.ehlo()
server.login(emailname_in, password_in)
server.sendmail(msg.getaddr('From')[1], emailname_in, fullmsg)
server.quit()
count[0] = count[0] + 1
print " %d Forwarded a message from: %s" % (count[0], msg.getaddr('From')[1])
msg = mb.next()
print "\nDone. Stats: %d success %d error %d skipped." % (count[0], count[1], count[2])
def usage():
print 'Usage: gml.py [mbox or maildir] [mbox file or maildir path] [gmail address] [gmail password] [Optional SMTP Server]'
print 'Exmpl: gml.py mbox "c:\mail\Inbox" [email protected] password'
print 'Exmpl: gml.py maildir "c:\mail\Inbox\" [email protected] password gsmtp171.google.com\n'
sys.exit()
if __name__ == '__main__':
main ()<|fim▁end|> | # You should have received a copy of the GNU General Public License
# along with GML; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
<|file_name|>ITConnectionAccessControl.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.integration.accesscontrol;
import com.sun.jersey.api.client.ClientResponse;
import org.apache.nifi.connectable.ConnectableType;
import org.apache.nifi.integration.util.NiFiTestAuthorizer;
import org.apache.nifi.integration.util.NiFiTestUser;
import org.apache.nifi.web.api.dto.ConnectableDTO;
import org.apache.nifi.web.api.dto.ConnectionDTO;
import org.apache.nifi.web.api.dto.RevisionDTO;
import org.apache.nifi.web.api.dto.flow.FlowDTO;
import org.apache.nifi.web.api.entity.ConnectionEntity;
import org.apache.nifi.web.api.entity.ProcessGroupFlowEntity;
import org.apache.nifi.web.api.entity.ProcessorEntity;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import static org.apache.nifi.integration.accesscontrol.AccessControlHelper.NONE_CLIENT_ID;
import static org.apache.nifi.integration.accesscontrol.AccessControlHelper.READ_CLIENT_ID;
import static org.apache.nifi.integration.accesscontrol.AccessControlHelper.READ_WRITE_CLIENT_ID;
import static org.apache.nifi.integration.accesscontrol.AccessControlHelper.WRITE_CLIENT_ID;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
* Access control test for connections.
*/
public class ITConnectionAccessControl {
private static AccessControlHelper helper;
@BeforeClass
public static void setup() throws Exception {
helper = new AccessControlHelper();
}
/**
* Ensures the READ user can get a connection.
*
* @throws Exception ex
*/
@Test
public void testReadUserGetConnection() throws Exception {
final ConnectionEntity entity = getRandomConnection(helper.getReadUser());
assertTrue(entity.getPermissions().getCanRead());
assertFalse(entity.getPermissions().getCanWrite());
assertNotNull(entity.getComponent());
}
/**
* Ensures the READ WRITE user can get a connection.
*
* @throws Exception ex
*/
@Test
public void testReadWriteUserGetConnection() throws Exception {
final ConnectionEntity entity = getRandomConnection(helper.getReadWriteUser());
assertTrue(entity.getPermissions().getCanRead());
assertTrue(entity.getPermissions().getCanWrite());
assertNotNull(entity.getComponent());
}
/**
* Ensures the WRITE user can get a connection.
*
* @throws Exception ex
*/
@Test
public void testWriteUserGetConnection() throws Exception {
final ConnectionEntity entity = getRandomConnection(helper.getWriteUser());
assertFalse(entity.getPermissions().getCanRead());
assertTrue(entity.getPermissions().getCanWrite());
assertNull(entity.getComponent());
}
/**
* Ensures the NONE user can get a connection.
*
* @throws Exception ex
*/
@Test
public void testNoneUserGetConnection() throws Exception {
final ConnectionEntity entity = getRandomConnection(helper.getNoneUser());
assertFalse(entity.getPermissions().getCanRead());
assertFalse(entity.getPermissions().getCanWrite());
assertNull(entity.getComponent());
}
/**
* Ensures the READ user cannot put a connection.
*
* @throws Exception ex
*/
@Test
public void testReadUserPutConnection() throws Exception {
final ConnectionEntity entity = getRandomConnection(helper.getReadUser());
assertTrue(entity.getPermissions().getCanRead());
assertFalse(entity.getPermissions().getCanWrite());
assertNotNull(entity.getComponent());
// attempt update the name
entity.getRevision().setClientId(READ_CLIENT_ID);
entity.getComponent().setName("Updated Name");
// perform the request
final ClientResponse response = updateConnection(helper.getReadUser(), entity);
// ensure forbidden response
assertEquals(403, response.getStatus());
}
/**
* Ensures the READ_WRITE user can put a connection.
*
* @throws Exception ex
*/
@Test
public void testReadWriteUserPutConnection() throws Exception {
final ConnectionEntity entity = getRandomConnection(helper.getReadWriteUser());
assertTrue(entity.getPermissions().getCanRead());
assertTrue(entity.getPermissions().getCanWrite());
assertNotNull(entity.getComponent());
final String updatedName = "Updated Name";
// attempt to update the name
final long version = entity.getRevision().getVersion();
entity.getRevision().setClientId(AccessControlHelper.READ_WRITE_CLIENT_ID);
entity.getComponent().setName(updatedName);
// perform the request
final ClientResponse response = updateConnection(helper.getReadWriteUser(), entity);
// ensure successful response
assertEquals(200, response.getStatus());
// get the response
final ConnectionEntity responseEntity = response.getEntity(ConnectionEntity.class);
// verify
assertEquals(READ_WRITE_CLIENT_ID, responseEntity.getRevision().getClientId());
assertEquals(version + 1, responseEntity.getRevision().getVersion().longValue());
assertEquals(updatedName, responseEntity.getComponent().getName());
}
/**
     * Ensures the READ_WRITE user can put a connection through an inherited policy.
*
* @throws Exception ex
*/
@Test
public void testReadWriteUserPutConnectionThroughInheritedPolicy() throws Exception {
final ConnectionEntity entity = createConnection(NiFiTestAuthorizer.NO_POLICY_COMPONENT_NAME);
final String updatedName = "Updated name";
// attempt to update the name
final long version = entity.getRevision().getVersion();
entity.getRevision().setClientId(READ_WRITE_CLIENT_ID);
entity.getComponent().setName(updatedName);
// perform the request
final ClientResponse response = updateConnection(helper.getReadWriteUser(), entity);
// ensure successful response
assertEquals(200, response.getStatus());
// get the response
final ConnectionEntity responseEntity = response.getEntity(ConnectionEntity.class);
// verify
assertEquals(AccessControlHelper.READ_WRITE_CLIENT_ID, responseEntity.getRevision().getClientId());
assertEquals(version + 1, responseEntity.getRevision().getVersion().longValue());
assertEquals(updatedName, responseEntity.getComponent().getName());
}
/**
* Ensures the WRITE user can put a connection.
*
* @throws Exception ex
*/
@Test
public void testWriteUserPutConnection() throws Exception {
final ConnectionEntity entity = getRandomConnection(helper.getWriteUser());
assertFalse(entity.getPermissions().getCanRead());
assertTrue(entity.getPermissions().getCanWrite());
assertNull(entity.getComponent());
final String updatedName = "Updated Name";
// attempt to update the name
final ConnectionDTO requestDto = new ConnectionDTO();
requestDto.setId(entity.getId());
requestDto.setName(updatedName);
final long version = entity.getRevision().getVersion();
final RevisionDTO requestRevision = new RevisionDTO();
requestRevision.setVersion(version);
requestRevision.setClientId(AccessControlHelper.WRITE_CLIENT_ID);
final ConnectionEntity requestEntity = new ConnectionEntity();
requestEntity.setId(entity.getId());
requestEntity.setRevision(requestRevision);
requestEntity.setComponent(requestDto);
// perform the request
final ClientResponse response = updateConnection(helper.getWriteUser(), requestEntity);
// ensure successful response
assertEquals(200, response.getStatus());
// get the response
final ConnectionEntity responseEntity = response.getEntity(ConnectionEntity.class);
// verify
assertEquals(WRITE_CLIENT_ID, responseEntity.getRevision().getClientId());
assertEquals(version + 1, responseEntity.getRevision().getVersion().longValue());
}
/**
* Ensures the NONE user cannot put a connection.
*
* @throws Exception ex
*/
@Test
public void testNoneUserPutConnection() throws Exception {
final ConnectionEntity entity = getRandomConnection(helper.getNoneUser());
assertFalse(entity.getPermissions().getCanRead());
assertFalse(entity.getPermissions().getCanWrite());
assertNull(entity.getComponent());
final String updatedName = "Updated Name";
// attempt to update the name
final ConnectionDTO requestDto = new ConnectionDTO();
requestDto.setId(entity.getId());
requestDto.setName(updatedName);
final long version = entity.getRevision().getVersion();
final RevisionDTO requestRevision = new RevisionDTO();
requestRevision.setVersion(version);
requestRevision.setClientId(AccessControlHelper.NONE_CLIENT_ID);
final ConnectionEntity requestEntity = new ConnectionEntity();
requestEntity.setId(entity.getId());
requestEntity.setRevision(requestRevision);
requestEntity.setComponent(requestDto);
// perform the request
final ClientResponse response = updateConnection(helper.getNoneUser(), requestEntity);
// ensure forbidden response
assertEquals(403, response.getStatus());
}
/**
* Ensures the READ user cannot delete a connection.
*
* @throws Exception ex
*/
@Test
public void testReadUserDeleteConnection() throws Exception {
verifyDelete(helper.getReadUser(), AccessControlHelper.READ_CLIENT_ID, 403);
}
/**
* Ensures the READ WRITE user can delete a connection.
*
* @throws Exception ex
*/
@Test
public void testReadWriteUserDeleteConnection() throws Exception {
verifyDelete(helper.getReadWriteUser(), AccessControlHelper.READ_WRITE_CLIENT_ID, 200);
}
/**
* Ensures the WRITE user can delete a connection.
*
* @throws Exception ex
*/
@Test
public void testWriteUserDeleteConnection() throws Exception {
verifyDelete(helper.getWriteUser(), AccessControlHelper.WRITE_CLIENT_ID, 200);
}
/**
     * Ensures the NONE user cannot delete a connection.
*
* @throws Exception ex
*/
@Test
public void testNoneUserDeleteConnection() throws Exception {
verifyDelete(helper.getNoneUser(), NONE_CLIENT_ID, 403);
}
private ConnectionEntity getRandomConnection(final NiFiTestUser user) throws Exception {
final String url = helper.getBaseUrl() + "/flow/process-groups/root";
// get the connections
final ClientResponse response = user.testGet(url);
// ensure the response was successful
assertEquals(200, response.getStatus());
// unmarshal
final ProcessGroupFlowEntity flowEntity = response.getEntity(ProcessGroupFlowEntity.class);
final FlowDTO flowDto = flowEntity.getProcessGroupFlow().getFlow();
final Set<ConnectionEntity> connections = flowDto.getConnections();
        // ensure there is at least one connection
assertFalse(connections.isEmpty());
// use the first connection as the target
Iterator<ConnectionEntity> connectionIter = connections.iterator();
assertTrue(connectionIter.hasNext());
return connectionIter.next();
}
private ClientResponse updateConnection(final NiFiTestUser user, final ConnectionEntity entity) throws Exception {
final String url = helper.getBaseUrl() + "/connections/" + entity.getId();
// perform the request
return user.testPut(url, entity);
}
private ConnectionEntity createConnection(final String name) throws Exception {
String url = helper.getBaseUrl() + "/process-groups/root/connections";
// get two processors
final ProcessorEntity one = ITProcessorAccessControl.createProcessor(helper, "one");
final ProcessorEntity two = ITProcessorAccessControl.createProcessor(helper, "two");
// create the source connectable
ConnectableDTO source = new ConnectableDTO();
source.setId(one.getId());
source.setGroupId(one.getComponent().getParentGroupId());
source.setType(ConnectableType.PROCESSOR.name());
// create the target connectable
ConnectableDTO target = new ConnectableDTO();
target.setId(two.getId());
target.setGroupId(two.getComponent().getParentGroupId());
target.setType(ConnectableType.PROCESSOR.name());
// create the relationships
Set<String> relationships = new HashSet<>();
relationships.add("success");
// create the connection
ConnectionDTO connection = new ConnectionDTO();
connection.setName(name);
connection.setSource(source);
connection.setDestination(target);
connection.setSelectedRelationships(relationships);
// create the revision
final RevisionDTO revision = new RevisionDTO();
revision.setClientId(READ_WRITE_CLIENT_ID);
revision.setVersion(0L);
// create the entity body
ConnectionEntity entity = new ConnectionEntity();
entity.setRevision(revision);
entity.setComponent(connection);
// perform the request
ClientResponse response = helper.getReadWriteUser().testPost(url, entity);
// ensure the request is successful
assertEquals(201, response.getStatus());
// get the entity body
entity = response.getEntity(ConnectionEntity.class);
// verify creation
connection = entity.getComponent();
assertEquals(name, connection.getName());
// get the connection<|fim▁hole|> }
private void verifyDelete(final NiFiTestUser user, final String clientId, final int responseCode) throws Exception {
final ConnectionEntity entity = createConnection("Copy");
// create the entity body
final Map<String, String> queryParams = new HashMap<>();
queryParams.put("version", String.valueOf(entity.getRevision().getVersion()));
queryParams.put("clientId", clientId);
// perform the request
ClientResponse response = user.testDelete(entity.getUri(), queryParams);
        // ensure the response status code matches the expected outcome
assertEquals(responseCode, response.getStatus());
}
@AfterClass
public static void cleanup() throws Exception {
helper.cleanup();
}
}<|fim▁end|> | return entity; |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>pub type c_long = i32;
pub type c_ulong = u32;
pub type nlink_t = u32;
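// s! is libc's internal struct-definition macro (see src/macros.rs); it adds
// the derives/impls shared across all targets.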
s! {
pub struct pthread_attr_t {
__size: [u32; 9]
}
pub struct sigset_t {
__val: [::c_ulong; 32],
}
pub struct msghdr {
pub msg_name: *mut ::c_void,
pub msg_namelen: ::socklen_t,
pub msg_iov: *mut ::iovec,
pub msg_iovlen: ::c_int,
pub msg_control: *mut ::c_void,
pub msg_controllen: ::socklen_t,
pub msg_flags: ::c_int,
}
pub struct sem_t {
__val: [::c_int; 4],
}
}
pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32;
pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24;
cfg_if! {
if #[cfg(any(target_arch = "x86"))] {
mod x86;
pub use self::x86::*;
} else if #[cfg(any(target_arch = "mips"))] {
mod mips;
pub use self::mips::*;
} else if #[cfg(any(target_arch = "arm"))] {
mod arm;
pub use self::arm::*;
} else if #[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))] {<|fim▁hole|> // For the time being asmjs and wasm32 are the same, and both
// backed by identical emscripten runtimes
mod asmjs;
pub use self::asmjs::*;
} else {
// Unknown target_arch
}
}<|fim▁end|> | |
<|file_name|>transport.go<|end_file_name|><|fim▁begin|>package memberlist
import (
"fmt"
"net"
"time"
)
// Packet is used to provide some metadata about incoming packets from peers
// over a packet connection, as well as the packet payload.
type Packet struct {
// Buf has the raw contents of the packet.
Buf []byte
// From has the address of the peer. This is an actual net.Addr so we
// can expose some concrete details about incoming packets.
From net.Addr
// Timestamp is the time when the packet was received. This should be
// taken as close as possible to the actual receipt time to help make an
// accurate RTT measurement during probes.
Timestamp time.Time
}
// Transport is used to abstract over communicating with other peers. The packet
// interface is assumed to be best-effort and the stream interface is assumed to
// be reliable.
type Transport interface {
// FinalAdvertiseAddr is given the user's configured values (which
// might be empty) and returns the desired IP and port to advertise to
// the rest of the cluster.
FinalAdvertiseAddr(ip string, port int) (net.IP, int, error)
// WriteTo is a packet-oriented interface that fires off the given
// payload to the given address in a connectionless fashion. This should
// return a time stamp that's as close as possible to when the packet
// was transmitted to help make accurate RTT measurements during probes.
//
// This is similar to net.PacketConn, though we didn't want to expose
// that full set of required methods to keep assumptions about the
// underlying plumbing to a minimum. We also treat the address here as a
// string, similar to Dial, so it's network neutral, so this usually is
// in the form of "host:port".
WriteTo(b []byte, addr string) (time.Time, error)
// PacketCh returns a channel that can be read to receive incoming
// packets from other peers. How this is set up for listening is left as
// an exercise for the concrete transport implementations.
PacketCh() <-chan *Packet
// DialTimeout is used to create a connection that allows us to perform
// two-way communication with a peer. This is generally more expensive
// than packet connections so is used for more infrequent operations
// such as anti-entropy or fallback probes if the packet-oriented probe
// failed.
DialTimeout(addr string, timeout time.Duration) (net.Conn, error)
// StreamCh returns a channel that can be read to handle incoming stream
// connections from other peers. How this is set up for listening is
// left as an exercise for the concrete transport implementations.
StreamCh() <-chan net.Conn
// Shutdown is called when memberlist is shutting down; this gives the
// transport a chance to clean up any listeners.
Shutdown() error
}
type Address struct {
// Addr is a network address as a string, similar to Dial. This usually is
// in the form of "host:port". This is required.
Addr string
// Name is the name of the node being addressed. This is optional but
// transports may require it.
Name string
}
func (a *Address) String() string {
if a.Name != "" {
return fmt.Sprintf("%s (%s)", a.Name, a.Addr)
}
return a.Addr
}
// IngestionAwareTransport is not used.
//
// Deprecated: IngestionAwareTransport is not used and may be removed in a future
// version. Define the interface locally instead of referencing this exported
// interface.
type IngestionAwareTransport interface {
IngestPacket(conn net.Conn, addr net.Addr, now time.Time, shouldClose bool) error
IngestStream(conn net.Conn) error
}
type NodeAwareTransport interface {
Transport
WriteToAddress(b []byte, addr Address) (time.Time, error)
DialAddressTimeout(addr Address, timeout time.Duration) (net.Conn, error)
}
type shimNodeAwareTransport struct {
Transport
}
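// Compile-time check that shimNodeAwareTransport implements NodeAwareTransport.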
var _ NodeAwareTransport = (*shimNodeAwareTransport)(nil)
func (t *shimNodeAwareTransport) WriteToAddress(b []byte, addr Address) (time.Time, error) {
return t.WriteTo(b, addr.Addr)
}
func (t *shimNodeAwareTransport) DialAddressTimeout(addr Address, timeout time.Duration) (net.Conn, error) {
return t.DialTimeout(addr.Addr, timeout)<|fim▁hole|><|fim▁end|> | } |
<|file_name|>day14.rs<|end_file_name|><|fim▁begin|>extern crate crypto;
use std::collections::HashMap;
use crypto::md5::Md5;
use crypto::digest::Digest;
struct HashCache<'a> {
base: String,
hashes: HashMap<String, String>,
    hasher: &'a dyn Fn(&str) -> String,
}
impl<'a> HashCache<'a> {
    fn new(base: &str, f: &'a dyn Fn(&str) -> String) -> Self {
HashCache{
base: base.to_owned(),
hashes: HashMap::new(),
hasher: f,
}
}
fn get(&mut self, index: usize) -> String {
let input = format!("{}{}", self.base, index);
if self.hashes.get(&input).is_none() {
let key = (self.hasher)(&input);
self.hashes.insert(input.clone(), key);
}
self.hashes[&input].clone()
}
}
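// If `key` contains three identical characters in a row, return the run of five
// of that character to search for in later hashes (only the first triple counts).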
fn has_triple(key: &str) -> Option<String> {
let v : Vec<_> = key.chars().collect();
let mut windows = v.windows(3);
while let Some(x) = windows.next() {
let (a, b, c) = (x[0], x[1], x[2]);
if a == b && b == c {
return Some(format!("{}{}{}{}{}", a, a, a, a, a))
}
}
None
}
fn find_keys(salt: &str, num_keys: usize, hasher: &dyn Fn(&str) -> String) -> Vec<(usize, String)> {
let mut ret = Vec::new();
let mut index = 0;
let mut hc = HashCache::new(salt, hasher);
while ret.len() < num_keys {
let key = hc.get(index);
if let Some(t) = has_triple(&key) {
            for i in 1..1001 { // the puzzle checks the next 1000 hashes, i.e. index+1 ..= index+1000
if hc.get(index + i).contains(&t) {
ret.push((index, key));
break
}
}
}
index += 1;
}
ret
}
fn h1(salt: &str) -> String {
let mut hasher = Md5::new();
hasher.input(salt.as_bytes());
hasher.result_str()
}
fn h2016(salt: &str) -> String {
let mut hasher = Md5::new();
let mut key = h1(salt);
for _ in 0..2016 {
hasher.input(key.as_bytes());
key = hasher.result_str();
hasher.reset();<|fim▁hole|> }
key
}
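// Part 1 hashes each candidate once (h1); part 2 "stretches" each hash with
// 2016 additional MD5 rounds (h2016).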
fn main() {
let keys = find_keys("jlmsuwbz", 64, &h1);
println!("1: {}", keys.last().unwrap().0);
let keys = find_keys("jlmsuwbz", 64, &h2016);
println!("2: {}", keys.last().unwrap().0);
}<|fim▁end|> | |
<|file_name|>grpcProxy.py<|end_file_name|><|fim▁begin|># coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import argparse
import os
import simplejson as json
import grpc
from google.protobuf.json_format import MessageToJson
from qrl.core import config
from qrl.core.AddressState import AddressState
from qrl.crypto.xmss import XMSS
from qrl.core.txs.Transaction import Transaction
from qrl.core.txs.TransferTransaction import TransferTransaction
from pyqrllib.pyqrllib import hstr2bin, bin2hstr
from qrl.generated import qrl_pb2_grpc, qrl_pb2, qrlmining_pb2, qrlmining_pb2_grpc
from flask import Flask, Response, request
from jsonrpc.backend.flask import api
app = Flask(__name__)
def read_slaves(slaves_filename):
with open(slaves_filename, 'r') as f:
slave_data = json.load(f)
slave_data[0] = bytes(hstr2bin(slave_data[0]))
return slave_data
def get_addr_state(addr: bytes) -> AddressState:
stub = get_public_stub()
response = stub.GetAddressState(request=qrl_pb2.GetAddressStateReq(address=addr))
return AddressState(response.state)
def set_unused_ots_key(xmss, addr_state, start=0):
for i in range(start, 2 ** xmss.height):
if not addr_state.ots_key_reuse(i):
xmss.set_ots_index(i)
return True
return False
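# Slave access_type semantics used below: -1 appears to mean the slave key is
# not yet registered on-chain (so the stored slave transaction is re-broadcast),
# 0 grants payment access, and anything else is treated as insufficient.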
def valid_payment_permission(public_stub, master_address_state, payment_xmss, json_slave_txn):
access_type = master_address_state.get_slave_permission(payment_xmss.pk)
if access_type == -1:
tx = Transaction.from_json(json_slave_txn)
public_stub.PushTransaction(request=qrl_pb2.PushTransactionReq(transaction_signed=tx.pbdata))
return None
if access_type == 0:
return True
return False
def get_unused_payment_xmss(public_stub):
global payment_slaves
global payment_xmss
master_address = payment_slaves[0]
master_address_state = get_addr_state(master_address)
if payment_xmss:
addr_state = get_addr_state(payment_xmss.address)
if set_unused_ots_key(payment_xmss, addr_state, payment_xmss.ots_index):
if valid_payment_permission(public_stub, master_address_state, payment_xmss, payment_slaves[2]):
return payment_xmss
else:
payment_xmss = None<|fim▁hole|> for slave_seed in payment_slaves[1]:
xmss = XMSS.from_extended_seed(slave_seed)
addr_state = get_addr_state(xmss.address)
if set_unused_ots_key(xmss, addr_state): # Unused ots_key_found
payment_xmss = xmss
unused_ots_found = True
break
if not unused_ots_found: # Unused ots_key_found
return None
if not valid_payment_permission(public_stub, master_address_state, payment_xmss, payment_slaves[2]):
return None
return payment_xmss
@app.route('/api/<api_method_name>')
def api_proxy(api_method_name):
"""
Proxy JSON RPC requests to the gRPC server as well as converts back gRPC response
to JSON.
:param api_method_name:
:return:
"""
stub = qrl_pb2_grpc.PublicAPIStub(grpc.insecure_channel('{}:{}'.format(config.user.public_api_host,
config.user.public_api_port)))
public_api = qrl_pb2.DESCRIPTOR.services_by_name['PublicAPI']
api_method = public_api.FindMethodByName(api_method_name)
api_request = getattr(qrl_pb2, api_method.input_type.name)()
for arg in request.args:
if arg not in api_method.input_type.fields_by_name:
            raise Exception('Invalid arg: %s' % arg)
data_type = type(getattr(api_request, arg))
if data_type == bool and request.args[arg].lower() == 'false':
continue
value = data_type(request.args.get(arg, type=data_type))
setattr(api_request, arg, value)
resp = getattr(stub, api_method_name)(api_request, timeout=10)
return Response(response=MessageToJson(resp, sort_keys=True), status=200, mimetype='application/json')
def get_mining_stub():
global mining_stub
return mining_stub
def get_public_stub():
global public_stub
return public_stub
@api.dispatcher.add_method
def getlastblockheader(height=0):
stub = get_mining_stub()
request = qrlmining_pb2.GetLastBlockHeaderReq(height=height)
grpc_response = stub.GetLastBlockHeader(request=request, timeout=10)
block_header = {
'difficulty': grpc_response.difficulty,
'height': grpc_response.height,
'timestamp': grpc_response.timestamp,
'reward': grpc_response.reward,
'hash': grpc_response.hash,
'depth': grpc_response.depth
}
resp = {
"block_header": block_header,
"status": "OK"
}
return resp
@api.dispatcher.add_method
def getblockheaderbyheight(height):
return getlastblockheader(height)
@api.dispatcher.add_method
def getblocktemplate(reserve_size, wallet_address):
stub = get_mining_stub()
request = qrlmining_pb2.GetBlockToMineReq(wallet_address=wallet_address.encode())
grpc_response = stub.GetBlockToMine(request=request, timeout=10)
resp = {
'blocktemplate_blob': grpc_response.blocktemplate_blob,
'difficulty': grpc_response.difficulty,
'height': grpc_response.height,
'reserved_offset': grpc_response.reserved_offset,
'seed_hash': grpc_response.seed_hash,
'status': 'OK'
}
return resp
@api.dispatcher.add_method
def submitblock(blob):
stub = get_mining_stub()
request = qrlmining_pb2.SubmitMinedBlockReq(blob=bytes(hstr2bin(blob)))
response = stub.SubmitMinedBlock(request=request, timeout=10)
if response.error:
raise Exception # Mining pool expected exception when block submission fails
return MessageToJson(response, sort_keys=True)
@api.dispatcher.add_method
def getblockminingcompatible(height):
stub = get_mining_stub()
request = qrlmining_pb2.GetBlockMiningCompatibleReq(height=height)
response = stub.GetBlockMiningCompatible(request=request, timeout=10)
return MessageToJson(response, sort_keys=True)
@api.dispatcher.add_method
def transfer(destinations, fee, mixin, unlock_time):
if len(destinations) > config.dev.transaction_multi_output_limit:
raise Exception('Payment Failed: Amount exceeds the allowed limit')
addrs_to = []
amounts = []
for tx in destinations:
addrs_to.append(bytes(hstr2bin(tx['address'][1:]))) # Skipping 'Q'
amounts.append(tx['amount'])
stub = get_public_stub()
xmss = get_unused_payment_xmss(stub)
if not xmss:
raise Exception('Payment Failed: No Unused Payment XMSS found')
tx = TransferTransaction.create(addrs_to=addrs_to,
amounts=amounts,
message_data=None,
fee=fee,
xmss_pk=xmss.pk,
master_addr=payment_slaves[0])
tx.sign(xmss)
response = stub.PushTransaction(request=qrl_pb2.PushTransactionReq(transaction_signed=tx.pbdata))
if response.error_code != 3:
        raise Exception('Transaction Submission Failed, Response Code: %s' % response.error_code)
response = {'tx_hash': bin2hstr(tx.txhash)}
return response
app.add_url_rule('/json_rpc', 'api', api.as_view(), methods=['POST'])
def parse_arguments():
parser = argparse.ArgumentParser(description='QRL node')
parser.add_argument('--qrldir', '-d', dest='qrl_dir', default=config.user.qrl_dir,
help="Use a different directory for node data/configuration")
parser.add_argument('--network-type', dest='network_type', choices=['mainnet', 'testnet'],
default='mainnet', required=False, help="Runs QRL Testnet Node")
return parser.parse_args()
def main():
args = parse_arguments()
qrl_dir_post_fix = ''
copy_files = []
if args.network_type == 'testnet':
qrl_dir_post_fix = '-testnet'
package_directory = os.path.dirname(os.path.abspath(__file__))
copy_files.append(os.path.join(package_directory, 'network/testnet/genesis.yml'))
copy_files.append(os.path.join(package_directory, 'network/testnet/config.yml'))
config.user.qrl_dir = os.path.expanduser(os.path.normpath(args.qrl_dir) + qrl_dir_post_fix)
config.create_path(config.user.qrl_dir, copy_files)
config.user.load_yaml(config.user.config_path)
global payment_slaves, payment_xmss
global mining_stub, public_stub
mining_stub = qrlmining_pb2_grpc.MiningAPIStub(grpc.insecure_channel('{0}:{1}'.format(config.user.mining_api_host,
config.user.mining_api_port)))
public_stub = qrl_pb2_grpc.PublicAPIStub(grpc.insecure_channel('{0}:{1}'.format(config.user.public_api_host,
config.user.public_api_port)))
payment_xmss = None
payment_slaves = read_slaves(config.user.mining_pool_payment_wallet_path)
app.run(host=config.user.grpc_proxy_host, port=config.user.grpc_proxy_port)
if __name__ == '__main__':
main()<|fim▁end|> |
if not payment_xmss:
unused_ots_found = False |
<|file_name|>extern-1.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at<|fim▁hole|>// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
// pretty-expanded FIXME #23616
extern fn f() {
}
pub fn main() {
}<|fim▁end|> | // http://rust-lang.org/COPYRIGHT.
// |
<|file_name|>sort_log_fields.go<|end_file_name|><|fim▁begin|>// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adjuster
import (
"github.com/uber/jaeger/model"
)
// SortLogFields returns an Adjuster that sorts the fields in the span logs.
// It puts the `event` field in the first position (if present), and sorts
// all other fields lexicographically.
//
// TODO: it should also do something about the "msg" field, maybe replace it
// with "event" field.
// TODO: we may also want to move "level" field (as in logging level) to an earlier
// place in the list. This adjuster needs some sort of config describing predefined
// field names/types and their relative order.
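//
// For example, a log with fields [duration, event, level] comes back as
// [event, duration, level] after adjustment.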
func SortLogFields() Adjuster {
return Func(func(trace *model.Trace) (*model.Trace, error) {
for _, span := range trace.Spans {
for _, log := range span.Logs {
// first move "event" field into the first position
offset := 0
for i, field := range log.Fields {
if field.Key == "event" && field.VType == model.StringType {
if i > 0 {
log.Fields[0], log.Fields[i] = log.Fields[i], log.Fields[0]
}
offset = 1
break
}
}<|fim▁hole|> }
}
}
return trace, nil
})
}<|fim▁end|> | // sort all remaining fields
if len(log.Fields) > 1 {
model.KeyValues(log.Fields[offset:]).Sort() |
<|file_name|>types_test.rs<|end_file_name|><|fim▁begin|>use super::*;
#[test]
fn plugins_new() {
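    // A freshly constructed Plugins carries no aliases and no plugins.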
let plugins = Plugins::new();<|fim▁hole|> assert!(plugins.aliases.is_none());
assert!(plugins.plugins.is_empty());
}<|fim▁end|> | |
<|file_name|>test_relative_fields.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import warnings
from django.core.checks import Error, Warning as DjangoWarning
from django.db import models
from django.db.models.fields.related import ForeignObject
from django.test import ignore_warnings
from django.test.testcases import SimpleTestCase, skipIfDBFeature
from django.test.utils import isolate_apps, override_settings
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.version import get_docs_version
@isolate_apps('invalid_models_tests')
class RelativeFieldTests(SimpleTestCase):
def test_valid_foreign_key_without_accessor(self):
class Target(models.Model):
# There would be a clash if Model.field installed an accessor.
model = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, models.CASCADE, related_name='+')
field = Model._meta.get_field('field')
errors = field.check()
self.assertEqual(errors, [])
@ignore_warnings(category=RemovedInDjango20Warning)
def test_valid_foreign_key_without_on_delete(self):
class Target(models.Model):
model = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, related_name='+')
def test_foreign_key_without_on_delete_warning(self):
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always') # prevent warnings from appearing as errors
class Target(models.Model):
model = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, related_name='+')
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
'on_delete will be a required arg for ForeignKey in Django '
'2.0. Set it to models.CASCADE on models and in existing '
'migrations if you want to maintain the current default '
'behavior. See https://docs.djangoproject.com/en/%s/ref/models/fields/'
'#django.db.models.ForeignKey.on_delete' % get_docs_version(),
)
def test_foreign_key_to_field_as_arg(self):
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always') # prevent warnings from appearing as errors
class Target(models.Model):
model = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, 'id')
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
"The signature for ForeignKey will change in Django 2.0. "
"Pass to_field='id' as a kwarg instead of as an arg."
)
def test_one_to_one_field_without_on_delete_warning(self):
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always') # prevent warnings from appearing as errors
class Target(models.Model):
model = models.IntegerField()
class Model(models.Model):
field = models.OneToOneField(Target, related_name='+')
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
'on_delete will be a required arg for OneToOneField in Django '
'2.0. Set it to models.CASCADE on models and in existing '
'migrations if you want to maintain the current default '
'behavior. See https://docs.djangoproject.com/en/%s/ref/models/fields/'
'#django.db.models.ForeignKey.on_delete' % get_docs_version(),
)
def test_one_to_one_field_to_field_as_arg(self):
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always') # prevent warnings from appearing as errors
class Target(models.Model):
model = models.IntegerField()
class Model(models.Model):
field = models.OneToOneField(Target, 'id')
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
"The signature for OneToOneField will change in Django 2.0. "
"Pass to_field='id' as a kwarg instead of as an arg."
)
def test_foreign_key_to_missing_model(self):
# Model names are resolved when a model is being created, so we cannot
# test relative fields in isolation and we need to attach them to a
# model.
class Model(models.Model):
foreign_key = models.ForeignKey('Rel1', models.CASCADE)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
("Field defines a relation with model 'Rel1', "
"which is either not installed, or is abstract."),
hint=None,
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
def test_foreign_key_to_isolate_apps_model(self):
"""
#25723 - Referenced model registration lookup should be run against the
field's model registry.
"""
class OtherModel(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('OtherModel', models.CASCADE)
field = Model._meta.get_field('foreign_key')
self.assertEqual(field.check(from_model=Model), [])
def test_many_to_many_to_missing_model(self):
class Model(models.Model):
m2m = models.ManyToManyField("Rel2")
field = Model._meta.get_field('m2m')
errors = field.check(from_model=Model)
expected = [
Error(
("Field defines a relation with model 'Rel2', "
"which is either not installed, or is abstract."),
hint=None,
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
def test_many_to_many_to_isolate_apps_model(self):
"""
#25723 - Referenced model registration lookup should be run against the
field's model registry.
"""
class OtherModel(models.Model):
pass
class Model(models.Model):
m2m = models.ManyToManyField('OtherModel')
field = Model._meta.get_field('m2m')
self.assertEqual(field.check(from_model=Model), [])
def test_many_to_many_with_useless_options(self):
class Model(models.Model):
name = models.CharField(max_length=20)
class ModelM2M(models.Model):
m2m = models.ManyToManyField(Model, null=True, validators=[''])
errors = ModelM2M.check()
field = ModelM2M._meta.get_field('m2m')
expected = [
DjangoWarning(
'null has no effect on ManyToManyField.',
hint=None,
obj=field,
id='fields.W340',
)
]
expected.append(
DjangoWarning(
'ManyToManyField does not support validators.',
hint=None,
obj=field,
id='fields.W341',
)
)
self.assertEqual(errors, expected)
def test_ambiguous_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
field = models.ManyToManyField('Person',
through="AmbiguousRelationship", related_name='tertiary')
class AmbiguousRelationship(models.Model):
            # Too many foreign keys to Person.
first_person = models.ForeignKey(Person, models.CASCADE, related_name="first")
second_person = models.ForeignKey(Person, models.CASCADE, related_name="second")
second_model = models.ForeignKey(Group, models.CASCADE)
field = Group._meta.get_field('field')
errors = field.check(from_model=Group)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Group.field', but it has more than one "
"foreign key to 'Person', which is ambiguous. You must specify "
"which foreign key Django should use via the through_fields "
"keyword argument."),
hint=('If you want to create a recursive relationship, use '
'ForeignKey("self", symmetrical=False, '
'through="AmbiguousRelationship").'),
obj=field,
id='fields.E335',
),
]
self.assertEqual(errors, expected)
def test_relationship_model_with_foreign_key_to_wrong_model(self):
class WrongModel(models.Model):
pass
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person',
through="InvalidRelationship")
class InvalidRelationship(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
wrong_foreign_key = models.ForeignKey(WrongModel, models.CASCADE)
# The last foreign key should point to Group model.
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Group.members', but it does not "
"have a foreign key to 'Group' or 'Person'."),
hint=None,
obj=InvalidRelationship,
id='fields.E336',
),
]
self.assertEqual(errors, expected)
def test_relationship_model_missing_foreign_key(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person',
through="InvalidRelationship")
class InvalidRelationship(models.Model):
group = models.ForeignKey(Group, models.CASCADE)
# No foreign key to Person
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Group.members', but it does not have "
"a foreign key to 'Group' or 'Person'."),
hint=None,
obj=InvalidRelationship,
id='fields.E336',
),
]
self.assertEqual(errors, expected)
def test_missing_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person',
through="MissingM2MModel")
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
("Field specifies a many-to-many relation through model "
"'MissingM2MModel', which has not been installed."),
hint=None,
obj=field,
id='fields.E331',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
def test_many_to_many_through_isolate_apps_model(self):
"""
#25723 - Through model registration lookup should be run against the
field's model registry.
"""
class GroupMember(models.Model):
person = models.ForeignKey('Person', models.CASCADE)
group = models.ForeignKey('Group', models.CASCADE)
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through='GroupMember')
field = Group._meta.get_field('members')
self.assertEqual(field.check(from_model=Group), [])
def test_symmetrical_self_referential_field(self):
class Person(models.Model):
# Implicit symmetrical=False.
friends = models.ManyToManyField('self', through="Relationship")
class Relationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_too_many_foreign_keys_in_self_referential_model(self):
class Person(models.Model):
friends = models.ManyToManyField('self',
through="InvalidRelationship", symmetrical=False)
class InvalidRelationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set_2")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set_2")
third = models.ForeignKey(Person, models.CASCADE, related_name="too_many_by_far")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Person.friends', but it has more than two "
"foreign keys to 'Person', which is ambiguous. You must specify "
"which two foreign keys Django should use via the through_fields "
"keyword argument."),
hint='Use through_fields to specify which two foreign keys Django should use.',
obj=InvalidRelationship,
id='fields.E333',
),
]
self.assertEqual(errors, expected)
def test_symmetric_self_reference_with_intermediate_table(self):
class Person(models.Model):
# Explicit symmetrical=True.
friends = models.ManyToManyField('self',
through="Relationship", symmetrical=True)
class Relationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_symmetric_self_reference_with_intermediate_table_and_through_fields(self):
"""
Using through_fields in a m2m with an intermediate model shouldn't
mask its incompatibility with symmetry.
"""
class Person(models.Model):
# Explicit symmetrical=True.
friends = models.ManyToManyField('self',
symmetrical=True,
through="Relationship",
through_fields=('first', 'second'))
class Relationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set")
referee = models.ForeignKey(Person, models.CASCADE, related_name="referred")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_abstract_model(self):
class AbstractModel(models.Model):
class Meta:
abstract = True
class Model(models.Model):
rel_string_foreign_key = models.ForeignKey('AbstractModel', models.CASCADE)
rel_class_foreign_key = models.ForeignKey(AbstractModel, models.CASCADE)
fields = [
Model._meta.get_field('rel_string_foreign_key'),
Model._meta.get_field('rel_class_foreign_key'),
]
expected_error = Error(
"Field defines a relation with model 'AbstractModel', "
"which is either not installed, or is abstract.",
id='fields.E300',
)
for field in fields:
expected_error.obj = field
errors = field.check()
self.assertEqual(errors, [expected_error])
def test_m2m_to_abstract_model(self):
class AbstractModel(models.Model):
class Meta:
abstract = True
class Model(models.Model):
rel_string_m2m = models.ManyToManyField('AbstractModel')
rel_class_m2m = models.ManyToManyField(AbstractModel)
fields = [
Model._meta.get_field('rel_string_m2m'),
Model._meta.get_field('rel_class_m2m'),
]
expected_error = Error(
"Field defines a relation with model 'AbstractModel', "
"which is either not installed, or is abstract.",
id='fields.E300',
)
for field in fields:
expected_error.obj = field
errors = field.check(from_model=Model)
self.assertEqual(errors, [expected_error])
def test_unique_m2m(self):
class Person(models.Model):
name = models.CharField(max_length=5)
class Group(models.Model):
members = models.ManyToManyField('Person', unique=True)
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
'ManyToManyFields cannot be unique.',
hint=None,
obj=field,
id='fields.E330',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_non_unique_field(self):
class Target(models.Model):
bad = models.IntegerField() # No unique=True
class Model(models.Model):
foreign_key = models.ForeignKey('Target', models.CASCADE, to_field='bad')
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
"'Target.bad' must set unique=True because it is referenced by a foreign key.",
hint=None,
obj=field,
id='fields.E311',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_non_unique_field_under_explicit_model(self):
class Target(models.Model):
bad = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, models.CASCADE, to_field='bad')
field = Model._meta.get_field('field')
errors = field.check()
expected = [
Error(
"'Target.bad' must set unique=True because it is referenced by a foreign key.",
hint=None,
obj=field,
id='fields.E311',
),
]
self.assertEqual(errors, expected)
def test_foreign_object_to_non_unique_fields(self):
class Person(models.Model):
# Note that both fields are not unique.
country_id = models.IntegerField()
city_id = models.IntegerField()
class MMembership(models.Model):
person_country_id = models.IntegerField()
person_city_id = models.IntegerField()
person = models.ForeignObject(Person,
on_delete=models.CASCADE,
from_fields=['person_country_id', 'person_city_id'],
to_fields=['country_id', 'city_id'])
field = MMembership._meta.get_field('person')
errors = field.check()
expected = [
Error(
"No subset of the fields 'country_id', 'city_id' on model 'Person' is unique.",
hint=(
"Add unique=True on any of those fields or add at least "
"a subset of them to a unique_together constraint."
),
obj=field,
id='fields.E310',
)
]
self.assertEqual(errors, expected)
def test_on_delete_set_null_on_non_nullable_field(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person', models.SET_NULL)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
'Field specifies on_delete=SET_NULL, but cannot be null.',
hint='Set null=True argument on the field, or change the on_delete rule.',
obj=field,
id='fields.E320',
),
]
self.assertEqual(errors, expected)
def test_on_delete_set_default_without_default_value(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person', models.SET_DEFAULT)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
'Field specifies on_delete=SET_DEFAULT, but has no default value.',
hint='Set a default value, or change the on_delete rule.',
obj=field,
id='fields.E321',
),
]
self.assertEqual(errors, expected)
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_nullable_primary_key(self):
class Model(models.Model):
field = models.IntegerField(primary_key=True, null=True)
field = Model._meta.get_field('field')
errors = field.check()
expected = [
Error(
'Primary keys must not have null=True.',
hint='Set null=False on the field, or remove primary_key=True argument.',
obj=field,
id='fields.E007',
),
]
self.assertEqual(errors, expected)
def test_not_swapped_model(self):
class SwappableModel(models.Model):
# A model that can be, but isn't swapped out. References to this
# model should *not* raise any validation error.
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(SwappableModel,
models.CASCADE,
related_name='explicit_fk')
implicit_fk = models.ForeignKey('invalid_models_tests.SwappableModel',
models.CASCADE,
related_name='implicit_fk')
explicit_m2m = models.ManyToManyField(SwappableModel,
related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappableModel',
related_name='implicit_m2m')
explicit_fk = Model._meta.get_field('explicit_fk')
self.assertEqual(explicit_fk.check(), [])
implicit_fk = Model._meta.get_field('implicit_fk')
self.assertEqual(implicit_fk.check(), [])
explicit_m2m = Model._meta.get_field('explicit_m2m')
self.assertEqual(explicit_m2m.check(from_model=Model), [])
implicit_m2m = Model._meta.get_field('implicit_m2m')
self.assertEqual(implicit_m2m.check(from_model=Model), [])
@override_settings(TEST_SWAPPED_MODEL='invalid_models_tests.Replacement')
def test_referencing_to_swapped_model(self):
class Replacement(models.Model):
pass
class SwappedModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(SwappedModel,
models.CASCADE,
related_name='explicit_fk')
implicit_fk = models.ForeignKey('invalid_models_tests.SwappedModel',
models.CASCADE,
related_name='implicit_fk')
explicit_m2m = models.ManyToManyField(SwappedModel,
related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappedModel',
related_name='implicit_m2m')
fields = [
Model._meta.get_field('explicit_fk'),
Model._meta.get_field('implicit_fk'),
Model._meta.get_field('explicit_m2m'),
Model._meta.get_field('implicit_m2m'),
]
expected_error = Error(
("Field defines a relation with the model "
"'invalid_models_tests.SwappedModel', which has been swapped out."),
hint="Update the relation to point at 'settings.TEST_SWAPPED_MODEL'.",
id='fields.E301',
)
for field in fields:
expected_error.obj = field
errors = field.check(from_model=Model)
self.assertEqual(errors, [expected_error])
def test_related_field_has_invalid_related_name(self):
digit = 0
illegal_non_alphanumeric = '!'
whitespace = '\t'
invalid_related_names = [
'%s_begins_with_digit' % digit,
'%s_begins_with_illegal_non_alphanumeric' % illegal_non_alphanumeric,
'%s_begins_with_whitespace' % whitespace,
'contains_%s_illegal_non_alphanumeric' % illegal_non_alphanumeric,
'contains_%s_whitespace' % whitespace,
'ends_with_with_illegal_non_alphanumeric_%s' % illegal_non_alphanumeric,
'ends_with_whitespace_%s' % whitespace,
'with', # a Python keyword
'related_name\n',
'',
]
# Python 2 crashes on non-ASCII strings.
if six.PY3:
invalid_related_names.append(',')
class Parent(models.Model):
pass
for invalid_related_name in invalid_related_names:
Child = type(str('Child_%s') % str(invalid_related_name), (models.Model,), {
'parent': models.ForeignKey('Parent', models.CASCADE, related_name=invalid_related_name),
'__module__': Parent.__module__,
})
field = Child._meta.get_field('parent')
errors = Child.check()
expected = [
Error(
"The name '%s' is invalid related_name for field Child_%s.parent"
% (invalid_related_name, invalid_related_name),
hint="Related name must be a valid Python identifier or end with a '+'",
obj=field,
id='fields.E306',
),
]
self.assertEqual(errors, expected)
def test_related_field_has_valid_related_name(self):
lowercase = 'a'
uppercase = 'A'
digit = 0
related_names = [
'%s_starts_with_lowercase' % lowercase,
            '%s_starts_with_uppercase' % uppercase,
'_starts_with_underscore',
'contains_%s_digit' % digit,
'ends_with_plus+',
'_',
'_+',
'+',
]
# Python 2 crashes on non-ASCII strings.
if six.PY3:
related_names.extend(['試', '試驗+'])
class Parent(models.Model):
pass
for related_name in related_names:
Child = type(str('Child_%s') % str(related_name), (models.Model,), {
'parent': models.ForeignKey('Parent', models.CASCADE, related_name=related_name),
'__module__': Parent.__module__,
})
errors = Child.check()
self.assertFalse(errors)
@isolate_apps('invalid_models_tests')
class AccessorClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', models.CASCADE))
def test_m2m_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_accessor_clash(self, target, relative):
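        # ``Target.model_set`` takes the name Django would give the reverse
        # accessor for ``Model.rel``, so the check must report fields.E302.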
class Another(models.Model):
pass
class Target(models.Model):
model_set = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.rel' clashes with field name 'Target.model_set'.",
hint=("Rename field 'Target.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_clash_between_accessors(self):
class Target(models.Model):
pass
class Model(models.Model):
foreign = models.ForeignKey(Target, models.CASCADE)
m2m = models.ManyToManyField(Target)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign' clashes with reverse accessor for 'Model.m2m'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.foreign' or 'Model.m2m'."),
obj=Model._meta.get_field('foreign'),
id='fields.E304',
),
Error(
"Reverse accessor for 'Model.m2m' clashes with reverse accessor for 'Model.foreign'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m' or 'Model.foreign'."),
obj=Model._meta.get_field('m2m'),
id='fields.E304',
),
]
self.assertEqual(errors, expected)
def test_m2m_to_m2m_with_inheritance(self):
""" Ref #22047. """
class Target(models.Model):
pass
class Model(models.Model):
children = models.ManyToManyField('Child',
related_name="m2m_clash", related_query_name="no_clash")
class Parent(models.Model):
m2m_clash = models.ManyToManyField('Target')
class Child(Parent):
pass
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.children' clashes with field name 'Child.m2m_clash'.",
hint=("Rename field 'Child.m2m_clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.children'."),
obj=Model._meta.get_field('children'),
id='fields.E302',
)
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ReverseQueryNameClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', models.CASCADE))
def test_m2m_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_reverse_query_name_clash(self, target, relative):
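        # ``Target.model`` takes the default reverse query name for
        # ``Model.rel``, so the check must report fields.E303.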
class Another(models.Model):
pass
class Target(models.Model):
model = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.model'.",
hint=("Rename field 'Target.model', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ExplicitRelatedNameClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', models.CASCADE, related_name='clash'))
def test_fk_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another', models.CASCADE),<|fim▁hole|> relative=models.ForeignKey('Target', models.CASCADE, related_name='clash'))
def test_fk_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', models.CASCADE, related_name='clash'))
def test_m2m_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target', related_name='clash'))
def _test_explicit_related_name_clash(self, target, relative):
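        # ``related_name='clash'`` collides with the existing field
        # ``Target.clash``, so both fields.E302 and fields.E303 are expected.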
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.rel' clashes with field name 'Target.clash'.",
hint=("Rename field 'Target.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.",
hint=("Rename field 'Target.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ExplicitRelatedQueryNameClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target',
models.CASCADE,
related_query_name='clash'))
def test_fk_to_fk(self):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target',
models.CASCADE,
related_query_name='clash'))
def test_fk_to_m2m(self):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target',
models.CASCADE,
related_query_name='clash'))
def test_m2m_to_integer(self):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target',
related_query_name='clash'))
def test_m2m_to_fk(self):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target',
related_query_name='clash'))
def test_m2m_to_m2m(self):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target',
related_query_name='clash'))
def _test_explicit_related_query_name_clash(self, target, relative):
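        # ``related_query_name='clash'`` collides with ``Target.clash``; only
        # the reverse query name clashes, so just fields.E303 is expected.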
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.",
hint=("Rename field 'Target.clash', or add/change a related_name "
"argument to the definition for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class SelfReferentialM2MClashTests(SimpleTestCase):
def test_clash_between_accessors(self):
class Model(models.Model):
first_m2m = models.ManyToManyField('self', symmetrical=False)
second_m2m = models.ManyToManyField('self', symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.first_m2m' clashes with reverse accessor for 'Model.second_m2m'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.first_m2m' or 'Model.second_m2m'."),
obj=Model._meta.get_field('first_m2m'),
id='fields.E304',
),
Error(
"Reverse accessor for 'Model.second_m2m' clashes with reverse accessor for 'Model.first_m2m'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.second_m2m' or 'Model.first_m2m'."),
obj=Model._meta.get_field('second_m2m'),
id='fields.E304',
),
]
self.assertEqual(errors, expected)
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ManyToManyField("self", symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.",
hint=("Rename field 'Model.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.model_set'."),
obj=Model._meta.get_field('model_set'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ManyToManyField("self", symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.model' clashes with field name 'Model.model'.",
hint=("Rename field 'Model.model', or add/change a related_name "
"argument to the definition for field 'Model.model'."),
obj=Model._meta.get_field('model'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.IntegerField()
m2m = models.ManyToManyField("self",
symmetrical=False, related_name='clash')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.m2m' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.m2m'."),
obj=Model._meta.get_field('m2m'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.m2m' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.m2m'."),
obj=Model._meta.get_field('m2m'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_valid_model(self):
class Model(models.Model):
first = models.ManyToManyField("self",
symmetrical=False, related_name='first_accessor')
second = models.ManyToManyField("self",
symmetrical=False, related_name='second_accessor')
errors = Model.check()
self.assertEqual(errors, [])
@isolate_apps('invalid_models_tests')
class SelfReferentialFKClashTests(SimpleTestCase):
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ForeignKey("Model", models.CASCADE)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.",
hint=("Rename field 'Model.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.model_set'."),
obj=Model._meta.get_field('model_set'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ForeignKey("Model", models.CASCADE)
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.model' clashes with field name 'Model.model'.",
hint=("Rename field 'Model.model', or add/change "
"a related_name argument to the definition "
"for field 'Model.model'."),
obj=Model._meta.get_field('model'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.CharField(max_length=10)
foreign = models.ForeignKey("Model", models.CASCADE, related_name='clash')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.foreign'."),
obj=Model._meta.get_field('foreign'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.foreign' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.foreign'."),
obj=Model._meta.get_field('foreign'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ComplexClashTests(SimpleTestCase):
# New tests should not be included here, because this is a single,
# self-contained sanity check, not a test of everything.
def test_complex_clash(self):
class Target(models.Model):
tgt_safe = models.CharField(max_length=10)
clash = models.CharField(max_length=10)
model = models.CharField(max_length=10)
clash1_set = models.CharField(max_length=10)
class Model(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target, models.CASCADE, related_name='id')
foreign_2 = models.ForeignKey(Target, models.CASCADE, related_name='src_safe')
m2m_1 = models.ManyToManyField(Target, related_name='id')
m2m_2 = models.ManyToManyField(Target, related_name='src_safe')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.foreign_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.foreign_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.foreign_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E303',
),
Error(
"Reverse accessor for 'Model.foreign_1' clashes with reverse accessor for 'Model.m2m_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_1' or 'Model.m2m_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.foreign_1' clashes with reverse query name for 'Model.m2m_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_1' or 'Model.m2m_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.foreign_2' clashes with reverse accessor for 'Model.m2m_2'.",
hint=("Add or change a related_name argument "
"to the definition for 'Model.foreign_2' or 'Model.m2m_2'."),
obj=Model._meta.get_field('foreign_2'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.foreign_2' clashes with reverse query name for 'Model.m2m_2'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_2' or 'Model.m2m_2'."),
obj=Model._meta.get_field('foreign_2'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.m2m_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.m2m_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.m2m_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.m2m_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E303',
),
Error(
"Reverse accessor for 'Model.m2m_1' clashes with reverse accessor for 'Model.foreign_1'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_1' or 'Model.foreign_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.m2m_1' clashes with reverse query name for 'Model.foreign_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.m2m_1' or 'Model.foreign_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.m2m_2' clashes with reverse accessor for 'Model.foreign_2'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_2' or 'Model.foreign_2'."),
obj=Model._meta.get_field('m2m_2'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.m2m_2' clashes with reverse query name for 'Model.foreign_2'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_2' or 'Model.foreign_2'."),
obj=Model._meta.get_field('m2m_2'),
id='fields.E305',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class M2mThroughFieldsTests(SimpleTestCase):
def test_m2m_field_argument_validation(self):
"""
Tests that ManyToManyField accepts the ``through_fields`` kwarg
only if an intermediary table is specified.
"""
class Fan(models.Model):
pass
with self.assertRaisesMessage(ValueError, 'Cannot specify through_fields without a through model'):
models.ManyToManyField(Fan, through_fields=('f1', 'f2'))
def test_invalid_order(self):
"""
Tests that mixing up the order of link fields to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=('invitee', 'event'))
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
("'Invitation.invitee' is not a foreign key to 'Event'."),
hint="Did you mean one of the following foreign keys to 'Event': event?",
obj=field,
id='fields.E339'),
Error(
("'Invitation.event' is not a foreign key to 'Fan'."),
hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?",
obj=field,
id='fields.E339'),
]
self.assertEqual(expected, errors)
def test_invalid_field(self):
"""
Tests that providing invalid field names to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(
Fan,
through='Invitation',
through_fields=('invalid_field_1', 'invalid_field_2'),
)
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
"The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_1'.",
hint="Did you mean one of the following foreign keys to 'Event': event?",
obj=field,
id='fields.E338'),
Error(
"The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_2'.",
hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?",
obj=field,
id='fields.E338'),
]
self.assertEqual(expected, errors)
def test_explicit_field_names(self):
"""
Tests that if ``through_fields`` kwarg is given, it must specify both
link fields of the intermediary table.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=(None, 'invitee'))
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
"Field specifies 'through_fields' but does not provide the names "
"of the two link fields that should be used for the relation "
"through model 'invalid_models_tests.Invitation'.",
hint=("Make sure you specify 'through_fields' as "
"through_fields=('field1', 'field2')"),
obj=field,
id='fields.E337')]
self.assertEqual(expected, errors)
def test_superset_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b', 'c'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b'),
to_fields=('a', 'b'),
related_name='children',
)
field = Child._meta.get_field('parent')
errors = field.check(from_model=Child)
expected = [
Error(
"No subset of the fields 'a', 'b' on model 'Parent' is unique.",
hint=(
"Add unique=True on any of those fields or add at least "
"a subset of them to a unique_together constraint."
),
obj=field,
id='fields.E310',
),
]
self.assertEqual(expected, errors)
def test_intersection_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
d = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b', 'c'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
d = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b', 'd'),
to_fields=('a', 'b', 'd'),
related_name='children',
)
field = Child._meta.get_field('parent')
errors = field.check(from_model=Child)
expected = [
Error(
"No subset of the fields 'a', 'b', 'd' on model 'Parent' is unique.",
hint=(
"Add unique=True on any of those fields or add at least "
"a subset of them to a unique_together constraint."
),
obj=field,
id='fields.E310',
),
]
self.assertEqual(expected, errors)<|fim▁end|> | |
<|file_name|>notepad_slow.py<|end_file_name|><|fim▁begin|># GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run some automations to test things"""
from __future__ import unicode_literals
from __future__ import print_function
import os.path
import sys
import time
try:
from pywinauto import application
except ImportError:
pywinauto_path = os.path.abspath(__file__)
pywinauto_path = os.path.split(os.path.split(pywinauto_path)[0])[0]
sys.path.append(pywinauto_path)
from pywinauto import application
from pywinauto import tests
from pywinauto.findbestmatch import MatchError
from pywinauto.timings import Timings
print("Setting timings to slow settings, may be necessary for")
print("slow applications or slow machines.")
Timings.slow()
#application.set_timing(3, .5, 10, .5, .4, .2, .2, .1, .2, .5)
def run_notepad():
"""Run notepad and do some small stuff with it"""
start = time.time()
app = application.Application()
## for distribution we don't want to connect to anybodies application
## because we may mess up something they are working on!
#try:
# app.connect_(path = r"c:\windows\system32\notepad.exe")
#except application.ProcessNotFoundError:
# app.start_(r"c:\windows\system32\notepad.exe")
app.start(r"notepad.exe")
app.Notepad.menu_select("File->PageSetup")
# ----- Page Setup Dialog ----
# Select the 4th combobox item
app.PageSetupDlg.SizeComboBox.select(4)
# Select the 'Letter' combobox item or the Letter
try:
app.PageSetupDlg.SizeComboBox.select("Letter")
except ValueError:
app.PageSetupDlg.SizeComboBox.select('Letter (8.5" x 11")')
app.PageSetupDlg.SizeComboBox.select(2)
# run some tests on the Dialog. List of available tests:
# "AllControls",
# "AsianHotkey",
# "ComboBoxDroppedHeight",
# "CompareToRefFont",
# "LeadTrailSpaces",
# "MiscValues",
# "Missalignment",
# "MissingExtraString",
# "Overlapping",
# "RepeatedHotkey",
# "Translation",
# "Truncation",
bugs = app.PageSetupDlg.run_tests('RepeatedHotkey Truncation')
# if there are any bugs they will be printed to the console
# and the controls will be highlighted
tests.print_bugs(bugs)
# ----- Next Page Setup Dialog ----
app.PageSetupDlg.Printer.click()
# do some radio button clicks
# Open the Connect to printer dialog so we can
# try out checking/unchecking a checkbox
app.PageSetupDlg.Network.click()
# ----- Connect To Printer Dialog ----
# Select a checkbox
app.ConnectToPrinter.ExpandByDefault.check()
app.ConnectToPrinter.ExpandByDefault.uncheck()
# try doing the same by using click
app.ConnectToPrinter.ExpandByDefault.click()
app.ConnectToPrinter.ExpandByDefault.click()
# close the dialog
<|fim▁hole|>
doc_props = app.window(title_re = ".*Properties$")
doc_props.wait('exists', timeout=40)
# ----- Document Properties Dialog ----
# some tab control selections
# Two ways of selecting tabs with indices...
doc_props.TabCtrl.select(0)
doc_props.TabCtrl.select(1)
try:
doc_props.TabCtrl.select(2)
except IndexError:
# not all users have 3 tabs in this dialog
print('Skip 3rd tab selection...')
# or with text...
doc_props.TabCtrl.select("PaperQuality")
try:
doc_props.TabCtrl.select("JobRetention")
except MatchError:
# some people do not have the "Job Retention" tab
print('Skip "Job Retention" tab...')
# doc_props.TabCtrl.select("Layout")
#
# # do some radio button clicks
# doc_props.RotatedLandscape.click()
# doc_props.BackToFront.click()
# doc_props.FlipOnShortEdge.click()
#
# doc_props.Portrait.click()
# doc_props._None.click()
# doc_props.FrontToBack.click()
#
# # open the Advanced options dialog in two steps
# advbutton = doc_props.Advanced
# advbutton.click()
#
# # close the 4 windows
#
# # ----- Advanced Options Dialog ----
# app.window(title_re = ".* Advanced Options").Ok.click()
# ----- Document Properties Dialog again ----
doc_props.Cancel.close_click()
# for some reason my current printer driver
# window does not close cleanly :(
    if doc_props.Cancel.exists():
doc_props.OK.close_click()
# ----- 2nd Page Setup Dialog again ----
app.PageSetupDlg.OK.close_click()
# ----- Page Setup Dialog ----
app.PageSetupDlg.Ok.close_click()
# type some text - note that extended characters ARE allowed
app.Notepad.Edit.set_edit_text("I am typing s\xe4me text to Notepad\r\n\r\n"
"And then I am going to quit")
app.Notepad.Edit.right_click()
app.Popup.menu_item("Right To Left Reading Order").click()
#app.PopupMenu.menu_select("Paste", app.Notepad.ctrl_())
#app.Notepad.Edit.right_click()
#app.PopupMenu.menu_select("Right To Left Reading Order", app.Notepad.ctrl_())
#app.PopupMenu.menu_select("Show unicode control characters", app.Notepad.ctrl_())
#time.sleep(1)
#app.Notepad.Edit.right_click()
#app.PopupMenu.menu_select("Right To Left Reading Order", app.Notepad.ctrl_())
#time.sleep(1)
#app.Notepad.Edit.right_click()
#app.PopupMenu.menu_select("Insert Unicode control character -> IAFS", app.Notepad.ctrl_())
#time.sleep(1)
#app.Notepad.Edit.type_keys("{ESC}")
# the following shows that Sendtext does not accept
# accented characters - but does allow 'control' characters
app.Notepad.Edit.type_keys("{END}{ENTER}SendText d\xf6\xe9s "
u"s\xfcpp\xf4rt \xe0cce\xf1ted characters!!!", with_spaces = True)
# Try and save
app.Notepad.menu_select("File->SaveAs")
app.SaveAs.EncodingComboBox.select("UTF-8")
app.SaveAs.FileNameEdit.set_edit_text("Example-utf8.txt")
app.SaveAs.Save.close_click()
# my machine has a weird problem - when connected to the network
# the SaveAs Dialog appears - but doing anything with it can
# cause a LONG delay - the easiest thing is to just wait
# until the dialog is no longer active
# - Dialog might just be gone - because click worked
# - dialog might be waiting to disappear
# so can't wait for next dialog or for it to be disabled
# - dialog might be waiting to display message box so can't wait
# for it to be gone or for the main dialog to be enabled.
# while the dialog exists wait upto 30 seconds (and yes it can
# take that long on my computer sometimes :-( )
app.SaveAsDialog2.Cancel.wait_not('enabled')
# If file exists - it asks you if you want to overwrite
try:
app.SaveAs.Yes.wait('exists').close_click()
except MatchError:
print('Skip overwriting...')
# exit notepad
app.Notepad.menu_select("File->Exit")
#if not run_with_appdata:
# app.WriteAppData(os.path.join(scriptdir, "Notepad_fast.pkl"))
print("That took %.3f to run"% (time.time() - start))
if __name__ == "__main__":
run_notepad()<|fim▁end|> | app.ConnectToPrinter.Cancel.close_click()
# ----- 2nd Page Setup Dialog again ----
app.PageSetupDlg.Properties.click()
|
<|file_name|>utils.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Web application that manages the data to be filled in on the form for the Sworn Declaration of Non-Household Urban Solid Waste Generation submitted to the Intendencia de Montevideo
# Copyright (C) 2016 LKSur S.A.
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# If you modify this program, we would appreciate you sending the changes to the address given in the README, so that we can consider incorporating them into our distribution.
from gluon import current
# The logger (module-level, used by the commented-out debug traces below;
# the logger name here is an assumption)
import logging
logger = logging.getLogger("app.utils")
class Utils(object):
"""
This class provides helper functions for session handling, validations,
and for exporting the data in PDF format.
Args:
db: The database.
"""
def __init__(self, db):
self.db = db
def resetSession(self):
""" Resetea los datos de la session """
session = current.session
session.empresa = None
session.predio = None
session.datosfuncId = None
session.genera = None
def cargarResiduosEnSesion(self,decid):
""" Stores the declared waste types in the session. Renamed from
cargarResiduos, which was shadowed by the loader of the same name below. """
s = current.session
s.genera = self.cargarResiduos(decid)
def actualizarIndice(self):
db = self.db
session = current.session
if session.info:
db.DF_declaracion[session.declaracionId] = dict(indiceform=-1)
session.indiceForm = -1
session.incompleto = False
elif session.indiceForm < 7:
session.indiceForm +=1
db.DF_declaracion[session.declaracionId] = dict(indiceform=int(session.indiceForm))
def verificarRUT(self,rut):
valido = False
rut_tam = len(rut)
#If the first 2 digits are below 10, a zero is prepended to align the positions with the validations
if rut_tam in range(10,12) and int(rut[8:10]) != 0:
rut = "0" + rut
rut_tam +=1
if rut_tam == 11 or rut_tam == 12:
val1 = int(rut[:2]) in range(1, 22)
val2 = int(rut[2:8]) != 0
val3 = int(rut[8:10]) == 0
# if not val1:
# logger.debug("The first 2 digits (%d) must be in the range 1 to 21"%int(rut[:2]))
# if not val2:
# logger.debug("Digits 3 through 8 must be different from 000000")
# if not val3:
# logger.debug("Digits 9 and 10 must be 0")
# logger.debug("Val1: %s, %d; Val2: %s, %s; Val3: %s, %d" %(val1,int(rut[:2]),val2,rut[2:8],val3,int(rut[8:10])))
if val1 and val2 and val3:
modulo = 11
numero_base = [4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
digito_verificador = int(rut[-1])
indice = 0
suma = 0
for numero in numero_base:
suma = suma + int(rut[indice]) * numero
indice = indice + 1
resto = suma % modulo
if resto == 0:
valido = (digito_verificador == 0)
# if not valido: logger.debug("remainder 0, digit %d "%digito_verificador)
elif resto == 1:
valido = (len(rut) == 11)
# if not valido: logger.debug("remainder 1, digit %d " % digito_verificador)
else:
valido = (digito_verificador == (modulo - resto))
# if not valido: logger.debug("correct digit: %d, entered digit %d ; sum: %d; remainder: %d " % (modulo-resto,digito_verificador,suma,resto))
return valido
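# Usage sketch (the RUT below is made up to satisfy the mod-11 rule, not a real
# registered number): Utils(db).verificarRUT("211234560019") returns True because
# the check digit 9 equals 11 - (weighted sum 145 % 11).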
def verificarDocumento(self,doc):
try:
modulo = 10
digito_verificador = int(doc[-1] )
if len(doc)==7:
numero_base = [9, 8, 7, 6, 3, 4]
else:
numero_base = [2, 9, 8, 7, 6, 3, 4]
indice = 0
suma = 0
for numero in numero_base:
suma = suma + int(doc[indice])*numero
indice = indice + 1
resto = suma % modulo
if resto == 0:
valido = (digito_verificador == 0)
else:
valido = (digito_verificador == (modulo-(suma % modulo)))
return valido
except Exception as e:
return False
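# Usage sketch (made-up document number, not a real cédula):
# Utils(db).verificarDocumento("12345672") returns True, since the weighted sum
# is 148, 148 % 10 == 8, and the check digit is 10 - 8 == 2.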
def autorizado(self):
session = current.session
from gluon.http import redirect
from gluon.html import URL
if (session.declaracionId is None):
session.flash = "Debe establecer una declaración"
redirect(URL('declaracion','index', extension=False))
# redirect(URL(request.application, request.controller,'index'))
return False
return True
def puedoExportarPDF(self):
from gluon.http import redirect
from gluon.html import URL
pasoscompletos = True
session = current.session
falta = None
from collections import OrderedDict
verificar = OrderedDict([
('empresa','Empresa'),
('predio','Ubicación'),
('datosfuncId','Datos de funcionamiento'),
('residuosCargados','Residuos generados'),
('info','Información Adicional')
])
for i, (clave, valor) in enumerate(verificar.items()):
if session[clave] is None:
pasoscompletos = False
tit = valor
if falta is None:
falta = tit
else:
falta += ', '+ tit
if pasoscompletos:
return True
else:
session.flash = "Para poder exportar la declaración debe elegir una y completar todos sus datos. Falta completar: "+falta
redirect(URL('declaracion','index', extension=False))
def residuosCargados(self):
db = self.db
session = current.session
listo = False
for tipo in session.genera:
if (session.genera[tipo] is not None):
listo = True
break
if listo:
session.residuosCargados = True
else:
session.residuosCargados = None
def obtenerCamposAuditoria(self,omitidos=[]):
campos=['password','registration_id','registration_key','reset_password_key','is_active', 'created_on', 'created_by', 'modified_on', 'modified_by']
campos+=['centro','id','declaracion','empresa','predio']
campos+=omitidos
return campos
def traducirlista(self,lista,tabla):
if lista == '' or lista.strip() == '':
return lista
else:
db = self.db
aux = lista.split(',')
traduccion = ""
if len(aux)>0:
e=db[tabla][aux[0]].nombre
traduccion = traduccion + e
if len(aux)>1:
for row in aux[1:]:
e=db[tabla][int(row.strip())].nombre
traduccion = traduccion + "," + e
return traduccion
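# Sketch with hypothetical rows: traducirlista("3, 7", "DF_tipo_residuo") looks up
# the 'nombre' of records 3 and 7 and returns them comma-joined, e.g. "Seco,Humedo".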
def obtenerHorario(self,data):
import json
semana = {0: 'Domingo', 1: 'Lunes', 2: 'Martes', 3: 'Miércoles', 4: 'Jueves', 5: 'Viernes', 6: 'Sábado'}
#Encode to a JSON string
data_string = json.dumps(data)
#Decode back into Python objects
decoded = json.loads(data_string)
horario = {}
resultado = ''
for i in range(0,7):
horario[i] = []
for h in decoded:
dia = h['day']
hora = h['hour']
horario[dia].append(hora)
cont = 0
for dia in horario:
horas = horario[dia]
if horas:
cont = cont + 1
canth = len(horas)+1
rango = horas[-1] - horas[0]
if canth == (rango + 2):
# Print the first day
if cont == 1:
resultado += '%s: %s a %s Hs.\n'%(semana[dia],horas[0],horas[-1]+1)
# Print the remaining days
else:
resultado += ' ; %s: %s a %s Hs.\n'%(semana[dia],horas[0],horas[-1]+1)
else:
rangos = {}
indice = 0
anterior = horas[0]
rangos[indice] = []
rangos[indice].append(anterior)
for h in horas[1:]:
if h != anterior + 1:
indice = indice + 1
rangos[indice] = []
rangos[indice].append(h)
anterior = h
stringrangos = ''
for r in rangos:
if len(rangos[r]) == 1:
stringrangos += '%s Hs.' %(rangos[r][0])<|fim▁hole|> else:
stringrangos += ''
# Print the first day
if cont == 1:
resultado += '%s: %s\n'%(semana[dia],stringrangos)
# Print the remaining days
else:
resultado += ' ; %s: %s \n'%(semana[dia],stringrangos)
return resultado
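# Shape sketch (hypothetical data): for [{"day": 1, "hour": 8}, {"day": 1, "hour": 9},
# {"day": 1, "hour": 10}] the hours are contiguous, so the result is "Lunes: 8 a 11 Hs.\n".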
def obtenerHtml(self,tabla,id,omitidos=[],traducir=[],omitircondicion=[],consulta=None):
from gluon.html import *
db=self.db
q=db(db["%s"%(tabla)].id==id)
q=q.select().render(0)
campos_auditoria = self.obtenerCamposAuditoria(omitidos)
fi=db["%s"%(tabla)].fields
fa = [x for x in fi if x not in campos_auditoria]
html=FORM()
for row in fa:
label=db["%s"%(tabla)][row].label
valor=q[row]
# Translate the schedule from its JSON representation
if row == 'horario':
valor = self.obtenerHorario(valor)
elif row == 'nropuerta' and q['bis']:
valor = str(valor)+' BIS'
# Translate Sexo from 0 or 1 to Masculino or Femenino
elif label == 'Sexo':
if valor == 0:
valor = 'Masculino'
else:
valor = 'Femenino'
elif row == 'contenedor':
if valor == 'True':
valor = 'Sí'
else:
valor = 'No'
# Rename the "Entre" and "Y" labels that refer to the street corners
elif row == 'esquina1' or row == 'esquina2':
label = db["%s"%(tabla)][row].comment
# Translate a list of integer ids into their names in the referenced table
elif row in traducir:
valor = self.traducirlista(q[row],traducir[row])
# Skip printing rows that do not apply, e.g. if no 'Seco' type waste is generated
elif row in omitircondicion:
if q[row]==omitircondicion[row]:
html = "No corresponde"
break
# Replace True and False values with Sí and No. The integer check is needed because otherwise incorrect casts to True or False occur.
elif (isinstance(valor, int)):
if valor ==True:
valor='Sí'
else:
valor='No'
# Adjust the output for null values
if valor in [None, '', ' ', 'None']:
valor = 'No corresponde'
# Create one DIV per field with its label and value.
if not("otro" in label.lower() and valor == 'No corresponde'):
html.append(DIV(P(B("%s: "%label)),P(valor),BR()*2))
return html
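# Typical controller use (a sketch; the 'bis' field is omitted as an example):
# html = Utils(db).obtenerHtml('DF_predio', session.predio, omitidos=['bis'])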
def inicializarSesion(self):
db = self.db
session = current.session
consulta = db(db.DF_declaracion)
#If no declaration exists yet, create the first one and set it in the session
if consulta.isempty():
session.declaracionId = db.DF_declaracion.insert(info='')
#If any exist, set the most recently modified one in the session
else:
session.declaracionId = db(db.DF_declaracion).select(db.DF_declaracion.id, orderby=~db.DF_declaracion.modified_on).first().id
#Now check whether data has already been loaded for the declaration
#Query for the company
consulta = db(db.DF_empresa.declaracion==session.declaracionId)
#If no company is associated with the declaration
if consulta.isempty():
session.empresa = None
#If one exists, set it in the session
else:
#there is a single record
session.empresa = consulta.select().first().id
def cargarResiduos(self,decId):
db = self.db
genera = dict()
tipos = db().select(db.DF_tipo_residuo.id,orderby=db.DF_tipo_residuo.id)
consulta = db(db.DF_genera.declaracion==decId)
for row in tipos:
tipoid = int(row.id)
genera[tipoid] = None
if not consulta.isempty():
residuo= consulta(db.DF_genera.tipo_residuo==tipoid).select(db.DF_genera.id,orderby=db.DF_genera.id).first()
if residuo:
genera[tipoid]=int(residuo.id)
return genera
def establecerSesion(self,decId):
db = self.db
session = current.session
session.declaracionId = decId
info = empresa = predio = datosfuncId= residuos = None
session.indiceForm = indiceVerif =0
session.incompleto = True
consultaDeclaracion = db(db.DF_declaracion.id == decId)
if not consultaDeclaracion.isempty():
indiceForm = db.DF_declaracion[decId].indiceform
if indiceForm is not None:
session.indiceForm = indiceForm
session.incompleto = (indiceForm !=-1)
#Query for the company
consultaEmpresa = db(db.DF_empresa.declaracion==decId)
#If a company is associated with the declaration
if not consultaEmpresa.isempty():
#there is a single record
# if len(consulta.select())==1:
empresa = consultaEmpresa.select().first().id
indiceVerif +=1
#Query for the property
consulta = db(db.DF_predio.declaracion==decId)
#If a property is associated with the declaration
if not consulta.isempty():
#there is a single record
predio = consulta.select().first().id
indiceVerif += 1
#Query for the operating data
consultaDatosfunc = db(db.DF_reside.empresa==empresa)(db.DF_reside.predio==predio)
#If data is associated with the declaration
if not consultaDatosfunc.isempty():
registro = consultaDatosfunc.select().first()
datosfuncId = registro.id
indiceVerif += 1
residuos = self.cargarResiduos(decId)
if indiceForm != -1 and indiceVerif > indiceForm:
db.DF_declaracion[decId] = dict(indiceform = indiceVerif)
session.indiceForm = indiceVerif
session.incompleto = True
if session.incompleto is not None and not session.incompleto:
info = True
#Set the values in the session
[session.empresa,session.predio,session.datosfuncId,session.genera,session.info] = [empresa,predio,datosfuncId,residuos,info]
self.residuosCargados()<|fim▁end|> | else:
stringrangos += '%s a %s Hs.' %(rangos[r][0],rangos[r][-1]+1)
if r+1 != (len(rangos)):#last range
stringrangos += ' ,' |
<|file_name|>logs.cpp<|end_file_name|><|fim▁begin|>// logs.cpp
//
// Rivendell web service portal -- Log services
//
// (C) Copyright 2013,2016 Fred Gleason <[email protected]>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public
// License along with this program; if not, write to the Free Software
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
//
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <rdcreate_log.h>
#include <rddb.h>
#include <rdformpost.h>
#include <rdweb.h>
#include <rdsvc.h>
#include <rduser.h>
#include <rdlog.h>
#include <rdlog_event.h>
#include <rdlog_line.h>
#include <rdconf.h>
#include <rdescape_string.h>
#include <rdxport.h>
void Xport::AddLog()
{
QString log_name;
QString service_name;
//
// Get Arguments
//
if(!xport_post->getValue("LOG_NAME",&log_name)) {
XmlExit("Missing LOG_NAME",400,"logs.cpp",LINE_NUMBER);
}
if(!xport_post->getValue("SERVICE_NAME",&service_name)) {
XmlExit("Missing SERVICE_NAME",400,"logs.cpp",LINE_NUMBER);
}
RDSvc *svc=new RDSvc(service_name);
if(!svc->exists()) {
XmlExit("No such service",404,"logs.cpp",LINE_NUMBER);
}
//
// Verify User Perms
//
if(!xport_user->createLog()) {
XmlExit("Unauthorized",404,"logs.cpp",LINE_NUMBER);
}
RDLog *log=new RDLog(log_name);
if(!log->exists()) {
delete log;
log=new RDLog(log_name,true);
if(!log->exists()) {
delete log;
XmlExit("Unable to create log",500,"logs.cpp",LINE_NUMBER);
}
log->setOriginUser(xport_user->name());
log->setDescription("[new log]");
log->setService(service_name);
}
delete log;
RDCreateLogTable(RDLog::tableName(log_name));
XmlExit("OK",200,"logs.cpp",LINE_NUMBER);
}
void Xport::DeleteLog()
{
QString log_name;
//
// Get Arguments
//
if(!xport_post->getValue("LOG_NAME",&log_name)) {
XmlExit("Missing LOG_NAME",400,"logs.cpp",LINE_NUMBER);
}
//
// Verify User Perms
//
if(!xport_user->deleteLog()) {
XmlExit("Unauthorized",404,"logs.cpp",LINE_NUMBER);
}
RDLog *log=new RDLog(log_name);
if(log->exists()) {
if(!log->remove(xport_station,xport_user,xport_config)) {
delete log;
XmlExit("Unable to delete log",500,"logs.cpp",LINE_NUMBER);
}
}
delete log;
XmlExit("OK",200,"logs.cpp",LINE_NUMBER);
}
void Xport::ListLogs()
{
QString sql;
RDSqlQuery *q;
RDLog *log;
QString service_name="";
QString log_name="";
QString trackable;
//
// Get Options
//
xport_post->getValue("SERVICE_NAME",&service_name);
xport_post->getValue("LOG_NAME",&log_name);
xport_post->getValue("TRACKABLE",&trackable);
//
// Generate Log List
//
sql="select NAME from LOGS";
if((!service_name.isEmpty())||(!log_name.isEmpty())||(trackable=="1")) {
sql+=" where";
if(!log_name.isEmpty()) {
sql+=" (NAME=\""+RDEscapeString(log_name)+"\")&&";
}
if(!service_name.isEmpty()) {
sql+=" (SERVICE=\""+RDEscapeString(service_name)+"\")&&";
}
if(trackable=="1") {
sql+=" (SCHEDULED_TRACKS>0)&&";
}
sql=sql.left(sql.length()-2);
}
sql+=" order by NAME";
q=new RDSqlQuery(sql);
//
// Process Request
//
printf("Content-type: application/xml\n");
printf("Status: 200\n\n");
printf("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n");
printf("<logList>\n");
while(q->next()) {
log=new RDLog(q->value(0).toString());
printf("%s",(const char *)log->xml());
delete log;
}
printf("</logList>\n");
delete q;
Exit(0);
}
void Xport::ListLog()
{
RDLog *log;
QString name="";
//
// Get Options
//
xport_post->getValue("NAME",&name);
//
// Verify that log exists
//
log=new RDLog(name);
if(!log->exists()) {
delete log;
XmlExit("No such log",404,"logs.cpp",LINE_NUMBER);
}
//
// Generate Log Listing
//
RDLogEvent *log_event=log->createLogEvent();
log_event->load(true);
//
// Process Request
//
printf("Content-type: application/xml\n");
printf("Status: 200\n\n");
printf("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n");
printf("%s\n",(const char *)log_event->xml());
Exit(0);
}
void Xport::SaveLog()
{
//
// Verify User Perms
//
if((!xport_user->addtoLog())||(!xport_user->removefromLog())||(!xport_user->arrangeLog())) {
XmlExit("No user privilege",404,"logs.cpp",LINE_NUMBER);
}
QString log_name;
QString service_name;
QString description;
QDate purge_date;
bool auto_refresh;
QDate start_date;
QDate end_date;
int line_quantity;
//
// Header Data
//
if(!xport_post->getValue("LOG_NAME",&log_name)) {
XmlExit("Missing LOG_NAME",400,"logs.cpp",LINE_NUMBER);
}
if(!xport_post->getValue("SERVICE_NAME",&service_name)) {
XmlExit("Missing SERVICE_NAME",400,"logs.cpp",LINE_NUMBER);
}
if(!xport_post->getValue("DESCRIPTION",&description)) {
XmlExit("Missing DESCRIPTION",400,"logs.cpp",LINE_NUMBER);
}
if(!xport_post->getValue("PURGE_DATE",&purge_date)) {
XmlExit("Missing PURGE_DATE",400,"logs.cpp",LINE_NUMBER);
}
if(!xport_post->getValue("AUTO_REFRESH",&auto_refresh)) {
XmlExit("Missing AUTO_REFRESH",400,"logs.cpp",LINE_NUMBER);
}
if(!xport_post->getValue("START_DATE",&start_date)) {
XmlExit("Missing START_DATE",400,"logs.cpp",LINE_NUMBER);
}
if(!xport_post->getValue("END_DATE",&end_date)) {
XmlExit("Missing END_DATE",400,"logs.cpp",LINE_NUMBER);
}
if(!xport_post->getValue("LINE_QUANTITY",&line_quantity)) {
XmlExit("Missing LINE_QUANTITY",400,"logs.cpp",LINE_NUMBER);
}
//
// Logline Data
//
RDLogEvent *logevt=new RDLogEvent(RDLog::tableName(log_name));
for(int i=0;i<line_quantity;i++) {
logevt->insert(i,1);
RDLogLine *ll=logevt->logLine(i);
QString line=QString().sprintf("LINE%d",i);
QString str;
int integer1;
int integer2;
QDateTime datetime;
QTime time;
bool state;
bool ok=false;
if(!xport_post->getValue(line+"_ID",&integer1,&ok)) {
XmlExit("Missing "+line+"_ID",400,"logs.cpp",LINE_NUMBER);
}
if(!ok) {
XmlExit("Invalid "+line+"_ID",400,"logs.cpp",LINE_NUMBER);
}
ll->setId(integer1);
if(!xport_post->getValue(line+"_TYPE",&integer1)) {
XmlExit("Missing "+line+"_TYPE",400,"logs.cpp",LINE_NUMBER);
}
ll->setType((RDLogLine::Type)integer1);
if(!xport_post->getValue(line+"_CART_NUMBER",&integer1)) {
XmlExit("Missing "+line+"_CART_NUMBER",400,"logs.cpp",LINE_NUMBER);
}
ll->setCartNumber(integer1);
if(!xport_post->getValue(line+"_TIME_TYPE",&integer2)) {
XmlExit("Missing "+line+"_TIME_TYPE",400,"logs.cpp",LINE_NUMBER);
}
ll->setTimeType((RDLogLine::TimeType)integer2);
if(!xport_post->getValue(line+"_START_TIME",&integer1)) {
XmlExit("Missing "+line+"_START_TIME",400,"logs.cpp",LINE_NUMBER);
}
if(ll->timeType()==RDLogLine::Hard) {
ll->setStartTime(RDLogLine::Logged,QTime().addMSecs(integer1));
}
else {
ll->setStartTime(RDLogLine::Predicted,QTime().addMSecs(integer1));
}
if(!xport_post->getValue(line+"_GRACE_TIME",&integer1)) {
XmlExit("Missing "+line+"_GRACE_TIME",400,"logs.cpp",LINE_NUMBER);
}
ll->setGraceTime(integer1);
if(!xport_post->getValue(line+"_TRANS_TYPE",&str)) {
XmlExit("Missing "+line+"_TRANS_TYPE",400,"logs.cpp",LINE_NUMBER);
}
integer1=-1;
if(str.lower()=="play") {
integer1=RDLogLine::Play;
}
if(str.lower()=="segue") {
integer1=RDLogLine::Segue;
}
if(str.lower()=="stop") {
integer1=RDLogLine::Stop;
}
if(integer1<0) {
XmlExit("Invalid transition type in "+line+"_TRANS_TYPE",400,
"logs.cpp",LINE_NUMBER);
}
ll->setTransType((RDLogLine::TransType)integer1);
if(!xport_post->getValue(line+"_START_POINT",&integer1)) {
XmlExit("Missing "+line+"_START_POINT",400,"logs.cpp",LINE_NUMBER);
}
ll->setStartPoint(integer1,RDLogLine::LogPointer);
if(!xport_post->getValue(line+"_END_POINT",&integer1)) {
XmlExit("Missing "+line+"_END_POINT",400,"logs.cpp",LINE_NUMBER);
}
ll->setEndPoint(integer1,RDLogLine::LogPointer);
if(!xport_post->getValue(line+"_SEGUE_START_POINT",&integer1)) {
XmlExit("Missing "+line+"_SEGUE_START_POINT",400,"logs.cpp",LINE_NUMBER);
}
ll->setSegueStartPoint(integer1,RDLogLine::LogPointer);
if(!xport_post->getValue(line+"_SEGUE_END_POINT",&integer1)) {
XmlExit("Missing "+line+"_SEGUE_END_POINT",400,"logs.cpp",LINE_NUMBER);
}
ll->setSegueEndPoint(integer1,RDLogLine::LogPointer);
if(!xport_post->getValue(line+"_FADEUP_POINT",&integer1)) {
XmlExit("Missing "+line+"_FADEUP_POINT",400,"logs.cpp",LINE_NUMBER);
}
ll->setFadeupPoint(integer1,RDLogLine::LogPointer);
if(!xport_post->getValue(line+"_FADEUP_GAIN",&integer1)) {
XmlExit("Missing "+line+"_FADEUP_GAIN",400,"logs.cpp",LINE_NUMBER);
}
ll->setFadeupGain(integer1);
if(!xport_post->getValue(line+"_FADEDOWN_POINT",&integer1)) {
XmlExit("Missing "+line+"_FADEDOWN_POINT",400,"logs.cpp",LINE_NUMBER);
}
ll->setFadedownPoint(integer1,RDLogLine::LogPointer);
if(!xport_post->getValue(line+"_FADEDOWN_GAIN",&integer1)) {
XmlExit("Missing "+line+"_FADEDOWN_GAIN",400,"logs.cpp",LINE_NUMBER);
}
ll->setFadedownGain(integer1);
if(!xport_post->getValue(line+"_DUCK_UP_GAIN",&integer1)) {
XmlExit("Missing "+line+"_DUCK_UP_GAIN",400,"logs.cpp",LINE_NUMBER);
}
ll->setDuckUpGain(integer1);
if(!xport_post->getValue(line+"_DUCK_DOWN_GAIN",&integer1)) {
XmlExit("Missing "+line+"_DUCK_DOWN_GAIN",400,"logs.cpp",LINE_NUMBER);
}
ll->setDuckDownGain(integer1);
if(!xport_post->getValue(line+"_COMMENT",&str)) {
XmlExit("Missing "+line+"_COMMENT",400,"logs.cpp",LINE_NUMBER);
}
ll->setMarkerComment(str);
if(!xport_post->getValue(line+"_LABEL",&str)) {
XmlExit("Missing "+line+"_LABEL",400,"logs.cpp",LINE_NUMBER);
}
ll->setMarkerLabel(str);
if(!xport_post->getValue(line+"_ORIGIN_USER",&str)) {
XmlExit("Missing "+line+"_ORIGIN_USER",400,"logs.cpp",LINE_NUMBER);
}
ll->setOriginUser(str);
if(!xport_post->getValue(line+"_ORIGIN_DATETIME",&datetime)) {
XmlExit("Missing "+line+"_ORIGIN_DATETIME",400,"logs.cpp",LINE_NUMBER);
}
ll->setOriginDateTime(datetime);
if(!xport_post->getValue(line+"_EVENT_LENGTH",&integer1)) {
XmlExit("Missing "+line+"_EVENT_LENGTH",400,"logs.cpp",LINE_NUMBER);
}
ll->setEventLength(integer1);
if(!xport_post->getValue(line+"_LINK_EVENT_NAME",&str)) {
XmlExit("Missing "+line+"_LINK_EVENT_NAME",400,"logs.cpp",LINE_NUMBER);
}
ll->setLinkEventName(str);
if(!xport_post->getValue(line+"_LINK_START_TIME",&integer1)) {
XmlExit("Missing "+line+"_LINK_START_TIME",400,"logs.cpp",LINE_NUMBER);
}
ll->setLinkStartTime(QTime().addMSecs(integer1));
if(!xport_post->getValue(line+"_LINK_LENGTH",&integer1)) {
XmlExit("Missing "+line+"_LINK_LENGTH",400,"logs.cpp",LINE_NUMBER);
}
ll->setLinkLength(integer1);
if(!xport_post->getValue(line+"_LINK_START_SLOP",&integer1)) {
XmlExit("Missing "+line+"_LINK_START_SLOP",400,"logs.cpp",LINE_NUMBER);
}
ll->setLinkStartSlop(integer1);
if(!xport_post->getValue(line+"_LINK_END_SLOP",&integer1)) {
XmlExit("Missing "+line+"_LINK_END_SLOP",400,"logs.cpp",LINE_NUMBER);
}
ll->setLinkEndSlop(integer1);
if(!xport_post->getValue(line+"_LINK_ID",&integer1)) {
XmlExit("Missing "+line+"_LINK_ID",400,"logs.cpp",LINE_NUMBER);
}
ll->setLinkId(integer1);
if(!xport_post->getValue(line+"_LINK_EMBEDDED",&state)) {
XmlExit("Missing "+line+"_LINK_EMBEDDED",400,"logs.cpp",LINE_NUMBER);
}
ll->setLinkEmbedded(state);
if(!xport_post->getValue(line+"_EXT_START_TIME",&time)) {
XmlExit("Missing "+line+"_EXT_START_TIME",400,"logs.cpp",LINE_NUMBER);
}
ll->setExtStartTime(time);
if(!xport_post->getValue(line+"_EXT_CART_NAME",&str)) {
XmlExit("Missing "+line+"_EXT_CART_NAME",400,"logs.cpp",LINE_NUMBER);
}
ll->setExtCartName(str);
if(!xport_post->getValue(line+"_EXT_DATA",&str)) {
XmlExit("Missing "+line+"_EXT_DATA",400,"logs.cpp",LINE_NUMBER);
}
ll->setExtData(str);
if(!xport_post->getValue(line+"_EXT_EVENT_ID",&str)) {
XmlExit("Missing "+line+"_EXT_EVENT_ID",400,"logs.cpp",LINE_NUMBER);
}
ll->setExtEventId(str);
if(!xport_post->getValue(line+"_EXT_ANNC_TYPE",&str)) {
XmlExit("Missing "+line+"_EXT_ANNC_TYPE",400,"logs.cpp",LINE_NUMBER);
}
ll->setExtAnncType(str);
}
RDLog *log=new RDLog(log_name);
if(!log->exists()) {
XmlExit("No such log",404,"logs.cpp",LINE_NUMBER);
}
log->setService(service_name);
log->setDescription(description);
log->setPurgeDate(purge_date);
log->setAutoRefresh(auto_refresh);
log->setStartDate(start_date);
log->setEndDate(end_date);
log->setModifiedDatetime(QDateTime::currentDateTime());
logevt->save();
<|fim▁hole|> XmlExit(QString().sprintf("OK Saved %d events",logevt->size()),
200,"logs.cpp",LINE_NUMBER);
}<|fim▁end|> | |
<|file_name|>csearch.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Searching for information from the cstore
use metadata::common::*;
use metadata::cstore;
use metadata::decoder;
use middle::def;
use middle::lang_items;
use middle::ty;
use rbml;
use rbml::reader;
use std::rc::Rc;
use syntax::ast;
use syntax::ast_map;
use syntax::attr;
use syntax::attr::AttrMetaMethods;
use syntax::diagnostic::expect;
use syntax::parse::token;
use std::collections::hash_map::HashMap;
#[derive(Copy, Clone)]
pub struct MethodInfo {
pub name: ast::Name,
pub def_id: ast::DefId,
pub vis: ast::Visibility,
}
pub fn get_symbol(cstore: &cstore::CStore, def: ast::DefId) -> String {
let cdata = cstore.get_crate_data(def.krate);
decoder::get_symbol(cdata.data(), def.node)
}
/// Iterates over all the language items in the given crate.
pub fn each_lang_item<F>(cstore: &cstore::CStore,
cnum: ast::CrateNum,
f: F)
-> bool where
F: FnMut(ast::NodeId, usize) -> bool,
{
let crate_data = cstore.get_crate_data(cnum);
decoder::each_lang_item(&*crate_data, f)
}
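// Usage sketch (hypothetical caller): count the lang items of a crate; the
// closure presumably returns `true` to keep iterating and `false` to stop early.
//
//     let mut count = 0;
//     csearch::each_lang_item(&tcx.sess.cstore, cnum, |_node_id, _item_index| {
//         count += 1;
//         true
//     });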
/// Iterates over each child of the given item.
pub fn each_child_of_item<F>(cstore: &cstore::CStore,
def_id: ast::DefId,
callback: F) where
F: FnMut(decoder::DefLike, ast::Name, ast::Visibility),
{
let crate_data = cstore.get_crate_data(def_id.krate);
let get_crate_data = |cnum| {
cstore.get_crate_data(cnum)
};
decoder::each_child_of_item(cstore.intr.clone(),
&*crate_data,
def_id.node,
get_crate_data,
callback)
}
/// Iterates over each top-level crate item.
pub fn each_top_level_item_of_crate<F>(cstore: &cstore::CStore,
cnum: ast::CrateNum,
callback: F) where
F: FnMut(decoder::DefLike, ast::Name, ast::Visibility),
{
let crate_data = cstore.get_crate_data(cnum);
let get_crate_data = |cnum| {
cstore.get_crate_data(cnum)
};
decoder::each_top_level_item_of_crate(cstore.intr.clone(),
&*crate_data,
get_crate_data,
callback)
}
pub fn get_item_path(tcx: &ty::ctxt, def: ast::DefId) -> Vec<ast_map::PathElem> {
let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(def.krate);
let path = decoder::get_item_path(&*cdata, def.node);
// FIXME #1920: This path is not always correct if the crate is not linked
// into the root namespace.
let mut r = vec![ast_map::PathMod(token::intern(&cdata.name))];
r.push_all(&path);
r
}
pub enum FoundAst<'ast> {
Found(&'ast ast::InlinedItem),
FoundParent(ast::DefId, &'ast ast::InlinedItem),
NotFound,
}
// Finds the AST for this item in the crate metadata, if any. If the item was
// not marked for inlining, then the AST will not be present and hence none
// will be returned.
pub fn maybe_get_item_ast<'tcx>(tcx: &ty::ctxt<'tcx>, def: ast::DefId,
decode_inlined_item: decoder::DecodeInlinedItem)
-> FoundAst<'tcx> {
let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(def.krate);
decoder::maybe_get_item_ast(&*cdata, tcx, def.node, decode_inlined_item)
}
pub fn get_enum_variant_defs(cstore: &cstore::CStore, enum_id: ast::DefId)
-> Vec<(def::Def, ast::Name, ast::Visibility)> {
let cdata = cstore.get_crate_data(enum_id.krate);
decoder::get_enum_variant_defs(&*cstore.intr, &*cdata, enum_id.node)
}
pub fn get_enum_variants<'tcx>(tcx: &ty::ctxt<'tcx>, def: ast::DefId)
-> Vec<Rc<ty::VariantInfo<'tcx>>> {
let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(def.krate);
decoder::get_enum_variants(cstore.intr.clone(), &*cdata, def.node, tcx)
}
/// Returns information about the given implementation.
pub fn get_impl_items(cstore: &cstore::CStore, impl_def_id: ast::DefId)
-> Vec<ty::ImplOrTraitItemId> {
let cdata = cstore.get_crate_data(impl_def_id.krate);
decoder::get_impl_items(&*cdata, impl_def_id.node)
}
pub fn get_impl_or_trait_item<'tcx>(tcx: &ty::ctxt<'tcx>, def: ast::DefId)
-> ty::ImplOrTraitItem<'tcx> {
let cdata = tcx.sess.cstore.get_crate_data(def.krate);
decoder::get_impl_or_trait_item(tcx.sess.cstore.intr.clone(),
&*cdata,
def.node,
tcx)
}
pub fn get_trait_name(cstore: &cstore::CStore, def: ast::DefId) -> ast::Name {
let cdata = cstore.get_crate_data(def.krate);
decoder::get_trait_name(cstore.intr.clone(),
&*cdata,
def.node)
}
pub fn is_static_method(cstore: &cstore::CStore, def: ast::DefId) -> bool {
let cdata = cstore.get_crate_data(def.krate);
decoder::is_static_method(&*cdata, def.node)
}
pub fn get_trait_item_def_ids(cstore: &cstore::CStore, def: ast::DefId)
-> Vec<ty::ImplOrTraitItemId> {
let cdata = cstore.get_crate_data(def.krate);
decoder::get_trait_item_def_ids(&*cdata, def.node)
}
pub fn get_item_variances(cstore: &cstore::CStore,
def: ast::DefId) -> ty::ItemVariances {
let cdata = cstore.get_crate_data(def.krate);
decoder::get_item_variances(&*cdata, def.node)
}
pub fn get_provided_trait_methods<'tcx>(tcx: &ty::ctxt<'tcx>,
def: ast::DefId)
-> Vec<Rc<ty::Method<'tcx>>> {
let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(def.krate);
decoder::get_provided_trait_methods(cstore.intr.clone(), &*cdata, def.node, tcx)
}
pub fn get_associated_consts<'tcx>(tcx: &ty::ctxt<'tcx>, def: ast::DefId)
-> Vec<Rc<ty::AssociatedConst<'tcx>>> {
let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(def.krate);
decoder::get_associated_consts(cstore.intr.clone(), &*cdata, def.node, tcx)
}
pub fn get_type_name_if_impl(cstore: &cstore::CStore, def: ast::DefId)
-> Option<ast::Name> {
let cdata = cstore.get_crate_data(def.krate);
decoder::get_type_name_if_impl(&*cdata, def.node)
}
pub fn get_methods_if_impl(cstore: &cstore::CStore,
def: ast::DefId)
-> Option<Vec<MethodInfo> > {
let cdata = cstore.get_crate_data(def.krate);
decoder::get_methods_if_impl(cstore.intr.clone(), &*cdata, def.node)
}
pub fn get_item_attrs(cstore: &cstore::CStore,
def_id: ast::DefId)
-> Vec<ast::Attribute> {
let cdata = cstore.get_crate_data(def_id.krate);
decoder::get_item_attrs(&*cdata, def_id.node)
}
pub fn get_struct_fields(cstore: &cstore::CStore,
def: ast::DefId)
-> Vec<ty::field_ty> {
let cdata = cstore.get_crate_data(def.krate);
decoder::get_struct_fields(cstore.intr.clone(), &*cdata, def.node)
}
pub fn get_struct_field_attrs(cstore: &cstore::CStore, def: ast::DefId) -> HashMap<ast::NodeId,
Vec<ast::Attribute>> {
let cdata = cstore.get_crate_data(def.krate);
decoder::get_struct_field_attrs(&*cdata)
}
pub fn get_type<'tcx>(tcx: &ty::ctxt<'tcx>,
def: ast::DefId)
-> ty::TypeScheme<'tcx> {
let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(def.krate);
decoder::get_type(&*cdata, def.node, tcx)
}
pub fn get_trait_def<'tcx>(tcx: &ty::ctxt<'tcx>, def: ast::DefId) -> ty::TraitDef<'tcx> {
let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(def.krate);
decoder::get_trait_def(&*cdata, def.node, tcx)
}
pub fn get_predicates<'tcx>(tcx: &ty::ctxt<'tcx>, def: ast::DefId)
-> ty::GenericPredicates<'tcx>
{
let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(def.krate);
decoder::get_predicates(&*cdata, def.node, tcx)
}
pub fn get_super_predicates<'tcx>(tcx: &ty::ctxt<'tcx>, def: ast::DefId)
-> ty::GenericPredicates<'tcx>
{
let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(def.krate);
decoder::get_super_predicates(&*cdata, def.node, tcx)
}
pub fn get_field_type<'tcx>(tcx: &ty::ctxt<'tcx>, class_id: ast::DefId,
def: ast::DefId) -> ty::TypeScheme<'tcx> {
let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(class_id.krate);
let all_items = reader::get_doc(rbml::Doc::new(cdata.data()), tag_items);
let class_doc = expect(tcx.sess.diagnostic(),
decoder::maybe_find_item(class_id.node, all_items),
|| {
(format!("get_field_type: class ID {:?} not found",
class_id)).to_string()
});
let the_field = expect(tcx.sess.diagnostic(),
decoder::maybe_find_item(def.node, class_doc),
|| {
(format!("get_field_type: in class {:?}, field ID {:?} not found",
class_id,
def)).to_string()
});
let ty = decoder::item_type(def, the_field, tcx, &*cdata);
ty::TypeScheme {
generics: ty::Generics::empty(),
ty: ty,
}
}
pub fn get_impl_polarity<'tcx>(tcx: &ty::ctxt<'tcx>,
def: ast::DefId)
-> Option<ast::ImplPolarity>
{
let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(def.krate);
decoder::get_impl_polarity(&*cdata, def.node)
}
pub fn get_custom_coerce_unsized_kind<'tcx>(tcx: &ty::ctxt<'tcx>,
def: ast::DefId)
-> Option<ty::CustomCoerceUnsized> {<|fim▁hole|> let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(def.krate);
decoder::get_custom_coerce_unsized_kind(&*cdata, def.node)
}
// Given a def_id for an impl, return the trait it implements,
// if there is one.
pub fn get_impl_trait<'tcx>(tcx: &ty::ctxt<'tcx>,
def: ast::DefId)
-> Option<ty::TraitRef<'tcx>> {
let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(def.krate);
decoder::get_impl_trait(&*cdata, def.node, tcx)
}
// Given a def_id for an impl, return information about its vtables
pub fn get_impl_vtables<'tcx>(tcx: &ty::ctxt<'tcx>,
def: ast::DefId)
-> ty::vtable_res<'tcx> {
let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(def.krate);
decoder::get_impl_vtables(&*cdata, def.node, tcx)
}
pub fn get_native_libraries(cstore: &cstore::CStore, crate_num: ast::CrateNum)
-> Vec<(cstore::NativeLibraryKind, String)> {
let cdata = cstore.get_crate_data(crate_num);
decoder::get_native_libraries(&*cdata)
}
pub fn each_inherent_implementation_for_type<F>(cstore: &cstore::CStore,
def_id: ast::DefId,
callback: F) where
F: FnMut(ast::DefId),
{
let cdata = cstore.get_crate_data(def_id.krate);
decoder::each_inherent_implementation_for_type(&*cdata, def_id.node, callback)
}
pub fn each_implementation_for_trait<F>(cstore: &cstore::CStore,
def_id: ast::DefId,
mut callback: F) where
F: FnMut(ast::DefId),
{
cstore.iter_crate_data(|_, cdata| {
decoder::each_implementation_for_trait(cdata, def_id, &mut callback)
})
}
/// If the given def ID describes an item belonging to a trait (either a
/// default method or an implementation of a trait method), returns the ID of
/// the trait that the method belongs to. Otherwise, returns `None`.
pub fn get_trait_of_item(cstore: &cstore::CStore,
def_id: ast::DefId,
tcx: &ty::ctxt)
-> Option<ast::DefId> {
let cdata = cstore.get_crate_data(def_id.krate);
decoder::get_trait_of_item(&*cdata, def_id.node, tcx)
}
pub fn get_tuple_struct_definition_if_ctor(cstore: &cstore::CStore,
def_id: ast::DefId)
-> Option<ast::DefId>
{
let cdata = cstore.get_crate_data(def_id.krate);
decoder::get_tuple_struct_definition_if_ctor(&*cdata, def_id.node)
}
pub fn get_dylib_dependency_formats(cstore: &cstore::CStore,
cnum: ast::CrateNum)
-> Vec<(ast::CrateNum, cstore::LinkagePreference)>
{
let cdata = cstore.get_crate_data(cnum);
decoder::get_dylib_dependency_formats(&*cdata)
}
pub fn get_missing_lang_items(cstore: &cstore::CStore, cnum: ast::CrateNum)
-> Vec<lang_items::LangItem>
{
let cdata = cstore.get_crate_data(cnum);
decoder::get_missing_lang_items(&*cdata)
}
pub fn get_method_arg_names(cstore: &cstore::CStore, did: ast::DefId)
-> Vec<String>
{
let cdata = cstore.get_crate_data(did.krate);
decoder::get_method_arg_names(&*cdata, did.node)
}
pub fn get_reachable_extern_fns(cstore: &cstore::CStore, cnum: ast::CrateNum)
-> Vec<ast::DefId>
{
let cdata = cstore.get_crate_data(cnum);
decoder::get_reachable_extern_fns(&*cdata)
}
pub fn is_typedef(cstore: &cstore::CStore, did: ast::DefId) -> bool {
let cdata = cstore.get_crate_data(did.krate);
decoder::is_typedef(&*cdata, did.node)
}
pub fn get_stability(cstore: &cstore::CStore,
def: ast::DefId)
-> Option<attr::Stability> {
let cdata = cstore.get_crate_data(def.krate);
decoder::get_stability(&*cdata, def.node)
}
pub fn is_staged_api(cstore: &cstore::CStore, def: ast::DefId) -> bool {
let cdata = cstore.get_crate_data(def.krate);
let attrs = decoder::get_crate_attributes(cdata.data());
for attr in &attrs {
if &attr.name()[..] == "staged_api" {
match attr.node.value.node { ast::MetaWord(_) => return true, _ => (/*pass*/) }
}
}
return false;
}
pub fn get_repr_attrs(cstore: &cstore::CStore, def: ast::DefId)
-> Vec<attr::ReprAttr> {
let cdata = cstore.get_crate_data(def.krate);
decoder::get_repr_attrs(&*cdata, def.node)
}
pub fn is_associated_type(cstore: &cstore::CStore, def: ast::DefId) -> bool {
let cdata = cstore.get_crate_data(def.krate);
decoder::is_associated_type(&*cdata, def.node)
}
pub fn is_defaulted_trait(cstore: &cstore::CStore, trait_def_id: ast::DefId) -> bool {
let cdata = cstore.get_crate_data(trait_def_id.krate);
decoder::is_defaulted_trait(&*cdata, trait_def_id.node)
}
pub fn is_default_impl(cstore: &cstore::CStore, impl_did: ast::DefId) -> bool {
let cdata = cstore.get_crate_data(impl_did.krate);
decoder::is_default_impl(&*cdata, impl_did.node)
}<|fim▁end|> | |
<|file_name|>Doxyfile.py<|end_file_name|><|fim▁begin|># Doxyfile 1.7.4
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
#
# All text after a hash (#) is considered a comment and will be ignored.
# The format is:
# TAG = value [value, ...]
# For lists items can also be appended using:
# TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (" ").
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
# This tag specifies the encoding used for all characters in the config file
# that follow. The default is UTF-8 which is also the encoding used for all
# text before the first occurrence of this tag. Doxygen uses libiconv (or the
# iconv built into libc) for the transcoding. See
# http://www.gnu.org/software/libiconv for the list of possible encodings.
DOXYFILE_ENCODING = UTF-8
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
# by quotes) that should identify the project.
PROJECT_NAME =
# The PROJECT_NUMBER tag can be used to enter a project or revision number.
# This could be handy for archiving the generated documentation or
# if some version control system is used.
PROJECT_NUMBER =
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer
# a quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF =
# With the PROJECT_LOGO tag one can specify a logo or icon that is
# included in the documentation. The maximum height of the logo should not
# exceed 55 pixels and the maximum width should not exceed 200 pixels.
# Doxygen will copy the logo to the output directory.
PROJECT_LOGO =
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
# base path where the generated documentation will be put.
# If a relative path is entered, it will be relative to the location
# where doxygen was started. If left blank the current directory will be used.
OUTPUT_DIRECTORY =
# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
# 4096 sub-directories (in 2 levels) under the output directory of each output
# format and will distribute the generated files over these directories.
# Enabling this option can be useful when feeding doxygen a huge amount of
# source files, where putting all generated files in the same directory would
# otherwise cause performance problems for the file system.
CREATE_SUBDIRS = NO
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all constant output in the proper language.
# The default language is English, other supported languages are:
# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
OUTPUT_LANGUAGE = English
# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
# include brief member descriptions after the members that are listed in
# the file and class documentation (similar to JavaDoc).
# Set to NO to disable this.
BRIEF_MEMBER_DESC = YES
# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
# the brief description of a member or function before the detailed description.
# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
# brief descriptions will be completely suppressed.
REPEAT_BRIEF = YES
# This tag implements a quasi-intelligent brief description abbreviator
# that is used to form the text in various listings. Each string
# in this list, if found as the leading text of the brief description, will be
# stripped from the text and the result after processing the whole list, is
# used as the annotated text. Otherwise, the brief description is used as-is.
# If left blank, the following values are used ("$name" is automatically
# replaced with the name of the entity): "The $name class" "The $name widget"
# "The $name file" "is" "provides" "specifies" "contains"
# "represents" "a" "an" "the"
ABBREVIATE_BRIEF =
# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
# Doxygen will generate a detailed section even if there is only a brief
# description.
ALWAYS_DETAILED_SEC = YES
# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
# inherited members of a class in the documentation of that class as if those
# members were ordinary class members. Constructors, destructors and assignment
# operators of the base classes will not be shown.
INLINE_INHERITED_MEMB = NO
# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
# path before file names in the file list and in the header files. If set
# to NO the shortest path that makes the file name unique will be used.
FULL_PATH_NAMES = YES
# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
# can be used to strip a user-defined part of the path. Stripping is
# only done if one of the specified strings matches the left-hand part of
# the path. The tag can be used to show relative paths in the file list.
# If left blank the directory from which doxygen is run is used as the
# path to strip.
STRIP_FROM_PATH =
# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
# the path mentioned in the documentation of a class, which tells
# the reader which header file to include in order to use a class.
# If left blank only the name of the header file containing the class
# definition is used. Otherwise one should specify the include paths that
# are normally passed to the compiler using the -I flag.
STRIP_FROM_INC_PATH =
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
# (but less readable) file names. This can be useful if your file system
# doesn't support long names like on DOS, Mac, or CD-ROM.
SHORT_NAMES = NO
# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
# will interpret the first line (until the first dot) of a JavaDoc-style
# comment as the brief description. If set to NO, the JavaDoc
# comments will behave just like regular Qt-style comments
# (thus requiring an explicit @brief command for a brief description.)
JAVADOC_AUTOBRIEF = NO
# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
# interpret the first line (until the first dot) of a Qt-style
# comment as the brief description. If set to NO, the comments
# will behave just like regular Qt-style comments (thus requiring
# an explicit \brief command for a brief description.)
QT_AUTOBRIEF = NO
# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
# treat a multi-line C++ special comment block (i.e. a block of //! or ///
# comments) as a brief description. This used to be the default behaviour.
# The new default is to treat a multi-line C++ comment block as a detailed
# description. Set this tag to YES if you prefer the old behaviour instead.
MULTILINE_CPP_IS_BRIEF = NO
# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
# member inherits the documentation from any documented member that it
# re-implements.
INHERIT_DOCS = YES
# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
# a new page for each member. If set to NO, the documentation of a member will
# be part of the file/class/namespace that contains it.
SEPARATE_MEMBER_PAGES = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab.
# Doxygen uses this value to replace tabs by spaces in code fragments.
TAB_SIZE = 4
# This tag can be used to specify a number of aliases that acts
# as commands in the documentation. An alias has the form "name=value".
# For example adding "sideeffect=\par Side Effects:\n" will allow you to
# put the command \sideeffect (or @sideeffect) in the documentation, which
# will result in a user-defined paragraph with heading "Side Effects:".
# You can put \n's in the value part of an alias to insert newlines.
ALIASES =
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
# sources only. Doxygen will then generate output that is more tailored for C.
# For instance, some of the names that are used will be different. The list
# of all members will be omitted, etc.
OPTIMIZE_OUTPUT_FOR_C = NO
# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
# sources only. Doxygen will then generate output that is more tailored for
# Java. For instance, namespaces will be presented as packages, qualified
# scopes will look different, etc.
OPTIMIZE_OUTPUT_JAVA = NO
# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
# sources only. Doxygen will then generate output that is more tailored for
# Fortran.
OPTIMIZE_FOR_FORTRAN = NO
# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
# sources. Doxygen will then generate output that is tailored for
# VHDL.
OPTIMIZE_OUTPUT_VHDL = NO
# Doxygen selects the parser to use depending on the extension of the files it
# parses. With this tag you can assign which parser to use for a given extension.
# Doxygen has a built-in mapping, but you can override or extend it using this
# tag. The format is ext=language, where ext is a file extension, and language
# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
EXTENSION_MAPPING =
# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
# to include (a tag file for) the STL sources as input, then you should
# set this tag to YES in order to let doxygen match functions declarations and
# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
# func(std::string) {}). This also makes the inheritance and collaboration
# diagrams that involve STL classes more complete and accurate.
BUILTIN_STL_SUPPORT = NO
# If you use Microsoft's C++/CLI language, you should set this option to YES to
# enable parsing support.
CPP_CLI_SUPPORT = NO
# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
# Doxygen will parse them like normal C++ but will assume all classes use public
# instead of private inheritance when no explicit protection keyword is present.
SIP_SUPPORT = NO
# For Microsoft's IDL there are propget and propput attributes to indicate getter
# and setter methods for a property. Setting this option to YES (the default)
# will make doxygen replace the get and set methods by a property in the
# documentation. This will only work if the methods are indeed getting or
# setting a simple type. If this is not the case, or you want to show the
# methods anyway, you should set this option to NO.
IDL_PROPERTY_SUPPORT = YES
# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
# tag is set to YES, then doxygen will reuse the documentation of the first
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
DISTRIBUTE_GROUP_DOC = NO
# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
# the same type (for instance a group of public functions) to be put as a
# subgroup of that type (e.g. under the Public Functions section). Set it to
# NO to prevent subgrouping. Alternatively, this can be done per class using
# the \nosubgrouping command.
SUBGROUPING = YES
# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
# unions are shown inside the group in which they are included (e.g. using
# @ingroup) instead of on a separate page (for HTML and Man pages) or
# section (for LaTeX and RTF).
INLINE_GROUPED_CLASSES = NO
# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
# is documented as struct, union, or enum with the name of the typedef. So
# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
# with name TypeT. When disabled the typedef will appear as a member of a file,
# namespace, or class. And the struct will be named TypeS. This can typically
# be useful for C code in case the coding convention dictates that all compound
# types are typedef'ed and only the typedef is referenced, never the tag name.
TYPEDEF_HIDES_STRUCT = NO
# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
# determine which symbols to keep in memory and which to flush to disk.
# When the cache is full, less often used symbols will be written to disk.
# For small to medium size projects (<1000 input files) the default value is
# probably good enough. For larger projects a too small cache size can cause
# doxygen to be busy swapping symbols to and from disk most of the time
# causing a significant performance penalty.
# If the system has enough physical memory increasing the cache will improve the
# performance by keeping more symbols in memory. Note that the value works on
# a logarithmic scale so increasing the size by one will roughly double the
# memory usage. The cache size is given by this formula:
# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
# corresponding to a cache size of 2^16 = 65536 symbols
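# For example, SYMBOL_CACHE_SIZE = 3 gives 2^(16+3) = 524288 symbols, eight
# times the default.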
SYMBOL_CACHE_SIZE = 0
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
# documentation are documented, even if no documentation was available.
# Private class members and static file members will be hidden unless
# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
EXTRACT_ALL = YES
# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
# will be included in the documentation.
EXTRACT_PRIVATE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation.
EXTRACT_STATIC = NO
# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
# defined locally in source files will be included in the documentation.
# If set to NO only classes defined in header files are included.
EXTRACT_LOCAL_CLASSES = YES
# This flag is only useful for Objective-C code. When set to YES local
# methods, which are defined in the implementation section but not in
# the interface are included in the documentation.
# If set to NO (the default) only methods in the interface are included.
EXTRACT_LOCAL_METHODS = NO
# If this flag is set to YES, the members of anonymous namespaces will be
# extracted and appear in the documentation as a namespace called
# 'anonymous_namespace{file}', where file will be replaced with the base
# name of the file that contains the anonymous namespace. By default
# anonymous namespaces are hidden.
EXTRACT_ANON_NSPACES = NO
# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
# undocumented members of documented classes, files or namespaces.
# If set to NO (the default) these members will be included in the
# various overviews, but no documentation section is generated.
# This option has no effect if EXTRACT_ALL is enabled.
HIDE_UNDOC_MEMBERS = NO
# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy.
# If set to NO (the default) these classes will be included in the various
# overviews. This option has no effect if EXTRACT_ALL is enabled.
HIDE_UNDOC_CLASSES = NO
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
# friend (class|struct|union) declarations.
# If set to NO (the default) these declarations will be included in the
# documentation.
HIDE_FRIEND_COMPOUNDS = NO
# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
# documentation blocks found inside the body of a function.
# If set to NO (the default) these blocks will be appended to the
# function's detailed documentation block.
HIDE_IN_BODY_DOCS = NO
# The INTERNAL_DOCS tag determines if documentation
# that is typed after a \internal command is included. If the tag is set
# to NO (the default) then the documentation will be excluded.
# Set it to YES to include the internal documentation.
INTERNAL_DOCS = NO
# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
# file names in lower-case letters. If set to YES upper-case letters are also
# allowed. This is useful if you have classes or files whose names only differ
# in case and if your file system supports case sensitive file names. Windows
# and Mac users are advised to set this option to NO.
CASE_SENSE_NAMES = YES
# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
# will show members with their full class and namespace scopes in the
# documentation. If set to YES the scope will be hidden.
HIDE_SCOPE_NAMES = NO
# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
# will put a list of the files that are included by a file in the documentation
# of that file.
SHOW_INCLUDE_FILES = NO
# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
# will list include files with double quotes in the documentation
# rather than with sharp brackets.
FORCE_LOCAL_INCLUDES = NO
# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
# is inserted in the documentation for inline members.
INLINE_INFO = YES
# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
# will sort the (detailed) documentation of file and class members
# alphabetically by member name. If set to NO the members will appear in
# declaration order.
SORT_MEMBER_DOCS = NO
# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
# brief documentation of file, namespace and class members alphabetically
# by member name. If set to NO (the default) the members will appear in
# declaration order.
SORT_BRIEF_DOCS = NO
# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
# will sort the (brief and detailed) documentation of class members so that
# constructors and destructors are listed first. If set to NO (the default)
# the constructors will appear in the respective orders defined by
# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
SORT_MEMBERS_CTORS_1ST = NO
# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
# hierarchy of group names into alphabetical order. If set to NO (the default)
# the group names will appear in their defined order.
SORT_GROUP_NAMES = NO
# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
# sorted by fully-qualified names, including namespaces. If set to
# NO (the default), the class list will be sorted only by class name,
# not including the namespace part.
# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
# Note: This option applies only to the class list, not to the
# alphabetical list.
SORT_BY_SCOPE_NAME = NO
# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
# do proper type resolution of all parameters of a function it will reject a
# match between the prototype and the implementation of a member function even
# if there is only one candidate or it is obvious which candidate to choose
# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
# will still accept a match between prototype and implementation in such cases.
STRICT_PROTO_MATCHING = NO
# The GENERATE_TODOLIST tag can be used to enable (YES) or
# disable (NO) the todo list. This list is created by putting \todo
# commands in the documentation.
GENERATE_TODOLIST = YES
# The GENERATE_TESTLIST tag can be used to enable (YES) or
# disable (NO) the test list. This list is created by putting \test
# commands in the documentation.
GENERATE_TESTLIST = YES
# The GENERATE_BUGLIST tag can be used to enable (YES) or
# disable (NO) the bug list. This list is created by putting \bug
# commands in the documentation.
GENERATE_BUGLIST = YES
# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
# disable (NO) the deprecated list. This list is created by putting
# \deprecated commands in the documentation.
GENERATE_DEPRECATEDLIST= YES
# The ENABLED_SECTIONS tag can be used to enable conditional
# documentation sections, marked by \if sectionname ... \endif.
ENABLED_SECTIONS =
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
# the initial value of a variable or macro consists of for it to appear in
# the documentation. If the initializer consists of more lines than specified
# here it will be hidden. Use a value of 0 to hide initializers completely.
# The appearance of the initializer of individual variables and macros in the
# documentation can be controlled using \showinitializer or \hideinitializer
# command in the documentation regardless of this setting.
MAX_INITIALIZER_LINES = 30
# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
# at the bottom of the documentation of classes and structs. If set to YES the
# list will mention the files that were used to generate the documentation.
SHOW_USED_FILES = YES
# If the sources in your project are distributed over multiple directories
# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
# in the documentation. The default is NO.
SHOW_DIRECTORIES = NO
# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
# This will remove the Files entry from the Quick Index and from the
# Folder Tree View (if specified). The default is YES.
SHOW_FILES = YES
# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
# Namespaces page.
# This will remove the Namespaces entry from the Quick Index
# and from the Folder Tree View (if specified). The default is YES.
SHOW_NAMESPACES = YES
# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically from
# the version control system). Doxygen will invoke the program by executing (via
# popen()) the command <command> <input-file>, where <command> is the value of
# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
# provided by doxygen. Whatever the program writes to standard output
# is used as the file version. See the manual for examples.
FILE_VERSION_FILTER =
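# For example (illustrative only -- the script below is hypothetical and
# would need to print a version string for the file it is given):
# FILE_VERSION_FILTER = "sh ./scripts/print-file-version.sh"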
# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
# by doxygen. The layout file controls the global structure of the generated
# output files in an output format independent way. To create the layout file
# that represents doxygen's defaults, run doxygen with the -l option.
# You can optionally specify a file name after the option, if omitted
# DoxygenLayout.xml will be used as the name of the layout file.
#LAYOUT_FILE = DoxygenLayout.xml
#---------------------------------------------------------------------------
# configuration options related to warning and progress messages
#---------------------------------------------------------------------------
# The QUIET tag can be used to turn on/off the messages that are generated
# by doxygen. Possible values are YES and NO. If left blank NO is used.
QUIET = YES
# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated by doxygen. Possible values are YES and NO. If left blank
# NO is used.
WARNINGS = YES
# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
# automatically be disabled.
WARN_IF_UNDOCUMENTED = YES
# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
# potential errors in the documentation, such as not documenting some
# parameters in a documented function, or documenting parameters that
# don't exist or using markup commands wrongly.
WARN_IF_DOC_ERROR = YES
# The WARN_NO_PARAMDOC option can be enabled to get warnings for
# functions that are documented, but have no documentation for their parameters
# or return value. If set to NO (the default) doxygen will only warn about
# wrong or incomplete parameter documentation, but not about the absence of
# documentation.
WARN_NO_PARAMDOC = YES
# The WARN_FORMAT tag determines the format of the warning messages that
# doxygen can produce. The string should contain the $file, $line, and $text
# tags, which will be replaced by the file and line number from which the
# warning originated and the warning text. Optionally the format may contain
# $version, which will be replaced by the version of the file (if it could
# be obtained via FILE_VERSION_FILTER)
WARN_FORMAT = "$file:$line: $text"
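# For example, a Visual-Studio-style message format (an alternative sketch,
# not the default) would be:
# WARN_FORMAT = "$file($line): $text"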
# The WARN_LOGFILE tag can be used to specify a file to which warning
# and error messages should be written. If left blank the output is written
# to stderr.
WARN_LOGFILE =
#---------------------------------------------------------------------------
# configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag can be used to specify the files and/or directories that contain
# documented source files. You may enter file names like "myfile.cpp" or
# directories like "/usr/src/myproject". Separate the files or directories
# with spaces.
INPUT =
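# A typical layout might look as follows (directory names are illustrative
# assumptions about a project, not defaults):
# INPUT = src include docs/mainpage.dox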
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
# also the default input encoding. Doxygen uses libiconv (or the iconv built
# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
# the list of possible encodings.
INPUT_ENCODING = UTF-8
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
# and *.h) to filter out the source-files in the directories. If left
# blank the following patterns are tested:
# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
# *.f90 *.f *.for *.vhd *.vhdl
FILE_PATTERNS =
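# For example, to restrict parsing to C sources and headers (example
# values only):
# FILE_PATTERNS = *.c *.h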
# The RECURSIVE tag can be used to specify whether or not subdirectories
# should be searched for input files as well. Possible values are YES and NO.
# If left blank NO is used.
RECURSIVE = NO
# The EXCLUDE tag can be used to specify files and/or directories that should
# be excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT tag.
EXCLUDE =
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
EXCLUDE_SYMLINKS = NO
# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
# certain files from those directories. Note that the wildcards are matched
# against the file with absolute path, so to exclude all test directories
# for example use the pattern */test/*
EXCLUDE_PATTERNS =
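# For example, to exclude all test and third-party directories (directory
# names are illustrative):
# EXCLUDE_PATTERNS = */test/* */third_party/*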
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# AClass::ANamespace, ANamespace::*Test
EXCLUDE_SYMBOLS =
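# For example, to hide an internal namespace and generated helpers (symbol
# names are illustrative):
# EXCLUDE_SYMBOLS = internal::* *_generated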
# The EXAMPLE_PATH tag can be used to specify one or more files or
# directories that contain example code fragments that are included (see
# the \include command).
EXAMPLE_PATH =
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
# and *.h) to filter out the source-files in the directories. If left
# blank all files are included.
EXAMPLE_PATTERNS =
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude
# commands irrespective of the value of the RECURSIVE tag.
# Possible values are YES and NO. If left blank NO is used.
EXAMPLE_RECURSIVE = NO
# The IMAGE_PATH tag can be used to specify one or more files or
# directories that contain images that are included in the documentation (see
# the \image command).
IMAGE_PATH =
# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
# by executing (via popen()) the command <filter> <input-file>, where <filter>
# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
# input file. Doxygen will then use the output that the filter program writes
# to standard output.
# If FILTER_PATTERNS is specified, this tag will be
# ignored.
INPUT_FILTER =
# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
# basis. Doxygen will compare the file name with each pattern and apply the
# filter if there is a match. The filters are a list of the form:
# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
# info on how filters are used. If FILTER_PATTERNS is empty or if
# none of the patterns match the file name, INPUT_FILTER is applied.
FILTER_PATTERNS =
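# A sketch of per-pattern filtering, assuming a doxypy-style Python filter
# is installed at the given (hypothetical) path:
# FILTER_PATTERNS = *.py=/usr/local/bin/doxypy.py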
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER) will be used to filter the input files when producing source
# files to browse (i.e. when SOURCE_BROWSER is set to YES).
FILTER_SOURCE_FILES = NO
# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
# and it is also possible to disable source filtering for a specific pattern
# using *.ext= (so without naming a filter). This option only has effect when
# FILTER_SOURCE_FILES is enabled.
FILTER_SOURCE_PATTERNS =
#---------------------------------------------------------------------------
# configuration options related to source browsing
#---------------------------------------------------------------------------
# If the SOURCE_BROWSER tag is set to YES then a list of source files will
# be generated. Documented entities will be cross-referenced with these sources.
# Note: To get rid of all source code in the generated output, make sure also
# VERBATIM_HEADERS is set to NO.
SOURCE_BROWSER = NO
# Setting the INLINE_SOURCES tag to YES will include the body
# of functions and classes directly in the documentation.
INLINE_SOURCES = NO
# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
# doxygen to hide any special comment blocks from generated source code
# fragments. Normal C and C++ comments will always remain visible.
STRIP_CODE_COMMENTS = YES
# If the REFERENCED_BY_RELATION tag is set to YES
# then for each documented function all documented
# functions referencing it will be listed.
REFERENCED_BY_RELATION = NO
# If the REFERENCES_RELATION tag is set to YES
# then for each documented function all documented entities
# called/used by that function will be listed.
REFERENCES_RELATION = NO
# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
# link to the source code.
# Otherwise they will link to the documentation.
REFERENCES_LINK_SOURCE = YES
# If the USE_HTAGS tag is set to YES then the references to source code
# will point to the HTML generated by the htags(1) tool instead of doxygen
# built-in source browser. The htags tool is part of GNU's global source
# tagging system (see http://www.gnu.org/software/global/global.html). You
# will need version 4.8.6 or higher.
USE_HTAGS = NO
# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
# will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this.
VERBATIM_HEADERS = YES
#---------------------------------------------------------------------------
# configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
# of all compounds will be generated. Enable this if the project
# contains a lot of classes, structs, unions or interfaces.
ALPHABETICAL_INDEX = NO
# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
# in which this list will be split (can be a number in the range [1..20])
COLS_IN_ALPHA_INDEX = 5
# In case all classes in a project start with a common prefix, all
# classes will be put under the same header in the alphabetical index.
# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
# should be ignored while generating the index headers.
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# configuration options related to the HTML output
#---------------------------------------------------------------------------
# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
# generate HTML output.
GENERATE_HTML = YES
# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `html' will be used as the default path.
HTML_OUTPUT = html
# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
# doxygen will generate files with .html extension.
HTML_FILE_EXTENSION = .html
# The HTML_HEADER tag can be used to specify a personal HTML header for
# each generated HTML page. If it is left blank doxygen will generate a
# standard header. Note that when using a custom header you are responsible
# for the proper inclusion of any scripts and style sheets that doxygen
# needs, which is dependent on the configuration options used.
# It is advised to generate a default header using "doxygen -w html
# header.html footer.html stylesheet.css YourConfigFile" and then modify
# that header. Note that the header is subject to change so you typically
# have to redo this when upgrading to a newer version of doxygen or when
# changing the value of configuration settings such as GENERATE_TREEVIEW!
HTML_HEADER =
# The HTML_FOOTER tag can be used to specify a personal HTML footer for
# each generated HTML page. If it is left blank doxygen will generate a
# standard footer.
HTML_FOOTER =
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
# style sheet that is used by each HTML page. It can be used to
# fine-tune the look of the HTML output. If the tag is left blank doxygen
# will generate a default style sheet. Note that doxygen will try to copy
# the style sheet file to the HTML output directory, so don't put your own
# stylesheet in the HTML output directory as well, or it will be erased!
HTML_STYLESHEET =
# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
# that these files will be copied to the base HTML output directory. Use the
# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
# files. In the HTML_STYLESHEET file, use the file name only. Also note that
# the files will be copied as-is; there are no commands or markers available.
HTML_EXTRA_FILES =
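# For example (file names are placeholders):
# HTML_EXTRA_FILES = images/logo.png js/custom.js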
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
# Doxygen will adjust the colors in the stylesheet and background images
# according to this color. Hue is specified as an angle on a colorwheel,
# see http://en.wikipedia.org/wiki/Hue for more information.
# For instance the value 0 represents red, 60 is yellow, 120 is green,
# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
# The allowed range is 0 to 359.
HTML_COLORSTYLE_HUE = 220
# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
# the colors in the HTML output. For a value of 0 the output will use
# grayscales only. A value of 255 will produce the most vivid colors.
HTML_COLORSTYLE_SAT = 100
# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
# the luminance component of the colors in the HTML output. Values below
# 100 gradually make the output lighter, whereas values above 100 make
# the output darker. The value divided by 100 is the actual gamma applied,
# so 80 represents a gamma of 0.8, the value 220 represents a gamma of 2.2,
# and 100 does not change the gamma.
HTML_COLORSTYLE_GAMMA = 80
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
# page will contain the date and time when the page was generated. Setting
# this to NO can help when comparing the output of multiple runs.
HTML_TIMESTAMP = YES
# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
# files or namespaces will be aligned in HTML using tables. If set to
# NO a bullet list will be used.
HTML_ALIGN_MEMBERS = YES
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded. For this to work a browser that supports
# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox,
# Netscape 6.0+, Internet Explorer 5.0+, Konqueror, or Safari).
HTML_DYNAMIC_SECTIONS = NO
# If the GENERATE_DOCSET tag is set to YES, additional index files
# will be generated that can be used as input for Apple's Xcode 3
# integrated development environment, introduced with OSX 10.5 (Leopard).
# To create a documentation set, doxygen will generate a Makefile in the
# HTML output directory. Running make will produce the docset in that
# directory and running "make install" will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
# it at startup.
# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
# for more information.
GENERATE_DOCSET = NO
# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
# feed. A documentation feed provides an umbrella under which multiple
# documentation sets from a single provider (such as a company or product suite)
# can be grouped.
DOCSET_FEEDNAME = "Doxygen generated docs"
# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
# should uniquely identify the documentation set bundle. This should be a
# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
# will append .docset to the name.
DOCSET_BUNDLE_ID = org.doxygen.Project
# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
# the documentation publisher. This should be a reverse domain-name style
# string, e.g. com.mycompany.MyDocSet.documentation.
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES, additional index files
# will be generated that can be used as input for tools like the
# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
# of the generated HTML documentation.
GENERATE_HTMLHELP = NO
# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
# be used to specify the file name of the resulting .chm file. You
# can add a path in front of the file if the result should not be
# written to the html output directory.
CHM_FILE =
# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
# be used to specify the location (absolute path including file name) of
# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
# the HTML help compiler on the generated index.hhp.
HHC_LOCATION =
# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
# controls if a separate .chi index file is generated (YES) or that
# it should be included in the master .chm file (NO).
GENERATE_CHI = NO
# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
# is used to encode HtmlHelp index (hhk), content (hhc) and project file
# content.
CHM_INDEX_ENCODING =
# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
# controls whether a binary table of contents is generated (YES) or a
# normal table of contents (NO) in the .chm file.
BINARY_TOC = NO
# The TOC_EXPAND flag can be set to YES to add extra items for group members
# to the contents of the HTML help documentation and to the tree view.
TOC_EXPAND = NO
# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
# that can be used as input for Qt's qhelpgenerator to generate a
# Qt Compressed Help (.qch) of the generated HTML documentation.
GENERATE_QHP = NO
# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
# be used to specify the file name of the resulting .qch file.
# The path specified is relative to the HTML output folder.
QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating
# Qt Help Project output. For more information please see
# http://doc.trolltech.com/qthelpproject.html#namespace
QHP_NAMESPACE = org.doxygen.Project
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
# Qt Help Project output. For more information please see
# http://doc.trolltech.com/qthelpproject.html#virtual-folders
QHP_VIRTUAL_FOLDER = doc
# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
# add. For more information please see
# http://doc.trolltech.com/qthelpproject.html#custom-filters
QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see
# http://doc.trolltech.com/qthelpproject.html#custom-filters
# (Qt Help Project / Custom Filters).
QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. For more information please see
# http://doc.trolltech.com/qthelpproject.html#filter-attributes
# (Qt Help Project / Filter Attributes).
QHP_SECT_FILTER_ATTRS =
# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
# be used to specify the location of Qt's qhelpgenerator.
# If non-empty doxygen will try to run qhelpgenerator on the generated
# .qhp file.
QHG_LOCATION =
# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
# will be generated, which together with the HTML files, form an Eclipse help
# plugin. To install this plugin and make it available under the help contents
# menu in Eclipse, the contents of the directory containing the HTML and XML
# files needs to be copied into the plugins directory of eclipse. The name of
# the directory within the plugins directory should be the same as
# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
# the help appears.
GENERATE_ECLIPSEHELP = NO
# A unique identifier for the eclipse help plugin. When installing the plugin
# the directory name containing the HTML and XML files should also have
# this name.
ECLIPSE_DOC_ID = org.doxygen.Project
# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
# top of each HTML page. The value NO (the default) enables the index and
# the value YES disables it.
DISABLE_INDEX = YES
# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
# (range [0,1..20]) that doxygen will group on one line in the generated HTML
# documentation. Note that a value of 0 will completely suppress the enum
# values from appearing in the overview section.
ENUM_VALUES_PER_LINE = 4
# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information.
# If the tag value is set to YES, a side panel will be generated
# containing a tree-like index structure (just like the one that
# is generated for HTML Help). For this to work a browser that supports
# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
# Windows users are probably better off using the HTML help feature.
GENERATE_TREEVIEW = NO
# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
# and Class Hierarchy pages using a tree view instead of an ordered list.
USE_INLINE_TREES = NO
# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
# used to set the initial width (in pixels) of the frame in which the tree
# is shown.
TREEVIEW_WIDTH = 250
# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
# links to external symbols imported via tag files in a separate window.
EXT_LINKS_IN_WINDOW = NO
# Use this tag to change the font size of Latex formulas included
# as images in the HTML documentation. The default is 10. Note that
# when you change the font size after a successful doxygen run you need
# to manually remove any form_*.png images from the HTML output directory
# to force them to be regenerated.
FORMULA_FONTSIZE = 10
# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
# generated for formulas are transparent PNGs. Transparent PNGs are
# not supported properly for IE 6.0, but are supported on all modern browsers.
# Note that when changing this option you need to delete any form_*.png files
# in the HTML output before the changes have effect.
FORMULA_TRANSPARENT = YES
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
# (see http://www.mathjax.org) which uses client side Javascript for the
# rendering instead of using prerendered bitmaps. Use this if you do not
# have LaTeX installed or if you want the formulas to look prettier in the HTML
# output. When enabled you also need to install MathJax separately and
# configure the path to it using the MATHJAX_RELPATH option.
USE_MATHJAX = NO
# When MathJax is enabled you need to specify the location relative to the
# HTML output directory using the MATHJAX_RELPATH option. The destination
# directory should contain the MathJax.js script. For instance, if the mathjax
# directory is located at the same level as the HTML output directory, then
# MATHJAX_RELPATH should be ../mathjax. The default value points to the
# mathjax.org site, so you can quickly see the result without installing
# MathJax, but it is strongly recommended to install a local copy of MathJax
# before deployment.
MATHJAX_RELPATH = http://www.mathjax.org/mathjax
# When the SEARCHENGINE tag is enabled doxygen will generate a search box
# for the HTML output. The underlying search engine uses javascript
# and DHTML and should work on any modern browser. Note that when using
# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
# (GENERATE_DOCSET) there is already a search function so this one should
# typically be disabled. For large projects the javascript based search engine
# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
SEARCHENGINE = NO
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a PHP enabled web server instead of at the web client
# using Javascript. Doxygen will generate the search PHP script and index
# file to put on the web server. The advantage of the server
# based approach is that it scales better to large projects and allows
# full text search. The disadvantages are that it is more difficult to setup
# and does not have live searching capabilities.
SERVER_BASED_SEARCH = NO
#---------------------------------------------------------------------------
# configuration options related to the LaTeX output
#---------------------------------------------------------------------------
# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
# generate Latex output.
GENERATE_LATEX = NO
# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `latex' will be used as the default path.
LATEX_OUTPUT = latex
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked. If left blank `latex' will be used as the default command name.
# Note that when enabling USE_PDFLATEX this option is only used for
# generating bitmaps for formulas in the HTML output, but not in the
# Makefile that is written to the output directory.
LATEX_CMD_NAME = latex
# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
# generate index for LaTeX. If left blank `makeindex' will be used as the
# default command name.
MAKEINDEX_CMD_NAME = makeindex
# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
# LaTeX documents. This may be useful for small projects and may help to
# save some trees in general.
COMPACT_LATEX = NO
# The PAPER_TYPE tag can be used to set the paper type that is used
# by the printer. Possible values are: a4, letter, legal and
# executive. If left blank a4wide will be used.
PAPER_TYPE = a4
# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
# packages that should be included in the LaTeX output.
EXTRA_PACKAGES =
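# For example, to pull in extra math support (assuming a standard TeX
# distribution provides these packages):
# EXTRA_PACKAGES = amsmath amssymb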
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
# the generated latex document. The header should contain everything until
# the first chapter. If it is left blank doxygen will generate a
# standard header. Notice: only use this tag if you know what you are doing!
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
# the generated latex document. The footer should contain everything after
# the last chapter. If it is left blank doxygen will generate a
# standard footer. Notice: only use this tag if you know what you are doing!
LATEX_FOOTER =
# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
# is prepared for conversion to pdf (using ps2pdf). The pdf file will
# contain links (just like the HTML output) instead of page references
# This makes the output suitable for online browsing using a pdf viewer.
PDF_HYPERLINKS = YES
# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
# plain latex in the generated Makefile. Set this option to YES to get a
# higher quality PDF documentation.
USE_PDFLATEX = YES
# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
# command to the generated LaTeX files. This will instruct LaTeX to keep
# running if errors occur, instead of asking the user for help.
# This option is also used when generating formulas in HTML.
LATEX_BATCHMODE = NO
# If LATEX_HIDE_INDICES is set to YES then doxygen will not
# include the index chapters (such as File Index, Compound Index, etc.)
# in the output.
LATEX_HIDE_INDICES = NO
# If LATEX_SOURCE_CODE is set to YES then doxygen will include
# source code with syntax highlighting in the LaTeX output.
# Note that which sources are shown also depends on other settings
# such as SOURCE_BROWSER.
LATEX_SOURCE_CODE = NO
#---------------------------------------------------------------------------
# configuration options related to the RTF output
#---------------------------------------------------------------------------
# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
# The RTF output is optimized for Word 97 and may not look very pretty with
# other RTF readers or editors.
GENERATE_RTF = NO
# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `rtf' will be used as the default path.
RTF_OUTPUT = rtf
# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
# RTF documents. This may be useful for small projects and may help to
# save some trees in general.
COMPACT_RTF = NO
# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
# will contain hyperlink fields. The RTF file will
# contain links (just like the HTML output) instead of page references.
# This makes the output suitable for online browsing using WORD or other
# programs which support those fields.
# Note: wordpad (write) and others do not support links.
RTF_HYPERLINKS = NO
# Load stylesheet definitions from file. Syntax is similar to doxygen's
# config file, i.e. a series of assignments. You only have to provide
# replacements, missing definitions are set to their default value.
RTF_STYLESHEET_FILE =
# Set optional variables used in the generation of an rtf document.
# Syntax is similar to doxygen's config file.
RTF_EXTENSIONS_FILE =
#---------------------------------------------------------------------------
# configuration options related to the man page output
#---------------------------------------------------------------------------
# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
# generate man pages
GENERATE_MAN = NO
# The MAN_OUTPUT tag is used to specify where the man pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `man' will be used as the default path.
MAN_OUTPUT = man
# The MAN_EXTENSION tag determines the extension that is added to
# the generated man pages (default is the subroutine's section .3)
MAN_EXTENSION = .3
# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
# then it will generate one additional man file for each entity
# documented in the real man page(s). These additional files
# only source the real man page, but without them the man command
# would be unable to find the correct page. The default is NO.
MAN_LINKS = NO
#---------------------------------------------------------------------------
# configuration options related to the XML output
#---------------------------------------------------------------------------
# If the GENERATE_XML tag is set to YES Doxygen will
# generate an XML file that captures the structure of
# the code including all documentation.
GENERATE_XML = NO
# The XML_OUTPUT tag is used to specify where the XML pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `xml' will be used as the default path.
XML_OUTPUT = xml
# The XML_SCHEMA tag can be used to specify an XML schema,
# which can be used by a validating XML parser to check the
# syntax of the XML files.
XML_SCHEMA =
# The XML_DTD tag can be used to specify an XML DTD,
# which can be used by a validating XML parser to check the
# syntax of the XML files.
XML_DTD =
# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
# dump the program listings (including syntax highlighting
# and cross-referencing information) to the XML output. Note that
# enabling this will significantly increase the size of the XML output.
XML_PROGRAMLISTING = YES
#---------------------------------------------------------------------------
# configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
# generate an AutoGen Definitions (see autogen.sf.net) file
# that captures the structure of the code including all
# documentation. Note that this feature is still experimental
# and incomplete at the moment.
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
# configuration options related to the Perl module output
#---------------------------------------------------------------------------
# If the GENERATE_PERLMOD tag is set to YES Doxygen will
# generate a Perl module file that captures the structure of
# the code including all documentation. Note that this
# feature is still experimental and incomplete at the
# moment.
GENERATE_PERLMOD = NO
# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
# the necessary Makefile rules, Perl scripts and LaTeX code to be able
# to generate PDF and DVI output from the Perl module output.
PERLMOD_LATEX = NO
# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
# nicely formatted so it can be parsed by a human reader. This is useful
# if you want to understand what is going on. On the other hand, if this
# tag is set to NO the size of the Perl module output will be much smaller
# and Perl will parse it just the same.
PERLMOD_PRETTY = YES
# The names of the make variables in the generated doxyrules.make file
# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
# This is useful so different doxyrules.make files included by the same
# Makefile don't overwrite each other's variables.
PERLMOD_MAKEVAR_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
# evaluate all C-preprocessor directives found in the sources and include
# files.
ENABLE_PREPROCESSING = YES
# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
# names in the source code. If set to NO (the default) only conditional
# compilation will be performed. Macro expansion can be done in a controlled
# way by setting EXPAND_ONLY_PREDEF to YES.
MACRO_EXPANSION = NO
# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
# then the macro expansion is limited to the macros specified with the
# PREDEFINED and EXPAND_AS_DEFINED tags.
EXPAND_ONLY_PREDEF = NO
# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
# pointed to by INCLUDE_PATH will be searched when a #include is found.
SEARCH_INCLUDES = YES
# The INCLUDE_PATH tag can be used to specify one or more directories that
# contain include files that are not input files but should be processed by
# the preprocessor.
INCLUDE_PATH =
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
# directories. If left blank, the patterns specified with FILE_PATTERNS will
# be used.
INCLUDE_FILE_PATTERNS =
# The PREDEFINED tag can be used to specify one or more macro names that
# are defined before the preprocessor is started (similar to the -D option of
# gcc). The argument of the tag is a list of macros of the form: name
# or name=definition (no spaces). If the definition and the = are
# omitted =1 is assumed. To prevent a macro definition from being
# undefined via #undef or recursively expanded use the := operator
# instead of the = operator.
PREDEFINED =
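# For example, to expose code hidden behind a feature macro and pin a
# constant (macro names are illustrative):
# PREDEFINED = DOXYGEN_RUNNING HAS_FEATURE_X:=1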
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
# this tag can be used to specify a list of macro names that should be expanded.
# The macro definition that is found in the sources will be used.
# Use the PREDEFINED tag if you want to use a different macro definition that
# overrules the definition found in the source code.
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
# doxygen's preprocessor will remove all references to function-like macros
# that are alone on a line, have an all uppercase name, and do not end with a
# semicolon, because these will confuse the parser if not removed.
SKIP_FUNCTION_MACROS = YES
#---------------------------------------------------------------------------
# Configuration::additions related to external references
#---------------------------------------------------------------------------
# The TAGFILES option can be used to specify one or more tagfiles.
# Optionally an initial location of the external documentation
# can be added for each tagfile. The format of a tag file without
# this location is as follows:
#
# TAGFILES = file1 file2 ...
# Adding location for the tag files is done as follows:
#
# TAGFILES = file1=loc1 "file2 = loc2" ...
# where "loc1" and "loc2" can be relative or absolute paths or
# URLs. If a location is present for each tag, the installdox tool
# does not have to be run to correct the links.
# Note that each tag file must have a unique name
# (where the name does NOT include the path)
# If a tag file is not located in the directory in which doxygen
# is run, you must also specify the path to the tagfile here.
TAGFILES =
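# For example, to link against an external project's documentation (tag
# file path and URL are hypothetical):
# TAGFILES = external/qtools.tag=http://example.org/qtools/html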
# When a file name is specified after GENERATE_TAGFILE, doxygen will create
# a tag file that is based on the input files it reads.
GENERATE_TAGFILE =
# If the ALLEXTERNALS tag is set to YES all external classes will be listed
# in the class index. If set to NO only the inherited external classes
# will be listed.
ALLEXTERNALS = NO
# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
# in the modules index. If set to NO, only the current project's groups will
# be listed.
EXTERNAL_GROUPS = YES
# The PERL_PATH should be the absolute path and name of the perl script
# interpreter (i.e. the result of `which perl').
PERL_PATH = /usr/bin/perl
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
# or super classes. Setting the tag to NO turns the diagrams off. Note that
# this option also works with HAVE_DOT disabled, but it is recommended to
# install and use dot, since it yields more powerful graphs.
CLASS_DIAGRAMS = YES
# You can define message sequence charts within doxygen comments using the \msc
# command. Doxygen will then run the mscgen tool (see
# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
# documentation. The MSCGEN_PATH tag allows you to specify the directory where
# the mscgen tool resides. If left empty the tool is assumed to be found in the
# default search path.
MSCGEN_PATH =
# If set to YES, the inheritance and collaboration graphs will hide
# inheritance and usage relations if the target is undocumented
# or is not a class.
HIDE_UNDOC_RELATIONS = YES
# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
# available from the path. This tool is part of Graphviz, a graph visualization
# toolkit from AT&T and Lucent Bell Labs. The other options in this section
# have no effect if this option is set to NO (the default)
HAVE_DOT = NO
# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
# allowed to run in parallel. When set to 0 (the default) doxygen will
# base this on the number of processors available in the system. You can set it
# explicitly to a value larger than 0 to get control over the balance
# between CPU load and processing speed.
DOT_NUM_THREADS = 0
# By default doxygen will write a font called Helvetica to the output
# directory and reference it in all dot files that doxygen generates.
# When you want a differently looking font you can specify the font name
# using DOT_FONTNAME. You need to make sure dot is able to find the font,
# which can be done by putting it in a standard location or by setting the
# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
# containing the font.
DOT_FONTNAME = Helvetica
# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
# The default size is 10pt.
DOT_FONTSIZE = 10
# By default doxygen will tell dot to use the output directory to look for the
# FreeSans.ttf font (which doxygen will put there itself). If you specify a
# different font using DOT_FONTNAME you can set the path where dot
# can find it using this tag.
DOT_FONTPATH =
# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for each documented class showing the direct and
# indirect inheritance relations. Setting this tag to YES will force the
# CLASS_DIAGRAMS tag to NO.
CLASS_GRAPH = YES
# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for each documented class showing the direct and
# indirect implementation dependencies (inheritance, containment, and
# class references variables) of the class with other documented classes.
COLLABORATION_GRAPH = YES
# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for groups, showing the direct groups dependencies
GROUP_GRAPHS = YES
# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.
UML_LOOK = NO
# If set to YES, the inheritance and collaboration graphs will show the
# relations between templates and their instances.
TEMPLATE_RELATIONS = NO
# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
# tags are set to YES then doxygen will generate a graph for each documented
# file showing the direct and indirect include dependencies of the file with
# other documented files.
INCLUDE_GRAPH = YES
# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
# documented header file showing the documented files that directly or
# indirectly include this file.
INCLUDED_BY_GRAPH = YES
# If the CALL_GRAPH and HAVE_DOT options are set to YES then
# doxygen will generate a call dependency graph for every global function
# or class method. Note that enabling this option will significantly increase
# the time of a run. So in most cases it will be better to enable call graphs
# for selected functions only using the \callgraph command.
CALL_GRAPH = NO
# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
# doxygen will generate a caller dependency graph for every global function
# or class method. Note that enabling this option will significantly increase
# the time of a run. So in most cases it will be better to enable caller
# graphs for selected functions only using the \callergraph command.
CALLER_GRAPH = NO
# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
# will generate a graphical hierarchy of all classes instead of a textual one.
GRAPHICAL_HIERARCHY = YES
# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
# then doxygen will show the dependencies a directory has on other directories
# in a graphical way. The dependency relations are determined by the #include
# relations between the files in the directories.
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. Possible values are svg, png, jpg, or gif.
# If left blank png will be used.
DOT_IMAGE_FORMAT = png
# The tag DOT_PATH can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.
DOT_PATH =
# The DOTFILE_DIRS tag can be used to specify one or more directories that
# contain dot files that are included in the documentation (see the
# \dotfile command).
DOTFILE_DIRS =
# The MSCFILE_DIRS tag can be used to specify one or more directories that
# contain msc files that are included in the documentation (see the
# \mscfile command).
MSCFILE_DIRS =
# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
# nodes that will be shown in the graph. If the number of nodes in a graph
# becomes larger than this value, doxygen will truncate the graph, which is
# visualized by representing a node as a red box. Note that if the
# number of direct children of the root node in a graph is already larger than
# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
DOT_GRAPH_MAX_NODES = 50
# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
# graphs generated by dot. A depth value of 3 means that only nodes reachable
# from the root by following a path via at most 3 edges will be shown. Nodes
# that lay further from the root node will be omitted. Note that setting this
# option to 1 or 2 may greatly reduce the computation time needed for large
# code bases. Also note that the size of a graph can be further restricted by
# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
MAX_DOT_GRAPH_DEPTH = 0
# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
# background. This is disabled by default, because dot on Windows does not
# seem to support this out of the box. Warning: Depending on the platform used,
# enabling this option may lead to badly anti-aliased labels on the edges of
# a graph (i.e. they become hard to read).
DOT_TRANSPARENT = NO
# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10)
# support this, this feature is disabled by default.
DOT_MULTI_TARGETS = NO
# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
# generate a legend page explaining the meaning of the various boxes and
# arrows in the dot generated graphs.
GENERATE_LEGEND = YES
# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
# remove the intermediate dot files that are used to generate
# the various graphs.
DOT_CLEANUP = YES