prompt
large_stringlengths 70
991k
| completion
large_stringlengths 0
1.02k
|
---|---|
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(box_syntax)]
#![feature(core_intrinsics)]
#![feature(link_args)]
#![feature(plugin)]
#![feature(unicode)]
#![allow(non_camel_case_types)]
#![plugin(plugins)]
#[macro_use]
extern crate log;
extern crate servo;
extern crate compositing;
extern crate euclid;
extern crate gfx_traits;
extern crate gleam;
extern crate glutin_app;
extern crate rustc_unicode;
extern crate script_traits;
extern crate servo_url;
extern crate style_traits;
extern crate net_traits;
extern crate msg;
extern crate util;
extern crate libc;
#[cfg(target_os="macos")]
#[link_args="-Xlinker -undefined -Xlinker dynamic_lookup"]
extern { }
#[cfg(target_os="macos")]
extern crate cocoa;
#[cfg(target_os="macos")]
#[macro_use]
extern crate objc;
#[cfg(target_os="linux")] extern crate x11;
// Must come first.
pub mod macros;
pub mod browser;
pub mod browser_host;
pub mod command_line;
pub mod cookie;
pub mod core;
pub mod drag_data;
pub mod eutil;
pub mod frame;
pub mod interfaces;
pub mod print_settings;
pub mod process_message;
pub mod render_handler;<|fim▁hole|>pub mod request;
pub mod request_context;
pub mod response;
pub mod stream;
pub mod string;
pub mod string_list;
pub mod string_map;
pub mod string_multimap;
pub mod stubs;
pub mod switches;
pub mod task;
pub mod types;
pub mod urlrequest;
pub mod v8;
pub mod values;
pub mod window;
pub mod wrappers;
pub mod xml_reader;
pub mod zip_reader;<|fim▁end|> | |
<|file_name|>AppContainer.js<|end_file_name|><|fim▁begin|>/**
* Copyright (c) Nicolas Gallagher.
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*
*
*/
import StyleSheet from '../StyleSheet';
import View from '../View';
import React, { createContext } from 'react';
var RootTagContext = createContext(null);<|fim▁hole|>export default function AppContainer(props) {
var children = props.children,
WrapperComponent = props.WrapperComponent;
var innerView = React.createElement(View, {
children: children,
key: 1,
pointerEvents: "box-none",
style: styles.appContainer
});
if (WrapperComponent) {
innerView = React.createElement(WrapperComponent, null, innerView);
}
return React.createElement(RootTagContext.Provider, {
value: props.rootTag
}, React.createElement(View, {
pointerEvents: "box-none",
style: styles.appContainer
}, innerView));
}
// Shared style for both the outer and inner container Views: fill all
// available space in the root.
var styles = StyleSheet.create({
  appContainer: {
    flex: 1
  }
});
<|file_name|>lexer.rs<|end_file_name|><|fim▁begin|>//! Lexer
//!
//! This module contains elements than can be used for writing plugins
//! but can be ignored for simple usage.
use token::Token;
use token::Token::*;
use token::ComparisonOperator::*;
use self::Element::*;
use regex::Regex;
use error::{Error, Result};
/// A lexed chunk of template source.
#[derive(Clone, Debug, PartialEq)]
pub enum Element {
    /// An output expression (`{{ .. }}`): its tokens plus the raw source text.
    Expression(Vec<Token>, String),
    /// A tag (`{% .. %}`): its tokens plus the raw source text.
    Tag(Vec<Token>, String),
    /// Literal text passed through untouched.
    Raw(String),
}
lazy_static! {
    // Matches one whole markup block: either an expression `{{ .. }}` or a
    // tag `{% .. %}`. The whitespace-control variants (`{{-`/`-}}`,
    // `{%-`/`-%}`) also swallow the adjacent whitespace into the match so it
    // can be trimmed away later.
    static ref MARKUP: Regex = {
        let t = "(?:[[:space:]]*\\{\\{-|\\{\\{).*?(?:-\\}\\}[[:space:]]*|\\}\\})";
        let e = "(?:[[:space:]]*\\{%-|\\{%).*?(?:-%\\}[[:space:]]*|%\\})";
        Regex::new(&format!("{}|{}", t, e)).unwrap()
    };
}
fn split_blocks(text: &str) -> Vec<&str> {
let mut tokens = vec![];
let mut current = 0;
for mat in MARKUP.find_iter(text) {
let start = mat.start();
let end = mat.end();
match &text[current..start] {
"" => {}
t => tokens.push(t),<|fim▁hole|> match &text[current..text.len()] {
"" => {}
t => tokens.push(t),
}
tokens
}
lazy_static! {
    // Like MARKUP, but with capture group 1 around the interior of the
    // delimiters so the contents can be handed to `granularize`.
    static ref EXPRESSION: Regex = {
        let t = "(?:[[:space:]]*\\{\\{-|\\{\\{)(.*?)(?:-\\}\\}[[:space:]]*|\\}\\})";
        Regex::new(t).unwrap()
    };
    static ref TAG: Regex = {
        let e = "(?:[[:space:]]*\\{%-|\\{%)(.*?)(?:-%\\}[[:space:]]*|%\\})";
        Regex::new(e).unwrap()
    };
}
pub fn tokenize(text: &str) -> Result<Vec<Element>> {
let mut blocks = vec![];
for block in split_blocks(text) {
if let Some(caps) = TAG.captures(block) {
blocks.push(Tag(try!(granularize(caps.get(1).map(|x| x.as_str()).unwrap_or(""))),
block.to_owned()));
} else if let Some(caps) = EXPRESSION.captures(block) {
blocks
.push(Expression(try!(granularize(caps.get(1).map(|x| x.as_str()).unwrap_or(""))),
block.to_owned()));
} else {
blocks.push(Raw(block.to_owned()));
}
}
Ok(blocks)
}
lazy_static! {
    // Separators for atom splitting: quoted string literals, runs of
    // whitespace, single-char punctuation, `..`, and the assignment /
    // comparison operators. Each match is itself emitted as an atom.
    static ref SPLIT: Regex = Regex::new(
        r#"'.*?'|".*?"|\s+|[\|:,\[\]\(\)\?]|\.\.|={1,2}|!=|<=|>=|[<>]"#).unwrap();
}
/// Splits `block` on the SPLIT separators, keeping the separators
/// themselves as atoms. Empty strings are produced where two separators
/// are adjacent; `granularize` skips them.
fn split_atom(block: &str) -> Vec<&str> {
    let mut atoms = vec![];
    let mut cursor = 0;
    for sep in SPLIT.find_iter(block) {
        // whatever sits between the previous separator and this one
        atoms.push(&block[cursor..sep.start()]);
        // the separator itself
        atoms.push(&block[sep.start()..sep.end()]);
        cursor = sep.end();
    }
    // trailing text after the final separator
    atoms.push(&block[cursor..]);
    atoms
}
lazy_static! {
    // A name, optionally ending in `?` (predicate-style identifiers).
    static ref IDENTIFIER: Regex = Regex::new(r"[a-zA-Z_][\w-]*\??").unwrap();
    // A leading `.name` accessor, e.g. the `.size` in `items.size`.
    static ref INDEX: Regex = Regex::new(r"^\.[a-zA-Z_][a-zA-Z0-9_-]*").unwrap();
    static ref SINGLE_STRING_LITERAL: Regex = Regex::new(r"'[^']*'").unwrap();
    static ref DOUBLE_STRING_LITERAL: Regex = Regex::new("\"[^\"]*\"").unwrap();
    // Optionally negative integer or decimal number.
    static ref NUMBER_LITERAL: Regex = Regex::new(r"^-?\d+(\.\d+)?$").unwrap();
    static ref BOOLEAN_LITERAL: Regex = Regex::new(r"^true|false$").unwrap();
}
pub fn granularize(block: &str) -> Result<Vec<Token>> {
let mut result = vec![];
let mut push_more;
for el in split_atom(block) {
push_more = None;
result.push(match &*el.trim() {
"" => continue,
"|" => Pipe,
"." => Dot,
":" => Colon,
"," => Comma,
"[" => OpenSquare,
"]" => CloseSquare,
"(" => OpenRound,
")" => CloseRound,
"?" => Question,
"-" => Dash,
"=" => Assignment,
"or" => Or,
"==" => Comparison(Equals),
"!=" => Comparison(NotEquals),
"<=" => Comparison(LessThanEquals),
">=" => Comparison(GreaterThanEquals),
"<" => Comparison(LessThan),
">" => Comparison(GreaterThan),
"contains" => Comparison(Contains),
".." => DotDot,
x if SINGLE_STRING_LITERAL.is_match(x) ||
DOUBLE_STRING_LITERAL.is_match(x) => {
StringLiteral(x[1..x.len() - 1].to_owned())
}
x if NUMBER_LITERAL.is_match(x) => {
NumberLiteral(x.parse::<f32>()
.expect(&format!("Could not parse {:?} as float", x)))
}
x if BOOLEAN_LITERAL.is_match(x) => {
BooleanLiteral(x.parse::<bool>()
.expect(&format!("Could not parse {:?} as bool", x)))
}
x if INDEX.is_match(x) => {
let mut parts = x.splitn(2, '.');
parts.next().unwrap();
push_more = Some(vec![Identifier(parts.next().unwrap().to_owned())]);
Dot
}
x if IDENTIFIER.is_match(x) => Identifier(x.to_owned()),
x => return Err(Error::Lexer(format!("{} is not a valid identifier", x))),
});
if let Some(v) = push_more {
result.extend(v);
}
}
Ok(result)
}
#[test]
fn test_split_blocks() {
    // Expression and tag markup are separated from the surrounding raw text.
    assert_eq!(split_blocks("asdlkjfn\n{{askdljfbalkjsdbf}} asdjlfb"),
               vec!["asdlkjfn\n", "{{askdljfbalkjsdbf}}", " asdjlfb"]);
    assert_eq!(split_blocks("asdlkjfn\n{%askdljfbalkjsdbf%} asdjlfb"),
               vec!["asdlkjfn\n", "{%askdljfbalkjsdbf%}", " asdjlfb"]);
}
#[test]
fn test_whitespace_control() {
    // The dashed delimiters (`{{-`, `-}}`, `{%-`, `-%}`) absorb adjacent
    // whitespace into the markup block instead of leaving it in the raw text.
    assert_eq!(split_blocks("foo {{ bar }} 2000"),
               vec!["foo ", "{{ bar }}", " 2000"]);
    assert_eq!(split_blocks("foo {{- bar -}} 2000"),
               vec!["foo", " {{- bar -}} ", "2000"]);
    assert_eq!(split_blocks("foo \n{{- bar }} 2000"),
               vec!["foo", " \n{{- bar }}", " 2000"]);
    assert_eq!(split_blocks("foo {% bar %} 2000"),
               vec!["foo ", "{% bar %}", " 2000"]);
    assert_eq!(split_blocks("foo {%- bar -%} 2000"),
               vec!["foo", " {%- bar -%} ", "2000"]);
    assert_eq!(split_blocks("foo \n{%- bar %} 2000"),
               vec!["foo", " \n{%- bar %}", " 2000"]);
}
#[test]
fn test_split_atom() {
    // Empty strings appear where two separators are adjacent (e.g. space
    // then `|`); `granularize` skips them later.
    assert_eq!(split_atom("truc | arg:val"),
               vec!["truc", " ", "", "|", "", " ", "arg", ":", "val"]);
    assert_eq!(split_atom("truc | filter:arg1,arg2"),
               vec!["truc", " ", "", "|", "", " ", "filter", ":", "arg1", ",", "arg2"]);
}
#[test]
fn test_tokenize() {
    // Every Expression/Tag element retains the raw source text it came from,
    // including any whitespace absorbed by the dashed delimiters.
    assert_eq!(tokenize("{{hello 'world'}}").unwrap(),
               vec![Expression(vec![Identifier("hello".to_owned()),
                                    StringLiteral("world".to_owned())],
                               "{{hello 'world'}}".to_owned())]);
    // Dotted paths lex as a single identifier.
    assert_eq!(tokenize("{{hello.world}}").unwrap(),
               vec![Expression(vec![Identifier("hello.world".to_owned())],
                               "{{hello.world}}".to_owned())]);
    assert_eq!(tokenize("{{ hello 'world' }}").unwrap(),
               vec![Expression(vec![Identifier("hello".to_owned()),
                                    StringLiteral("world".to_owned())],
                               "{{ hello 'world' }}".to_owned())]);
    assert_eq!(tokenize("{{ hello 'world' }}").unwrap(),
               vec![Expression(vec![Identifier("hello".to_owned()),
                                    StringLiteral("world".to_owned())],
                               "{{ hello 'world' }}".to_owned())]);
    assert_eq!(tokenize("wat\n{{hello 'world'}} test").unwrap(),
               vec![Raw("wat\n".to_owned()),
                    Expression(vec![Identifier("hello".to_owned()),
                                    StringLiteral("world".to_owned())],
                               "{{hello 'world'}}".to_owned()),
                    Raw(" test".to_owned())]);
    // Whitespace-control moves the surrounding whitespace into the element.
    assert_eq!(tokenize("wat \n {{-hello 'world'-}} test").unwrap(),
               vec![Raw("wat".to_owned()),
                    Expression(vec![Identifier("hello".to_owned()),
                                    StringLiteral("world".to_owned())],
                               " \n {{-hello 'world'-}} ".to_owned()),
                    Raw("test".to_owned())]);
}
#[test]
fn test_granularize() {
    // Identifiers may contain dots and dashes.
    assert_eq!(granularize("include my-file.html").unwrap(),
               vec![Identifier("include".to_owned()),
                    Identifier("my-file.html".to_owned())]);
    // Punctuation atoms.
    assert_eq!(granularize("test | me").unwrap(),
               vec![Identifier("test".to_owned()),
                    Pipe,
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test .. me").unwrap(),
               vec![Identifier("test".to_owned()),
                    DotDot,
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test : me").unwrap(),
               vec![Identifier("test".to_owned()),
                    Colon,
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test , me").unwrap(),
               vec![Identifier("test".to_owned()),
                    Comma,
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test [ me").unwrap(),
               vec![Identifier("test".to_owned()),
                    OpenSquare,
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test ] me").unwrap(),
               vec![Identifier("test".to_owned()),
                    CloseSquare,
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test ( me").unwrap(),
               vec![Identifier("test".to_owned()),
                    OpenRound,
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test ) me").unwrap(),
               vec![Identifier("test".to_owned()),
                    CloseRound,
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test ? me").unwrap(),
               vec![Identifier("test".to_owned()),
                    Question,
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test - me").unwrap(),
               vec![Identifier("test".to_owned()),
                    Dash,
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test me").unwrap(),
               vec![Identifier("test".to_owned()), Identifier("me".to_owned())]);
    // Assignment and comparison operators.
    assert_eq!(granularize("test = me").unwrap(),
               vec![Identifier("test".to_owned()),
                    Assignment,
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test == me").unwrap(),
               vec![Identifier("test".to_owned()),
                    Comparison(Equals),
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test >= me").unwrap(),
               vec![Identifier("test".to_owned()),
                    Comparison(GreaterThanEquals),
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test > me").unwrap(),
               vec![Identifier("test".to_owned()),
                    Comparison(GreaterThan),
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test < me").unwrap(),
               vec![Identifier("test".to_owned()),
                    Comparison(LessThan),
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test != me").unwrap(),
               vec![Identifier("test".to_owned()),
                    Comparison(NotEquals),
                    Identifier("me".to_owned())]);
    assert_eq!(granularize("test <= me").unwrap(),
               vec![Identifier("test".to_owned()),
                    Comparison(LessThanEquals),
                    Identifier("me".to_owned())]);
    // A dotted path stays one identifier.
    assert_eq!(granularize("test.me").unwrap(),
               vec![Identifier("test.me".to_owned())]);
    // String literal quoting: both quote styles strip to their contents.
    assert_eq!(granularize("'test' == \"me\"").unwrap(),
               vec![StringLiteral("test".to_owned()),
                    Comparison(Equals),
                    StringLiteral("me".to_owned())]);
    // Filters with arguments.
    assert_eq!(granularize("test | me:arg").unwrap(),
               vec![Identifier("test".to_owned()),
                    Pipe,
                    Identifier("me".to_owned()),
                    Colon,
                    Identifier("arg".to_owned())]);
    assert_eq!(granularize("test | me:arg1,arg2").unwrap(),
               vec![Identifier("test".to_owned()),
                    Pipe,
                    Identifier("me".to_owned()),
                    Colon,
                    Identifier("arg1".to_owned()),
                    Comma,
                    Identifier("arg2".to_owned())]);
    assert_eq!(granularize("test | me : arg1, arg2").unwrap(),
               vec![Identifier("test".to_owned()),
                    Pipe,
                    Identifier("me".to_owned()),
                    Colon,
                    Identifier("arg1".to_owned()),
                    Comma,
                    Identifier("arg2".to_owned())]);
    // Number literals parse to f32.
    assert_eq!(granularize("multiply 5 3").unwrap(),
               vec![Identifier("multiply".to_owned()),
                    NumberLiteral(5f32),
                    NumberLiteral(3f32)]);
    assert_eq!(granularize("for i in (1..5)").unwrap(),
               vec![Identifier("for".to_owned()),
                    Identifier("i".to_owned()),
                    Identifier("in".to_owned()),
                    OpenRound,
                    NumberLiteral(1f32),
                    DotDot,
                    NumberLiteral(5f32),
                    CloseRound]);
    // Quotes of the other style survive inside a string literal.
    assert_eq!(granularize("\"1, '2', 3, 4\"").unwrap(),
               vec![StringLiteral("1, '2', 3, 4".to_owned())]);
    assert_eq!(granularize("'1, \"2\", 3, 4'").unwrap(),
               vec![StringLiteral("1, \"2\", 3, 4".to_owned())]);
    assert_eq!(granularize("\"1, '2', 3, 4\"\"1, '2', 3, 4\"").unwrap(),
               vec![StringLiteral("1, '2', 3, 4".to_owned()),
                    StringLiteral("1, '2', 3, 4".to_owned())]);
    assert_eq!(granularize("abc : \"1, '2', 3, 4\"").unwrap(),
               vec![Identifier("abc".to_owned()),
                    Colon,
                    StringLiteral("1, '2', 3, 4".to_owned())]);
}<|fim▁end|> | }
tokens.push(&text[start..end]);
current = end;
} |
<|file_name|>rawHttpService.ts<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import { TPromise } from 'vs/base/common/winjs.base';
import { assign } from 'vs/base/common/objects';
import { Url, parse as parseUrl } from 'url';
import { request, IRequestOptions } from 'vs/base/node/request';
import HttpProxyAgent = require('http-proxy-agent');
import HttpsProxyAgent = require('https-proxy-agent');
// Options accepted by xhr(); extends the base node request options.
export interface IXHROptions extends IRequestOptions {
	// Forwarded unchanged to follow-up requests on redirect.
	responseType?: string;
	// Number of 3xx redirects still allowed to be followed (0 disables).
	followRedirects: number;
}
// Minimal XHR-like response surface returned (or rejected with) by xhr().
export interface IXHRResponse {
	// Full response body, decoded as text.
	responseText: string;
	// HTTP status code (404 is also used for connection-level failures).
	status: number;
}
// Explicitly configured proxy URI; takes precedence over the
// HTTP(S)_PROXY environment variables when set.
let proxyConfiguration: string = null;

// Sets an explicit proxy URI that overrides any proxy environment variables.
export function configure(proxyURI: string): void {
	proxyConfiguration = proxyURI;
}
// Resolves the proxy URI to use for `uri`: explicit configuration wins,
// otherwise fall back to the conventional proxy environment variables for
// the target's protocol. Returns null when no proxy applies.
function getProxyURI(uri: Url): string {
	if (proxyConfiguration) {
		return proxyConfiguration;
	}
	if (uri.protocol === 'http:') {
		return process.env.HTTP_PROXY || process.env.http_proxy || null;
	}
	if (uri.protocol === 'https:') {
		// HTTPS targets also honor the plain HTTP proxy variables as a fallback.
		return process.env.HTTPS_PROXY || process.env.https_proxy || process.env.HTTP_PROXY || process.env.http_proxy || null;
	}
	// Unknown protocol: return the (falsy) configured value unchanged.
	return proxyConfiguration;
}
// Builds an http(s) proxy agent for `uri`, or undefined when no proxy is
// configured or the proxy URI itself uses an unsupported protocol.
function getProxyAgent(uri: Url): any {
	const proxyURI = getProxyURI(uri);
	if (!proxyURI) {
		return void 0;
	}
	const proxyProtocol = parseUrl(proxyURI).protocol;
	if (proxyProtocol !== 'http:' && proxyProtocol !== 'https:') {
		return void 0;
	}
	// The agent class follows the *target* protocol, not the proxy's.
	if (uri.protocol === 'http:') {
		return new HttpProxyAgent(proxyURI);
	}
	return new HttpsProxyAgent(proxyURI);
}
export function xhr(options: IXHROptions): TPromise<IXHRResponse> {
let endpoint = parseUrl(options.url);
options = assign({}, options);
options = assign(options, { agent: getProxyAgent(endpoint) });
return request(options).then(result => new TPromise<IXHRResponse>((c, e, p) => {
let res = result.res;
let data: string[] = [];
res.on('data', c => data.push(c));
res.on('end', () => {
if (options.followRedirects > 0 && (res.statusCode >= 300 && res.statusCode <= 303 || res.statusCode === 307)) {
let location = res.headers['location'];<|fim▁hole|> let newOptions = {
type: options.type, url: location, user: options.user, password: options.password, responseType: options.responseType, headers: options.headers,
timeout: options.timeout, followRedirects: options.followRedirects - 1, data: options.data
};
xhr(newOptions).done(c, e, p);
return;
}
}
let response: IXHRResponse = {
responseText: data.join(''),
status: res.statusCode
};
if ((res.statusCode >= 200 && res.statusCode < 300) || res.statusCode === 1223) {
c(response);
} else {
e(response);
}
});
}, err => {
let endpoint = parseUrl(options.url);
let agent = getProxyAgent(endpoint);
let message: string;
if (agent) {
message = 'Unable to to connect to ' + options.url + ' through proxy ' + getProxyURI(endpoint) + '. Error: ' + err.message;
} else {
message = 'Unable to to connect to ' + options.url + '. Error: ' + err.message;
}
return TPromise.wrapError<IXHRResponse>({
responseText: message,
status: 404
});
}));
}<|fim▁end|> | if (location) { |
<|file_name|>models.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
flaskbb.forum.models
~~~~~~~~~~~~~~~~~~~~
It provides the models for the forum
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime, timedelta
from flask import url_for, abort
from sqlalchemy.orm import aliased
from openspending.core import db
from openspending.forum.utils.decorators import can_access_forum, can_access_topic
from openspending.forum.utils.helpers import slugify, get_categories_and_forums, \
get_forums
from openspending.forum.utils.database import CRUDMixin
from openspending.forum.utils.forum_settings import flaskbb_config
# Association table: which users moderate which forums (many-to-many).
moderators = db.Table(
    'forum_moderators',
    db.Column('user_id', db.Integer(), db.ForeignKey('account.id'),
              nullable=False),
    db.Column('forum_id', db.Integer(),
              db.ForeignKey('forum_forums.id', use_alter=True, name="fk_forum_id"),
              nullable=False))

# Association table: which users track (subscribe to) which topics.
topictracker = db.Table(
    'forum_topictracker',
    db.Column('user_id', db.Integer(), db.ForeignKey('account.id'),
              nullable=False),
    db.Column('forum_topic_id', db.Integer(),
              db.ForeignKey('forum_topics.id',
                            use_alter=True, name="fk_tracker_topic_id"),
              nullable=False))
class TopicsRead(db.Model, CRUDMixin):
    """Per-user read marker for a single topic (the "readstracker")."""

    __tablename__ = "forum_topicsread"

    user_id = db.Column(db.Integer, db.ForeignKey("account.id"),
                        primary_key=True)
    topic_id = db.Column(db.Integer,
                         db.ForeignKey("forum_topics.id", use_alter=True,
                                       name="fk_tr_topic_id"),
                         primary_key=True)
    forum_id = db.Column(db.Integer,
                         db.ForeignKey("forum_forums.id", use_alter=True,
                                       name="fk_tr_forum_id"),
                         primary_key=True)
    # Pass the callable (not its result): with ``datetime.utcnow()`` the
    # timestamp would be evaluated once at import time and shared by every
    # row ever inserted.
    last_read = db.Column(db.DateTime, default=datetime.utcnow)
class ForumsRead(db.Model, CRUDMixin):
    """Per-user read marker for a whole forum, plus an optional "cleared"
    timestamp set when the user marks the forum as read."""

    __tablename__ = "forum_forumsread"

    user_id = db.Column(db.Integer, db.ForeignKey("account.id"),
                        primary_key=True)
    forum_id = db.Column(db.Integer,
                         db.ForeignKey("forum_forums.id", use_alter=True,
                                       name="fk_fr_forum_id"),
                         primary_key=True)
    # Pass the callable (not its result) so the default is evaluated per
    # insert rather than once at import time.
    last_read = db.Column(db.DateTime, default=datetime.utcnow)
    cleared = db.Column(db.DateTime)
class Report(db.Model, CRUDMixin):
    """A user-filed report against a post. ``zapped``/``zapped_by`` record
    when and by whom the report was handled."""

    __tablename__ = "forum_reports"

    id = db.Column(db.Integer, primary_key=True)
    reporter_id = db.Column(db.Integer, db.ForeignKey("account.id"),
                            nullable=False)
    # Callable default: evaluated per insert, not once at import time.
    reported = db.Column(db.DateTime, default=datetime.utcnow)
    post_id = db.Column(db.Integer, db.ForeignKey("forum_posts.id"), nullable=False)
    zapped = db.Column(db.DateTime)
    zapped_by = db.Column(db.Integer, db.ForeignKey("account.id"))
    reason = db.Column(db.Text)

    post = db.relationship("Post", backref="report", lazy="joined")
    reporter = db.relationship("Account", lazy="joined",
                               foreign_keys=[reporter_id])
    zapper = db.relationship("Account", lazy="joined", foreign_keys=[zapped_by])

    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, self.id)

    def save(self, post=None, user=None):
        """Saves a report. If the report already has an id it is simply
        updated; otherwise ``post`` and ``user`` fill in the report data.
        The reason is expected to be set on the instance beforehand.

        :param post: The post that should be reported
        :param user: The user who has reported the post
        """
        # Updating an existing report.
        if self.id:
            db.session.add(self)
            db.session.commit()
            return self

        if post and user:
            self.reporter_id = user.id
            self.reported = datetime.utcnow()
            self.post_id = post.id

            db.session.add(self)
            db.session.commit()
            return self
class Post(db.Model, CRUDMixin):
    """A single post inside a topic."""

    __tablename__ = "forum_posts"
    __searchable__ = ['content', 'username']

    id = db.Column(db.Integer, primary_key=True)
    topic_id = db.Column(db.Integer,
                         db.ForeignKey("forum_topics.id",
                                       use_alter=True,
                                       name="fk_post_topic_id",
                                       ondelete="CASCADE"))
    user_id = db.Column(db.Integer, db.ForeignKey("account.id"), nullable=True)
    username = db.Column(db.String(200), nullable=False)
    content = db.Column(db.Text, nullable=False)
    # Callable default: with ``datetime.utcnow()`` every row would share the
    # single timestamp computed at import time.
    date_created = db.Column(db.DateTime, default=datetime.utcnow)
    date_modified = db.Column(db.DateTime)
    modified_by = db.Column(db.String(200))

    # Properties
    @property
    def url(self):
        """Returns the url for the post"""
        return url_for("forum.view_post", post_id=self.id)

    # Methods
    def __init__(self, content=None):
        if content:
            self.content = content

    def __repr__(self):
        """
        Set to a unique key specific to the object in the database.
        Required for cache.memoize() to work across requests.
        """
        return "<{} {}>".format(self.__class__.__name__, self.id)

    def save(self, user=None, topic=None):
        """Saves a new post. If no parameters are passed we assume that
        you will just update an existing post. It returns the object after the
        operation was successful.

        :param user: The user who has created the post
        :param topic: The topic in which the post was created
        """
        # update/edit the post
        if self.id:
            db.session.add(self)
            db.session.commit()
            return self

        # Adding a new post
        if user and topic:
            created = datetime.utcnow()
            self.user_id = user.id
            self.username = user.username
            self.topic_id = topic.id
            self.date_created = created
            topic.last_updated = created

            # This needs to be done before the last_post_id gets updated.
            db.session.add(self)
            db.session.commit()

            # Now lets update the last post id
            topic.last_post_id = self.id

            # Update the last post info for the forum
            topic.forum.last_post_id = self.id
            topic.forum.last_post_title = topic.title
            topic.forum.last_post_user_id = user.id
            topic.forum.last_post_username = user.username
            topic.forum.last_post_created = created

            # Update the post counts
            user.post_count += 1
            topic.post_count += 1
            topic.forum.post_count += 1

            # And commit it!
            db.session.add(topic)
            db.session.commit()
            return self

    def delete(self):
        """Deletes a post and returns self."""
        # This will delete the whole topic
        if self.topic.first_post_id == self.id:
            self.topic.delete()
            return self

        # Delete the last post
        if self.topic.last_post_id == self.id:
            # update the last post in the forum
            if self.topic.last_post_id == self.topic.forum.last_post_id:
                # We need the second last post in the forum here,
                # because the last post will be deleted
                second_last_post = Post.query.\
                    filter(Post.topic_id == Topic.id,
                           Topic.forum_id == self.topic.forum.id).\
                    order_by(Post.id.desc()).limit(2).offset(0).\
                    all()
                second_last_post = second_last_post[1]
                self.topic.forum.last_post_id = second_last_post.id

            # check if there is a second last post, else it is the first post
            if self.topic.second_last_post:
                # Now the second last post will be the last post
                self.topic.last_post_id = self.topic.second_last_post
            # there is no second last post, now the last post is also the
            # first post
            else:
                self.topic.last_post_id = self.topic.first_post_id

        # Update the post counts
        self.user.post_count -= 1
        self.topic.post_count -= 1
        self.topic.forum.post_count -= 1
        db.session.commit()

        db.session.delete(self)
        db.session.commit()
        return self
class Topic(db.Model, CRUDMixin):
    """A discussion topic inside a forum, holding its posts and the
    denormalized first/last post references."""

    __tablename__ = "forum_topics"
    __searchable__ = ['title', 'username']

    id = db.Column(db.Integer, primary_key=True)
    forum_id = db.Column(db.Integer,
                         db.ForeignKey("forum_forums.id",
                                       use_alter=True,
                                       name="fk_topic_forum_id"),
                         nullable=False)
    title = db.Column(db.String(255), nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey("account.id"))
    username = db.Column(db.String(200), nullable=False)
    # Callable defaults: with ``datetime.utcnow()`` these timestamps would be
    # evaluated once at import time and shared by every inserted row.
    date_created = db.Column(db.DateTime, default=datetime.utcnow)
    last_updated = db.Column(db.DateTime, default=datetime.utcnow)
    locked = db.Column(db.Boolean, default=False)
    important = db.Column(db.Boolean, default=False)
    views = db.Column(db.Integer, default=0)
    post_count = db.Column(db.Integer, default=0)

    # One-to-one (uselist=False) relationship between first_post and topic
    first_post_id = db.Column(db.Integer, db.ForeignKey("forum_posts.id",
                                                        ondelete="CASCADE"))
    first_post = db.relationship("Post", backref="first_post", uselist=False,
                                 foreign_keys=[first_post_id])

    # One-to-one
    last_post_id = db.Column(db.Integer, db.ForeignKey("forum_posts.id"))
    last_post = db.relationship("Post", backref="last_post", uselist=False,
                                foreign_keys=[last_post_id])

    # One-to-many
    posts = db.relationship("Post", backref="topic", lazy="dynamic",
                            primaryjoin="Post.topic_id == Topic.id",
                            cascade="all, delete-orphan", post_update=True)

    # Properties
    @property
    def second_last_post(self):
        """Returns the second last post.

        NOTE(review): raises IndexError when the topic has fewer than two
        posts; callers treat the result as truthy/falsy — confirm intent.
        """
        return self.posts[-2].id

    @property
    def slug(self):
        """Returns a slugified version from the topic title"""
        return slugify(self.title)

    @property
    def url(self):
        """Returns the slugified url for the topic"""
        return url_for("forum.view_topic", topic_id=self.id, slug=self.slug)

    # Methods
    def __init__(self, title=None):
        if title:
            self.title = title

    def __repr__(self):
        """
        Set to a unique key specific to the object in the database.
        Required for cache.memoize() to work across requests.
        """
        return "<{} {}>".format(self.__class__.__name__, self.id)

    @classmethod
    @can_access_topic
    def get_topic(cls, topic_id, user):
        """Returns the topic or aborts with 404; access is enforced by the
        ``can_access_topic`` decorator."""
        topic = Topic.query.filter_by(id=topic_id).first_or_404()
        return topic

    def tracker_needs_update(self, forumsread, topicsread):
        """Returns True if the topicsread tracker needs an update.
        Also, if the ``TRACKER_LENGTH`` is configured, it will just recognize
        topics that are newer than the ``TRACKER_LENGTH`` (in days) as unread.

        :param forumsread: The ForumsRead object is needed because we also
                           need to check if the forum has been cleared
                           sometime ago.
        :param topicsread: The topicsread object is used to check if there is
                           a new post in the topic.
        """
        read_cutoff = None
        if flaskbb_config['TRACKER_LENGTH'] > 0:
            read_cutoff = datetime.utcnow() - timedelta(
                days=flaskbb_config['TRACKER_LENGTH'])

        # The tracker is disabled - abort
        if read_cutoff is None:
            return False

        # Else the topic is still below the read_cutoff
        elif read_cutoff > self.last_post.date_created:
            return False

        # Can be None (cleared) if the user has never marked the forum as read.
        # If this condition is false - we need to update the tracker
        if forumsread and forumsread.cleared is not None and \
                forumsread.cleared >= self.last_post.date_created:
            return False

        if topicsread and topicsread.last_read >= self.last_post.date_created:
            return False

        return True

    def update_read(self, user, forum, forumsread):
        """Updates the topicsread and forumsread tracker for a specified user,
        if the topic contains new posts or the user hasn't read the topic.
        Returns True if the tracker has been updated.

        :param user: The user for whom the readstracker should be updated.
        :param forum: The forum in which the topic is.
        :param forumsread: The forumsread object. It is used to check if there
                           is a new post since the forum has been marked as
                           read.
        """
        # User is not logged in - abort
        if not user.is_authenticated() and not getattr(user, 'is_lockdownuser', False):
            return False

        topicsread = TopicsRead.query.\
            filter(TopicsRead.user_id == user.id,
                   TopicsRead.topic_id == self.id).first()

        if not self.tracker_needs_update(forumsread, topicsread):
            return False

        # Because we return True/False if the trackers have been
        # updated, we need to store the status in a temporary variable
        updated = False

        # A new post has been submitted that the user hasn't read.
        # Updating...
        if topicsread:
            topicsread.last_read = datetime.utcnow()
            topicsread.save()
            updated = True

        # The user has not visited the topic before. Inserting him in
        # the TopicsRead model.
        elif not topicsread:
            topicsread = TopicsRead()
            topicsread.user_id = user.id
            topicsread.topic_id = self.id
            topicsread.forum_id = self.forum_id
            topicsread.last_read = datetime.utcnow()
            topicsread.save()
            updated = True

        # No unread posts
        else:
            updated = False

        # Save True/False if the forums tracker has been updated.
        updated = forum.update_read(user, forumsread, topicsread)

        return updated

    def recalculate(self):
        """Recalculates the post count in the topic."""
        post_count = Post.query.filter_by(topic_id=self.id).count()

        self.post_count = post_count
        self.save()
        return self

    def move(self, new_forum):
        """Moves a topic to the given forum.
        Returns True if it could successfully move the topic to forum.

        :param new_forum: The new forum for the topic
        """
        # if the target forum is the current forum, abort
        if self.forum_id == new_forum.id:
            return False

        old_forum = self.forum
        self.forum.post_count -= self.post_count
        self.forum.topic_count -= 1
        self.forum_id = new_forum.id

        new_forum.post_count += self.post_count
        new_forum.topic_count += 1
        db.session.commit()

        new_forum.update_last_post()
        old_forum.update_last_post()

        TopicsRead.query.filter_by(topic_id=self.id).delete()

        return True

    def save(self, user=None, forum=None, post=None):
        """Saves a topic and returns the topic object. If no parameters are
        given, it will only update the topic.

        :param user: The user who has created the topic
        :param forum: The forum where the topic is stored
        :param post: The post object which is connected to the topic
        """
        # Updates the topic
        if self.id:
            db.session.add(self)
            db.session.commit()
            return self

        # Set the forum and user id
        self.forum_id = forum.id
        self.user_id = user.id
        self.username = user.username

        # Set the last_updated time. Needed for the readstracker
        self.last_updated = datetime.utcnow()
        self.date_created = datetime.utcnow()

        # Insert and commit the topic
        db.session.add(self)
        db.session.commit()

        # Create the topic post
        post.save(user, self)

        # Update the first post id
        self.first_post_id = post.id

        # Update the topic count
        forum.topic_count += 1
        db.session.commit()

        return self

    def delete(self, users=None):
        """Deletes a topic with the corresponding posts. If a list with
        user objects is passed it will also update their post counts

        :param users: A list with user objects
        """
        # Grab the second last topic in the forum + parents/childs
        topic = Topic.query.\
            filter_by(forum_id=self.forum_id).\
            order_by(Topic.last_post_id.desc()).limit(2).offset(0).all()

        # do we want to delete the topic with the last post in the forum?
        if topic and topic[0].id == self.id:
            try:
                # Now the second last post will be the last post
                self.forum.last_post_id = topic[1].last_post_id
                self.forum.last_post_title = topic[1].title
                self.forum.last_post_user_id = topic[1].user_id
                self.forum.last_post_username = topic[1].username
                self.forum.last_post_created = topic[1].last_updated
            # Catch an IndexError when you delete the last topic in the forum
            # There is no second last post
            except IndexError:
                self.forum.last_post_id = None
                self.forum.last_post_title = None
                self.forum.last_post_user_id = None
                self.forum.last_post_username = None
                self.forum.last_post_created = None

            # Commit the changes
            db.session.commit()

        # These things needs to be stored in a variable before they are deleted
        forum = self.forum

        TopicsRead.query.filter_by(topic_id=self.id).delete()

        # Delete the topic
        db.session.delete(self)
        db.session.commit()

        # Update the post counts
        if users:
            for user in users:
                user.post_count = Post.query.filter_by(user_id=user.id).count()
                db.session.commit()

        forum.topic_count = Topic.query.\
            filter_by(forum_id=self.forum_id).\
            count()

        forum.post_count = Post.query.\
            filter(Post.topic_id == Topic.id,
                   Topic.forum_id == self.forum_id).\
            count()

        db.session.commit()
        return self
class Forum(db.Model, CRUDMixin):
__tablename__ = "forum_forums"
__searchable__ = ['title', 'description']
id = db.Column(db.Integer, primary_key=True)
category_id = db.Column(db.Integer, db.ForeignKey("forum_categories.id"),
nullable=False)
title = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text)
position = db.Column(db.Integer, default=1, nullable=False)
locked = db.Column(db.Boolean, default=False, nullable=False)
show_moderators = db.Column(db.Boolean, default=False, nullable=False)
external = db.Column(db.String(200))
post_count = db.Column(db.Integer, default=0, nullable=False)
topic_count = db.Column(db.Integer, default=0, nullable=False)
# One-to-one
last_post_id = db.Column(db.Integer, db.ForeignKey("forum_posts.id"))
last_post = db.relationship("Post", backref="last_post_forum",
uselist=False, foreign_keys=[last_post_id])
# Not nice, but needed to improve the performance
last_post_title = db.Column(db.String(255))
last_post_user_id = db.Column(db.Integer, db.ForeignKey("account.id"))
last_post_username = db.Column(db.String(255))
last_post_created = db.Column(db.DateTime, default=datetime.utcnow())
# One-to-many
topics = db.relationship(
"Topic",
backref="forum",
lazy="dynamic",
cascade="all, delete-orphan"
)
# Many-to-many
moderators = db.relationship(
"Account",
secondary=moderators,
primaryjoin=(moderators.c.forum_id == id),
backref=db.backref("forummoderator", lazy="dynamic"),
lazy="joined"
)
# Properties
@property
def slug(self):
"""Returns a slugified version from the forum title"""
return slugify(self.title)
@property
def url(self):
"""Returns the slugified url for the forum"""
if self.external:
return self.external
return url_for("forum.view_forum", forum_id=self.id, slug=self.slug)
@property
def last_post_url(self):
"""Returns the url for the last post in the forum"""
return url_for("forum.view_post", post_id=self.last_post_id)
# Methods
def __repr__(self):
"""Set to a unique key specific to the object in the database.
Required for cache.memoize() to work across requests.
"""
return "<{} {}>".format(self.__class__.__name__, self.id)
def update_last_post(self):
"""Updates the last post in the forum."""
last_post = Post.query.\
filter(Post.topic_id == Topic.id,
Topic.forum_id == self.id).\
order_by(Post.date_created.desc()).\
first()
# Last post is none when there are no topics in the forum
if last_post is not None:
# a new last post was found in the forum
if not last_post.id == self.last_post_id:
self.last_post_id = last_post.id
self.last_post_title = last_post.topic.title
self.last_post_user_id = last_post.user_id
self.last_post_username = last_post.username
self.last_post_created = last_post.date_created
# No post found..
else:
self.last_post_id = None
self.last_post_title = None
self.last_post_user_id = None
self.last_post_username = None
self.last_post_created = None
db.session.commit()
def update_read(self, user, forumsread, topicsread):
"""Updates the ForumsRead status for the user. In order to work
correctly, be sure that `topicsread is **not** `None`.
:param user: The user for whom we should check if he has read the
forum.
:param forumsread: The forumsread object. It is needed to check if
if the forum is unread. If `forumsread` is `None`
and the forum is unread, it will create a new entry
in the `ForumsRead` relation, else (and the forum
is still unread) we are just going to update the
entry in the `ForumsRead` relation.
:param topicsread: The topicsread object is used in combination
with the forumsread object to check if the
forumsread relation should be updated and
therefore is unread.
"""
if (not user.is_authenticated() and not getattr(user, 'is_lockdownuser', False)) or topicsread is None:
return False
read_cutoff = None
if flaskbb_config['TRACKER_LENGTH'] > 0:
read_cutoff = datetime.utcnow() - timedelta(
days=flaskbb_config['TRACKER_LENGTH'])
# fetch the unread posts in the forum
unread_count = Topic.query.\
outerjoin(TopicsRead,
db.and_(TopicsRead.topic_id == Topic.id,
TopicsRead.user_id == user.id)).\
outerjoin(ForumsRead,
db.and_(ForumsRead.forum_id == Topic.forum_id,
ForumsRead.user_id == user.id)).\
filter(Topic.forum_id == self.id,
Topic.last_updated > read_cutoff,
db.or_(TopicsRead.last_read == None,
TopicsRead.last_read < Topic.last_updated)).\
count()
# No unread topics available - trying to mark the forum as read
if unread_count == 0:
if forumsread and forumsread.last_read > topicsread.last_read:
return False
# ForumRead Entry exists - Updating it because a new topic/post
# has been submitted and has read everything (obviously, else the
# unread_count would be useless).
elif forumsread:
forumsread.last_read = datetime.utcnow()
forumsread.save()
return True
# No ForumRead Entry existing - creating one.
forumsread = ForumsRead()
forumsread.user_id = user.id
forumsread.forum_id = self.id
forumsread.last_read = datetime.utcnow()
forumsread.save()
return True
# Nothing updated, because there are still more than 0 unread
# topicsread
return False
def recalculate(self, last_post=False):
"""Recalculates the post_count and topic_count in the forum.
Returns the forum with the recounted stats.
:param last_post: If set to ``True`` it will also try to update
the last post columns in the forum.
"""
topic_count = Topic.query.filter_by(forum_id=self.id).count()
post_count = Post.query.\
filter(Post.topic_id == Topic.id,
Topic.forum_id == self.id).\
count()
self.topic_count = topic_count
self.post_count = post_count
if last_post:
self.update_last_post()
self.save()
return self
def save(self, groups=None):
"""Saves a forum
:param moderators: If given, it will update the moderators in this
forum with the given iterable of user objects.
:param groups: A list with group objects.
"""
if self.id:
db.session.merge(self)
else:
db.session.add(self)
db.session.commit()
return self
def delete(self, users=None):
"""Deletes forum. If a list with involved user objects is passed,
it will also update their post counts
:param users: A list with user objects
"""
# Delete the forum
db.session.delete(self)
db.session.commit()
# Delete the entries for the forum in the ForumsRead and TopicsRead
# relation
ForumsRead.query.filter_by(forum_id=self.id).delete()
TopicsRead.query.filter_by(forum_id=self.id).delete()
# Update the users post count
if users:
users_list = []
for user in users:
user.post_count = Post.query.filter_by(user_id=user.id).count()
users_list.append(user)
db.session.add_all(users_list)
db.session.commit()
return self
def move_topics_to(self, topics):
"""Moves a bunch a topics to the forum. Returns ``True`` if all
topics were moved successfully to the forum.
:param topics: A iterable with topic objects.
"""
status = False
for topic in topics:
status = topic.move(self)
return status
# Classmethods
@classmethod
@can_access_forum
def get_forum(cls, forum_id, user):
"""Returns the forum and forumsread object as a tuple for the user.
:param forum_id: The forum id
:param user: The user object is needed to check if we also need their
forumsread object.
"""
if user.is_authenticated() and not getattr(user, 'is_lockdownuser', False):
forum, forumsread = Forum.query.\
filter(Forum.id == forum_id).\
options(db.joinedload("category")).\
outerjoin(ForumsRead,
db.and_(ForumsRead.forum_id == Forum.id,
ForumsRead.user_id == user.id)).\
add_entity(ForumsRead).\
first_or_404()
else:
forum = Forum.query.filter(Forum.id == forum_id).first_or_404()
forumsread = None
return forum, forumsread
@classmethod
def get_topics(cls, forum_id, user, page=1, per_page=20):
"""Get the topics for the forum. If the user is logged in,
it will perform an outerjoin for the topics with the topicsread and
forumsread relation to check if it is read or unread.
:param forum_id: The forum id
:param user: The user object
:param page: The page whom should be loaded
:param per_page: How many topics per page should be shown
"""
if user.is_authenticated() and not getattr(user, 'is_lockdownuser', False):
topics = Topic.query.filter_by(forum_id=forum_id).\
outerjoin(TopicsRead,
db.and_(TopicsRead.topic_id == Topic.id,
TopicsRead.user_id == user.id)).\
add_entity(TopicsRead).\
order_by(Topic.important.desc(), Topic.last_updated.desc()).\
paginate(page, per_page, True)
else:
topics = Topic.query.filter_by(forum_id=forum_id).\
order_by(Topic.important.desc(), Topic.last_updated.desc()).\
paginate(page, per_page, True)
topics.items = [(topic, None) for topic in topics.items]
return topics
class Category(db.Model, CRUDMixin):
__tablename__ = "forum_categories"
__searchable__ = ['title', 'description']
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text)
position = db.Column(db.Integer, default=1, nullable=False)
# One-to-many
forums = db.relationship("Forum", backref="category", lazy="dynamic",
primaryjoin='Forum.category_id == Category.id',
order_by='asc(Forum.position)',
cascade="all, delete-orphan")
# Properties
@property
def slug(self):
"""Returns a slugified version from the category title"""
return slugify(self.title)
@property
def url(self):
"""Returns the slugified url for the category"""
return url_for("forum.view_category", category_id=self.id,
slug=self.slug)
# Methods<|fim▁hole|> """Set to a unique key specific to the object in the database.
Required for cache.memoize() to work across requests.
"""
return "<{} {}>".format(self.__class__.__name__, self.id)
def delete(self, users=None):
"""Deletes a category. If a list with involved user objects is passed,
it will also update their post counts
:param users: A list with user objects
"""
# and finally delete the category itself
db.session.delete(self)
db.session.commit()
# Update the users post count
if users:
for user in users:
user.post_count = Post.query.filter_by(user_id=user.id).count()
db.session.commit()
return self
# Classmethods
@classmethod
def get_all(cls, user):
"""Get all categories with all associated forums.
It returns a list with tuples. Those tuples are containing the category
and their associated forums (whose are stored in a list).
For example::
[(<Category 1>, [(<Forum 2>, <ForumsRead>), (<Forum 1>, None)]),
(<Category 2>, [(<Forum 3>, None), (<Forum 4>, None)])]
:param user: The user object is needed to check if we also need their
forumsread object.
"""
forums = db.session.query(cls).\
order_by(Category.position, Category.id).\
all()
# # import Group model locally to avoid cicular imports
# if user.is_authenticated() and not getattr(user, 'is_lockdownuser', False):
# # get list of user group idsnot
# # filter forums by user groups
# # get all
# forums = db.session.query(cls).\
# order_by(Category.position, Category.id).\
# all()
# else:
# # filter forums by guest groups
# forums = db.session.query(cls).\
# order_by(cls.position, cls.id).\
# all()
return get_categories_and_forums(forums, user)
@classmethod
def get_forums(cls, category_id, user):
"""Get the forums for the category.
It returns a tuple with the category and the forums with their
forumsread object are stored in a list.
A return value can look like this for a category with two forums::
(<Category 1>, [(<Forum 1>, None), (<Forum 2>, None)])
:param category_id: The category id
:param user: The user object is needed to check if we also need their
forumsread object.
"""
if user.is_authenticated() and not getattr(user, 'is_lockdownuser', False):
# get list of user group ids
forums = cls.query.\
filter(cls.id == category_id).\
outerjoin(ForumsRead,
db.and_(ForumsRead.user_id == user.id)).\
add_entity(ForumsRead).\
all()
else:
forums = cls.query.\
filter(cls.id == category_id).\
all()
if not forums:
abort(404)
return get_forums(forums, user)<|fim▁end|> | def __repr__(self): |
<|file_name|>test_monitor_attributes.py<|end_file_name|><|fim▁begin|>def testHasMasterPrimary(txnPoolNodeSet):
masterPrimaryCount = 0
for node in txnPoolNodeSet:
masterPrimaryCount += int(node.monitor.hasMasterPrimary)<|fim▁hole|> assert masterPrimaryCount == 1<|fim▁end|> | |
<|file_name|>GetBookByTitleAndAuthor.py<|end_file_name|><|fim▁begin|>from aquarius.objects.Book import Book
class GetBookByTitleAndAuthor(object):<|fim▁hole|> def __init__(self, connection):
self.__connection = connection
def execute(self, book):
b = Book()
sql = "SELECT Id, Title, Author FROM Book WHERE Title=? AND Author=?"
r = list(self.__connection.execute_sql_fetch_all_with_params(sql, (book.title, book.author)))
if len(r) > 0:
self.map_resultset_to_book(b, r)
return b
def map_resultset_to_book(self, book, resultset):
book.id = resultset[0][0]
book.title = resultset[0][1]
book.author = resultset[0][2]<|fim▁end|> | |
<|file_name|>uniform.rs<|end_file_name|><|fim▁begin|>// Copyright 2016 Jeremy Mason
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![feature(test)]
#![allow(non_snake_case)]
extern crate mayda;
extern crate rand;
extern crate test;
use mayda::{Access, AccessInto, Encode, Uniform};
use rand::distributions::{IndependentSample, Range};
use std::{u8, u16, u32, u64, i8, i16, i32, i64};
use test::Bencher;
fn rand_uniform<T>(min: T, max: T, length: usize) -> Vec<T>
where T: PartialOrd + rand::distributions::range::SampleRange {
let mut output: Vec<T> = Vec::with_capacity(length);
let val = Range::new(min, max);
let mut rng = rand::thread_rng();
for _ in 0..length {
output.push(val.ind_sample(&mut rng));
}
output
}
macro_rules! encode_bench {
($(($t: ty: $min: expr, $max: expr, $length: expr, $name: ident))*) => ($(
#[bench]
fn $name(b: &mut Bencher) {
let mut bin = Uniform::new();
let input: Vec<$t> = rand_uniform($min, $max, $length);
b.iter(|| {
bin.encode(&input).unwrap();
});
let output = bin.decode();
assert_eq!(input, output);
}
)*)
}
encode_bench!{
(u32: 0, u32::MAX, 15, en_u32_0_MAX_15)
(u32: 0, u32::MAX, 16, en_u32_0_MAX_16)
(u32: 0, u32::MAX, 31, en_u32_0_MAX_31)
(u32: 0, u32::MAX, 32, en_u32_0_MAX_32)
(u32: 0, u32::MAX, 127, en_u32_0_MAX_127)
(u32: 0, u32::MAX, 128, en_u32_0_MAX_128)
(u8: 0, u8::MAX, 32768, en_u8_0_MAX_32768)
(u16: 0, u16::MAX, 32768, en_u16_0_MAX_32768)
(u32: 0, u32::MAX, 32768, en_u32_0_MAX_32768)
(u64: 0, u64::MAX, 32768, en_u64_0_MAX_32768)
(i32: i32::MIN, i32::MAX, 15, en_i32_MIN_MAX_15)
(i32: i32::MIN, i32::MAX, 16, en_i32_MIN_MAX_16)
(i32: i32::MIN, i32::MAX, 31, en_i32_MIN_MAX_31)
(i32: i32::MIN, i32::MAX, 32, en_i32_MIN_MAX_32)
(i32: i32::MIN, i32::MAX, 127, en_i32_MIN_MAX_127)
(i32: i32::MIN, i32::MAX, 128, en_i32_MIN_MAX_128)
(i8: i8::MIN, i8::MAX, 32768, en_i8_MIN_MAX_32768)
(i16: i16::MIN, i16::MAX, 32768, en_i16_MIN_MAX_32768)
(i32: i32::MIN, i32::MAX, 32768, en_i32_MIN_MAX_32768)
(i64: i64::MIN, i64::MAX, 32768, en_i64_MIN_MAX_32768)
}
macro_rules! decode_bench {
($(($t: ty: $min: expr, $max: expr, $length: expr, $name: ident))*) => ($(
#[bench]
fn $name(b: &mut Bencher) {
let mut bin = Uniform::new();
let input: Vec<$t> = rand_uniform($min, $max, $length);
bin.encode(&input).unwrap();
let mut output = vec![0; $length];
b.iter(|| {
bin.decode_into(&mut *output);
});
assert_eq!(input, output);
}
)*)
}
decode_bench!{
(u32: 0, u32::MAX, 15, de_u32_0_MAX_15)
(u32: 0, u32::MAX, 16, de_u32_0_MAX_16)
(u32: 0, u32::MAX, 31, de_u32_0_MAX_31)
(u32: 0, u32::MAX, 32, de_u32_0_MAX_32)
(u32: 0, u32::MAX, 127, de_u32_0_MAX_127)<|fim▁hole|> (u32: 0, u32::MAX, 32768, de_u32_0_MAX_32768)
(u64: 0, u64::MAX, 32768, de_u64_0_MAX_32768)
(i32: i32::MIN, i32::MAX, 15, de_i32_MIN_MAX_15)
(i32: i32::MIN, i32::MAX, 16, de_i32_MIN_MAX_16)
(i32: i32::MIN, i32::MAX, 31, de_i32_MIN_MAX_31)
(i32: i32::MIN, i32::MAX, 32, de_i32_MIN_MAX_32)
(i32: i32::MIN, i32::MAX, 127, de_i32_MIN_MAX_127)
(i32: i32::MIN, i32::MAX, 128, de_i32_MIN_MAX_128)
(i8: i8::MIN, i8::MAX, 32768, de_i8_MIN_MAX_32768)
(i16: i16::MIN, i16::MAX, 32768, de_i16_MIN_MAX_32768)
(i32: i32::MIN, i32::MAX, 32768, de_i32_MIN_MAX_32768)
(i64: i64::MIN, i64::MAX, 32768, de_i64_MIN_MAX_32768)
}
macro_rules! indexing_bench {
($(($t: ty: $min: expr, $max: expr, $length: expr, $idx: expr, $name: ident))*) => ($(
#[bench]
fn $name(b: &mut Bencher) {
let mut bin = Uniform::new();
let input: Vec<$t> = rand_uniform($min, $max, $length);
bin.encode(&input).unwrap();
let mut v: $t = 0;
b.iter(|| {
v = bin.access($idx);
});
assert_eq!(input[$idx], v);
}
)*)
}
indexing_bench!{
(u8: 0, u8::MAX, 32768, 128, idx_u8_0_MAX_32768_128)
(u16: 0, u16::MAX, 32768, 128, idx_u16_0_MAX_32768_128)
(u32: 0, u32::MAX, 32768, 128, idx_u32_0_MAX_32768_128)
(u64: 0, u64::MAX, 32768, 128, idx_u64_0_MAX_32768_128)
(u8: 0, u8::MAX, 32768, 32767, idx_u8_0_MAX_32768_32767)
(u16: 0, u16::MAX, 32768, 32767, idx_u16_0_MAX_32768_32767)
(u32: 0, u32::MAX, 32768, 32767, idx_u32_0_MAX_32768_32767)
(u64: 0, u64::MAX, 32768, 32767, idx_u64_0_MAX_32768_32767)
}
macro_rules! range_bench {
($(($t: ty: $min: expr, $max: expr, $length: expr, $lwr: expr, $upr: expr, $name: ident))*) => ($(
#[bench]
fn $name(b: &mut Bencher) {
let mut bin = Uniform::new();
let input: Vec<$t> = rand_uniform($min, $max, $length);
bin.encode(&input).unwrap();
let mut output = vec![0; $upr - $lwr];
b.iter(|| {
bin.access_into($lwr..$upr, &mut *output);
});
assert_eq!(&input[$lwr..$upr], &output[..]);
}
)*)
}
range_bench!{
(u8: 0, u8::MAX, 1024, 892, 900, r_u8_0_MAX_1024_892_900)
(u16: 0, u16::MAX, 1024, 892, 900, r_u16_0_MAX_1024_892_900)
(u32: 0, u32::MAX, 1024, 892, 900, r_u32_0_MAX_1024_892_900)
(u64: 0, u64::MAX, 1024, 892, 900, r_u64_0_MAX_1024_892_900)
(i8: i8::MIN, i8::MAX, 1024, 892, 900, r_i8_MIN_MAX_1024_892_900)
(i16: i16::MIN, i16::MAX, 1024, 892, 900, r_i16_MIN_MAX_1024_892_900)
(i32: i32::MIN, i32::MAX, 1024, 892, 900, r_i32_MIN_MAX_1024_892_900)
(i64: i64::MIN, i64::MAX, 1024, 892, 900, r_i64_MIN_MAX_1024_892_900)
}<|fim▁end|> | (u32: 0, u32::MAX, 128, de_u32_0_MAX_128)
(u8: 0, u8::MAX, 32768, de_u8_0_MAX_32768)
(u16: 0, u16::MAX, 32768, de_u16_0_MAX_32768) |
<|file_name|>get-success-interjection.ts<|end_file_name|><|fim▁begin|>// See https://developer.amazon.com/docs/custom-skills/speechcon-reference-interjections-english-us.html
import { chooseOne } from './choose-one';
import { getAllSuccessInterjections } from './get-all-success-interjections';<|fim▁hole|>
export const getSuccessInterjection = () => {
return chooseOne(...getAllSuccessInterjections());
};<|fim▁end|> | |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>//! Module implementing the handling of gists.
//!
//! Gists are represented as the Gist structure, with the auxiliary URI
//! that helps refering to them as command line arguments to the program.
mod info;
mod uri;
use std::borrow::Cow;
use std::path::PathBuf;
use super::{BIN_DIR, GISTS_DIR};
pub use self::info::{Datum, Info, InfoBuilder};
pub use self::uri::{Uri, UriError};
/// Structure representing a single gist.
#[derive(Debug, Clone)]
pub struct Gist {
/// URI to the gist.
pub uri: Uri,
/// Alternative, host-specific ID of the gist.
pub id: Option<String>,
/// Optional gist info, which may be available.
///
/// Note that this can be None or partial.
/// No piece of gist info is guaranteed to be available.
pub info: Option<Info>,
}
impl Gist {
#[inline]
pub fn new<I: ToString>(uri: Uri, id: I) -> Gist {
Gist{uri: uri, id: Some(id.to_string()), info: None}
}
#[inline]
pub fn from_uri(uri: Uri) -> Self {
Gist{uri: uri, id: None, info: None}
}
/// Create the copy of Gist that has given ID attached.
#[inline]
pub fn with_id<S: ToString>(self, id: S) -> Self {<|fim▁hole|> /// Create a copy of Gist with given gist Info attached.
/// Note that two Gists are considered identical if they only differ by Info.
#[inline]
pub fn with_info(self, info: Info) -> Self {
Gist{info: Some(info), ..self}
}
}
impl Gist {
/// Returns the path to this gist in the local gists directory
/// (regardless whether it was downloaded or not).
pub fn path(&self) -> PathBuf {
// If the gist is identified by a host-specific ID, it should be a part of the path
// (because uri.name is most likely not unique in that case).
// Otherwise, the gist's URI will form its path.
let path_fragment = match self.id {
Some(ref id) => PathBuf::new().join(&self.uri.host_id).join(id),
_ => self.uri.clone().into(),
};
GISTS_DIR.join(path_fragment)
}
/// Returns the path to the gist's binary
/// (regardless whether it was downloaded or not).
#[inline]
pub fn binary_path(&self) -> PathBuf {
let uri_path: PathBuf = self.uri.clone().into();
BIN_DIR.join(uri_path)
}
/// Whether the gist has been downloaded previously.
#[inline]
pub fn is_local(&self) -> bool {
// Path::exists() will traverse symlinks, so this also ensures
// that the target "binary" file of the gist exists.
self.binary_path().exists()
}
/// Retrieve a specific piece of gist Info, if available.
#[inline]
pub fn info(&self, datum: Datum) -> Option<info::Value> {
let info = try_opt!(self.info.as_ref());
if info.has(datum) {
Some(info.get(datum).into_owned())
} else {
None
}
}
/// Get an InfoBuilder based on this gist's Info (if any).
#[inline]
pub fn info_builder(&self) -> InfoBuilder {
self.info.clone().map(|i| i.to_builder()).unwrap_or_else(InfoBuilder::new)
}
/// Retrieve the main language this gist has been written in, if known.
pub fn main_language(&self) -> Option<&str> {
let info = try_opt!(self.info.as_ref());
// To be able to return Option<&str> rather than Option<String>,
// we need to get the underlying reference from Cow returned by Info::get.
let csv_langs = match info.get(Datum::Language) {
Cow::Borrowed(lang) => lang,
_ => return None, // Language field is default/unknown.
};
csv_langs.split(",").map(|l| l.trim()).next()
}
}
impl PartialEq<Gist> for Gist {
fn eq(&self, other: &Gist) -> bool {
if self.uri != other.uri {
return false;
}
if self.id.is_some() && self.id != other.id {
return false;
}
true
}
}
#[cfg(test)]
mod tests {
use gist::Uri;
use hosts;
use super::Gist;
const HOST_ID: &'static str = hosts::DEFAULT_HOST_ID;
const OWNER: &'static str = "JohnDoe";
const NAME: &'static str = "foo";
const ID: &'static str = "1234abcd5678efgh";
#[test]
fn path_without_id() {
let gist = Gist::from_uri(Uri::new(HOST_ID, OWNER, NAME).unwrap());
let path = gist.path().to_str().unwrap().to_owned();
assert!(path.contains(HOST_ID), "Gist path should contain host ID");
assert!(path.contains(OWNER), "Gist path should contain owner");
assert!(path.contains(NAME), "Gist path should contain gist name");
}
#[test]
fn path_with_id() {
let gist = Gist::from_uri(Uri::from_name(HOST_ID, NAME).unwrap())
.with_id(ID);
let path = gist.path().to_str().unwrap().to_owned();
assert!(path.contains(HOST_ID), "Gist path should contain host ID");
assert!(path.contains(ID), "Gist path should contain gist ID");
assert!(!path.contains(NAME), "Gist path shouldn't contain gist name");
}
#[test]
fn binary_path() {
let gist = Gist::from_uri(Uri::new(HOST_ID, OWNER, NAME).unwrap());
let path = gist.binary_path().to_str().unwrap().to_owned();
assert!(path.contains(HOST_ID), "Gist binary path should contain host ID");
assert!(path.contains(OWNER), "Gist binary path should contain owner");
assert!(path.contains(NAME), "Gist binary path should contain gist name");
}
}<|fim▁end|> | Gist{id: Some(id.to_string()), ..self}
}
|
<|file_name|>AcceptsItems.java<|end_file_name|><|fim▁begin|>/*
* This file is part of SpongeAPI, licensed under the MIT License (MIT).
*
* Copyright (c) SpongePowered <https://www.spongepowered.org>
* Copyright (c) contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.spongepowered.api.item.inventory.properties;
import org.spongepowered.api.data.Property;
import org.spongepowered.api.item.ItemType;
import org.spongepowered.api.item.inventory.InventoryProperty;
import org.spongepowered.api.util.Coerce;
import java.util.Collection;
import java.util.List;
/**
* A property type intended for use with
* {@link org.spongepowered.api.item.inventory.slots.InputSlot}s in order to
* query for slots which can accept items of the specified type. It is intended
* that the semantics of the {@link #equals} will be such that the method will
* return true if the other property contains <em>any</em> item present in this
* property's collection.
*/
public class AcceptsItems extends AbstractInventoryProperty<String, Collection<ItemType>> {
/**
* Create a new AcceptsItems property with the supplied value.
*
* @param value Item types to accept
*/
public AcceptsItems(Collection<ItemType> value) {
super(value);
}
/**
* Create a new AcceptsItems property with the supplied value and operator.
*
* @param value Item types to accept
* @param operator Logical operator to apply when comparing with other
* properties
*/
public AcceptsItems(Collection<ItemType> value, Operator operator) {
super(value, operator);
}
/**
* Create a new AcceptsItems property with the supplied value and operator.
*
* @param value Item types to accept
* @param operator Logical operator to apply when comparing with other
* properties
*/
public AcceptsItems(Object value, Operator operator) {
super(Coerce.toListOf(value, ItemType.class), operator);
}
/* (non-Javadoc)
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override
public int compareTo(Property<?, ?> other) {
// This breaks the contract of Comparable, but we don't have a meaningful
// way of providing a natural ordering
return this.equals(other) ? 0 : this.hashCode() - this.hashCodeOf(other);
}
/**
* Returns true if <em>other</em> is also an {@link AcceptsItems} property
* and <b>any</b> item appearing in the other property's collecion appears
* in this property's collection. In formal terms, the method returns true
* if the size of the intersection between the two item type collections is
* greater than zero.
*/
@Override
public boolean equals(Object obj) {
if (!(obj instanceof InventoryProperty)) {
return false;
}
InventoryProperty<?, ?> other = (InventoryProperty<?, ?>) obj;
if (!other.getKey().equals(this.getKey())) {
return false;
}
List<ItemType> otherTypes = Coerce.toListOf(other.getValue(), ItemType.class);
for (ItemType t : this.value) {
if (otherTypes.contains(t)) {
return true;
}
}
return false;
}
/**
* Create an AcceptsItems property which matches AcceptsItems properties
* with containing one or more of the supplied values.<|fim▁hole|> */
public static AcceptsItems of(Object... value) {
return new AcceptsItems(value, Operator.EQUAL);
}
}<|fim▁end|> | *
* @param value {@link ItemType}s to accept
* @return new property |
<|file_name|>options.js<|end_file_name|><|fim▁begin|>module.exports.parse = function ( arr, obj ) {
obj = obj || {};
for ( var i = 0; i < arr.length; ++i ) {
var val = arr[i];
var matches = val.match( /^-(\w+):("?)([^"]+)\2$/ );<|fim▁hole|> }
}
return obj;
};<|fim▁end|> | if ( matches ) {
obj[matches[1]] = matches[3]; |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""zigzi, Platform independent binary instrumentation module.
Copyright (c) 2016-2017 hanbum park <[email protected]>
All rights reserved.
For detailed copyright information see the file COPYING in the root of the
distribution archive.
"""
<|fim▁hole|>from PEAnalyzeTool import *
from PEManager import *
from keystone import *
from DataSegment import *
from SampleReturnVerifier import *
from WindowAPIHelper import *
code_rva = 0
def simple_return_address_save_function():
global code_rva
allocation = pe_instrument.falloc(0x1000)
code = ("push eax;push ebx;" # save register
"mov eax, [{0}];" # get shadow stack counter
"inc eax;" # increase shadow stack counter
"" # get return address from stack
"mov [{0}], eax;" # save return address
"pop ebx;pop eax;" # restore register
"ret;" # return
).format(allocation.get_va() + 4)
code_rva = pe_instrument.append_code(code)
code_abs_va = pe_manager.get_abs_va_from_rva(code_rva)
allocation[0:4] = code_abs_va
# TODO : need a way for does not calculate the relocation address directly.
pe_manager.register_rva_to_relocation(code_rva + 1 + 1)
pe_manager.register_rva_to_relocation(code_rva + 7 + 1)
def simple_indirect_branch_counting_function_call_instrument(instruction):
global code_rva
code_zero_rva = code_rva - 0x1000
instruction_zero_rva = instruction.address
# 5 mean instrumented code size.
code = "CALL {:d}".format(code_zero_rva - instruction_zero_rva + 5)
hex_code = binascii.hexlify(code).decode('hex')
try:
# Initialize engine in X86-32bit mode
ks = Ks(KS_ARCH_X86, KS_MODE_32)
encoding, count = ks.asm(hex_code)
return encoding, count
except KsError as ex:
print("ERROR: %s" % ex)
return None, 0
def simple_indirect_branch_counting_function_instrument():
global code_rva
allocation = pe_instrument.falloc(0x1000)
code = ("push eax;"
"mov eax, [{0}];"
"inc eax;"
"mov [{0}], eax;"
"pop eax;"
"ret;").format(allocation.get_va() + 4)
code_rva = pe_instrument.append_code(code)
code_abs_va = pe_manager.get_abs_va_from_rva(code_rva)
allocation[0:4] = code_abs_va
# TODO : need a way for does not calculate the relocation address directly.
pe_manager.register_rva_to_relocation(code_rva + 1 + 1)
pe_manager.register_rva_to_relocation(code_rva + 7 + 1)
def do_indirect_branch_counting():
simple_indirect_branch_counting_function_instrument()
pe_instrument.register_pre_indirect_branch(
simple_indirect_branch_counting_function_call_instrument
)
def do_return_address_verifier(pe_instrument, pe_manager, fn_rva):
simple_instrument_error_handler(pe_instrument, pe_manager, fn_rva)
pe_instrument.register_after_relative_branch(
simple_instrument_return_address_at_after_branch
)
pe_instrument.register_after_indirect_branch(
simple_instrument_return_address_at_after_branch
)
pe_instrument.register_pre_return(
simple_instrument_return_address_verifier_at_pre_return
)
pe_instrument.do_instrument()
if __name__ == '__main__':
parser = argparse.ArgumentParser("zigzi")
parser.add_argument("file",
help="filename include its absolute path.",
type=str)
args = parser.parse_args()
filename = args.file
if not os.path.isfile(filename):
parser.print_help()
exit()
pe_manager = PEManager(filename)
# add api
window_api_helper = WindowAPIHelper(pe_manager)
message_box_fn_rva = window_api_helper.add_message_box()
# set new instrumentation
pe_instrument = PEInstrument(pe_manager)
do_return_address_verifier(pe_instrument, pe_manager, message_box_fn_rva)
# do_indirect_branch_counting()
# TODO : change to avoid duplicate processing.
# do not double adjustment for file, it break file layout.
# pe_manager.adjust_file_layout()
output_filename = filename[:-4] + "_after_test.exe"
pe_manager.writefile(output_filename)
pe_instrument._save_instruction_log()
# C:\work\python\zigzi\tests\simple_echo_server.exe<|fim▁end|> |
import argparse
from PEInstrument import * |
<|file_name|>nsXMLBinding.cpp<|end_file_name|><|fim▁begin|><|fim▁hole|>/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is mozilla.org code.
*
* The Initial Developer of the Original Code is Neil Deakin
* Portions created by the Initial Developer are Copyright (C) 2006
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "nsXULTemplateQueryProcessorXML.h"
#include "nsXULTemplateResultXML.h"
#include "nsXMLBinding.h"
// Standard XPCOM reference-counting boilerplate for nsXMLBindingSet.
NS_IMPL_ADDREF(nsXMLBindingSet)
NS_IMPL_RELEASE(nsXMLBindingSet)
// Append a (variable, XPath expression) binding to this set.
// Bindings form a singly-linked list headed by mFirst; new entries go at
// the tail so iteration order matches declaration order. If the variable
// is already bound, the new binding is discarded (first binding wins).
nsresult
nsXMLBindingSet::AddBinding(nsIAtom* aVar, nsIDOMXPathExpression* aExpr)
{
// newbinding owns the allocation until the nsAutoPtr assignment below
// transfers ownership into the list.
nsAutoPtr<nsXMLBinding> newbinding(new nsXMLBinding(aVar, aExpr));
NS_ENSURE_TRUE(newbinding, NS_ERROR_OUT_OF_MEMORY);
if (mFirst) {
nsXMLBinding* binding = mFirst;
while (binding) {
// if the target variable is already used in a binding, ignore it
// since it won't be useful for anything
if (binding->mVar == aVar)
return NS_OK;
// add the binding at the end of the list
if (!binding->mNext) {
binding->mNext = newbinding;
break;
}
binding = binding->mNext;
}
}
else {
// Empty list: the new binding becomes the head.
mFirst = newbinding;
}
return NS_OK;
}
// Find the binding whose target variable is aTargetVariable.
// On success, stores the binding in *aBinding and returns its zero-based
// position in the list; otherwise clears *aBinding and returns -1.
PRInt32
nsXMLBindingSet::LookupTargetIndex(nsIAtom* aTargetVariable,
                                   nsXMLBinding** aBinding)
{
  PRInt32 index = 0;
  for (nsXMLBinding* current = mFirst; current; current = current->mNext) {
    if (current->mVar == aTargetVariable) {
      *aBinding = current;
      return index;
    }
    ++index;
  }

  // Not found.
  *aBinding = nsnull;
  return -1;
}
// Return (addref'd, via *aValue) the XPath result for aBinding at slot
// aIndex, evaluating lazily: a cached value in mValues is returned as-is;
// otherwise the binding's expression is evaluated against the result's
// context node with the requested result type aType and the outcome is
// cached in mValues for subsequent calls. *aValue may be null when there
// is no context node or evaluation yields no usable result.
void
nsXMLBindingValues::GetAssignmentFor(nsXULTemplateResultXML* aResult,
nsXMLBinding* aBinding,
PRInt32 aIndex,
PRUint16 aType,
nsIDOMXPathResult** aValue)
{
// SafeObjectAt returns null for out-of-range or empty slots.
*aValue = mValues.SafeObjectAt(aIndex);
if (!*aValue) {
nsCOMPtr<nsIDOMNode> contextNode;
aResult->GetNode(getter_AddRefs(contextNode));
if (contextNode) {
nsCOMPtr<nsISupports> resultsupports;
aBinding->mExpr->Evaluate(contextNode, aType,
nsnull, getter_AddRefs(resultsupports));
nsCOMPtr<nsIDOMXPathResult> result = do_QueryInterface(resultsupports);
// Cache the freshly evaluated result; only hand it out if caching
// succeeded, so the out-param always mirrors mValues.
if (result && mValues.ReplaceObjectAt(result, aIndex))
*aValue = result;
}
}
NS_IF_ADDREF(*aValue);
}
// Resolve the binding at aIndex as a single DOM node (or null).
// Thin wrapper over GetAssignmentFor requesting FIRST_ORDERED_NODE_TYPE.
void
nsXMLBindingValues::GetNodeAssignmentFor(nsXULTemplateResultXML* aResult,
                                         nsXMLBinding* aBinding,
                                         PRInt32 aIndex,
                                         nsIDOMNode** aNode)
{
  nsCOMPtr<nsIDOMXPathResult> xpathResult;
  GetAssignmentFor(aResult, aBinding, aIndex,
                   nsIDOMXPathResult::FIRST_ORDERED_NODE_TYPE,
                   getter_AddRefs(xpathResult));

  // Default to null; a successful lookup overwrites it.
  *aNode = nsnull;
  if (xpathResult)
    xpathResult->GetSingleNodeValue(aNode);
}
// Resolve the binding at aIndex as a string value.
// Thin wrapper over GetAssignmentFor requesting STRING_TYPE; aValue is
// emptied when no result is available.
void
nsXMLBindingValues::GetStringAssignmentFor(nsXULTemplateResultXML* aResult,
                                           nsXMLBinding* aBinding,
                                           PRInt32 aIndex,
                                           nsAString& aValue)
{
  // Start empty; a successful lookup replaces the contents.
  aValue.Truncate();

  nsCOMPtr<nsIDOMXPathResult> xpathResult;
  GetAssignmentFor(aResult, aBinding, aIndex,
                   nsIDOMXPathResult::STRING_TYPE,
                   getter_AddRefs(xpathResult));
  if (xpathResult)
    xpathResult->GetStringValue(aValue);
}
<|file_name|>deletenetworkaclentry.py<|end_file_name|><|fim▁begin|># Copyright 2013-2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from requestbuilder import Arg
from euca2ools.commands.ec2 import EC2Request<|fim▁hole|> DESCRIPTION = 'Delete a network acl rule'
ARGS = [Arg('NetworkAclId', metavar='NACL', help='''ID of the
network ACL to delete an entry from (required)'''),
Arg('-n', '--rule-number', dest='RuleNumber', required=True,
type=int, help='number of the entry to delete (required)'),
Arg('--egress', dest='Egress', action='store_true', help='''delete
an egress entry (default: delete an ingress entry)''')]<|fim▁end|> |
class DeleteNetworkAclEntry(EC2Request): |
<|file_name|>config_test.py<|end_file_name|><|fim▁begin|># pylint: disable=redefined-outer-name, missing-docstring
import sys
import pytest
sys.path.append('..')
from batchflow import Config
@pytest.fixture
def config():
    """Build a Config with one flat key and one nested key.

    Layout: {'key1': 'val1', 'key2': {'subkey1': 'val21'}}.
    """
    return Config(dict(key1='val1', key2=dict(subkey1='val21')))
class TestConfig:
    """Behavioral tests for Config: bracket access, get/pop with the
    'outer/inner' path syntax for nested keys, and item assignment of
    existing, nested, and brand-new keys."""

    def test_getitem_key(self, config):
        assert config['key1'] == config.config['key1']

    def test_getitem_missing_key(self, config):
        with pytest.raises(KeyError):
            _ = config['missing key']

    def test_getitem_nested_key(self, config):
        assert config['key2/subkey1'] == config.config['key2']['subkey1']

    def test_get_key(self, config):
        assert config.get('key1') == config.config.get('key1')

    def test_get_nested_key(self, config):
        assert config.get('key2/subkey1') == config.config['key2']['subkey1']

    def test_get_missing_key(self, config):
        assert config.get('missing key') is None

    def test_get_missing_key_with_default(self, config):
        assert config.get('missing key', default=1) == 1

    def test_get_nested_missing_key_with_default(self, config):
        assert config.get('key2/missing key', default=1) == 1

    def test_pop_key(self, config):
        expected = config.config.get('key1')
        assert config.pop('key1') == expected
        assert 'key1' not in config, 'key should have been deleted'

    def test_pop_nested_key(self, config):
        expected = config.config['key2']['subkey1']
        assert config.pop('key2/subkey1') == expected
        assert 'subkey1' not in config, 'nested key should have been deleted'
        assert 'key2' in config, 'outer key should remain'

    def test_pop_missing_key(self, config):
        with pytest.raises(KeyError):
            _ = config.pop('missing key')

    def test_pop_missing_key_with_default(self, config):
        assert config.pop('missing key', default=1) == 1

    def test_pop_nested_missing_key_with_default(self, config):
        assert config.pop('key2/missing key', default=1) == 1

    def test_setitem_key(self, config):
        config['key1'] = 'new_val1'
        assert config['key1'] == config.config['key1']
        assert config.config['key1'] == 'new_val1'

    def test_setitem_nested_key(self, config):
        config['key2/subkey1'] = 'new_val21'
        assert config['key2/subkey1'] == config.config['key2']['subkey1']
        assert config.config['key2']['subkey1'] == 'new_val21'

    def test_setitem_new_key(self, config):
        config['key0'] = 'new_val0'
        assert config['key0'] == config.config['key0']
        assert config.config['key0'] == 'new_val0'

    def test_setitem_nested_new_key(self, config):
        config['key2/subkey2'] = 'new_val22'
        assert config['key2/subkey2'] == config.config['key2']['subkey2']
        assert config.config['key2']['subkey2'] == 'new_val22'
<|file_name|>p2wpkh.ts<|end_file_name|><|fim▁begin|>import * as bcrypto from '../crypto';
import { bitcoin as BITCOIN_NETWORK } from '../networks';
import * as bscript from '../script';
import { isPoint, typeforce as typef } from '../types';
import { Payment, PaymentOpts } from './index';
import * as lazy from './lazy';
import { bech32 } from 'bech32';
// Script opcode table (OP_0, etc.) used when compiling the output script.
const OPS = bscript.OPS;
// Shared empty buffer: a P2WPKH input's scriptSig is always empty.
const EMPTY_BUFFER = Buffer.alloc(0);
// P2WPKH (pay-to-witness-public-key-hash) layout:
//   witness: {signature} {pubKey}
//   input:   <>  (empty scriptSig)
//   output:  OP_0 {pubKeyHash}
//
// Builds a Payment object from any subset of address / hash / output /
// pubkey / witness; the remaining pieces are derived lazily from the ones
// supplied. With opts.validate (the default) every supplied piece is
// cross-checked for consistency and a TypeError is thrown on mismatch.
export function p2wpkh(a: Payment, opts?: PaymentOpts): Payment {
// At least one data source is required to derive the rest.
if (!a.address && !a.hash && !a.output && !a.pubkey && !a.witness)
throw new TypeError('Not enough data');
opts = Object.assign({ validate: true }, opts || {});
// Shape/type check of all supplied fields.
typef(
{
address: typef.maybe(typef.String),
hash: typef.maybe(typef.BufferN(20)),
input: typef.maybe(typef.BufferN(0)),
network: typef.maybe(typef.Object),
output: typef.maybe(typef.BufferN(22)),
pubkey: typef.maybe(isPoint),
signature: typef.maybe(bscript.isCanonicalScriptSignature),
witness: typef.maybe(typef.arrayOf(typef.Buffer)),
},
a,
);
// Memoized decode of the bech32 address into {version, prefix, data}.
const _address = lazy.value(() => {
const result = bech32.decode(a.address!);
const version = result.words.shift();
const data = bech32.fromWords(result.words);
return {
version,
prefix: result.prefix,
data: Buffer.from(data),
};
// NOTE(review): the closing '});' of the lazy.value callback appears to
// be missing at this point in the dump — confirm against the upstream
// source before relying on this copy.
const network = a.network || BITCOIN_NETWORK;
const o: Payment = { name: 'p2wpkh', network };
// Lazily derived properties: each computes from whichever inputs exist.
lazy.prop(o, 'address', () => {
if (!o.hash) return;
const words = bech32.toWords(o.hash);
words.unshift(0x00);
return bech32.encode(network.bech32, words);
});
lazy.prop(o, 'hash', () => {
if (a.output) return a.output.slice(2, 22);
if (a.address) return _address().data;
if (a.pubkey || o.pubkey) return bcrypto.hash160(a.pubkey! || o.pubkey!);
});
lazy.prop(o, 'output', () => {
if (!o.hash) return;
return bscript.compile([OPS.OP_0, o.hash]);
});
lazy.prop(o, 'pubkey', () => {
if (a.pubkey) return a.pubkey;
if (!a.witness) return;
return a.witness[1];
});
lazy.prop(o, 'signature', () => {
if (!a.witness) return;
return a.witness[0];
});
lazy.prop(o, 'input', () => {
if (!o.witness) return;
return EMPTY_BUFFER;
});
lazy.prop(o, 'witness', () => {
if (!a.pubkey) return;
if (!a.signature) return;
return [a.signature, a.pubkey];
});
// extended validation: cross-check every supplied field against the
// hash accumulated from the fields processed so far.
if (opts.validate) {
let hash: Buffer = Buffer.from([]);
if (a.address) {
if (network && network.bech32 !== _address().prefix)
throw new TypeError('Invalid prefix or Network mismatch');
if (_address().version !== 0x00)
throw new TypeError('Invalid address version');
if (_address().data.length !== 20)
throw new TypeError('Invalid address data');
hash = _address().data;
}
if (a.hash) {
if (hash.length > 0 && !hash.equals(a.hash))
throw new TypeError('Hash mismatch');
else hash = a.hash;
}
if (a.output) {
// Output must be exactly: OP_0 <20-byte push>.
if (
a.output.length !== 22 ||
a.output[0] !== OPS.OP_0 ||
a.output[1] !== 0x14
)
throw new TypeError('Output is invalid');
if (hash.length > 0 && !hash.equals(a.output.slice(2)))
throw new TypeError('Hash mismatch');
else hash = a.output.slice(2);
}
if (a.pubkey) {
const pkh = bcrypto.hash160(a.pubkey);
if (hash.length > 0 && !hash.equals(pkh))
throw new TypeError('Hash mismatch');
else hash = pkh;
// Only compressed (33-byte) pubkeys are valid in P2WPKH.
if (!isPoint(a.pubkey) || a.pubkey.length !== 33)
throw new TypeError('Invalid pubkey for p2wpkh');
}
if (a.witness) {
if (a.witness.length !== 2) throw new TypeError('Witness is invalid');
if (!bscript.isCanonicalScriptSignature(a.witness[0]))
throw new TypeError('Witness has invalid signature');
if (!isPoint(a.witness[1]) || a.witness[1].length !== 33)
throw new TypeError('Witness has invalid pubkey');
if (a.signature && !a.signature.equals(a.witness[0]))
throw new TypeError('Signature mismatch');
if (a.pubkey && !a.pubkey.equals(a.witness[1]))
throw new TypeError('Pubkey mismatch');
const pkh = bcrypto.hash160(a.witness[1]);
if (hash.length > 0 && !hash.equals(pkh))
throw new TypeError('Hash mismatch');
}
}
return Object.assign(o, a);
}
<|file_name|>Steg_rc.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Resource object code
#
# Created: Mon Jan 25 18:31:00 2010
# by: The Resource Compiler for PyQt (Qt v4.6.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x07\xd1\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xfe\x00\xfe\x00\xfe\xeb\x18\
\xd4\x82\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x00\x48\x00\x00\
\x00\x48\x00\x46\xc9\x6b\x3e\x00\x00\x00\x09\x76\x70\x41\x67\x00\
\x00\x00\x20\x00\x00\x00\x20\x00\x87\xfa\x9c\x9d\x00\x00\x06\xe8\
\x49\x44\x41\x54\x58\xc3\xe5\x96\x5d\x8c\x5d\x55\x15\xc7\xff\x6b\
\xef\x7d\xce\xb9\xdf\xf3\xdd\x62\x67\x3a\x33\x75\xa4\x34\xad\xf4\
\x83\x34\x2d\x15\x0b\xb5\x43\xe3\x47\x2a\x62\x82\x49\x31\x12\xfb\
\x42\x62\x8c\x50\x35\x1a\x13\x62\x4c\x7c\x90\xa4\x3e\x99\x18\x8d\
\x09\x2a\x21\x4a\x44\x52\x20\xf2\x00\x94\x2a\x9a\xb1\x0a\x6d\xa7\
\x03\xd2\x4a\x5b\xa4\x25\x33\xed\xb4\xc3\xcc\x74\xee\xcc\xbd\x73\
\xcf\x3d\xf7\xec\xbd\xd7\xf2\xe1\x32\x88\x91\xd6\x71\x8a\x2f\xfa\
\x7f\x39\xc9\xc9\xce\x5e\xbf\xf5\x3f\xff\xbd\xf6\x01\xfe\xdf\x45\
\xef\xf5\x72\xed\x23\x40\x61\x1b\xc0\x75\x64\xa0\xd0\xfa\xae\x85\
\x0d\x71\xa8\x90\x86\x3f\xb6\xfe\xbf\x04\x70\xe3\x6f\x80\xec\x6a\
\x80\x1b\x58\xc7\x82\xef\x28\xd1\x1b\x58\x18\x04\x08\x08\x09\x29\
\xfc\x41\xbc\xec\x27\x85\x89\xe1\x4d\xef\x13\xc0\x86\x67\x81\xec\
\x3a\xc0\xcd\x20\x4b\x1a\xbd\x10\xac\xb6\x75\x7c\xb3\x17\xeb\xb6\
\xdf\xbd\xfc\x2b\x38\x5b\x3b\x85\x03\xe7\x1f\x86\xe5\x14\x3a\x12\
\x98\x82\xfb\x11\x52\xfe\x1a\x7b\xd8\x57\x76\x5c\x1b\x80\xba\xe9\
\x25\x40\x15\xa1\x5d\x19\x77\x02\x74\x80\x53\xf5\x82\x8b\xf5\x81\
\xb4\xa2\xb7\xef\x28\xdd\x89\xee\xb3\x37\x62\x97\xde\x83\x65\xa6\
\x07\xbe\x01\xd8\x39\x03\x3b\x17\xec\x11\x4d\x9b\x55\x56\x5d\xb3\
\x03\xba\xfb\x7e\x02\x84\x3e\xca\x56\xff\xc2\xcf\x9b\x8d\x6e\x5e\
\x17\x7d\x4d\x6b\x76\x04\xef\x3d\xd6\xf5\x6c\xc0\xa9\x74\x04\x87\
\xa7\x0e\xc1\xb1\x03\x98\xc0\x0d\x95\x23\x85\x49\x5a\xd9\xf5\xbb\
\x28\x3f\x8f\xb9\x3f\x2e\x1d\xc0\x64\xd7\x66\x30\xfb\x5b\x7b\xab\
\xb0\x6e\x71\x55\x75\x09\x82\xd8\x55\xe8\x9c\x2e\xa8\xeb\x8f\x8d\
\x0f\xbf\xfe\x7a\xf9\xde\x41\x2b\xa9\xb6\xde\x82\x84\x20\x9e\xe0\
\x63\x0d\xd2\xb2\x1d\xa7\x26\x4a\xf9\x8d\x54\x01\x64\xe9\x00\x5a\
\x17\x50\x3d\x56\x7e\x2a\x5a\x85\xbe\xc6\x05\xbc\x2a\xa2\x6a\xe9\
\x04\x8e\x46\xbd\xb4\x27\x3e\xc9\x8f\x60\x5b\xf2\x48\xd0\xc6\x1f\
\x01\x14\x44\x00\xb6\x0a\x6e\x5e\x43\x45\x3c\xa0\x43\xbd\xb2\xb4\
\xc9\x8f\x6e\x79\x0d\x05\x00\x0e\x8c\x8a\x59\x86\xb4\xf1\x37\xe0\
\xf8\x2d\x8b\x03\x20\xd3\x46\xf8\xfa\x4c\x88\x27\x7f\xcc\x5b\x6c\
\x59\x57\x93\x71\x33\x49\x24\x09\x14\x75\xac\xf8\xfc\xfc\xdc\xd4\
\x73\xc5\xdb\xa2\x7e\xfc\xd4\xe4\xb9\x4b\x04\x70\x15\x83\x74\x3a\
\x40\x66\x45\x6a\x0b\xab\x6b\x43\x14\x4a\x1e\x8e\xda\xa0\x90\x92\
\xe1\x73\x2a\x90\x67\x40\x78\x8a\x02\x4c\xd9\x0b\xc0\xcb\x83\xff\
\x06\x00\x00\xba\xf7\x11\xdc\x1c\x74\xfb\x2e\x31\xb9\x35\xc8\xba\
\x32\xb4\x2e\x62\xb5\xaf\xa9\x2e\x52\xd2\x52\x3d\x9d\xbf\xc3\x94\
\x70\x17\xa7\x0a\xc9\xa5\x50\x5c\xd5\x50\xb6\x27\x41\xd8\x65\x21\
\x96\xc0\xae\x79\x9a\x55\x20\x08\x5a\x9c\x0f\xda\xed\x90\x0e\xfd\
\x37\x54\x96\x46\x92\xb3\x82\x57\x77\x5f\x25\x84\x00\x50\x3d\x02\
\xf4\xec\xd3\xa2\x22\x55\xd4\x59\xfa\x62\x3a\x65\x66\x28\x52\x0f\
\x8a\xd0\x87\x40\xfa\xcb\x10\xbd\xb6\x7e\x21\x42\x7d\x34\x38\x96\
\x8c\xe1\x67\x50\xaa\x4b\x65\x64\x19\x69\x01\x5b\x05\x71\x04\x4e\
\x15\x38\x51\xb0\xb3\x46\xb9\x8a\x59\xa5\x22\xd9\x4a\xca\xbf\x68\
\x3a\xe8\xad\xa8\x13\x98\x1d\xba\x8a\x03\x6b\x1e\x06\xec\x7c\x0e\
\x41\x9b\xef\x57\x79\xf5\x68\xd0\xea\x5c\x63\x32\xdc\xc2\xa9\xca\
\x24\xe3\x61\x92\x5e\x56\xc3\xe9\x84\xfc\xd2\x5d\x6a\x3c\xd3\xb7\
\xb7\x3a\x75\xe1\xe9\xce\x6d\x99\x01\xf5\x50\xb6\x37\x1d\x20\x23\
\xe0\x86\x02\x04\x20\x2d\x10\x26\x08\x03\x3a\xcb\xc8\xf7\xd7\x0f\
\x86\xed\xe9\x3d\xec\x30\x35\xb2\xf5\x2a\x0e\x98\x76\xa0\xf7\x7b\
\x2d\xa8\x9f\x8c\xb7\x55\x4f\xe7\xb7\x27\xe3\x51\x5f\x7c\x2e\x73\
\x31\x7e\x23\x78\x3e\x39\x87\xfd\xb5\xe1\xc6\xfe\x65\x1f\x9b\x3d\
\x7c\xdd\x3d\xbc\x97\x8a\xfa\xbb\x85\xb5\xe9\xc8\xdc\x4b\xe1\x0b\
\x3a\x4f\x3b\xa1\x90\xf7\x35\x0d\x32\xcd\x76\x84\xb1\x70\x54\xc1\
\x0d\x35\xa0\x73\xbe\xd6\xe2\x79\xa8\x78\x07\x30\xf9\xeb\x2b\x38\
\x00\x00\xa5\x5b\x43\xc4\x27\xac\x09\x7a\xf3\x6d\x94\x31\x25\x78\
\x5f\x4f\x47\x1b\x97\x8b\xbb\xda\x1b\xa5\x8d\x35\xc0\x63\x95\x8a\
\xe4\xdb\x2a\x47\x83\x20\x52\xbe\xa6\x0f\xb9\x58\xef\x04\xa8\x5f\
\x67\x19\x14\x30\x20\x04\xc8\xdb\x10\x42\x10\x01\xb2\x3d\xc9\x58\
\x6e\x65\x6d\x37\x29\x75\x62\x64\x1b\xbf\xb7\x03\x00\xd0\x18\xf5\
\xe0\x04\xec\xde\x4a\x63\x3b\x9e\x94\xed\xc5\xb4\xca\xb1\xf7\xc9\
\x89\x79\x84\x3d\x19\x34\xce\x8b\x4d\xce\xab\xa3\x3e\x0e\x26\x93\
\x51\x7a\x96\xad\xb9\xd9\xcd\xa9\x71\x32\x88\x20\xd4\xe6\xaa\x06\
\xbe\xae\x21\x9e\x40\x6f\xb7\x25\x4c\xe0\x54\xb5\x98\x3c\x4f\xbb\
\x6c\xf7\xef\x73\x2b\x2a\x98\x1b\xba\x02\xc0\xd5\x14\xbf\xd2\x40\
\xb8\x32\x67\x21\xaa\x46\xf9\xa0\x55\x65\x83\xe9\x74\x9c\x1f\xe7\
\x98\x87\x54\xc6\x7c\x82\xad\x5e\xa1\x42\x86\xce\x30\x00\x82\xf0\
\x02\x01\x20\x56\x41\x47\x1c\x86\x6a\xea\x80\x2a\xaa\xc6\xf4\x13\
\xff\x3c\xb4\xcc\xe2\xc6\x05\xa0\xfb\x0d\xe0\xa5\x8f\xe1\x1f\xe0\
\x94\xfb\xa8\x45\xce\xe8\xac\x5e\x2e\x44\xeb\x4d\xc4\x30\x05\xdf\
\xfc\xa6\xd2\x0c\x22\x18\x10\x69\x3e\x6d\xc5\xac\xd4\xa1\xee\xa2\
\x90\xe6\x00\x5e\x1a\x80\xad\xa4\xe0\x1a\x67\xe1\xe5\x71\x30\x62\
\x9e\xe3\xbf\x98\x76\x73\x5b\xd0\x15\xed\x0e\x5b\x69\x27\x00\x5a\
\x28\xdc\xcc\xc1\x3f\x20\x38\x55\x81\x9f\x57\x21\x85\xff\xba\xef\
\xa2\x01\xaa\xbf\xaa\x40\x77\xea\xbf\x06\x03\xc1\x39\x5f\xf6\x69\
\xc7\xa7\xbb\x14\x5b\xee\x07\x90\x08\xfc\x3b\x05\x4d\xc1\x43\xe7\
\x3d\x38\x51\x68\xe6\x82\x20\x96\x8c\x40\x87\xcd\x2b\xc3\xfe\xe7\
\x19\x58\x90\xc4\x02\x77\xc1\xd9\xae\xbd\xcb\x59\x52\xbe\x99\x42\
\xf5\x18\x69\x5a\x0f\x01\x91\x69\x4e\xc2\xa8\xcb\x36\x0b\xd7\x34\
\xb8\xa1\x20\x5e\x01\x4c\x59\x9d\xe7\xd3\xce\xb7\x1e\x69\xdf\xd1\
\x40\xf9\xa0\x7f\x67\xcf\x25\x5d\xe8\xe2\x05\x9c\x8a\x86\x20\x00\
\x00\x4e\x15\x7c\xac\x20\x9e\x60\x67\x0d\xdc\xbc\x86\x8f\x35\x5c\
\xac\x21\x1e\xf0\x89\x22\x5f\x0f\xee\x8b\x0a\xe5\x1d\x99\x62\x8a\
\x1b\x1e\xcd\x2e\xcd\x81\x05\x45\xab\x22\x70\x2c\xb1\x2a\xa8\xdb\
\x49\x51\x0f\x41\x00\xd7\xec\x96\xb4\x00\xbe\x39\x9a\xc5\x11\x00\
\x6a\x0e\xa6\x44\xb7\x93\xc6\xed\xc8\x9b\x9a\xd4\x71\xb2\xe3\x33\
\xa1\x9f\x79\xda\x2e\x1e\x60\x70\xac\x08\xe9\xd4\xe8\xfb\xaa\xd6\
\x9e\xc3\x4e\x31\xc1\x46\x00\xb7\x90\xa2\x3e\xf2\x04\xa9\xc8\x0c\
\x5b\xbc\x01\x83\x0c\x11\x65\xc4\x13\xd8\x2a\x27\x0d\x9c\x17\x87\
\x59\xf1\x2a\xef\xe6\x4d\x1b\x84\x06\x29\x44\xc3\x57\xfd\x91\xb6\
\x4f\x66\x78\xd1\x00\xd4\x05\x40\x58\xdb\x69\x5a\xe3\x2a\xf4\x59\
\x11\xf5\x00\x94\x5a\xaf\xac\x58\x99\xf1\xcf\x73\xd9\x3e\x18\x5c\
\x67\xff\x44\x01\x5d\x0f\xa8\x6e\xf1\x04\x71\xa8\xb8\x49\x77\x7f\
\x72\xa6\xfe\x7d\x9d\x57\x97\x29\xd0\x5b\x5c\xac\xf3\xc2\x74\x93\
\x0e\xf5\x8b\x12\xd2\xe8\xa2\x01\x4c\x0b\xa1\x7e\x96\x91\x4e\x50\
\x35\xc8\x53\x56\x6a\x32\xa6\xac\xa4\x3c\xed\x7e\xa0\xdb\xea\x07\
\x32\x1f\x96\x55\x2a\x67\xee\x03\xe9\x4d\x92\x2a\xc5\x31\xc6\x5c\
\x99\xf7\xdb\x8b\xf6\x40\xd0\x61\x2e\x89\xc3\x6b\xaa\xa8\x3e\x45\
\x44\x1f\x80\x50\x36\x34\xea\xe5\xb4\x94\x1e\x59\x34\x40\x7c\x8a\
\x51\xdc\x18\x02\x44\x5c\x7f\xd3\x8f\x2a\xe8\x33\xe9\x84\x3d\x38\
\x7d\xa8\x7a\x38\xbb\x9a\xca\xe2\xa3\x1c\xd7\x29\xcb\x89\x8e\x7c\
\x0d\x23\xe9\x98\xdb\x57\x7e\xec\xf2\x13\xf9\xad\x85\xc4\x2c\x0b\
\x33\xa6\xcd\xdc\x4b\x9a\xee\x22\x23\xa1\x0e\xfd\x19\x3b\x93\xfe\
\x50\x93\x1e\xa7\xc5\x02\x5c\x49\x9d\x5f\x28\x40\xe5\x14\x49\x2a\
\xdd\x6e\xde\xe4\x28\xab\xb7\xa6\x63\xe9\x73\xf9\xcd\xf9\xa9\x8e\
\x2f\xdd\x80\xcb\x3f\x39\xf3\x41\x90\x7c\x8b\x08\x77\x83\x78\x9a\
\xbc\x3b\xe4\xca\xf6\xa1\xa9\x9f\x57\x8f\x67\x06\x02\xb9\x66\x80\
\x05\x15\x07\x4b\xe0\x79\xd6\x9c\x30\xc2\xee\xc0\xeb\x12\x91\x6e\
\x0d\x36\x43\xe1\x73\x04\x89\xa4\xe1\x8f\xba\xe9\x74\xb8\x76\xbc\
\xfe\x66\xd4\x6b\xd2\xea\x9f\xd3\x66\xb6\xde\x2f\x80\x77\xab\xf4\
\xf1\x12\x48\x43\x01\xe8\x70\x93\xd6\xd6\x86\xeb\x15\x95\x23\xe6\
\x78\xe9\x7f\xcf\xff\xbb\xfa\x3b\x82\x22\x86\x34\x7d\x28\x03\x94\
\x00\x00\x00\x2e\x7a\x54\x58\x74\x63\x72\x65\x61\x74\x65\x2d\x64\
\x61\x74\x65\x00\x00\x78\xda\x33\x32\x30\xb0\xd0\x35\xb0\xd0\x35\
\x32\x09\x31\x30\xb0\x32\x32\xb5\x32\xb5\xd0\x35\x30\xb5\x32\x30\
\x00\x00\x42\x26\x05\x1b\x7b\xc0\xdf\xf5\x00\x00\x00\x2e\x7a\x54\
\x58\x74\x6d\x6f\x64\x69\x66\x79\x2d\x64\x61\x74\x65\x00\x00\x78\
\xda\x33\x32\x30\xb0\xd0\x35\xb0\xd0\x35\x32\x08\x31\x34\xb2\x32\
\x32\xb0\x32\x31\xd0\x35\x30\xb5\x32\x30\x00\x00\x41\x9c\x05\x0c\
\x60\x65\x6d\x10\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x07\x1f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x06\xc1\x49\x44\x41\x54\x78\xda\xc4\
\x57\x6b\x6c\x53\x65\x18\x7e\xce\xa5\x2d\x6d\x77\x01\x99\xcb\x94\
\xcb\xd8\x46\x61\x78\x19\x22\x98\x0c\xa7\x43\x23\x1a\x85\x69\xe4\
\x87\x46\x63\x54\x4c\x34\xf1\x6e\xbc\xc6\xf8\x47\x8c\x89\x31\x6a\
\x62\x8c\xd7\x3f\x1a\x15\xbc\x4b\x22\x08\x5e\xb1\x20\x82\x71\x22\
\x0c\x41\x60\xba\x21\xdb\x18\x83\x6e\x65\xed\xda\xb5\x5b\xdb\x73\
\xce\xe7\xf3\x9d\x9e\x6e\x1d\x76\x02\xf1\x76\x92\x37\xfd\xce\xd7\
\xaf\xef\xfb\xbc\xcf\x7b\xfb\xaa\x08\x21\xf0\x7f\x3e\x0a\x45\xdf\
\xfe\xd2\xc5\x9f\x68\x30\x96\x0a\xcb\xe2\xab\x28\x78\x4c\x51\x55\
\x98\xd0\xd7\xcd\xbf\x7b\xe3\x32\x6e\x18\xff\x14\x00\x9d\xe2\x95\
\xc6\xe7\xde\xfe\x06\xd5\x52\xaf\x50\xc6\x81\xa9\xe1\xe7\x57\x6f\
\x69\x92\xe7\x29\xf1\x7f\x12\xc0\x04\xdb\x73\x1a\x4f\xef\x7b\x8a\
\xfe\x6b\x8e\xc5\xfc\xc7\x84\xa7\xf2\x26\x18\xa9\x61\xec\x78\x61\
\x61\x4c\x51\x94\x93\xa3\x59\xd5\x24\x7b\xeb\xc9\xde\xd5\xc7\xb2\
\xa7\x8f\x58\xb3\x0c\x08\xd5\xcd\x37\x17\x25\xcf\x80\x8c\x88\x30\
\x61\xa5\xc2\x98\x7f\xd7\xbb\x7f\xc6\x76\x5c\xeb\x54\xa0\xbb\xc8\
\xde\xcd\x4b\x0b\xb1\xa7\x8f\x1a\x22\x30\x95\xaf\x8a\x3e\x96\x01\
\x7b\xa9\x22\x13\xde\x04\xf4\x05\x4f\xd2\xba\xe0\xcf\x4d\xb8\x03\
\x0f\x22\x9b\x5f\x98\x30\x3e\x00\xa4\xe9\xb8\x2a\xf9\x2a\x10\x02\
\xb9\xaf\xe5\x51\x32\x82\xec\x38\x84\x88\x2c\xe3\x56\x2c\x77\x52\
\x29\x94\x03\xd9\x7d\x2b\x43\x3b\xda\x38\x00\x0a\xc5\x95\x95\xa1\
\x28\xf6\xe7\x5f\x33\x40\x9d\x56\x72\x6c\x58\x0b\x33\x60\xd8\xa5\
\x66\x7b\x7b\x1c\x00\xd2\xa8\xaa\xa9\xe8\xd8\xd1\x86\xfe\xce\xc3\
\x32\x81\xc6\x27\x40\x82\x50\xb6\x32\xba\x1a\x13\xf8\xfc\x50\x16\
\xb4\x3a\x92\x94\x79\x39\x60\x66\x19\xb0\xe9\x56\x64\xe5\x17\x2c\
\x47\xfb\x1b\x1a\xef\xdc\xfe\x2b\x90\x49\x61\xc1\x3d\x6f\xd1\xbe\
\x39\x4e\xff\x70\xb6\x95\x63\x36\x5c\x4c\xca\xd7\x6e\xb5\x93\x72\
\x34\x04\x22\x63\xc7\x59\x91\xa2\x69\x59\xc6\x0a\x84\x4d\x7a\xd0\
\xf9\x63\x2b\xf1\x1a\xa8\x9c\x57\x09\xab\xfd\x59\x99\xa2\x59\x16\
\x94\xfc\xca\xa1\x97\x14\xcb\x52\x60\x8d\x60\xb3\xa0\xf9\x2b\x81\
\x53\xae\x40\x34\x9e\xde\xc6\x0d\xb7\x9e\x5f\xeb\x2a\x19\x50\x35\
\x17\x3a\x7e\xda\x8b\xf0\x81\x43\xd4\x57\xc8\x2b\x15\x93\x2b\xcb\
\x51\x3d\xaf\x9a\xeb\x0c\xba\xc2\x45\xf8\xae\x75\x22\xf6\x74\x17\
\xe5\x85\x42\xc5\x99\x53\x07\x71\x41\x6d\x14\x95\xa7\xa6\x60\x9a\
\x9a\x0d\x42\xb0\xd4\xd5\xa2\x6a\x84\xf7\x6f\xc3\x2f\x07\x13\x6b\
\xa4\xd1\xb1\x39\xc0\x38\x0d\xf4\x84\x61\x0c\x27\x70\xde\x1d\xcf\
\x3b\xd4\x2a\xc7\xb4\x44\x15\xe2\xc8\xfb\x80\x66\x61\xf5\x96\x72\
\xfc\xde\x7f\x3a\xae\x5e\x7a\x31\x6e\x08\xd4\x8c\x81\xd9\xf6\xdb\
\x7e\x7c\xb8\x3e\x88\x39\x15\xdd\x68\x3a\x2f\x42\xf5\x64\x83\x3d\
\x41\xf5\x4d\x45\xec\xe0\x07\x78\xfd\xab\x83\xeb\x78\x6c\x58\x1f\
\xcd\x15\x32\xa0\xe9\x88\xf4\xf4\x61\xd2\x8c\xb3\x18\x91\x24\x8c\
\xd0\x27\x63\x93\x92\x1f\x2a\x43\xa0\xe9\x0a\xd6\xff\x50\x82\x9e\
\xc4\x74\x3c\x70\xef\x72\xac\xdd\x13\xc3\x8a\x95\x07\x90\x32\x2d\
\x86\x57\x45\xa9\x4f\xc7\xb2\xb3\x4f\xc3\xc3\xf7\xdf\x86\x17\x5f\
\x79\x13\xeb\xb6\x09\x34\xd5\xc7\x18\xda\x29\x30\xd3\x26\xfa\xbb\
\xdb\x7a\x5b\xda\x07\xc2\xb2\xf6\x47\x19\x50\x0d\x3b\x09\x63\xa1\
\x30\x4e\x3f\x77\x31\xb1\x1d\xe2\xbb\xdb\xa9\xff\x51\x16\x54\x4d\
\xa0\x2b\xa4\xe2\xe7\xee\x0a\x3c\xf6\xf0\x72\xdc\xb7\xba\x0b\xf3\
\xa6\xf9\xf0\xe4\x92\x29\x48\x18\x16\x22\x49\x13\xa1\x64\x06\xdf\
\x76\xc4\xd1\x72\x64\x08\x8f\xde\xb9\x1c\x4f\x3d\xf3\x32\xea\x66\
\x0c\x61\x7a\xa0\x1c\x7d\xfb\x5b\xb0\xa7\x23\xb2\x8a\xaa\x86\x64\
\xcc\x54\x67\xd6\xd9\x2d\x73\x68\x20\x41\x0f\x34\x78\x4a\x4f\x83\
\x99\xe9\xa5\x6d\x37\x4b\xc6\x45\xd1\x6d\x91\x0c\xc9\x30\x05\x77\
\x79\xd1\x74\x69\x23\x3e\x6c\xe9\xc7\xbc\xe9\x3e\x5c\x56\x3b\x11\
\x31\x7a\x16\x1e\x32\xd0\x6f\x98\xb6\xe6\x92\x62\x1d\x51\xae\x83\
\x1d\x49\x2c\x5d\xdc\x88\x8d\xfc\x0d\xbc\x65\x88\x77\xb7\x62\xd3\
\xee\xa3\x5f\x48\xfa\x9d\x16\x97\x73\xcd\x44\xb4\x3b\x84\x89\x33\
\x66\xdb\x60\x14\x76\xaf\xac\x61\x6d\x44\x64\x92\x42\xa7\xf7\x6d\
\x4c\xb2\x33\x66\x62\xcd\x2f\x03\x68\xac\x2e\x41\x6f\x22\x83\xfe\
\x94\x85\xb8\x21\x90\x64\xd6\x27\xe4\x44\xa5\x23\xc5\x7e\x17\xb6\
\x1c\x49\xf1\x6c\x0d\x76\xb6\xeb\x76\xa5\xc5\x8f\x1c\x48\xbd\xbd\
\xe1\x60\xab\xdd\x7a\x47\x00\x28\x76\x81\x23\xd2\xd5\x83\x53\xaa\
\x02\xfc\xea\x68\xb6\x23\xd2\x7b\x7b\x3e\xe4\x44\x93\xe2\x62\x6e\
\x9a\x5c\xea\xc8\xf0\x32\x93\x34\x05\x22\x29\x13\x83\xa4\x3f\xc1\
\x64\x4a\x52\x57\x86\xc6\x8b\xfc\x3a\xfc\x14\xc5\xad\x71\x92\xeb\
\x76\x85\xa4\xba\x5a\xd1\xd9\x9b\x5c\xef\x78\x6f\x8e\x74\x42\x59\
\xdb\x22\x19\xe5\xb8\xcd\x60\x42\x59\x05\xd7\x21\xdb\xfb\x3f\xf5\
\x00\xbb\x51\xca\xba\x4e\x73\x76\x19\x70\xd3\x50\x24\x69\x20\x96\
\xa1\x71\x7a\x2e\x8d\xa7\xd9\xa4\x7c\x6e\x55\x1e\x43\x3a\x6d\xd1\
\x4d\xc1\x32\x34\x79\xcd\x50\x10\xed\x6c\xc7\xd6\x7d\x91\x8f\x1d\
\x00\x62\xb4\x15\xf3\xcb\xc1\xc3\x3d\x28\x9d\xca\x26\x41\xaf\x84\
\x8c\xa2\xaa\x17\xbe\x98\xb0\x8d\x5a\x66\x1a\xb1\xc1\x04\x8a\x26\
\x68\x08\x25\x0c\x9e\x16\x18\x14\x59\xcf\x7d\xac\x02\x9f\xd7\x65\
\x3b\x65\x29\x06\x7c\x82\xa1\xe1\x59\x49\x76\xec\x50\x07\x9e\xf9\
\xa8\xed\x87\x5c\xfc\x47\x19\xa0\xd2\xa3\x1d\x9d\x98\x2c\xe9\x47\
\x82\xb6\xf5\xf1\xe7\x01\xd3\xe0\x9c\x5a\x0d\x3b\x77\xef\xc3\xe2\
\xaa\x59\xd8\x19\x4a\x91\x6e\xe6\x08\x8d\xfb\x5d\x36\xbe\x91\x21\
\x25\xf8\x52\xe7\x4d\x61\xd7\xde\xbd\xa8\xf6\xf5\xe0\x50\x78\x68\
\x83\x6d\x20\xef\x52\xa2\xe6\x42\x10\xed\xe8\x42\x71\xed\x2c\x26\
\x8f\xec\xd0\x7e\x8a\xaf\xb0\x78\x7c\x68\xba\xa4\x14\x5f\x06\xb7\
\x62\x51\x85\x69\x37\xf3\x81\x14\x69\x26\xa1\x64\x1a\xe9\x8c\x40\
\x62\x98\x39\x41\xf1\xb1\x0a\x66\x59\x51\x7c\xfd\xcd\x46\x0c\x4d\
\x9a\x8f\x6f\x77\xf5\xad\xca\xa7\x3f\x47\x6a\x79\xcb\xab\x97\xb6\
\x6b\x8a\x28\xb6\x18\x57\xcb\xb2\xfe\x72\x12\xca\xd8\xba\xd9\x05\
\x83\xbd\x73\x10\xf5\xd4\xe2\xd6\x1b\xaf\x41\x73\xc4\x83\x5d\x49\
\x8f\x9d\x70\x1e\x86\xc5\x4b\x36\xce\x70\x25\x31\x5b\x44\xf1\xc6\
\xca\x77\x50\x55\x33\x13\x0b\x2f\xbc\x08\xab\x3f\xdd\x84\x47\xee\
\xbc\xae\x98\x6a\x06\xf3\x01\x4c\xa4\x54\xdf\x75\x55\x4d\xc3\x70\
\xda\x14\x27\x76\x95\x16\x9c\x45\x86\x18\xaa\x58\x72\xf9\xec\x99\
\xd3\xae\x6c\x6c\xa8\x47\xa0\xa6\x1a\x1e\xb7\xdb\x76\x2d\x95\x4a\
\xe3\xb7\xf6\xfd\xd8\xbc\xf5\x7b\x02\xf2\xa1\x71\xd1\x22\xd4\x04\
\xe6\xc0\x15\x6b\x46\xdb\xda\x15\x18\x18\xd6\x16\x2e\x79\xec\xbb\
\x66\x38\x57\x60\x49\xba\x44\xe5\x1f\xd3\x17\x4e\xec\x42\x5b\x52\
\xbf\xb0\xa1\xbe\xae\x6e\xee\xed\x3e\xaf\xb7\x2e\xf7\x1f\x43\x86\
\x34\x1e\x8f\xff\x34\xb9\xac\x6c\x41\xd5\xcc\x00\x5c\xde\x52\x34\
\x4c\x8b\xa2\x76\x56\x09\xe3\xb5\x05\x5b\x3e\xdb\x89\x0b\x1f\xd8\
<|fim▁hole|>\x32\x4e\x4f\x8f\xc8\xd9\x8e\xb1\x37\x02\x99\x4c\xc5\xcf\x3d\xfd\
\x78\x63\xc3\x13\x8b\x37\xa3\x86\xaa\x0f\x6f\xcf\xce\x96\x39\xd7\
\xc3\xb4\x76\xc8\x73\x7e\x15\x7f\xef\x11\x4e\x4b\x1d\xa0\xf4\x51\
\x7a\x1d\x91\x6b\xb6\x53\xf4\x50\x0e\x2c\x7b\x7c\x43\xe3\xfb\xed\
\x73\x09\x97\x77\xc3\xde\x66\xec\x0e\xae\xdc\x20\xb2\x1d\xc0\xa5\
\xe0\xdf\x7f\x24\x33\xe5\x94\xaa\xf7\x56\x34\x6d\x56\x98\x27\xaa\
\x62\xe1\xda\x15\x9f\x2f\xe0\xde\xef\xff\x05\x80\x7c\x10\x53\x28\
\x6c\x26\x88\x39\xec\x1c\xfd\xaf\x00\xe4\x40\xf8\x9d\xaa\xcb\x38\
\x1d\x31\x8d\xff\xfb\xf9\x43\x80\x01\x00\xe9\x9a\xaa\x21\x20\x38\
\x60\x90\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\x42\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x05\x09\x49\x44\x41\x54\x78\xda\xed\x97\x0b\x4c\x95\x65\
\x18\xc7\x7f\xdf\xb9\x70\x2e\x88\x20\x10\x91\x61\xa0\x88\x17\x34\
\x2f\xe5\xcc\xb6\xd4\x4d\x92\x30\xad\x65\x56\x2e\xd7\xca\xcb\x2a\
\x5d\x6c\xb5\xa5\x2b\x9d\xba\x69\xe9\xcc\x5b\xce\xac\xe9\xa6\x9b\
\xda\x65\x5a\xda\x34\xcc\x74\x8b\xdc\x5c\xce\x5b\x86\x8a\x77\x41\
\x01\x43\x20\x2f\x20\x9c\xfb\xe5\xfb\x7a\xbe\x73\x0e\x2a\x7a\x50\
\x1c\x54\x73\xf3\xdd\x5e\xbe\xef\x7b\xdf\xe7\xfd\x3f\xff\xe7\xfa\
\x1e\x14\xfe\xe7\xa1\x3c\x20\x70\x5f\x13\x90\xc3\xca\x6b\xd9\x8c\
\xd8\x74\x92\x1d\x41\x0d\xf5\x3f\x27\x90\x95\x48\xb7\xa2\xe9\xfc\
\xf1\xd2\x2a\x46\xff\x5a\x42\xa1\xbe\x96\x11\x4f\xa7\x8b\x0e\x6a\
\x7c\x41\x7c\x6d\x4e\xc0\xa8\x60\xe8\x93\x4a\x76\x51\x15\xc7\xf4\
\xef\xf9\xcf\xb1\x60\xfa\x68\x3e\x3a\x7c\x82\xa2\xfe\xcb\x79\x62\
\x6c\x5f\xc6\xac\x9b\xc4\xfa\x9c\xa5\x8c\xd8\x53\xc6\xee\x36\x27\
\x90\x68\xe5\xe1\xea\xb9\x54\x1c\x2a\xe7\xc0\x17\xbb\x59\xf1\xe9\
\x30\x16\x64\x24\x90\xa1\x18\xa0\xe0\x24\x5b\x47\xf6\x60\x94\x21\
\x0e\xe3\xe0\x15\xe4\xfc\x7e\x9e\xdf\xda\x9c\x80\xc5\x44\x7c\xc9\
\xfb\x94\xa4\xd9\x49\xd6\xbf\x55\x13\x68\x31\x02\x22\x28\x06\x99\
\xc1\x86\x30\xe0\xe4\xed\xe4\x6b\x06\x5c\x43\xb2\x78\x66\xce\x0e\
\xe6\x95\xd6\x72\xae\x55\x04\x32\xe3\xe9\x9c\x68\xa7\xc3\xc1\x2a\
\x0e\xef\x1d\x47\xf1\xc0\x4c\xb2\x49\x95\xc3\xa2\x5c\x0b\x88\x80\
\x26\xef\x42\x06\xf1\x44\xf0\xaa\x3c\x2f\x83\xc9\x26\xcf\x24\xe8\
\x33\x9f\x41\xc5\xd5\xec\x6f\x15\x81\xb9\x43\x59\x3c\xeb\x79\x3e\
\xdc\x57\xc4\xd1\x4e\x1d\x49\xeb\xd8\x95\x44\x4f\x35\x04\xea\x44\
\x77\x30\x02\x24\xca\x8d\x76\x88\x79\x34\xec\x0d\x7f\x29\x94\xf9\
\xa8\xcc\x5e\x49\x96\xaa\xe2\x6e\x15\x81\x67\x3b\x33\xf6\xe7\x91\
\x6c\x08\x88\x12\xcb\x63\x50\x2f\xe0\xaa\x47\x36\x8c\x61\xf7\x5f\
\x47\x8a\x14\xa2\x2d\x5d\xa6\x05\xbe\xde\xa5\x14\xe4\x17\x6a\xe3\
\x9c\x7e\x5c\xcd\x95\x69\x8b\x08\xd8\x4c\xa4\x9c\x7c\x95\x8a\x94\
\xee\x58\x9c\x95\x62\xb9\x2b\x6c\x71\xa8\x11\x44\x41\xd1\x3f\x63\
\x33\xc1\x51\x8d\xd7\x51\x87\xa3\x16\x6a\x86\x6f\x61\x70\xad\x8b\
\xab\xf7\x4c\x60\x4c\x3a\xaf\xe7\xa6\x32\x32\xaf\x37\xa3\xe3\x6c\
\xd8\x1d\x55\xe2\x62\x53\x53\x04\xe5\x56\x34\xc9\x09\x73\x3b\xf1\
\x56\x07\x91\xad\x81\x43\x01\xe5\x68\xee\xf7\x0c\xf2\x05\x34\xf7\
\x3d\x13\x98\xdc\x93\x39\xcb\xfa\x31\xdb\x21\x60\x0d\x17\xc3\xe0\
\x51\x95\x72\x4b\x38\x44\x2e\x56\x12\xd5\xa4\x11\xcc\xdd\xa6\x8c\
\x3a\x70\x41\xdb\xd1\xe2\x10\x74\x89\xa5\xe7\x94\x2e\x4c\x9b\x56\
\xcc\x44\x93\x42\xd2\x91\x97\x39\x9f\x60\x22\xce\x29\xbe\x54\x8c\
\x51\xac\x57\x9a\xa2\x85\xf4\x0b\x81\xf6\xf1\x70\xca\xcb\x99\x21\
\x9b\xe9\xde\x9c\x81\x4d\x08\xe4\x67\x30\xeb\x87\x2a\x56\x27\xc7\
\xd0\xb9\x70\x10\x7b\xf2\x8b\x79\xbb\xfb\x43\x64\xbe\xd7\x9b\xa9\
\xd7\x6a\x74\x63\x6e\xb7\xbc\x39\x02\x8d\xe4\xec\x26\xd4\x35\xa7\
\x59\xf1\x59\x19\xd3\x5d\x41\x5c\x77\x24\xb0\xad\x3f\x65\xa7\x9c\
\xec\xdf\x5c\xc3\xda\x6f\xfb\xb1\xdd\x24\x71\x34\x9b\xa5\xac\xa5\
\xae\x35\x95\xe8\x49\x17\x65\x4d\x89\x84\x40\x3f\x93\xf2\xe2\x5b\
\x24\x58\x61\xef\x86\xf5\xe7\x56\x57\x68\x8b\xb7\xd6\xf0\x5d\xbd\
\x9f\x6b\x51\x09\x14\xf4\xa2\x3c\xdd\xc6\x63\xd7\x14\xaa\x93\xac\
\xa4\xfa\xfd\x52\xcf\xbe\x30\xe0\xcd\xf1\x8d\x9a\xf9\x37\xef\x2b\
\xe1\x32\x4d\x1a\x3a\x9c\xea\x29\xab\x64\x49\x21\xed\xab\x89\xb8\
\xf6\xed\xe2\x6f\xa9\x88\x55\x7f\xb1\x70\x6d\x25\x4b\x43\xe2\x16\
\x05\x5b\xb6\x85\xc1\xf9\xc9\xcc\xeb\xdf\x9e\x01\xc1\x48\x79\xa9\
\xea\x0d\x30\x1a\x73\xaf\x25\x96\xcb\x70\x7b\xc1\x2a\xb9\x62\xcb\
\xe8\x4a\xbb\x05\x9b\x43\x60\x8e\x19\xaf\xe0\xa9\x28\x25\x46\xba\
\xa7\x55\x74\x8c\x3d\xc6\xb0\x22\x27\xbb\x42\x6d\xbc\x9d\x81\xb4\
\x81\x31\x4c\xe8\x6b\x64\xc8\xd3\xb1\x3c\xd5\x29\x81\xd8\x80\x80\
\xa8\x5a\xa4\xde\xcd\xe1\xee\xd6\x48\x48\x69\xa6\x76\xf4\x75\x5f\
\x20\x1c\xb2\x47\xa4\x02\x90\x2e\x69\x49\x4e\x92\x84\x54\xf1\x5d\
\xad\xc5\x60\x0e\xcb\x25\x09\xe6\xa2\x0a\x96\x7f\x53\xcb\xbc\xdb\
\xa0\x96\xc4\x53\xfc\xb8\x95\xde\x9f\x5c\x61\xe6\x15\x95\xd2\x27\
\x4d\xbc\xf0\x4e\x0a\xe3\xb4\x48\xed\xd7\xb9\xc3\xde\xb1\x9a\xc2\
\xd3\x2c\x96\xea\x1d\xd2\x10\x21\xd6\xe0\x94\xe9\x10\x02\x29\xb2\
\x26\xeb\x8a\xc8\x1a\xb4\xf0\xd9\x80\x10\x72\xcb\xf9\x3f\x3d\xca\
\x89\x9d\x1e\x6d\xcd\x41\x3f\x1b\x9b\x10\x10\x79\xf3\xfc\x58\x8a\
\x37\x7a\x58\x56\x14\x64\xa5\xbe\x96\x17\xc3\xd4\x99\x89\x2c\xf2\
\x2a\xa1\x7d\xce\x88\x82\x3a\x01\x95\xb6\xcf\x65\x95\xf2\xa0\xa2\
\x5c\xca\x89\xd7\x06\x10\x09\x5d\x9d\x28\xd7\x73\x27\x21\x4e\xac\
\x17\x72\x97\xbd\x4a\xf0\x42\x40\xa9\xa8\xd7\xa8\x15\xe7\xf8\x03\
\xf2\xe7\x73\xa7\xf6\xa6\x5b\xd3\xce\x44\x49\x25\x8c\x56\x85\x74\
\x8f\x76\xe3\xfa\x9c\x64\x65\xe5\x1b\x76\xde\x6d\xd0\x42\xad\x9f\
\x72\x01\x10\x0e\xd8\xe4\xe4\x4e\x2f\xeb\x3c\x2a\xc7\x67\x27\xb0\
\xd0\x13\xc9\x93\x4a\x79\xd1\x2f\x48\x49\x7c\xce\xaa\xca\xf1\x2f\
\xdd\xca\x84\x2b\xaa\x56\x22\xbb\x7a\x17\x6c\xbc\x0f\xae\xff\x5a\
\xba\x6b\x27\xfc\xd8\xc6\x2f\x39\x66\xf2\xdc\x82\x2e\x17\x0a\xa7\
\xc1\x23\x9e\x34\x8b\xcb\x7d\x9b\x3c\xcc\xe8\x65\x24\x7d\xbc\x95\
\x0f\x9c\x5a\x98\x41\x99\x4c\xbd\xd8\xed\x82\x5c\x28\x04\xb7\x78\
\x19\x7f\x27\xfc\xbb\x12\xc8\x32\x32\xaa\x9b\x81\x3c\x3d\xe4\x97\
\x34\xca\x8f\x04\x29\x10\x6f\xc7\x88\x09\x4e\xaf\xc6\xa9\x19\x76\
\xb6\xe7\x98\xc8\xd5\x09\xca\x37\x67\xe5\xa1\x2a\x98\x44\xc6\xff\
\xa3\x87\x99\x07\xfc\x2c\x69\x15\x81\xbb\x8d\x1e\x46\xc6\xf4\x34\
\x30\x42\xc2\x13\x53\xa5\x51\x7a\x38\xc8\x4f\xf2\x6e\x11\x82\x2e\
\x09\xa5\x1e\x67\xcf\xbf\x4a\x20\x32\xa4\xeb\x87\x52\xa4\x9e\x70\
\x0a\xb4\x78\xdc\xdf\xff\x98\x3c\x20\xd0\x16\xe3\x1f\xb0\xb3\xc6\
\x7a\x42\x8e\x07\x2e\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x07\x47\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x06\xe9\x49\x44\x41\x54\x78\xda\x9c\
\x57\x59\x6c\x5c\xd5\x19\xfe\xee\x32\x8b\x67\x1c\x8f\x27\xf1\x12\
\x3b\x8e\xc7\x4e\xac\x24\x6e\x42\x52\x28\xb2\x82\x8c\x31\x58\x50\
\x15\x88\x51\xd2\x50\xd2\xa0\xaa\xa0\x10\x50\xcb\x03\x95\x48\xcb\
\x22\xe0\x05\x41\x9e\x22\xa4\xbe\x54\x55\x05\x0f\x08\xa1\xa6\x51\
\xa3\x26\x58\x36\x20\x08\x12\x02\x9e\x50\xd2\x90\xd8\x69\x84\xc7\
\xcb\x8c\x97\xb1\x3d\x5e\x66\xec\xf1\x6c\x77\xeb\xff\x9f\x59\x98\
\x71\x26\x5e\x72\xad\xff\x5e\xcf\xbd\xe7\xfc\xdf\xf7\x6f\xe7\xfc\
\x47\xc2\x3a\xaf\xf6\x97\x50\xe1\xaa\xc1\x49\xc8\x78\xc6\x92\xb0\
\x3f\xf7\x5e\x92\x0a\xc4\xc2\x35\xc9\xc0\x87\x4b\xd3\x78\xff\xdb\
\xbf\x62\x71\x3d\x7a\xa5\xb5\xbe\x3f\xf8\x1a\x76\x28\x15\xb8\xe0\
\xb4\x63\x5f\xc7\xcf\x3a\xd0\x5c\xeb\xc3\xf6\xaa\x3a\x58\x66\x1a\
\xb1\x74\x14\x89\x54\x84\x24\x8a\x68\x72\x01\x63\xe1\x10\x66\xa2\
\x51\x84\xe6\x92\x30\x4d\xf4\x27\x23\x38\xfc\xd5\x69\x0c\x93\x1e\
\xeb\x4e\x08\x28\x0f\xbf\x8b\xbf\x39\x1c\x78\xe1\x8f\x8f\xbe\x8c\
\xf6\x3d\xed\x58\xd4\xc6\xa1\x21\x42\xb3\xd2\xb0\x2c\x13\x86\x69\
\xc0\x30\x0c\x22\x03\xa4\xb5\x14\xe6\x16\x67\x10\x9a\x1d\x46\x70\
\xf6\x26\x82\x33\x31\x4c\xce\x01\xa6\x8e\x7f\xf4\xbd\x8a\x17\x49\
\x9f\xb1\x11\x02\xb6\xae\xd3\x08\x1c\xdc\x7d\x4f\xdd\x5b\xbf\x7e\
\x0f\x4b\xf0\x23\x6e\x06\x31\x11\xbe\x86\xe9\xf9\x01\xcc\x45\x87\
\x91\x4c\x1a\xc2\xed\x39\x2d\x0e\x9b\x0d\x1e\xd7\x76\x54\x94\xef\
\x84\x53\xad\xc7\xd0\xe4\x15\xdc\x18\xbf\x4e\x73\x80\xc5\x38\x42\
\x7d\x7f\x81\x8f\x46\x6a\xeb\x21\x60\x7b\xe8\x1d\x8c\x3e\xf7\xc8\
\xef\xeb\x0f\xb7\x1d\xc3\x82\x79\x15\x53\x0b\xd7\x71\xf5\xc7\x73\
\x48\x69\x26\x64\x39\x33\x49\x2a\x31\xd3\xca\xde\x6c\xaa\x8a\x5a\
\xcf\x43\xe4\x21\x3b\xae\x0c\x7f\x89\xc9\xd9\x14\x26\x66\x31\xf9\
\xe9\x2b\x68\x5a\x49\x62\xa5\x1a\xdb\xc3\xa7\x11\x3c\xde\x79\x74\
\xeb\x63\xf7\x3e\x8e\xa4\x32\x82\xfe\x91\xff\x60\x74\xb2\x1f\x8a\
\x02\xc8\x52\x69\xe0\x5b\x88\x58\x19\x32\x95\xae\x16\x78\x5d\xbf\
\xc0\xf7\xfe\x5e\x04\x38\x24\x61\x4c\xf5\xbd\x82\xc6\x42\x12\x4a\
\x51\xcc\xdf\xc6\xdf\xf7\x36\x37\x3f\xf0\x9b\xf6\xa7\x60\x39\x27\
\xf0\xdf\xc1\x7f\x61\x7c\xe6\x26\xc8\x20\x28\x72\x16\x7c\x1d\x92\
\xab\x8a\x84\x36\x0f\x93\x02\xb8\xb3\xb6\x8b\xc2\xe0\x47\x2c\x61\
\x94\x37\xb4\xa3\xde\xff\x15\x7a\x73\x0e\xcb\x11\x90\xda\x4e\x62\
\x77\x45\x1d\x3e\xf8\xc3\xaf\x9e\x47\xb9\x57\xc7\x40\xe0\x02\xc6\
\xc3\xff\xcb\x5b\xbe\x91\xcb\x2a\x28\xd1\x94\x16\xa5\x7f\x52\x68\
\xd8\xdc\x86\x48\x7c\x90\x04\xf7\x54\x36\xe2\xec\xc4\x15\xcc\xf1\
\x18\x39\xe7\x7a\xcf\x0e\x9c\xff\xe5\xdd\x0f\xa0\x7c\x13\x30\xbf\
\x34\x88\xc0\xf4\xf5\x0c\xb8\xbc\x3e\xab\x59\x68\x7d\x20\x8b\x01\
\x9d\x6e\x7a\x36\x0c\x12\xcd\x8f\x24\xfc\x50\xd4\x14\x76\x6d\xdb\
\x8b\xfa\x2a\xa0\xaa\x05\xe7\x19\x33\x4f\xa0\xe9\x7e\x54\xd9\xed\
\x68\x6d\xdd\xee\x43\x59\x99\x8c\x1f\x46\xce\xdd\x19\x38\x21\xa6\
\xa8\xd8\xba\xf7\xf6\xa0\xa3\xf9\x0c\xb4\x02\x12\x33\xb1\xaf\xd1\
\x58\xb5\x0f\xd5\x1e\x09\x8a\x0d\xad\xcd\x84\x99\x23\xa0\x34\x77\
\xe2\xc4\xee\x6d\xdb\x44\xac\xa7\x23\x37\x90\xd6\xa9\xc4\x94\x3b\
\x00\x27\xcb\x0f\xb5\x9e\x45\xa3\xe7\x10\xf6\x6f\x3d\x85\x2d\x65\
\x3e\xe1\x11\x1e\xa3\x59\x1a\x91\x0b\xa1\xba\xc2\x07\x2f\x79\x79\
\xc7\x83\x38\x81\x4c\x6a\xc1\x6e\x73\xe1\xe9\xcd\x9b\xdc\x70\x3a\
\x1d\x98\x22\x02\x22\x89\x36\x10\x6f\x5a\xf5\x32\x96\xef\xf9\x27\
\x76\x6e\x3e\x96\xff\x76\x74\xdf\x65\x41\x8c\x07\xb1\xbe\x98\x36\
\x8a\x2a\x4f\x23\xdc\x4e\xf2\xbf\x1b\x4f\x33\x36\x13\x70\xca\xe4\
\x12\x8f\x5b\x86\xcb\xe9\x46\x34\x11\x14\x2e\x5b\x69\xa1\xb5\x86\
\xe5\xdd\xad\x0c\xfe\xdb\x22\x72\xa1\xd8\x37\x99\xb1\x59\x7d\x49\
\x2d\x04\x6f\xf9\x56\x41\x80\x31\x05\x36\xdd\x54\x1e\xac\x1b\x09\
\x51\xbf\x29\x43\x2b\x22\x20\x92\xca\xca\x08\xff\x6f\x95\x02\x67\
\xcb\xbd\xc5\xe0\x43\x0b\x67\x71\xf1\xe6\x11\x91\x4b\xb9\xb2\xd4\
\xe8\x4f\x55\x1c\x19\xbf\x67\x4a\x45\xcd\x41\x21\x41\x1b\x8b\x6e\
\xe8\x45\xb5\xce\x80\x9c\x48\x1d\x8d\x67\xd0\xbd\xab\x47\xb8\x99\
\x41\x8b\xc0\x77\x97\x06\xef\xf9\xf1\x38\xec\x6a\x89\x44\x5e\xb1\
\x12\xcb\xb9\x7a\x4d\xa4\x97\xa0\x6b\x7a\x7e\x90\x88\x2d\xdd\xb6\
\xb8\x7c\xd8\x5f\x7b\x4a\x24\xd6\xa1\x5d\x67\x41\x5b\x00\x34\x12\
\x7e\x76\xef\xba\x3d\xb8\x23\x5b\x45\x6b\xe5\x92\x9c\x5b\x8f\x53\
\x5a\x66\x67\x5b\xe9\x81\xa3\x7b\x2e\xe7\x07\xef\xf4\x1e\x23\xd0\
\x8f\x10\xd3\x21\x9e\xb7\x80\x47\x08\x7c\x90\xc0\xd9\x72\xe5\x36\
\x2b\x67\x29\x02\xc8\xba\x34\x91\x5a\xa6\x18\xd9\x8a\x26\x88\x44\
\x2a\xb8\x5a\xbc\xbf\xc3\x6b\xf7\x59\xe2\xb9\x11\x70\x0e\x9b\x8d\
\x74\xeb\x46\x8a\x36\xa9\x9f\xc8\x88\x74\x30\x92\x18\x8c\x25\x81\
\xd9\xe8\x38\x5c\x6a\x5d\x7e\x3d\xe7\x04\xba\x38\x78\x44\x28\x5f\
\xed\x12\xe0\x7e\x02\xb7\xad\x62\x39\x89\x53\xa9\xc3\x42\x6c\x0a\
\xcb\x84\xc5\x98\x8c\xcd\x04\xf4\xd8\x34\x7a\xe2\xf4\x72\x26\x3a\
\x86\x72\xb5\x29\xbf\x96\xf3\x1e\x60\x27\x85\xac\x7c\x28\x5a\x9a\
\x04\xbf\x17\xe0\xb9\xdd\x72\x15\xa2\x6e\xd2\x1d\x8e\x04\x05\x01\
\xc6\x64\x6c\x26\x90\xbc\xf1\x09\x3e\x5e\x58\x02\xc2\x4b\x01\x38\
\x50\x27\x5c\x65\x65\xbd\xc0\x16\xb1\x5b\x4b\x91\xc8\x83\xab\xab\
\x5b\xce\xba\x38\xb4\xac\x7b\x96\x30\x18\x8b\x31\x19\x9b\x09\xa4\
\x17\x02\x98\xd6\x12\x18\x0e\x47\x2c\x04\xe6\xfa\x51\x63\xef\xcc\
\xd7\x7b\x11\x89\xe1\xe3\x18\x89\x9e\x17\xe0\xfc\xe4\xdf\x6b\x81\
\xe7\x08\x54\x91\xce\x20\xe9\x66\x0c\xc6\x62\x4c\xc6\x56\xb3\xbd\
\xda\xf2\xc0\x27\xf8\x93\xed\x18\x7a\xca\x5d\x03\xa8\xa9\x68\x42\
\xa5\xda\x82\x88\xe1\xcf\x2d\x62\x19\x12\xf4\xa3\x77\xf4\x49\x51\
\x1d\xfc\x5e\x80\xaf\xe1\x76\x1e\xeb\x51\x5a\x60\xa6\x1d\xf0\x4f\
\x0c\x88\x3e\x91\xb1\x18\x93\xb1\x73\xfd\x80\xb9\x38\x81\x78\xdd\
\xcf\xd1\x6c\xd9\xb0\x47\x37\x82\x68\xa9\xee\x82\x21\x2d\x21\x65\
\x45\xf3\xc6\x30\x18\xd7\xb6\x2a\x65\x76\x91\xb5\xc0\x39\x97\x36\
\x29\x8d\xf0\xca\x6d\xb8\xec\xef\xc3\xc8\x94\x81\xb9\x09\x5c\xbc\
\x76\x0e\x1f\xd2\xa7\x85\x22\x02\xfc\x23\xf0\x1d\xae\x36\x74\xe0\
\x29\x49\x36\x5c\xf1\x74\x00\x2d\x5b\xba\x08\x25\x85\x04\xe6\x6f\
\x3d\x03\xac\xe1\x72\x76\x51\x05\x59\x2e\xc0\x87\x7a\xa9\x65\x4f\
\x60\x66\x1e\x73\x97\xde\xc6\xb3\xbc\x4d\x70\xfc\x57\xb6\x64\x1c\
\x0a\xd3\x7f\x09\x9f\x56\x1f\xc4\x61\x0b\x69\xd7\x22\x35\x12\xdb\
\x3c\x6d\xf0\xd8\x7d\x34\x7a\x0c\xa6\x64\xe6\x3b\xc9\x52\x75\x9e\
\xdb\x78\x54\xda\x69\x6a\xd4\x2e\x28\x5a\x2d\x81\xf7\x09\xf0\xb1\
\x69\xcc\x7e\xfe\x3a\xba\x69\xc4\x18\xc9\x62\xd6\xe8\x22\x02\xec\
\xb1\x34\x2f\xff\x43\x97\xf0\x59\xcd\x41\x1c\x59\x4e\x19\x65\x51\
\x6a\xa3\x1c\x72\x19\x1a\xdc\x9d\x28\x53\xb6\x88\xe3\x8f\x29\xc5\
\x29\x3c\x66\x7e\x97\x14\x8b\x8c\x64\x83\x4b\x6e\x80\x57\x39\x00\
\x2f\xda\xa8\x1d\x1f\xa6\xc6\xe6\x3b\x8c\x84\x0c\xb6\x7c\x9e\xc0\
\x9f\x20\xdd\xa3\x24\xec\x4e\x7d\xd5\xb6\x9c\x84\xbb\x15\x5f\xe7\
\x9f\xf1\x26\xf5\x89\x8f\xd7\x13\x6e\x75\xa5\x84\xaa\x4d\x3e\xea\
\x68\x28\xa6\xe5\x75\x54\x56\xf6\xa2\xe3\x8e\x61\xa4\x69\x91\x09\
\x21\x1c\x0d\x8a\x52\xe3\x6c\xe7\x84\x5b\x0c\xa1\xf7\xeb\x33\x78\
\x87\x86\x04\x48\x66\xd7\x6a\xcb\x0b\x49\x78\x49\xb6\xd6\x1f\xc0\
\xbe\xbb\x9e\xc4\xbb\x76\x17\x9a\xb8\x93\xe1\xbd\x9c\x45\x91\x8b\
\x27\xf0\xf2\xca\x0b\x0c\x0b\xd7\x79\x3a\x8e\xd1\xeb\xff\xc6\x1b\
\x93\x3f\xa0\x9f\x3e\x4f\x65\x93\x4e\xdb\xd0\xd1\x8c\xc4\x45\x42\
\xf6\xa3\xda\x5d\x8d\xda\xbd\x87\xd1\x5d\xb9\x1d\x8f\xa8\x2e\x34\
\xdf\x72\xda\x23\x4d\x7a\x1c\x23\x91\x31\x7c\x31\x70\x01\x3d\xcb\
\x61\x51\xe7\x74\x2e\x12\xdd\x6f\x7c\xa3\x47\xb3\xc2\xef\xb6\x2c\
\x11\x37\x49\x05\x97\x35\x49\x59\x41\x47\x5d\x58\xf2\x09\x92\x68\
\x36\xc9\x96\xb3\xc0\xda\x9d\x1e\x4e\x4b\x79\xc4\xce\x6d\x54\xb6\
\x8b\x92\x4a\x94\xbd\x9e\x2d\xaf\xf4\xed\x2c\x5e\x79\xfd\x5f\x80\
\x01\x00\x55\x0a\xfa\x73\x42\x33\xae\x39\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\x8e\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x03\x00\x00\x00\x44\xa4\x8a\xc6\
\x00\x00\x03\x00\x50\x4c\x54\x45\x01\x00\x00\xff\xfe\xcb\xff\xfe\
\x99\xff\xfe\x65\xff\xfe\x33\xfd\xfd\x00\xff\xcb\xfe\xff\xcb\xcb\
\xff\xcb\x99\xff\xcb\x65\xff\xcc\x33\xfd\xcb\x00\xff\x99\xfe\xff\
\x99\xcb\xff\x99\x99\xff\x98\x65\xff\x98\x33\xfd\x98\x00\xff\x65\
\xfe\xff\x65\xcb\xff\x65\x98\xff\x65\x65\xff\x65\x33\xfd\x65\x00\
\xff\x33\xfe\xff\x33\xcb\xff\x33\x98\xff\x33\x65\xff\x33\x33\xfd\
\x32\x00\xfd\x00\xfd\xfd\x00\xcb\xfd\x00\x98\xfd\x00\x65\xfd\x00\
\x32\xfd\x00\x00\xcb\xff\xff\xcb\xff\xcb\xcc\xff\x99\xcb\xff\x65\
\xcc\xff\x33\xcb\xfd\x00\xcb\xcb\xff\xcc\xcc\xcc\xcb\xcb\x98\xcc\
\xcb\x66\xcb\xcb\x32\xcc\xcb\x00\xcb\x99\xff\xcb\x98\xcb\xcb\x98\
\x98\xcc\x98\x66\xcb\x98\x32\xcc\x99\x00\xcb\x65\xff\xcc\x66\xcb\
\xcc\x66\x98\xcc\x66\x66\xcb\x65\x32\xcc\x65\x00\xcb\x33\xff\xcb\
\x32\xcb\xcb\x32\x98\xcb\x32\x65\xcb\x32\x32\xcc\x32\x00\xcb\x00\
\xfd\xcc\x00\xcb\xcc\x00\x98\xcc\x00\x65\xcc\x00\x32\xcc\x00\x00\
\x99\xff\xff\x99\xff\xcb\x99\xff\x99\x98\xff\x65\x99\xff\x33\x98\
\xfd\x00\x99\xcc\xff\x98\xcb\xcb\x98\xcb\x98\x99\xcc\x66\x98\xcb\
\x32\x99\xcc\x00\x99\x99\xff\x98\x98\xcb\x99\x99\x99\x98\x98\x65\
\x99\x98\x33\x98\x97\x00\x98\x65\xff\x98\x66\xcc\x98\x65\x98\x98\
\x65\x65\x99\x65\x33\x98\x65\x00\x98\x33\xff\x98\x32\xcb\x99\x33\
\x98\x99\x33\x65\x99\x33\x33\x98\x32\x00\x98\x00\xfd\x98\x00\xcc\
\x98\x00\x97\x98\x00\x65\x98\x00\x32\x98\x00\x00\x65\xff\xff\x65\
\xff\xcb\x65\xff\x98\x65\xff\x65\x66\xff\x33\x65\xfd\x00\x65\xcb\
\xff\x66\xcc\xcc\x66\xcc\x98\x66\xcc\x66\x65\xcb\x32\x66\xcc\x00\
\x65\x98\xff\x66\x99\xcc\x65\x98\x98\x65\x98\x65\x66\x99\x33\x65\
\x98\x00\x65\x65\xff\x66\x66\xcc\x65\x65\x98\x66\x66\x66\x65\x65\
\x32\x66\x65\x00\x65\x33\xff\x65\x32\xcb\x65\x33\x99\x65\x32\x65\
\x65\x32\x32\x66\x32\x00\x65\x00\xfd\x65\x00\xcc\x65\x00\x98\x66\
\x00\x65\x66\x00\x32\x66\x00\x00\x33\xff\xff\x33\xff\xcc\x33\xff\
\x98\x33\xff\x66\x33\xff\x33\x32\xfd\x00\x33\xcc\xff\x32\xcb\xcb\
\x32\xcb\x98\x32\xcb\x65\x32\xcb\x32\x33\xcc\x00\x33\x99\xff\x32\
\x98\xcb\x33\x99\x99\x33\x99\x65\x33\x99\x33\x32\x98\x00\x33\x66\
\xff\x32\x65\xcb\x33\x66\x99\x32\x65\x65\x32\x65\x32\x33\x66\x00\
\x33\x33\xff\x32\x32\xcb\x33\x33\x99\x32\x32\x65\x33\x33\x33\x32\
\x31\x00\x32\x00\xfd\x32\x00\xcc\x32\x00\x98\x32\x00\x66\x32\x00\
\x31\x32\x00\x00\x00\xfd\xfd\x00\xfd\xcb\x00\xfd\x98\x00\xfd\x65\
\x00\xfd\x32\x00\xfd\x00\x00\xcb\xfd\x00\xcc\xcc\x00\xcc\x99\x00\
\xcc\x65\x00\xcc\x33\x00\xcc\x00\x00\x98\xfd\x00\x99\xcc\x00\x98\
\x98\x00\x98\x65\x00\x98\x32\x00\x98\x00\x00\x65\xfd\x00\x66\xcc\
\x00\x65\x98\x00\x66\x66\x00\x66\x32\x00\x66\x00\x00\x32\xfd\x00\
\x33\xcc\x00\x32\x98\x00\x33\x66\x00\x32\x32\x00\x32\x00\x00\x00\
\xfd\x00\x00\xcc\x00\x00\x98\x00\x00\x66\x00\x00\x32\xee\x00\x00\
\xdc\x00\x00\xba\x00\x00\xaa\x00\x00\x88\x00\x00\x76\x00\x00\x54\
\x00\x00\x44\x00\x00\x22\x00\x00\x10\x00\x00\x00\xee\x00\x00\xdc\
\x00\x00\xba\x00\x00\xaa\x00\x00\x88\x00\x00\x76\x00\x00\x54\x00\
\x00\x44\x00\x00\x22\x00\x00\x10\x00\x00\x00\xee\x00\x00\xdc\x00\
\x00\xba\x00\x00\xaa\x00\x00\x88\x00\x00\x76\x00\x00\x54\x00\x00\
\x44\x00\x00\x22\x00\x00\x10\xee\xee\xee\xdd\xdd\xdd\xbb\xbb\xbb\
\xaa\xaa\xaa\x88\x88\x88\x77\x77\x77\x55\x55\x55\x44\x44\x44\x22\
\x22\x22\x11\x11\x11\x00\x00\x00\x11\xa6\xf0\x45\x00\x00\x00\x01\
\x74\x52\x4e\x53\x00\x40\xe6\xd8\x66\x00\x00\x01\x3c\x49\x44\x41\
\x54\x78\xda\xdd\x92\xbd\x4e\xc3\x30\x14\x85\xcf\x05\x91\xbc\x04\
\xa2\x8b\x19\x68\x19\x61\x62\xe2\x09\x90\x18\x10\x59\x92\x2e\xdd\
\x91\x18\x0a\x83\x0b\x34\x0c\x65\x63\x67\x21\x59\x5c\xc1\x0b\xf0\
\x0e\x61\x0d\x0c\xf5\xd0\x9f\x47\x80\x25\x01\x71\x71\x9b\x86\x26\
\x50\xc4\xc2\x02\x56\xec\x58\x39\x5f\xce\xb5\xce\x35\xe1\x87\x41\
\x7f\x12\x60\xca\xe6\x7c\xc0\x28\x0c\x32\xcf\x8c\xf9\x04\x18\xf5\
\xf2\x00\x5e\x40\xdf\x01\xf5\xeb\x46\x1a\xd4\x97\x44\x13\x39\x42\
\x65\xff\xed\xe7\xc8\x83\x16\x41\xe3\x2a\x57\x8a\x0e\xfd\xca\xa0\
\x95\x56\x8f\x3a\x71\x77\x6b\x75\xad\x39\xb5\x28\x03\x4e\x27\x4c\
\x6c\x20\x19\xe9\x6a\x6e\x41\xc5\x02\x1b\x3b\xe0\xf5\x5e\x0a\xeb\
\xf0\xf4\x3c\x3f\x28\x15\xfe\x77\xfd\xfb\x97\x41\x2a\x30\x4a\xc0\
\x8f\x3a\x42\xa9\x04\x13\x7b\xed\xe5\x1b\x6d\x76\x0b\x6f\x04\x2b\
\x75\x5b\x01\x4a\x0e\xec\xbb\x61\x2d\x4e\xc8\x12\xd0\x89\x2d\x34\
\xdc\x50\x8e\x75\x9a\xa6\x6b\x80\xe3\x5b\x2d\x7d\x08\xe7\x62\xe5\
\xc1\x32\x1f\xac\xfd\x10\x67\xaf\x93\x50\x4d\x7e\xfd\x4a\x9b\xc4\
\xf0\xc9\x96\x6a\x6f\x51\x69\xa4\x55\x47\xc1\x8e\x21\x69\x0c\x60\
\x33\x22\xf6\x01\x39\x29\xe5\x0b\x67\xb7\x57\xeb\x9a\xd7\x47\x50\
\xac\xee\x02\xa5\xa5\x09\xd7\x97\xc4\x6a\x22\x98\x35\x4f\x92\x67\
\x4d\xe1\xf6\xc9\xb8\x5d\xd9\x91\xe5\x9c\xa8\x39\xe3\x29\xdb\xe2\
\x4b\xb3\x7e\xeb\xca\xfd\x4f\xe0\x1d\x87\xdb\x7e\x1f\x89\x6f\x47\
\xce\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\x5e\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x04\x00\x49\x44\x41\x54\x78\xda\xc4\
\x57\xcf\x8b\x1c\x45\x14\xfe\x5e\xf5\x8f\xd9\x71\x37\xc4\x1d\x3c\
\x44\x31\x2e\x0b\x39\x06\x21\xa0\x5e\x85\x1c\xa3\x2b\x5e\x72\xf0\
\x14\x14\x93\x93\xff\x40\x7e\x1c\x72\x48\x20\xb7\x1c\x03\x5e\x62\
\x2e\x31\x04\x3c\x18\x70\xbd\x98\x83\x82\xb0\xe0\x61\x89\x11\xc5\
\x80\x10\x5d\x4c\x16\xb2\xd9\x1f\xc9\x64\x67\xba\x7b\xba\x2a\xef\
\x55\x55\xc7\x9e\xd9\xe9\x4c\x0f\xce\xb2\x0d\x8f\xa9\xae\x9a\x7e\
\xef\xab\xef\x7d\xf5\xaa\x8a\x8c\x31\xd8\xcb\x27\x2c\x1a\x44\x14\
\x1d\xbf\xf4\xfd\xa3\xb4\xa7\xf7\xe7\x5a\xef\x5a\xc0\x40\x29\x84\
\xca\x7c\xf7\xcd\x99\x0f\x3f\xe6\xd7\x5e\x58\x1a\x9b\x4e\x38\xf8\
\xd7\x67\x8f\x21\xcd\xf9\x4d\x88\xa1\x09\x46\xf6\xfe\xe2\x00\xf8\
\xe4\xe2\xe2\x07\xfc\xd6\x64\x7b\x5a\x06\x10\xcb\xcc\x5f\xe1\xc6\
\xd2\xbf\x40\xc4\x7f\x56\x13\x04\xa0\x19\x40\xc6\x76\xf4\x2d\xc0\
\x33\x3c\x35\x08\x00\x85\x1c\x1a\x0a\xb8\xf6\xeb\x2a\x22\x65\x93\
\xc3\xe0\xfb\x91\x90\x9d\xce\x78\xda\xc9\x38\xe6\x89\xb7\x0f\x94\
\xe3\x50\x9f\x06\xfa\x84\xc1\x34\x85\x1c\x7c\xfe\x8d\x03\x90\x6c\
\xf4\xd8\xf2\x22\x87\x32\x6e\xdc\x6f\x2d\x82\xfc\x9f\xfe\x7a\xb0\
\x6a\xfd\x56\x8a\xb0\xaf\x53\xc1\xce\x9a\xf5\x02\x62\xe4\x27\xcd\
\x29\xb4\xf0\x8b\x1d\x5b\xc7\xbb\xb8\x4a\x5f\xbe\x00\x33\x32\x3e\
\x39\x2b\xfc\x0e\x3e\x6a\x28\x00\x12\x9a\xc8\x8e\x7e\x96\x9d\x44\
\x2b\xbb\xcb\x1c\xbe\x6a\x4d\xda\x9f\xa6\x9f\xc3\x04\xa2\x68\xe7\
\xf4\x65\x16\x90\x9f\x90\x71\x7e\xc7\x60\x80\xe3\x73\x90\xd9\x74\
\x89\xa7\xd0\x2a\xcd\x37\x46\x4b\x2f\x41\xcd\x70\x8f\x2e\xcd\x80\
\x4a\x39\x19\x90\x88\x52\xd5\x0c\x0c\x05\x50\xa8\x3f\x92\x98\x59\
\xee\xf2\x40\x5e\x05\x86\xdb\xac\x62\x11\x68\x58\x50\x48\xae\x61\
\x1e\x9e\x06\x92\x3b\xc0\xc1\x45\x90\x7c\xab\xfd\x90\x67\x60\xd8\
\xaa\x52\x95\x0c\x18\xb7\x1a\x36\xd5\x7b\x0c\x62\x9b\x2d\xf3\xb6\
\x6d\xfb\x62\x1e\x8b\x3c\xcd\x11\x23\x31\x0f\xce\x41\x6f\xdd\x82\
\x4e\x56\x60\xfe\xfe\xc8\x82\x2b\xc4\x1c\xaa\x31\x34\xa0\xe1\x72\
\x2b\x1c\x4e\x31\xe2\x6f\x5f\xff\x0a\x9b\xc1\x61\xa0\xfb\xc8\x9a\
\xb4\xa5\x4f\xc6\x22\x1f\x3c\x8a\x80\x74\xfd\x06\x72\xd3\xe4\x35\
\xde\x44\xd6\xb9\x07\x4a\xef\x59\x06\x0b\x10\xc6\xfb\xd5\xb5\x34\
\x40\x0e\x80\x30\x90\x71\xfb\xd6\xdc\x35\xe4\x9e\xbe\xc0\xb8\x65\
\xa8\x8c\xd3\x88\xd8\xc6\xfd\xcb\x9c\xa9\x29\xa6\x9b\xec\x77\x46\
\xc7\xe8\xae\x5d\x47\xeb\xd0\x79\x98\x9e\xa3\xde\xa0\xa6\x08\xa9\
\xd0\x80\x71\x33\x94\xf4\x47\xe6\xbf\x22\x45\x7e\x69\x09\x77\x12\
\x7c\x65\xf9\x38\xd2\xf6\x32\xb7\x67\x9c\x3e\xe4\x1f\xbc\x44\x9e\
\xac\x5e\x47\xd2\xfe\x1d\x07\x8f\xdc\x84\xd2\xce\x9f\xf8\xa5\xba\
\x0c\x48\x3c\x0b\xa0\x54\x21\x07\xd7\x77\xc0\x00\x9e\x3e\xfe\x09\
\x41\xfc\x1a\x07\xa1\x3e\xf9\x6b\x1d\x21\x59\xfb\xd1\xd6\x7e\x8c\
\xcb\x80\x38\x36\x25\xd1\x0c\xdd\xb1\xc9\xe5\x34\xe7\x3d\x45\x67\
\x5d\x50\xe8\x11\xc9\x93\xe7\x4c\x7d\x97\x7d\x34\x5d\xd0\x42\x03\
\xc1\x4e\x06\xd4\xb0\x4d\xcb\x31\xa0\xac\x88\x2a\xcd\x2f\xc3\x77\
\x8e\xfe\x89\xa0\x31\x8f\x4e\xb2\x85\xb9\xdb\xdb\xd6\xa4\x2d\x7d\
\x32\xa6\xbc\x58\xc5\x5f\xc1\x6c\x25\x03\xc5\x0e\x6c\x0b\x07\x4d\
\x23\x8e\x5c\xb1\xa9\x3a\xb3\x48\x1a\x1a\x51\x03\xef\x1f\xfb\x01\
\xc1\x34\x70\xe7\xf6\x9c\xed\x5f\x38\xb1\x82\xfc\x19\xa7\xa1\xe7\
\xbe\x0d\xbc\x3f\x55\xa4\x74\xa4\x06\xb8\x77\xe6\xfe\x9b\xf8\x63\
\xf6\x88\xd4\x9c\x4a\x00\xb0\xa2\x22\x8b\x58\x35\xa7\x78\xf9\x39\
\x77\xbf\xdd\x5d\x80\xee\x74\x6d\xc1\xb2\x73\x57\xe2\x6f\x99\xfd\
\x6e\xd4\xab\x84\x52\xbf\x49\xed\x43\x9a\x34\xf8\x7b\x35\x7a\xeb\
\x25\x57\x87\xd3\x27\x2e\xa3\xaa\xc3\xef\x49\xec\x91\x93\x5d\x4a\
\xe2\x2f\xa8\xbd\x17\x58\x7f\x0a\x49\xd2\xf3\x00\xea\x6c\xf8\x39\
\xe8\xdc\x21\xdb\x4c\xda\x29\x0b\xb1\x04\xda\x96\x72\x55\x7f\x33\
\x72\x79\xd2\xe8\x3c\xeb\xf0\xe4\x54\xbd\xb3\x47\xd5\x66\x64\x77\
\x56\x6d\xfd\x11\xc6\x00\x90\x64\x6d\x6c\x6c\x3d\x74\x00\xfe\xef\
\x71\x90\x01\x88\xbf\x91\x00\x0a\xb1\xc9\xf9\xed\xca\x17\xeb\x13\
\x3f\x93\xe6\xbe\x1a\x96\x45\x1d\x56\x7d\xd0\xd3\x93\x3f\x15\xd3\
\x28\x0d\xbc\x38\x3a\xf1\xaf\x9e\x64\x70\x9f\x57\x35\x10\x67\x07\
\x80\x50\xa9\x97\x1f\x14\x26\x75\x1b\x2a\xc5\xe9\x03\x10\x87\x68\
\x2f\x5c\x58\x9c\x91\x73\xfb\x6e\xdc\xd8\xec\x06\xc6\xc1\x4d\x77\
\xf3\xe7\x62\x9d\x50\x71\x37\xe4\xab\x19\x9f\x3a\x31\xcf\x36\x3b\
\xe1\xec\xef\x90\x17\xdb\x1a\xdb\x3f\x72\x31\x29\x03\x88\xe4\x7a\
\x66\x4f\x9d\xbb\xfb\x48\x40\xae\xd3\xe8\x58\x30\x02\x60\x2f\x6f\
\xc8\xb4\xd7\xd7\xf3\xe7\x02\x0c\x00\x6e\x8b\x74\xbd\x9b\xa7\x7c\
\x5e\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x04\
\x00\x05\xaa\xb7\
\x00\x53\
\x00\x74\x00\x65\x00\x67\
\x00\x15\
\x0b\x4f\x67\x87\
\x00\x54\
\x00\x79\x00\x72\x00\x61\x00\x6e\x00\x6e\x00\x6f\x00\x73\x00\x61\x00\x75\x00\x72\x00\x75\x00\x73\x00\x20\x00\x72\x00\x65\x00\x78\
\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x12\
\x0d\x69\x12\x07\
\x00\x66\
\x00\x6f\x00\x6c\x00\x64\x00\x65\x00\x72\x00\x5f\x00\x65\x00\x78\x00\x70\x00\x6c\x00\x6f\x00\x72\x00\x65\x00\x2e\x00\x70\x00\x6e\
\x00\x67\
\x00\x14\
\x0d\xa0\x0b\xc7\
\x00\x73\
\x00\x74\x00\x65\x00\x67\x00\x6f\x00\x73\x00\x61\x00\x75\x00\x72\x00\x75\x00\x73\x00\x2d\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
\x00\x0a\
\x0c\x7b\xa4\x67\
\x00\x61\
\x00\x63\x00\x63\x00\x65\x00\x70\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x15\
\x0a\x08\x9e\xe7\
\x00\x53\
\x00\x74\x00\x65\x00\x67\x00\x6f\x00\x73\x00\x61\x00\x75\x00\x72\x00\x75\x00\x73\x00\x2d\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x31\
\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0b\
\x0c\x6d\xa2\x27\
\x00\x70\
\x00\x69\x00\x63\x00\x74\x00\x75\x00\x72\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\x00\x02\
\x00\x00\x00\xb0\x00\x00\x00\x00\x00\x01\x00\x00\x1b\x89\
\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\xe0\x00\x00\x00\x00\x00\x01\x00\x00\x20\x1b\
\x00\x00\x00\x96\x00\x00\x00\x00\x00\x01\x00\x00\x14\x3e\
\x00\x00\x00\x3e\x00\x00\x00\x00\x00\x01\x00\x00\x07\xd5\
\x00\x00\x00\x68\x00\x00\x00\x00\x00\x01\x00\x00\x0e\xf8\
"
def qInitResources():
    # Register the embedded resource data (images) with Qt's resource system
    # so they become reachable via ':/...' resource paths.
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    # Unregister the resource data previously registered by qInitResources().
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()<|fim▁end|> | \x24\xef\x87\xc3\xca\x98\x29\x73\xe2\x00\x84\x73\x56\x2a\x99\x44\
\x29\xa3\x14\x65\x53\xd4\x19\xad\xd9\x44\xf3\x3e\xf4\xe8\x13\x5f\
\x35\x05\x22\x58\x54\xcc\xb9\x52\x71\x01\x10\xb8\x9e\xb7\xc2\x20\
\x9a\xbf\x68\x41\xfd\xbd\x41\x8f\x9e\xa7\xd0\xcc\x35\x87\x93\x78\
|
<|file_name|>nested_item.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Original problem (regression test): a `static` item declared inside a
// nested block of a generic function must compile and resolve correctly.
pub fn foo<T>() -> isize {
    {
        // Item nested one extra block scope deep inside a generic fn.
        static foo: isize = 2;
        foo
    }
}
// issue 8134: a `static` item declared inside a generic method body.
// The item is deliberately unused — this only needs to compile.
struct Foo;
impl Foo {
    pub fn foo<T>(&self) {
        static X: usize = 1;
    }
}
// issue 8134 (second case): a `static` item inside a method of an impl with
// a trait-bounded type parameter.
pub struct Parser<T>(T);
impl<T: std::iter::Iterator<Item=char>> Parser<T> {
    fn in_doctype(&mut self) {
        // NOTE(review): looks like it should spell "DOCTYPE" but the leading
        // 'D' is absent — presumably matched separately by a caller; the
        // array content is irrelevant to the compilation test anyway.
        static DOCTYPEPattern: [char; 6] = ['O', 'C', 'T', 'Y', 'P', 'E'];
    }
}
// Variant of the case above: a second inherent impl block for `Foo` whose
// generic method also contains a nested `static` item.  `Bar` is unused.
struct Bar;
impl Foo {
    pub fn bar<T>(&self) {
        // Deliberately unused — only needs to compile.
        static X: usize = 1;
    }
}
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>Helper and server modules
"""<|fim▁end|> | """ |
<|file_name|>duplicated-headers_wsh.py<|end_file_name|><|fim▁begin|>from mod_pywebsocket import handshake
from mod_pywebsocket.handshake.hybi import compute_accept
def web_socket_do_extra_handshake(request):
    """Write a hand-crafted 101 response containing duplicated headers.

    The test deliberately repeats the 'foo' header (plus a case-variant
    'FOO') to exercise client handling of repeated header names, so the
    raw response is written directly instead of letting pywebsocket
    generate it.
    """
    accept = compute_accept(request.headers_in['Sec-WebSocket-Key'])[0]
    header_lines = [
        'HTTP/1.1 101 Switching Protocols',
        'Upgrade: websocket',
        'Connection: Upgrade',
        'Sec-WebSocket-Accept: %s' % accept,
        'foo: bar, baz',
        'foo: hoge',
        'FOO: FUGA',
        'xxx: yyy',
        '',
    ]
    request.connection.write('\r\n'.join(header_lines) + '\r\n')
    # Prevents pywebsocket from sending its own handshake message.
    raise handshake.AbortedByUserException('Abort the connection')
def web_socket_transfer_data(request):
    # No data phase: the handshake handler above always aborts the connection.
    pass
<|file_name|>tchelper.py<|end_file_name|><|fim▁begin|>################################################################################
"""
DESCRIPTION: Helpers for telecommand wrapper functions.
PACKAGE: spell.lang.helpers.tchelper
PROJECT: SPELL
Copyright (C) 2008, 2015 SES ENGINEERING, Luxembourg S.a.r.l.
This file is part of SPELL.
This library is free software: you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation, either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License and GNU General Public License (to which the GNU Lesser
General Public License refers) along with this library.
If not, see <http://www.gnu.org/licenses/>.
"""
###############################################################################
#===============================================================================
# SPELL imports
#===============================================================================
from spell.utils.log import *
from spell.lang.constants import *
from spell.lang.modifiers import *
from spell.lib.exception import *
from spell.lib.adapter.utctime import *
from spell.lang.functions import *
from spell.lib.adapter.constants.core import KEY_SEPARATOR
from spell.lib.adapter.tc_item import TcItemClass
from spell.lib.adapter.constants.notification import *
from spell.lib.registry import *
#===============================================================================
# Local imports
#===============================================================================
from basehelper import *
#===============================================================================
# System imports
#===============================================================================
import time,sys
################################################################################
class Send_Helper(WrapperHelper):
"""
DESCRIPTION:
Helper for the SendAndVerify wrapper.
"""
_isGroup = False
_isSequence = False
_cmdName = None
_cmdDef = None
_cmdArgs = None
# This flag is used in case of failures. The user may want to resend the
# command AND verify the parameters, or only repeat the parameters
# verification.
__doSendCommand = True
# This flag is used in case of failures. The user may want to resend the
# command and verify the parameters, but not re-adjust the limits
__doAdjustLimits = True
__doAdjustLimitsP = True
__doCheckTelemetry = True
# True if adjusting limits is feasible
__canAdjustLimits = False
# Holds the current stage of the function (TC,TM,LIM)
__section = None
__actionTaken = None
__verifyCondition = None
# Stores the original OnFailure config
__originalOnFailure = None
    #===========================================================================
    def __init__(self):
        """Initialize the helper for the Send() wrapper.

        Registers this helper under the TC (telecommand) interface, sets the
        user-visible operation name and resets all per-invocation state.
        """
        WrapperHelper.__init__(self,"TC")
        # Operation name shown in notifications/prompts
        self._opName = "Send"
        self._reset()
#===========================================================================
def _initializeActionStrings(self):
WrapperHelper._initializeActionStrings(self)
self._setActionString( ACTION_REPEAT , "Repeat the whole Send() operation")
self._setActionString( ACTION_RECHECK, "Repeat the telemetry verification")
self._setActionString( ACTION_RESEND , "Send the command(s) again")
self._setActionString( ACTION_SKIP , "Skip the command injection and proceed with telemetry verification")
self._setActionString( ACTION_CANCEL , "Skip the whole operation and proceed with next SPELL instruction")
    #===========================================================================
    def _reset(self):
        """Reset all per-invocation state so the helper can be reused."""
        # Entity-type flags: group (list of commands) vs sequence vs single command
        self._isGroup = False
        # Stores the original OnFailure config, per the class-level comment
        self.__originalOnFailure = None
        self._isSequence = False
        # String representation of the command(s), used for user messages
        self._cmdName = None
        # The TC item / list of TC items to be sent
        self._cmdDef = None
        # Raw argument list given by the caller (single command only)
        self._cmdArgs = None
        # On failure retry, the user may re-verify TM without resending the command
        self.__doSendCommand = True
        # Limit adjustment disabled until the AdjLimits config enables it
        self.__doAdjustLimits = False
        self.__doAdjustLimitsP = False
        self.__canAdjustLimits = False
        # Current stage of the operation (TC, TM or LIM per the class comment)
        self.__section = 'TC'
        self.__actionTaken = None
        # Telemetry verification steps (list of steps) or None
        self.__verifyCondition = None
        self.__doCheckTelemetry = False
    #===========================================================================
    def _obtainVerificationDefinition(self,*args,**kargs):
        """Extract the telemetry verification steps from the call arguments.

        The steps may arrive positionally (the position depends on whether TC
        arguments were given) or via the 'verify' keyword.  Sets
        self.__verifyCondition and, when steps are present, enables the
        telemetry check phase.
        """
        # Obtain verification steps
        # NOTE(review): the guards test len(args)>=3 / len(args)>=2 but then
        # read args[3] / args[2], which require len(args)>=4 / >=3 — an
        # IndexError is possible at the boundary.  Confirm the intended index
        # against the Send() positional calling convention.
        if self._cmdArgs is not None and len(args)>=3:
            self.__verifyCondition = args[3]
            if type(self.__verifyCondition) != list:
                raise SyntaxException("Expected a list of verification steps")
        elif self._cmdArgs is None and len(args)>=2:
            self.__verifyCondition = args[2]
            if type(self.__verifyCondition) != list:
                raise SyntaxException("Expected a list of verification steps")
        elif kargs.has_key('verify'):
            self.__verifyCondition = kargs.pop('verify')
        else:
            self.__verifyCondition = None
        if self.__verifyCondition:
            self.__doCheckTelemetry = True
    #===========================================================================
    def _obtainCommandDefinition(self, *args, **kargs):
        """Resolve the entity to send from the 'command', 'group' or 'sequence'
        keyword, normalize names into TC items via the registry, and build the
        user-readable name(s) in self._cmdName.

        Sets self._cmdDef, self._cmdName, self._isGroup, self._isSequence and
        copies the Sequence flag into the helper config.
        """
        LOG("Obtaining command definition", level = LOG_LANG)
        if len(args) == 0:
            LOG("No positional arguments", level = LOG_LANG)
            # If no positional arguments are given, the command shall be
            # given with these keywords
            if not kargs.has_key('command') and\
               not kargs.has_key('sequence') and\
               not kargs.has_key('group'):
                raise SyntaxException("Expected a command item or name")
            else:
                if kargs.has_key('command'):
                    # Single command: a TC item or a name, but never a list
                    LOG("Using keyword argument command", level = LOG_LANG)
                    self._isSequence = False
                    self._isGroup = False
                    self._cmdDef = kargs.pop('command')
                    if type(self._cmdDef)==list:
                        raise SyntaxException("Cannot accept list as single command")
                elif kargs.has_key('group'):
                    # Group: must be a list of commands
                    LOG("Using keyword argument group", level = LOG_LANG)
                    self._isSequence = False
                    self._isGroup = True
                    self._cmdDef = kargs.pop('group')
                    if type(self._cmdDef)!=list:
                        raise SyntaxException("Shall provide a command list")
                else:
                    # Sequence: a single item/name, never a list
                    LOG("Using keyword argument sequence", level = LOG_LANG)
                    self._isSequence = True
                    self._isGroup = False
                    self._cmdDef = kargs.pop('sequence')
                    if type(self._cmdDef)==list:
                        raise SyntaxException("Cannot accept command list as a sequence")
        else:
            raise SyntaxException("Expected keyword: command, group or sequence")
        # Create the command item if necessary
        if type(self._cmdDef)==str:
            self._cmdDef = REGISTRY['TC'][self._cmdDef]
        # Do it for each item in the list, if it is the case
        # NOTE(review): 'cpy' is built below but never assigned back to
        # self._cmdDef, so name strings inside a group are NOT replaced by TC
        # items here.  The name-building loop further down explicitly handles
        # the string case, which suggests this may be deliberate — confirm.
        elif type(self._cmdDef)==list:
            cpy = []
            for item in self._cmdDef:
                if type(item)==str:
                    cpy += [REGISTRY['TC'][item]]
                elif isinstance(item,TcItemClass):
                    cpy += [item]
                else:
                    raise SyntaxException("Unexpected item in group: " + repr(item))
        # Obtain the string representation of the entity being sent
        if type(self._cmdDef)==list:
            self._cmdName = []
            for item in self._cmdDef:
                if type(item)==str:
                    self._cmdName += [item]
                # Must be tc item, the check was done already
                else:
                    desc = item.desc()
                    if desc != "": desc = ": " + desc
                    self._cmdName += [item.name() + desc]
        # The else case is already controlled
        else:
            desc = self._cmdDef.desc()
            if desc != "": desc = ": " + desc
            self._cmdName = self._cmdDef.name() + desc
        LOG("Got command definition: " + str(self._cmdName), level = LOG_LANG)
        LOG("Sequence flag: " + str(self._isSequence), level = LOG_LANG)
        LOG("Group flag : " + str(self._isGroup), level = LOG_LANG)
        # Copy the flags to config
        self.addConfig(Sequence,self._isSequence)
#===========================================================================
def _checkCommandDefinition(self):
if not isinstance(self._cmdDef,TcItemClass) and\
not type(self._cmdDef) == str and\
not type(self._cmdDef) == list:
raise SyntaxException("Expected a TC name, TC item or TC list")
    #===========================================================================
    def _obtainCommandArguments(self, *args, **kargs):
        """Extract the TC argument list from the 'args' keyword.

        For a single command or sequence the list is stored in self._cmdArgs;
        for a group, passing 'args' is rejected.
        """
        # 3. Obtain the arguments
        self._cmdArgs = None
        if not self._isGroup:
            LOG("Getting arguments for single command", level = LOG_LANG)
            if kargs.has_key('args'):
                LOG("Using keyword args", level = LOG_LANG)
                self._cmdArgs = kargs.pop('args')
            else:
                LOG("No arguments found", level = LOG_LANG)
                self._cmdArgs = None
        # Using a group and args kword is not accepted (??)
        else:
            if kargs.has_key('args'):
                raise SyntaxException("Cannot use args with TC lists")
    #===========================================================================
    def _parseCommandArguments(self):
        """Apply the user-provided argument list to the TC item.

        Each entry must be a list whose first element is the argument name
        (str) and whose remaining elements are the value (and modifiers);
        entries are assigned onto self._cmdDef via item assignment.
        """
        # 6. Parse arguments if any
        if self._cmdArgs is not None:
            if len(self._cmdArgs)==0:
                raise SyntaxException("Cannot accept empty argument list")
            # Clear any previously existing argument
            self._cmdDef.clear()
            for argument in self._cmdArgs:
                if type(argument)!=list:
                    raise SyntaxException("Malformed argument")
                if len(argument)<1 or type(argument[0])!=str:
                    raise SyntaxException("Malformed argument")
                argName = argument[0]
                # Strip the name; the remainder (value plus modifiers) is stored
                argument = argument[1:]
                LOG("Set argument: " + str(argName) + "=" + repr(argument), level = LOG_LANG)
                self._cmdDef[argName] = argument
#===========================================================================
def _checkCommandArguments(self):
if not self._cmdArgs is None and type(self._cmdArgs)!=list:
raise SyntaxException("Expected an argument list")
#===========================================================================
    def _doPreOperation(self, *args, **kargs ):
        """
        Parse and validate all Send() inputs before execution: obtain the
        command/sequence definition and its arguments, announce what will
        be sent (unless Confirm will do it), obtain the telemetry
        verification conditions and decide whether OOL limits shall be
        adjusted around the command execution.
        """
        #-----------------------------------------------------------------------
        # Parse the command information
        #-----------------------------------------------------------------------
        # 1. Obtain the command/sequence
        self._obtainCommandDefinition(*args,**kargs)
        # 2. Check the command correctness
        self._checkCommandDefinition()
        # 3. Obtain tc arguments
        self._obtainCommandArguments(*args,**kargs)
        # 4. Check arguments correctness
        self._checkCommandArguments()
        # 5. Parse command arguments
        self._parseCommandArguments()
        # Some text messages, not needed if Confirm is activated as the confirmation
        # mechanism already displays the command
        if (not self.hasConfig(Confirm)) or (self.getConfig(Confirm)!=True):
            if self._isSequence:
                self._write("Sending sequence " + repr(self._cmdName))
            elif self._isGroup:
                self._write("Sending group of " + str(len(self._cmdDef)) + " element(s)")
                for name in self._cmdName:
                    self._write("   - " + repr(name))
            else:
                self._write("Sending command " + repr(self._cmdName))
        #-----------------------------------------------------------------------
        # Parse the telemetry information
        #-----------------------------------------------------------------------
        self._obtainVerificationDefinition(*args,**kargs)
        # Normalize a single condition [name,op,value,...] to a list of lists
        if type(self.__verifyCondition)==list:
            if type(self.__verifyCondition[0])!=list:
                self.__verifyCondition = [self.__verifyCondition]
        #-----------------------------------------------------------------------
        # Avoid alarms if the conditions are ok
        #-----------------------------------------------------------------------
        # Limits can only be adjusted when AdjLimits is requested AND there
        # are verification conditions to derive the new limits from.
        self.__doAdjustLimits = self.hasConfig(AdjLimits) and \
                                type(self.__verifyCondition)==list and \
                                self.getConfig(AdjLimits)==True
        self.__doAdjustLimitsP = self.__doAdjustLimits
        self.__canAdjustLimits = self.__doAdjustLimits
        # Store information for possible failures
        self.setFailureInfo("TM", self._cmdDef)
#==========================================================================
    def _buildCommandDescription(self):
        """
        Build the human-readable confirmation text listing the command,
        command group or sequence about to be sent, together with its
        arguments (name, value and units). Used by the Confirm prompt
        in _doOperation().
        """
        msg = "Please confirm execution of the following "
        if self._isGroup:
            msg += "command group:"
            for cmd in self._cmdDef:
                msg += "\n    Command: " + cmd.name()
                if (cmd.desc().strip() != ""): msg += " ('" + cmd.desc() + "')"
                if len(cmd._getParams())>0:
                    msg += "\n    Arguments:"
                    for param in cmd._getParams():
                        msg += "\n      - " + repr(param.name) + " = " + str(param.value.get()) + " " + str(param.value.units())
        elif self._isSequence:
            msg += "sequence: " + self._cmdDef.name()
            if (self._cmdDef.desc().strip() != ""): msg += " ('" + self._cmdDef.desc() + "')"
            if len(self._cmdDef.getElements())>0:
                msg += "\n    Elements:"
                for element in self._cmdDef.getElements():
                    msg += "\n      - " + repr(element)
            if len(self._cmdDef._getParams())>0:
                msg += "\n    Arguments:"
                for param in self._cmdDef._getParams():
                    msg += "\n      - " + repr(param.name) + " = " + str(param.value.get()) + " " + str(param.value.units())
        else:
            msg += "command: " + self._cmdDef.name()
            if (self._cmdDef.desc().strip() != ""): msg += " ('" + self._cmdDef.desc() + "')"
            if len(self._cmdDef._getParams())>0:
                msg += "\n    Arguments:"
                for param in self._cmdDef._getParams():
                    msg += "\n      - " + repr(param.name) + " = " + str(param.value.get()) + " " + str(param.value.units())
        return msg
#===========================================================================
    def _doOperation(self, *args, **kargs ):
        """
        Execute the Send() operation as a staged state machine. Stages,
        tracked in __section so failure handlers know what to repeat/skip:
        CONFIRM (optional user confirmation), LIM1 (pre-execution limit
        widening to avoid alarms), TC (command injection), TM (telemetry
        verification) and LIM2 (final limit adjustment around the expected
        values). Returns [repeat, result, status, message].
        """
        repeat = False
        self.__originalOnFailure = self.getConfig(OnFailure)
        #-----------------------------------------------------------------------
        # CONFIRM SECTION
        #-----------------------------------------------------------------------
        # Confirm execution if needed
        confirm = REGISTRY['TC'].shouldForceTcConfirm()
        confirm = confirm or self.hasConfig(Confirm) and self.getConfig(Confirm) == True
        if confirm:
            self.__section = 'CONFIRM'
            msg = self._buildCommandDescription()
            if not self._prompt(msg, [], {Type:OK_CANCEL}):
                return [ False, False, NOTIF_STATUS_CL, "Cancelled by user" ]
        #-----------------------------------------------------------------------
        # LIMIT ADJUSTMENT SECTION
        #-----------------------------------------------------------------------
        if self.__canAdjustLimits and self.__doAdjustLimitsP:
            self.__section = 'LIM1'
            # We don't allow resend nor recheck, only repeat
            self.addConfig(OnFailure,self.getConfig(OnFailure) & (~RESEND))
            self.addConfig(OnFailure,self.getConfig(OnFailure) & (~RECHECK))
            # Adapt the action messages
            self._setActionString( ACTION_REPEAT , "Retry disabling limits")
            self._setActionString( ACTION_SKIP , "Skip limits adjustment and command injection. Proceed with telemetry verification")
            self._setActionString( ACTION_CANCEL , "Skip the whole Send() operation and return failure (False)")
            # Store information for possible failures
            self.setFailureInfo("TM", self.__verifyCondition)
            # We need to enlarge the limit range to the maximum to
            # avoid alarms (analog parameters) or to allow any
            # status value (status parameters)
            REGISTRY['CIF'].write("Avoiding alarms by adjusting limits before TC execution")
            for condition in self.__verifyCondition:
                # Condition layout: [param name, operator, value, (config dict)]
                paramName = condition[0]
                paramValue = condition[2]
                operator = condition[1]
                # Do not adjust limits if the condition config dict says the contrary
                if type(condition[-1])==dict:
                    itemCfg = condition[-1]
                    if itemCfg.has_key(AdjLimits) and itemCfg[AdjLimits] == False: continue
                # Do not adjust limits if eq operator is not used
                if operator != eq: continue
                # Proceed with limit adjustment
                if type(paramValue)==str: #Status parameters
                    # First get the currentValue
                    paramItem = REGISTRY['TM'][paramName]
                    paramItem.refresh( Wait = False )
                    currentValue = paramItem.eng( Wait = False )
                    # Build the expected value list (current plus target)
                    if (currentValue != paramValue):
                        expectedValues = currentValue + ", " + paramValue
                    else:
                        continue
                    limits = {}
                    limits[Expected] = expectedValues
                    # Adjust the limits accordingly
                    REGISTRY['CIF'].write("  - " + repr(paramName) + " adjusting to expected values: " + expectedValues)
                else: #Analog parameters
                    # Set the limit to the maximum value
                    limits = {}
                    limits[LoRed] = -1.7e+308
                    limits[LoYel] = -1.7e+308
                    limits[HiRed] = 1.7e+308
                    limits[HiYel] = 1.7e+308
                    REGISTRY['CIF'].write("  - " + repr(paramName) + " enlarged analog limits to the maximum")
                REGISTRY['TM'].setLimits( paramName, limits, config = self.getConfig() )
        # Reset the OnFailure config
        self.addConfig(OnFailure, self.__originalOnFailure)
        #-----------------------------------------------------------------------
        # COMMAND SECTION
        #-----------------------------------------------------------------------
        # If we are repeating the operation due to an user action, check
        # the flag to see if we have to resend the command
        if self.__doSendCommand:
            self.__section = 'TC'
            # Store information for possible failures
            self.setFailureInfo("TC", self._cmdDef)
            # We do not allow recheck or repeat yet, only resend
            self.addConfig(OnFailure,self.getConfig(OnFailure) & (~REPEAT))
            self.addConfig(OnFailure,self.getConfig(OnFailure) & (~RECHECK))
            # Adapt the action messages
            if self._isGroup:
                self._setActionString( ACTION_RESEND , "Send the whole command group again")
            elif self._isSequence:
                self._setActionString( ACTION_RESEND , "Send the command sequence again")
            else:
                self._setActionString( ACTION_RESEND , "Send the command again")
            if self.__verifyCondition:
                self._setActionString( ACTION_SKIP , "Skip the command injection. Proceed with telemetry verification")
            else:
                self._setActionString( ACTION_SKIP , "Skip the command injection and return success (True)")
            self._setActionString( ACTION_CANCEL , "Skip the whole Send() operation and return failure (False)")
            try:
                # Actually send the command
                tcIsSuccess = REGISTRY['TC'].send(self._cmdDef, config = self.getConfig() )
            except DriverException,ex:
                raise ex
            if tcIsSuccess:
                self._write("Execution success")
            else:
                self._write("Execution failed", {Severity:ERROR} )
                raise DriverException("Command execution failed")
        else:
            # Command stage skipped by a previous user action
            tcIsSuccess = True
        # Reset the OnFailure config
        self.addConfig(OnFailure, self.__originalOnFailure)
        #-----------------------------------------------------------------------
        # TELEMETRY SECTION
        #-----------------------------------------------------------------------
        # If there are verification sets, verify them
        if self.__doCheckTelemetry and self.__verifyCondition and tcIsSuccess:
            self.__section = 'TM'
            # Store information for possible failures
            self.setFailureInfo("TM", self.__verifyCondition)
            # Adapt the action messages
            self._setActionString( ACTION_RECHECK, "Repeat the telemetry verification")
            self._setActionString( ACTION_SKIP , "Skip the telemetry verification and return success (True)")
            self._setActionString( ACTION_CANCEL , "Skip the telemetry verification and return failure (False)")
            # Wait some time before verifying if requested
            if self.hasConfig(Delay):
                delay = self.getConfig(Delay)
                if delay:
                    from spell.lang.functions import WaitFor
                    self._write("Waiting "+ str(delay) + " seconds before TM verification", {Severity:INFORMATION})
                    WaitFor(delay, Notify=False, Verbosity=999)
            # We dont allow repeat here but allow recheck at least
            self.addConfig(OnFailure,self.getConfig(OnFailure) & (~REPEAT))
            # Adapt the action messages
            self._setActionString( ACTION_RECHECK, "Repeat the telemetry verification")
            self._setActionString( ACTION_SKIP , "Skip the telemetry verification and return success (True)")
            self._setActionString( ACTION_CANCEL , "Skip the telemetry verification and return failure (False)")
            # Perform verification
            tmIsSuccess = REGISTRY['TM'].verify(self.__verifyCondition, config=self.getConfig())
            #repeat, tmIsSuccess = self._processActionOnResult(tmIsSuccess)
        else:
            # No verification requested (or command failed): consider TM ok
            tmIsSuccess = True
        # Reset the OnFailure config
        self.addConfig(OnFailure, self.__originalOnFailure)
        #-----------------------------------------------------------------------
        # ADJUST LIMITS SECTION
        #-----------------------------------------------------------------------
        if tmIsSuccess and self.__canAdjustLimits and self.__doAdjustLimits:
            self.__section = "LIM2"
            # Store information for possible failures
            self.setFailureInfo("TM", self.__verifyCondition)
            # We dont allow recheck/resend for this, only repeat if the user wants
            self.addConfig(OnFailure,self.getConfig(OnFailure) & (~RESEND))
            self.addConfig(OnFailure,self.getConfig(OnFailure) & (~RECHECK))
            # Adapt the action messages
            self._setActionString( ACTION_REPEAT , "Repeat the final limit adjustment")
            self._setActionString( ACTION_SKIP , "Skip the final limit adjustment and return success (True)")
            self._setActionString( ACTION_CANCEL , "Skip the final limit adjustment and return failure (False)")
            REGISTRY['CIF'].write("Adjusting limit definitions after TC execution")
            for condition in self.__verifyCondition:
                paramName = condition[0]
                paramValue = condition[2]
                operator = condition[1]
                # Do not adjust limits if not eq operator used
                if operator != eq: continue
                # Do not adjust limits if the condition config dict says the contrary
                conditionTolerance = None
                if type(condition[-1])==dict:
                    itemCfg = condition[-1]
                    conditionTolerance = itemCfg.get(Tolerance)
                    if itemCfg.has_key(AdjLimits) and itemCfg[AdjLimits] == False: continue
                if type(paramValue)==str: #Status parameters
                    # Build the expected value list
                    limits = {}
                    limits[Expected] = paramValue
                    # Adjust the limits accordingly
                    REGISTRY['CIF'].write("  - " + repr(paramName) + " adjusting to expected value: " + paramValue)
                else: #Analog parameters
                    # if the condition has its own tolerance, use it
                    if conditionTolerance:
                        tolerance = conditionTolerance
                    else:
                        tolerance = self.getConfig(Tolerance)
                        if tolerance is None: tolerance = 0.1
                    # Center the limits on the expected value +/- tolerance
                    limits = {}
                    limits[LoRed] = paramValue - tolerance
                    limits[LoYel] = paramValue - tolerance
                    limits[HiRed] = paramValue + tolerance
                    limits[HiYel] = paramValue + tolerance
                    REGISTRY['CIF'].write("  - " + repr(paramName) + " limits set to ( " + str(limits[LoRed]) +
                        " , " + str(limits[LoYel]) + " | " + str(limits[HiYel]) + " , " + str(limits[HiRed]) + " )")
                    REGISTRY['CIF'].write("    Tolerance used: " + str(tolerance))
                REGISTRY['TM'].setLimits( paramName, limits, config = self.getConfig() )
        # Reset the OnFailure config
        self.addConfig(OnFailure, self.__originalOnFailure)
        # Depending on the result of both operations we decide to repeat the whole
        # or part of the operation.
        if self.__verifyCondition is None:
            result = tcIsSuccess
        else:
            result = tcIsSuccess and tmIsSuccess
        if self.__actionTaken in ["SKIP","CANCEL"]:
            opStatus = NOTIF_STATUS_SP
        elif result:
            opStatus = NOTIF_STATUS_OK
        else:
            opStatus = NOTIF_STATUS_FL
        return [ repeat, result, opStatus, "" ]
#===========================================================================
def _driverUpdateActionList(self, theOptions, exception = None):
if self.__section == "TC":
return REGISTRY['TC'].driverUpdateActionList( theOptions, exception )
return theOptions
#===========================================================================
def _driverPerformAction(self, code):
if self.__section == "TC":
return REGISTRY['TC'].driverPerformAction(code)
return None # [False,False]
#===========================================================================
def _driverBeforeAction(self, code):
if self.__section == "TC":
return REGISTRY['TC'].driverBeforeAction(code)
#===========================================================================
def _driverAfterAction(self, code):
if self.__section == "TC":
return REGISTRY['TC'].driverAfterAction(code)
#===========================================================================
def _getExceptionFlag(self, exception ):
# Special case for verify, OnFalse
if exception.reason.find("evaluated to False")>0:
return self.getConfig(PromptUser)
else:
return self.getConfig(PromptFailure)
#===========================================================================
def _getBehaviorOptions(self, exception):
# If the OnFailure parameter is not set, get the default behavior.
# This default behavior depends on the particular primitive being
# used, so it is implemented in child wrappers.
if self.getConfig(OnFailure) is None:
LOG("Using defaults")
self.setConfig({OnFailure:ABORT})
# Special case for verify, OnFalse
if exception and (exception.reason.find("evaluated to False")>0):
optionRef = self.getConfig(OnFalse)
else:
optionRef = self.getConfig(OnFailure)
# Get the desired behavior
theOptions = self._getActionList( optionRef, exception )
return theOptions
#===========================================================================
    def _doSkip(self):
        """
        SKIP action handler. Depending on the stage where the failure
        occurred, the remaining stages may still run: skipping during limit
        adjustment (LIM1) or command injection (TC) keeps the telemetry
        verification active; skipping during verification (TM) disables all
        remaining work; any other stage ends the operation successfully.
        Returns [repeat, result] for the wrapper retry loop.
        """
        self.__actionTaken = "SKIP"
        if self.getConfig(PromptUser)==True:
            self._write("Operation skipped", {Severity:WARNING} )
        # By skipping the operation, if we are in LIM1 or TC stages we still
        # want to verify TM
        if self.__section in ['LIM1','TC']:
            self.__doAdjustLimitsP = False
            self.__doAdjustLimits = False
            self.__doSendCommand = False
            self.__doCheckTelemetry = True
            return [True,False]
        elif self.__section == 'TM':
            self.__doAdjustLimitsP = False
            self.__doAdjustLimits = False
            self.__doSendCommand = False
            self.__doCheckTelemetry = False
            return [True,False]
        else:
            return [False,True]
#===========================================================================
def _doCancel(self):
self._write("Operation cancelled", {Severity:WARNING} )
self.__actionTaken = "CANCEL"
return [False,False]
#===========================================================================
def _doResend(self):
self.__actionTaken = "RESEND"
if self._isSequence:
self._write("Retrying sequence execution", {Severity:WARNING} )
elif self._isGroup:
self._write("Retrying group execution", {Severity:WARNING} )
else:
self._write("Retrying command execution", {Severity:WARNING} )
self.__doSendCommand = True
self.__doAdjustLimitsP = False
self.__doCheckTelemetry = True
return [True,False]
#===========================================================================
def _doRepeat(self):
self.__actionTaken = "CANCEL"
self._write("Retry whole operation", {Severity:WARNING} )
self.__doAdjustLimits = True
self.__doAdjustLimitsP = True
self.__doSendCommand = True
self.__doCheckTelemetry = True
return [True,False]
#===========================================================================
def _doRecheck(self):
self.__actionTaken = "RECHECK"
self._write("Retry verification block", {Severity:WARNING} )
self.__doSendCommand = False
self.__doAdjustLimitsP = False
self.__doAdjustLimits = True
self.__doCheckTelemetry = True
return [True,False]
################################################################################
class BuildTC_Helper(WrapperHelper):
"""
DESCRIPTION:
Helper for the Build TC wrapper.
"""
_tcName = None<|fim▁hole|> #===========================================================================
    def __init__(self):
        # Initialize the base wrapper for the TC domain
        WrapperHelper.__init__(self, "TC")
        # Name of the command or sequence to build
        self._tcName = None
        # Parsed [name, value, ...] argument entries
        self._tcArguments = []
        self._opName = "TC build"
        # TC item created by _doOperation (None until built)
        self._tcItem = None
        # True when building a sequence instead of a single command
        self._isSequence = False
#===========================================================================
def _obtainCommandName(self, *args, **kargs ):
if len(args)==1:
if type(args[0])!=str:
raise SyntaxException("Expected a command name")
self._tcName = args[0]
elif len(args)==0:
if kargs.has_key('command'):
self._tcName = kargs.get('command')
elif kargs.has_key('sequence'):
self._tcName = kargs.get('sequence')
self._isSequence = True
else:
raise SyntaxException("Expected a command or sequence")
else:
raise SyntaxException("Expected a command name")
#===========================================================================
def _obtainCommandArguments(self, *args, **kargs ):
if len(args)<=1:
if kargs.has_key('args'):
self._tcArguments = kargs.get('args')
else:
if type(args[1])!=list:
raise SyntaxException("Expected a list of arguments")
self._tcArguments = args[1]
#===========================================================================
    def _doPreOperation(self, *args, **kargs ):
        """
        Parse the BuildTC() inputs: obtain the command/sequence name and
        the argument list, and register the failure context information.
        """
        self._obtainCommandName(*args,**kargs)
        self._obtainCommandArguments(*args,**kargs)
        # Store information for possible failures
        self.setFailureInfo("TC", self._tcName)
#===========================================================================
    def _doOperation(self, *args, **kargs ):
        """
        Build the TC item: look it up in the TC registry, apply the wrapper
        configuration (marking it as a sequence when applicable) and assign
        the parsed arguments. Returns [repeat, result, status, message]
        where result is the built TC item.
        """
        self._setActionString( ACTION_SKIP , "Skip the command construction and return None")
        self._setActionString( ACTION_REPEAT , "Repeat the command construction")
        if self._isSequence:
            self._write("Building sequence " + repr(self._tcName))
        else:
            self._write("Building command " + repr(self._tcName))
        # Create the item
        LOG("Obtaining TC entity: " + repr(self._tcName), level = LOG_LANG)
        self._tcItem = REGISTRY['TC'][self._tcName]
        # Start from a clean item: drop previous arguments, apply config
        self._tcItem.clear()
        self._tcItem.configure(self.getConfig())
        if self._isSequence:
            self._tcItem.addConfig(Sequence,True)
        # Assign the arguments
        for tcArg in self._tcArguments:
            LOG("Parsed TC argument: " + repr(tcArg[0]), level = LOG_LANG)
            LOG("Argument config  : " + repr(tcArg[1:]), level = LOG_LANG)
            self._tcItem[ tcArg[0] ] = tcArg[1:]
            self._write("  - Argument " + repr(tcArg[0]) + " value " + repr(tcArg[1:]))
        return [False,self._tcItem,NOTIF_STATUS_OK,""]
#===========================================================================
def _doSkip(self):
self._write("Skipping command construction", {Severity:WARNING} )
self._write("CAUTION: procedure logic may become invalid!", {Severity:WARNING} )
self._tcItem = None
return [False,None]
#===========================================================================
def _doRepeat(self):
self._write("Repeat command construction", {Severity:WARNING} )
return [True,False]
################################################################################
class BuildMemoryLoad_Helper(BuildTC_Helper):
    """
    DESCRIPTION:
        Helper for the BuildMemoryLoad wrapper. Builds a regular TC item
        via BuildTC_Helper and marks it as a memory load.
    """
    #===========================================================================
    def __init__(self):
        BuildTC_Helper.__init__(self)
    #===========================================================================
    def _doOperation(self, *args, **kargs ):
        self._setActionString( ACTION_SKIP , "Skip the memory load construction and return None")
        self._setActionString( ACTION_REPEAT , "Repeat the memory load construction")
        # BUG FIX: forward the caller arguments with proper unpacking. The
        # previous code passed the tuple/dict objects positionally
        # (args, kargs), so the parent received args=((...), {...}) and an
        # empty keyword dict instead of the original arguments.
        repeat, tcItem, status, msg = super(BuildMemoryLoad_Helper, self)._doOperation(*args, **kargs)
        # Flag the built item as a memory load command
        tcItem.addConfig('MemoryLoad',True)
        return [repeat,tcItem,status,msg]
_tcItem = None
_isSequence = False
|
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.urls import path, re_path
from django.conf import settings
from django.contrib.auth import views as auth_views
urlpatterns = [
re_path(
'login/',
auth_views.LoginView.as_view(template_name='dj_auth/login.html'),
name='login'),
re_path(
'logout/',
auth_views.LogoutView.as_view(),
name='logout'),
re_path(
r'^password_reset/$',
auth_views.PasswordResetView.as_view(
html_email_template_name='dj_auth/password_reset_email.html',
template_name='dj_auth/password_reset_form.html'),<|fim▁hole|> re_path(
r'^password_reset/done/$',
auth_views.PasswordResetDoneView.as_view(template_name='dj_auth/password_reset_done.html'),
name='password_reset_done'),
re_path(
r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
auth_views.PasswordResetConfirmView.as_view(template_name='dj_auth/password_reset_confirm.html'),
name='password_reset_confirm'),
re_path(
r'^reset/done/$',
auth_views.PasswordResetCompleteView.as_view(template_name='dj_auth/password_reset_complete.html'),
name='password_reset_complete'),
re_path(
r'^password_change/$',
auth_views.PasswordChangeView.as_view(template_name='dj_auth/password_change_form.html'),
name='password_change'),
re_path(
r'^password_change/done/$',
auth_views.PasswordChangeDoneView.as_view(template_name='dj_auth/password_change_done.html'),
name='password_change_done'),
]<|fim▁end|> | name='password_reset'), |
<|file_name|>node.py<|end_file_name|><|fim▁begin|># Copyright (c) 2013 Red Hat, Inc.
# Author: William Benton ([email protected])
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from proxy import Proxy, proxied_attr
from proxy import proxied_attr_get as pag, proxied_attr_set as pas, proxied_attr_getset as pags
from arc_utils import arcmethod, uniq
from singleton import v as store_singleton
import errors
from errors import not_implemented, fail
from constants import PARTITION_GROUP, LABEL_SENTINEL_PARAM, LABEL_SENTINEL_PARAM_ATTR
from datetime import datetime
import calendar
import urllib<|fim▁hole|>
def ts():
    """Return the current UTC time as an integer count of microseconds
    since the Unix epoch: whole seconds from calendar.timegm() plus the
    sub-second microsecond component."""
    now = datetime.utcnow()
    seconds = calendar.timegm(now.utctimetuple())
    return seconds * 1000000 + now.microsecond
class node(Proxy):
    """Client-side proxy for a wallaby node object: exposes configuration
    lookup, provisioning, check-in bookkeeping, change analysis and
    partition-group label management."""
    name = property(pag("name"))
    memberships = property(*pags("memberships"))
    identity_group = property(lambda self : self.cm.make_proxy_object("group", self.attr_vals["identity_group"], refresh=True))
    provisioned = property(*pags("provisioned"))
    last_updated_version = property(pag("last_updated_version"))

    modifyMemberships = arcmethod(pag("memberships"), pas("memberships"), heterogeneous=True, preserve_order=True)

    def getConfig(self, **options):
        """Fetch this node's configuration, optionally pinned to a commit
        given via the 'version' option."""
        # dict.has_key() is Python-2-only; 'in' is equivalent and forward
        # compatible.
        if "version" in options:
            return self.cm.fetch_json_resource("/config/node/%s" % urllib.quote_plus(self.name), {"commit":options["version"]}, {})
        return self.cm.fetch_json_resource("/config/node/%s" % urllib.quote_plus(self.name))

    def makeProvisioned(self):
        # Mark the node as provisioned and push the change to the store
        self.provisioned = True
        self.update()

    def explain(self):
        not_implemented()

    def checkin(self):
        """Record the current timestamp as this node's last check-in in the
        node metadata and return it."""
        metapath = "/meta/node/%s" % self.name
        # now = datetime.utcnow().isoformat()
        now = ts()
        meta = self.cm.fetch_json_resource(metapath, False, default={})
        meta["last-checkin"] = now
        self.cm.put_json_resource(metapath, meta, False)
        return now

    def last_checkin(self):
        """Return the last recorded check-in timestamp, or 0 when the node
        never checked in."""
        metapath = "/meta/node/%s" % self.name
        meta = self.cm.fetch_json_resource(metapath, False, default={})
        # Equivalent to the old "meta.has_key(k) and meta[k] or 0" idiom:
        # any missing or falsy value maps to 0.
        return meta.get("last-checkin") or 0

    def whatChanged(self, old, new):
        """Compare the node configuration between commits 'old' and 'new';
        return [changed params, subsystems to restart, subsystems to
        reconfigure]."""
        oc = self.cm.fetch_json_resource("/config/node/%s" % urllib.quote_plus(self.name), {"commit":old}, {})
        nc = self.cm.fetch_json_resource("/config/node/%s" % urllib.quote_plus(self.name), {"commit":new}, {})
        ock = set(oc)
        nck = set(nc)
        # Params added, removed or with a different value (the version
        # marker itself is excluded)
        params = set([p for p in (ock | nck) if p not in ock or p not in nck or oc[p] != nc[p]]) - set(["WALLABY_CONFIG_VERSION"])
        mc_params = set([p for p in params if store_singleton().getParam(p).must_change])
        subsystems = [store_singleton().getSubsys(sub) for sub in self.cm.list_objects("subsystem")]
        restart, reconfig = [], []
        for ss in subsystems:
            # NOTE(review): 'ss.refresh' is a bare attribute access; if
            # refresh is a method this is a no-op and probably should be
            # 'ss.refresh()' — confirm against the Proxy implementation.
            ss.refresh
            ssp = set(ss.parameters)
            if ssp.intersection(mc_params):
                restart.append(ss.name)
            elif ssp.intersection(params):
                reconfig.append(ss.name)
        return [list(params), restart, reconfig]

    # labeling support below
    def getLabels(self):
        """Return the labels of this node: the memberships listed after the
        partition-group sentinel (empty when the sentinel is absent)."""
        memberships = self.memberships
        if PARTITION_GROUP not in memberships:
            return []
        else:
            partition = memberships.index(PARTITION_GROUP)
            return memberships[partition+1:]

    labels=property(getLabels)

    def modifyLabels(self, op, labels, **options):
        """Apply an ADD/REPLACE/REMOVE operation to this node's labels and
        store the resulting memberships. Supported options:
        ensure_partition_group (create the sentinel group if missing) and
        create_missing_labels (create label groups not yet in the store)."""
        thestore = store_singleton()
        memberships = self.memberships
        current_labels = self.getLabels()
        label_set = set(current_labels + [PARTITION_GROUP])
        new_labels = []
        if op == "ADD":
            new_labels = current_labels + labels
        elif op == "REPLACE":
            new_labels = labels
        elif op == "REMOVE":
            new_labels = [label for label in current_labels if label not in labels]
        else:
            raise NotImplementedError("modifyLabels:  operation " + op + " not understood")
        # Memberships that are not labels keep their position before the
        # partition-group sentinel; labels follow it.
        just_memberships = [grp for grp in memberships if grp not in label_set]
        new_memberships = uniq(just_memberships + [PARTITION_GROUP] + new_labels)
        if "ensure_partition_group" in options and options["ensure_partition_group"] is not False:
            if thestore is None:
                raise RuntimeError("store singleton must be initialized before using the ensure_partition_group option")
            thestore.getPartitionGroup()
        if "create_missing_labels" in options and options["create_missing_labels"] is not False:
            if thestore is None:
                raise RuntimeError("store singleton must be initialized before using the create_missing_labels option")
            for missing_label in thestore.checkGroupValidity(new_labels):
                thestore.addLabel(missing_label)
        return self.modifyMemberships("REPLACE", new_memberships, {})
proxied_attr(node, "name")
proxied_attr(node, "memberships")
proxied_attr(node, "identity_group")
proxied_attr(node, "provisioned")<|fim▁end|> | |
<|file_name|>AssignmentManager.java<|end_file_name|><|fim▁begin|>/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionTransition;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.ipc.RpcClient.FailedServerException;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer;
import org.apache.hadoop.hbase.master.handler.ClosedRegionHandler;
import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.regionserver.SplitTransaction;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.util.ConfigUtil;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.KeyLocker;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Triple;
import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKTable;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.zookeeper.AsyncCallback;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.data.Stat;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.LinkedHashMultimap;
/**
 * Manages and performs region assignment.
 * <p>
 * Monitors ZooKeeper for events related to regions in transition.
 * <p>
 * Handles existing regions in transition during master failover.
 */
@InterfaceAudience.Private
public class AssignmentManager extends ZooKeeperListener {
  private static final Log LOG = LogFactory.getLog(AssignmentManager.class);

  // Sentinel ServerName used to recognize region transitions injected by the
  // HBCK repair tool rather than by a real region server.
  public static final ServerName HBCK_CODE_SERVERNAME = ServerName.valueOf(HConstants.HBCK_CODE_NAME,
      -1, -1L);

  // Configuration keys and defaults for the assignment timeout monitor.
  public static final String ASSIGNMENT_TIMEOUT = "hbase.master.assignment.timeoutmonitor.timeout";
  public static final int DEFAULT_ASSIGNMENT_TIMEOUT_DEFAULT = 600000;
  public static final String ASSIGNMENT_TIMEOUT_MANAGEMENT = "hbase.assignment.timeout.management";
  public static final boolean DEFAULT_ASSIGNMENT_TIMEOUT_MANAGEMENT = false;

  // How long to wait before retrying when a region server reports the region
  // is already in transition.
  public static final String ALREADY_IN_TRANSITION_WAITTIME
    = "hbase.assignment.already.intransition.waittime";
  public static final int DEFAULT_ALREADY_IN_TRANSITION_WAITTIME = 60000; // 1 minute

  protected final Server server;

  private ServerManager serverManager;

  // True only when the configured load balancer is FavoredNodeLoadBalancer;
  // gates whether favored-node info is read.
  private boolean shouldAssignRegionsWithFavoredNodes;

  private CatalogTracker catalogTracker;

  // Non-null only when timeout management (tomActivated) is enabled.
  protected final TimeoutMonitor timeoutMonitor;

  // Non-null only when timeout management (tomActivated) is enabled.
  private final TimerUpdater timerUpdater;

  private LoadBalancer balancer;

  private final MetricsAssignmentManager metricsAssignmentManager;

  private final TableLockManager tableLockManager;

  // Counter of regions opened; not read or updated within this chunk.
  private AtomicInteger numRegionsOpened = new AtomicInteger(0);

  // Per-encoded-region-name locks serializing region state manipulation.
  final private KeyLocker<String> locker = new KeyLocker<String>();

  /**
   * Map of regions to reopen after the schema of a table is changed. Key -
   * encoded region name, value - HRegionInfo
   */
  private final Map <String, HRegionInfo> regionsToReopen;

  /*
   * Maximum times we recurse an assignment/unassignment.
   * See below in {@link #assign()} and {@link #unassign()}.
   */
  private final int maximumAttempts;

  /**
   * Map of two merging regions from the region to be created.
   */
  private final Map<String, PairOfSameType<HRegionInfo>> mergingRegions
    = new HashMap<String, PairOfSameType<HRegionInfo>>();

  /**
   * The sleep time for which the assignment will wait before retrying in case of hbase:meta assignment
   * failure due to lack of availability of region plan
   */
  private final long sleepTimeBeforeRetryingMetaAssignment;

  /** Plans for region movement. Key is the encoded version of a region name*/
  // TODO: When do plans get cleaned out? Ever? In server open and in server
  // shutdown processing -- St.Ack
  // All access to this Map must be synchronized.
  final NavigableMap<String, RegionPlan> regionPlans =
    new TreeMap<String, RegionPlan>();

  private final ZKTable zkTable;

  /**
   * Contains the server which need to update timer, these servers will be
   * handled by {@link TimerUpdater}
   */
  private final ConcurrentSkipListSet<ServerName> serversInUpdatingTimer;

  private final ExecutorService executorService;

  // For unit tests, keep track of calls to ClosedRegionHandler
  private Map<HRegionInfo, AtomicBoolean> closedRegionHandlerCalled = null;

  // For unit tests, keep track of calls to OpenedRegionHandler
  private Map<HRegionInfo, AtomicBoolean> openedRegionHandlerCalled = null;

  //Thread pool executor service for timeout monitor
  private java.util.concurrent.ExecutorService threadPoolExecutorService;

  // A bunch of ZK events workers. Each is a single thread executor service
  private final java.util.concurrent.ExecutorService zkEventWorkers;

  // Transition events that are still processed even when the reporting
  // region server is no longer online (see handleRegion).
  private List<EventType> ignoreStatesRSOffline = Arrays.asList(
      EventType.RS_ZK_REGION_FAILED_OPEN, EventType.RS_ZK_REGION_CLOSED);

  private final RegionStates regionStates;

  // The threshold to use bulk assigning. Using bulk assignment
  // only if assigning at least this many regions to at least this
  // many servers. If assigning fewer regions to fewer servers,
  // bulk assigning may be not as efficient.
  private final int bulkAssignThresholdRegions;
  private final int bulkAssignThresholdServers;

  // Should bulk assignment wait till all regions are assigned,
  // or it is timed out? This is useful to measure bulk assignment
  // performance, but not needed in most use cases.
  private final boolean bulkAssignWaitTillAllAssigned;

  /**
   * Indicator that AssignmentManager has recovered the region states so
   * that ServerShutdownHandler can be fully enabled and re-assign regions
   * of dead servers. So that when re-assignment happens, AssignmentManager
   * has proper region states.
   *
   * Protected to ease testing.
   */
  protected final AtomicBoolean failoverCleanupDone = new AtomicBoolean(false);

  /** Is the TimeOutManagement activated **/
  private final boolean tomActivated;

  /**
   * A map to track the count a region fails to open in a row.
   * So that we don't try to open a region forever if the failure is
   * unrecoverable. We don't put this information in region states
   * because we don't expect this to happen frequently; we don't
   * want to copy this information over during each state transition either.
   */
  private final ConcurrentHashMap<String, AtomicInteger>
    failedOpenTracker = new ConcurrentHashMap<String, AtomicInteger>();

  // A flag to indicate if we are using ZK for region assignment
  private final boolean useZKForAssignment;

  // In case not using ZK for region assignment, region states
  // are persisted in meta with a state store
  private final RegionStateStore regionStateStore;

  /**
   * For testing only! Set to true to skip handling of split.
   */
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MS_SHOULD_BE_FINAL")
  public static boolean TEST_SKIP_SPLIT_HANDLING = false;

  /** Listeners that are called on assignment events. */
  private List<AssignmentListener> listeners = new CopyOnWriteArrayList<AssignmentListener>();
/**
 * Constructs a new assignment manager.
 *
 * @param server the master's Server context; supplies the ZK watcher (via
 *          super()) and the Configuration all tunables are read from
 * @param serverManager tracks online/dead region servers
 * @param catalogTracker tracker used to locate catalog regions
 * @param balancer produces region placement plans
 * @param service executor on which assignment event handlers run
 * @param metricsMaster master metrics context (not referenced in this
 *          constructor body)
 * @param tableLockManager used to take table locks during bulk operations
 * @throws KeeperException
 * @throws IOException
 */
public AssignmentManager(Server server, ServerManager serverManager,
    CatalogTracker catalogTracker, final LoadBalancer balancer,
    final ExecutorService service, MetricsMaster metricsMaster,
    final TableLockManager tableLockManager) throws KeeperException, IOException {
  // super() installs the ZK watcher; this.watcher is used further below.
  super(server.getZooKeeper());
  this.server = server;
  this.serverManager = serverManager;
  this.catalogTracker = catalogTracker;
  this.executorService = service;
  this.regionStateStore = new RegionStateStore(server);
  this.regionsToReopen = Collections.synchronizedMap
      (new HashMap<String, HRegionInfo> ());
  Configuration conf = server.getConfiguration();
  // Only read favored nodes if using the favored nodes load balancer.
  this.shouldAssignRegionsWithFavoredNodes = conf.getClass(
      HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class).equals(
      FavoredNodeLoadBalancer.class);
  this.tomActivated = conf.getBoolean(
    ASSIGNMENT_TIMEOUT_MANAGEMENT, DEFAULT_ASSIGNMENT_TIMEOUT_MANAGEMENT);
  // Timeout-monitor machinery is created only when timeout management is on;
  // otherwise the related fields are deliberately left null.
  if (tomActivated){
    this.serversInUpdatingTimer =  new ConcurrentSkipListSet<ServerName>();
    this.timeoutMonitor = new TimeoutMonitor(
      conf.getInt("hbase.master.assignment.timeoutmonitor.period", 30000),
      server, serverManager,
      conf.getInt(ASSIGNMENT_TIMEOUT, DEFAULT_ASSIGNMENT_TIMEOUT_DEFAULT));
    this.timerUpdater = new TimerUpdater(conf.getInt(
      "hbase.master.assignment.timerupdater.period", 10000), server);
    Threads.setDaemonThreadRunning(timerUpdater.getThread(),
      server.getServerName() + ".timerUpdater");
  } else {
    this.serversInUpdatingTimer =  null;
    this.timeoutMonitor = null;
    this.timerUpdater = null;
  }
  // Requires this.watcher, so must follow the super() call above.
  this.zkTable = new ZKTable(this.watcher);
  // This is the max attempts, not retries, so it should be at least 1.
  this.maximumAttempts = Math.max(1,
    this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10));
  this.sleepTimeBeforeRetryingMetaAssignment = this.server.getConfiguration().getLong(
      "hbase.meta.assignment.retry.sleeptime", 1000l);
  this.balancer = balancer;
  int maxThreads = conf.getInt("hbase.assignment.threads.max", 30);
  this.threadPoolExecutorService = Threads.getBoundedCachedThreadPool(
    maxThreads, 60L, TimeUnit.SECONDS, Threads.newDaemonThreadFactory("AM."));
  this.regionStates = new RegionStates(server, serverManager, regionStateStore);
  this.bulkAssignWaitTillAllAssigned =
    conf.getBoolean("hbase.bulk.assignment.waittillallassigned", false);
  this.bulkAssignThresholdRegions = conf.getInt("hbase.bulk.assignment.threshold.regions", 7);
  this.bulkAssignThresholdServers = conf.getInt("hbase.bulk.assignment.threshold.servers", 3);
  // One pool of single-purpose workers for processing ZK transition events.
  int workers = conf.getInt("hbase.assignment.zkevent.workers", 20);
  ThreadFactory threadFactory = Threads.newDaemonThreadFactory("AM.ZK.Worker");
  zkEventWorkers = Threads.getBoundedCachedThreadPool(workers, 60L,
      TimeUnit.SECONDS, threadFactory);
  this.tableLockManager = tableLockManager;
  this.metricsAssignmentManager = new MetricsAssignmentManager();
  useZKForAssignment = ConfigUtil.useZKForAssignment(conf);
}
/**
 * Starts the timeout monitor daemon thread, but only when timeout
 * management was enabled at construction time (otherwise the monitor
 * field is null and there is nothing to start).
 */
void startTimeOutMonitor() {
  if (!tomActivated) {
    return;
  }
  Threads.setDaemonThreadRunning(timeoutMonitor.getThread(),
    server.getServerName() + ".timeoutMonitor");
}
/**
 * Subscribe a listener to assignment event notifications.
 * @param listener the AssignmentListener to add to the notification list
 */
public void registerListener(final AssignmentListener listener) {
  listeners.add(listener);
}
/**
 * Unsubscribe a previously registered listener.
 * @param listener the AssignmentListener to drop from the notification list
 * @return true if the listener was present and has been removed
 */
public boolean unregisterListener(final AssignmentListener listener) {
  return listeners.remove(listener);
}
/**
 * Exposes the shared ZKTable instance.
 * <p>
 * ZKTable instances are 'expensive' to create (they involve a trip to the
 * ZooKeeper ensemble), so a single one is built at construction time and
 * shared through this accessor.
 * @return the shared ZKTable
 */
public ZKTable getZKTable() {
  return zkTable;
}
/**
 * Accessor for the region state bookkeeping object.
 * <p>
 * This SHOULD not be public; it is public only because some unit tests
 * depend on it.
 *
 * TODO: make it package private and keep RegionStates in the master package
 * @return the RegionStates tracker
 */
public RegionStates getRegionStates() {
  return this.regionStates;
}
/**
 * Used in some tests to mock up region state in meta.
 * @return the RegionStateStore backing this manager
 */
@VisibleForTesting
RegionStateStore getRegionStateStore() {
  return this.regionStateStore;
}
/**
 * Builds a plan for reopening a region in place: the destination is left
 * null and the source is whichever server currently hosts the region
 * according to {@link RegionStates}.
 * @param hri the region to reopen
 * @return a RegionPlan sourced at the region's current host
 */
public RegionPlan getRegionReopenPlan(HRegionInfo hri) {
  ServerName currentHost = regionStates.getRegionServerOfRegion(hri);
  return new RegionPlan(hri, null, currentHost);
}
/**
 * Add a regionPlan for the specified region.
 * @param encodedName encoded name of the region the plan applies to
 * @param plan the movement plan to record
 */
public void addPlan(String encodedName, RegionPlan plan) {
  // regionPlans requires external synchronization for all access.
  synchronized (regionPlans) {
    regionPlans.put(encodedName, plan);
  }
}
/**
 * Record a batch of region plans under a single synchronization.
 * @param plans map from encoded region name to its movement plan
 */
public void addPlans(Map<String, RegionPlan> plans) {
  // regionPlans requires external synchronization for all access.
  synchronized (regionPlans) {
    regionPlans.putAll(plans);
  }
}
/**
 * Set the list of regions that will be reopened
 * because of an update in table schema
 *
 * @param regions list of regions that should be tracked for reopen
 */
public void setRegionsToReopen(List <HRegionInfo> regions) {
  for (HRegionInfo region : regions) {
    // Keyed by encoded name; later lookups (e.g. getReopenStatus) use it.
    regionsToReopen.put(region.getEncodedName(), region);
  }
}
/**
 * Used by the client to identify if all regions have the schema updates.
 *
 * @param tableName table whose alter progress is being queried
 * @return Pair of (regions still pending reopen, total regions in table)
 * @throws IOException if the meta scan fails
 */
public Pair<Integer, Integer> getReopenStatus(TableName tableName)
    throws IOException {
  List<HRegionInfo> tableRegions =
    MetaReader.getTableRegions(this.server.getCatalogTracker(), tableName, true);
  int pendingCount = 0;
  for (HRegionInfo region : tableRegions) {
    String encodedName = region.getEncodedName();
    // no lock concurrent access ok: sequential consistency respected.
    if (regionsToReopen.containsKey(encodedName)
        || regionStates.isRegionInTransition(encodedName)) {
      pendingCount++;
    }
  }
  return new Pair<Integer, Integer>(pendingCount, tableRegions.size());
}
/**
 * Used by ServerShutdownHandler to make sure AssignmentManager has completed
 * the failover cleanup before re-assigning regions of dead servers. So that
 * when re-assignment happens, AssignmentManager has proper region states.
 * @return true once failover cleanup has completed
 */
public boolean isFailoverCleanupDone() {
  return this.failoverCleanupDone.get();
}
/**
 * To avoid racing with AM, external entities may need to lock a region,
 * for example, when SSH checks what regions to skip re-assigning.
 * @param encodedName encoded region name to lock on
 * @return the acquired lock; the caller is responsible for unlocking it
 */
public Lock acquireRegionLock(final String encodedName) {
  return this.locker.acquireLock(encodedName);
}
/**
 * Now, failover cleanup is completed. Notify server manager to
 * process queued up dead servers processing, if any.
 */
void failoverCleanupDone() {
  // Flip the flag before kicking off dead-server processing so that
  // isFailoverCleanupDone() observers see the completed state.
  failoverCleanupDone.set(true);
  serverManager.processQueuedDeadServers();
}
/**
 * Called on startup.
 * Figures whether a fresh cluster start of we are joining extant running cluster.
 * <p>
 * Rebuilds user region assignments from hbase:meta, reconstructs any
 * in-transition/dead-server state, then invokes recovery for tables left in
 * disabling/enabling state.
 * @throws IOException
 * @throws KeeperException
 * @throws InterruptedException
 */
void joinCluster() throws IOException, KeeperException, InterruptedException {
  long startTime = System.currentTimeMillis();
  // Concurrency note: regionsInTransition is read here without the usual
  // synchronization; safe because startup runs on a single thread.
  // TODO: Regions that have a null location and are not in regionsInTransitions
  // need to be handled.

  // Scan hbase:meta to build list of existing regions, servers, and assignment;
  // returns servers who have not checked in (assumed dead) and their regions.
  Map<ServerName, List<HRegionInfo>> deadServers = rebuildUserRegions();

  // Assigns all user regions on a clean server startup, or reconstructs
  // master state and cleans up leftovers from a previous master process.
  boolean failover = processDeadServersAndRegionsInTransition(deadServers);

  if (!useZKForAssignment) {
    // ZK is no longer used for assignment, so drop the stale ZNode tree.
    ZKUtil.deleteNodeRecursively(watcher, watcher.assignmentZNode);
  }
  recoverTableInDisablingState();
  recoverTableInEnablingState();
  LOG.info("Joined the cluster in " + (System.currentTimeMillis() - startTime)
    + "ms, failover=" + failover);
}
/**
 * Process all regions that are in transition in zookeeper and also
 * processes the list of dead servers by scanning the META.
 * Used by master joining an cluster. If we figure this is a clean cluster
 * startup, will assign all user regions.
 * <p>
 * Failover is detected by a series of checks, any one of which flips
 * {@code failover} to true: (1) known dead servers exist, (2) a non-meta
 * region is already assigned to an online server, (3) a non-meta region is
 * in transition (in ZK nodes, or in region states when not using ZK for
 * assignment), (4) a queued dead server still has unsplit HLogs on disk.
 * @param deadServers
 *          Map of dead servers and their regions. Can be null.
 * @return true if this startup is being handled as a failover, false for a
 *         clean cluster startup
 * @throws KeeperException
 * @throws IOException
 * @throws InterruptedException
 */
boolean processDeadServersAndRegionsInTransition(
    final Map<ServerName, List<HRegionInfo>> deadServers)
    throws KeeperException, IOException, InterruptedException {
  List<String> nodes = ZKUtil.listChildrenNoWatch(watcher,
    watcher.assignmentZNode);

  if (nodes == null && useZKForAssignment) {
    // Cannot enumerate the assignment znodes; abort the master.
    String errorMessage = "Failed to get the children from ZK";
    server.abort(errorMessage, new IOException(errorMessage));
    return true; // Doesn't matter in this case
  }

  // Check 1: any dead servers already known to the ServerManager?
  boolean failover = !serverManager.getDeadServers().isEmpty();
  if (failover) {
    // This may not be a failover actually, especially if meta is on this master.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Found dead servers out on cluster " + serverManager.getDeadServers());
    }
  } else {
    // Check 2: if any one region except meta is assigned, it's a failover.
    Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
    for (Map.Entry<HRegionInfo, ServerName> en : regionStates.getRegionAssignments().entrySet()) {
      HRegionInfo hri = en.getKey();
      if (!hri.isMetaTable() && onlineServers.contains(en.getValue())) {
        LOG.debug("Found " + hri + " out on cluster");
        failover = true;
        break;
      }
    }
  }

  if (!failover && nodes != null) {
    // Check 3a: if any one region except meta is in transition (per ZK),
    // it's a failover.
    for (String encodedName : nodes) {
      RegionState regionState = regionStates.getRegionState(encodedName);
      if (regionState != null && !regionState.getRegion().isMetaRegion()) {
        LOG.debug("Found " + regionState + " in RITs");
        failover = true;
        break;
      }
    }
  }

  if (!failover && !useZKForAssignment) {
    // Check 3b (ZK-less assignment): any non-meta region in transition on a
    // live server means failover.
    Map<String, RegionState> regionsInTransition = regionStates.getRegionsInTransition();
    if (!regionsInTransition.isEmpty()) {
      Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
      for (RegionState regionState : regionsInTransition.values()) {
        if (!regionState.getRegion().isMetaRegion()
            && onlineServers.contains(regionState.getServerName())) {
          LOG.debug("Found " + regionState + " in RITs");
          failover = true;
          break;
        }
      }
    }
  }

  if (!failover) {
    // Check 4: if we get here, we have a full cluster restart. It is a failover only
    // if there are some HLogs are not split yet. For meta HLogs, they should have
    // been split already, if any. We can walk through those queued dead servers,
    // if they don't have any HLogs, this restart should be considered as a clean one
    Set<ServerName> queuedDeadServers = serverManager.getRequeuedDeadServers().keySet();
    if (!queuedDeadServers.isEmpty()) {
      Configuration conf = server.getConfiguration();
      Path rootdir = FSUtils.getRootDir(conf);
      FileSystem fs = rootdir.getFileSystem(conf);
      for (ServerName serverName : queuedDeadServers) {
        // A surviving log dir (or one mid-split) means unrecovered edits.
        Path logDir = new Path(rootdir, HLogUtil.getHLogDirectoryName(serverName.toString()));
        Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
        if (fs.exists(logDir) || fs.exists(splitDir)) {
          LOG.debug("Found queued dead server " + serverName);
          failover = true;
          break;
        }
      }
      if (!failover) {
        // We figured that it's not a failover, so no need to
        // work on these re-queued dead servers any more.
        LOG.info("AM figured that it's not a failover and cleaned up " + queuedDeadServers.size()
          + " queued dead servers");
        serverManager.removeRequeuedDeadServers();
      }
    }
  }

  Set<TableName> disabledOrDisablingOrEnabling = null;
  if (!failover) {
    disabledOrDisablingOrEnabling = ZKTable.getDisabledOrDisablingTables(watcher);

    disabledOrDisablingOrEnabling.addAll(ZKTable.getEnablingTables(watcher));

    // Clean re/start, mark all user regions closed before reassignment
    // TODO -Hbase-11319
    regionStates.closeAllUserRegions(disabledOrDisablingOrEnabling);
  }

  // Now region states are restored
  regionStateStore.start();

  // If we found user regions out on cluster, its a failover.
  if (failover) {
    LOG.info("Found regions out on cluster or in RIT; presuming failover");
    // Process list of dead servers and regions in RIT.
    // See HBASE-4580 for more information.
    processDeadServersAndRecoverLostRegions(deadServers);
  }

  if (!failover && useZKForAssignment) {
    // Cleanup any existing ZK nodes and start watching
    ZKAssign.deleteAllNodes(watcher);
    ZKUtil.listChildrenAndWatchForNewChildren(this.watcher,
      this.watcher.assignmentZNode);
  }

  // Now we can safely claim failover cleanup completed and enable
  // ServerShutdownHandler for further processing. The nodes (below)
  // in transition, if any, are for regions not related to those
  // dead servers at all, and can be done in parallel to SSH.
  failoverCleanupDone();
  if (!failover) {
    // Fresh cluster startup.
    LOG.info("Clean cluster startup. Assigning user regions");
    assignAllUserRegions(disabledOrDisablingOrEnabling);
  }
  return failover;
}
/**
 * If region is up in zk in transition, then do fixup and block and wait until
 * the region is assigned and out of transition. Used on startup for
 * catalog regions.
 * @param hri Region to look for.
 * @return True if we processed a region in transition else false if region
 * was not up in zk in transition.
 * @throws InterruptedException
 * @throws KeeperException
 * @throws IOException
 */
boolean processRegionInTransitionAndBlockUntilAssigned(final HRegionInfo hri)
    throws InterruptedException, KeeperException, IOException {
  String encodedRegionName = hri.getEncodedName();
  if (!processRegionInTransition(encodedRegionName, hri)) {
    return false; // The region is not in transition
  }
  LOG.debug("Waiting on " + HRegionInfo.prettyPrint(encodedRegionName));
  // Poll until the region leaves transition, the server stops, or the
  // transition is no longer making progress on an online server.
  while (!this.server.isStopped() &&
      this.regionStates.isRegionInTransition(encodedRegionName)) {
    RegionState state = this.regionStates.getRegionTransitionState(encodedRegionName);
    if (state == null || !serverManager.isServerOnline(state.getServerName())) {
      // The region is not in transition, or not in transition on an online
      // server. Doesn't help to block here any more. Caller need to
      // verify the region is actually assigned.
      break;
    }
    // Bounded wait so the stop/online checks above are re-evaluated.
    this.regionStates.waitForUpdate(100);
  }
  return true;
}
/**
 * Process failover of new master for region <code>encodedRegionName</code>
 * up in zookeeper.
 * <p>
 * Reads the region's transition znode under a per-region lock and, if the
 * data parses, hands off to {@link #processRegionsInTransition} together
 * with the znode version so later transitions can be CAS-checked.
 * @param encodedRegionName Region to process failover for.
 * @param regionInfo If null we'll go get it from meta table.
 * @return True if we processed <code>regionInfo</code> as a RIT.
 * @throws KeeperException
 * @throws IOException
 */
boolean processRegionInTransition(final String encodedRegionName,
    final HRegionInfo regionInfo) throws KeeperException, IOException {
  // We need a lock here to ensure that we will not put the same region twice
  // It has no reason to be a lock shared with the other operations.
  // We can do the lock on the region only, instead of a global lock: what we want to ensure
  // is that we don't have two threads working on the same region.
  Lock lock = locker.acquireLock(encodedRegionName);
  try {
    Stat stat = new Stat();
    byte [] data = ZKAssign.getDataAndWatch(watcher, encodedRegionName, stat);
    if (data == null) return false;  // No znode: region is not in transition.
    RegionTransition rt;
    try {
      rt = RegionTransition.parseFrom(data);
    } catch (DeserializationException e) {
      // Unparseable znode data; treat as not-in-transition.
      LOG.warn("Failed parse znode data", e);
      return false;
    }
    HRegionInfo hri = regionInfo;
    if (hri == null) {
      // The region info is not passed in. We will try to find the region
      // from region states map/meta based on the encoded region name. But we
      // may not be able to find it. This is valid for online merge that
      // the region may have not been created if the merge is not completed.
      // Therefore, it is not in meta at master recovery time.
      hri = regionStates.getRegionInfo(rt.getRegionName());
      EventType et = rt.getEventType();
      if (hri == null && et != EventType.RS_ZK_REGION_MERGING
          && et != EventType.RS_ZK_REQUEST_REGION_MERGE) {
        LOG.warn("Couldn't find the region in recovering " + rt);
        return false;
      }
    }
    // Pass the znode version so subsequent updates can detect staleness.
    return processRegionsInTransition(
      rt, hri, stat.getVersion());
  } finally {
    lock.unlock();
  }
}
/**
 * This call is invoked only (1) master assign meta;
 * (2) during failover mode startup, zk assignment node processing.
 * The locker is set in the caller. It returns true if the region
 * is in transition for sure, false otherwise.
 * <p>
 * Dispatches on the znode's {@link EventType}: CLOSING/OFFLINE transitions
 * are re-driven asynchronously via the executor service; CLOSED/FAILED_OPEN
 * trigger re-assignment; OPENED is handled synchronously; split/merge
 * events are delegated to the corresponding handlers.
 *
 * It should be private but it is used by some test too.
 * @param rt the parsed region transition read from ZK
 * @param regionInfo the region being transitioned; may be null only for
 *          merge events (see caller)
 * @param expectedVersion znode version at read time, used for CAS updates
 * @return true if the region is in transition for sure, false otherwise
 * @throws KeeperException
 */
boolean processRegionsInTransition(
    final RegionTransition rt, final HRegionInfo regionInfo,
    final int expectedVersion) throws KeeperException {
  EventType et = rt.getEventType();
  // Get ServerName.  Could not be null.
  final ServerName sn = rt.getServerName();
  final byte[] regionName = rt.getRegionName();
  final String encodedName = HRegionInfo.encodeRegionName(regionName);
  final String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName);
  LOG.info("Processing " + prettyPrintedRegionName + " in state: " + et);

  if (regionStates.isRegionInTransition(encodedName)
      && (regionInfo.isMetaRegion() || !useZKForAssignment)) {
    // Already being handled; avoid double-driving the same transition.
    LOG.info("Processed region " + prettyPrintedRegionName + " in state: "
      + et + ", does nothing since the region is already in transition "
      + regionStates.getRegionTransitionState(encodedName));
    // Just return
    return true;
  }
  if (!serverManager.isServerOnline(sn)) {
    // It was transitioning on a dead server, so it's closed now.
    // Force to OFFLINE and put it in transition, but not assign it
    // since log splitting for the dead server is not done yet.
    LOG.debug("RIT " + encodedName + " in state=" + rt.getEventType() +
      " was on deadserver; forcing offline");
    if (regionStates.isRegionOnline(regionInfo)) {
      // Meta could still show the region is assigned to the previous
      // server. If that server is online, when we reload the meta, the
      // region is put back to online, we need to offline it.
      regionStates.regionOffline(regionInfo);
      sendRegionClosedNotification(regionInfo);
    }
    // Put it back in transition so that SSH can re-assign it
    regionStates.updateRegionState(regionInfo, State.OFFLINE, sn);

    if (regionInfo.isMetaRegion()) {
      // If it's meta region, reset the meta location.
      // So that master knows the right meta region server.
      MetaRegionTracker.setMetaLocation(watcher, sn);
    } else {
      // No matter the previous server is online or offline,
      // we need to reset the last region server of the region.
      regionStates.setLastRegionServerOfRegion(sn, encodedName);
      // Make sure we know the server is dead.
      if (!serverManager.isServerDead(sn)) {
        serverManager.expireServer(sn);
      }
    }
    return false;
  }
  switch (et) {
    case M_ZK_REGION_CLOSING:
      // Insert into RIT & resend the query to the region server: may be the previous master
      // died before sending the query the first time.
      final RegionState rsClosing = regionStates.updateRegionState(rt, State.CLOSING);
      this.executorService.submit(
        new EventHandler(server, EventType.M_MASTER_RECOVERY) {
          @Override
          public void process() throws IOException {
            // Re-acquire the per-region lock inside the async handler.
            ReentrantLock lock = locker.acquireLock(regionInfo.getEncodedName());
            try {
              unassign(regionInfo, rsClosing, expectedVersion, null, useZKForAssignment, null);
              if (regionStates.isRegionOffline(regionInfo)) {
                assign(regionInfo, true);
              }
            } finally {
              lock.unlock();
            }
          }
        });
      break;

    case RS_ZK_REGION_CLOSED:
    case RS_ZK_REGION_FAILED_OPEN:
      // Region is closed, insert into RIT and handle it
      regionStates.updateRegionState(regionInfo, State.CLOSED, sn);
      invokeAssign(regionInfo);
      break;

    case M_ZK_REGION_OFFLINE:
      // Insert in RIT and resend to the regionserver
      regionStates.updateRegionState(rt, State.PENDING_OPEN);
      final RegionState rsOffline = regionStates.getRegionState(regionInfo);
      this.executorService.submit(
        new EventHandler(server, EventType.M_MASTER_RECOVERY) {
          @Override
          public void process() throws IOException {
            // Re-acquire the per-region lock inside the async handler.
            ReentrantLock lock = locker.acquireLock(regionInfo.getEncodedName());
            try {
              RegionPlan plan = new RegionPlan(regionInfo, null, sn);
              addPlan(encodedName, plan);
              assign(rsOffline, false, false);
            } finally {
              lock.unlock();
            }
          }
        });
      break;

    case RS_ZK_REGION_OPENING:
      // Already opening somewhere; just record the state.
      regionStates.updateRegionState(rt, State.OPENING);
      break;

    case RS_ZK_REGION_OPENED:
      // Region is opened, insert into RIT and handle it
      // This could be done asynchronously, we would need then to acquire the lock in the
      //  handler.
      regionStates.updateRegionState(rt, State.OPEN);
      new OpenedRegionHandler(server, this, regionInfo, sn, expectedVersion).process();
      break;
    case RS_ZK_REQUEST_REGION_SPLIT:
    case RS_ZK_REGION_SPLITTING:
    case RS_ZK_REGION_SPLIT:
      // Splitting region should be online. We could have skipped it during
      // user region rebuilding since we may consider the split is completed.
      // Put it in SPLITTING state to avoid complications.
      regionStates.regionOnline(regionInfo, sn);
      regionStates.updateRegionState(rt, State.SPLITTING);
      if (!handleRegionSplitting(
          rt, encodedName, prettyPrintedRegionName, sn)) {
        deleteSplittingNode(encodedName, sn);
      }
      break;
    case RS_ZK_REQUEST_REGION_MERGE:
    case RS_ZK_REGION_MERGING:
    case RS_ZK_REGION_MERGED:
      if (!handleRegionMerging(
          rt, encodedName, prettyPrintedRegionName, sn)) {
        deleteMergingNode(encodedName, sn);
      }
      break;
    default:
      throw new IllegalStateException("Received region in state:" + et + " is not valid.");
  }
  LOG.info("Processed region " + prettyPrintedRegionName + " in state "
    + et + ", on " + (serverManager.isServerOnline(sn) ? "" : "dead ")
    + "server: " + sn);
  return true;
}
/**
 * When a region is closed, it should be removed from the regionsToReopen
 * @param hri HRegionInfo of the region which was closed
 */
public void removeClosedRegion(HRegionInfo hri) {
  HRegionInfo removed = regionsToReopen.remove(hri.getEncodedName());
  if (removed != null) {
    LOG.debug("Removed region from reopening regions because it was closed");
  }
}
/**
* Handles various states an unassigned node can be in.
* <p>
* Method is called when a state change is suspected for an unassigned node.
* <p>
* This deals with skipped transitions (we got a CLOSED but didn't see CLOSING
* yet).
* @param rt
* @param expectedVersion
*/
void handleRegion(final RegionTransition rt, int expectedVersion) {
if (rt == null) {
LOG.warn("Unexpected NULL input for RegionTransition rt");
return;
}
final ServerName sn = rt.getServerName();
// Check if this is a special HBCK transition
if (sn.equals(HBCK_CODE_SERVERNAME)) {
handleHBCK(rt);
return;
}
final long createTime = rt.getCreateTime();
final byte[] regionName = rt.getRegionName();
String encodedName = HRegionInfo.encodeRegionName(regionName);
String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName);
// Verify this is a known server
if (!serverManager.isServerOnline(sn)
&& !ignoreStatesRSOffline.contains(rt.getEventType())) {
LOG.warn("Attempted to handle region transition for server but " +
"it is not online: " + prettyPrintedRegionName + ", " + rt);
return;
}
RegionState regionState =
regionStates.getRegionState(encodedName);
long startTime = System.currentTimeMillis();
if (LOG.isDebugEnabled()) {
boolean lateEvent = createTime < (startTime - 15000);
LOG.debug("Handling " + rt.getEventType() +
", server=" + sn + ", region=" +
(prettyPrintedRegionName == null ? "null" : prettyPrintedRegionName) +
(lateEvent ? ", which is more than 15 seconds late" : "") +
", current_state=" + regionState);
}
// We don't do anything for this event,
// so separate it out, no need to lock/unlock anything
if (rt.getEventType() == EventType.M_ZK_REGION_OFFLINE) {
return;
}
// We need a lock on the region as we could update it
Lock lock = locker.acquireLock(encodedName);
try {
RegionState latestState =
regionStates.getRegionState(encodedName);
if ((regionState == null && latestState != null)
|| (regionState != null && latestState == null)
|| (regionState != null && latestState != null
&& latestState.getState() != regionState.getState())) {
LOG.warn("Region state changed from " + regionState + " to "
+ latestState + ", while acquiring lock");
}
long waitedTime = System.currentTimeMillis() - startTime;
if (waitedTime > 5000) {
LOG.warn("Took " + waitedTime + "ms to acquire the lock");
}
regionState = latestState;
switch (rt.getEventType()) {
case RS_ZK_REQUEST_REGION_SPLIT:
case RS_ZK_REGION_SPLITTING:
case RS_ZK_REGION_SPLIT:
if (!handleRegionSplitting(
rt, encodedName, prettyPrintedRegionName, sn)) {
deleteSplittingNode(encodedName, sn);
}
break;
case RS_ZK_REQUEST_REGION_MERGE:
case RS_ZK_REGION_MERGING:
case RS_ZK_REGION_MERGED:
// Merged region is a new region, we can't find it in the region states now.
// However, the two merging regions are not new. They should be in state for merging.
if (!handleRegionMerging(
rt, encodedName, prettyPrintedRegionName, sn)) {
deleteMergingNode(encodedName, sn);
}
break;
case M_ZK_REGION_CLOSING:
// Should see CLOSING after we have asked it to CLOSE or additional
// times after already being in state of CLOSING
if (regionState == null
|| !regionState.isPendingCloseOrClosingOnServer(sn)) {
LOG.warn("Received CLOSING for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_CLOSE/CLOSING here: "
+ regionStates.getRegionState(encodedName));
return;
}
// Transition to CLOSING (or update stamp if already CLOSING)
regionStates.updateRegionState(rt, State.CLOSING);
break;
case RS_ZK_REGION_CLOSED:
// Should see CLOSED after CLOSING but possible after PENDING_CLOSE
if (regionState == null
|| !regionState.isPendingCloseOrClosingOnServer(sn)) {
LOG.warn("Received CLOSED for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_CLOSE/CLOSING here: "
+ regionStates.getRegionState(encodedName));
return;
}
// Handle CLOSED by assigning elsewhere or stopping if a disable
// If we got here all is good. Need to update RegionState -- else
// what follows will fail because not in expected state.
new ClosedRegionHandler(server, this, regionState.getRegion()).process();
updateClosedRegionHandlerTracker(regionState.getRegion());
break;
case RS_ZK_REGION_FAILED_OPEN:
if (regionState == null
|| !regionState.isPendingOpenOrOpeningOnServer(sn)) {
LOG.warn("Received FAILED_OPEN for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: "
+ regionStates.getRegionState(encodedName));
return;
}
AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName);
if (failedOpenCount == null) {
failedOpenCount = new AtomicInteger();
// No need to use putIfAbsent, or extra synchronization since
// this whole handleRegion block is locked on the encoded region
// name, and failedOpenTracker is updated only in this block
failedOpenTracker.put(encodedName, failedOpenCount);
}
if (failedOpenCount.incrementAndGet() >= maximumAttempts) {
regionStates.updateRegionState(rt, State.FAILED_OPEN);
// remove the tracking info to save memory, also reset
// the count for next open initiative
failedOpenTracker.remove(encodedName);
} else {
// Handle this the same as if it were opened and then closed.
regionState = regionStates.updateRegionState(rt, State.CLOSED);
if (regionState != null) {
// When there are more than one region server a new RS is selected as the
// destination and the same is updated in the regionplan. (HBASE-5546)
try {
getRegionPlan(regionState.getRegion(), sn, true);
new ClosedRegionHandler(server, this, regionState.getRegion()).process();
} catch (HBaseIOException e) {
LOG.warn("Failed to get region plan", e);
}
}
}
break;
case RS_ZK_REGION_OPENING:
// Should see OPENING after we have asked it to OPEN or additional
// times after already being in state of OPENING
if (regionState == null
|| !regionState.isPendingOpenOrOpeningOnServer(sn)) {
LOG.warn("Received OPENING for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: "
+ regionStates.getRegionState(encodedName));
return;
}
// Transition to OPENING (or update stamp if already OPENING)
regionStates.updateRegionState(rt, State.OPENING);
break;
case RS_ZK_REGION_OPENED:
// Should see OPENED after OPENING but possible after PENDING_OPEN.
if (regionState == null
|| !regionState.isPendingOpenOrOpeningOnServer(sn)) {
LOG.warn("Received OPENED for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: "
+ regionStates.getRegionState(encodedName));
if (regionState != null) {
// Close it without updating the internal region states,
// so as not to create double assignments in unlucky scenarios
// mentioned in OpenRegionHandler#process
unassign(regionState.getRegion(), null, -1, null, false, sn);
}
return;
}
// Handle OPENED by removing from transition and deleted zk node
regionState = regionStates.updateRegionState(rt, State.OPEN);
if (regionState != null) {
failedOpenTracker.remove(encodedName); // reset the count, if any
new OpenedRegionHandler(
server, this, regionState.getRegion(), sn, expectedVersion).process();
updateOpenedRegionHandlerTracker(regionState.getRegion());
}
break;
default:
throw new IllegalStateException("Received event is not valid.");
}
} finally {
lock.unlock();
}
}
//For unit tests only
boolean wasClosedHandlerCalled(HRegionInfo hri) {
AtomicBoolean b = closedRegionHandlerCalled.get(hri);
//compareAndSet to be sure that unit tests don't see stale values. Means,
//we will return true exactly once unless the handler code resets to true
//this value.
return b == null ? false : b.compareAndSet(true, false);
}
//For unit tests only
boolean wasOpenedHandlerCalled(HRegionInfo hri) {
AtomicBoolean b = openedRegionHandlerCalled.get(hri);
//compareAndSet to be sure that unit tests don't see stale values. Means,
//we will return true exactly once unless the handler code resets to true
//this value.
return b == null ? false : b.compareAndSet(true, false);
}
//For unit tests only
void initializeHandlerTrackers() {
closedRegionHandlerCalled = new HashMap<HRegionInfo, AtomicBoolean>();
openedRegionHandlerCalled = new HashMap<HRegionInfo, AtomicBoolean>();
}
void updateClosedRegionHandlerTracker(HRegionInfo hri) {
if (closedRegionHandlerCalled != null) { //only for unit tests this is true
closedRegionHandlerCalled.put(hri, new AtomicBoolean(true));
}
}
void updateOpenedRegionHandlerTracker(HRegionInfo hri) {
if (openedRegionHandlerCalled != null) { //only for unit tests this is true
openedRegionHandlerCalled.put(hri, new AtomicBoolean(true));
}
}
// TODO: processFavoredNodes might throw an exception, for e.g., if the
// meta could not be contacted/updated. We need to see how seriously to treat
// this problem as. Should we fail the current assignment. We should be able
// to recover from this problem eventually (if the meta couldn't be updated
// things should work normally and eventually get fixed up).
void processFavoredNodes(List<HRegionInfo> regions) throws IOException {
if (!shouldAssignRegionsWithFavoredNodes) return;
// The AM gets the favored nodes info for each region and updates the meta
// table with that info
Map<HRegionInfo, List<ServerName>> regionToFavoredNodes =
new HashMap<HRegionInfo, List<ServerName>>();
for (HRegionInfo region : regions) {
regionToFavoredNodes.put(region,
((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region));
}
FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes, catalogTracker);
}
/**
* Handle a ZK unassigned node transition triggered by HBCK repair tool.
* <p>
* This is handled in a separate code path because it breaks the normal rules.
* @param rt
*/
private void handleHBCK(RegionTransition rt) {
String encodedName = HRegionInfo.encodeRegionName(rt.getRegionName());
LOG.info("Handling HBCK triggered transition=" + rt.getEventType() +
", server=" + rt.getServerName() + ", region=" +
HRegionInfo.prettyPrint(encodedName));
RegionState regionState = regionStates.getRegionTransitionState(encodedName);
switch (rt.getEventType()) {
case M_ZK_REGION_OFFLINE:
HRegionInfo regionInfo;
if (regionState != null) {
regionInfo = regionState.getRegion();
} else {
try {
byte [] name = rt.getRegionName();
Pair<HRegionInfo, ServerName> p = MetaReader.getRegion(catalogTracker, name);
regionInfo = p.getFirst();
} catch (IOException e) {
LOG.info("Exception reading hbase:meta doing HBCK repair operation", e);
return;
}
}
LOG.info("HBCK repair is triggering assignment of region=" +
regionInfo.getRegionNameAsString());
// trigger assign, node is already in OFFLINE so don't need to update ZK
assign(regionInfo, false);
break;
default:
LOG.warn("Received unexpected region state from HBCK: " + rt.toString());
break;
}
}
  // ZooKeeper events
  /**
   * New unassigned node has been created.
   *
   * <p>This happens when an RS begins the OPENING or CLOSING of a region by
   * creating an unassigned node.
   *
   * <p>When this happens we must:
   * <ol>
   * <li>Watch the node for further events</li>
   * <li>Read and handle the state in the node</li>
   * </ol>
   * @param path full ZK path of the newly created node
   */
  @Override
  public void nodeCreated(String path) {
    // Delegates to a per-region, single-threaded worker so that events for
    // the same region are processed strictly in arrival order.
    handleAssignmentEvent(path);
  }
  /**
   * Existing unassigned node has had data changed.
   *
   * <p>This happens when an RS transitions from OFFLINE to OPENING, or between
   * OPENING/OPENED and CLOSING/CLOSED.
   *
   * <p>When this happens we must:
   * <ol>
   * <li>Watch the node for further events</li>
   * <li>Read and handle the state in the node</li>
   * </ol>
   * @param path full ZK path of the changed node
   */
  @Override
  public void nodeDataChanged(String path) {
    // Same ordered per-region handling as nodeCreated.
    handleAssignmentEvent(path);
  }
  // We don't want to have two events on the same region managed simultaneously.
  // For this reason, we need to wait if an event on the same region is currently in progress.
  // So we track the region names of the events in progress, and we keep a waiting list.
  // Guarded by: synchronized (regionsInProgress) -- see zkEventWorkersSubmit().
  private final Set<String> regionsInProgress = new HashSet<String>();
  // In a LinkedHashMultimap, the put order is kept when we retrieve the collection back. We need
  // this as we want the events to be managed in the same order as we received them.
  // Guarded by: synchronized (zkEventWorkerWaitingList).
  private final LinkedHashMultimap <String, RegionRunnable>
    zkEventWorkerWaitingList = LinkedHashMultimap.create();
  /**
   * A specific runnable that works only on a region.
   * Implementations are submitted through zkEventWorkersSubmit(), which
   * serializes execution per region name.
   */
  private interface RegionRunnable extends Runnable{
    /**
     * @return - the name of the region it works on.
     */
    String getRegionName();
  }
  /**
   * Submit a task, ensuring that there is only one task at a time working on a
   * given region. Order is respected.
   * <p>
   * If no task for the region is running, the task is handed to the
   * zkEventWorkers pool immediately and the region is marked in-progress.
   * Otherwise the task is parked in zkEventWorkerWaitingList; when the running
   * task finishes it resubmits the oldest waiting task for that region, so
   * arrival order is preserved per region.
   * <p>
   * Lock ordering is always regionsInProgress, then zkEventWorkerWaitingList.
   */
  protected void zkEventWorkersSubmit(final RegionRunnable regRunnable) {
    synchronized (regionsInProgress) {
      // If there is already a task for this region, we add it to the
      // waiting list and return.
      if (regionsInProgress.contains(regRunnable.getRegionName())) {
        synchronized (zkEventWorkerWaitingList){
          zkEventWorkerWaitingList.put(regRunnable.getRegionName(), regRunnable);
        }
        return;
      }
      // No event in progress on this region => we can submit a new task immediately.
      regionsInProgress.add(regRunnable.getRegionName());
      zkEventWorkers.submit(new Runnable() {
        @Override
        public void run() {
          try {
            regRunnable.run();
          } finally {
            // Now that we have finished, see if there is an event for the same
            // region in the waiting list. If so, submit it to the pool now.
            synchronized (regionsInProgress) {
              regionsInProgress.remove(regRunnable.getRegionName());
              synchronized (zkEventWorkerWaitingList) {
                java.util.Set<RegionRunnable> waiting = zkEventWorkerWaitingList.get(
                    regRunnable.getRegionName());
                if (!waiting.isEmpty()) {
                  // We want the first object only. The only way to get it is through an iterator.
                  RegionRunnable toSubmit = waiting.iterator().next();
                  zkEventWorkerWaitingList.remove(toSubmit.getRegionName(), toSubmit);
                  zkEventWorkersSubmit(toSubmit);
                }
              }
            }
          }
        }
      });
    }
  }
@Override
public void nodeDeleted(final String path) {
if (path.startsWith(watcher.assignmentZNode)) {
final String regionName = ZKAssign.getRegionName(watcher, path);
zkEventWorkersSubmit(new RegionRunnable() {
@Override
public String getRegionName() {
return regionName;
}
@Override
public void run() {
Lock lock = locker.acquireLock(regionName);
try {
RegionState rs = regionStates.getRegionTransitionState(regionName);
if (rs == null) {
rs = regionStates.getRegionState(regionName);
if (rs == null || !rs.isMergingNew()) {
// MergingNew is an offline state
return;
}
}
HRegionInfo regionInfo = rs.getRegion();
String regionNameStr = regionInfo.getRegionNameAsString();
LOG.debug("Znode " + regionNameStr + " deleted, state: " + rs);
boolean disabled = getZKTable().isDisablingOrDisabledTable(regionInfo.getTable());
ServerName serverName = rs.getServerName();
if (serverManager.isServerOnline(serverName)) {
if (rs.isOnServer(serverName)
&& (rs.isOpened() || rs.isSplitting())) {
regionOnline(regionInfo, serverName);
if (disabled) {
// if server is offline, no hurt to unassign again
LOG.info("Opened " + regionNameStr
+ "but this table is disabled, triggering close of region");
unassign(regionInfo);
}
} else if (rs.isMergingNew()) {
synchronized (regionStates) {
String p = regionInfo.getEncodedName();
PairOfSameType<HRegionInfo> regions = mergingRegions.get(p);
if (regions != null) {
onlineMergingRegion(disabled, regions.getFirst(), serverName);
onlineMergingRegion(disabled, regions.getSecond(), serverName);
}
}
}
}
} finally {
lock.unlock();
}
}
private void onlineMergingRegion(boolean disabled,
final HRegionInfo hri, final ServerName serverName) {
RegionState regionState = regionStates.getRegionState(hri);
if (regionState != null && regionState.isMerging()
&& regionState.isOnServer(serverName)) {
regionOnline(regionState.getRegion(), serverName);
if (disabled) {
unassign(hri);
}
}
}
});
}
}
/**
* New unassigned node has been created.
*
* <p>This happens when an RS begins the OPENING, SPLITTING or CLOSING of a
* region by creating a znode.
*
* <p>When this happens we must:
* <ol>
* <li>Watch the node for further children changed events</li>
* <li>Watch all new children for changed events</li>
* </ol>
*/
@Override
public void nodeChildrenChanged(String path) {
if (path.equals(watcher.assignmentZNode)) {
zkEventWorkers.submit(new Runnable() {
@Override
public void run() {
try {
// Just make sure we see the changes for the new znodes
List<String> children =
ZKUtil.listChildrenAndWatchForNewChildren(
watcher, watcher.assignmentZNode);
if (children != null) {
Stat stat = new Stat();
for (String child : children) {
// if region is in transition, we already have a watch
// on it, so no need to watch it again. So, as I know for now,
// this is needed to watch splitting nodes only.
if (!regionStates.isRegionInTransition(child)) {
ZKAssign.getDataAndWatch(watcher, child, stat);
}
}
}
} catch (KeeperException e) {
server.abort("Unexpected ZK exception reading unassigned children", e);
}
}
});
}
}
  /**
   * Marks the region as online. Removes it from regions in transition and
   * updates the in-memory assignment information.
   * <p>
   * Used when a region has been successfully opened on a region server.
   * @param regionInfo region that came online
   * @param sn server hosting the region
   */
  void regionOnline(HRegionInfo regionInfo, ServerName sn) {
    // No open sequence number known in this code path.
    regionOnline(regionInfo, sn, HConstants.NO_SEQNUM);
  }
  /**
   * Marks the region as online on the given server, recording the open
   * sequence number, and notifies the balancer and registered listeners.
   * The call order below is deliberate: state is updated before timers,
   * balancer, and listener notifications fire.
   * @param regionInfo region that came online
   * @param sn server hosting the region
   * @param openSeqNum sequence id reported at open time
   */
  void regionOnline(HRegionInfo regionInfo, ServerName sn, long openSeqNum) {
    numRegionsOpened.incrementAndGet();
    regionStates.regionOnline(regionInfo, sn, openSeqNum);
    // Remove plan if one.
    clearRegionPlan(regionInfo);
    // Add the server to serversInUpdatingTimer so RIT timers get refreshed.
    addToServersInUpdatingTimer(sn);
    balancer.regionOnline(regionInfo, sn);
    // Tell our listeners that a region was opened
    sendRegionOpenedNotification(regionInfo, sn);
  }
/**
* Pass the assignment event to a worker for processing.
* Each worker is a single thread executor service. The reason
* for just one thread is to make sure all events for a given
* region are processed in order.
*
* @param path
*/
private void handleAssignmentEvent(final String path) {
if (path.startsWith(watcher.assignmentZNode)) {
final String regionName = ZKAssign.getRegionName(watcher, path);
zkEventWorkersSubmit(new RegionRunnable() {
@Override
public String getRegionName() {
return regionName;
}
@Override
public void run() {
try {
Stat stat = new Stat();
byte [] data = ZKAssign.getDataAndWatch(watcher, path, stat);
if (data == null) return;
RegionTransition rt = RegionTransition.parseFrom(data);
handleRegion(rt, stat.getVersion());
} catch (KeeperException e) {
server.abort("Unexpected ZK exception reading unassigned node data", e);
} catch (DeserializationException e) {
server.abort("Unexpected exception deserializing node data", e);
}
}
});
}
}
/**
* Add the server to the set serversInUpdatingTimer, then {@link TimerUpdater}
* will update timers for this server in background
* @param sn
*/
private void addToServersInUpdatingTimer(final ServerName sn) {
if (tomActivated){
this.serversInUpdatingTimer.add(sn);
}
}
/**
* Touch timers for all regions in transition that have the passed
* <code>sn</code> in common.
* Call this method whenever a server checks in. Doing so helps the case where
* a new regionserver has joined the cluster and its been given 1k regions to
* open. If this method is tickled every time the region reports in a
* successful open then the 1k-th region won't be timed out just because its
* sitting behind the open of 999 other regions. This method is NOT used
* as part of bulk assign -- there we have a different mechanism for extending
* the regions in transition timer (we turn it off temporarily -- because
* there is no regionplan involved when bulk assigning.
* @param sn
*/
private void updateTimers(final ServerName sn) {
Preconditions.checkState(tomActivated);
if (sn == null) return;
// This loop could be expensive.
// First make a copy of current regionPlan rather than hold sync while
// looping because holding sync can cause deadlock. Its ok in this loop
// if the Map we're going against is a little stale
List<Map.Entry<String, RegionPlan>> rps;
synchronized(this.regionPlans) {
rps = new ArrayList<Map.Entry<String, RegionPlan>>(regionPlans.entrySet());
}
for (Map.Entry<String, RegionPlan> e : rps) {
if (e.getValue() != null && e.getKey() != null && sn.equals(e.getValue().getDestination())) {
RegionState regionState = regionStates.getRegionTransitionState(e.getKey());
if (regionState != null) {
regionState.updateTimestampToNow();
}
}
}
}
  /**
   * Marks the region as offline. Removes it from regions in transition and
   * removes in-memory assignment information.
   * <p>
   * Used when a region has been closed and should remain closed.
   * @param regionInfo region to take offline
   */
  public void regionOffline(final HRegionInfo regionInfo) {
    // null => plain OFFLINE, no special target state.
    regionOffline(regionInfo, null);
  }
public void offlineDisabledRegion(HRegionInfo regionInfo) {
if (useZKForAssignment) {
// Disabling so should not be reassigned, just delete the CLOSED node
LOG.debug("Table being disabled so deleting ZK node and removing from " +
"regions in transition, skipping assignment of region " +
regionInfo.getRegionNameAsString());
String encodedName = regionInfo.getEncodedName();
deleteNodeInStates(encodedName, "closed", null,
EventType.RS_ZK_REGION_CLOSED, EventType.M_ZK_REGION_OFFLINE);
}
regionOffline(regionInfo);
}
  // Assignment methods
  /**
   * Assigns the specified region.
   * <p>
   * If a RegionPlan is available with a valid destination then it will be used
   * to determine what server region is assigned to. If no RegionPlan is
   * available, region will be assigned to a random available server.
   * <p>
   * Updates the RegionState and sends the OPEN RPC.
   * <p>
   * This will only succeed if the region is in transition and in a CLOSED or
   * OFFLINE state or not in transition (in-memory not zk), and of course, the
   * chosen server is up and running (It may have just crashed!). If the
   * in-memory checks pass, the zk node is forced to OFFLINE before assigning.
   *
   * @param region region to be assigned
   * @param setOfflineInZK whether ZK node should be created/transitioned to an
   *                       OFFLINE state before assigning the region
   */
  public void assign(HRegionInfo region, boolean setOfflineInZK) {
    // forceNewPlan=false: reuse an existing region plan when one exists.
    assign(region, setOfflineInZK, false);
  }
/**
* Use care with forceNewPlan. It could cause double assignment.
*/
public void assign(HRegionInfo region,
boolean setOfflineInZK, boolean forceNewPlan) {
if (isDisabledorDisablingRegionInRIT(region)) {
return;
}
if (this.serverManager.isClusterShutdown()) {
LOG.info("Cluster shutdown is set; skipping assign of " +
region.getRegionNameAsString());
return;
}
String encodedName = region.getEncodedName();
Lock lock = locker.acquireLock(encodedName);
try {
RegionState state = forceRegionStateToOffline(region, forceNewPlan);
if (state != null) {
if (regionStates.wasRegionOnDeadServer(encodedName)) {
LOG.info("Skip assigning " + region.getRegionNameAsString()
+ ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName)
+ " is dead but not processed yet");
return;
}
assign(state, setOfflineInZK && useZKForAssignment, forceNewPlan);
}
} finally {
lock.unlock();
}
}
  /**
   * Bulk assign regions to <code>destination</code>.
   * <p>
   * All regions are locked up front; each is forced OFFLINE (creating its
   * unassigned znode asynchronously when ZK-based assignment is in use) and a
   * single OPEN RPC carrying the whole batch is then sent to the server.
   * Regions that cannot participate (state could not be forced offline, znode
   * could not be created, or the RS reports FAILED_OPENING) are re-queued for
   * individual assignment at the end.
   * @param destination server to receive the regions
   * @param regions Regions to assign.
   * @return true if successful; false when the master is stopping or the
   *         destination cannot be reached / was shut down.
   */
  boolean assign(final ServerName destination, final List<HRegionInfo> regions) {
    long startTime = EnvironmentEdgeManager.currentTimeMillis();
    try {
      int regionCount = regions.size();
      if (regionCount == 0) {
        return true;
      }
      LOG.debug("Assigning " + regionCount + " region(s) to " + destination.toString());
      Set<String> encodedNames = new HashSet<String>(regionCount);
      for (HRegionInfo region : regions) {
        encodedNames.add(region.getEncodedName());
      }
      List<HRegionInfo> failedToOpenRegions = new ArrayList<HRegionInfo>();
      Map<String, Lock> locks = locker.acquireLocks(encodedNames);
      try {
        // counter/offlineNodesVersions are filled in asynchronously by the
        // OfflineCallback as each unassigned znode reaches OFFLINE.
        AtomicInteger counter = new AtomicInteger(0);
        Map<String, Integer> offlineNodesVersions = new ConcurrentHashMap<String, Integer>();
        OfflineCallback cb = new OfflineCallback(
          watcher, destination, counter, offlineNodesVersions);
        Map<String, RegionPlan> plans = new HashMap<String, RegionPlan>(regions.size());
        List<RegionState> states = new ArrayList<RegionState>(regions.size());
        for (HRegionInfo region : regions) {
          String encodedName = region.getEncodedName();
          if (!isDisabledorDisablingRegionInRIT(region)) {
            RegionState state = forceRegionStateToOffline(region, false);
            boolean onDeadServer = false;
            if (state != null) {
              if (regionStates.wasRegionOnDeadServer(encodedName)) {
                LOG.info("Skip assigning " + region.getRegionNameAsString()
                  + ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName)
                  + " is dead but not processed yet");
                onDeadServer = true;
              } else if (!useZKForAssignment
                  || asyncSetOfflineInZooKeeper(state, cb, destination)) {
                RegionPlan plan = new RegionPlan(region, state.getServerName(), destination);
                plans.put(encodedName, plan);
                states.add(state);
                continue;
              }
            }
            // Reassign if the region wasn't on a dead server
            if (!onDeadServer) {
              LOG.info("failed to force region state to offline or "
                + "failed to set it offline in ZK, will reassign later: " + region);
              failedToOpenRegions.add(region); // assign individually later
            }
          }
          // Release the lock, this region is excluded from bulk assign because
          // we can't update its state, or set its znode to offline.
          Lock lock = locks.remove(encodedName);
          lock.unlock();
        }
        if (useZKForAssignment) {
          // Wait until all unassigned nodes have been put up and watchers set.
          int total = states.size();
          for (int oldCounter = 0; !server.isStopped();) {
            int count = counter.get();
            if (oldCounter != count) {
              LOG.info(destination.toString() + " unassigned znodes=" + count + " of total="
                + total);
              oldCounter = count;
            }
            if (count >= total) break;
            Threads.sleep(5);
          }
        }
        if (server.isStopped()) {
          return false;
        }
        // Add region plans, so we can updateTimers when one region is opened so
        // that unnecessary timeout on RIT is reduced.
        this.addPlans(plans);
        List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos =
          new ArrayList<Triple<HRegionInfo, Integer, List<ServerName>>>(states.size());
        for (RegionState state: states) {
          HRegionInfo region = state.getRegion();
          String encodedRegionName = region.getEncodedName();
          Integer nodeVersion = offlineNodesVersions.get(encodedRegionName);
          if (useZKForAssignment && (nodeVersion == null || nodeVersion == -1)) {
            // The async OFFLINE transition did not complete for this region.
            LOG.warn("failed to offline in zookeeper: " + region);
            failedToOpenRegions.add(region); // assign individually later
            Lock lock = locks.remove(encodedRegionName);
            lock.unlock();
          } else {
            regionStates.updateRegionState(
              region, State.PENDING_OPEN, destination);
            List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
            if (this.shouldAssignRegionsWithFavoredNodes) {
              favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region);
            }
            regionOpenInfos.add(new Triple<HRegionInfo, Integer, List<ServerName>>(
              region, nodeVersion, favoredNodes));
          }
        }
        // Move on to open regions.
        try {
          // Send OPEN RPC. If it fails on a IOE or RemoteException,
          // regions will be assigned individually.
          long maxWaitTime = System.currentTimeMillis() +
            this.server.getConfiguration().
              getLong("hbase.regionserver.rpc.startup.waittime", 60000);
          for (int i = 1; i <= maximumAttempts && !server.isStopped(); i++) {
            try {
              // regionOpenInfos is empty if all regions are in failedToOpenRegions list
              if (regionOpenInfos.isEmpty()) {
                break;
              }
              List<RegionOpeningState> regionOpeningStateList = serverManager
                .sendRegionOpen(destination, regionOpenInfos);
              if (regionOpeningStateList == null) {
                // Failed getting RPC connection to this server
                return false;
              }
              for (int k = 0, n = regionOpeningStateList.size(); k < n; k++) {
                RegionOpeningState openingState = regionOpeningStateList.get(k);
                if (openingState != RegionOpeningState.OPENED) {
                  HRegionInfo region = regionOpenInfos.get(k).getFirst();
                  if (openingState == RegionOpeningState.ALREADY_OPENED) {
                    processAlreadyOpenedRegion(region, destination);
                  } else if (openingState == RegionOpeningState.FAILED_OPENING) {
                    // Failed opening this region, reassign it later
                    failedToOpenRegions.add(region);
                  } else {
                    LOG.warn("THIS SHOULD NOT HAPPEN: unknown opening state "
                      + openingState + " in assigning region " + region);
                  }
                }
              }
              break;
            } catch (IOException e) {
              if (e instanceof RemoteException) {
                e = ((RemoteException)e).unwrapRemoteException();
              }
              if (e instanceof RegionServerStoppedException) {
                LOG.warn("The region server was shut down, ", e);
                // No need to retry, the region server is a goner.
                return false;
              } else if (e instanceof ServerNotRunningYetException) {
                long now = System.currentTimeMillis();
                if (now < maxWaitTime) {
                  LOG.debug("Server is not yet up; waiting up to " +
                    (maxWaitTime - now) + "ms", e);
                  Thread.sleep(100);
                  i--; // reset the try count
                  continue;
                }
              } else if (e instanceof java.net.SocketTimeoutException
                  && this.serverManager.isServerOnline(destination)) {
                // In case socket is timed out and the region server is still online,
                // the openRegion RPC could have been accepted by the server and
                // just the response didn't go through. So we will retry to
                // open the region on the same server.
                if (LOG.isDebugEnabled()) {
                  LOG.debug("Bulk assigner openRegion() to " + destination
                    + " has timed out, but the regions might"
                    + " already be opened on it.", e);
                }
                // wait and reset the re-try count, server might be just busy.
                Thread.sleep(100);
                i--;
                continue;
              }
              throw e;
            }
          }
        } catch (IOException e) {
          // Can be a socket timeout, EOF, NoRouteToHost, etc
          LOG.info("Unable to communicate with " + destination
            + " in order to assign regions, ", e);
          return false;
        } catch (InterruptedException e) {
          throw new RuntimeException(e);
        }
      } finally {
        for (Lock lock : locks.values()) {
          lock.unlock();
        }
      }
      if (!failedToOpenRegions.isEmpty()) {
        // Everything not opened above is retried one region at a time.
        for (HRegionInfo region : failedToOpenRegions) {
          if (!regionStates.isRegionOnline(region)) {
            invokeAssign(region);
          }
        }
      }
      LOG.debug("Bulk assigning done for " + destination);
      return true;
    } finally {
      metricsAssignmentManager.updateBulkAssignTime(EnvironmentEdgeManager.currentTimeMillis() - startTime);
    }
  }
  /**
   * Send CLOSE RPC if the server is online, otherwise, offline the region.
   *
   * The RPC will be sent only to the region server found in the region state
   * if it is passed in, otherwise, to the src server specified. If region
   * state is not specified, we don't update region state at all, instead
   * we just send the RPC call. This is useful for some cleanup without
   * messing around the region states (see handleRegion, on region opened
   * on an unexpected server scenario, for an example)
   * @param region region to close
   * @param state current in-memory transition state, or null to skip all state
   *        bookkeeping and only fire the RPC
   * @param versionOfClosingNode expected znode version handed to the RS
   * @param dest intended destination after the close (may be null)
   * @param transitionInZK whether the RS should transition the znode itself
   * @param src server to contact when no region state is supplied
   */
  private void unassign(final HRegionInfo region,
      final RegionState state, final int versionOfClosingNode,
      final ServerName dest, final boolean transitionInZK,
      final ServerName src) {
    ServerName server = src;
    if (state != null) {
      // Prefer the server recorded in the region state over the caller's src.
      server = state.getServerName();
    }
    long maxWaitTime = -1;
    for (int i = 1; i <= this.maximumAttempts; i++) {
      if (this.server.isStopped() || this.server.isAborted()) {
        LOG.debug("Server stopped/aborted; skipping unassign of " + region);
        return;
      }
      // ClosedRegionhandler can remove the server from this.regions
      if (!serverManager.isServerOnline(server)) {
        LOG.debug("Offline " + region.getRegionNameAsString()
          + ", no need to unassign since it's on a dead server: " + server);
        if (transitionInZK) {
          // delete the node. if no node exists need not bother.
          deleteClosingOrClosedNode(region, server);
        }
        if (state != null) {
          regionOffline(region);
        }
        return;
      }
      try {
        // Send CLOSE RPC
        if (serverManager.sendRegionClose(server, region,
          versionOfClosingNode, dest, transitionInZK)) {
          LOG.debug("Sent CLOSE to " + server + " for region " +
            region.getRegionNameAsString());
          if (useZKForAssignment && !transitionInZK && state != null) {
            // Retry (recursively) to make sure the region is
            // closed so as to avoid double assignment.
            unassign(region, state, versionOfClosingNode,
              dest, transitionInZK, src);
          }
          return;
        }
        // This never happens. Currently regionserver close always return true.
        // Todo; this can now happen (0.96) if there is an exception in a coprocessor
        LOG.warn("Server " + server + " region CLOSE RPC returned false for " +
          region.getRegionNameAsString());
      } catch (Throwable t) {
        if (t instanceof RemoteException) {
          t = ((RemoteException)t).unwrapRemoteException();
        }
        boolean logRetries = true;
        if (t instanceof NotServingRegionException
            || t instanceof RegionServerStoppedException
            || t instanceof ServerNotRunningYetException) {
          // The region is effectively not on that server any more: clean up.
          LOG.debug("Offline " + region.getRegionNameAsString()
            + ", it's not any more on " + server, t);
          if (transitionInZK) {
            deleteClosingOrClosedNode(region, server);
          }
          if (state != null) {
            regionOffline(region);
          }
          return;
        } else if ((t instanceof FailedServerException) || (state != null &&
            t instanceof RegionAlreadyInTransitionException)) {
          long sleepTime = 0;
          Configuration conf = this.server.getConfiguration();
          if(t instanceof FailedServerException) {
            // Back off until the failed-server cache entry expires.
            sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
              RpcClient.FAILED_SERVER_EXPIRY_DEFAULT);
          } else {
            // RS is already processing this region, only need to update the timestamp
            LOG.debug("update " + state + " the timestamp.");
            state.updateTimestampToNow();
            if (maxWaitTime < 0) {
              maxWaitTime =
                EnvironmentEdgeManager.currentTimeMillis()
                  + conf.getLong(ALREADY_IN_TRANSITION_WAITTIME,
                    DEFAULT_ALREADY_IN_TRANSITION_WAITTIME);
            }
            long now = EnvironmentEdgeManager.currentTimeMillis();
            if (now < maxWaitTime) {
              LOG.debug("Region is already in transition; "
                + "waiting up to " + (maxWaitTime - now) + "ms", t);
              sleepTime = 100;
              i--; // reset the try count
              logRetries = false;
            }
          }
          try {
            if (sleepTime > 0) {
              Thread.sleep(sleepTime);
            }
          } catch (InterruptedException ie) {
            LOG.warn("Failed to unassign "
              + region.getRegionNameAsString() + " since interrupted", ie);
            Thread.currentThread().interrupt();
            if (!tomActivated && state != null) {
              regionStates.updateRegionState(region, State.FAILED_CLOSE);
            }
            return;
          }
        }
        if (logRetries) {
          LOG.info("Server " + server + " returned " + t + " for "
            + region.getRegionNameAsString() + ", try=" + i
            + " of " + this.maximumAttempts, t);
          // Presume retry or server will expire.
        }
      }
    }
    // Run out of attempts
    if (!tomActivated && state != null) {
      regionStates.updateRegionState(region, State.FAILED_CLOSE);
    }
  }
  /**
   * Set region to OFFLINE unless it is opening and forceNewPlan is false.
   * <p>
   * NOTE: the switch below uses deliberate case fall-through. The
   * OPEN/OPENING/PENDING_OPEN/CLOSING/PENDING_CLOSE group proceeds only when
   * forceNewPlan is set, falling into the FAILED_CLOSE/FAILED_OPEN handling
   * (which unassigns first); that in turn falls into the OFFLINE dead-server
   * check, which falls into CLOSED where the state is accepted.
   * @return the region's state if it may be assigned now, else null to skip.
   */
  private RegionState forceRegionStateToOffline(
      final HRegionInfo region, final boolean forceNewPlan) {
    RegionState state = regionStates.getRegionState(region);
    if (state == null) {
      LOG.warn("Assigning a region not in region states: " + region);
      state = regionStates.createRegionState(region);
    }
    ServerName sn = state.getServerName();
    if (forceNewPlan && LOG.isDebugEnabled()) {
      LOG.debug("Force region state offline " + state);
    }
    switch (state.getState()) {
    case OPEN:
    case OPENING:
    case PENDING_OPEN:
    case CLOSING:
    case PENDING_CLOSE:
      if (!forceNewPlan) {
        LOG.debug("Skip assigning " +
          region + ", it is already " + state);
        return null;
      }
      // Intentional fall-through: with forceNewPlan, unassign first below.
    case FAILED_CLOSE:
    case FAILED_OPEN:
      unassign(region, state, -1, null, false, null);
      state = regionStates.getRegionState(region);
      if (state.isFailedClose()) {
        // If we can't close the region, we can't re-assign
        // it so as to avoid possible double assignment/data loss.
        LOG.info("Skip assigning " +
          region + ", we couldn't close it: " + state);
        return null;
      }
      // Intentional fall-through to the dead-server check below.
    case OFFLINE:
      // This region could have been open on this server
      // for a while. If the server is dead and not processed
      // yet, we can move on only if the meta shows the
      // region is not on this server actually, or on a server
      // not dead, or dead and processed already.
      // In case not using ZK, we don't need this check because
      // we have the latest info in memory, and the caller
      // will do another round checking any way.
      if (useZKForAssignment
          && regionStates.isServerDeadAndNotProcessed(sn)
          && wasRegionOnDeadServerByMeta(region, sn)) {
        if (!regionStates.isRegionInTransition(region)) {
          LOG.info("Updating the state to " + State.OFFLINE + " to allow to be reassigned by SSH");
          regionStates.updateRegionState(region, State.OFFLINE);
        }
        LOG.info("Skip assigning " + region.getRegionNameAsString()
          + ", it is on a dead but not processed yet server: " + sn);
        return null;
      }
      // Intentional fall-through: an OFFLINE region is assignable.
    case CLOSED:
      break;
    default:
      LOG.error("Trying to assign region " + region
        + ", which is " + state);
      return null;
    }
    return state;
  }
private boolean wasRegionOnDeadServerByMeta(
final HRegionInfo region, final ServerName sn) {
try {
if (region.isMetaRegion()) {
ServerName server = catalogTracker.getMetaLocation();
return regionStates.isServerDeadAndNotProcessed(server);
}
while (!server.isStopped()) {
try {
catalogTracker.waitForMeta();
Result r = MetaReader.getRegionResult(catalogTracker, region.getRegionName());
if (r == null || r.isEmpty()) return false;
ServerName server = HRegionInfo.getServerName(r);
return regionStates.isServerDeadAndNotProcessed(server);
} catch (IOException ioe) {
LOG.info("Received exception accessing hbase:meta during force assign "
+ region.getRegionNameAsString() + ", retrying", ioe);
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
LOG.info("Interrupted accessing hbase:meta", e);
}
// Call is interrupted or server is stopped.
return regionStates.isServerDeadAndNotProcessed(sn);
}
  /**
   * Assigns the region held in the passed <code>state</code>: picks a plan,
   * optionally marks the region OFFLINE in ZooKeeper, then sends the open RPC
   * to the chosen server, retrying up to the configured maximum attempts.
   * Caller must hold lock on the passed <code>state</code> object.
   *
   * @param state region state of the region to assign; updated as the assign
   *   progresses (PENDING_OPEN, OFFLINE, FAILED_OPEN)
   * @param setOfflineInZK whether to force the unassigned znode to OFFLINE
   *   before sending the open RPC (ZK-based assignment path)
   * @param forceNewPlan if true, ignore any cached region plan
   */
  private void assign(RegionState state,
      final boolean setOfflineInZK, final boolean forceNewPlan) {
    long startTime = EnvironmentEdgeManager.currentTimeMillis();
    try {
      Configuration conf = server.getConfiguration();
      RegionState currentState = state;
      int versionOfOfflineNode = -1;
      RegionPlan plan = null;
      long maxWaitTime = -1;
      HRegionInfo region = state.getRegion();
      RegionOpeningState regionOpenState;
      Throwable previousException = null;
      // Each iteration is one assignment attempt; note that i is decremented
      // in some branches below so those retries don't consume an attempt.
      for (int i = 1; i <= maximumAttempts; i++) {
        if (server.isStopped() || server.isAborted()) {
          LOG.info("Skip assigning " + region.getRegionNameAsString()
            + ", the server is stopped/aborted");
          return;
        }
        if (plan == null) { // Get a server for the region at first
          try {
            plan = getRegionPlan(region, forceNewPlan);
          } catch (HBaseIOException e) {
            LOG.warn("Failed to get region plan", e);
          }
        }
        if (plan == null) {
          LOG.warn("Unable to determine a plan to assign " + region);
          if (tomActivated){
            this.timeoutMonitor.setAllRegionServersOffline(true);
          } else {
            if (region.isMetaRegion()) {
              // hbase:meta must be assigned; keep retrying forever by
              // resetting the attempt counter after a sleep.
              try {
                Thread.sleep(this.sleepTimeBeforeRetryingMetaAssignment);
                if (i == maximumAttempts) i = 1;
                continue;
              } catch (InterruptedException e) {
                LOG.error("Got exception while waiting for hbase:meta assignment");
                Thread.currentThread().interrupt();
              }
            }
            regionStates.updateRegionState(region, State.FAILED_OPEN);
          }
          return;
        }
        if (setOfflineInZK && versionOfOfflineNode == -1) {
          // get the version of the znode after setting it to OFFLINE.
          // versionOfOfflineNode will be -1 if the znode was not set to OFFLINE
          versionOfOfflineNode = setOfflineInZooKeeper(currentState, plan.getDestination());
          if (versionOfOfflineNode != -1) {
            if (isDisabledorDisablingRegionInRIT(region)) {
              return;
            }
            // In case of assignment from EnableTableHandler table state is ENABLING. Any how
            // EnableTableHandler will set ENABLED after assigning all the table regions. If we
            // try to set to ENABLED directly then client API may think table is enabled.
            // When we have a case such as all the regions are added directly into hbase:meta and we call
            // assignRegion then we need to make the table ENABLED. Hence in such case the table
            // will not be in ENABLING or ENABLED state.
            TableName tableName = region.getTable();
            if (!zkTable.isEnablingTable(tableName) && !zkTable.isEnabledTable(tableName)) {
              LOG.debug("Setting table " + tableName + " to ENABLED state.");
              setEnabledTable(tableName);
            }
          }
        }
        if (setOfflineInZK && versionOfOfflineNode == -1) {
          LOG.info("Unable to set offline in ZooKeeper to assign " + region);
          // Setting offline in ZK must have been failed due to ZK racing or some
          // exception which may make the server to abort. If it is ZK racing,
          // we should retry since we already reset the region state,
          // existing (re)assignment will fail anyway.
          if (!server.isAborted()) {
            continue;
          }
        }
        LOG.info("Assigning " + region.getRegionNameAsString() +
            " to " + plan.getDestination().toString());
        // Transition RegionState to PENDING_OPEN
        currentState = regionStates.updateRegionState(region,
          State.PENDING_OPEN, plan.getDestination());

        boolean needNewPlan;
        final String assignMsg = "Failed assignment of " + region.getRegionNameAsString() +
            " to " + plan.getDestination();
        try {
          List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
          if (this.shouldAssignRegionsWithFavoredNodes) {
            favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region);
          }
          regionOpenState = serverManager.sendRegionOpen(
              plan.getDestination(), region, versionOfOfflineNode, favoredNodes);

          if (regionOpenState == RegionOpeningState.FAILED_OPENING) {
            // Failed opening this region, looping again on a new server.
            needNewPlan = true;
            LOG.warn(assignMsg + ", regionserver says 'FAILED_OPENING', " +
                " trying to assign elsewhere instead; " +
                "try=" + i + " of " + this.maximumAttempts);
          } else {
            // we're done
            if (regionOpenState == RegionOpeningState.ALREADY_OPENED) {
              processAlreadyOpenedRegion(region, plan.getDestination());
            }
            return;
          }

        } catch (Throwable t) {
          if (t instanceof RemoteException) {
            t = ((RemoteException) t).unwrapRemoteException();
          }
          previousException = t;

          // Should we wait a little before retrying? If the server is starting it's yes.
          // If the region is already in transition, it's yes as well: we want to be sure that
          // the region will get opened but we don't want a double assignment.
          boolean hold = (t instanceof RegionAlreadyInTransitionException ||
              t instanceof ServerNotRunningYetException);

          // In case socket is timed out and the region server is still online,
          // the openRegion RPC could have been accepted by the server and
          // just the response didn't go through. So we will retry to
          // open the region on the same server to avoid possible
          // double assignment.
          boolean retry = !hold && (t instanceof java.net.SocketTimeoutException
              && this.serverManager.isServerOnline(plan.getDestination()));

          if (hold) {
            LOG.warn(assignMsg + ", waiting a little before trying on the same region server " +
              "try=" + i + " of " + this.maximumAttempts, t);

            if (maxWaitTime < 0) {
              if (t instanceof RegionAlreadyInTransitionException) {
                maxWaitTime = EnvironmentEdgeManager.currentTimeMillis()
                  + this.server.getConfiguration().getLong(ALREADY_IN_TRANSITION_WAITTIME,
                    DEFAULT_ALREADY_IN_TRANSITION_WAITTIME);
              } else {
                maxWaitTime = EnvironmentEdgeManager.currentTimeMillis()
                  + this.server.getConfiguration().getLong(
                    "hbase.regionserver.rpc.startup.waittime", 60000);
              }
            }
            try {
              needNewPlan = false;
              long now = EnvironmentEdgeManager.currentTimeMillis();
              if (now < maxWaitTime) {
                LOG.debug("Server is not yet up or region is already in transition; "
                  + "waiting up to " + (maxWaitTime - now) + "ms", t);
                Thread.sleep(100);
                i--; // reset the try count
              } else if (!(t instanceof RegionAlreadyInTransitionException)) {
                LOG.debug("Server is not up for a while; try a new one", t);
                needNewPlan = true;
              }
            } catch (InterruptedException ie) {
              LOG.warn("Failed to assign "
                  + region.getRegionNameAsString() + " since interrupted", ie);
              Thread.currentThread().interrupt();
              if (!tomActivated) {
                regionStates.updateRegionState(region, State.FAILED_OPEN);
              }
              return;
            }
          } else if (retry) {
            needNewPlan = false;
            i--; // we want to retry as many times as needed as long as the RS is not dead.
            LOG.warn(assignMsg + ", trying to assign to the same region server due ", t);
          } else {
            needNewPlan = true;
            LOG.warn(assignMsg + ", trying to assign elsewhere instead;" +
                " try=" + i + " of " + this.maximumAttempts, t);
          }
        }

        if (i == this.maximumAttempts) {
          // Don't reset the region state or get a new plan any more.
          // This is the last try.
          continue;
        }

        // If region opened on destination of present plan, reassigning to new
        // RS may cause double assignments. In case of RegionAlreadyInTransitionException
        // reassigning to same RS.
        if (needNewPlan) {
          // Force a new plan and reassign. Will return null if no servers.
          // The new plan could be the same as the existing plan since we don't
          // exclude the server of the original plan, which should not be
          // excluded since it could be the only server up now.
          RegionPlan newPlan = null;
          try {
            newPlan = getRegionPlan(region, true);
          } catch (HBaseIOException e) {
            LOG.warn("Failed to get region plan", e);
          }
          if (newPlan == null) {
            if (tomActivated) {
              this.timeoutMonitor.setAllRegionServersOffline(true);
            } else {
              regionStates.updateRegionState(region, State.FAILED_OPEN);
            }
            LOG.warn("Unable to find a viable location to assign region " +
                region.getRegionNameAsString());
            return;
          }

          if (plan != newPlan && !plan.getDestination().equals(newPlan.getDestination())) {
            // Clean out plan we failed execute and one that doesn't look like it'll
            // succeed anyways; we need a new plan!
            // Transition back to OFFLINE
            currentState = regionStates.updateRegionState(region, State.OFFLINE);
            versionOfOfflineNode = -1;
            plan = newPlan;
          } else if(plan.getDestination().equals(newPlan.getDestination()) &&
              previousException instanceof FailedServerException) {
            // Same destination and the server was in the failed-server list:
            // wait out the failed-server expiry before retrying against it.
            try {
              LOG.info("Trying to re-assign " + region.getRegionNameAsString() +
                " to the same failed server.");
              Thread.sleep(1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
                RpcClient.FAILED_SERVER_EXPIRY_DEFAULT));
            } catch (InterruptedException ie) {
              LOG.warn("Failed to assign "
                  + region.getRegionNameAsString() + " since interrupted", ie);
              Thread.currentThread().interrupt();
              if (!tomActivated) {
                regionStates.updateRegionState(region, State.FAILED_OPEN);
              }
              return;
            }
          }
        }
      }
      // Run out of attempts
      if (!tomActivated) {
        regionStates.updateRegionState(region, State.FAILED_OPEN);
      }
    } finally {
      metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTimeMillis() - startTime);
    }
  }
private void processAlreadyOpenedRegion(HRegionInfo region, ServerName sn) {
// Remove region from in-memory transition and unassigned node from ZK
// While trying to enable the table the regions of the table were
// already enabled.
LOG.debug("ALREADY_OPENED " + region.getRegionNameAsString()
+ " to " + sn);
String encodedName = region.getEncodedName();
deleteNodeInStates(encodedName, "offline", sn, EventType.M_ZK_REGION_OFFLINE);
regionStates.regionOnline(region, sn);
}
private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) {
TableName tableName = region.getTable();
boolean disabled = this.zkTable.isDisabledTable(tableName);
if (disabled || this.zkTable.isDisablingTable(tableName)) {
LOG.info("Table " + tableName + (disabled ? " disabled;" : " disabling;") +
" skipping assign of " + region.getRegionNameAsString());
offlineDisabledRegion(region);
return true;
}
return false;
}
/**
* Set region as OFFLINED up in zookeeper
*
* @param state
* @return the version of the offline node if setting of the OFFLINE node was
* successful, -1 otherwise.
*/
private int setOfflineInZooKeeper(final RegionState state, final ServerName destination) {
if (!state.isClosed() && !state.isOffline()) {
String msg = "Unexpected state : " + state + " .. Cannot transit it to OFFLINE.";
this.server.abort(msg, new IllegalStateException(msg));
return -1;
}
regionStates.updateRegionState(state.getRegion(), State.OFFLINE);
int versionOfOfflineNode;
try {
// get the version after setting the znode to OFFLINE
versionOfOfflineNode = ZKAssign.createOrForceNodeOffline(watcher,
state.getRegion(), destination);
if (versionOfOfflineNode == -1) {
LOG.warn("Attempted to create/force node into OFFLINE state before "
+ "completing assignment but failed to do so for " + state);
return -1;
}
} catch (KeeperException e) {
server.abort("Unexpected ZK exception creating/setting node OFFLINE", e);
return -1;
}
return versionOfOfflineNode;
}
  /**
   * Gets the assignment plan for the passed region without excluding any
   * server from the candidate list.
   *
   * @param region the region to assign
   * @param forceNewPlan if true, a new plan is generated even if one exists
   * @return Plan for passed <code>region</code> (If none currently, it creates one or
   *         if no servers to assign, it returns null).
   * @throws HBaseIOException propagated from plan computation
   */
  private RegionPlan getRegionPlan(final HRegionInfo region,
      final boolean forceNewPlan) throws HBaseIOException {
    // Delegate to the full overload with no server excluded.
    return getRegionPlan(region, null, forceNewPlan);
  }
  /**
   * @param region the region to assign
   * @param serverToExclude Server to exclude (we know its bad). Pass null if
   *     all servers are thought to be assignable.
   * @param forceNewPlan If true, then if an existing plan exists, a new plan
   *     will be generated.
   * @return Plan for passed <code>region</code> (If none currently, it creates one or
   *     if no servers to assign, it returns null).
   * @throws HBaseIOException propagated from the balancer's assignment call
   */
  private RegionPlan getRegionPlan(final HRegionInfo region,
      final ServerName serverToExclude, final boolean forceNewPlan) throws HBaseIOException {
    // Pickup existing plan or make a new one
    final String encodedName = region.getEncodedName();
    final List<ServerName> destServers =
      serverManager.createDestinationServersList(serverToExclude);

    if (destServers.isEmpty()){
      LOG.warn("Can't move " + encodedName +
        ", there is no destination server available.");
      return null;
    }

    RegionPlan randomPlan = null;
    boolean newPlan = false;
    RegionPlan existingPlan;

    synchronized (this.regionPlans) {
      // Read/replace the cached plan under the regionPlans lock so that
      // concurrent assigns for the same region agree on one plan.
      existingPlan = this.regionPlans.get(encodedName);

      if (existingPlan != null && existingPlan.getDestination() != null) {
        LOG.debug("Found an existing plan for " + region.getRegionNameAsString()
          + " destination server is " + existingPlan.getDestination() +
          " accepted as a dest server = " + destServers.contains(existingPlan.getDestination()));
      }

      // A new plan is needed when forced, when there is no usable cached
      // plan, or when the cached destination is no longer a live candidate.
      if (forceNewPlan
          || existingPlan == null
          || existingPlan.getDestination() == null
          || !destServers.contains(existingPlan.getDestination())) {
        newPlan = true;
        randomPlan = new RegionPlan(region, null,
            balancer.randomAssignment(region, destServers));
        if (!region.isMetaTable() && shouldAssignRegionsWithFavoredNodes) {
          List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
          regions.add(region);
          try {
            processFavoredNodes(regions);
          } catch (IOException ie) {
            LOG.warn("Ignoring exception in processFavoredNodes " + ie);
          }
        }
        this.regionPlans.put(encodedName, randomPlan);
      }
    }

    if (newPlan) {
      if (randomPlan.getDestination() == null) {
        LOG.warn("Can't find a destination for " + encodedName);
        return null;
      }
      LOG.debug("No previous transition plan found (or ignoring " +
        "an existing plan) for " + region.getRegionNameAsString() +
        "; generated random plan=" + randomPlan + "; " +
        serverManager.countOfRegionServers() +
        " (online=" + serverManager.getOnlineServers().size() +
        ", available=" + destServers.size() + ") available servers" +
        ", forceNewPlan=" + forceNewPlan);
      return randomPlan;
    }

    LOG.debug("Using pre-existing plan for " +
      region.getRegionNameAsString() + "; plan=" + existingPlan);
    return existingPlan;
  }
  /**
   * Unassigns the specified region.
   * <p>
   * Updates the RegionState and sends the CLOSE RPC unless region is being
   * split by regionserver; then the unassign fails (silently) because we
   * presume the region being unassigned no longer exists (its been split out
   * of existence). TODO: What to do if split fails and is rolled back and
   * parent is revivified?
   * <p>
   * If a RegionPlan is already set, it will remain.
   *
   * @param region the region to be unassigned
   */
  public void unassign(HRegionInfo region) {
    // Non-forced unassign: a region already closing is left alone.
    unassign(region, false);
  }
  /**
   * Unassigns the specified region.
   * <p>
   * Updates the RegionState and sends the CLOSE RPC unless region is being
   * split by regionserver; then the unassign fails (silently) because we
   * presume the region being unassigned no longer exists (its been split out
   * of existence). TODO: What to do if split fails and is rolled back and
   * parent is revivified?
   * <p>
   * If a RegionPlan is already set, it will remain.
   *
   * @param region the region to be unassigned
   * @param force if region should be closed even if already closing
   * @param dest intended destination for a later reassign; may be null when
   *   no particular destination is preferred
   */
  public void unassign(HRegionInfo region, boolean force, ServerName dest) {
    // TODO: Method needs refactoring. Ugly buried returns throughout. Beware!
    LOG.debug("Starting unassign of " + region.getRegionNameAsString()
      + " (offlining), current state: " + regionStates.getRegionState(region));

    String encodedName = region.getEncodedName();
    // Grab the state of this region and synchronize on it
    int versionOfClosingNode = -1;
    // We need a lock here as we're going to do a put later and we don't want multiple states
    // creation
    ReentrantLock lock = locker.acquireLock(encodedName);
    RegionState state = regionStates.getRegionTransitionState(encodedName);
    boolean reassign = true;
    try {
      if (state == null) {
        // Region is not in transition.
        // We can unassign it only if it's not SPLIT/MERGED.
        state = regionStates.getRegionState(encodedName);
        if (state != null && state.isUnassignable()) {
          LOG.info("Attempting to unassign " + state + ", ignored");
          // Offline region will be reassigned below
          return;
        }
        // Create the znode in CLOSING state
        try {
          if (state == null || state.getServerName() == null) {
            // We don't know where the region is, offline it.
            // No need to send CLOSE RPC
            LOG.warn("Attempting to unassign a region not in RegionStates"
              + region.getRegionNameAsString() + ", offlined");
            regionOffline(region);
            return;
          }
          if (useZKForAssignment) {
            versionOfClosingNode = ZKAssign.createNodeClosing(
              watcher, region, state.getServerName());
            if (versionOfClosingNode == -1) {
              LOG.info("Attempting to unassign " +
                region.getRegionNameAsString() + " but ZK closing node "
                + "can't be created.");
              reassign = false; // not unassigned at all
              return;
            }
          }
        } catch (KeeperException e) {
          if (e instanceof NodeExistsException) {
            // Handle race between master initiated close and regionserver
            // orchestrated splitting. See if existing node is in a
            // SPLITTING or SPLIT state. If so, the regionserver started
            // an op on node before we could get our CLOSING in. Deal.
            NodeExistsException nee = (NodeExistsException)e;
            String path = nee.getPath();
            try {
              if (isSplitOrSplittingOrMergedOrMerging(path)) {
                LOG.debug(path + " is SPLIT or SPLITTING or MERGED or MERGING; " +
                  "skipping unassign because region no longer exists -- its split or merge");
                reassign = false; // no need to reassign for split/merged region
                return;
              }
            } catch (KeeperException.NoNodeException ke) {
              LOG.warn("Failed getData on SPLITTING/SPLIT at " + path +
                "; presuming split and that the region to unassign, " +
                encodedName + ", no longer exists -- confirm", ke);
              return;
            } catch (KeeperException ke) {
              LOG.error("Unexpected zk state", ke);
            } catch (DeserializationException de) {
              LOG.error("Failed parse", de);
            }
          }
          // If we get here, don't understand whats going on -- abort.
          server.abort("Unexpected ZK exception creating node CLOSING", e);
          reassign = false; // heading out already
          return;
        }
        state = regionStates.updateRegionState(region, State.PENDING_CLOSE);
      } else if (state.isFailedOpen()) {
        // The region is not open yet
        regionOffline(region);
        return;
      } else if (force && state.isPendingCloseOrClosing()) {
        LOG.debug("Attempting to unassign " + region.getRegionNameAsString() +
          " which is already " + state.getState() +
          " but forcing to send a CLOSE RPC again ");
        if (state.isFailedClose()) {
          state = regionStates.updateRegionState(region, State.PENDING_CLOSE);
        }
        state.updateTimestampToNow();
      } else {
        LOG.debug("Attempting to unassign " +
          region.getRegionNameAsString() + " but it is " +
          "already in transition (" + state.getState() + ", force=" + force + ")");
        return;
      }

      unassign(region, state, versionOfClosingNode, dest, useZKForAssignment, null);
    } finally {
      lock.unlock();

      // Region is expected to be reassigned afterwards
      if (reassign && regionStates.isRegionOffline(region)) {
        assign(region, true);
      }
    }
  }
  /**
   * Unassigns the specified region with no preferred destination server.
   *
   * @param region the region to be unassigned
   * @param force if region should be closed even if already closing
   */
  public void unassign(HRegionInfo region, boolean force){
     unassign(region, force, null);
  }
/**
* @param region regioninfo of znode to be deleted.
*/
public void deleteClosingOrClosedNode(HRegionInfo region, ServerName sn) {
String encodedName = region.getEncodedName();
deleteNodeInStates(encodedName, "closing", sn, EventType.M_ZK_REGION_CLOSING,
EventType.RS_ZK_REGION_CLOSED);
}
/**
* @param path
* @return True if znode is in SPLIT or SPLITTING or MERGED or MERGING state.
* @throws KeeperException Can happen if the znode went away in meantime.
* @throws DeserializationException
*/
private boolean isSplitOrSplittingOrMergedOrMerging(final String path)
throws KeeperException, DeserializationException {
boolean result = false;
// This may fail if the SPLIT or SPLITTING or MERGED or MERGING znode gets
// cleaned up before we can get data from it.
byte [] data = ZKAssign.getData(watcher, path);
if (data == null) {
LOG.info("Node " + path + " is gone");
return false;
}
RegionTransition rt = RegionTransition.parseFrom(data);
switch (rt.getEventType()) {
case RS_ZK_REQUEST_REGION_SPLIT:
case RS_ZK_REGION_SPLIT:
case RS_ZK_REGION_SPLITTING:
case RS_ZK_REQUEST_REGION_MERGE:
case RS_ZK_REGION_MERGED:
case RS_ZK_REGION_MERGING:
result = true;
break;
default:
LOG.info("Node " + path + " is in " + rt.getEventType());
break;
}
return result;
}
  /**
   * Used by unit tests. Return the number of regions opened so far in the life
   * of the master. Increases by one every time the master opens a region.
   *
   * @return the counter value of the number of regions opened so far
   */
  public int getNumRegionsOpened() {
    return numRegionsOpened.get();
  }
/**
* Waits until the specified region has completed assignment.
* <p>
* If the region is already assigned, returns immediately. Otherwise, method
* blocks until the region is assigned.
* @param regionInfo region to wait on assignment for
* @throws InterruptedException
*/
public boolean waitForAssignment(HRegionInfo regionInfo)
throws InterruptedException {
while (!regionStates.isRegionOnline(regionInfo)) {
if (regionStates.isRegionInState(regionInfo, State.FAILED_OPEN)
|| this.server.isStopped()) {
return false;
}
// We should receive a notification, but it's
// better to have a timeout to recheck the condition here:
// it lowers the impact of a race condition if any
regionStates.waitForUpdate(100);
}
return true;
}
  /**
   * Assigns the hbase:meta region.
   * <p>
   * Assumes that hbase:meta is currently closed and is not being actively served by
   * any RegionServer.
   * <p>
   * Forcibly unsets the current meta region location in ZooKeeper and assigns
   * hbase:meta to a random RegionServer.
   *
   * @throws KeeperException if the meta location znode cannot be deleted
   */
  public void assignMeta() throws KeeperException {
    // Clear the stale location first so nobody chases the old server.
    MetaRegionTracker.deleteMetaLocation(this.watcher);
    assign(HRegionInfo.FIRST_META_REGIONINFO, true);
  }
/**
* Assigns specified regions retaining assignments, if any.
* <p>
* This is a synchronous call and will return once every region has been
* assigned. If anything fails, an exception is thrown
* @throws InterruptedException
* @throws IOException
*/
public void assign(Map<HRegionInfo, ServerName> regions)
throws IOException, InterruptedException {
if (regions == null || regions.isEmpty()) {
return;
}
List<ServerName> servers = serverManager.createDestinationServersList();
if (servers == null || servers.isEmpty()) {
throw new IOException("Found no destination server to assign region(s)");
}
// Reuse existing assignment info
Map<ServerName, List<HRegionInfo>> bulkPlan =
balancer.retainAssignment(regions, servers);
assign(regions.size(), servers.size(),
"retainAssignment=true", bulkPlan);
}
/**
* Assigns specified regions round robin, if any.
* <p>
* This is a synchronous call and will return once every region has been
* assigned. If anything fails, an exception is thrown
* @throws InterruptedException
* @throws IOException
*/
public void assign(List<HRegionInfo> regions)
throws IOException, InterruptedException {
if (regions == null || regions.isEmpty()) {
return;
}
List<ServerName> servers = serverManager.createDestinationServersList();
if (servers == null || servers.isEmpty()) {
throw new IOException("Found no destination server to assign region(s)");
}
// Generate a round-robin bulk assignment plan
Map<ServerName, List<HRegionInfo>> bulkPlan
= balancer.roundRobinAssignment(regions, servers);
processFavoredNodes(regions);
assign(regions.size(), servers.size(),
"round-robin=true", bulkPlan);
}
private void assign(int regions, int totalServers,
String message, Map<ServerName, List<HRegionInfo>> bulkPlan)
throws InterruptedException, IOException {
int servers = bulkPlan.size();
if (servers == 1 || (regions < bulkAssignThresholdRegions
&& servers < bulkAssignThresholdServers)) {
// Not use bulk assignment. This could be more efficient in small
// cluster, especially mini cluster for testing, so that tests won't time out
if (LOG.isTraceEnabled()) {
LOG.trace("Not using bulk assignment since we are assigning only " + regions +
" region(s) to " + servers + " server(s)");
}
for (Map.Entry<ServerName, List<HRegionInfo>> plan: bulkPlan.entrySet()) {
if (!assign(plan.getKey(), plan.getValue())) {
for (HRegionInfo region: plan.getValue()) {
if (!regionStates.isRegionOnline(region)) {
invokeAssign(region);
}
}
}
}
} else {
LOG.info("Bulk assigning " + regions + " region(s) across "
+ totalServers + " server(s), " + message);
// Use fixed count thread pool assigning.
BulkAssigner ba = new GeneralBulkAssigner(
this.server, bulkPlan, this, bulkAssignWaitTillAllAssigned);
ba.bulkAssign();
LOG.info("Bulk assigning done");
}
}
  /**
   * Assigns all user regions, if any exist. Used during cluster startup.
   * <p>
   * This is a synchronous call and will return once every region has been
   * assigned. If anything fails, an exception is thrown and the cluster
   * should be shutdown.
   *
   * @param disabledOrDisablingOrEnabling tables whose regions are skipped in
   *   the startup scan of hbase:meta
   * @throws InterruptedException
   * @throws IOException
   * @throws KeeperException
   */
  private void assignAllUserRegions(Set<TableName> disabledOrDisablingOrEnabling)
      throws IOException, InterruptedException, KeeperException {
    // Skip assignment for regions of tables in DISABLING state because during clean cluster startup
    // no RS is alive and regions map also doesn't have any information about the regions.
    // See HBASE-6281.

    // Scan hbase:meta for all user regions, skipping any disabled tables
    Map<HRegionInfo, ServerName> allRegions;
    SnapshotOfRegionAssignmentFromMeta snapshotOfRegionAssignment =
      new SnapshotOfRegionAssignmentFromMeta(catalogTracker, disabledOrDisablingOrEnabling, true);
    snapshotOfRegionAssignment.initialize();
    allRegions = snapshotOfRegionAssignment.getRegionToRegionServerMap();
    if (allRegions == null || allRegions.isEmpty()) {
      return;
    }

    // Determine what type of assignment to do on startup
    boolean retainAssignment = server.getConfiguration().
      getBoolean("hbase.master.startup.retainassign", true);

    if (retainAssignment) {
      // Put each region back on the server it was on before the restart.
      assign(allRegions);
    } else {
      // Fresh round-robin assignment, ignoring previous locations.
      List<HRegionInfo> regions = new ArrayList<HRegionInfo>(allRegions.keySet());
      assign(regions);
    }

    for (HRegionInfo hri : allRegions.keySet()) {
      TableName tableName = hri.getTable();
      // Mark each touched table ENABLED if it isn't already.
      if (!zkTable.isEnabledTable(tableName)) {
        setEnabledTable(tableName);
      }
    }
  }
/**
* Wait until no regions in transition.
* @param timeout How long to wait.
* @return True if nothing in regions in transition.
* @throws InterruptedException
*/
boolean waitUntilNoRegionsInTransition(final long timeout)
throws InterruptedException {
// Blocks until there are no regions in transition. It is possible that
// there
// are regions in transition immediately after this returns but guarantees
// that if it returns without an exception that there was a period of time
// with no regions in transition from the point-of-view of the in-memory
// state of the Master.
final long endTime = System.currentTimeMillis() + timeout;
while (!this.server.isStopped() && regionStates.isRegionsInTransition()
&& endTime > System.currentTimeMillis()) {
regionStates.waitForUpdate(100);
}
return !regionStates.isRegionsInTransition();
}
  /**
   * Rebuild the list of user regions and assignment information.
   * <p>
   * Returns a map of servers that are not found to be online and the regions
   * they were hosting.
   *
   * @return map of servers not online to their assigned regions, as stored
   *         in META
   * @throws IOException on failure scanning hbase:meta
   * @throws KeeperException on failure reading table state from ZooKeeper
   */
  Map<ServerName, List<HRegionInfo>> rebuildUserRegions() throws IOException, KeeperException {
    Set<TableName> enablingTables = ZKTable.getEnablingTables(watcher);
    Set<TableName> disabledOrEnablingTables = ZKTable.getDisabledTables(watcher);
    disabledOrEnablingTables.addAll(enablingTables);
    Set<TableName> disabledOrDisablingOrEnabling = ZKTable.getDisablingTables(watcher);
    disabledOrDisablingOrEnabling.addAll(disabledOrEnablingTables);

    // Region assignment from META
    List<Result> results = MetaReader.fullScan(this.catalogTracker);
    // Get any new but slow to checkin region server that joined the cluster
    Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
    // Map of offline servers and their regions to be returned
    Map<ServerName, List<HRegionInfo>> offlineServers =
      new TreeMap<ServerName, List<HRegionInfo>>();
    // Iterate regions in META
    for (Result result : results) {
      HRegionInfo regionInfo = HRegionInfo.getHRegionInfo(result);
      if (regionInfo == null) continue;
      State state = RegionStateStore.getRegionState(result);
      ServerName regionLocation = RegionStateStore.getRegionServer(result);
      regionStates.createRegionState(regionInfo, state, regionLocation);
      if (!regionStates.isRegionInState(regionInfo, State.OPEN)) {
        // Region is not open (either offline or in transition), skip
        continue;
      }
      TableName tableName = regionInfo.getTable();
      if (!onlineServers.contains(regionLocation)) {
        // Region is located on a server that isn't online
        List<HRegionInfo> offlineRegions = offlineServers.get(regionLocation);
        if (offlineRegions == null) {
          offlineRegions = new ArrayList<HRegionInfo>(1);
          offlineServers.put(regionLocation, offlineRegions);
        }
        if (useZKForAssignment) {
          regionStates.regionOffline(regionInfo);
        }
        offlineRegions.add(regionInfo);
      } else if (!disabledOrEnablingTables.contains(tableName)) {
        // Region is being served and on an active server
        // add only if region not in disabled or enabling table
        regionStates.updateRegionState(regionInfo, State.OPEN, regionLocation);
        regionStates.regionOnline(regionInfo, regionLocation);
        balancer.regionOnline(regionInfo, regionLocation);
      } else if (useZKForAssignment) {
        regionStates.regionOffline(regionInfo);
      }
      // need to enable the table if not disabled or disabling or enabling
      // this will be used in rolling restarts
      if (!disabledOrDisablingOrEnabling.contains(tableName)
        && !getZKTable().isEnabledTable(tableName)) {
        setEnabledTable(tableName);
      }
    }
    return offlineServers;
  }
/**
* Recover the tables that were not fully moved to DISABLED state. These
* tables are in DISABLING state when the master restarted/switched.
*
* @throws KeeperException
* @throws TableNotFoundException
* @throws IOException
*/
private void recoverTableInDisablingState()
throws KeeperException, TableNotFoundException, IOException {
Set<TableName> disablingTables = ZKTable.getDisablingTables(watcher);
if (disablingTables.size() != 0) {
for (TableName tableName : disablingTables) {
// Recover by calling DisableTableHandler
LOG.info("The table " + tableName
+ " is in DISABLING state. Hence recovering by moving the table"
+ " to DISABLED state.");
new DisableTableHandler(this.server, tableName, catalogTracker,
this, tableLockManager, true).prepare().process();
}
}
}
/**
* Recover the tables that are not fully moved to ENABLED state. These tables
* are in ENABLING state when the master restarted/switched
*
* @throws KeeperException
* @throws org.apache.hadoop.hbase.TableNotFoundException
* @throws IOException
*/
private void recoverTableInEnablingState()
throws KeeperException, TableNotFoundException, IOException {
Set<TableName> enablingTables = ZKTable.getEnablingTables(watcher);
if (enablingTables.size() != 0) {
for (TableName tableName : enablingTables) {
// Recover by calling EnableTableHandler
LOG.info("The table " + tableName
+ " is in ENABLING state. Hence recovering by moving the table"
+ " to ENABLED state.");
// enableTable in sync way during master startup,
// no need to invoke coprocessor
EnableTableHandler eth = new EnableTableHandler(this.server, tableName,
catalogTracker, this, tableLockManager, true);
try {
eth.prepare();
} catch (TableNotFoundException e) {
LOG.warn("Table " + tableName + " not found in hbase:meta to recover.");
continue;
}
eth.process();
}
}
}
  /**
   * Processes list of dead servers from result of hbase:meta scan and regions in RIT
   * <p>
   * This is used for failover to recover the lost regions that belonged to
   * RegionServers which failed while there was no active master or regions
   * that were in RIT.
   *
   * @param deadServers
   *          The list of dead servers which failed while there was no active
   *          master. Can be null.
   * @throws IOException
   * @throws KeeperException
   */
  private void processDeadServersAndRecoverLostRegions(
      Map<ServerName, List<HRegionInfo>> deadServers)
          throws IOException, KeeperException {
    if (deadServers != null) {
      for (Map.Entry<ServerName, List<HRegionInfo>> server: deadServers.entrySet()) {
        ServerName serverName = server.getKey();
        // We need to keep such info even if the server is known dead
        regionStates.setLastRegionServerOfRegions(serverName, server.getValue());
        if (!serverManager.isServerDead(serverName)) {
          serverManager.expireServer(serverName); // Let SSH do region re-assign
        }
      }
    }

    List<String> nodes = useZKForAssignment ?
      ZKUtil.listChildrenAndWatchForNewChildren(watcher, watcher.assignmentZNode)
      : ZKUtil.listChildrenNoWatch(watcher, watcher.assignmentZNode);
    if (nodes != null && !nodes.isEmpty()) {
      // There are unassigned znodes: replay each region in transition.
      for (String encodedRegionName : nodes) {
        processRegionInTransition(encodedRegionName, null);
      }
    } else if (!useZKForAssignment) {
      // We need to send RPC call again for PENDING_OPEN/PENDING_CLOSE regions
      // in case the RPC call is not sent out yet before the master was shut down
      // since we update the state before we send the RPC call. We can't update
      // the state after the RPC call. Otherwise, we don't know what's happened
      // to the region if the master dies right after the RPC call is out.
      Map<String, RegionState> rits = regionStates.getRegionsInTransition();
      for (RegionState regionState: rits.values()) {
        if (!serverManager.isServerOnline(regionState.getServerName())) {
          continue; // SSH will handle it
        }
        State state = regionState.getState();
        LOG.info("Processing " + regionState);
        switch (state) {
        case CLOSED:
          invokeAssign(regionState.getRegion());
          break;
        case PENDING_OPEN:
          retrySendRegionOpen(regionState);
          break;
        case PENDING_CLOSE:
          retrySendRegionClose(regionState);
          break;
        default:
          // No process for other states
        }
      }
    }
  }
  /**
   * At master failover, for pending_open region, make sure
   * sendRegionOpen RPC call is sent to the target regionserver.
   * Retries transient RPC failures; re-assigns on permanent failures.
   */
  private void retrySendRegionOpen(final RegionState regionState) {
    this.executorService.submit(
      new EventHandler(server, EventType.M_MASTER_RECOVERY) {
        @Override
        public void process() throws IOException {
          HRegionInfo hri = regionState.getRegion();
          ServerName serverName = regionState.getServerName();
          // Serialize with other operations on the same region.
          ReentrantLock lock = locker.acquireLock(hri.getEncodedName());
          try {
            // Keep trying while the target RS is alive and the master is running.
            while (serverManager.isServerOnline(serverName)
                && !server.isStopped() && !server.isAborted()) {
              try {
                List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
                if (shouldAssignRegionsWithFavoredNodes) {
                  favoredNodes = ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(hri);
                }
                RegionOpeningState regionOpenState = serverManager.sendRegionOpen(
                  serverName, hri, -1, favoredNodes);

                if (regionOpenState == RegionOpeningState.FAILED_OPENING) {
                  // Failed opening this region, this means the target server didn't get
                  // the original region open RPC, so re-assign it with a new plan
                  LOG.debug("Got failed_opening in retry sendRegionOpen for "
                    + regionState + ", re-assign it");
                  invokeAssign(hri, true);
                }
                return; // Done.
              } catch (Throwable t) {
                if (t instanceof RemoteException) {
                  t = ((RemoteException) t).unwrapRemoteException();
                }
                // In case SocketTimeoutException/FailedServerException, we will retry
                if (t instanceof java.net.SocketTimeoutException
                    || t instanceof FailedServerException) {
                  Threads.sleep(100);
                  continue;
                }
                // For other exceptions, re-assign it
                LOG.debug("Got exception in retry sendRegionOpen for "
                  + regionState + ", re-assign it", t);
                invokeAssign(hri);
                return; // Done.
              }
            }
          } finally {
            lock.unlock();
          }
        }
      });
  }
/**
* At master failover, for pending_close region, make sure
* sendRegionClose RPC call is sent to the target regionserver
*/
private void retrySendRegionClose(final RegionState regionState) {
this.executorService.submit(
new EventHandler(server, EventType.M_MASTER_RECOVERY) {
@Override
public void process() throws IOException {
HRegionInfo hri = regionState.getRegion();
ServerName serverName = regionState.getServerName();
ReentrantLock lock = locker.acquireLock(hri.getEncodedName());
try {
while (serverManager.isServerOnline(serverName)
&& !server.isStopped() && !server.isAborted()) {
try {
if (!serverManager.sendRegionClose(serverName, hri, -1, null, false)) {
// This means the region is still on the target server
LOG.debug("Got false in retry sendRegionClose for "
+ regionState + ", re-close it");
invokeUnAssign(hri);
}
return; // Done.
} catch (Throwable t) {
if (t instanceof RemoteException) {
t = ((RemoteException) t).unwrapRemoteException();
}
// In case SocketTimeoutException/FailedServerException, we will retry<|fim▁hole|> Threads.sleep(100);
continue;
}
if (!(t instanceof NotServingRegionException
|| t instanceof RegionAlreadyInTransitionException)) {
// NotServingRegionException/RegionAlreadyInTransitionException
// means the target server got the original region close request.
// For other exceptions, re-close it
LOG.debug("Got exception in retry sendRegionClose for "
+ regionState + ", re-close it", t);
invokeUnAssign(hri);
}
return; // Done.
}
}
} finally {
lock.unlock();
}
}
});
}
/**
* Set Regions in transitions metrics.
* This takes an iterator on the RegionInTransition map (CLSM), and is not synchronized.
* This iterator is not fail fast, which may lead to stale read; but that's better than
* creating a copy of the map for metrics computation, as this method will be invoked
* on a frequent interval.
*/
public void updateRegionsInTransitionMetrics() {
long currentTime = System.currentTimeMillis();
int totalRITs = 0;
int totalRITsOverThreshold = 0;
long oldestRITTime = 0;
int ritThreshold = this.server.getConfiguration().
getInt(HConstants.METRICS_RIT_STUCK_WARNING_THRESHOLD, 60000);
for (RegionState state: regionStates.getRegionsInTransition().values()) {
totalRITs++;
long ritTime = currentTime - state.getStamp();
if (ritTime > ritThreshold) { // more than the threshold
totalRITsOverThreshold++;
}
if (oldestRITTime < ritTime) {
oldestRITTime = ritTime;
}
}
if (this.metricsAssignmentManager != null) {
this.metricsAssignmentManager.updateRITOldestAge(oldestRITTime);
this.metricsAssignmentManager.updateRITCount(totalRITs);
this.metricsAssignmentManager.updateRITCountOverThreshold(totalRITsOverThreshold);
}
}
  /**
   * Remove any pending assignment plan for the given region.
   * @param region Region whose plan we are to clear.
   */
  void clearRegionPlan(final HRegionInfo region) {
    // regionPlans is guarded by its own monitor everywhere it is mutated.
    synchronized (this.regionPlans) {
      this.regionPlans.remove(region.getEncodedName());
    }
  }
  /**
   * Wait on region to clear regions-in-transition, with no timeout.
   * @param hri Region to wait on.
   * @throws IOException
   */
  public void waitOnRegionToClearRegionsInTransition(final HRegionInfo hri)
      throws IOException, InterruptedException {
    // -1 means wait forever (see the overload below).
    waitOnRegionToClearRegionsInTransition(hri, -1L);
  }
  /**
   * Wait on region to clear regions-in-transition or time out
   * @param hri
   * @param timeOut Milliseconds to wait for current region to be out of transition state.
   *   A value {@code <= 0} means wait indefinitely.
   * @return True when a region clears regions-in-transition before timeout otherwise false
   * @throws InterruptedException
   */
  public boolean waitOnRegionToClearRegionsInTransition(final HRegionInfo hri, long timeOut)
      throws InterruptedException {
    if (!regionStates.isRegionInTransition(hri)) return true;
    long end = (timeOut <= 0) ? Long.MAX_VALUE : EnvironmentEdgeManager.currentTimeMillis()
        + timeOut;
    // There is already a timeout monitor on regions in transition so I
    // should not have to have one here too?
    LOG.info("Waiting for " + hri.getEncodedName() +
        " to leave regions-in-transition, timeOut=" + timeOut + " ms.");
    while (!this.server.isStopped() && regionStates.isRegionInTransition(hri)) {
      // Poll in 100ms slices so we can notice server stop and the deadline.
      regionStates.waitForUpdate(100);
      if (EnvironmentEdgeManager.currentTimeMillis() > end) {
        LOG.info("Timed out on waiting for " + hri.getEncodedName() + " to be assigned.");
        return false;
      }
    }
    if (this.server.isStopped()) {
      LOG.info("Giving up wait on regions in transition because stoppable.isStopped is set");
      return false;
    }
    return true;
  }
  /**
   * Update timers for all regions in transition going against the server in the
   * serversInUpdatingTimer.
   */
  public class TimerUpdater extends Chore {

    public TimerUpdater(final int period, final Stoppable stopper) {
      super("AssignmentTimerUpdater", period, stopper);
    }

    @Override
    protected void chore() {
      // Only meaningful when the timeout monitor is active.
      Preconditions.checkState(tomActivated);
      ServerName serverToUpdateTimer = null;
      // Walk the sorted server set in order, updating timers for each server
      // and removing it once processed. higher() resumes after the last one
      // handled, so concurrent additions are picked up in order.
      while (!serversInUpdatingTimer.isEmpty() && !stopper.isStopped()) {
        if (serverToUpdateTimer == null) {
          serverToUpdateTimer = serversInUpdatingTimer.first();
        } else {
          serverToUpdateTimer = serversInUpdatingTimer
              .higher(serverToUpdateTimer);
        }
        if (serverToUpdateTimer == null) {
          break;
        }
        updateTimers(serverToUpdateTimer);
        serversInUpdatingTimer.remove(serverToUpdateTimer);
      }
    }
  }
  /**
   * Monitor to check for time outs on region transition operations
   */
  public class TimeoutMonitor extends Chore {
    private boolean allRegionServersOffline = false;
    private ServerManager serverManager;
    private final int timeout;

    /**
     * Creates a periodic monitor to check for time outs on region transition
     * operations. This will deal with retries if for some reason something
     * doesn't happen within the specified timeout.
     * @param period
     * @param stopper When {@link Stoppable#isStopped()} is true, this thread will
     * cleanup and exit cleanly.
     * @param timeout
     */
    public TimeoutMonitor(final int period, final Stoppable stopper,
        ServerManager serverManager,
        final int timeout) {
      super("AssignmentTimeoutMonitor", period, stopper);
      this.timeout = timeout;
      this.serverManager = serverManager;
    }

    private synchronized void setAllRegionServersOffline(
      boolean allRegionServersOffline) {
      this.allRegionServersOffline = allRegionServersOffline;
    }

    @Override
    protected void chore() {
      Preconditions.checkState(tomActivated);
      boolean noRSAvailable = this.serverManager.createDestinationServersList().isEmpty();

      // Iterate all regions in transition checking for time outs
      long now = System.currentTimeMillis();
      // no lock concurrent access ok: we will be working on a copy, and it's java-valid to do
      //  a copy while another thread is adding/removing items
      for (String regionName : regionStates.getRegionsInTransition().keySet()) {
        RegionState regionState = regionStates.getRegionTransitionState(regionName);
        if (regionState == null) continue;

        if (regionState.getStamp() + timeout <= now) {
          // decide on action upon timeout
          actOnTimeOut(regionState);
        } else if (this.allRegionServersOffline && !noRSAvailable) {
          // RSs were all offline last round but some are back now: retry any
          // region whose plan targets a server that is not online anymore.
          RegionPlan existingPlan = regionPlans.get(regionName);
          if (existingPlan == null
              || !this.serverManager.isServerOnline(existingPlan
                  .getDestination())) {
            // if some RSs just came back online, we can start the
            // the assignment right away
            actOnTimeOut(regionState);
          }
        }
      }
      setAllRegionServersOffline(noRSAvailable);
    }

    // Dispatch on the timed-out region's state: bump timestamp where we can
    // only wait, re-assign or re-unassign where we can retry.
    private void actOnTimeOut(RegionState regionState) {
      HRegionInfo regionInfo = regionState.getRegion();
      LOG.info("Regions in transition timed out:  " + regionState);
      // Expired! Do a retry.
      switch (regionState.getState()) {
      case CLOSED:
        LOG.info("Region " + regionInfo.getEncodedName()
            + " has been CLOSED for too long, waiting on queued "
            + "ClosedRegionHandler to run or server shutdown");
        // Update our timestamp.
        regionState.updateTimestampToNow();
        break;
      case OFFLINE:
        LOG.info("Region has been OFFLINE for too long, " + "reassigning "
            + regionInfo.getRegionNameAsString() + " to a random server");
        invokeAssign(regionInfo);
        break;
      case PENDING_OPEN:
        LOG.info("Region has been PENDING_OPEN for too "
            + "long, reassigning region=" + regionInfo.getRegionNameAsString());
        invokeAssign(regionInfo);
        break;
      case OPENING:
        processOpeningState(regionInfo);
        break;
      case OPEN:
        LOG.error("Region has been OPEN for too long, " +
            "we don't know where region was opened so can't do anything");
        regionState.updateTimestampToNow();
        break;

      case PENDING_CLOSE:
        LOG.info("Region has been PENDING_CLOSE for too "
            + "long, running forced unassign again on region="
            + regionInfo.getRegionNameAsString());
        invokeUnassign(regionInfo);
        break;
      case CLOSING:
        LOG.info("Region has been CLOSING for too " +
            "long, this should eventually complete or the server will " +
            "expire, send RPC again");
        invokeUnassign(regionInfo);
        break;

      case SPLIT:
      case SPLITTING:
      case FAILED_OPEN:
      case FAILED_CLOSE:
      case MERGING:
        break;

      default:
        throw new IllegalStateException("Received event is not valid.");
      }
    }
  }
  /**
   * Handle a region stuck in OPENING: check its ZK node and re-assign unless
   * the RS has already transitioned it to OPENED.
   */
  private void processOpeningState(HRegionInfo regionInfo) {
    LOG.info("Region has been OPENING for too long, reassigning region="
        + regionInfo.getRegionNameAsString());
    // Should have a ZK node in OPENING state
    try {
      String node = ZKAssign.getNodeName(watcher, regionInfo.getEncodedName());
      Stat stat = new Stat();
      byte [] data = ZKAssign.getDataNoWatch(watcher, node, stat);
      if (data == null) {
        LOG.warn("Data is null, node " + node + " no longer exists");
        return;
      }
      RegionTransition rt = RegionTransition.parseFrom(data);
      EventType et = rt.getEventType();
      if (et == EventType.RS_ZK_REGION_OPENED) {
        LOG.debug("Region has transitioned to OPENED, allowing "
            + "watched event handlers to process");
        return;
      } else if (et != EventType.RS_ZK_REGION_OPENING && et != EventType.RS_ZK_REGION_FAILED_OPEN ) {
        LOG.warn("While timing out a region, found ZK node in unexpected state: " + et);
        return;
      }
      // Still OPENING or FAILED_OPEN: retry the assignment.
      invokeAssign(regionInfo);
    } catch (KeeperException ke) {
      LOG.error("Unexpected ZK exception timing out CLOSING region", ke);
    } catch (DeserializationException e) {
      LOG.error("Unexpected exception parsing CLOSING region", e);
    }
  }
  // Asynchronously assign the region with a fresh plan.
  void invokeAssign(HRegionInfo regionInfo) {
    invokeAssign(regionInfo, true);
  }
  // Asynchronously assign the region; newPlan=true forces a new region plan.
  void invokeAssign(HRegionInfo regionInfo, boolean newPlan) {
    threadPoolExecutorService.submit(new AssignCallable(this, regionInfo, newPlan));
  }
  // Asynchronously unassign the region.
  void invokeUnAssign(HRegionInfo regionInfo) {
    threadPoolExecutorService.submit(new UnAssignCallable(this, regionInfo));
  }
private void invokeUnassign(HRegionInfo regionInfo) {
threadPoolExecutorService.submit(new UnAssignCallable(this, regionInfo));
}
  // True if the given server is (believed to be) hosting hbase:meta.
  public boolean isCarryingMeta(ServerName serverName) {
    return isCarryingRegion(serverName, HRegionInfo.FIRST_META_REGIONINFO);
  }
  /**
   * Check if the shutdown server carries the specific region.
   * We have a bunch of places that store region location
   * Those values aren't consistent. There is a delay of notification.
   * The location from zookeeper unassigned node has the most recent data;
   * but the node could be deleted after the region is opened by AM.
   * The AM's info could be old when OpenedRegionHandler
   * processing hasn't finished yet when server shutdown occurs.
   * @return whether the serverName currently hosts the region
   */
  private boolean isCarryingRegion(ServerName serverName, HRegionInfo hri) {
    RegionTransition rt = null;
    try {
      byte [] data = ZKAssign.getData(watcher, hri.getEncodedName());
      // This call can legitimately come by null
      rt = data == null? null: RegionTransition.parseFrom(data);
    } catch (KeeperException e) {
      server.abort("Exception reading unassigned node for region=" + hri.getEncodedName(), e);
    } catch (DeserializationException e) {
      server.abort("Exception parsing unassigned node for region=" + hri.getEncodedName(), e);
    }

    // Prefer the ZK unassigned node when present; it has the freshest data.
    ServerName addressFromZK = rt != null? rt.getServerName():  null;
    if (addressFromZK != null) {
      // if we get something from ZK, we will use the data
      boolean matchZK = addressFromZK.equals(serverName);
      LOG.debug("Checking region=" + hri.getRegionNameAsString() + ", zk server=" + addressFromZK +
        " current=" + serverName + ", matches=" + matchZK);
      return matchZK;
    }

    // Fall back to the AssignmentManager's in-memory view.
    ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
    boolean matchAM = (addressFromAM != null &&
      addressFromAM.equals(serverName));
    LOG.debug("based on AM, current region=" + hri.getRegionNameAsString() +
      " is on server=" + (addressFromAM != null ? addressFromAM : "null") +
      " server being checked: " + serverName);

    return matchAM;
  }
  /**
   * Process shutdown server removing any assignments.
   * @param sn Server that went down.
   * @return list of regions in transition on this server
   */
  public List<HRegionInfo> processServerShutdown(final ServerName sn) {
    // Clean out any existing assignment plans for this server
    synchronized (this.regionPlans) {
      for (Iterator <Map.Entry<String, RegionPlan>> i =
          this.regionPlans.entrySet().iterator(); i.hasNext();) {
        Map.Entry<String, RegionPlan> e = i.next();
        ServerName otherSn = e.getValue().getDestination();
        // The name will be null if the region is planned for a random assign.
        if (otherSn != null && otherSn.equals(sn)) {
          // Use iterator's remove else we'll get CME
          i.remove();
        }
      }
    }
    // Mark the server offline in region states and walk the regions that were
    // in transition on it, filtering down to those SSH must re-handle.
    List<HRegionInfo> regions = regionStates.serverOffline(watcher, sn);
    for (Iterator<HRegionInfo> it = regions.iterator(); it.hasNext(); ) {
      HRegionInfo hri = it.next();
      String encodedName = hri.getEncodedName();

      // We need a lock on the region as we could update it
      Lock lock = locker.acquireLock(encodedName);
      try {
        RegionState regionState =
          regionStates.getRegionTransitionState(encodedName);
        if (regionState == null
            || (regionState.getServerName() != null && !regionState.isOnServer(sn))
            || !(regionState.isFailedClose() || regionState.isOffline()
              || regionState.isPendingOpenOrOpening())) {
          LOG.info("Skip " + regionState + " since it is not opening/failed_close"
            + " on the dead server any more: " + sn);
          it.remove();
        } else {
          try {
            // Delete the ZNode if exists
            ZKAssign.deleteNodeFailSilent(watcher, hri);
          } catch (KeeperException ke) {
            server.abort("Unexpected ZK exception deleting node " + hri, ke);
          }
          if (zkTable.isDisablingOrDisabledTable(hri.getTable())) {
            // Table is being disabled: just take the region offline instead
            // of re-assigning it.
            regionStates.regionOffline(hri);
            it.remove();
            continue;
          }
          // Mark the region offline and assign it again by SSH
          regionStates.updateRegionState(hri, State.OFFLINE);
        }
      } finally {
        lock.unlock();
      }
    }
    return regions;
  }
  /**
   * Execute a balancer-produced move plan: unassign the region from its
   * current host so it can be opened on the plan's destination.
   * @param plan Plan to execute.
   */
  public void balance(final RegionPlan plan) {
    HRegionInfo hri = plan.getRegionInfo();
    TableName tableName = hri.getTable();
    // Don't move regions of tables being disabled; they are going offline anyway.
    if (zkTable.isDisablingOrDisabledTable(tableName)) {
      LOG.info("Ignored moving region of disabling/disabled table "
        + tableName);
      return;
    }

    // Move the region only if it's assigned
    String encodedName = hri.getEncodedName();
    ReentrantLock lock = locker.acquireLock(encodedName);
    try {
      if (!regionStates.isRegionOnline(hri)) {
        RegionState state = regionStates.getRegionState(encodedName);
        LOG.info("Ignored moving region not assigned: " + hri + ", "
          + (state == null ? "not in region states" : state));
        return;
      }
      // Record the plan so the subsequent assign targets the plan's destination.
      synchronized (this.regionPlans) {
        this.regionPlans.put(plan.getRegionName(), plan);
      }
      unassign(hri, false, plan.getDestination());
    } finally {
      lock.unlock();
    }
  }
  // Stop the assignment manager: shut down executors and, if the timeout
  // monitor was activated, interrupt its chore threads.
  public void stop() {
    shutdown(); // Stop executor service, etc
    if (tomActivated){
      this.timeoutMonitor.interrupt();
      this.timerUpdater.interrupt();
    }
  }
  /**
   * Shutdown the threadpool executor service
   */
  public void shutdown() {
    // It's an immediate shutdown, so we're clearing the remaining tasks.
    synchronized (zkEventWorkerWaitingList){
      zkEventWorkerWaitingList.clear();
    }
    threadPoolExecutorService.shutdownNow();
    zkEventWorkers.shutdownNow();
    regionStateStore.stop();
  }
protected void setEnabledTable(TableName tableName) {
try {
this.zkTable.setEnabledTable(tableName);
} catch (KeeperException e) {
// here we can abort as it is the start up flow
String errorMsg = "Unable to ensure that the table " + tableName
+ " will be" + " enabled because of a ZooKeeper issue";
LOG.error(errorMsg);
this.server.abort(errorMsg, e);
}
}
  /**
   * Set region as OFFLINED up in zookeeper asynchronously.
   * @param state
   * @return True if we succeeded, false otherwise (State was incorrect or failed
   * updating zk).
   */
  private boolean asyncSetOfflineInZooKeeper(final RegionState state,
      final AsyncCallback.StringCallback cb, final ServerName destination) {
    // Only CLOSED or OFFLINE regions may be moved to OFFLINE; anything else
    // indicates a bug, so abort.
    if (!state.isClosed() && !state.isOffline()) {
      this.server.abort("Unexpected state trying to OFFLINE; " + state,
        new IllegalStateException());
      return false;
    }
    regionStates.updateRegionState(state.getRegion(), State.OFFLINE);
    try {
      // Fire-and-forget ZK node creation; cb is invoked with the result.
      ZKAssign.asyncCreateNodeOffline(watcher, state.getRegion(),
        destination, cb, state);
    } catch (KeeperException e) {
      if (e instanceof NodeExistsException) {
        LOG.warn("Node for " + state.getRegion() + " already exists");
      } else {
        server.abort("Unexpected ZK exception creating/setting node OFFLINE", e);
      }
      return false;
    }
    return true;
  }
  /**
   * Try to delete the region's ZK assignment node expecting one of the given
   * event types, in order; stops at the first successful delete.
   * @param desc human-readable node description for log messages
   * @return true if a node of one of the given types was deleted
   */
  private boolean deleteNodeInStates(String encodedName,
      String desc, ServerName sn, EventType... types) {
    try {
      for (EventType et: types) {
        if (ZKAssign.deleteNode(watcher, encodedName, et, sn)) {
          return true;
        }
      }
      LOG.info("Failed to delete the " + desc + " node for "
        + encodedName + ". The node type may not match");
    } catch (NoNodeException e) {
      // Already gone: fine, nothing to do.
      if (LOG.isDebugEnabled()) {
        LOG.debug("The " + desc + " node for " + encodedName + " already deleted");
      }
    } catch (KeeperException ke) {
      server.abort("Unexpected ZK exception deleting " + desc
        + " node for the region " + encodedName, ke);
    }
    return false;
  }
  // Delete the region's merging znode in any of its merge-related states.
  private void deleteMergingNode(String encodedName, ServerName sn) {
    deleteNodeInStates(encodedName, "merging", sn, EventType.RS_ZK_REGION_MERGING,
      EventType.RS_ZK_REQUEST_REGION_MERGE, EventType.RS_ZK_REGION_MERGED);
  }
  // Delete the region's splitting znode in any of its split-related states.
  private void deleteSplittingNode(String encodedName, ServerName sn) {
    deleteNodeInStates(encodedName, "splitting", sn, EventType.RS_ZK_REGION_SPLITTING,
      EventType.RS_ZK_REQUEST_REGION_SPLIT, EventType.RS_ZK_REGION_SPLIT);
  }
  /**
   * Handle a FAILED_OPEN report for a region: after too many attempts the
   * region is parked in FAILED_OPEN; otherwise it is treated as CLOSED and
   * re-assigned (to a new server if possible).
   */
  private void onRegionFailedOpen(
      final HRegionInfo hri, final ServerName sn) {
    String encodedName = hri.getEncodedName();
    AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName);
    if (failedOpenCount == null) {
      failedOpenCount = new AtomicInteger();
      // No need to use putIfAbsent, or extra synchronization since
      // this whole handleRegion block is locked on the encoded region
      // name, and failedOpenTracker is updated only in this block
      failedOpenTracker.put(encodedName, failedOpenCount);
    }
    if (failedOpenCount.incrementAndGet() >= maximumAttempts) {
      regionStates.updateRegionState(hri, State.FAILED_OPEN);
      // remove the tracking info to save memory, also reset
      // the count for next open initiative
      failedOpenTracker.remove(encodedName);
    } else {
      // Handle this the same as if it were opened and then closed.
      RegionState regionState = regionStates.updateRegionState(hri, State.CLOSED);
      if (regionState != null) {
        // When there are more than one region server a new RS is selected as the
        // destination and the same is updated in the region plan. (HBASE-5546)
        Set<TableName> disablingOrDisabled = null;
        try {
          disablingOrDisabled = ZKTable.getDisablingTables(watcher);
          disablingOrDisabled.addAll(ZKTable.getDisabledTables(watcher));
        } catch (KeeperException e) {
          server.abort("Cannot retrieve info about disabling or disabled tables ", e);
        }
        // NOTE(review): if the ZK read above failed, disablingOrDisabled stays
        // null and the contains() below would NPE (abort() does not throw) —
        // TODO confirm whether abort stops execution before reaching here.
        if (disablingOrDisabled.contains(hri.getTable())) {
          offlineDisabledRegion(hri);
          return;
        }
        // ZK Node is in CLOSED state, assign it.
        regionStates.updateRegionState(hri, RegionState.State.CLOSED);
        // This below has to do w/ online enable/disable of a table
        removeClosedRegion(hri);
        try {
          getRegionPlan(hri, sn, true);
        } catch (HBaseIOException e) {
          LOG.warn("Failed to get region plan", e);
        }
        invokeAssign(hri, false);
      }
    }
  }
  /**
   * Handle an OPENED report: mark the region online, clean up its ZK node
   * (ZK-based assignment only), and close it again if its table is being
   * disabled (user could have disabled it while the open was in flight).
   */
  private void onRegionOpen(
      final HRegionInfo hri, final ServerName sn, long openSeqNum) {
    regionOnline(hri, sn, openSeqNum);
    if (useZKForAssignment) {
      try {
        // Delete the ZNode if exists
        ZKAssign.deleteNodeFailSilent(watcher, hri);
      } catch (KeeperException ke) {
        server.abort("Unexpected ZK exception deleting node " + hri, ke);
      }
    }

    // reset the count, if any
    failedOpenTracker.remove(hri.getEncodedName());
    if (isTableDisabledOrDisabling(hri.getTable())) {
      invokeUnAssign(hri);
    }
  }
  /**
   * Handle a CLOSED report: if the table is being disabled, take the region
   * offline; otherwise record CLOSED and schedule a re-assignment.
   */
  private void onRegionClosed(final HRegionInfo hri) {
    if (isTableDisabledOrDisabling(hri.getTable())) {
      offlineDisabledRegion(hri);
      return;
    }
    regionStates.updateRegionState(hri, RegionState.State.CLOSED);
    // This below has to do w/ online enable/disable of a table
    removeClosedRegion(hri);
    invokeAssign(hri, false);
  }
  /**
   * Handle a split transition report (ZK-less assignment path).
   * @param p parent region; a/b the two daughters
   * @return null on success, otherwise an error message for the reporting RS
   */
  private String onRegionSplit(ServerName sn, TransitionCode code,
      HRegionInfo p, HRegionInfo a, HRegionInfo b) {
    RegionState rs_p = regionStates.getRegionState(p);
    RegionState rs_a = regionStates.getRegionState(a);
    RegionState rs_b = regionStates.getRegionState(b);
    // Parent must be open/splitting on the reporting server; daughters, if
    // known at all, must be splitting-new on it.
    if (!(rs_p.isOpenOrSplittingOnServer(sn)
        && (rs_a == null || rs_a.isOpenOrSplittingNewOnServer(sn))
        && (rs_b == null || rs_b.isOpenOrSplittingNewOnServer(sn)))) {
      return "Not in state good for split";
    }

    regionStates.updateRegionState(a, State.SPLITTING_NEW, sn);
    regionStates.updateRegionState(b, State.SPLITTING_NEW, sn);
    regionStates.updateRegionState(p, State.SPLITTING);

    if (code == TransitionCode.SPLIT) {
      if (TEST_SKIP_SPLIT_HANDLING) {
        return "Skipping split message, TEST_SKIP_SPLIT_HANDLING is set";
      }
      // Split completed: parent goes offline, daughters come online.
      regionOffline(p, State.SPLIT);
      regionOnline(a, sn, 1);
      regionOnline(b, sn, 1);

      // User could disable the table before master knows the new region.
      if (isTableDisabledOrDisabling(p.getTable())) {
        invokeUnAssign(a);
        invokeUnAssign(b);
      }
    } else if (code == TransitionCode.SPLIT_PONR) {
      // Point of no return: persist the split to meta.
      try {
        regionStateStore.splitRegion(p, a, b, sn);
      } catch (IOException ioe) {
        LOG.info("Failed to record split region " + p.getShortNameToLog());
        return "Failed to record the splitting in meta";
      }
    } else if (code == TransitionCode.SPLIT_REVERTED) {
      // Split rolled back: parent comes back online, daughters disappear.
      regionOnline(p, sn);
      regionOffline(a);
      regionOffline(b);

      if (isTableDisabledOrDisabling(p.getTable())) {
        invokeUnAssign(p);
      }
    }
    return null;
  }
private boolean isTableDisabledOrDisabling(TableName t) {
Set<TableName> disablingOrDisabled = null;
try {
disablingOrDisabled = ZKTable.getDisablingTables(watcher);
disablingOrDisabled.addAll(ZKTable.getDisabledTables(watcher));
} catch (KeeperException e) {
server.abort("Cannot retrieve info about disabling or disabled tables ", e);
}
return disablingOrDisabled.contains(t) ? true : false;
}
  /**
   * Handle a merge transition report (ZK-less assignment path).
   * @param p merged (new) region; a/b the merging parents
   * @return null on success, otherwise an error message for the reporting RS
   */
  private String onRegionMerge(ServerName sn, TransitionCode code,
      HRegionInfo p, HRegionInfo a, HRegionInfo b) {
    RegionState rs_p = regionStates.getRegionState(p);
    RegionState rs_a = regionStates.getRegionState(a);
    RegionState rs_b = regionStates.getRegionState(b);
    // Both parents must be open/merging on the reporting server; the merged
    // region, if known at all, must be merging-new on it.
    if (!(rs_a.isOpenOrMergingOnServer(sn) && rs_b.isOpenOrMergingOnServer(sn)
        && (rs_p == null || rs_p.isOpenOrMergingNewOnServer(sn)))) {
      return "Not in state good for merge";
    }

    regionStates.updateRegionState(a, State.MERGING);
    regionStates.updateRegionState(b, State.MERGING);
    regionStates.updateRegionState(p, State.MERGING_NEW, sn);

    String encodedName = p.getEncodedName();
    if (code == TransitionCode.READY_TO_MERGE) {
      mergingRegions.put(encodedName,
        new PairOfSameType<HRegionInfo>(a, b));
    } else if (code == TransitionCode.MERGED) {
      // Merge completed: parents go offline, merged region comes online.
      mergingRegions.remove(encodedName);
      regionOffline(a, State.MERGED);
      regionOffline(b, State.MERGED);
      regionOnline(p, sn, 1);

      // User could disable the table before master knows the new region.
      if (isTableDisabledOrDisabling(p.getTable())) {
        invokeUnAssign(p);
      }
    } else if (code == TransitionCode.MERGE_PONR) {
      // Point of no return: persist the merge to meta.
      try {
        regionStateStore.mergeRegions(p, a, b, sn);
      } catch (IOException ioe) {
        LOG.info("Failed to record merged region " + p.getShortNameToLog());
        return "Failed to record the merging in meta";
      }
    } else {
      // Merge reverted: parents come back online, merged region disappears.
      mergingRegions.remove(encodedName);
      regionOnline(a, sn);
      regionOnline(b, sn);
      regionOffline(p);

      if (isTableDisabledOrDisabling(p.getTable())) {
        invokeUnAssign(a);
        invokeUnAssign(b);
      }
    }
    return null;
  }
  /**
   * A helper to handle region merging transition event.
   * It transitions merging regions to MERGING state.
   * @return false if the event must be dropped (unknown server, bad payload,
   *   or inconsistent region states); true once handled
   */
  private boolean handleRegionMerging(final RegionTransition rt, final String encodedName,
      final String prettyPrintedRegionName, final ServerName sn) {
    if (!serverManager.isServerOnline(sn)) {
      LOG.warn("Dropped merging! ServerName=" + sn + " unknown.");
      return false;
    }
    // Payload carries three regions: merged region p followed by parents a, b.
    byte [] payloadOfMerging = rt.getPayload();
    List<HRegionInfo> mergingRegions;
    try {
      mergingRegions = HRegionInfo.parseDelimitedFrom(
        payloadOfMerging, 0, payloadOfMerging.length);
    } catch (IOException e) {
      LOG.error("Dropped merging! Failed reading "  + rt.getEventType()
        + " payload for " + prettyPrintedRegionName);
      return false;
    }
    assert mergingRegions.size() == 3;
    HRegionInfo p = mergingRegions.get(0);
    HRegionInfo hri_a = mergingRegions.get(1);
    HRegionInfo hri_b = mergingRegions.get(2);

    RegionState rs_p = regionStates.getRegionState(p);
    RegionState rs_a = regionStates.getRegionState(hri_a);
    RegionState rs_b = regionStates.getRegionState(hri_b);

    if (!((rs_a == null || rs_a.isOpenOrMergingOnServer(sn))
        && (rs_b == null || rs_b.isOpenOrMergingOnServer(sn))
        && (rs_p == null || rs_p.isOpenOrMergingNewOnServer(sn)))) {
      LOG.warn("Dropped merging! Not in state good for MERGING; rs_p="
        + rs_p + ", rs_a=" + rs_a + ", rs_b=" + rs_b);
      return false;
    }

    EventType et = rt.getEventType();
    if (et == EventType.RS_ZK_REQUEST_REGION_MERGE) {
      try {
        // Ack the RS's merge request by moving the znode REQUEST -> MERGING.
        // -1 means the expected version check failed; re-read to see if the RS
        // raced us past MERGING already (acceptable) or something else is wrong.
        if (RegionMergeTransaction.transitionMergingNode(watcher, p,
            hri_a, hri_b, sn, -1, EventType.RS_ZK_REQUEST_REGION_MERGE,
            EventType.RS_ZK_REGION_MERGING) == -1) {
          byte[] data = ZKAssign.getData(watcher, encodedName);
          EventType currentType = null;
          if (data != null) {
            RegionTransition newRt = RegionTransition.parseFrom(data);
            currentType = newRt.getEventType();
          }
          if (currentType == null || (currentType != EventType.RS_ZK_REGION_MERGED
              && currentType != EventType.RS_ZK_REGION_MERGING)) {
            LOG.warn("Failed to transition pending_merge node "
              + encodedName + " to merging, it's now " + currentType);
            return false;
          }
        }
      } catch (Exception e) {
        LOG.warn("Failed to transition pending_merge node "
          + encodedName + " to merging", e);
        return false;
      }
    }

    synchronized (regionStates) {
      regionStates.updateRegionState(hri_a, State.MERGING);
      regionStates.updateRegionState(hri_b, State.MERGING);
      regionStates.updateRegionState(p, State.MERGING_NEW, sn);

      if (et != EventType.RS_ZK_REGION_MERGED) {
        this.mergingRegions.put(encodedName,
          new PairOfSameType<HRegionInfo>(hri_a, hri_b));
      } else {
        // Merge done: retire the parents, bring the merged region online.
        this.mergingRegions.remove(encodedName);
        regionOffline(hri_a, State.MERGED);
        regionOffline(hri_b, State.MERGED);
        regionOnline(p, sn);
      }
    }

    if (et == EventType.RS_ZK_REGION_MERGED) {
      LOG.debug("Handling MERGED event for " + encodedName + "; deleting node");
      // Remove region from ZK
      try {
        boolean successful = false;
        while (!successful) {
          // It's possible that the RS tickles in between the reading of the
          // znode and the deleting, so it's safe to retry.
          successful = ZKAssign.deleteNode(watcher, encodedName,
            EventType.RS_ZK_REGION_MERGED, sn);
        }
      } catch (KeeperException e) {
        if (e instanceof NoNodeException) {
          String znodePath = ZKUtil.joinZNode(watcher.splitLogZNode, encodedName);
          LOG.debug("The znode " + znodePath + " does not exist.  May be deleted already.");
        } else {
          server.abort("Error deleting MERGED node " + encodedName, e);
        }
      }
      LOG.info("Handled MERGED event; merged=" + p.getRegionNameAsString()
        + ", region_a=" + hri_a.getRegionNameAsString() + ", region_b="
        + hri_b.getRegionNameAsString() + ", on " + sn);

      // User could disable the table before master knows the new region.
      if (zkTable.isDisablingOrDisabledTable(p.getTable())) {
        unassign(p);
      }
    }
    return true;
  }
  /**
   * A helper to handle region splitting transition event.
   * @return false if the event must be dropped (unknown server, bad payload,
   *   or inconsistent region states); true once handled
   */
  private boolean handleRegionSplitting(final RegionTransition rt, final String encodedName,
      final String prettyPrintedRegionName, final ServerName sn) {
    if (!serverManager.isServerOnline(sn)) {
      LOG.warn("Dropped splitting! ServerName=" + sn + " unknown.");
      return false;
    }
    // Payload carries the two daughter regions a, b.
    byte [] payloadOfSplitting = rt.getPayload();
    List<HRegionInfo> splittingRegions;
    try {
      splittingRegions = HRegionInfo.parseDelimitedFrom(
        payloadOfSplitting, 0, payloadOfSplitting.length);
    } catch (IOException e) {
      LOG.error("Dropped splitting! Failed reading " + rt.getEventType()
        + " payload for " + prettyPrintedRegionName);
      return false;
    }
    assert splittingRegions.size() == 2;
    HRegionInfo hri_a = splittingRegions.get(0);
    HRegionInfo hri_b = splittingRegions.get(1);

    RegionState rs_p = regionStates.getRegionState(encodedName);
    RegionState rs_a = regionStates.getRegionState(hri_a);
    RegionState rs_b = regionStates.getRegionState(hri_b);

    if (!((rs_p == null || rs_p.isOpenOrSplittingOnServer(sn))
        && (rs_a == null || rs_a.isOpenOrSplittingNewOnServer(sn))
        && (rs_b == null || rs_b.isOpenOrSplittingNewOnServer(sn)))) {
      LOG.warn("Dropped splitting! Not in state good for SPLITTING; rs_p="
        + rs_p + ", rs_a=" + rs_a + ", rs_b=" + rs_b);
      return false;
    }

    if (rs_p == null) {
      // Splitting region should be online
      rs_p = regionStates.updateRegionState(rt, State.OPEN);
      if (rs_p == null) {
        LOG.warn("Received splitting for region " + prettyPrintedRegionName
          + " from server " + sn + " but it doesn't exist anymore,"
          + " probably already processed its split");
        return false;
      }
      regionStates.regionOnline(rs_p.getRegion(), sn);
    }

    HRegionInfo p = rs_p.getRegion();
    EventType et = rt.getEventType();
    if (et == EventType.RS_ZK_REQUEST_REGION_SPLIT) {
      try {
        // Ack the RS's split request by moving the znode REQUEST -> SPLITTING.
        // -1 means the expected version check failed; re-read to see if the RS
        // raced us past SPLITTING already (acceptable) or something else is wrong.
        if (SplitTransaction.transitionSplittingNode(watcher, p,
            hri_a, hri_b, sn, -1, EventType.RS_ZK_REQUEST_REGION_SPLIT,
            EventType.RS_ZK_REGION_SPLITTING) == -1) {
          byte[] data = ZKAssign.getData(watcher, encodedName);
          EventType currentType = null;
          if (data != null) {
            RegionTransition newRt = RegionTransition.parseFrom(data);
            currentType = newRt.getEventType();
          }
          if (currentType == null || (currentType != EventType.RS_ZK_REGION_SPLIT
              && currentType != EventType.RS_ZK_REGION_SPLITTING)) {
            LOG.warn("Failed to transition pending_split node "
              + encodedName + " to splitting, it's now " + currentType);
            return false;
          }
        }
      } catch (Exception e) {
        LOG.warn("Failed to transition pending_split node "
          + encodedName + " to splitting", e);
        return false;
      }
    }

    synchronized (regionStates) {
      regionStates.updateRegionState(hri_a, State.SPLITTING_NEW, sn);
      regionStates.updateRegionState(hri_b, State.SPLITTING_NEW, sn);
      regionStates.updateRegionState(rt, State.SPLITTING);

      // The below is for testing ONLY!  We can't do fault injection easily, so
      // resort to this kinda uglyness -- St.Ack 02/25/2011.
      if (TEST_SKIP_SPLIT_HANDLING) {
        LOG.warn("Skipping split message, TEST_SKIP_SPLIT_HANDLING is set");
        return true; // return true so that the splitting node stays
      }

      if (et == EventType.RS_ZK_REGION_SPLIT) {
        // Split done: retire the parent, bring the daughters online.
        regionOffline(p, State.SPLIT);
        regionOnline(hri_a, sn);
        regionOnline(hri_b, sn);
      }
    }

    if (et == EventType.RS_ZK_REGION_SPLIT) {
      LOG.debug("Handling SPLIT event for " + encodedName + "; deleting node");
      // Remove region from ZK
      try {
        boolean successful = false;
        while (!successful) {
          // It's possible that the RS tickles in between the reading of the
          // znode and the deleting, so it's safe to retry.
          successful = ZKAssign.deleteNode(watcher, encodedName,
            EventType.RS_ZK_REGION_SPLIT, sn);
        }
      } catch (KeeperException e) {
        if (e instanceof NoNodeException) {
          String znodePath = ZKUtil.joinZNode(watcher.splitLogZNode, encodedName);
          LOG.debug("The znode " + znodePath + " does not exist.  May be deleted already.");
        } else {
          server.abort("Error deleting SPLIT node " + encodedName, e);
        }
      }
      LOG.info("Handled SPLIT event; parent=" + p.getRegionNameAsString()
        + ", daughter a=" + hri_a.getRegionNameAsString() + ", daughter b="
        + hri_b.getRegionNameAsString() + ", on " + sn);

      // User could disable the table before master knows the new region.
      if (zkTable.isDisablingOrDisabledTable(p.getTable())) {
        unassign(hri_a);
        unassign(hri_b);
      }
    }
    return true;
  }
/**
* A region is offline. The new state should be the specified one,
* if not null. If the specified state is null, the new state is Offline.
* The specified state can be Split/Merged/Offline/null only.
*/
private void regionOffline(final HRegionInfo regionInfo, final State state) {
regionStates.regionOffline(regionInfo, state);
removeClosedRegion(regionInfo);
// remove the region plan as well just in case.
clearRegionPlan(regionInfo);
balancer.regionOffline(regionInfo);
// Tell our listeners that a region was closed
sendRegionClosedNotification(regionInfo);
}
private void sendRegionOpenedNotification(final HRegionInfo regionInfo,
final ServerName serverName) {
if (!this.listeners.isEmpty()) {
for (AssignmentListener listener : this.listeners) {
listener.regionOpened(regionInfo, serverName);
}
}
}
private void sendRegionClosedNotification(final HRegionInfo regionInfo) {
if (!this.listeners.isEmpty()) {
for (AssignmentListener listener : this.listeners) {
listener.regionClosed(regionInfo);
}
}
}
/**
* Try to update some region states. If the state machine prevents
* such update, an error message is returned to explain the reason.
*
* It's expected that in each transition there should have just one
* region for opening/closing, 3 regions for splitting/merging.
* These regions should be on the server that requested the change.
*
* Region state machine. Only these transitions
* are expected to be triggered by a region server.
*
* On the state transition:
* (1) Open/Close should be initiated by master
* (a) Master sets the region to pending_open/pending_close
* in memory and hbase:meta after sending the request
* to the region server
* (b) Region server reports back to the master
* after open/close is done (either success/failure)
* (c) If region server has problem to report the status
* to master, it must be because the master is down or some
* temporary network issue. Otherwise, the region server should
* abort since it must be a bug. If the master is not accessible,
* the region server should keep trying until the server is
* stopped or till the status is reported to the (new) master
* (d) If region server dies in the middle of opening/closing
* a region, SSH picks it up and finishes it
* (e) If master dies in the middle, the new master recovers
* the state during initialization from hbase:meta. Region server
* can report any transition that has not been reported to
* the previous active master yet
* (2) Split/merge is initiated by region servers
* (a) To split a region, a region server sends a request
* to master to try to set a region to splitting, together with
* two daughters (to be created) to splitting new. If approved
* by the master, the splitting can then move ahead
* (b) To merge two regions, a region server sends a request to
* master to try to set the new merged region (to be created) to
* merging_new, together with two regions (to be merged) to merging.
* If it is ok with the master, the merge can then move ahead
* (c) Once the splitting/merging is done, the region server
* reports the status back to the master either success/failure.
* (d) Other scenarios should be handled similarly as for
* region open/close
*/
protected String onRegionTransition(final ServerName serverName,
final RegionStateTransition transition) {
TransitionCode code = transition.getTransitionCode();
HRegionInfo hri = HRegionInfo.convert(transition.getRegionInfo(0));
RegionState current = regionStates.getRegionState(hri);
if (LOG.isDebugEnabled()) {
LOG.debug("Got transition " + code + " for "
+ (current != null ? current.toString() : hri.getShortNameToLog())
+ " from " + serverName);
}
String errorMsg = null;
switch (code) {
case OPENED:
if (current != null && current.isOpened() && current.isOnServer(serverName)) {
LOG.info("Region " + hri.getShortNameToLog() + " is already " + current.getState() + " on "
+ serverName);
break;
}
case FAILED_OPEN:
if (current == null
|| !current.isPendingOpenOrOpeningOnServer(serverName)) {
errorMsg = hri.getShortNameToLog()
+ " is not pending open on " + serverName;
} else if (code == TransitionCode.FAILED_OPEN) {
onRegionFailedOpen(hri, serverName);
} else {
long openSeqNum = HConstants.NO_SEQNUM;
if (transition.hasOpenSeqNum()) {
openSeqNum = transition.getOpenSeqNum();
}
if (openSeqNum < 0) {
errorMsg = "Newly opened region has invalid open seq num " + openSeqNum;
} else {
onRegionOpen(hri, serverName, openSeqNum);
}
}
break;
case CLOSED:
if (current == null
|| !current.isPendingCloseOrClosingOnServer(serverName)) {
errorMsg = hri.getShortNameToLog()
+ " is not pending close on " + serverName;
} else {
onRegionClosed(hri);
}
break;
case READY_TO_SPLIT:
case SPLIT_PONR:
case SPLIT:
case SPLIT_REVERTED:
errorMsg = onRegionSplit(serverName, code, hri,
HRegionInfo.convert(transition.getRegionInfo(1)),
HRegionInfo.convert(transition.getRegionInfo(2)));
break;
case READY_TO_MERGE:
case MERGE_PONR:
case MERGED:
case MERGE_REVERTED:
errorMsg = onRegionMerge(serverName, code, hri,
HRegionInfo.convert(transition.getRegionInfo(1)),
HRegionInfo.convert(transition.getRegionInfo(2)));
break;
default:
errorMsg = "Unexpected transition code " + code;
}
if (errorMsg != null) {
LOG.error("Failed to transtion region from " + current + " to "
+ code + " by " + serverName + ": " + errorMsg);
}
return errorMsg;
}
/**
* @return Instance of load balancer
*/
public LoadBalancer getBalancer() {
return this.balancer;
}
}<|fim▁end|> | if (t instanceof java.net.SocketTimeoutException
|| t instanceof FailedServerException) { |
<|file_name|>xdg_test.go<|end_file_name|><|fim▁begin|>package xdg_test
import (
"os"
"path/filepath"
"testing"
<|fim▁hole|>
type envSample struct {
name string
value string
expected interface{}
actual interface{}
}
func testDirs(t *testing.T, samples ...*envSample) {
// Test home directory.
require.NotEmpty(t, xdg.Home)
t.Logf("Home: %s", xdg.Home)
// Set environment variables.
for _, sample := range samples {
require.NoError(t, os.Setenv(sample.name, sample.value))
}
xdg.Reload()
// Test results.
for _, sample := range samples {
var actual interface{}
switch v := sample.actual.(type) {
case *string:
actual = *v
case *[]string:
actual = *v
}
require.Equal(t, sample.expected, actual)
t.Logf("%s: %v", sample.name, actual)
}
}
type testInputData struct {
relPaths []string
pathFunc func(string) (string, error)
searchFunc func(string) (string, error)
}
func TestBaseDirFuncs(t *testing.T) {
inputs := []*testInputData{
{
relPaths: []string{"app.data", "appname/app.data"},
pathFunc: xdg.DataFile,
searchFunc: xdg.SearchDataFile,
},
{
relPaths: []string{"app.yaml", "appname/app.yaml"},
pathFunc: xdg.ConfigFile,
searchFunc: xdg.SearchConfigFile,
},
{
relPaths: []string{"app.state", "appname/app.state"},
pathFunc: xdg.StateFile,
searchFunc: xdg.SearchStateFile,
},
{
relPaths: []string{"app.cache", "appname/app.cache"},
pathFunc: xdg.CacheFile,
searchFunc: xdg.SearchCacheFile,
},
{
relPaths: []string{"app.pid", "appname/app.pid"},
pathFunc: xdg.RuntimeFile,
searchFunc: xdg.SearchRuntimeFile,
},
}
// Test base directories for regular files.
testBaseDirsRegular(t, inputs)
// Test base directories for symbolic links.
for _, input := range inputs {
input.relPaths = []string{input.relPaths[1]}
}
testBaseDirsSymlinks(t, inputs)
}
func testBaseDirsRegular(t *testing.T, inputs []*testInputData) {
for _, input := range inputs {
for _, relPath := range input.relPaths {
// Get suitable path for input file.
expFullPath, err := input.pathFunc(relPath)
require.NoError(t, err)
// Create input file.
f, err := os.Create(expFullPath)
require.NoError(t, err)
require.NoError(t, f.Close())
// Search input file after creation.
actFullPath, err := input.searchFunc(relPath)
require.NoError(t, err)
require.Equal(t, expFullPath, actFullPath)
// Remove created file.
require.NoError(t, os.Remove(expFullPath))
// Search input file after removal.
_, err = input.searchFunc(relPath)
require.Error(t, err)
// Check that the same path is returned.
actFullPath, err = input.pathFunc(relPath)
require.NoError(t, err)
require.Equal(t, expFullPath, actFullPath)
}
}
}
func testBaseDirsSymlinks(t *testing.T, inputs []*testInputData) {
for _, input := range inputs {
for _, relPath := range input.relPaths {
// Get suitable path for input file.
expFullPath, err := input.pathFunc(relPath)
require.NoError(t, err)
// Replace input directory with symlink.
symlinkDir := filepath.Dir(expFullPath)
inputDir := filepath.Join(filepath.Dir(symlinkDir), "inputdir")
require.NoError(t, os.Remove(symlinkDir))
require.NoError(t, os.Mkdir(inputDir, os.ModeDir|0700))
require.NoError(t, os.Symlink(inputDir, symlinkDir))
// Create input file.
inputPath := filepath.Join(symlinkDir, "input.file")
f, err := os.Create(inputPath)
require.NoError(t, err)
require.NoError(t, f.Close())
// Create symbolic link.
require.NoError(t, os.Symlink(inputPath, expFullPath))
// Search input file after creation.
actFullPath, err := input.searchFunc(relPath)
require.NoError(t, err)
require.Equal(t, expFullPath, actFullPath)
// Remove created symbolic links, files and directories.
require.NoError(t, os.Remove(expFullPath))
require.NoError(t, os.Remove(inputPath))
require.NoError(t, os.Remove(symlinkDir))
require.NoError(t, os.Remove(inputDir))
// Search input file after removal.
_, err = input.searchFunc(relPath)
require.Error(t, err)
// Check that the same path is returned.
actFullPath, err = input.pathFunc(relPath)
require.NoError(t, err)
require.Equal(t, expFullPath, actFullPath)
}
}
}
func TestInvalidPaths(t *testing.T) {
inputs := map[string]func(string) (string, error){
"\000/app.data": xdg.DataFile,
"appname\000/app.yaml": xdg.ConfigFile,
"appname/\000/app.state": xdg.StateFile,
"\000appname/app.cache": xdg.CacheFile,
"\000/appname/app.pid": xdg.RuntimeFile,
}
for inputPath, xdgFunc := range inputs {
_, err := xdgFunc(inputPath)
require.Error(t, err)
}
}<|fim▁end|> | "github.com/adrg/xdg"
"github.com/stretchr/testify/require"
) |
<|file_name|>test_generate_product_templates_tfidf_api.py<|end_file_name|><|fim▁begin|>""" Tests barbante.api.generate_product_templates_tfidf.
"""
import json
import nose.tools
import barbante.api.generate_product_templates_tfidf as script<|fim▁hole|>
log = barbante_logging.get_logger(__name__)
def test_script():
""" Tests a call to script barbante.api.generate_product_templates_tfidf.
"""
result = script.main([tests.TEST_ENV])
log.debug(result)
result_json = json.dumps(result)
nose.tools.ok_(result_json) # a well-formed json is enough
if __name__ == '__main__':
test_script()<|fim▁end|> | import barbante.utils.logging as barbante_logging
import barbante.tests as tests
|
<|file_name|>api.ts<|end_file_name|><|fim▁begin|>import * as request from 'superagent';
import { assign } from 'lodash';
declare var window;
<|fim▁hole|>}<|fim▁end|> | export function init() {
return request
.get('/api/user')
.set('x-auth', window.localStorage.jwt_token || window.jwt_token || ''); |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/django_coverage_plugin/blob/master/NOTICE.txt
"""
Pytest auto configuration.
This module is run automatically by pytest, to define and enable fixtures.<|fim▁hole|>
import re
import warnings
import django.utils.deprecation
import pytest
@pytest.fixture(autouse=True)
def set_warnings():
"""Configure warnings to show while running tests."""
warnings.simplefilter("default")
warnings.simplefilter("once", DeprecationWarning)
# Warnings to suppress:
# How come these warnings are successfully suppressed here, but not in setup.cfg??
# We know we do tricky things with Django settings, don't warn us about it.
warnings.filterwarnings(
"ignore",
category=UserWarning,
message=r"Overriding setting DATABASES can lead to unexpected behavior.",
)
# Django has warnings like RemovedInDjango40Warning. We use features that are going to be
# deprecated, so we don't need to see those warnings. But the specific warning classes change
# in every release. Find them and ignore them.
for name, obj in vars(django.utils.deprecation).items():
if re.match(r"RemovedInDjango\d+Warning", name):
warnings.filterwarnings("ignore", category=obj)<|fim▁end|> | """ |
<|file_name|>MusicOperat.cpp<|end_file_name|><|fim▁begin|>/****************************************************************************
* ¹¦ ÄÜ£ºÒôƵÎļþ²Ù×÷Àà *
* Ìí ¼Ó ÈË£ºÐ¡¿É *
* Ìí¼Óʱ¼ä£º2015.01.17 12£º27 *
* °æ±¾ÀàÐÍ£º³õʼ°æ±¾ *
* ÁªÏµ·½Ê½£ºQQ-1035144170 *
****************************************************************************/
#include "StdAfx.h"
#include "MusicOperat.h"
CMusicOpreat::CMusicOpreat(HWND m_PWnd)
{
m_ParenhWnd=m_PWnd;
nIndex=0; //²¥·ÅË÷Òý
hStream=NULL; //²¥·ÅÁ÷
m_pBassMusic=NULL;
m_pMainState=NULL;
//²âÊÔ£º
CLrcParse lrcPar;
lrcPar.ReadFile("");
}
CMusicOpreat::~CMusicOpreat(void)
{
if (hStream)
{
BASS_ChannelStop(hStream);
hStream=NULL;
}
}
<|fim▁hole|>//
// return &_Instance;
//}
void CMusicOpreat::InitDatas()
{
//³õʼ»¯ÉùÒô×é¼þ
m_pBassMusic = CBassMusicEngine::GetInstance();
if ( m_pBassMusic == NULL )
{
if ( SMessageBox(NULL,TEXT("ÉùÒôÒýÇæ³õʼ»¯Ê§°Ü"),_T("¾¯¸æ"),MB_OK|MB_ICONEXCLAMATION) == IDOK )
{
PostQuitMessage(0);
}
}
m_pBassMusic->Init(m_hWnd,this);
}
void CMusicOpreat::InsertMapInfo(int nNum, CString strPath, tagMusicInfo &pMuInfo)
{
//¼ÓÔØÎļþ
HSTREAM hStream = m_pBassMusic->LoadFile(strPath);
if ( hStream == -1 ) return;
//»ñȡýÌå±êÇ©
tagMusicInfo *pInfo = m_pBassMusic->GetInfo(hStream);
//ͨ¹ýmapºÍListBox½áºÏ£¬Ò»Æð¹ÜÀí²¥·ÅÁбí
tagMusicInfo *pMusicInfo = new tagMusicInfo;
pMusicInfo->dwTime = pInfo->dwTime;
pMusicInfo->hStream = pInfo->hStream;
lstrcpyn(pMusicInfo->szArtist,pInfo->szArtist,CountArray(pMusicInfo->szArtist));
lstrcpyn(pMusicInfo->szTitle,pInfo->szTitle,CountArray(pMusicInfo->szTitle));
pMuInfo=*pMusicInfo;
m_MusicManager.insert(pair<int,tagMusicInfo*>(nNum,pMusicInfo));
}
void CMusicOpreat::OnButPrev() // ÉÏÒ»Çú
{
m_pBassMusic->Stop(hStream);
nIndex--;
if (nIndex<0)
{
nIndex=m_MusicManager.size()-1;
}
CMusicManagerMap::iterator iter = m_MusicManager.find(nIndex);
if ( iter == m_MusicManager.end() ) return;
hStream = iter->second->hStream;
if( m_pBassMusic->Play(hStream,true) )
{
int i=0;
}
}
void CMusicOpreat::OnButPlay() // ²¥·Å
{
m_pBassMusic->Stop(hStream);
CMusicManagerMap::iterator iter = m_MusicManager.find(nIndex);
if ( iter == m_MusicManager.end() )
{
return;
}else
{
hStream = iter->second->hStream;
if( m_pBassMusic->Play(hStream,/*(++nIndex!= nIndex) ? false : true)*/true ))
{
int i=0;
}
}
}
void CMusicOpreat::OnButPause() // ÔÝÍ£
{
if ( m_pBassMusic->IsPlaying(hStream) == FALSE ) return;
if( m_pBassMusic->Pause(hStream) )
{
int i=0;
}
}
void CMusicOpreat::OnButPlayNext() // ÏÂÒ»Çú
{
m_pBassMusic->Stop(hStream);
nIndex++;
if (nIndex>=m_MusicManager.size())
{
nIndex=0;
}
CMusicManagerMap::iterator iter = m_MusicManager.find(nIndex);
if ( iter == m_MusicManager.end() ) return;
hStream = iter->second->hStream;
if( m_pBassMusic->Play(hStream,true) )
{
int i=0;
}
}
void CMusicOpreat::OnStop()
{
//×Ô¶¯Çл»ÏÂÒ»Ê׸è
OnButPlayNext();
//::PostMessage(GetContainer()->GetHostHwnd(), MSG_USER_SEARCH_DMTASKDLG, 0, 0);
::PostMessage(m_ParenhWnd,MSG_USER_REDRAW,0,0);
}<|fim▁end|> | //CMusicOpreat * CMusicOpreat::GetInstance()
//{
// static CMusicOpreat _Instance;
|
<|file_name|>account_invoice_send.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import UserError
class AccountInvoiceSend(models.TransientModel):
_name = 'account.invoice.send'
_inherit = 'account.invoice.send'
_description = 'Account Invoice Send'
partner_id = fields.Many2one('res.partner', compute='_get_partner', string='Partner')
snailmail_is_letter = fields.Boolean('Send by Post', help='Allows to send the document by Snailmail (coventional posting delivery service)', default=lambda self: self.env.company.invoice_is_snailmail)
snailmail_cost = fields.Float(string='Stamp(s)', compute='_compute_snailmail_cost', readonly=True)
invalid_addresses = fields.Integer('Invalid Addresses Count', compute='_compute_invalid_addresses')
invalid_invoice_ids = fields.Many2many('account.move', string='Invalid Addresses', compute='_compute_invalid_addresses')
@api.depends('invoice_ids')
def _compute_invalid_addresses(self):
for wizard in self:
invalid_invoices = wizard.invoice_ids.filtered(lambda i: not self.env['snailmail.letter']._is_valid_address(i.partner_id))
wizard.invalid_invoice_ids = invalid_invoices
wizard.invalid_addresses = len(invalid_invoices)
@api.depends('invoice_ids')
def _get_partner(self):
self.partner_id = self.env['res.partner']
for wizard in self:
if wizard.invoice_ids and len(wizard.invoice_ids) == 1:
wizard.partner_id = wizard.invoice_ids.partner_id.id
@api.depends('snailmail_is_letter')
def _compute_snailmail_cost(self):
for wizard in self:
wizard.snailmail_cost = len(wizard.invoice_ids.ids)
def snailmail_print_action(self):
self.ensure_one()
letters = self.env['snailmail.letter']
for invoice in self.invoice_ids:
letter = self.env['snailmail.letter'].create({
'partner_id': invoice.partner_id.id,
'model': 'account.move',
'res_id': invoice.id,
'user_id': self.env.user.id,
'company_id': invoice.company_id.id,
'report_template': self.env.ref('account.account_invoices').id
})
letters |= letter
self.invoice_ids.filtered(lambda inv: not inv.is_move_sent).write({'is_move_sent': True})
if len(self.invoice_ids) == 1:
letters._snailmail_print()
else:
letters._snailmail_print(immediate=False)
def send_and_print_action(self):
if self.snailmail_is_letter:<|fim▁hole|> if self.env['snailmail.confirm.invoice'].show_warning():
wizard = self.env['snailmail.confirm.invoice'].create({'model_name': _('Invoice'), 'invoice_send_id': self.id})
return wizard.action_open()
self._print_action()
return self.send_and_print()
def _print_action(self):
if not self.snailmail_is_letter:
return
if self.invalid_addresses and self.composition_mode == "mass_mail":
self.notify_invalid_addresses()
self.snailmail_print_action()
def send_and_print(self):
res = super(AccountInvoiceSend, self).send_and_print_action()
return res
def notify_invalid_addresses(self):
self.ensure_one()
self.env['bus.bus'].sendone(
(self._cr.dbname, 'res.partner', self.env.user.partner_id.id),
{'type': 'snailmail_invalid_address', 'title': _("Invalid Addresses"),
'message': _("%s of the selected invoice(s) had an invalid address and were not sent", self.invalid_addresses)}
)
def invalid_addresses_action(self):
return {
'name': _('Invalid Addresses'),
'type': 'ir.actions.act_window',
'view_mode': 'kanban,tree,form',
'res_model': 'account.move',
'domain': [('id', 'in', self.mapped('invalid_invoice_ids').ids)],
}<|fim▁end|> | |
<|file_name|>Content.java<|end_file_name|><|fim▁begin|>/*
* Asqatasun - Automated webpage assessment
* Copyright (C) 2008-2020 Asqatasun.org
*
* This file is part of Asqatasun.
*
* Asqatasun is free software: you can redistribute it and/or modify<|fim▁hole|> *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Contact us by mail: asqatasun AT asqatasun DOT org
*/
package org.asqatasun.entity.audit;
import java.util.Date;
import org.asqatasun.entity.Entity;
/**
*
* @author jkowalczyk
*/
public interface Content extends Entity {
/**
*
* @return the audit
*/
Audit getAudit();
/**
*
* @return the date of loading
*/
Date getDateOfLoading();
/**
*
* @return the URI
*/
String getURI();
/**
*
* @return the http Status Code
*/
int getHttpStatusCode();
/**
*
* @param audit
* the audit to set
*/
void setAudit(Audit audit);
/**
*
* @param dateOfLoading
* the date of loading to set
*/
void setDateOfLoading(Date dateOfLoading);
/**
*
* @param uri
* the URI to set
*/
void setURI(String uri);
/**
*
* @param httpStatusCode
* the Http Status Code when fetched the content
*/
void setHttpStatusCode(int httpStatusCode);
}<|fim▁end|> | * it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version. |
<|file_name|>board.ts<|end_file_name|><|fim▁begin|>import { HeadlessState } from './state.js';
import { pos2key, key2pos, opposite, distanceSq, allPos, computeSquareCenter } from './util.js';
import { premove, queen, knight } from './premove.js';
import * as cg from './types.js';
export function callUserFunction<T extends (...args: any[]) => void>(f: T | undefined, ...args: Parameters<T>): void {
if (f) setTimeout(() => f(...args), 1);
}
export function toggleOrientation(state: HeadlessState): void {
state.orientation = opposite(state.orientation);
state.animation.current = state.draggable.current = state.selected = undefined;
}
export function reset(state: HeadlessState): void {
state.lastMove = undefined;
unselect(state);
unsetPremove(state);
unsetPredrop(state);
}
export function setPieces(state: HeadlessState, pieces: cg.PiecesDiff): void {
for (const [key, piece] of pieces) {
if (piece) state.pieces.set(key, piece);
else state.pieces.delete(key);
}
}
export function setCheck(state: HeadlessState, color: cg.Color | boolean): void {
state.check = undefined;
if (color === true) color = state.turnColor;
if (color)
for (const [k, p] of state.pieces) {
if (p.role === 'king' && p.color === color) {
state.check = k;
}
}
}
function setPremove(state: HeadlessState, orig: cg.Key, dest: cg.Key, meta: cg.SetPremoveMetadata): void {
unsetPredrop(state);
state.premovable.current = [orig, dest];
callUserFunction(state.premovable.events.set, orig, dest, meta);
}
export function unsetPremove(state: HeadlessState): void {
if (state.premovable.current) {
state.premovable.current = undefined;
callUserFunction(state.premovable.events.unset);
}
}
function setPredrop(state: HeadlessState, role: cg.Role, key: cg.Key): void {
unsetPremove(state);
state.predroppable.current = { role, key };
callUserFunction(state.predroppable.events.set, role, key);
}
export function unsetPredrop(state: HeadlessState): void {
const pd = state.predroppable;
if (pd.current) {
pd.current = undefined;
callUserFunction(pd.events.unset);
}
}
function tryAutoCastle(state: HeadlessState, orig: cg.Key, dest: cg.Key): boolean {
if (!state.autoCastle) return false;
const king = state.pieces.get(orig);
if (!king || king.role !== 'king') return false;
const origPos = key2pos(orig);
const destPos = key2pos(dest);
if ((origPos[1] !== 0 && origPos[1] !== 7) || origPos[1] !== destPos[1]) return false;
if (origPos[0] === 4 && !state.pieces.has(dest)) {
if (destPos[0] === 6) dest = pos2key([7, destPos[1]]);
else if (destPos[0] === 2) dest = pos2key([0, destPos[1]]);
}
const rook = state.pieces.get(dest);
if (!rook || rook.color !== king.color || rook.role !== 'rook') return false;
state.pieces.delete(orig);
state.pieces.delete(dest);
if (origPos[0] < destPos[0]) {
state.pieces.set(pos2key([6, destPos[1]]), king);
state.pieces.set(pos2key([5, destPos[1]]), rook);
} else {
state.pieces.set(pos2key([2, destPos[1]]), king);
state.pieces.set(pos2key([3, destPos[1]]), rook);
}
return true;
}
export function baseMove(state: HeadlessState, orig: cg.Key, dest: cg.Key): cg.Piece | boolean {
const origPiece = state.pieces.get(orig),
destPiece = state.pieces.get(dest);
if (orig === dest || !origPiece) return false;
const captured = destPiece && destPiece.color !== origPiece.color ? destPiece : undefined;
if (dest === state.selected) unselect(state);
callUserFunction(state.events.move, orig, dest, captured);
if (!tryAutoCastle(state, orig, dest)) {
state.pieces.set(dest, origPiece);
state.pieces.delete(orig);
}
state.lastMove = [orig, dest];
state.check = undefined;
callUserFunction(state.events.change);
return captured || true;
}
export function baseNewPiece(state: HeadlessState, piece: cg.Piece, key: cg.Key, force?: boolean): boolean {
if (state.pieces.has(key)) {
if (force) state.pieces.delete(key);
else return false;
}
callUserFunction(state.events.dropNewPiece, piece, key);
state.pieces.set(key, piece);
state.lastMove = [key];
state.check = undefined;
callUserFunction(state.events.change);
state.movable.dests = undefined;
state.turnColor = opposite(state.turnColor);
return true;
}
function baseUserMove(state: HeadlessState, orig: cg.Key, dest: cg.Key): cg.Piece | boolean {
const result = baseMove(state, orig, dest);
if (result) {
state.movable.dests = undefined;
state.turnColor = opposite(state.turnColor);
state.animation.current = undefined;
}
return result;
}
export function userMove(state: HeadlessState, orig: cg.Key, dest: cg.Key): boolean {
if (canMove(state, orig, dest)) {
const result = baseUserMove(state, orig, dest);
if (result) {
const holdTime = state.hold.stop();
unselect(state);
const metadata: cg.MoveMetadata = {
premove: false,
ctrlKey: state.stats.ctrlKey,
holdTime,
};
if (result !== true) metadata.captured = result;
callUserFunction(state.movable.events.after, orig, dest, metadata);
return true;
}
} else if (canPremove(state, orig, dest)) {
setPremove(state, orig, dest, {
ctrlKey: state.stats.ctrlKey,
});
unselect(state);
return true;
}
unselect(state);
return false;
}
export function dropNewPiece(state: HeadlessState, orig: cg.Key, dest: cg.Key, force?: boolean): void {
const piece = state.pieces.get(orig);
if (piece && (canDrop(state, orig, dest) || force)) {
state.pieces.delete(orig);
baseNewPiece(state, piece, dest, force);
callUserFunction(state.movable.events.afterNewPiece, piece.role, dest, {
premove: false,
predrop: false,
});
} else if (piece && canPredrop(state, orig, dest)) {
setPredrop(state, piece.role, dest);
} else {
unsetPremove(state);
unsetPredrop(state);
}
state.pieces.delete(orig);
unselect(state);
}
export function selectSquare(state: HeadlessState, key: cg.Key, force?: boolean): void {
callUserFunction(state.events.select, key);
if (state.selected) {
if (state.selected === key && !state.draggable.enabled) {
unselect(state);<|fim▁hole|> state.hold.cancel();
return;
} else if ((state.selectable.enabled || force) && state.selected !== key) {
if (userMove(state, state.selected, key)) {
state.stats.dragged = false;
return;
}
}
}
if (isMovable(state, key) || isPremovable(state, key)) {
setSelected(state, key);
state.hold.start();
}
}
export function setSelected(state: HeadlessState, key: cg.Key): void {
state.selected = key;
if (isPremovable(state, key)) {
state.premovable.dests = premove(state.pieces, key, state.premovable.castle);
} else state.premovable.dests = undefined;
}
export function unselect(state: HeadlessState): void {
state.selected = undefined;
state.premovable.dests = undefined;
state.hold.cancel();
}
function isMovable(state: HeadlessState, orig: cg.Key): boolean {
const piece = state.pieces.get(orig);
return (
!!piece &&
(state.movable.color === 'both' || (state.movable.color === piece.color && state.turnColor === piece.color))
);
}
export function canMove(state: HeadlessState, orig: cg.Key, dest: cg.Key): boolean {
return (
orig !== dest && isMovable(state, orig) && (state.movable.free || !!state.movable.dests?.get(orig)?.includes(dest))
);
}
function canDrop(state: HeadlessState, orig: cg.Key, dest: cg.Key): boolean {
const piece = state.pieces.get(orig);
return (
!!piece &&
(orig === dest || !state.pieces.has(dest)) &&
(state.movable.color === 'both' || (state.movable.color === piece.color && state.turnColor === piece.color))
);
}
function isPremovable(state: HeadlessState, orig: cg.Key): boolean {
const piece = state.pieces.get(orig);
return !!piece && state.premovable.enabled && state.movable.color === piece.color && state.turnColor !== piece.color;
}
function canPremove(state: HeadlessState, orig: cg.Key, dest: cg.Key): boolean {
return (
orig !== dest && isPremovable(state, orig) && premove(state.pieces, orig, state.premovable.castle).includes(dest)
);
}
function canPredrop(state: HeadlessState, orig: cg.Key, dest: cg.Key): boolean {
const piece = state.pieces.get(orig);
const destPiece = state.pieces.get(dest);
return (
!!piece &&
(!destPiece || destPiece.color !== state.movable.color) &&
state.predroppable.enabled &&
(piece.role !== 'pawn' || (dest[1] !== '1' && dest[1] !== '8')) &&
state.movable.color === piece.color &&
state.turnColor !== piece.color
);
}
export function isDraggable(state: HeadlessState, orig: cg.Key): boolean {
const piece = state.pieces.get(orig);
return (
!!piece &&
state.draggable.enabled &&
(state.movable.color === 'both' ||
(state.movable.color === piece.color && (state.turnColor === piece.color || state.premovable.enabled)))
);
}
export function playPremove(state: HeadlessState): boolean {
const move = state.premovable.current;
if (!move) return false;
const orig = move[0],
dest = move[1];
let success = false;
if (canMove(state, orig, dest)) {
const result = baseUserMove(state, orig, dest);
if (result) {
const metadata: cg.MoveMetadata = { premove: true };
if (result !== true) metadata.captured = result;
callUserFunction(state.movable.events.after, orig, dest, metadata);
success = true;
}
}
unsetPremove(state);
return success;
}
export function playPredrop(state: HeadlessState, validate: (drop: cg.Drop) => boolean): boolean {
const drop = state.predroppable.current;
let success = false;
if (!drop) return false;
if (validate(drop)) {
const piece = {
role: drop.role,
color: state.movable.color,
} as cg.Piece;
if (baseNewPiece(state, piece, drop.key)) {
callUserFunction(state.movable.events.afterNewPiece, drop.role, drop.key, {
premove: false,
predrop: true,
});
success = true;
}
}
unsetPredrop(state);
return success;
}
export function cancelMove(state: HeadlessState): void {
unsetPremove(state);
unsetPredrop(state);
unselect(state);
}
export function stop(state: HeadlessState): void {
state.movable.color = state.movable.dests = state.animation.current = undefined;
cancelMove(state);
}
export function getKeyAtDomPos(pos: cg.NumberPair, asWhite: boolean, bounds: ClientRect): cg.Key | undefined {
let file = Math.floor((8 * (pos[0] - bounds.left)) / bounds.width);
if (!asWhite) file = 7 - file;
let rank = 7 - Math.floor((8 * (pos[1] - bounds.top)) / bounds.height);
if (!asWhite) rank = 7 - rank;
return file >= 0 && file < 8 && rank >= 0 && rank < 8 ? pos2key([file, rank]) : undefined;
}
export function getSnappedKeyAtDomPos(
orig: cg.Key,
pos: cg.NumberPair,
asWhite: boolean,
bounds: ClientRect
): cg.Key | undefined {
const origPos = key2pos(orig);
const validSnapPos = allPos.filter(pos2 => {
return queen(origPos[0], origPos[1], pos2[0], pos2[1]) || knight(origPos[0], origPos[1], pos2[0], pos2[1]);
});
const validSnapCenters = validSnapPos.map(pos2 => computeSquareCenter(pos2key(pos2), asWhite, bounds));
const validSnapDistances = validSnapCenters.map(pos2 => distanceSq(pos, pos2));
const [, closestSnapIndex] = validSnapDistances.reduce(
(a, b, index) => (a[0] < b ? a : [b, index]),
[validSnapDistances[0], 0]
);
return pos2key(validSnapPos[closestSnapIndex]);
}
export function whitePov(s: HeadlessState): boolean {
return s.orientation === 'white';
}<|fim▁end|> | |
<|file_name|>SortOrder.java<|end_file_name|><|fim▁begin|>//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.11
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2015.08.19 at 01:05:06 PM PDT
//
package com.google.api.ads.adwords.lib.jaxb.v201509;
import javax.xml.bind.annotation.XmlEnum;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for SortOrder.
*
* <p>The following schema fragment specifies the expected content contained within this class.
* <p>
* <pre>
* <simpleType name="SortOrder"><|fim▁hole|> * <enumeration value="ASCENDING"/>
* <enumeration value="DESCENDING"/>
* </restriction>
* </simpleType>
* </pre>
*
*/
@XmlType(name = "SortOrder")
@XmlEnum
public enum SortOrder {
ASCENDING,
DESCENDING;
public String value() {
return name();
}
public static SortOrder fromValue(String v) {
return valueOf(v);
}
}<|fim▁end|> | * <restriction base="{http://www.w3.org/2001/XMLSchema}string"> |
<|file_name|>clss.py<|end_file_name|><|fim▁begin|>import conv
import tools
from ..api.clss import api
from ..sql.clss import sql
from pandas import DataFrame
import time as tm
<|fim▁hole|>class data(object):
def __init__(self):
self.a = api()
self.s = sql()
self.jobs = []
self.trd = DataFrame()
self.prc = DataFrame()
def add_trades(self, exchange, symbol, limit='', since='',
auto_since='no', ping_limit=1.0):
job = {'exchange':exchange,'symbol':symbol}
self.a.add_job(exchange, symbol, 'trades', limit=limit, since=since,
auto_since=auto_since, ping_limit=ping_limit)
self.jobs.append(job)
def get_trades(self, exchange='', symbol='', start=''):
trd = self.s.select('trades',exchange=exchange,
symbol=symbol,start=start)
self.trd = self.trd.append(trd)
self.trd = self.trd.drop_duplicates(['tid','exchange'])
def run_trades(self, exchange, symbol):
self.trd = self.trd.append(self.a.run(exchange,symbol,'trades'))
self.trd = self.trd.drop_duplicates(['tid','exchange'])
def run_loop(self, time, to_sql=60, log='no'):
dump = tm.time() + to_sql
end = tm.time() + time
while tm.time() < end:
for job in self.jobs:
self.run_trades(job['exchange'], job['symbol'])
if tm.time() > dump:
dump = tm.time() + to_sql
self.to_sql(log)
def get_price(self, exchange='', symbol='',
freq='', start=''):
prc = self.s.select('price',exchange=exchange,symbol=symbol,
freq=freq, start=start)
self.prc = self.prc.append(prc)
self.prc = self.prc.drop_duplicates(['timestamp','exchange',
'symbol','freq'])
return prc
def run_price(self, exchange, symbol, freq, label='left',
from_sql='no', start=''):
if from_sql == 'yes':
self.get_trades(exchange, symbol, start=start)
# get_trades already applied exchange, symbol checks
trd = self.trd
else:
trd = self.trd
if exchange <> '':
trd = self.trd[self.trd.exchange==exchange]
if symbol <> '':
trd = self.trd[self.trd.symbol==symbol]
trd = tools.date_index(trd)
if len(trd.index) > 0:
prc = conv.olhcv(trd, freq, label=label)
self.prc = self.prc.append(prc)
self.prc = self.prc.drop_duplicates(['timestamp','exchange',
'symbol','freq'])
def to_sql(self, log='no'):
if 'sent' in self.trd:
trd = self.trd[self.trd['sent']<>'yes']
else:
trd = self.trd
if 'sent' in self.prc:
prc = self.prc[self.prc['sent']<>'yes']
else:
prc = self.prc
self.s.insert('trades', trd)
self.s.insert('price', prc)
if log == 'yes':
print trd
print prc
self.trd['sent'] = 'yes'
self.prc['sent'] = 'yes'<|fim▁end|> | |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls.defaults import patterns, url
urlpatterns = patterns(
'popcorn_gallery.users.views',
url(r'^edit/$', 'edit', name='users_edit'),
url(r'^delete/$', 'delete_profile', name='users_delete'),
url(r'^(?P<username>[\w-]+)/$', 'profile', name='users_profile'),<|fim▁hole|><|fim▁end|> | ) |
<|file_name|>xml2json.min.js<|end_file_name|><|fim▁begin|>function X2JS(_1){
"use strict";
var _2="1.1.2";
_1=_1||{};
_3();
function _3(){
if(_1.escapeMode===undefined){
_1.escapeMode=true;
}
if(_1.attributePrefix===undefined){
_1.attributePrefix="_";
}
if(_1.arrayAccessForm===undefined){
_1.arrayAccessForm="none";
}
if(_1.emptyNodeForm===undefined){
_1.emptyNodeForm="text";
}
};
var _4={ELEMENT_NODE:1,TEXT_NODE:3,CDATA_SECTION_NODE:4,DOCUMENT_NODE:9};
function _5(_6){
var _7=_6.localName;
if(_7==null){
_7=_6.baseName;
}
if(_7==null||_7==""){
_7=_6.nodeName;
}
return _7;
};
function _8(_9){
return _9.prefix;
};
function _a(_b){
if(typeof (_b)=="string"){
return _b.replace(/&/g,"&").replace(/</g,"<").replace(/>/g,">").replace(/"/g,""").replace(/'/g,"'").replace(/\//g,"/");
}else{
return _b;
}
};
function _c(_d){
return _d.replace(/&/g,"&").replace(/</g,"<").replace(/>/g,">").replace(/"/g,"\"").replace(/'/g,"'").replace(///g,"/");
};
function _e(_f,_10){
switch(_1.arrayAccessForm){
case "property":
if(!(_f[_10] instanceof Array)){
_f[_10+"_asArray"]=[_f[_10]];
}else{
_f[_10+"_asArray"]=_f[_10];
}
break;
}
};
function _11(_12){
if(_12.nodeType==_4.DOCUMENT_NODE){
var _13=new Object;
var _14=_12.firstChild;
var _15=_5(_14);
_13[_15]=_11(_14);
return _13;
}else{
if(_12.nodeType==_4.ELEMENT_NODE){
var _13=new Object;
_13.__cnt=0;
var _16=_12.childNodes;
for(var _17=0;_17<_16.length;_17++){
var _14=_16.item(_17);
var _15=_5(_14);
_13.__cnt++;
if(_13[_15]==null){
_13[_15]=_11(_14);
_e(_13,_15);
}else{
if(_13[_15]!=null){
if(!(_13[_15] instanceof Array)){<|fim▁hole|>var _18=0;
while(_13[_15][_18]!=null){
_18++;
}
(_13[_15])[_18]=_11(_14);
}
}
for(var _19=0;_19<_12.attributes.length;_19++){
var _1a=_12.attributes.item(_19);
_13.__cnt++;
_13[_1.attributePrefix+_1a.name]=_1a.value;
}
var _1b=_8(_12);
if(_1b!=null&&_1b!=""){
_13.__cnt++;
_13.__prefix=_1b;
}
if(_13["#text"]!=null){
_13.__text=_13["#text"];
if(_13.__text instanceof Array){
_13.__text=_13.__text.join("\n");
}
if(_1.escapeMode){
_13.__text=_c(_13.__text);
}
delete _13["#text"];
if(_1.arrayAccessForm=="property"){
delete _13["#text_asArray"];
}
}
if(_13["#cdata-section"]!=null){
_13.__cdata=_13["#cdata-section"];
delete _13["#cdata-section"];
if(_1.arrayAccessForm=="property"){
delete _13["#cdata-section_asArray"];
}
}
if(_13.__cnt==1&&_13.__text!=null){
_13=_13.__text;
}else{
if(_13.__cnt==0&&_1.emptyNodeForm=="text"){
_13="";
}
}
delete _13.__cnt;
if(_13.__text!=null||_13.__cdata!=null){
_13.toString=function(){
return (this.__text!=null?this.__text:"")+(this.__cdata!=null?this.__cdata:"");
};
}
return _13;
}else{
if(_12.nodeType==_4.TEXT_NODE||_12.nodeType==_4.CDATA_SECTION_NODE){
return _12.nodeValue;
}
}
}
};
function _1c(_1d,_1e,_1f,_20){
var _21="<"+((_1d!=null&&_1d.__prefix!=null)?(_1d.__prefix+":"):"")+_1e;
if(_1f!=null){
for(var _22=0;_22<_1f.length;_22++){
var _23=_1f[_22];
var _24=_1d[_23];
_21+=" "+_23.substr(_1.attributePrefix.length)+"='"+_24+"'";
}
}
if(!_20){
_21+=">";
}else{
_21+="/>";
}
return _21;
};
function _25(_26,_27){
return "</"+(_26.__prefix!=null?(_26.__prefix+":"):"")+_27+">";
};
function _28(str,_29){
return str.indexOf(_29,str.length-_29.length)!==-1;
};
function _2a(_2b,_2c){
if((_1.arrayAccessForm=="property"&&_28(_2c.toString(),("_asArray")))||_2c.toString().indexOf(_1.attributePrefix)==0||_2c.toString().indexOf("__")==0||(_2b[_2c] instanceof Function)){
return true;
}else{
return false;
}
};
function _2d(_2e){
var _2f=0;
if(_2e instanceof Object){
for(var it in _2e){
if(_2a(_2e,it)){
continue;
}
_2f++;
}
}
return _2f;
};
function _30(_31){
var _32=[];
if(_31 instanceof Object){
for(var ait in _31){
if(ait.toString().indexOf("__")==-1&&ait.toString().indexOf(_1.attributePrefix)==0){
_32.push(ait);
}
}
}
return _32;
};
function _33(_34){
var _35="";
if(_34.__cdata!=null){
_35+="<![CDATA["+_34.__cdata+"]]>";
}
if(_34.__text!=null){
if(_1.escapeMode){
_35+=_a(_34.__text);
}else{
_35+=_34.__text;
}
}
return _35;
};
function _36(_37){
var _38="";
if(_37 instanceof Object){
_38+=_33(_37);
}else{
if(_37!=null){
if(_1.escapeMode){
_38+=_a(_37);
}else{
_38+=_37;
}
}
}
return _38;
};
function _39(_3a,_3b,_3c){
var _3d="";
if(_3a.length==0){
_3d+=_1c(_3a,_3b,_3c,true);
}else{
for(var _3e=0;_3e<_3a.length;_3e++){
_3d+=_1c(_3a[_3e],_3b,_30(_3a[_3e]),false);
_3d+=_3f(_3a[_3e]);
_3d+=_25(_3a[_3e],_3b);
}
}
return _3d;
};
function _3f(_40){
var _41="";
var _42=_2d(_40);
if(_42>0){
for(var it in _40){
if(_2a(_40,it)){
continue;
}
var _43=_40[it];
var _44=_30(_43);
if(_43==null||_43==undefined){
_41+=_1c(_43,it,_44,true);
}else{
if(_43 instanceof Object){
if(_43 instanceof Array){
_41+=_39(_43,it,_44);
}else{
var _45=_2d(_43);
if(_45>0||_43.__text!=null||_43.__cdata!=null){
_41+=_1c(_43,it,_44,false);
_41+=_3f(_43);
_41+=_25(_43,it);
}else{
_41+=_1c(_43,it,_44,true);
}
}
}else{
_41+=_1c(_43,it,_44,false);
_41+=_36(_43);
_41+=_25(_43,it);
}
}
}
}
_41+=_36(_40);
return _41;
};
this.parseXmlString=function(_46){
if(_46===undefined){
return null;
}
var _47;
if(window.DOMParser){
var _48=new window.DOMParser();
_47=_48.parseFromString(_46,"text/xml");
}else{
if(_46.indexOf("<?")==0){
_46=_46.substr(_46.indexOf("?>")+2);
}
_47=new ActiveXObject("Microsoft.XMLDOM");
_47.async="false";
_47.loadXML(_46);
}
return _47;
};
this.asArray=function(_49){
if(_49 instanceof Array){
return _49;
}else{
return [_49];
}
};
this.xml2json=function(_4a){
return _11(_4a);
};
this.xml_str2json=function(_4b){
var _4c=this.parseXmlString(_4b);
return this.xml2json(_4c);
};
this.json2xml_str=function(_4d){
return _3f(_4d);
};
this.json2xml=function(_4e){
var _4f=this.json2xml_str(_4e);
return this.parseXmlString(_4f);
};
this.getVersion=function(){
return _2;
};
};<|fim▁end|> | _13[_15]=[_13[_15]];
_e(_13,_15);
}
} |
<|file_name|>check_static_recursion_foreign_helper.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.<|fim▁hole|>
#![feature(libc)]
#[crate_id = "check_static_recursion_foreign_helper"]
#[crate_type = "lib"]
extern crate libc;
#[no_mangle]
pub static test_static: libc::c_int = 0;<|fim▁end|> |
// Helper definition for test/run-pass/check-static-recursion-foreign.rs. |
<|file_name|>package.py<|end_file_name|><|fim▁begin|># Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *<|fim▁hole|>class Hashcat(MakefilePackage):
"""hashcat is the world's fastest and most advanced password recovery
utility, supporting five unique modes of attack for over 300 highly
optimized hashing algorithms. hashcat currently supports CPUs, GPUs,
and other hardware accelerators on Linux, Windows, and macOS,and has
facilities to help enable distributed password cracking."""
homepage = "https://hashcat.net/hashcat/"
url = "https://github.com/hashcat/hashcat/archive/v6.1.1.tar.gz"
version('6.1.1', sha256='39c140bbb3c0bdb1564bfa9b9a1cff49115a42f4c9c19e9b066b617aea309f80')
version('6.1.0', sha256='916f92434e3b36a126be1d1247a95cd3b32b4d814604960a2ca325d4cc0542d1')
version('6.0.0', sha256='e8e70f2a5a608a4e224ccf847ad2b8e4d68286900296afe00eb514d8c9ec1285')
version('5.1.0', sha256='283beaa68e1eab41de080a58bb92349c8e47a2bb1b93d10f36ea30f418f1e338')
version('5.0.0', sha256='7092d98cf0d8b29bd6efe2cf94802442dd8d7283982e9439eafbdef62b0db08f')
def install(self, spec, prefix):
make('SHARED=1', 'PREFIX={0}'.format(prefix), 'install')<|fim▁end|> | |
<|file_name|>template_testing.py<|end_file_name|><|fim▁begin|>#!/bin/python3
# Testscript for template generation and deploying
from cloud_provider.amazon import Amazon
from template.template import CloudFormationTemplate
from pprint import pprint
if __name__ == "__main__":
# Amazon Settings
region = "eu-west-1"
stack_name = 'TestStack'
# Template settings
template_file = '/tmp/template.txt'
template_json_source_file = 'test-cluster.json'
# Create template
cfn_template = CloudFormationTemplate()
cfn_template.load_json_source(template_json_source_file)
cfn_template.save_template_file(template_file)
# pprint(cfn_template.source)
# Connect to Amazon CloudFormation
aws = Amazon(region)
# Deploy CloudFormation Template
aws.deploy_stack(stack_name, template_file=template_file)
# Delete Stack if error occured<|fim▁hole|><|fim▁end|> | # aws.delete_stack(stack_name) |
<|file_name|>util.py<|end_file_name|><|fim▁begin|># This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.<|fim▁hole|>
from __future__ import unicode_literals
def serialize_ip_network_group(group):
"""Serialize group to JSON-like object"""
return {
'id': group.id,
'name': group.name,
'identifier': 'IPNetworkGroup:{}'.format(group.id),
'_type': 'IPNetworkGroup'
}<|fim▁end|> | #
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>. |
<|file_name|>selector-css3.js<|end_file_name|><|fim▁begin|>/*
YUI 3.17.0 (build ce55cc9)
Copyright 2014 Yahoo! Inc. All rights reserved.
Licensed under the BSD License.
http://yuilibrary.com/license/
*/
YUI.add('selector-css3', function (Y, NAME) {
/**
* The selector css3 module provides support for css3 selectors.
* @module dom
* @submodule selector-css3
* @for Selector
*/
/*
an+b = get every _a_th node starting at the _b_th
0n+b = no repeat ("0" and "n" may both be omitted (together) , e.g. "0n+1" or "1", not "0+1"), return only the _b_th element
1n+b = get every element starting from b ("1" may may be omitted, e.g. "1n+0" or "n+0" or "n")
an+0 = get every _a_th element, "0" may be omitted
*/
Y.Selector._reNth = /^(?:([\-]?\d*)(n){1}|(odd|even)$)*([\-+]?\d*)$/;
Y.Selector._getNth = function(node, expr, tag, reverse) {
Y.Selector._reNth.test(expr);
var a = parseInt(RegExp.$1, 10), // include every _a_ elements (zero means no repeat, just first _a_)
n = RegExp.$2, // "n"
oddeven = RegExp.$3, // "odd" or "even"
b = parseInt(RegExp.$4, 10) || 0, // start scan from element _b_
result = [],
siblings = Y.DOM._children(node.parentNode, tag),
op;
if (oddeven) {
a = 2; // always every other
op = '+';
n = 'n';<|fim▁hole|> } else if ( isNaN(a) ) {
a = (n) ? 1 : 0; // start from the first or no repeat
}
if (a === 0) { // just the first
if (reverse) {
b = siblings.length - b + 1;
}
if (siblings[b - 1] === node) {
return true;
} else {
return false;
}
} else if (a < 0) {
reverse = !!reverse;
a = Math.abs(a);
}
if (!reverse) {
for (var i = b - 1, len = siblings.length; i < len; i += a) {
if ( i >= 0 && siblings[i] === node ) {
return true;
}
}
} else {
for (var i = siblings.length - b, len = siblings.length; i >= 0; i -= a) {
if ( i < len && siblings[i] === node ) {
return true;
}
}
}
return false;
};
Y.mix(Y.Selector.pseudos, {
'root': function(node) {
return node === node.ownerDocument.documentElement;
},
'nth-child': function(node, expr) {
return Y.Selector._getNth(node, expr);
},
'nth-last-child': function(node, expr) {
return Y.Selector._getNth(node, expr, null, true);
},
'nth-of-type': function(node, expr) {
return Y.Selector._getNth(node, expr, node.tagName);
},
'nth-last-of-type': function(node, expr) {
return Y.Selector._getNth(node, expr, node.tagName, true);
},
'last-child': function(node) {
var children = Y.DOM._children(node.parentNode);
return children[children.length - 1] === node;
},
'first-of-type': function(node) {
return Y.DOM._children(node.parentNode, node.tagName)[0] === node;
},
'last-of-type': function(node) {
var children = Y.DOM._children(node.parentNode, node.tagName);
return children[children.length - 1] === node;
},
'only-child': function(node) {
var children = Y.DOM._children(node.parentNode);
return children.length === 1 && children[0] === node;
},
'only-of-type': function(node) {
var children = Y.DOM._children(node.parentNode, node.tagName);
return children.length === 1 && children[0] === node;
},
'empty': function(node) {
return node.childNodes.length === 0;
},
'not': function(node, expr) {
return !Y.Selector.test(node, expr);
},
'contains': function(node, expr) {
var text = node.innerText || node.textContent || '';
return text.indexOf(expr) > -1;
},
'checked': function(node) {
return (node.checked === true || node.selected === true);
},
enabled: function(node) {
return (node.disabled !== undefined && !node.disabled);
},
disabled: function(node) {
return (node.disabled);
}
});
Y.mix(Y.Selector.operators, {
'^=': '^{val}', // Match starts with value
'$=': '{val}$', // Match ends with value
'*=': '{val}' // Match contains value as substring
});
Y.Selector.combinators['~'] = {
axis: 'previousSibling'
};
}, '3.17.0', {"requires": ["selector-native", "selector-css2"]});<|fim▁end|> | b = (oddeven === 'odd') ? 1 : 0; |
<|file_name|>11.js<|end_file_name|><|fim▁begin|>$(function () {
var colors = Highcharts.getOptions().colors,
categories = ['已关闭', 'NEW', '已解决'],
name = 'Browser brands',
data = [{
y: 290,
color: colors[0],
drilldown: {
name: 'close bug version',
categories: ['当前版本', '历史版本'],
data: [20,270],
color: colors[0]
}
}, {
y: 64,
color: colors[1],
drilldown: {
name: 'fix bug version',
categories: ['当前版本', '历史版本'],
data: [8,56],
color: colors[1]
}
}, {
y: 82,
color: colors[2],
drilldown: {
name: 'NEW bug versions',
categories: ['当前版本', '历史版本'],
data: [5,77],
color: colors[2]
}
}];
// Build the data arrays
var browserData = [];
var versionsData = [];
for (var i = 0; i < data.length; i++) {
// add browser data
browserData.push({
name: categories[i],
y: data[i].y,
color: data[i].color
});
// add version data
for (var j = 0; j < data[i].drilldown.data.length; j++) {
var brightness = 0.2 - (j / data[i].drilldown.data.length) / 5 ;
versionsData.push({
name: data[i].drilldown.categories[j],
y: data[i].drilldown.data[j],
color: Highcharts.Color(data[i].color).brighten(brightness).get()
});
}
}
// Create the chart
$('#container11').highcharts({
chart: {
type: 'pie'
},
title: {
text: '当前版本在历史版本总和占比'
},
yAxis: {
title: {
text: 'Total percent market share'
}
},
plotOptions: {
pie: {
shadow: false,
center: ['50%', '50%']
}
},
tooltip: {
valueSuffix: '' //这里更改tooltip显示的单位
},
series: [{
name: 'Browsers',
data: browserData,
size: '80%',
dataLabels: {
formatter: function() {
return this.y > 5 ? this.point.name : null;
},
color: 'white',
distance: -30
}
}, {
name: 'Versions',
data: versionsData,
size: '100%',
innerSize: '80%',
<|fim▁hole|> return this.y > 0 ? '<b>'+ this.point.name +':</b> '+ this.y+'个' : null;
}
}
}]
});
});<|fim▁end|> | dataLabels: {
formatter: function() {
// display only if larger than 1
|
<|file_name|>tests.rs<|end_file_name|><|fim▁begin|>#[test]
fn it_works() {
assert!(true);<|fim▁hole|><|fim▁end|> | } |
<|file_name|>test_build_scripts.py<|end_file_name|><|fim▁begin|>"""Tests for distutils.command.build_scripts."""
import os
import unittest
from distutils.command.build_scripts import build_scripts
from distutils.core import Distribution
import sysconfig
from distutils.tests import support
from test.test_support import run_unittest
class BuildScriptsTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_default_settings(self):
cmd = self.get_build_scripts_cmd("/foo/bar", [])
self.assertTrue(not cmd.force)
self.assertTrue(cmd.build_dir is None)
cmd.finalize_options()
self.assertTrue(cmd.force)
self.assertEqual(cmd.build_dir, "/foo/bar")
def test_build(self):
source = self.mkdtemp()
target = self.mkdtemp()
expected = self.write_sample_scripts(source)
cmd = self.get_build_scripts_cmd(target,
[os.path.join(source, fn)
for fn in expected])
cmd.finalize_options()
cmd.run()
built = os.listdir(target)
for name in expected:
self.assertTrue(name in built)
def get_build_scripts_cmd(self, target, scripts):
import sys
dist = Distribution()
dist.scripts = scripts
dist.command_obj["build"] = support.DummyCommand(
build_scripts=target,
force=1,
executable=sys.executable
)
return build_scripts(dist)
def write_sample_scripts(self, dir):
expected = []
expected.append("script1.py")
self.write_script(dir, "script1.py",
("#! /usr/bin/env python2.3\n"
"# bogus script w/ Python sh-bang\n"
<|fim▁hole|> "pass\n"))
expected.append("script2.py")
self.write_script(dir, "script2.py",
("#!/usr/bin/python\n"
"# bogus script w/ Python sh-bang\n"
"pass\n"))
expected.append("shell.sh")
self.write_script(dir, "shell.sh",
("#!/bin/sh\n"
"# bogus shell script w/ sh-bang\n"
"exit 0\n"))
return expected
def write_script(self, dir, name, text):
f = open(os.path.join(dir, name), "w")
try:
f.write(text)
finally:
f.close()
def test_version_int(self):
source = self.mkdtemp()
target = self.mkdtemp()
expected = self.write_sample_scripts(source)
cmd = self.get_build_scripts_cmd(target,
[os.path.join(source, fn)
for fn in expected])
cmd.finalize_options()
# http://bugs.python.org/issue4524
#
# On linux-g++-32 with command line `./configure --enable-ipv6
# --with-suffix=3`, python is compiled okay but the build scripts
# failed when writing the name of the executable
old = sysconfig.get_config_vars().get('VERSION')
sysconfig._CONFIG_VARS['VERSION'] = 4
try:
cmd.run()
finally:
if old is not None:
sysconfig._CONFIG_VARS['VERSION'] = old
built = os.listdir(target)
for name in expected:
self.assertTrue(name in built)
def test_suite():
return unittest.makeSuite(BuildScriptsTestCase)
if __name__ == "__main__":
run_unittest(test_suite())<|fim▁end|> | |
<|file_name|>SilverInlineEditorWidthTest.ts<|end_file_name|><|fim▁begin|>import { ApproxStructure, Assertions, UiFinder } from '@ephox/agar';
import { describe, it } from '@ephox/bedrock-client';
import { Arr, Fun, Type } from '@ephox/katamari';
import { Css, Scroll, SugarBody, SugarElement } from '@ephox/sugar';
import { McEditor, TinyDom } from '@ephox/wrap-mcagar';
import { assert } from 'chai';
import Editor from 'tinymce/core/api/Editor';
import { RawEditorOptions } from 'tinymce/core/api/OptionTypes';
import { ToolbarMode } from 'tinymce/themes/silver/api/Options';
import { pOpenMore } from '../../module/MenuUtils';
describe('browser.tinymce.themes.silver.editor.SilverInlineEditorWidthTest', () => {
const structureTest = (editor: Editor, container: SugarElement<Node>, maxWidth: number) =>
Assertions.assertStructure(
'Container structure',
ApproxStructure.build((s, str, arr) => s.element('div', {
classes: [ arr.has('tox-tinymce'), arr.has('tox-tinymce-inline') ],
children: [
s.element('div', {
classes: [ arr.has('tox-editor-container') ],
children: [
s.element('div', {
classes: [ arr.has('tox-editor-header') ],
styles: {
'max-width': str.is(`${maxWidth}px`)
},
children: [
s.element('div', {
classes: [ arr.has('tox-toolbar-overlord') ],
attrs: { role: str.is('group') }
}),
s.element('div', {
classes: [ arr.has('tox-anchorbar') ]
})
]
})
]
}),
s.element('div', {
classes: [ arr.has('tox-throbber') ]
})
]
})),
container
);
const assertWidth = (uiContainer: SugarElement<Node>, maxWidth: number, minWidth: number = 0) => {
const overlord = UiFinder.findIn(uiContainer, '.tox-toolbar-overlord').getOrDie();
const widthString = Css.get(overlord, 'width') || '0px';
const width = parseInt(widthString.replace('px', ''), 10);
assert.isAtMost(width, maxWidth, `Toolbar with should be less than ${maxWidth}px - ${width}<=${maxWidth}`);
assert.isAtLeast(width, minWidth, `Toolbar with should be greater than ${minWidth}px - ${width}>=${minWidth}`);
};
const testRender = (options: RawEditorOptions, expectedWidth: number, pActions?: (editor: Editor) => Promise<void>) => async () => {
Scroll.to(0, 0);
const editor = await McEditor.pFromSettings<Editor>({
menubar: false,
inline: true,
base_url: '/project/tinymce/js/tinymce',
toolbar_mode: 'floating',
...options
});
editor.focus();
await UiFinder.pWaitForVisible('Wait for the editor to show', SugarBody.body(), '.tox-editor-header');
const uiContainer = TinyDom.container(editor);
structureTest(editor, uiContainer, expectedWidth);
assertWidth(uiContainer, expectedWidth, expectedWidth - 100);
editor.setContent(Arr.range(100, Fun.constant('<p></p>')).join(''));
Scroll.to(0, 500);
await UiFinder.pWaitForVisible('Wait to be docked', SugarBody.body(), '.tox-tinymce--toolbar-sticky-on .tox-editor-header');
assertWidth(uiContainer, expectedWidth, expectedWidth - 100);
// Run optional additional actions
if (Type.isNonNullable(pActions)) {
await pActions(editor);
}
McEditor.remove(editor);<|fim▁hole|> it('Check max-width is 400px when set via init', testRender({ width: 400 }, 400));
it('Check max-width is 400px when set via element', testRender({
setup: (ed: Editor) => {
Css.set(SugarElement.fromDom(ed.getElement()), 'width', '400px');
}
}, 400));
it('Check max-width is constrained to the body width when no width set', testRender({
setup: (ed: Editor) => {
ed.on('PreInit', () => {
Css.set(SugarBody.body(), 'width', '400px');
});
ed.on('remove', () => {
Css.remove(SugarBody.body(), 'width');
});
}
}, 400));
it('Check width when expanding sliding toolbar while docked', testRender({
toolbar_mode: 'sliding',
width: 400
}, 400, async (editor) => {
await pOpenMore(ToolbarMode.sliding);
assertWidth(SugarElement.fromDom(editor.getContainer()), 400, 300);
}));
});<|fim▁end|> | };
|
<|file_name|>tst_qsslkey.cpp<|end_file_name|><|fim▁begin|>/****************************************************************************
**
** Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
** All rights reserved.
** Contact: Nokia Corporation ([email protected])
**
** This file is part of the test suite of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:LGPL$
** No Commercial Usage
** This file contains pre-release code and may not be distributed.
** You may use this file in accordance with the terms and conditions
** contained in the Technology Preview License Agreement accompanying
** this package.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 2.1 as published by the Free Software
** Foundation and appearing in the file LICENSE.LGPL included in the
** packaging of this file. Please review the following information to
** ensure the GNU Lesser General Public License version 2.1 requirements
** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
**
** In addition, as a special exception, Nokia gives you certain additional
** rights. These rights are described in the Nokia Qt LGPL Exception
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
**
** If you have questions regarding the use of this file, please contact
** Nokia at [email protected].
**
**
**
**
**
**
**
**
** $QT_END_LICENSE$
**
****************************************************************************/
#include <QtTest/QtTest>
#include <qsslkey.h>
#include <qsslsocket.h>
#include <QtNetwork/qhostaddress.h>
#include <QtNetwork/qnetworkproxy.h>
#ifdef Q_OS_SYMBIAN
// In Symbian OS test data is located in applications private dir
// Current path (C:\private\<UID>) contains only ascii chars
#define SRCDIR QDir::currentPath().toAscii()
#endif
class tst_QSslKey : public QObject
{
Q_OBJECT
struct KeyInfo {
QFileInfo fileInfo;
QSsl::KeyAlgorithm algorithm;
QSsl::KeyType type;
int length;
QSsl::EncodingFormat format;
KeyInfo(
const QFileInfo &fileInfo, QSsl::KeyAlgorithm algorithm, QSsl::KeyType type,
int length, QSsl::EncodingFormat format)
: fileInfo(fileInfo), algorithm(algorithm), type(type), length(length)
, format(format) {}
};
QList<KeyInfo> keyInfoList;
void createPlainTestRows();
public:
tst_QSslKey();
virtual ~tst_QSslKey();
public slots:
void initTestCase_data();
void init();
void cleanup();
#ifndef QT_NO_OPENSSL
private slots:
void emptyConstructor();
void constructor_data();
void constructor();
void copyAndAssign_data();
void copyAndAssign();
void equalsOperator();
void length_data();
void length();
void toPemOrDer_data();
void toPemOrDer();
void toEncryptedPemOrDer_data();
void toEncryptedPemOrDer();
#endif
};
tst_QSslKey::tst_QSslKey()
{
#ifdef Q_WS_MAC
// applicationDirPath() points to a path inside the app bundle on Mac.
QDir dir(qApp->applicationDirPath() + QLatin1String("/../../../keys"));
#elif defined(Q_OS_WIN) || defined (Q_OS_SYMBIAN)
QDir dir(SRCDIR + QLatin1String("/keys")); // prefer this way to avoid ifdeffery and support shadow builds?
#else
QDir dir(qApp->applicationDirPath() + QLatin1String("/keys"));
#endif
QFileInfoList fileInfoList = dir.entryInfoList(QDir::Files | QDir::Readable);
QRegExp rx(QLatin1String("^(rsa|dsa)-(pub|pri)-(\\d+)\\.(pem|der)$"));
foreach (QFileInfo fileInfo, fileInfoList) {
if (rx.indexIn(fileInfo.fileName()) >= 0)
keyInfoList << KeyInfo(
fileInfo,<|fim▁hole|> rx.cap(4) == QLatin1String("pem") ? QSsl::Pem : QSsl::Der);
}
}
tst_QSslKey::~tst_QSslKey()
{
}
void tst_QSslKey::initTestCase_data()
{
}
void tst_QSslKey::init()
{
}
void tst_QSslKey::cleanup()
{
}
static QByteArray readFile(const QString &absFilePath)
{
QFile file(absFilePath);
if (!file.open(QIODevice::ReadOnly)) {
QWARN("failed to open file");
return QByteArray();
}
return file.readAll();
}
#ifndef QT_NO_OPENSSL
void tst_QSslKey::emptyConstructor()
{
if (!QSslSocket::supportsSsl())
return;
QSslKey key;
QVERIFY(key.isNull());
QVERIFY(key.length() < 0);
QSslKey key2;
QCOMPARE(key, key2);
}
Q_DECLARE_METATYPE(QSsl::KeyAlgorithm);
Q_DECLARE_METATYPE(QSsl::KeyType);
Q_DECLARE_METATYPE(QSsl::EncodingFormat);
void tst_QSslKey::createPlainTestRows()
{
QTest::addColumn<QString>("absFilePath");
QTest::addColumn<QSsl::KeyAlgorithm>("algorithm");
QTest::addColumn<QSsl::KeyType>("type");
QTest::addColumn<int>("length");
QTest::addColumn<QSsl::EncodingFormat>("format");
foreach (KeyInfo keyInfo, keyInfoList) {
QTest::newRow(keyInfo.fileInfo.fileName().toLatin1())
<< keyInfo.fileInfo.absoluteFilePath() << keyInfo.algorithm << keyInfo.type
<< keyInfo.length << keyInfo.format;
}
}
void tst_QSslKey::constructor_data()
{
createPlainTestRows();
}
void tst_QSslKey::constructor()
{
if (!QSslSocket::supportsSsl())
return;
QFETCH(QString, absFilePath);
QFETCH(QSsl::KeyAlgorithm, algorithm);
QFETCH(QSsl::KeyType, type);
QFETCH(QSsl::EncodingFormat, format);
QByteArray encoded = readFile(absFilePath);
QSslKey key(encoded, algorithm, format, type);
QVERIFY(!key.isNull());
}
void tst_QSslKey::copyAndAssign_data()
{
createPlainTestRows();
}
// Copy construction and assignment must both produce a key that compares
// equal to the source and agrees on every observable property.
void tst_QSslKey::copyAndAssign()
{
    if (!QSslSocket::supportsSsl())
        return;
    QFETCH(QString, absFilePath);
    QFETCH(QSsl::KeyAlgorithm, algorithm);
    QFETCH(QSsl::KeyType, type);
    QFETCH(QSsl::EncodingFormat, format);
    QByteArray encoded = readFile(absFilePath);
    QSslKey key(encoded, algorithm, format, type);
    // Copy constructor.
    QSslKey copied(key);
    QCOMPARE(key, copied);
    QCOMPARE(key.algorithm(), copied.algorithm());
    QCOMPARE(key.type(), copied.type());
    QCOMPARE(key.length(), copied.length());
    QCOMPARE(key.toPem(), copied.toPem());
    QCOMPARE(key.toDer(), copied.toDer());
    // Assignment (copy-initialization).
    QSslKey assigned = key;
    QCOMPARE(key, assigned);
    QCOMPARE(key.algorithm(), assigned.algorithm());
    QCOMPARE(key.type(), assigned.type());
    QCOMPARE(key.length(), assigned.length());
    QCOMPARE(key.toPem(), assigned.toPem());
    QCOMPARE(key.toDer(), assigned.toDer());
}
// Placeholder for dedicated operator== coverage.
void tst_QSslKey::equalsOperator()
{
    // ### unimplemented
}
// Data rows for length(): every discovered key file.
void tst_QSslKey::length_data()
{
    createPlainTestRows();
}
// The parsed key's bit length must match the length encoded in the file name
// (the "length" column supplied by createPlainTestRows()).
void tst_QSslKey::length()
{
    if (!QSslSocket::supportsSsl())
        return;
    QFETCH(QString, absFilePath);
    QFETCH(QSsl::KeyAlgorithm, algorithm);
    QFETCH(QSsl::KeyType, type);
    QFETCH(int, length);
    QFETCH(QSsl::EncodingFormat, format);
    QByteArray encoded = readFile(absFilePath);
    QSslKey key(encoded, algorithm, format, type);
    QVERIFY(!key.isNull());
    QCOMPARE(key.length(), length);
}
// Data rows for toPemOrDer(): every discovered key file.
void tst_QSslKey::toPemOrDer_data()
{
    createPlainTestRows();
}
// Re-serializing a parsed key must reproduce the original file contents
// (for PEM, after normalizing the reference data's line endings).
void tst_QSslKey::toPemOrDer()
{
    if (!QSslSocket::supportsSsl())
        return;
    QFETCH(QString, absFilePath);
    QFETCH(QSsl::KeyAlgorithm, algorithm);
    QFETCH(QSsl::KeyType, type);
    QFETCH(QSsl::EncodingFormat, format);
    QByteArray encoded = readFile(absFilePath);
    QSslKey key(encoded, algorithm, format, type);
    QVERIFY(!key.isNull());
    if (format == QSsl::Pem)
        // Reference PEM files may contain CRLF; strip CR before comparing.
        encoded.replace('\r', "");
    QCOMPARE(format == QSsl::Pem ? key.toPem() : key.toDer(), encoded);
}
// Data rows for toEncryptedPemOrDer(): the cross product of every discovered
// key file with a set of passwords (spaces and punctuation included).
void tst_QSslKey::toEncryptedPemOrDer_data()
{
    QTest::addColumn<QString>("absFilePath");
    QTest::addColumn<QSsl::KeyAlgorithm>("algorithm");
    QTest::addColumn<QSsl::KeyType>("type");
    QTest::addColumn<QSsl::EncodingFormat>("format");
    QTest::addColumn<QString>("password");
    QStringList passwords;
    passwords << " " << "foobar" << "foo bar"
              << "aAzZ`1234567890-=~!@#$%^&*()_+[]{}\\|;:'\",.<>/?"; // ### add more (?)
    foreach (KeyInfo keyInfo, keyInfoList) {
        foreach (QString password, passwords) {
            // Row name encodes the file plus its decoded properties.
            QString testName = QString("%1-%2-%3-%4").arg(keyInfo.fileInfo.fileName())
                .arg(keyInfo.algorithm == QSsl::Rsa ? "RSA" : "DSA")
                .arg(keyInfo.type == QSsl::PrivateKey ? "PrivateKey" : "PublicKey")
                .arg(keyInfo.format == QSsl::Pem ? "PEM" : "DER");
            QTest::newRow(testName.toLatin1())
                << keyInfo.fileInfo.absoluteFilePath() << keyInfo.algorithm << keyInfo.type
                << keyInfo.format << password;
        }
    }
}
// Round-trip every key through the password-protected serializers.
// Private keys must survive an encrypt/decrypt round trip via PEM (the DER
// round trip is a known failure, marked with QEXPECT_FAIL); public keys must
// never be encrypted by toPem()/toDer(), whatever password is supplied.
//
// Cleanup vs. the previous version: the two consecutive
// `if (type == QSsl::PrivateKey) { … } else { … }` cascades are merged into
// one, and the inner `if (type == QSsl::PrivateKey)` guards — which were
// nested inside a branch already taken only for private keys, hence always
// true — are removed. Behavior is unchanged.
void tst_QSslKey::toEncryptedPemOrDer()
{
    if (!QSslSocket::supportsSsl())
        return;
    QFETCH(QString, absFilePath);
    QFETCH(QSsl::KeyAlgorithm, algorithm);
    QFETCH(QSsl::KeyType, type);
    QFETCH(QSsl::EncodingFormat, format);
    QFETCH(QString, password);
    QByteArray plain = readFile(absFilePath);
    QSslKey key(plain, algorithm, format, type);
    QVERIFY(!key.isNull());
    QByteArray pwBytes(password.toLatin1());
    if (type == QSsl::PrivateKey) {
        // PEM round trip: encrypt, re-parse with the password, compare.
        QByteArray encryptedPem = key.toPem(pwBytes);
        QVERIFY(!encryptedPem.isEmpty());
        QSslKey keyPem(encryptedPem, algorithm, QSsl::Pem, type, pwBytes);
        QVERIFY(!keyPem.isNull());
        QCOMPARE(keyPem, key);
        QCOMPARE(keyPem.toPem(), key.toPem());
        // DER round trip: encryptedDer is invalid at this point, hence the
        // QEXPECT_FAILs below.
        QByteArray encryptedDer = key.toDer(pwBytes);
        QVERIFY(!encryptedDer.isEmpty());
        QSslKey keyDer(encryptedDer, algorithm, QSsl::Der, type, pwBytes);
        QEXPECT_FAIL(
            QTest::currentDataTag(), "We're not able to decrypt these yet...", Continue);
        QVERIFY(!keyDer.isNull());
        QEXPECT_FAIL(
            QTest::currentDataTag(), "We're not able to decrypt these yet...", Continue);
        QCOMPARE(keyDer.toPem(), key.toPem());
    } else {
        // verify that public keys are never encrypted by toPem()
        QByteArray encryptedPem = key.toPem(pwBytes);
        QVERIFY(!encryptedPem.isEmpty());
        QByteArray plainPem = key.toPem();
        QVERIFY(!plainPem.isEmpty());
        QCOMPARE(encryptedPem, plainPem);
        // verify that public keys are never encrypted by toDer()
        QByteArray encryptedDer = key.toDer(pwBytes);
        QVERIFY(!encryptedDer.isEmpty());
        QByteArray plainDer = key.toDer();
        QVERIFY(!plainDer.isEmpty());
        QCOMPARE(encryptedDer, plainDer);
    }
    // ### add a test to verify that public keys are _decrypted_ correctly (by the ctor)
}
#endif
QTEST_MAIN(tst_QSslKey)
#include "tst_qsslkey.moc"<|fim▁end|> | rx.cap(1) == QLatin1String("rsa") ? QSsl::Rsa : QSsl::Dsa,
rx.cap(2) == QLatin1String("pub") ? QSsl::PublicKey : QSsl::PrivateKey,
rx.cap(3).toInt(), |
<|file_name|>url.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 click2stream, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and<|fim▁hole|>use std::fmt;
use std::result;
use std::error::Error;
use std::fmt::{Display, Formatter};
use std::str::FromStr;
/// URL parse error.
#[derive(Debug, Clone)]
pub struct UrlParseError {
    /// Human-readable description of what went wrong.
    msg: String,
}
impl UrlParseError {
    /// Create a new error.
    ///
    /// Accepts anything convertible to a `String` (e.g. `&str`).
    pub fn new<T>(msg: T) -> Self
    where
        T: ToString,
    {
        Self {
            msg: msg.to_string(),
        }
    }
}
// Marker impl: `UrlParseError` participates in the standard error machinery
// (it already provides `Debug` and `Display`).
impl Error for UrlParseError {}
impl Display for UrlParseError {
    /// Render the error as its plain message.
    fn fmt(&self, f: &mut Formatter) -> result::Result<(), fmt::Error> {
        f.write_str(&self.msg)
    }
}
/// Result alias.
///
/// Shorthand for `std::result::Result` specialized to [`UrlParseError`].
pub type Result<T> = result::Result<T, UrlParseError>;
/// Simple URL parser.
///
/// Every component is stored as a byte offset into `serialized`; the accessor
/// methods slice the original string, so nothing is copied after parsing.
#[derive(Clone)]
pub struct Url {
    /// The full URL text as given to `from_str`.
    serialized: String,
    /// Index just past the scheme's ':' (start of the hierarchy part).
    hier: usize,
    /// Start index of the host (after any "user[:pass]@").
    netloc: usize,
    /// Start index of the username, when user info is present.
    username: Option<usize>,
    /// Index just past the ':' separating username and password.
    password: Option<usize>,
    /// Start index of the port digits (just past the ':').
    portpos: Option<usize>,
    /// Parsed port number, when present.
    port: Option<u16>,
    /// Start index of the path (its leading '/').
    path: Option<usize>,
    /// Index just past the '?' introducing the query.
    query: Option<usize>,
    /// Index just past the '#' introducing the fragment.
    fragment: Option<usize>,
}
/// Initialize all URL fields.
fn init(&mut self) -> Result<()> {
if let Some(delim) = self.serialized.find(':') {
self.process_hierarchy(delim + 1)
} else {
Err(UrlParseError::new("invalid URL"))
}
}
/// Process the hierarchy part.
fn process_hierarchy(&mut self, start: usize) -> Result<()> {
self.hier = start;
if self.serialized[start..].starts_with("//") {
let authority = start + 2;
if let Some(pos) = self.serialized[authority..].find('/') {
let path = authority + pos;
self.process_authority(authority, path)?;
self.process_path(path);
} else {
let authority_end = self.serialized.len();
self.process_authority(authority, authority_end)?;
}
Ok(())
} else {
Err(UrlParseError::new("invalid URL"))
}
}
/// Process the authority part.
fn process_authority(&mut self, start: usize, end: usize) -> Result<()> {
if let Some(delim) = self.serialized[start..end].rfind('@') {
self.process_user_info(start, start + delim);
self.netloc = start + delim + 1;
} else {
self.netloc = start;
}
let netloc = &self.serialized[self.netloc..end];
if !netloc.ends_with(']') {
if let Some(delim) = netloc.rfind(':') {
let ppos = delim + 1;
let port = u16::from_str(&netloc[ppos..])
.map_err(|_| UrlParseError::new("invalid port"))?;
self.portpos = Some(self.netloc + ppos);
self.port = Some(port);
}
}
Ok(())
}
/// Process user info.
fn process_user_info(&mut self, start: usize, end: usize) {
let uinfo = &self.serialized[start..end];
self.username = Some(start);
if let Some(delim) = uinfo.find(':') {
self.password = Some(start + delim + 1);
}
}
/// Process the path part and everything that follows it.
fn process_path(&mut self, start: usize) {
let path = &self.serialized[start..];
self.path = Some(start);
if let Some(delim) = path.rfind('#') {
self.fragment = Some(start + delim + 1);
}
let end = self.fragment.map_or(self.serialized.len(), |f| f - 1);
let path = &self.serialized[start..end];
if let Some(delim) = path.rfind('?') {
self.query = Some(start + delim + 1);
}
}
/// Get URL scheme.
pub fn scheme(&self) -> &str {
&self.serialized[..self.hier - 1]
}
/// Get username.
pub fn username(&self) -> Option<&str> {
if let Some(uname_pos) = self.username {
if let Some(pwd_pos) = self.password {
Some(&self.serialized[uname_pos..pwd_pos - 1])
} else {
Some(&self.serialized[uname_pos..self.netloc - 1])
}
} else {
None
}
}
/// Get password.
pub fn password(&self) -> Option<&str> {
self.password
.map(|pwd_pos| &self.serialized[pwd_pos..self.netloc - 1])
}
/// Get host.
pub fn host(&self) -> &str {
if let Some(portpos) = self.portpos {
&self.serialized[self.netloc..portpos - 1]
} else if let Some(path) = self.path {
&self.serialized[self.netloc..path]
} else {
&self.serialized[self.netloc..]
}
}
/// Get port.
pub fn port(&self) -> Option<u16> {
self.port
}
/// Get path.
pub fn path(&self) -> &str {
if let Some(path) = self.path {
if let Some(query) = self.query {
&self.serialized[path..query - 1]
} else if let Some(fragment) = self.fragment {
&self.serialized[path..fragment - 1]
} else {
&self.serialized[path..]
}
} else {
"/"
}
}
/// Get query.
pub fn query(&self) -> Option<&str> {
if let Some(query) = self.query {
if let Some(fragment) = self.fragment {
Some(&self.serialized[query..fragment - 1])
} else {
Some(&self.serialized[query..])
}
} else {
None
}
}
/// Get fragment.
pub fn fragment(&self) -> Option<&str> {
self.fragment.map(|fragment| &self.serialized[fragment..])
}
}
impl AsRef<str> for Url {
    /// Borrow the original serialized URL text.
    fn as_ref(&self) -> &str {
        &self.serialized
    }
}
impl Display for Url {
    /// Write the URL exactly as it was parsed.
    fn fmt(&self, f: &mut Formatter) -> result::Result<(), fmt::Error> {
        f.write_str(&self.serialized)
    }
}
impl FromStr for Url {
    type Err = UrlParseError;

    /// Parse a URL from text.
    ///
    /// Offsets start zeroed / `None`; `init` fills them in and reports the
    /// first structural problem it finds.
    fn from_str(s: &str) -> Result<Self> {
        let mut url = Self {
            serialized: s.to_string(),
            hier: 0,
            netloc: 0,
            username: None,
            password: None,
            portpos: None,
            port: None,
            path: None,
            query: None,
            fragment: None,
        };
        url.init()?;
        Ok(url)
    }
}
#[cfg(test)]
mod test {
    // Unit tests for the `Url` parser: malformed-input rejection first, then
    // each optional component (user info, password, port, query, fragment)
    // in isolation, and finally one URL exercising everything at once.
    use super::*;

    #[test]
    fn test_plain_hostname() {
        // No scheme separator at all.
        let url = Url::from_str("foo");
        assert!(url.is_err());
    }

    #[test]
    fn test_no_authority() {
        // Scheme present but no "//" authority part.
        let url = Url::from_str("foo:bar");
        assert!(url.is_err());
    }

    #[test]
    fn test_invalid_port() {
        // 100000 does not fit in a u16.
        let url = Url::from_str("http://foo:100000");
        assert!(url.is_err());
    }

    #[test]
    fn test_minimal_url() {
        let url = Url::from_str("http://foo").unwrap();
        assert_eq!(url.scheme(), "http");
        assert_eq!(url.username(), None);
        assert_eq!(url.password(), None);
        assert_eq!(url.host(), "foo");
        assert_eq!(url.port(), None);
        assert_eq!(url.path(), "/");
        assert_eq!(url.query(), None);
        assert_eq!(url.fragment(), None);
    }

    // NOTE(review): despite the name, this URL carries port 12, not an
    // empty port — consider renaming the test.
    #[test]
    fn test_empty_port() {
        let url = Url::from_str("http://foo:12").unwrap();
        assert_eq!(url.scheme(), "http");
        assert_eq!(url.username(), None);
        assert_eq!(url.password(), None);
        assert_eq!(url.host(), "foo");
        assert_eq!(url.port(), Some(12));
        assert_eq!(url.path(), "/");
        assert_eq!(url.query(), None);
        assert_eq!(url.fragment(), None);
    }

    #[test]
    fn test_empty_username() {
        let url = Url::from_str("http://@foo/some/path").unwrap();
        assert_eq!(url.scheme(), "http");
        assert_eq!(url.username(), Some(""));
        assert_eq!(url.password(), None);
        assert_eq!(url.host(), "foo");
        assert_eq!(url.port(), None);
        assert_eq!(url.path(), "/some/path");
        assert_eq!(url.query(), None);
        assert_eq!(url.fragment(), None);
    }

    #[test]
    fn test_no_password() {
        let url = Url::from_str("http://user@foo/").unwrap();
        assert_eq!(url.scheme(), "http");
        assert_eq!(url.username(), Some("user"));
        assert_eq!(url.password(), None);
        assert_eq!(url.host(), "foo");
        assert_eq!(url.port(), None);
        assert_eq!(url.path(), "/");
        assert_eq!(url.query(), None);
        assert_eq!(url.fragment(), None);
    }

    #[test]
    fn test_empty_password() {
        let url = Url::from_str("http://user:@foo/").unwrap();
        assert_eq!(url.scheme(), "http");
        assert_eq!(url.username(), Some("user"));
        assert_eq!(url.password(), Some(""));
        assert_eq!(url.host(), "foo");
        assert_eq!(url.port(), None);
        assert_eq!(url.path(), "/");
        assert_eq!(url.query(), None);
        assert_eq!(url.fragment(), None);
    }

    #[test]
    fn test_password() {
        let url = Url::from_str("http://user:pass@foo/").unwrap();
        assert_eq!(url.scheme(), "http");
        assert_eq!(url.username(), Some("user"));
        assert_eq!(url.password(), Some("pass"));
        assert_eq!(url.host(), "foo");
        assert_eq!(url.port(), None);
        assert_eq!(url.path(), "/");
        assert_eq!(url.query(), None);
        assert_eq!(url.fragment(), None);
    }

    #[test]
    fn test_fragment_and_query() {
        let url = Url::from_str("http://foo/some/path?and=query&a=b#and-fragment").unwrap();
        assert_eq!(url.scheme(), "http");
        assert_eq!(url.username(), None);
        assert_eq!(url.password(), None);
        assert_eq!(url.host(), "foo");
        assert_eq!(url.port(), None);
        assert_eq!(url.path(), "/some/path");
        assert_eq!(url.query(), Some("and=query&a=b"));
        assert_eq!(url.fragment(), Some("and-fragment"));
    }

    #[test]
    fn test_query_alone() {
        let url = Url::from_str("http://foo/some/path?and=query&a=b").unwrap();
        assert_eq!(url.scheme(), "http");
        assert_eq!(url.username(), None);
        assert_eq!(url.password(), None);
        assert_eq!(url.host(), "foo");
        assert_eq!(url.port(), None);
        assert_eq!(url.path(), "/some/path");
        assert_eq!(url.query(), Some("and=query&a=b"));
        assert_eq!(url.fragment(), None);
    }

    #[test]
    fn test_fragment_alone() {
        let url = Url::from_str("http://foo/some/path#and-fragment").unwrap();
        assert_eq!(url.scheme(), "http");
        assert_eq!(url.username(), None);
        assert_eq!(url.password(), None);
        assert_eq!(url.host(), "foo");
        assert_eq!(url.port(), None);
        assert_eq!(url.path(), "/some/path");
        assert_eq!(url.query(), None);
        assert_eq!(url.fragment(), Some("and-fragment"));
    }

    #[test]
    fn test_full_featured_url() {
        let url =
            Url::from_str("http://user:pass@foo:123/some/path?and=query&a=b#and-fragment").unwrap();
        assert_eq!(url.scheme(), "http");
        assert_eq!(url.username(), Some("user"));
        assert_eq!(url.password(), Some("pass"));
        assert_eq!(url.host(), "foo");
        assert_eq!(url.port(), Some(123));
        assert_eq!(url.path(), "/some/path");
        assert_eq!(url.query(), Some("and=query&a=b"));
        assert_eq!(url.fragment(), Some("and-fragment"));
    }
}
|
<|file_name|>db.py<|end_file_name|><|fim▁begin|>import contextlib
import functools
import logging
import psycopg2.extras
from psycopg2 import Error as Psycopg2Error
_logger = logging.getLogger(__name__)
def retry_on_psycopg2_error(func):
    """
    Decorator that retries 3 times after Postgres error, in particular if
    the connection was not valid anymore because the database was restarted
    """
    @functools.wraps(func)
    def wrapper_retry(*args, **kwargs):
        # Four attempts in total: the original call plus three retries.
        last_attempt = 3
        for attempt in range(last_attempt + 1):
            try:
                return func(*args, **kwargs)
            except Psycopg2Error:
                if attempt == last_attempt:
                    raise
                _logger.warning(f'Retry query for {func.__name__} ({attempt + 1})')
    return wrapper_retry
@functools.lru_cache()
def dbconnection(dsn):
    """Creates an instance of _DBConnection and remembers the last one made.

    The lru_cache means repeated calls with the same DSN return the same
    shared connection wrapper instead of opening a new connection.
    """
    return _DBConnection(dsn)
class _DBConnection:
    """ Wraps a PostgreSQL database connection that reports crashes and tries
    its best to repair broken connections.
    NOTE: doesn't always work, but the failure scenario is very hard to
    reproduce. Also see https://github.com/psycopg/psycopg2/issues/263
    """
    def __init__(self, *args, **kwargs):
        # Connection arguments are stored so a broken connection can be
        # re-established later with the same parameters.
        self.conn_args = args
        self.conn_kwargs = kwargs
        self._conn = None
        self._connect()

    def _connect(self):
        # Lazily (re)open the connection; no-op when one already exists.
        if self._conn is None:
            self._conn = psycopg2.connect(*self.conn_args, **self.conn_kwargs)
            self._conn.autocommit = True

    def _is_usable(self):
        """ Checks whether the connection is usable.

        :returns boolean: True if we can query the database, False otherwise
        """
        try:
            self._conn.cursor().execute("SELECT 1")
        except psycopg2.Error:
            return False
        else:
            return True

    @contextlib.contextmanager
    def _connection(self):
        """ Contextmanager that tries to ensure we have a database
        connection. Yields a Connection object.

        If a :class:`psycopg2.DatabaseError` occurs then it will check whether
        the connection is still usable, and if it's not, close and remove it
        (so the next call reconnects). The error is always re-raised.
        """
        try:
            self._connect()
            yield self._conn
        except psycopg2.Error as e:
            _logger.critical('AUTHZ DatabaseError: {}'.format(e))
            if not self._is_usable():
                # Drop the dead connection; closing may itself fail, ignore.
                with contextlib.suppress(psycopg2.Error):
                    self._conn.close()
                self._conn = None
            raise e

    @contextlib.contextmanager
    def transaction_cursor(self, cursor_factory=None):
        """ Yields a cursor with transaction.
        """
        with self._connection() as transaction:
            with transaction:
                with transaction.cursor(cursor_factory=cursor_factory) as cur:
                    yield cur

    @contextlib.contextmanager
    def cursor(self, cursor_factory=None):
        """ Yields a cursor without transaction.
        """
        with self._connection() as conn:
            with conn.cursor(cursor_factory=cursor_factory) as cur:
                yield cur

    def fetch_all(self, sql):
        # Run a query and return all rows as dictionaries.
        with self.cursor(
                cursor_factory=psycopg2.extras.RealDictCursor) as cur:
            cur.execute(sql)
            return cur.fetchall()

    def fetch_one(self, sql):
        # Run a query and return the first row as a dictionary (or None).
        with self.cursor(
                cursor_factory=psycopg2.extras.RealDictCursor) as cur:
            cur.execute(sql)
            return cur.fetchone()
<|file_name|>parser_tests.py<|end_file_name|><|fim▁begin|>import unittest
import os
import math
from rdbtools import RdbCallback, RdbParser
class RedisParserTestCase(unittest.TestCase):
    """End-to-end tests for RdbParser.

    Each test parses a fixture .rdb file from the dumps/ directory next to
    this file via load_rdb() and asserts on the keyspace state recorded by
    the MockRedis callback (databases, declared lengths, expiry times).
    """
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_empty_rdb(self):
        r = load_rdb('empty_database.rdb')
        self.assert_('start_rdb' in r.methods_called)
        self.assert_('end_rdb' in r.methods_called)
        self.assertEquals(len(r.databases), 0, msg = "didn't expect any databases")

    def test_multiple_databases(self):
        r = load_rdb('multiple_databases.rdb')
        self.assert_(len(r.databases), 2)
        self.assert_(1 not in r.databases)
        self.assertEquals(r.databases[0]["key_in_zeroth_database"], "zero")
        self.assertEquals(r.databases[2]["key_in_second_database"], "second")

    def test_keys_with_expiry(self):
        r = load_rdb('keys_with_expiry.rdb')
        expiry = r.expiry[0]['expires_ms_precision']
        self.assertEquals(expiry.year, 2022)
        self.assertEquals(expiry.month, 12)
        self.assertEquals(expiry.day, 25)
        self.assertEquals(expiry.hour, 10)
        self.assertEquals(expiry.minute, 11)
        self.assertEquals(expiry.second, 12)
        self.assertEquals(expiry.microsecond, 573000)

    def test_integer_keys(self):
        r = load_rdb('integer_keys.rdb')
        self.assertEquals(r.databases[0][125], "Positive 8 bit integer")
        self.assertEquals(r.databases[0][0xABAB], "Positive 16 bit integer")
        self.assertEquals(r.databases[0][0x0AEDD325], "Positive 32 bit integer")

    def test_negative_integer_keys(self):
        r = load_rdb('integer_keys.rdb')
        self.assertEquals(r.databases[0][-123], "Negative 8 bit integer")
        self.assertEquals(r.databases[0][-0x7325], "Negative 16 bit integer")
        self.assertEquals(r.databases[0][-0x0AEDD325], "Negative 32 bit integer")

    def test_string_key_with_compression(self):
        r = load_rdb('easily_compressible_string_key.rdb')
        # The key is 200 repeated 'a's — chosen so redis LZF-compresses it.
        key = "".join('a' for x in range(0, 200))
        value = "Key that redis should compress easily"
        self.assertEquals(r.databases[0][key], value)

    # NOTE(review): "thats_compresses" looks like a typo for
    # "that_compresses" — renaming would only affect test discovery output.
    def test_zipmap_thats_compresses_easily(self):
        r = load_rdb('zipmap_that_compresses_easily.rdb')
        self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["a"], "aa")
        self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aa"], "aaaa")
        self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aaaaa"], "aaaaaaaaaaaaaa")

    def test_zipmap_that_doesnt_compress(self):
        r = load_rdb('zipmap_that_doesnt_compress.rdb')
        self.assertEquals(r.databases[0]["zimap_doesnt_compress"]["MKD1G6"], 2)
        self.assertEquals(r.databases[0]["zimap_doesnt_compress"]["YNNXK"], "F7TI")

    def test_zipmap_with_big_values(self):
        ''' See issue https://github.com/sripathikrishnan/redis-rdb-tools/issues/2
            Values with length around 253/254/255 bytes are treated specially in the parser
            This test exercises those boundary conditions

            In order to test a bug with large ziplists, it is necessary to start
            Redis with "hash-max-ziplist-value 21000", create this rdb file,
            and run the test. That forces the 20kbyte value to be stored as a
            ziplist with a length encoding of 5 bytes.
        '''
        r = load_rdb('zipmap_with_big_values.rdb')
        self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["253bytes"]), 253)
        self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["254bytes"]), 254)
        self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["255bytes"]), 255)
        self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["300bytes"]), 300)
        self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["20kbytes"]), 20000)

    def test_hash_as_ziplist(self):
        '''In redis dump version = 4, hashmaps are stored as ziplists'''
        r = load_rdb('hash_as_ziplist.rdb')
        self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["a"], "aa")
        self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aa"], "aaaa")
        self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aaaaa"], "aaaaaaaaaaaaaa")

    def test_dictionary(self):
        r = load_rdb('dictionary.rdb')
        self.assertEquals(r.lengths[0]["force_dictionary"], 1000)
        self.assertEquals(r.databases[0]["force_dictionary"]["ZMU5WEJDG7KU89AOG5LJT6K7HMNB3DEI43M6EYTJ83VRJ6XNXQ"],
                    "T63SOS8DQJF0Q0VJEZ0D1IQFCYTIPSBOUIAI9SB0OV57MQR1FI")
        self.assertEquals(r.databases[0]["force_dictionary"]["UHS5ESW4HLK8XOGTM39IK1SJEUGVV9WOPK6JYA5QBZSJU84491"],
                    "6VULTCV52FXJ8MGVSFTZVAGK2JXZMGQ5F8OVJI0X6GEDDR27RZ")

    def test_ziplist_that_compresses_easily(self):
        r = load_rdb('ziplist_that_compresses_easily.rdb')
        self.assertEquals(r.lengths[0]["ziplist_compresses_easily"], 6)
        for idx, length in enumerate([6, 12, 18, 24, 30, 36]) :
            self.assertEquals(("".join("a" for x in xrange(length))), r.databases[0]["ziplist_compresses_easily"][idx])

    def test_ziplist_that_doesnt_compress(self):
        r = load_rdb('ziplist_that_doesnt_compress.rdb')
        self.assertEquals(r.lengths[0]["ziplist_doesnt_compress"], 2)
        self.assert_("aj2410" in r.databases[0]["ziplist_doesnt_compress"])
        self.assert_("cc953a17a8e096e76a44169ad3f9ac87c5f8248a403274416179aa9fbd852344"
                        in r.databases[0]["ziplist_doesnt_compress"])

    def test_ziplist_with_integers(self):
        r = load_rdb('ziplist_with_integers.rdb')
        # Covers every ziplist integer encoding: 4-bit immediates 0..12 plus
        # 8/16/24/32/64-bit boundary values, positive and negative.
        expected_numbers = []
        for x in range(0,13):
            expected_numbers.append(x)
        expected_numbers += [-2, 13, 25, -61, 63, 16380, -16000, 65535, -65523, 4194304, 0x7fffffffffffffff]
        self.assertEquals(r.lengths[0]["ziplist_with_integers"], len(expected_numbers))
        for num in expected_numbers :
            self.assert_(num in r.databases[0]["ziplist_with_integers"], "Cannot find %d" % num)

    def test_linkedlist(self):
        r = load_rdb('linkedlist.rdb')
        self.assertEquals(r.lengths[0]["force_linkedlist"], 1000)
        self.assert_("JYY4GIFI0ETHKP4VAJF5333082J4R1UPNPLE329YT0EYPGHSJQ" in r.databases[0]["force_linkedlist"])
        self.assert_("TKBXHJOX9Q99ICF4V78XTCA2Y1UYW6ERL35JCIL1O0KSGXS58S" in r.databases[0]["force_linkedlist"])

    def test_intset_16(self):
        r = load_rdb('intset_16.rdb')
        self.assertEquals(r.lengths[0]["intset_16"], 3)
        for num in (0x7ffe, 0x7ffd, 0x7ffc) :
            self.assert_(num in r.databases[0]["intset_16"])

    def test_intset_32(self):
        r = load_rdb('intset_32.rdb')
        self.assertEquals(r.lengths[0]["intset_32"], 3)
        for num in (0x7ffefffe, 0x7ffefffd, 0x7ffefffc) :
            self.assert_(num in r.databases[0]["intset_32"])

    def test_intset_64(self):
        r = load_rdb('intset_64.rdb')
        self.assertEquals(r.lengths[0]["intset_64"], 3)
        for num in (0x7ffefffefffefffe, 0x7ffefffefffefffd, 0x7ffefffefffefffc) :
            self.assert_(num in r.databases[0]["intset_64"])

    def test_regular_set(self):
        r = load_rdb('regular_set.rdb')
        self.assertEquals(r.lengths[0]["regular_set"], 6)
        for member in ("alpha", "beta", "gamma", "delta", "phi", "kappa") :
            self.assert_(member in r.databases[0]["regular_set"], msg=('%s missing' % member))

    def test_sorted_set_as_ziplist(self):
        r = load_rdb('sorted_set_as_ziplist.rdb')
        self.assertEquals(r.lengths[0]["sorted_set_as_ziplist"], 3)
        zset = r.databases[0]["sorted_set_as_ziplist"]
        # Scores are floats — compare with a tolerance, not equality.
        self.assert_(floateq(zset['8b6ba6718a786daefa69438148361901'], 1))
        self.assert_(floateq(zset['cb7a24bb7528f934b841b34c3a73e0c7'], 2.37))
        self.assert_(floateq(zset['523af537946b79c4f8369ed39ba78605'], 3.423))

    def test_filtering_by_keys(self):
        r = load_rdb('parser_filters.rdb', filters={"keys":"k[0-9]"})
        self.assertEquals(r.databases[0]['k1'], "ssssssss")
        self.assertEquals(r.databases[0]['k3'], "wwwwwwww")
        self.assertEquals(len(r.databases[0]), 2)

    def test_filtering_by_type(self):
        r = load_rdb('parser_filters.rdb', filters={"types":["sortedset"]})
        self.assert_('z1' in r.databases[0])
        self.assert_('z2' in r.databases[0])
        self.assert_('z3' in r.databases[0])
        self.assert_('z4' in r.databases[0])
        self.assertEquals(len(r.databases[0]), 4)

    def test_filtering_by_database(self):
        r = load_rdb('multiple_databases.rdb', filters={"dbs":[2]})
        self.assert_('key_in_zeroth_database' not in r.databases[0])
        self.assert_('key_in_second_database' in r.databases[2])
        self.assertEquals(len(r.databases[0]), 0)
        self.assertEquals(len(r.databases[2]), 1)

    def test_rdb_version_5_with_checksum(self):
        r = load_rdb('rdb_version_5_with_checksum.rdb')
        self.assertEquals(r.databases[0]['abcd'], 'efgh')
        self.assertEquals(r.databases[0]['foo'], 'bar')
        self.assertEquals(r.databases[0]['bar'], 'baz')
        self.assertEquals(r.databases[0]['abcdef'], 'abcdef')
        self.assertEquals(r.databases[0]['longerstring'], 'thisisalongerstring.idontknowwhatitmeans')
def floateq(f1, f2):
    """True when f1 and f2 differ by less than 1e-5 (0.00001)."""
    return abs(f2 - f1) < 1e-5
def load_rdb(file_name, filters=None) :
    # Parse a fixture .rdb from the dumps/ directory beside this file and
    # return the MockRedis callback holding everything the parser emitted.
    # `filters` is passed straight through to RdbParser (keys/types/dbs).
    r = MockRedis()
    parser = RdbParser(r, filters)
    parser.parse(os.path.join(os.path.dirname(__file__), 'dumps', file_name))
    return r
class MockRedis(RdbCallback):
    """RdbCallback implementation that records every parser event into plain
    dictionaries so tests can assert on the reconstructed keyspace.

    Per database number it tracks: `databases` (key -> value), `lengths`
    (key -> declared collection length) and `expiry` (key -> expiry time).
    `methods_called` records lifecycle callbacks for sanity checks.

    Fixes vs. the previous version: error messages were built as
    `Exception('... %s', key)` — passing the format string and the key as two
    separate arguments, so the key was never interpolated — and the end_list /
    end_sorted_set messages incorrectly said "start_set not called". All
    messages now use `%` interpolation and name the correct start_* callback.
    """
    def __init__(self):
        self.databases = {}
        self.lengths = {}
        self.expiry = {}
        self.methods_called = []
        self.dbnum = 0

    def currentdb(self):
        # key -> value mapping for the database currently being parsed.
        return self.databases[self.dbnum]

    def store_expiry(self, key, expiry):
        self.expiry[self.dbnum][key] = expiry

    def store_length(self, key, length):
        if not self.dbnum in self.lengths:
            self.lengths[self.dbnum] = {}
        self.lengths[self.dbnum][key] = length

    def get_length(self, key):
        if not key in self.lengths[self.dbnum]:
            raise Exception('Key %s does not have a length' % key)
        return self.lengths[self.dbnum][key]

    def start_rdb(self):
        self.methods_called.append('start_rdb')

    def start_database(self, dbnum):
        # Fresh per-database state; subsequent callbacks go to this db.
        self.dbnum = dbnum
        self.databases[dbnum] = {}
        self.expiry[dbnum] = {}
        self.lengths[dbnum] = {}

    def set(self, key, value, expiry, info):
        self.currentdb()[key] = value
        if expiry:
            self.store_expiry(key, expiry)

    def start_hash(self, key, length, expiry, info):
        if key in self.currentdb():
            raise Exception('start_hash called with key %s that already exists' % key)
        else:
            self.currentdb()[key] = {}
        if expiry:
            self.store_expiry(key, expiry)
        self.store_length(key, length)

    def hset(self, key, field, value):
        if not key in self.currentdb():
            raise Exception('start_hash not called for key = %s' % key)
        self.currentdb()[key][field] = value

    def end_hash(self, key):
        if not key in self.currentdb():
            raise Exception('start_hash not called for key = %s' % key)
        if len(self.currentdb()[key]) != self.lengths[self.dbnum][key]:
            raise Exception('Lengths mismatch on hash %s, expected length = %d, actual = %d'
                            % (key, self.lengths[self.dbnum][key], len(self.currentdb()[key])))

    def start_set(self, key, cardinality, expiry, info):
        if key in self.currentdb():
            raise Exception('start_set called with key %s that already exists' % key)
        else:
            self.currentdb()[key] = []
        if expiry:
            self.store_expiry(key, expiry)
        self.store_length(key, cardinality)

    def sadd(self, key, member):
        if not key in self.currentdb():
            raise Exception('start_set not called for key = %s' % key)
        self.currentdb()[key].append(member)

    def end_set(self, key):
        if not key in self.currentdb():
            raise Exception('start_set not called for key = %s' % key)
        if len(self.currentdb()[key]) != self.lengths[self.dbnum][key]:
            raise Exception('Lengths mismatch on set %s, expected length = %d, actual = %d'
                            % (key, self.lengths[self.dbnum][key], len(self.currentdb()[key])))

    def start_list(self, key, length, expiry, info):
        if key in self.currentdb():
            raise Exception('start_list called with key %s that already exists' % key)
        else:
            self.currentdb()[key] = []
        if expiry:
            self.store_expiry(key, expiry)
        self.store_length(key, length)

    def rpush(self, key, value):
        if not key in self.currentdb():
            raise Exception('start_list not called for key = %s' % key)
        self.currentdb()[key].append(value)

    def end_list(self, key):
        if not key in self.currentdb():
            raise Exception('start_list not called for key = %s' % key)
        if len(self.currentdb()[key]) != self.lengths[self.dbnum][key]:
            raise Exception('Lengths mismatch on list %s, expected length = %d, actual = %d'
                            % (key, self.lengths[self.dbnum][key], len(self.currentdb()[key])))

    def start_sorted_set(self, key, length, expiry, info):
        if key in self.currentdb():
            raise Exception('start_sorted_set called with key %s that already exists' % key)
        else:
            self.currentdb()[key] = {}
        if expiry:
            self.store_expiry(key, expiry)
        self.store_length(key, length)

    def zadd(self, key, score, member):
        if not key in self.currentdb():
            raise Exception('start_sorted_set not called for key = %s' % key)
        self.currentdb()[key][member] = score

    def end_sorted_set(self, key):
        if not key in self.currentdb():
            raise Exception('start_sorted_set not called for key = %s' % key)
        if len(self.currentdb()[key]) != self.lengths[self.dbnum][key]:
            raise Exception('Lengths mismatch on sortedset %s, expected length = %d, actual = %d'
                            % (key, self.lengths[self.dbnum][key], len(self.currentdb()[key])))

    def end_database(self, dbnum):
        if self.dbnum != dbnum:
            raise Exception('start_database called with %d, but end_database called %d instead' % (self.dbnum, dbnum))

    def end_rdb(self):
        self.methods_called.append('end_rdb')
<|file_name|>metrics.py<|end_file_name|><|fim▁begin|>'''
Copyright (C) 2017 The Board of Trustees of the Leland Stanford Junior
University.
Copyright (C) 2016-2017 Vanessa Sochat.
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT<|fim▁hole|>License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
from glob import glob
import json
import os
import re
from singularity.logger import bot
###################################################################################
# METRICS #########################################################################
###################################################################################
def information_coefficient(total1,total2,intersect):
    '''a simple jacaard (information coefficient) to compare two lists of overlaps/diffs

    total1, total2: sizes of the two compared collections
    intersect: the shared elements (only its length is used)
    '''
    # Dice-style overlap: twice the shared count over the combined sizes.
    return 2.0 * len(intersect) / (total1 + total2)
def RSA(m1,m2):
    '''RSA analysis will compare the similarity of two matrices

    m1, m2: square pandas DataFrames of the same shape (pairwise similarity
    matrices). The strictly-lower triangle of each is flattened and the two
    vectors are compared with a Pearson correlation over the positions that
    are defined (non-NaN) in both. Returns the correlation coefficient.

    Fix: `numpy.bool` (removed in NumPy 1.24) replaced with the builtin
    `bool`; the unused `import scipy.linalg` was dropped.
    '''
    from scipy.stats import pearsonr
    import numpy
    # Mask the diagonal and upper triangle to NaN, keeping only the strictly
    # lower triangle of each matrix, then flatten to a vector.
    vectorm1 = m1.mask(numpy.triu(numpy.ones(m1.shape)).astype(bool)).values.flatten()
    vectorm2 = m2.mask(numpy.triu(numpy.ones(m2.shape)).astype(bool)).values.flatten()
    # Now remove the nans: keep positions defined in both vectors.
    m1defined = numpy.argwhere(~numpy.isnan(numpy.array(vectorm1,dtype=float)))
    m2defined = numpy.argwhere(~numpy.isnan(numpy.array(vectorm2,dtype=float)))
    idx = numpy.intersect1d(m1defined,m2defined)
    return pearsonr(vectorm1[idx],vectorm2[idx])[0]
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public |
<|file_name|>n_queens.rs<|end_file_name|><|fim▁begin|>// Implements http://rosettacode.org/wiki/N-queens_problem
#![feature(test)]
extern crate test;
use std::vec::Vec;
use std::thread::spawn;
use std::sync::mpsc::channel;
#[cfg(test)]
use test::Bencher;<|fim▁hole|>
// Entry point: print the solution counts for board sizes 0..15 using the
// sequential solver first, then the thread-per-branch solver.
#[cfg(not(test))]
fn main() {
    for size in 0i32..16 {
        println!("Sequential: {}: {}", size, n_queens(size));
    }

    for size in 0i32..16 {
        println!("Parallel: {}: {}", size, semi_parallel_n_queens(size));
    }
}
/* _
___ ___ | |_ _____ _ __
/ __|/ _ \| \ \ / / _ \ '__/
\__ \ (_) | |\ V / __/ |
|___/\___/|_| \_/ \___|_|
*/
// Solves n-queens using a depth-first, backtracking solution.
// Returns the number of solutions for a given n.
/// Count the solutions of the n-queens problem on an `n` x `n` board.
///
/// Uses a depth-first backtracking search in which the column and diagonal
/// conflicts for the current row are tracked as bitmasks instead of an
/// explicit board, keeping the search state tiny.
fn n_queens(n: i32) -> usize {
    // `full_row` has the low n bits set: the set of real board columns.
    let full_row = (1 << n as usize) - 1;
    n_queens_helper(full_row, 0, 0, 0)
}

/// Recursive worker for `n_queens`.
///
/// * `all_ones`    - mask with the low n bits set; restricts candidates to
///                   columns that exist on the board.
/// * `left_diags`  - columns of this row attacked along "\" diagonals.
/// * `columns`     - columns already occupied by a queen.
/// * `right_diags` - columns of this row attacked along "/" diagonals.
///
/// Returns the number of complete placements reachable from this state.
fn n_queens_helper(all_ones: i32, left_diags: i32, columns: i32, right_diags: i32) -> usize {
    // A square is open when no column or diagonal conflict covers it.
    let mut open = !(left_diags | columns | right_diags) & all_ones;
    let mut found = 0;

    // Peel candidate squares off one at a time, lowest set bit first.
    while open != 0 {
        // Two's-complement trick: x & -x isolates the least-significant
        // set bit of x.
        let square = open & open.wrapping_neg();
        open ^= square;

        // Place a queen on `square` and derive the next row's conflicts:
        // "\" diagonal conflicts shift left, "/" diagonal conflicts shift
        // right, and the occupied column is carried straight down.
        found += n_queens_helper(
            all_ones,
            (left_diags | square) << 1,
            columns | square,
            (right_diags | square) >> 1,
        );
    }

    // All columns occupied means n queens are placed: one full solution.
    found + ((columns == all_ones) as usize)
}
// This is the same as the regular nQueens except it creates
// n threads in which to to do the work.
//
// This is much slower for smaller numbers (under 16~17) but outperforms
// the sequential algorithm after that.
fn semi_parallel_n_queens(n: i32) -> usize {
let all_ones = (1 << n as usize) - 1;
let (columns, left_diags, right_diags) = (0, 0, 0);
let mut receivers = Vec::new();
let mut valid_spots = !(left_diags | columns | right_diags) & all_ones;
while valid_spots != 0 {
let (tx, rx) = channel();
let spot = -valid_spots & valid_spots;
valid_spots = valid_spots ^ spot;
receivers.push(rx);
spawn( move || -> () {
tx.send(n_queens_helper(all_ones,
(left_diags | spot) << 1,
(columns | spot),
(right_diags | spot) >> 1)).unwrap();
});
}
receivers.iter().map(|r| r.recv().unwrap()).fold(0, |a, b| a + b) +
((columns == all_ones) as usize)
}
// Tests
/// Check the sequential solver against the known solution counts for
/// board sizes 0 through 8.
#[test]
fn test_n_queens() {
    let expected = vec![1, 1, 0, 0, 2, 10, 4, 40, 92];
    for num in 0..9i32 {
        assert_eq!(n_queens(num), expected[num as usize]);
    }
}
/// Check the threaded solver against the same known solution counts.
#[test]
fn test_parallel_n_queens() {
    let expected = vec![1, 1, 0, 0, 2, 10, 4, 40, 92];
    for num in 0..9i32 {
        assert_eq!(semi_parallel_n_queens(num), expected[num as usize]);
    }
}
// Benchmark the sequential solver on a 16x16 board. Requires nightly's
// unstable `test` feature; `black_box` keeps the optimizer from removing
// the call whose result is otherwise unused.
#[bench]
fn bench_n_queens(b: &mut Bencher) {
    b.iter(|| { test::black_box(n_queens(16)); });
}
// Benchmark the threaded solver on the same 16x16 board for a direct
// comparison with `bench_n_queens`.
#[bench]
fn bench_semi_parallel_n_queens(b: &mut Bencher) {
    b.iter(|| { test::black_box(semi_parallel_n_queens(16)); });
}
<|file_name|>app.component.ts<|end_file_name|><|fim▁begin|>import {Component} from 'angular2/core';
export class Reward{
id: number;
name: string;
description: string;
points: number;
}<|fim▁hole|>})
export class AppComponent {
title: string;
title = 'Reward';
reward: Reward = {
id: 1,
name: 'Una noche en el hotel XX',
description: 'Hotel de lujo 5 estrellas con acomodación doble',
points: '100'
};
}<|fim▁end|> |
@Component({
selector: 'my-app',
templateUrl: 'templates/reward-detail.html' |
<|file_name|>dbpediamap.py<|end_file_name|><|fim▁begin|>__author__ = 'Lorenzo'
# Maps DBpedia(-live) predicate URIs (kept as angle-bracketed N-Triples
# terms, exactly as they appear in query results) to short human-readable
# labels for planet-related properties.
# NOTE(review): reconstructed from a FIM-corrupted dump; the stray marker
# tokens were removed and the missing entry restored in place.
planet_mapper = {
    '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>': 'planet type',  # link to yago category, can be explored more
    '<http://live.dbpedia.org/ontology/wikiPageExternalLink>': 'external link',  # many
    '<http://live.dbpedia.org/property/inclination>': 'inclination',  # quantity and text
    '<http://www.w3.org/2000/01/rdf-schema#seeAlso>': 'see also',  # many
    '<http://live.dbpedia.org/property/albedo>': 'albedo',  # quantity
    '<http://xmlns.com/foaf/0.1/depiction>': 'depiction',  # svg shape
    '<http://live.dbpedia.org/property/rotVelocity>': 'rotation velocity',  # quantity
    '<http://live.dbpedia.org/property/period>': 'period',  # quantity
    '<http://live.dbpedia.org/property/meanTemp>': 'average temperature',  # quantity
    '<http://live.dbpedia.org/ontology/abstract>': 'abstract',  # text
    '<http://live.dbpedia.org/property/meanAnomaly>': 'average anomaly',  # quantity
    '<http://live.dbpedia.org/property/siderealDay>': 'sideral day',  # quantity
    '<http://live.dbpedia.org/property/scaleHeight>': 'atmospheric scale height',  # quantity
    '<http://live.dbpedia.org/property/mass>': 'mass',  # quantity
    '<http://live.dbpedia.org/property/escapeVelocity>': 'escape velocity (Km/s)',  # quantity
    '<http://live.dbpedia.org/property/atmosphere>': 'has atmosphere',  # yes/no
    '<http://live.dbpedia.org/property/ascNode>': 'asc node',  # quantity
    '<http://live.dbpedia.org/property/surfaceArea>': 'surface area',  # quantity
    '<http://live.dbpedia.org/property/equatorialRadius>': 'equatorial radius',  # quantity
    '<http://live.dbpedia.org/property/polarRadius>': 'polar radius',  # quantity
    '<http://live.dbpedia.org/ontology/escapeVelocity>': 'escape velocity (double)',  # quantity
    '<http://live.dbpedia.org/property/atmosphereComposition>': 'atmosphere chemistry',  # text
    '<http://live.dbpedia.org/property/surfacePressure>': 'surface pressure',
    '<http://live.dbpedia.org/property/volume> ': 'volume',  # NOTE(review): key keeps its trailing space as in the source data
    '<http://live.dbpedia.org/property/angularSize>': 'angular size',
    '<http://live.dbpedia.org/property/avgSpeed>': 'average speed (Km/s)',
    '<http://live.dbpedia.org/property/declination>': 'declination',
    '<http://live.dbpedia.org/property/surfaceGrav>': 'surface gravity (grams)',
    '<http://live.dbpedia.org/property/satellites>': 'number of satellites'
}
<|file_name|>assoc-const.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused_variables)]
/// Type-level natural numbers: every type implementing `Nat` carries its
/// numeric value as an associated const, so counting happens entirely at
/// compile time.
/// NOTE(review): the `Succ` impl was split by dataset FIM corruption
/// markers; it is reconstructed here.
trait Nat {
    const VALUE: usize;
}

/// Type-level zero.
struct Zero;

/// Type-level successor: `Succ<N>` represents `N + 1`.
struct Succ<N>(N);

impl Nat for Zero {
    const VALUE: usize = 0;
}

impl<N: Nat> Nat for Succ<N> {
    const VALUE: usize = N::VALUE + 1;
}

fn main() {
    // The array length is the type-level number 4, evaluated at compile time.
    let x: [i32; <Succ<Succ<Succ<Succ<Zero>>>>>::VALUE] = [1, 2, 3, 4];
}
<|file_name|>test_main.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
def test_safe_timezone_with_tzinfo_objects():
    """_safe_timezone must coerce a third-party (pytz) tzinfo into
    pendulum's own Timezone type while keeping the zone name."""
    paris = _safe_timezone(pytz.timezone("Europe/Paris"))

    assert isinstance(paris, Timezone)
    assert paris.name == "Europe/Paris"
from pendulum import _safe_timezone
from pendulum.tz.timezone import Timezone |
<|file_name|>policy.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# 2016-05-07 Cornelius Kölbel <[email protected]>
# Add realm dropdown
# 2016-04-06 Cornelius Kölbel <[email protected]>
# Add time dependency in policy
# 2016-02-22 Cornelius Kölbel <[email protected]>
# Add RADIUS passthru policy
# 2016-02-05 Cornelius Kölbel <[email protected]>
# Add tokenwizard in scope UI
# 2015-12-30 Cornelius Kölbel <[email protected]>
# Add password reset policy
# 2015-12-28 Cornelius Kölbel <[email protected]>
# Add registration policy
# 2015-12-16 Cornelius Kölbel <[email protected]>
# Add tokenissuer policy
# 2015-11-29 Cornelius Kölbel <[email protected]>
# Add getchallenges policy
# 2015-10-31 Cornelius Kölbel <[email protected]>
# Add last_auth policy.
# 2015-10-30 Cornelius Kölbel <[email protected]>
# Display user details in token list
# 2015-10-26 Cornelius Kölbel <[email protected]>
# Add default token type for enrollment
# 2015-10-14 Cornelius Kölbel <[email protected]>
# Add auth_max_success and auth_max_fail actions to
# scope authorization
# 2015-10-09 Cornelius Kölbel <[email protected]>
# Add token_page_size and user_page_size policy
# 2015-09-06 Cornelius Kölbel <[email protected]>
# Add challenge_response authentication policy
# 2015-06-30 Cornelius Kölbel <[email protected]>
# Add the OTP PIN handling
# 2015-06-29 Cornelius Kölbel <[email protected]>
# Add the mangle policy
# 2015-04-03 Cornelius Kölbel <[email protected]>
# Add WebUI logout time.
# 2015-03-27 Cornelius Kölbel <[email protected]>
# Add PIN policies in USER scope
# 2015-02-06 Cornelius Kölbel <[email protected]>
# Rewrite for flask migration.
# Policies are not handled by decorators as
# 1. precondition for API calls
# 2. internal modifications of LIB-functions
# 3. postcondition for API calls
#
# Jul 07, 2014 add check_machine_policy, Cornelius Kölbel
# May 08, 2014 Cornelius Kölbel
#
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# privacyIDEA is a fork of LinOTP
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: AGPLv3
# contact: http://www.linotp.org
# http://www.lsexperts.de
# [email protected]
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Base function to handle the policy entries in the database.
This module only depends on the db/models.py
The functions of this module are tested in tests/test_lib_policy.py
A policy has the attributes
* name
* scope
* action
* realm
* resolver
* user
* client
* active
``name`` is the unique identifier of a policy. ``scope`` is the area,
where this policy is meant for. This can be values like admin, selfservice,
authentication...
``scope`` takes only one value.
``active`` is bool and indicates, whether a policy is active or not.
``action``, ``realm``, ``resolver``, ``user`` and ``client`` can take a comma
separated list of values.
realm and resolver
------------------
If these are empty '*', this policy matches each requested realm.
user
----
If the user is empty or '*', this policy matches each user.
You can exclude users from matching this policy, by prepending a '-' or a '!'.
``*, -admin`` will match for all users except the admin.
client
------
The client is identified by its IP address. A policy can contain a list of
IP addresses or subnets.
You can exclude clients from subnets by prepending the client with a '-' or
a '!'.
``172.16.0.0/24, -172.16.0.17`` will match each client in the subnet except
the 172.16.0.17.
time
----
You can specify a time in which the policy should be active.
Time formats are
<dow>-<dow>:<hh>:<mm>-<hh>:<mm>, ...
<dow>:<hh>:<mm>-<hh>:<mm>
<dow>:<hh>-<hh>
and any combination of it. "dow" being day of week Mon, Tue, Wed, Thu, Fri,
Sat, Sun.
"""
from .log import log_with
from configobj import ConfigObj
from netaddr import IPAddress
from netaddr import IPNetwork
from gettext import gettext as _
import logging
from ..models import (Policy, db)
from privacyidea.lib.config import (get_token_classes, get_token_types)
from privacyidea.lib.error import ParameterError, PolicyError
from privacyidea.lib.realm import get_realms
from privacyidea.lib.resolver import get_resolver_list
from privacyidea.lib.smtpserver import get_smtpservers
from privacyidea.lib.radiusserver import get_radiusservers
from privacyidea.lib.utils import check_time_in_range
log = logging.getLogger(__name__)
optional = True
required = False
# Constant holder: the scope strings a policy may be defined in. The string
# values are what get_policies() matches against the policy's "scope" field.
class SCOPE(object):
    __doc__ = """This is the list of the allowed scopes that can be used in
    policy definitions.
    """
    AUTHZ = "authorization"
    ADMIN = "admin"
    AUTH = "authentication"
    AUDIT = "audit"
    USER = "user"   # was selfservice
    ENROLL = "enrollment"
    GETTOKEN = "gettoken"
    WEBUI = "webui"
    REGISTER = "register"
# Constant holder: symbolic names for the policy action strings. The string
# values are the wire/database representation (see set_policy, which joins
# them into a comma separated "action" value) -- keep them stable.
class ACTION(object):
    __doc__ = """This is the list of usual actions."""
    ASSIGN = "assign"
    AUDIT = "auditlog"
    AUTHITEMS = "fetch_authentication_items"
    AUTHMAXSUCCESS = "auth_max_success"
    AUTHMAXFAIL = "auth_max_fail"
    AUTOASSIGN = "autoassignment"
    CACONNECTORREAD = "caconnectorread"
    CACONNECTORWRITE = "caconnectorwrite"
    CACONNECTORDELETE = "caconnectordelete"
    CHALLENGERESPONSE = "challenge_response"
    GETCHALLENGES = "getchallenges"
    COPYTOKENPIN = "copytokenpin"
    COPYTOKENUSER = "copytokenuser"
    DEFAULT_TOKENTYPE = "default_tokentype"
    DELETE = "delete"
    DISABLE = "disable"
    EMAILCONFIG = "smtpconfig"
    ENABLE = "enable"
    ENCRYPTPIN = "encrypt_pin"
    GETSERIAL = "getserial"
    GETRANDOM = "getrandom"
    IMPORT = "importtokens"
    LASTAUTH = "last_auth"
    LOGINMODE = "login_mode"
    LOGOUTTIME = "logout_time"
    LOSTTOKEN = 'losttoken'
    LOSTTOKENPWLEN = "losttoken_PW_length"
    LOSTTOKENPWCONTENTS = "losttoken_PW_contents"
    LOSTTOKENVALID = "losttoken_valid"
    MACHINERESOLVERWRITE = "mresolverwrite"
    MACHINERESOLVERDELETE = "mresolverdelete"
    MACHINELIST = "machinelist"
    MACHINETOKENS = "manage_machine_tokens"
    MANGLE = "mangle"
    MAXTOKENREALM = "max_token_per_realm"
    MAXTOKENUSER = "max_token_per_user"
    NODETAILSUCCESS = "no_detail_on_success"
    NODETAILFAIL = "no_detail_on_fail"
    OTPPIN = "otppin"
    OTPPINRANDOM = "otp_pin_random"
    OTPPINMAXLEN = 'otp_pin_maxlength'
    OTPPINMINLEN = 'otp_pin_minlength'
    OTPPINCONTENTS = 'otp_pin_contents'
    PASSNOTOKEN = "passOnNoToken"
    PASSNOUSER = "passOnNoUser"
    PASSTHRU = "passthru"
    PASSWORDRESET = "password_reset"
    PINHANDLING = "pinhandling"
    POLICYDELETE = "policydelete"
    POLICYWRITE = "policywrite"
    POLICYTEMPLATEURL = "policy_template_url"
    REALM = "realm"
    REMOTE_USER = "remote_user"
    REQUIREDEMAIL = "requiredemail"
    RESET = "reset"
    RESOLVERDELETE = "resolverdelete"
    RESOLVERWRITE = "resolverwrite"
    RESOLVER = "resolver"
    RESYNC = "resync"
    REVOKE = "revoke"
    SET = "set"
    SETPIN = "setpin"
    SETREALM = "setrealm"
    SERIAL = "serial"
    SYSTEMDELETE = "configdelete"
    SYSTEMWRITE = "configwrite"
    CONFIGDOCUMENTATION = "system_documentation"
    TOKENISSUER = "tokenissuer"
    TOKENLABEL = "tokenlabel"
    TOKENPAGESIZE = "token_page_size"
    TOKENREALMS = "tokenrealms"
    TOKENTYPE = "tokentype"
    TOKENWIZARD = "tokenwizard"
    TOKENWIZARD2ND = "tokenwizard_2nd_token"
    UNASSIGN = "unassign"
    USERLIST = "userlist"
    USERPAGESIZE = "user_page_size"
    ADDUSER = "adduser"
    DELETEUSER = "deleteuser"
    UPDATEUSER = "updateuser"
    USERDETAILS = "user_details"
    APIKEY = "api_key_required"
    SETHSM = "set_hsm_password"
    SMTPSERVERWRITE = "smtpserver_write"
    RADIUSSERVERWRITE = "radiusserver_write"
    REALMDROPDOWN = "realm_dropdown"
    EVENTHANDLINGWRITE = "eventhandling_write"
# Constant holder: allowed values for the login_mode policy action.
class LOGINMODE(object):
    __doc__ = """This is the list of possible values for the login mode."""
    USERSTORE = "userstore"
    PRIVACYIDEA = "privacyIDEA"
    DISABLE = "disable"
# Constant holder: allowed values for the remote_user policy action.
# NOTE: the ACTIVE constant deliberately maps to the wire value "allowed".
class REMOTE_USER(object):
    __doc__ = """The list of possible values for the remote_user policy."""
    DISABLE = "disable"
    ACTIVE = "allowed"
# Constant holder: common values assigned to policy actions (e.g. otppin).
class ACTIONVALUE(object):
    __doc__ = """This is a list of usual action values for e.g. policy
    action-values like otppin."""
    TOKENPIN = "tokenpin"
    USERSTORE = "userstore"
    DISABLE = "disable"
    NONE = "none"
# Constant holder: allowed values for the autoassignment policy action.
# NOTE: the NONE constant deliberately maps to the wire value "any_pin".
class AUTOASSIGNVALUE(object):
    __doc__ = """This is the possible values for autoassign"""
    USERSTORE = "userstore"
    NONE = "any_pin"
class PolicyClass(object):
    """
    The Policy_Object will contain all database policy entries for easy
    filtering and mangling.
    It will be created at the beginning of the request and is supposed to stay
    alive unchanged during the request.
    """

    def __init__(self):
        """
        Create the Policy_Object from the database table
        """
        self.policies = []
        # read the policies from the database and store it in the object
        policies = Policy.query.all()
        for pol in policies:
            # read each policy
            self.policies.append(pol.get())

    @log_with(log)
    def get_policies(self, name=None, scope=None, realm=None, active=None,
                     resolver=None, user=None, client=None, action=None,
                     adminrealm=None, time=None, all_times=False):
        """
        Return the policies of the given filter values

        :param name: Only return the policy with this exact name
        :param scope: Only return policies of this scope
        :param realm: Only return policies matching this realm
        :param active: Only return policies with this active state
        :param resolver: Only return policies matching this resolver
        :param user: Only return policies matching this username
        :param client: Only return policies matching this client IP
        :param action: Only return policies containing this action
        :param adminrealm: This is the realm of the admin. This is only
            evaluated in the scope admin.
        :param time: The optional time, for which the policies should be
            fetched. The default time is now()
        :type time: datetime
        :param all_times: If True the time restriction of the policies is
            ignored. Policies of all time ranges will be returned.
        :type all_times: bool
        :return: list of policies
        :rtype: list of dicts
        """
        reduced_policies = self.policies
        # filter policy for time. If no time is set or is a time is set and
        # it matches the time_range, then we add this policy
        if not all_times:
            reduced_policies = [policy for policy in reduced_policies if
                                (policy.get("time") and
                                 check_time_in_range(policy.get("time"), time))
                                or not policy.get("time")]
        log.debug("Policies after matching time: {0!s}".format(
            reduced_policies))

        # Do exact matches for "name", "active" and "scope", as these fields
        # can only contain one entry
        p = [("name", name), ("active", active), ("scope", scope)]
        for searchkey, searchvalue in p:
            if searchvalue is not None:
                reduced_policies = [policy for policy in reduced_policies if
                                    policy.get(searchkey) == searchvalue]
                log.debug("Policies after matching {1!s}: {0!s}".format(
                    reduced_policies, searchkey))

        p = [("action", action), ("user", user), ("resolver", resolver),
             ("realm", realm)]
        # If this is an admin-policy, we also do check the adminrealm
        if scope == "admin":
            p.append(("adminrealm", adminrealm))
        for searchkey, searchvalue in p:
            if searchvalue is not None:
                new_policies = []
                # first we find policies, that really match!
                # Either with the real value or with a "*"
                # values can be excluded by a leading "!" or "-"
                for policy in reduced_policies:
                    value_found = False
                    value_excluded = False
                    # iterate through the list of values:
                    for value in policy.get(searchkey):
                        if value and value[0] in ["!", "-"] and \
                                searchvalue == value[1:]:
                            value_excluded = True
                        elif type(searchvalue) == list and value in \
                                searchvalue + ["*"]:
                            value_found = True
                        elif value in [searchvalue, "*"]:
                            value_found = True
                    if value_found and not value_excluded:
                        new_policies.append(policy)
                # We also find the policies with no distinct information
                # about the request value
                for policy in reduced_policies:
                    if not policy.get(searchkey):
                        new_policies.append(policy)
                reduced_policies = new_policies
                log.debug("Policies after matching {1!s}: {0!s}".format(
                    reduced_policies, searchkey))

        # Match the client IP.
        # Client IPs may be direct match, may be located in subnets or may
        # be excluded by a leading "-" or "!" sign.
        # The client definition in the policy may ba a comma separated list.
        # It may start with a "-" or a "!" to exclude the client
        # from a subnet.
        # Thus a client 10.0.0.2 matches a policy "10.0.0.0/8, -10.0.0.1" but
        # the client 10.0.0.1 does not match the policy "10.0.0.0/8, -10.0.0.1".
        # An empty client definition in the policy matches all clients.
        if client is not None:
            new_policies = []
            for policy in reduced_policies:
                client_found = False
                client_excluded = False
                for polclient in policy.get("client"):
                    if polclient[0] in ['-', '!']:
                        # exclude the client?
                        if IPAddress(client) in IPNetwork(polclient[1:]):
                            log.debug("the client %s is excluded by %s in "
                                      "policy %s" % (client, polclient, policy))
                            client_excluded = True
                    elif IPAddress(client) in IPNetwork(polclient):
                        client_found = True
                if client_found and not client_excluded:
                    # The client was contained in the defined subnets and was
                    # not excluded
                    new_policies.append(policy)

            # If there is a policy without any client, we also add it to the
            # accepted list.
            for policy in reduced_policies:
                if not policy.get("client"):
                    new_policies.append(policy)
            reduced_policies = new_policies
            # BUGFIX(review): the original format string had no placeholder,
            # so the policy list was silently dropped from the log message.
            log.debug("Policies after matching client: {0!s}".format(
                reduced_policies))

        return reduced_policies

    @log_with(log)
    def get_action_values(self, action, scope=SCOPE.AUTHZ, realm=None,
                          resolver=None, user=None, client=None, unique=False,
                          allow_white_space_in_action=False):
        """
        Get the defined action values for a certain action like
            scope: authorization
            action: tokentype
        would return a list of the tokentypes

            scope: authorization
            action: serial
        would return a list of allowed serials

        :param unique: if set, the function will raise an exception if more
            than one value is returned
        :param allow_white_space_in_action: Some policies like emailtext
            would allow entering text with whitespaces. These whitespaces
            must not be used to separate action values!
        :type allow_white_space_in_action: bool
        :return: A list of the allowed tokentypes
        :rtype: list
        """
        action_values = []
        policies = self.get_policies(scope=scope,
                                     action=action, active=True,
                                     realm=realm, resolver=resolver, user=user,
                                     client=client)
        for pol in policies:
            action_dict = pol.get("action", {})
            action_value = action_dict.get(action, "")
            """
            We must distinguish actions like:
                tokentype=totp hotp motp,
            where the string represents a list divided by spaces, and
                smstext='your otp is <otp>'
            where the spaces are part of the string.
            """
            if action_value.startswith("'") and action_value.endswith("'"):
                # Quoted value: strip the quotes, keep inner whitespace.
                action_values.append(action_dict.get(action)[1:-1])
            elif allow_white_space_in_action:
                action_values.append(action_dict.get(action))
            else:
                # Unquoted value: treat whitespace as a list separator.
                action_values.extend(action_dict.get(action, "").split())

        # reduce the entries to unique entries
        action_values = list(set(action_values))
        if unique:
            if len(action_values) > 1:
                raise PolicyError("There are conflicting %s"
                                  " definitions!" % action)
        return action_values

    @log_with(log)
    def ui_get_rights(self, scope, realm, username, client=None):
        """
        Get the rights derived from the policies for the given realm and user.
        Works for admins and normal users.
        It fetches all policies for this user and compiles a maximum list of
        allowed rights, that can be used to hide certain UI elements.

        :param scope: Can be SCOPE.ADMIN or SCOPE.USER
        :param realm: Is either user users realm or the adminrealm
        :param username: The loginname of the user
        :param client: The HTTP client IP
        :return: A list of actions
        """
        from privacyidea.lib.auth import ROLE
        from privacyidea.lib.token import get_dynamic_policy_definitions
        rights = []
        userealm = None
        adminrealm = None
        logged_in_user = {"username": username,
                          "realm": realm}
        if scope == SCOPE.ADMIN:
            adminrealm = realm
            logged_in_user["role"] = ROLE.ADMIN
        elif scope == SCOPE.USER:
            userealm = realm
            logged_in_user["role"] = ROLE.USER

        pols = self.get_policies(scope=scope,
                                 adminrealm=adminrealm,
                                 realm=userealm,
                                 user=username, active=True,
                                 client=client)
        for pol in pols:
            for action, action_value in pol.get("action").items():
                if action_value:
                    rights.append(action)

        # check if we have policies at all:
        pols = self.get_policies(scope=scope, active=True)
        if not pols:
            # We do not have any policies in this scope, so we return all
            # possible actions in this scope.
            log.debug("No policies defined, so we set all rights.")
            # NOTE(review): list concatenation of keys() -- relies on
            # Python 2 semantics where dict.keys() returns a list.
            static_rights = get_static_policy_definitions(scope).keys()
            enroll_rights = get_dynamic_policy_definitions(scope).keys()
            rights = static_rights + enroll_rights
        # reduce the list
        rights = list(set(rights))
        log.debug("returning the admin rights: {0!s}".format(rights))
        return rights

    @log_with(log)
    def ui_get_enroll_tokentypes(self, client, logged_in_user):
        """
        Return a dictionary of the allowed tokentypes for the logged in user.
        This used for the token enrollment UI.

        It looks like this:
           {"hotp": "HOTP: event based One Time Passwords",
            "totp": "TOTP: time based One Time Passwords",
            "spass": "SPass: Simple Pass token. Static passwords",
            "motp": "mOTP: classical mobile One Time Passwords",
            "sshkey": "SSH Public Key: The public SSH key",
            "yubikey": "Yubikey AES mode: One Time Passwords with Yubikey",
            "remote": "Remote Token: Forward authentication request to another server",
            "yubico": "Yubikey Cloud mode: Forward authentication request to YubiCloud",
            "radius": "RADIUS: Forward authentication request to a RADIUS server",
            "email": "EMail: Send a One Time Passwort to the users email address",
            "sms": "SMS: Send a One Time Password to the users mobile phone",
            "certificate": "Certificate: Enroll an x509 Certificate Token."}

        :param client: Client IP address
        :type client: basestring
        :param logged_in_user: The Dict of the logged in user
        :type logged_in_user: dict
        :return: list of token types, the user may enroll
        """
        from privacyidea.lib.auth import ROLE
        enroll_types = {}
        role = logged_in_user.get("role")
        if role == ROLE.ADMIN:
            admin_realm = logged_in_user.get("realm")
            user_realm = None
        else:
            admin_realm = None
            user_realm = logged_in_user.get("realm")
        # check, if we have a policy definition at all.
        pols = self.get_policies(scope=role, active=True)
        tokenclasses = get_token_classes()
        for tokenclass in tokenclasses:
            # Check if the tokenclass is ui enrollable for "user" or "admin"
            if role in tokenclass.get_class_info("ui_enroll"):
                enroll_types[tokenclass.get_class_type()] = \
                    tokenclass.get_class_info("description")

        if pols:
            # admin policies or user policies are set, so we need to
            # test, which tokens are allowed to be enrolled for this user
            # NOTE(review): deleting while iterating over keys() is safe
            # here only because Python 2 keys() returns a list copy.
            for tokentype in enroll_types.keys():
                # determine, if there is a enrollment policy for this very type
                typepols = self.get_policies(scope=role, client=client,
                                             user=logged_in_user.get("username"),
                                             realm=user_realm,
                                             active=True,
                                             action="enroll"+tokentype.upper(),
                                             adminrealm=admin_realm)
                if not typepols:
                    # If there is no policy allowing the enrollment of this
                    # tokentype, it is deleted.
                    del(enroll_types[tokentype])

        return enroll_types
# --------------------------------------------------------------------------
#
# NEW STUFF
#
#
@log_with(log)
def set_policy(name=None, scope=None, action=None, realm=None, resolver=None,
               user=None, time=None, client=None, active=True, adminrealm=None):
    """
    Function to set a policy.
    If the policy with this name already exists, it updates the policy.

    :param name: The name of the policy
    :param scope: The scope of the policy. Something like "admin", "system",
        "authentication"
    :param action: A scope specific action or a comma separated list of
        actions; a list or a dict of action names to values is also accepted
        and serialized to a comma separated string
    :type action: basestring, list or dict
    :param realm: A realm, for which this policy is valid
    :param resolver: A resolver, for which this policy is valid
    :param user: A username or a list of usernames
    :param time: A time range string in which the policy is active
        (e.g. "Mon-Fri:08:00-18:00")
    :param client: A client IP with optionally a subnet like 172.16.0.0/16
    :param active: If the policy is active or not
    :type active: bool
    :param adminrealm: The realm of the administrator; only evaluated in
        scope "admin"
    :return: The database ID of the policy
    :rtype: int
    """
    # A dict action maps names to values; True means a plain boolean action,
    # anything else becomes "name=value".
    if type(action) == dict:
        action_list = []
        for k, v in action.items():
            if v is not True:
                # value key
                action_list.append("{0!s}={1!s}".format(k, v))
            else:
                # simple boolean value
                action_list.append(k)
        action = ", ".join(action_list)
    if type(action) == list:
        action = ", ".join(action)
    # Lists for the remaining filter fields are flattened to comma
    # separated strings, which is the storage format of the Policy table.
    if type(realm) == list:
        realm = ", ".join(realm)
    if type(adminrealm) == list:
        adminrealm = ", ".join(adminrealm)
    if type(user) == list:
        user = ", ".join(user)
    if type(resolver) == list:
        resolver = ", ".join(resolver)
    if type(client) == list:
        client = ", ".join(client)
    p = Policy(name, action=action, scope=scope, realm=realm,
               user=user, time=time, client=client, active=active,
               resolver=resolver, adminrealm=adminrealm).save()
    return p
@log_with(log)
def enable_policy(name, enable=True):
    """
    Enable or disable the policy with the given name

    :param name: name of an existing policy
    :param enable: True to activate the policy, False to deactivate it
    :type enable: bool
    :raises ParameterError: if no policy with this name exists
    :return: ID of the policy
    """
    if not Policy.query.filter(Policy.name == name).first():
        raise ParameterError("The policy with name '{0!s}' does not exist".format(name))

    # Update the policy
    p = set_policy(name=name, active=enable)
    return p
@log_with(log)
def delete_policy(name):
    """
    Function to delete one named policy

    :param name: the name of the policy to be deleted
    :return: the count of the deleted policies.
    :rtype: int
    """
    p = Policy.query.filter_by(name=name)
    res = p.delete()
    # Deleting through the query does not auto-commit; persist explicitly.
    db.session.commit()
    return res
@log_with(log)
def export_policies(policies):
    """
    This function takes a policy list and creates an export file from it.
    Each policy becomes an INI-style section::

        [<policy name>]
        key = value
        ...

    :param policies: a policy definition
    :type policies: list of policy dictionaries
    :return: the contents of the file
    :rtype: string
    """
    # Build each section once and join at the end instead of repeated
    # string concatenation (which is quadratic in the number of lines).
    sections = []
    for policy in policies:
        lines = ["[{0!s}]".format(policy.get("name"))]
        lines.extend("{0!s} = {1!s}".format(key, value)
                     for key, value in policy.items())
        # Trailing blank line separates the sections, as before.
        sections.append("\n".join(lines) + "\n\n")
    return "".join(sections)
@log_with(log)
def import_policies(file_contents):
    """
    This function imports policies from a file.
    The file has a config_object format, i.e. the text file has a header
        [<policy_name>]
        key = value
    and key value pairs.

    .. note:: The action/realm/user/resolver/client values are passed
       through ``eval`` and can execute arbitrary expressions; only import
       policy files from a trusted (administrative) source.

    :param file_contents: The contents of the file
    :type file_contents: basestring
    :return: number of imported policies
    :rtype: int
    """
    policies = ConfigObj(file_contents.split('\n'), encoding="UTF-8")
    res = 0
    for policy_name, policy in policies.iteritems():
        # eval turns the stored repr (e.g. "['realm1']") back into Python
        # objects; see the security note in the docstring.
        ret = set_policy(name=policy_name,
                         action=eval(policy.get("action")),
                         scope=policy.get("scope"),
                         realm=eval(policy.get("realm", "[]")),
                         user=eval(policy.get("user", "[]")),
                         resolver=eval(policy.get("resolver", "[]")),
                         client=eval(policy.get("client", "[]")),
                         time=policy.get("time", "")
                         )
        if ret > 0:
            log.debug("import policy {0!s}: {1!s}".format(policy_name, ret))
            res += 1
    return res
@log_with(log)
def get_static_policy_definitions(scope=None):
"""
These are the static hard coded policy definitions.
They can be enhanced by token based policy definitions, that can be found
in lib.token.get_dynamic_policy_definitions.
:param scope: Optional the scope of the policies
:type scope: basestring
:return: allowed scopes with allowed actions, the type of action and a
description.
:rtype: dict
"""
resolvers = get_resolver_list().keys()
realms = get_realms().keys()
smtpconfigs = [server.config.identifier for server in get_smtpservers()]
radiusconfigs = [radius.config.identifier for radius in
get_radiusservers()]
radiusconfigs.insert(0, "userstore")
pol = {
SCOPE.REGISTER: {
ACTION.RESOLVER: {'type': 'str',
'value': resolvers,
'desc': _('Define in which resolver the user '
'should be registered.')},
ACTION.REALM: {'type': 'str',
'value': realms,
'desc': _('Define in which realm the user should '
'be registered.')},
ACTION.EMAILCONFIG: {'type': 'str',
'value': smtpconfigs,
'desc': _('The SMTP server configuration, '
'that should be used to send the '
'registration email.')},
ACTION.REQUIREDEMAIL: {'type': 'str',
'desc': _('Only users with this email '
'address are allowed to '
'register. This is a regular '
'expression.')}
},
SCOPE.ADMIN: {
ACTION.ENABLE: {'type': 'bool',
'desc': _('Admin is allowed to enable tokens.')},
ACTION.DISABLE: {'type': 'bool',
'desc': _('Admin is allowed to disable tokens.')},
ACTION.SET: {'type': 'bool',
'desc': _(
'Admin is allowed to set token properties.')},
ACTION.SETPIN: {'type': 'bool',
'desc': _(
'Admin is allowed to set the OTP PIN of '
'tokens.')},
ACTION.RESYNC: {'type': 'bool',
'desc': _('Admin is allowed to resync tokens.')},
ACTION.RESET: {'type': 'bool',
'desc': _(
'Admin is allowed to reset the Failcounter of '
'a token.')},
ACTION.REVOKE: {'tpye': 'bool',
'desc': _("Admin is allowed to revoke a token")},
ACTION.ASSIGN: {'type': 'bool',
'desc': _(
'Admin is allowed to assign a token to a '
'user.')},
ACTION.UNASSIGN: {'type': 'bool',
'desc': _(
'Admin is allowed to remove the token from '
'a user, '
'i.e. unassign a token.')},
ACTION.IMPORT: {'type': 'bool',
'desc': _(
'Admin is allowed to import token files.')},
ACTION.DELETE: {'type': 'bool',
'desc': _(
'Admin is allowed to remove tokens from the '
'database.')},
ACTION.USERLIST: {'type': 'bool',
'desc': _(
'Admin is allowed to view the list of the '
'users.')},
ACTION.MACHINELIST: {'type': 'bool',
'desc': _('The Admin is allowed to list '
'the machines.')},
ACTION.MACHINETOKENS: {'type': 'bool',
'desc': _('The Admin is allowed to attach '
'and detach tokens to machines.')},
ACTION.AUTHITEMS: {'type': 'bool',
'desc': _('The Admin is allowed to fetch '
'authentication items of tokens '
'assigned to machines.')},
# 'checkstatus': {'type': 'bool',
# 'desc' : _('Admin is allowed to check the
# status of a challenge'
# "group": "tools"},
ACTION.TOKENREALMS: {'type': 'bool',
'desc': _('Admin is allowed to manage the '
'realms of a token.')},
ACTION.GETSERIAL: {'type': 'bool',
'desc': _('Admin is allowed to retrieve a serial'
' for a given OTP value.'),
"group": "tools"},
ACTION.GETRANDOM: {'type': 'bool',
'desc': _('Admin is allowed to retrieve '
'random keys from privacyIDEA.')},
# 'checkserial': {'type': 'bool',
# 'desc': _('Admin is allowed to check if a serial '
# 'is unique'),
# "group": "tools"},
ACTION.COPYTOKENPIN: {'type': 'bool',
'desc': _(
'Admin is allowed to copy the PIN of '
'one token '
'to another token.'),
"group": "tools"},
ACTION.COPYTOKENUSER: {'type': 'bool',
'desc': _(
'Admin is allowed to copy the assigned '
'user to another'
' token, i.e. assign a user ot '
'another token.'),
"group": "tools"},
ACTION.LOSTTOKEN: {'type': 'bool',
'desc': _('Admin is allowed to trigger the '
'lost token workflow.'),
"group": "tools"},
# 'getotp': {
# 'type': 'bool',
# 'desc': _('Allow the administrator to retrieve OTP values
# for tokens.'),
# "group": "tools"},
ACTION.SYSTEMWRITE: {'type': 'bool',
"desc": _("Admin is allowed to write and "
"modify the system configuration."),
"group": "system"},
ACTION.SYSTEMDELETE: {'type': 'bool',
"desc": _("Admin is allowed to delete "
"keys in the system "
"configuration."),
"group": "system"},
ACTION.CONFIGDOCUMENTATION: {'type': 'bool',
'desc': _('Admin is allowed to '
'export a documentation '
'of the complete '
'configuration including '
'resolvers and realm.'),
'group': 'system'},
ACTION.POLICYWRITE: {'type': 'bool',
"desc": _("Admin is allowed to write and "
"modify the policies."),
"group": "system"},
ACTION.POLICYDELETE: {'type': 'bool',
"desc": _("Admin is allowed to delete "
"policies."),
"group": "system"},
ACTION.RESOLVERWRITE: {'type': 'bool',
"desc": _("Admin is allowed to write and "
"modify the "
"resolver and realm "
"configuration."),
"group": "system"},
ACTION.RESOLVERDELETE: {'type': 'bool',
"desc": _("Admin is allowed to delete "
"resolvers and realms."),
"group": "system"},
ACTION.CACONNECTORWRITE: {'type': 'bool',
"desc": _("Admin is allowed to create new"
" CA Connector definitions "
"and modify existing ones."),
"group": "system"},
ACTION.CACONNECTORDELETE: {'type': 'bool',
"desc": _("Admin is allowed to delete "
"CA Connector definitions."),
"group": "system"},
ACTION.MACHINERESOLVERWRITE: {'type': 'bool',
'desc': _("Admin is allowed to "
"write and modify the "
"machine resolvers."),
'group': "system"},
ACTION.MACHINERESOLVERDELETE: {'type': 'bool',
'desc': _("Admin is allowed to "
"delete "
"machine resolvers."),
'group': "system"},
ACTION.AUDIT: {'type': 'bool',
"desc": _("Admin is allowed to view the Audit log."),
"group": "system"},
ACTION.ADDUSER: {'type': 'bool',
"desc": _("Admin is allowed to add users in a "
"userstore/UserIdResolver."),
"group": "system"},
ACTION.UPDATEUSER: {'type': 'bool',
"desc": _("Admin is allowed to update the "
"users data in a userstore."),
"group": "system"},
ACTION.DELETEUSER: {'type': 'bool',
"desc": _("Admin is allowed to delete a user "
"object in a userstore.")},
ACTION.SETHSM: {'type': 'bool',
'desc': _("Admin is allowed to set the password "
"of the HSM/Security Module.")},
ACTION.GETCHALLENGES: {'type': 'bool',
'desc': _("Admin is allowed to retrieve "
"the list of active challenges.")},
ACTION.SMTPSERVERWRITE: {'type': 'bool',
'desc': _("Admin is allowed to write new "
"SMTP server definitions.")},
ACTION.RADIUSSERVERWRITE: {'type': 'bool',
'desc': _("Admin is allowed to write "
"new RADIUS server "
"definitions.")},
ACTION.EVENTHANDLINGWRITE: {'type': 'bool',
'desc': _("Admin is allowed to write "
"and modify the event "
"handling configuration.")}
},
# 'gettoken': {
# 'max_count_dpw': {'type': 'int',
# 'desc' : _('When OTP values are retrieved for
# a DPW token, '
# 'this is the maximum number of
# retrievable OTP values.')},
# 'max_count_hotp': {'type': 'int',
# 'desc' : _('When OTP values are retrieved
# for a HOTP token, '
# 'this is the maximum number of
# retrievable OTP values.')},
# 'max_count_totp': {'type': 'int',
# 'desc' : _('When OTP values are retrieved
# for a TOTP token, '
# 'this is the maximum number of
# retrievable OTP values.')},
# },
SCOPE.USER: {
ACTION.ASSIGN: {
'type': 'bool',
'desc': _("The user is allowed to assign an existing token"
" that is not yet assigned"
" using the token serial number.")},
ACTION.DISABLE: {'type': 'bool',
'desc': _(
'The user is allowed to disable his own '
'tokens.')},
ACTION.ENABLE: {'type': 'bool',
'desc': _(
"The user is allowed to enable his own "
"tokens.")},
ACTION.DELETE: {'type': 'bool',
"desc": _(
"The user is allowed to delete his own "
"tokens.")},
ACTION.UNASSIGN: {'type': 'bool',
"desc": _("The user is allowed to unassign his "
"own tokens.")},
ACTION.RESYNC: {'type': 'bool',
"desc": _("The user is allowed to resyncronize his "
"tokens.")},
ACTION.REVOKE: {'type': 'bool',
'desc': _("The user is allowed to revoke a token")},
ACTION.RESET: {'type': 'bool',
'desc': _('The user is allowed to reset the '
'failcounter of his tokens.')},
ACTION.SETPIN: {'type': 'bool',
"desc": _("The user is allowed to set the OTP "
"PIN "
"of his tokens.")},
ACTION.OTPPINMAXLEN: {'type': 'int',
'value': range(0, 32),
"desc": _("Set the maximum allowed length "
"of the OTP PIN.")},
ACTION.OTPPINMINLEN: {'type': 'int',
'value': range(0, 32),
"desc": _("Set the minimum required length "
"of the OTP PIN.")},
ACTION.OTPPINCONTENTS: {'type': 'str',
"desc": _("Specifiy the required "
"contents of the OTP PIN. "
"(c)haracters, (n)umeric, "
"(s)pecial, (o)thers. [+/-]!")},
# 'setMOTPPIN': {'type': 'bool',
# "desc": _("The user is allowed to set the mOTP
# PIN of his mOTP tokens.")},
# 'getotp': {'type': 'bool',
# "desc": _("The user is allowed to retrieve OTP
# values for his own tokens.")},
# 'activateQR': {'type': 'bool',
# "desc": _("The user is allowed to enroll a QR
# token.")},
# 'max_count_dpw': {'type': 'int',
# "desc": _("This is the maximum number of OTP
# values, the user is allowed to retrieve for a DPW token.")},
# 'max_count_hotp': {'type': 'int',
# "desc": _("This is the maximum number of OTP
# values, the user is allowed to retrieve for a HOTP token.")},
# 'max_count_totp': {'type': 'int',
# "desc": _("This is the maximum number of OTP
# values, the user is allowed to retrieve for a TOTP token.")},
ACTION.AUDIT: {
'type': 'bool',
'desc': _('Allow the user to view his own token history.')},
ACTION.USERLIST: {'type': 'bool',
'desc': _("The user is allowed to view his "
"own user information.")},
ACTION.UPDATEUSER: {'type': 'bool',
'desc': _("The user is allowed to update his "
"own user information, like changing "
"his password.")},
ACTION.PASSWORDRESET: {'type': 'bool',
'desc': _("The user is allowed to do a "
"password reset in an editable "
"UserIdResolver.")}
# 'getserial': {
# 'type': 'bool',
# 'desc': _('Allow the user to search an unassigned token by
# OTP value.')},
},
SCOPE.ENROLL: {
ACTION.MAXTOKENREALM: {
'type': 'int',
'desc': _('Limit the number of allowed tokens in a realm.')},
ACTION.MAXTOKENUSER: {
'type': 'int',
'desc': _('Limit the number of tokens a user may have '
'assigned.')},
ACTION.OTPPINRANDOM: {
'type': 'int',
'value': range(0, 32),
"desc": _("Set a random OTP PIN with this length for a "
"token.")},
ACTION.PINHANDLING: {
'type': 'str',
'desc': _('In case of a random OTP PIN use this python '
'module to process the PIN.')},
ACTION.ENCRYPTPIN: {
'type': 'bool',
"desc": _("The OTP PIN can be hashed or encrypted. Hashing "
"the PIN is the default behaviour.")},
ACTION.TOKENLABEL: {
'type': 'str',
'desc': _("Set label for a new enrolled Google Authenticator. "
"Possible tags are <u> (user), <r> ("
"realm), <s> (serial).")},
ACTION.TOKENISSUER: {
'type': 'str',
'desc': _("This is the issuer label for new enrolled Google "
"Authenticators.")
},
ACTION.AUTOASSIGN: {
'type': 'str',
'value': [AUTOASSIGNVALUE.NONE, AUTOASSIGNVALUE.USERSTORE],
'desc': _("Users can assign a token just by using the "
"unassigned token to authenticate.")},
ACTION.LOSTTOKENPWLEN: {
'type': 'int',
'value': range(1, 32),
'desc': _('The length of the password in case of '
'temporary token (lost token).')},
ACTION.LOSTTOKENPWCONTENTS: {
'type': 'str',
'desc': _('The contents of the temporary password, '
'described by the characters C, c, n, s.')},
ACTION.LOSTTOKENVALID: {
'type': 'int',
'value': range(1, 61),
'desc': _('The length of the validity for the temporary '
'token (in days).')},
},
SCOPE.AUTH: {
ACTION.OTPPIN: {
'type': 'str',
'value': [ACTIONVALUE.TOKENPIN, ACTIONVALUE.USERSTORE,
ACTIONVALUE.NONE],
'desc': _('Either use the Token PIN , use the Userstore '
'Password or use no fixed password '
'component.')},
ACTION.CHALLENGERESPONSE: {
'type': 'str',
'desc': _('This is a whitespace separated list of tokentypes, '
'that can be used with challenge response.')
},
ACTION.PASSTHRU: {
'type': 'str',
'value': radiusconfigs,
'desc': _('If set, the user in this realm will be '
'authenticated against the userstore or against the '
'given RADIUS config,'
' if the user has no tokens assigned.')
},
ACTION.PASSNOTOKEN: {
'type': 'bool',
'desc': _('If the user has no token, the authentication '
'request for this user will always be true.')
},
ACTION.PASSNOUSER: {
'type': 'bool',
'desc': _('If the user user does not exist, '
'the authentication request for this '
'non-existing user will always be true.')
},
ACTION.MANGLE: {
'type': 'str',
'desc': _('Can be used to modify the parameters pass, '
'user and realm in an authentication request. See '
'the documentation for an example.')
}
# 'qrtanurl': {
# 'type': 'str',
# 'desc': _('The URL for the half automatic mode that should
# be '
# 'used in a QR Token')
# },
# 'challenge_response': {
# 'type': 'str',
# 'desc': _('A list of tokentypes for which challenge response '
# 'should be used.')
# }
},
SCOPE.AUTHZ: {
ACTION.AUTHMAXSUCCESS: {
'type': 'str',
'desc': _("You can specify how many successful authentication "
"requests a user is allowed to do in a given time. "
"Specify like 1/5s, 2/10m, 10/1h - s, m, h being "
"second, minute and hour.")
},
ACTION.AUTHMAXFAIL: {
'type': 'str',
'desc': _("You can specify how many failed authentication "
"requests a user is allowed to do in a given time. "
"Specify like 1/5s, 2/10m, 10/1h - s, m, h being "
"second, minute and hour.")
},
ACTION.LASTAUTH: {
'type': 'str',
'desc': _("You can specify in which time frame the user needs "
"to authenticate again with this token. If the user "
"authenticates later, authentication will fail. "
"Specify like 30h, 7d or 1y.")
},
ACTION.TOKENTYPE: {
'type': 'str',
'desc': _('The user will only be authenticated with this '
'very tokentype.')},
ACTION.SERIAL: {
'type': 'str',
'desc': _('The user will only be authenticated if the serial '
'number of the token matches this regexp.')},
ACTION.SETREALM: {
'type': 'str',
'value': realms,
'desc': _('The Realm of the user is set to this very realm. '
'This is important if the user is not contained in '
'the default realm and can not pass his realm.')},
ACTION.NODETAILSUCCESS: {
'type': 'bool',
'desc': _('In case of successful authentication additional '
'no detail information will be returned.')},
ACTION.NODETAILFAIL: {
'type': 'bool',
'desc': _('In case of failed authentication additional '
'no detail information will be returned.')},
ACTION.APIKEY: {
'type': 'bool',
'desc': _('The sending of an API Auth Key is required during'
'authentication. This avoids rogue authenticate '
'requests against the /validate/check interface.')
}
},
SCOPE.WEBUI: {
ACTION.LOGINMODE: {
'type': 'str',
'desc': _(
'If set to "privacyIDEA" the users and admins need to '
'authenticate against privacyIDEA when they log in '
'to the Web UI. Defaults to "userstore"'),
'value': [LOGINMODE.USERSTORE, LOGINMODE.PRIVACYIDEA,
LOGINMODE.DISABLE],
},
ACTION.REMOTE_USER: {
'type': 'str',
'value': [REMOTE_USER.ACTIVE, REMOTE_USER.DISABLE],
'desc': _('The REMOTE_USER set by the webserver can be used '
'to login to privacyIDEA or it will be ignored. '
'Defaults to "disable".')
},
ACTION.LOGOUTTIME: {
'type': 'int',
'desc': _("Set the time in seconds after which the user will "
"be logged out from the WebUI. Default: 120")
},
ACTION.TOKENPAGESIZE: {
'type': 'int',
'desc': _("Set how many tokens should be displayed in the "
"token view on one page.")
},
ACTION.USERPAGESIZE: {
'type': 'int',
'desc': _("Set how many users should be displayed in the user "
"view on one page.")
},
ACTION.USERDETAILS: {
'type': 'bool',
'desc': _("Whether the user ID and the resolver should be "
"displayed in the token list.")
},
ACTION.POLICYTEMPLATEURL: {
'type': 'str',
'desc': _("The URL of a repository, where the policy "
"templates can be found. (Default "
"https://raw.githubusercontent.com/privacyidea/"
"policy-templates/master/templates/)")
},
ACTION.TOKENWIZARD: {
'type': 'bool',
'desc': _("As long as a user has no token, he will only see"
" a token wizard in the UI.")
},
ACTION.TOKENWIZARD2ND: {
'type': 'bool',
'desc': _("The tokenwizard will be displayed in the token "
"menu, even if the user already has a token.")
},
ACTION.DEFAULT_TOKENTYPE: {
'type': 'str',
'desc': _("This is the default token type in the token "
"enrollment dialog."),
'value': get_token_types()
},
ACTION.REALMDROPDOWN: {
'type': 'bool',
'desc': _("If this is checked, a dropdown combobox with the "
"realms is displayed in the login screen.")
}
}
# 'ocra': {
# 'request': {
# 'type': 'bool',
# 'desc': _('Allow to do a ocra/request.')},
# 'status': {
# 'type': 'bool',
# 'desc': _('Allow to check the transaction status.')},
# 'activationcode': {
# 'type': 'bool',
# 'desc': _('Allow to do an ocra/getActivationCode.')},
# 'calcOTP': {
# 'type': 'bool',
# 'desc': _('Allow to do an ocra/calculateOtp.')}
# },
}
if scope:
ret = pol.get(scope, {})
else:
ret = pol
return ret<|fim▁end|> | |
<|file_name|>0026_auto_20160426_1232.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-26 12:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('iiits', '0025_auto_20160425_1937'),
]
operations = [
migrations.CreateModel(<|fim▁hole|> ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('photo', models.ImageField(upload_to='iiits/static/iiits/images/staff')),
('email', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='StaffDesignation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='staff',
name='designation',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='iiits.StaffDesignation'),
),
]<|fim▁end|> | name='Staff',
fields=[ |
<|file_name|>lusolve.js<|end_file_name|><|fim▁begin|>export var lusolveDocs = {<|fim▁hole|> syntax: ['x=lusolve(A, b)', 'x=lusolve(lu, b)'],
description: 'Solves the linear system A * x = b where A is an [n x n] matrix and b is a [n] column vector.',
examples: ['a = [-2, 3; 2, 1]', 'b = [11, 9]', 'x = lusolve(a, b)'],
seealso: ['lup', 'slu', 'lsolve', 'usolve', 'matrix', 'sparse']
};<|fim▁end|> | name: 'lusolve',
category: 'Algebra', |
<|file_name|>ianna_score.py<|end_file_name|><|fim▁begin|>import pygame
import time
import scripts
"""
Score class
Handles all the score area
package: ianna
"""
class IannaScore():
def __init__ (self, buffer, screen, game_entities):
self.score_image = pygame.image.load('artwork/marcador.png').convert()
self.font = pygame.image.load('artwork/font.png').convert()
self.chars = []
self.buffer = buffer
self.screen = screen
self.game_entities = game_entities
self.weapons = []
self.weapons.append(pygame.image.load('artwork/marcador_armas_sword.png').convert())
self.weapons.append(pygame.image.load('artwork/marcador_armas_eclipse.png').convert())
self.weapons.append(pygame.image.load('artwork/marcador_armas_axe.png').convert())
self.weapons.append(pygame.image.load('artwork/marcador_armas_blade.png').convert())
self.first_object_in_inventory = 0
# We have 64 chars, in ASCII order starting by BLANK (32)
# There are some special chars, look at the font!
for tile_x in range (0,32):
rect = (tile_x*8, 0, 8, 8)
self.chars.append(self.font.subsurface(rect))
for tile_x in range (0,32):
rect = (tile_x*8, 8, 8, 8)
self.chars.append(self.font.subsurface(rect))
def clean_text_area(self):
for y in range(0,3):
for x in range(0,30):
self.buffer.blit(self.chars[0],(8+x*8,168+y*8))
def print_string(self,string):
fpsClock = pygame.time.Clock()
y=0
x=0
i=0
while i < len(string):
word = ""
# Find the word
while string[i] != ',' and string[i] != '.' and string[i] != ' ':
word = word + string[i]
i = i + 1
# Add the punctuation character
word = word + string[i]
i = i + 1
# Now print it
if x + len(word) > 30:
y = y + 1
x = 0
if y == 3: # We need to wait until the player presses any key
self.buffer.blit(self.chars[32],(240,184))
pygame.transform.scale(self.buffer,(256*3,192*3),self.screen)
pygame.display.flip()
self.wait_for_keypress()
y = 0
self.clean_text_area()
j = 0
while j < len(word):
char = ord(word[j]) - 32
self.buffer.blit(self.chars[char],(8+x*8,168+y*8))
x = x + 1
j = j + 1
pygame.transform.scale(self.buffer,(256*3,192*3),self.screen)
pygame.display.flip()
fpsClock.tick(25) # run at 10 fps
self.buffer.blit(self.chars[32],(240,184))
pygame.transform.scale(self.buffer,(256*3,192*3),self.screen)
pygame.display.flip()
self.wait_for_keypress()
def print_char(self,char,x,y):
char = ord(str(char)) - 32
self.buffer.blit(self.chars[char],(x,y))
def wait_for_keypress(self):
'''
Silly function, just wait for a keypress to happen
In the Spectrum version, it should be way better
'''
keypressed = False
keyreleased = False
key = None
while (not keypressed) and (not keyreleased):
events = pygame.event.get()
for event in events:
if event.type == pygame.KEYDOWN: # keypressed, wait until it is released
key = event.key
keypressed = True
if event.type == pygame.KEYUP: # keypressed, wait until it is released
if key == event.key:
keyreleased = True
def print_meter(self,x,value, color):
'''
Display an entity health, on X
'''
y=191
value = value*23/100
rect = [x+2,y-value,5,value]
pygame.draw.rect(self.buffer,color,rect)
def print_inventory(self,player):
'''
Display the inventory
'''
currentx = 24
x = 0
if player.current_object > self.first_object_in_inventory + 2:
self.first_object_in_inventory = self.first_object_in_inventory + 1
elif player.current_object < self.first_object_in_inventory:
self.first_object_in_inventory = self.first_object_in_inventory - 1
for item in player.inventory[self.first_object_in_inventory:]:
if x == 3:
break
self.buffer.blit(player.map.tile_table[self.tiles_per_pickable_object[item]], (currentx,168))
currentx = currentx + 24
x = x + 1
# Use a marker for the current selected object
self.buffer.blit(self.chars[63],(24+(player.current_object-self.first_object_in_inventory)*24,184))
def draw(self):
self.buffer.set_clip(pygame.Rect(0,160,256,192)) # set clipping area for game, should then set clipping for score area
self.buffer.blit(self.score_image,(0,160))
# Print barbarian energy
self.print_meter(168,(self.game_entities[0].energy*100) / self.game_entities[0].get_entity_max_energy(),(255,0,0))
# Print barbarian level
self.print_meter(176,(self.game_entities[0].experience*100) / self.game_entities[0].get_player_max_exp(),(0,255,255))
# Print current weapon
self.buffer.blit(self.weapons[self.game_entities[0].weapon-1],(112,168))
if self.game_entities[1] and self.game_entities[1].enemy_type != "OBJECT_ENEMY_ROCK":
entity = self.game_entities[1]
energy = (entity.energy*100) / entity.enemy_energy[entity.enemy_type][entity.level]
self.print_meter(192,energy,(0,255,0))
# Print energy in numbers
if entity.energy > 99:
print "WARNING: enemy energy is > 100"
else:
self.print_char(entity.energy/10,200,176)
self.print_char(entity.energy%10,208,176)
self.print_char(entity.level,208,184)
if self.game_entities[2] and self.game_entities[2].enemy_type not in ('OBJECT_ENEMY_ROCK','OBJECT_ENEMY_SECONDARY'):
entity = self.game_entities[2]
energy = (entity.energy*100) / entity.enemy_energy[entity.enemy_type][entity.level]
self.print_meter(216,energy,(0,255,0))
if entity.energy > 99:
print "WARNING: enemy energy is > 100"
else:
self.print_char(entity.energy/10,224,176)
self.print_char(entity.energy%10,232,176)<|fim▁hole|>
# Remember to copy this from scripts.py when new objects are created
tiles_per_pickable_object = { "OBJECT_KEY_GREEN": 217,
"OBJECT_KEY_BLUE": 218,
"OBJECT_KEY_YELLOW": 219,
"OBJECT_BREAD": 220,
"OBJECT_MEAT": 221,
"OBJECT_HEALTH": 222,
"OBJECT_KEY_RED": 223,
"OBJECT_KEY_WHITE": 224,
"OBJECT_KEY_PURPLE": 225,
}<|fim▁end|> | self.print_char(entity.level,232,184)
self.print_inventory(self.game_entities[0])
|
<|file_name|>later-hydrator.tsx<|end_file_name|><|fim▁begin|>import * as React from "react"
export function LaterHydrator({
children,
}: React.PropsWithChildren<Record<string, unknown>>): React.ReactNode {
React.useEffect(() => {<|fim▁hole|> // eslint-disable-next-line no-unused-expressions
import(`./lazy-hydrate`)
}, [])
return children
}<|fim▁end|> | |
<|file_name|>autocomplete_light_registry.py<|end_file_name|><|fim▁begin|>import autocomplete_light<|fim▁hole|>
autocomplete_light.register(City, search_fields=('search_names',),
autocomplete_js_attributes={'placeholder': 'city name ..'})<|fim▁end|> |
from cities_light.models import City |
<|file_name|>markdown_extensions.py<|end_file_name|><|fim▁begin|>import markdown
import re
from django.core.urlresolvers import reverse
from django.template.context import Context
from django.template.loader import render_to_string
from wiki.core.permissions import can_read
ATTACHMENT_RE = re.compile(r'(?P<before>.*)(\[attachment\:(?P<id>\d+)\])(?P<after>.*)', re.IGNORECASE)
from wiki.plugins.attachments import models
class AttachmentExtension(markdown.Extension):
""" Abbreviation Extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Insert AbbrPreprocessor before ReferencePreprocessor. """
md.preprocessors.add('dw-attachments', AttachmentPreprocessor(md), '>html_block')
class AttachmentPreprocessor(markdown.preprocessors.Preprocessor):
"""django-wiki attachment preprocessor - parse text for [attachment:id] references. """
def run(self, lines):
new_text = []
for line in lines:
m = ATTACHMENT_RE.match(line)
if m:
attachment_id = m.group('id').strip()
before = m.group('before')
after = m.group('after')
try:
attachment = models.Attachment.objects.get(
articles__current_revision__deleted=False,
id=attachment_id, current_revision__deleted=False
)
url = reverse('wiki:attachments_download', kwargs={'article_id': self.markdown.article.id,
'attachment_id':attachment.id,})
# The readability of the attachment is decided relative
# to the owner of the original article.
# I.e. do not insert attachments in other articles that
# the original uploader cannot read, that would be out
# of scope!
attachment_can_read = can_read( self.markdown.article,
attachment.article.owner)
html = render_to_string(
"wiki/plugins/attachments/render.html",
Context({
'url': url,
'filename': attachment.original_filename,
'attachment_can_read': attachment_can_read,
}))
line = self.markdown.htmlStash.store(html, safe=True)
except models.Attachment.DoesNotExist:
line = line.replace(m.group(1), u"""<span class="attachment attachment-deleted">Attachment with ID #%s is deleted.</span>""" % attachment_id)
line = before + line + after <|fim▁hole|> return new_text<|fim▁end|> | new_text.append(line) |
<|file_name|>io.go<|end_file_name|><|fim▁begin|>package io
import (
"fmt"
"os"
)
func Info(args ...interface{}) {
fmt.Print("\033[1m-----> ")
args = append(args, "\033[0m")
fmt.Println(args...)
}
func Infof(format string, args ...interface{}) {
fmt.Print("\033[1m-----> ")
fmt.Printf(format+"\033[0m", args...)
}
<|fim▁hole|> fmt.Println(args...)
}
func Printf(format string, args ...interface{}) {
fmt.Print(" ")
fmt.Printf(format, args...)
}
func Warnf(format string, args ...interface{}) {
fmt.Print(" ! ")
fmt.Printf(format, args...)
}
func Error(args ...interface{}) {
fmt.Print(" ! ")
fmt.Println(args...)
os.Exit(1)
}<|fim▁end|> | func Print(args ...interface{}) {
fmt.Print(" ") |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Syntax extension to create floating point literals from hexadecimal strings
Once loaded, hexfloat!() is called with a string containing the hexadecimal
floating-point literal, and an optional type (f32 or f64).
If the type is omitted, the literal is treated the same as a normal unsuffixed
literal.
# Examples
To load the extension and use it:
```rust,ignore
#[phase(plugin)]
extern crate hexfloat;
fn main() {
let val = hexfloat!("0x1.ffffb4", f32);
}
```
# References
* [ExploringBinary: hexadecimal floating point constants]
(http://www.exploringbinary.com/hexadecimal-floating-point-constants/)
*/
#![crate_name = "hexfloat"]
#![deprecated = "This is now a cargo package located at: \
https://github.com/rust-lang/hexfloat"]
#![allow(deprecated)]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![license = "MIT/ASL2"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/")]
#![feature(plugin_registrar)]
extern crate syntax;
extern crate rustc;
use syntax::ast;
use syntax::codemap::{Span, mk_sp};
use syntax::ext::base;
use syntax::ext::base::{ExtCtxt, MacExpr};
use syntax::ext::build::AstBuilder;
use syntax::parse::token;
use syntax::ptr::P;
use rustc::plugin::Registry;
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("hexfloat", expand_syntax_ext);
}
//Check if the literal is valid (as LLVM expects),
//and return a descriptive error if not.
fn hex_float_lit_err(s: &str) -> Option<(uint, String)> {
let mut chars = s.chars().peekable();
let mut i = 0;
if chars.peek() == Some(&'-') { chars.next(); i+= 1 }
if chars.next() != Some('0') {
return Some((i, "Expected '0'".to_string()));
} i+=1;
if chars.next() != Some('x') {
return Some((i, "Expected 'x'".to_string()));
} i+=1;
let mut d_len = 0i;
for _ in chars.take_while(|c| c.is_digit_radix(16)) { chars.next(); i+=1; d_len += 1;}
if chars.next() != Some('.') {
return Some((i, "Expected '.'".to_string()));
} i+=1;
let mut f_len = 0i;
for _ in chars.take_while(|c| c.is_digit_radix(16)) { chars.next(); i+=1; f_len += 1;}
if d_len == 0 && f_len == 0 {
return Some((i, "Expected digits before or after decimal \
point".to_string()));
}
if chars.next() != Some('p') {
return Some((i, "Expected 'p'".to_string()));
} i+=1;
if chars.peek() == Some(&'-') { chars.next(); i+= 1 }
let mut e_len = 0i;
for _ in chars.take_while(|c| c.is_digit()) { chars.next(); i+=1; e_len += 1}
if e_len == 0 {
return Some((i, "Expected exponent digits".to_string()));
}
match chars.next() {
None => None,
Some(_) => Some((i, "Expected end of string".to_string()))
}
}
pub fn expand_syntax_ext(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let (expr, ty_lit) = parse_tts(cx, tts);<|fim▁hole|>
let ty = match ty_lit {
None => None,
Some(Ident{ident, span}) => match token::get_ident(ident).get() {
"f32" => Some(ast::TyF32),
"f64" => Some(ast::TyF64),
_ => {
cx.span_err(span, "invalid floating point type in hexfloat!");
None
}
}
};
let s = match expr.node {
// expression is a literal
ast::ExprLit(ref lit) => match lit.node {
// string literal
ast::LitStr(ref s, _) => {
s.clone()
}
_ => {
cx.span_err(expr.span, "unsupported literal in hexfloat!");
return base::DummyResult::expr(sp);
}
},
_ => {
cx.span_err(expr.span, "non-literal in hexfloat!");
return base::DummyResult::expr(sp);
}
};
{
let err = hex_float_lit_err(s.get());
match err {
Some((err_pos, err_str)) => {
let pos = expr.span.lo + syntax::codemap::Pos::from_uint(err_pos + 1);
let span = syntax::codemap::mk_sp(pos,pos);
cx.span_err(span,
format!("invalid hex float literal in hexfloat!: \
{}",
err_str).as_slice());
return base::DummyResult::expr(sp);
}
_ => ()
}
}
let lit = match ty {
None => ast::LitFloatUnsuffixed(s),
Some (ty) => ast::LitFloat(s, ty)
};
MacExpr::new(cx.expr_lit(sp, lit))
}
struct Ident {
ident: ast::Ident,
span: Span
}
/// Parses the macro's token stream into the literal expression plus an
/// optional trailing `, ident` type suffix.
fn parse_tts(cx: &ExtCtxt,
             tts: &[ast::TokenTree]) -> (P<ast::Expr>, Option<Ident>) {
    let p = &mut cx.new_parser_from_tts(tts);
    let ex = p.parse_expr();
    // A `, ident` suffix is optional; if any token follows the expression it
    // must be a comma and then an ident.
    let id = if p.token == token::EOF {
        None
    } else {
        p.expect(&token::COMMA);
        let lo = p.span.lo;
        let ident = p.parse_ident();
        let hi = p.last_span.hi;
        Some(Ident{ident: ident, span: mk_sp(lo, hi)})
    };
    // Anything after the optional suffix is a syntax error.
    if p.token != token::EOF {
        p.unexpected();
    }
    (ex, id)
}
// FIXME (10872): This is required to prevent an LLVM assert on Windows
// (presumably by keeping the generated test harness non-empty — confirm
// against issue #10872 before removing).
#[test]
fn dummy_test() { }
<|file_name|>update-google-chart.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
import json
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
import datetime
from participantCollection import ParticipantCollection
# Edit Me!
# Month names used in the monthly-challenge directory names.
_MONTH_NAMES = ['january', 'february', 'march', 'april', 'may', 'june',
                'july', 'august', 'september', 'october', 'november', 'december']


def _monthlyParticipantFileNames(startYear, startMonth, endYear, endMonth):
    """Return '../stayclean-<year>-<month>/participants.txt' for every month
    from (startYear, startMonth) through (endYear, endMonth), inclusive.

    Months are 1-based (1 == January). Replaces the previously hand-maintained
    59-entry literal list, which had to be edited every month.
    """
    fileNames = []
    year, month = startYear, startMonth
    while (year, month) <= (endYear, endMonth):
        fileNames.append('../stayclean-%d-%s/participants.txt' % (year, _MONTH_NAMES[month - 1]))
        month += 1
        if month > 12:
            year, month = year + 1, 1
    return fileNames


# All previous monthly challenges (November 2014 through September 2019),
# followed by the current month's participant file in this directory.
participantFileNames = _monthlyParticipantFileNames(2014, 11, 2019, 9) + ['./participants.txt']
# Collect every relapse date across all monthly challenges, oldest first.
sortedRelapseDates = []
for participantFileName in participantFileNames:
    participants = ParticipantCollection(fileNameString=participantFileName)
    sortedRelapseDates = sortedRelapseDates + participants.allRelapseDates()
sortedRelapseDates.sort()
earliestReportDate = sortedRelapseDates[0]
latestReportDate = sortedRelapseDates[-1]
reportDates = []
numberOfRelapsesPerDate = []
# Per-date relapse totals, plus a count of how many times each weekday
# (0 == Monday ... 6 == Sunday) occurs in [earliest, latest]; the weekday
# counts are the denominators for the per-weekday averages computed below.
reportDatesAndNumberOfRelapses = {}
dayOfWeekIndexesAndNumberOfInstances = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
reportDate = earliestReportDate
while reportDate <= latestReportDate:
    reportDatesAndNumberOfRelapses[reportDate] = 0
    # dayOfWeekIndexesAndNumberOfInstances[reportDate.weekday()] = dayOfWeekIndexesAndNumberOfInstances[reportDate.weekday()] + 1
    dayOfWeekIndexesAndNumberOfInstances[reportDate.weekday()] += 1
    reportDate += datetime.timedelta(days=1)
# Tally how many relapses fell on each calendar date.
for relapseDate in sortedRelapseDates:
    # reportDatesAndNumberOfRelapses[relapseDate] = reportDatesAndNumberOfRelapses[relapseDate] + 1
    reportDatesAndNumberOfRelapses[relapseDate] += 1
dayOfWeekIndexesAndTotalNumberOfRelapses = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
# Total relapses per weekday index across every monthly challenge.
for participantFileName in participantFileNames:
    participants = ParticipantCollection(fileNameString=participantFileName)
    # print participants.relapseDayOfWeekIndexesAndParticipants()
    for index, parts in participants.relapseDayOfWeekIndexesAndParticipants().iteritems():
        # dayOfWeekIndexesAndTotalNumberOfRelapses[index] = dayOfWeekIndexesAndTotalNumberOfRelapses[index] + len(parts)
        dayOfWeekIndexesAndTotalNumberOfRelapses[index] += len(parts)
# Average = total relapses on that weekday / number of times that weekday
# occurred in the reporting range (kept as a float, not rounded).
dayOfWeekIndexesAndAverageNumberOfRelapses = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
for index, instances in dayOfWeekIndexesAndNumberOfInstances.iteritems():
    # dayOfWeekIndexesAndAverageNumberOfRelapses[index] = int(round(float(dayOfWeekIndexesAndTotalNumberOfRelapses[index]) / float(instances)))
    dayOfWeekIndexesAndAverageNumberOfRelapses[index] = float(dayOfWeekIndexesAndTotalNumberOfRelapses[index]) / float(instances)
# Upload results to Google Sheets: columns A/B hold per-date relapse counts,
# columns C/D hold per-weekday averages, all in the first worksheet.
spreadsheetTitle = "StayClean monthly challenge relapse data"
# spreadsheetTitle = "Test spreadsheet"
json_key = json.load(open('../google-oauth-credentials.json'))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'].encode(), scope)
gc = gspread.authorize(credentials)
spreadSheet = None
try:
    spreadSheet = gc.open(spreadsheetTitle)
except gspread.exceptions.SpreadsheetNotFound:
    print "No spreadsheet with title " + spreadsheetTitle
    exit(1)
workSheet = spreadSheet.get_worksheet(0)
# Fetch cell ranges in bulk (the +1 presumably leaves row 1 for a header —
# TODO confirm against the live sheet).
columnACells = workSheet.range("A2:A" + str(len(reportDatesAndNumberOfRelapses) + 1))
columnBCells = workSheet.range("B2:B" + str(len(reportDatesAndNumberOfRelapses) + 1))
columnCCells = workSheet.range("C2:C8")
columnDCells = workSheet.range("D2:D8")
# One row per calendar date: date string in A, relapse count in B.
reportDate = earliestReportDate
rowIndex = 0
while reportDate <= latestReportDate:
    columnACells[rowIndex].value = str(reportDate)
    columnBCells[rowIndex].value = str(reportDatesAndNumberOfRelapses[reportDate])
    rowIndex += 1
    reportDate += datetime.timedelta(days=1)
# One row per weekday: name in C, average relapse count in D.
for weekdayIndex in range(0, 7):
    weekdayName = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'][weekdayIndex]
    # spreadsheetClient.UpdateCell(weekdayIndex + 2,3,weekdayName,spreadsheetId)
    # spreadsheetClient.UpdateCell(weekdayIndex + 2,4,str(dayOfWeekIndexesAndAverageNumberOfRelapses[weekdayIndex]),spreadsheetId)
    columnCCells[weekdayIndex].value = weekdayName
    columnDCells[weekdayIndex].value = str(dayOfWeekIndexesAndAverageNumberOfRelapses[weekdayIndex])
# Single batched write to minimise API round-trips.
allCells = columnACells + columnBCells + columnCCells + columnDCells
workSheet.update_cells(allCells)
exit(0)
<|file_name|>haudiobroadcast.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2011 Tuomo Penttinen, all rights reserved.
*
* Author: Tuomo Penttinen <[email protected]>
*
* This file is part of Herqq UPnP Av (HUPnPAv) library.
*
* Herqq UPnP Av is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Herqq UPnP Av is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Herqq UPnP Av. If not, see <http://www.gnu.org/licenses/>.
*/
#include "haudiobroadcast.h"
#include "haudiobroadcast_p.h"
#include "../model_mgmt/hcdsproperties.h"
#include "../../common/hradioband.h"
namespace Herqq<|fim▁hole|>namespace Upnp
{
namespace Av
{
/*******************************************************************************
* HAudioBroadcastPrivate
******************************************************************************/
/*!
 * Creates the private data object and registers the CDS properties of the
 * audioBroadcast class on top of those inherited from HAudioItemPrivate.
 * The boolean-valued properties are registered with an explicit default of
 * \c false; the registration calls are kept in this order deliberately.
 */
HAudioBroadcastPrivate::HAudioBroadcastPrivate(
    const QString& clazz, HObject::CdsType cdsType) :
        HAudioItemPrivate(clazz, cdsType)
{
    const HCdsProperties& inst = HCdsProperties::instance();
    insert(inst.get(HCdsProperties::upnp_region));
    insert(inst.get(HCdsProperties::upnp_radioCallSign));
    insert(inst.get(HCdsProperties::upnp_radioStationID));
    insert(inst.get(HCdsProperties::upnp_radioBand));
    insert(inst.get(HCdsProperties::upnp_channelNr));
    insert(inst.get(HCdsProperties::upnp_signalStrength));
    insert(inst.get(HCdsProperties::upnp_signalLocked).name(), false);
    insert(inst.get(HCdsProperties::upnp_tuned).name(), false);
    insert(inst.get(HCdsProperties::upnp_recordable).name(), false);
}
/*******************************************************************************
* HAudioBroadcast
******************************************************************************/
/*!
 * Creates an instance with the specified CDS class string and type;
 * intended for use by derived classes.
 */
HAudioBroadcast::HAudioBroadcast(const QString& clazz, CdsType cdsType) :
    HAudioItem(*new HAudioBroadcastPrivate(clazz, cdsType))
{
}

/*!
 * Creates an instance sharing the provided private data object;
 * intended for use by derived classes.
 */
HAudioBroadcast::HAudioBroadcast(HAudioBroadcastPrivate& dd) :
    HAudioItem(dd)
{
}

/*!
 * Creates a new audioBroadcast object with the default class and type and
 * initializes its title, parent ID and object ID.
 */
HAudioBroadcast::HAudioBroadcast(
    const QString& title, const QString& parentId, const QString& id) :
        HAudioItem(*new HAudioBroadcastPrivate(sClass(), sType()))
{
    init(title, parentId, id);
}

/*!
 * Destroys the instance.
 */
HAudioBroadcast::~HAudioBroadcast()
{
}

/*!
 * Returns a new default-constructed HAudioBroadcast; used by the object
 * creation machinery to clone instances of this class.
 */
HAudioBroadcast* HAudioBroadcast::newInstance() const
{
    return new HAudioBroadcast();
}
/*!
 * Specifies the value of the upnp:region property.
 */
void HAudioBroadcast::setRegion(const QString& region)
{
    setCdsProperty(HCdsProperties::upnp_region, region);
}

/*!
 * Specifies the value of the upnp:radioCallSign property.
 */
void HAudioBroadcast::setRadioCallSign(const QString& callSign)
{
    setCdsProperty(HCdsProperties::upnp_radioCallSign, callSign);
}

/*!
 * Specifies the value of the upnp:radioStationID property.
 */
void HAudioBroadcast::setRadioStationId(const QString& stationId)
{
    setCdsProperty(HCdsProperties::upnp_radioStationID, stationId);
}

/*!
 * Specifies the value of the upnp:radioBand property.
 */
void HAudioBroadcast::setRadioBand(const HRadioBand& band)
{
    setCdsProperty(HCdsProperties::upnp_radioBand, QVariant::fromValue(band));
}

/*!
 * Specifies the value of the upnp:channelNr property.
 */
void HAudioBroadcast::setChannelNr(qint32 nr)
{
    setCdsProperty(HCdsProperties::upnp_channelNr, nr);
}

/*!
 * Specifies the value of the upnp:signalStrength property.
 */
void HAudioBroadcast::setSignalStrength(qint32 strength)
{
    setCdsProperty(HCdsProperties::upnp_signalStrength, strength);
}

/*!
 * Specifies the value of the upnp:signalLocked property.
 */
void HAudioBroadcast::setSignalLocked(bool locked)
{
    setCdsProperty(HCdsProperties::upnp_signalLocked, locked);
}

/*!
 * Specifies the value of the upnp:tuned property.
 */
void HAudioBroadcast::setTuned(bool tuned)
{
    setCdsProperty(HCdsProperties::upnp_tuned, tuned);
}

/*!
 * Specifies the value of the upnp:recordable property.
 */
void HAudioBroadcast::setRecordable(bool recordable)
{
    setCdsProperty(HCdsProperties::upnp_recordable, recordable);
}
QString HAudioBroadcast::region() const
{
QVariant value;
getCdsProperty(HCdsProperties::upnp_region, &value);
return value.toString();
}
QString HAudioBroadcast::radioCallSign() const
{
QVariant value;
getCdsProperty(HCdsProperties::upnp_radioCallSign, &value);
return value.toString();
}
QString HAudioBroadcast::radioStationId() const
{
QVariant value;
getCdsProperty(HCdsProperties::upnp_radioStationID, &value);
return value.toString();
}
HRadioBand HAudioBroadcast::radioBand() const
{
QVariant value;
getCdsProperty(HCdsProperties::upnp_radioBand, &value);
return value.value<HRadioBand>();
}
qint32 HAudioBroadcast::channelNr() const
{
QVariant value;
getCdsProperty(HCdsProperties::upnp_channelNr, &value);
return value.toInt();
}
qint32 HAudioBroadcast::signalStrength() const
{
QVariant value;
getCdsProperty(HCdsProperties::upnp_signalStrength, &value);
return value.toInt();
}
bool HAudioBroadcast::signalLocked() const
{
QVariant value;
getCdsProperty(HCdsProperties::upnp_signalLocked, &value);
return value.toBool();
}
bool HAudioBroadcast::tuned() const
{
QVariant value;
getCdsProperty(HCdsProperties::upnp_tuned, &value);
return value.toBool();
}
bool HAudioBroadcast::recordable() const
{
QVariant value;
getCdsProperty(HCdsProperties::upnp_recordable, &value);
return value.toBool();
}
}
}
}<|fim▁end|> | {
|
<|file_name|>store_mocks.go<|end_file_name|><|fim▁begin|>// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Automatically generated by MockGen. DO NOT EDIT!
// Source: pkg/store/store.go
package mocks
import (
context "context"
gomock "github.com/golang/mock/gomock"
)
// Mock of DataStore interface
// NOTE(review): this file is produced by MockGen ("DO NOT EDIT" header
// above) - regenerate from pkg/store/store.go rather than editing by hand.
type MockDataStore struct {
	ctrl     *gomock.Controller
	recorder *_MockDataStoreRecorder
}

// Recorder for MockDataStore (not exported)
type _MockDataStoreRecorder struct {
	mock *MockDataStore
}

// NewMockDataStore creates a MockDataStore wired to the given gomock
// controller, together with its expectation recorder.
func NewMockDataStore(ctrl *gomock.Controller) *MockDataStore {
	mock := &MockDataStore{ctrl: ctrl}
	mock.recorder = &_MockDataStoreRecorder{mock}
	return mock
}

// EXPECT returns the recorder on which expected calls are registered.
func (_m *MockDataStore) EXPECT() *_MockDataStoreRecorder {
	return _m.recorder
}
// Put forwards the call to the controller and returns the first configured
// return value as an error (nil if the type assertion fails).
func (_m *MockDataStore) Put(ctx context.Context, key string, value string) error {
	ret := _m.ctrl.Call(_m, "Put", ctx, key, value)
	ret0, _ := ret[0].(error)
	return ret0
}

// Put records an expected call of Put on the mock.
func (_mr *_MockDataStoreRecorder) Put(arg0, arg1, arg2 interface{}) *gomock.Call {
	return _mr.mock.ctrl.RecordCall(_mr.mock, "Put", arg0, arg1, arg2)
}

// Get forwards the call to the controller and returns the configured
// (map, error) pair; failed type assertions yield zero values.
func (_m *MockDataStore) Get(ctx context.Context, key string) (map[string]string, error) {
	ret := _m.ctrl.Call(_m, "Get", ctx, key)
	ret0, _ := ret[0].(map[string]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Get records an expected call of Get on the mock.
func (_mr *_MockDataStoreRecorder) Get(arg0, arg1 interface{}) *gomock.Call {
	return _mr.mock.ctrl.RecordCall(_mr.mock, "Get", arg0, arg1)
}
// Delete forwards the call to the controller and returns the first
// configured return value as an error (nil if the type assertion fails).
func (_m *MockDataStore) Delete(ctx context.Context, key string) error {
	ret := _m.ctrl.Call(_m, "Delete", ctx, key)
	ret0, _ := ret[0].(error)
	return ret0
}
func (_mr *_MockDataStoreRecorder) Delete(arg0, arg1 interface{}) *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "Delete", arg0, arg1)<|fim▁hole|>
// GetWithPrefix forwards the call to the controller and returns the
// configured (map, error) pair; failed type assertions yield zero values.
func (_m *MockDataStore) GetWithPrefix(ctx context.Context, keyPrefix string) (map[string]string, error) {
	ret := _m.ctrl.Call(_m, "GetWithPrefix", ctx, keyPrefix)
	ret0, _ := ret[0].(map[string]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetWithPrefix records an expected call of GetWithPrefix on the mock.
func (_mr *_MockDataStoreRecorder) GetWithPrefix(arg0, arg1 interface{}) *gomock.Call {
	return _mr.mock.ctrl.RecordCall(_mr.mock, "GetWithPrefix", arg0, arg1)
}
<|file_name|>text.mako.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% from data import Method %>
## Declares the non-inherited "Text" style struct (Gecko name "TextReset")
## plus helper predicates for the text-decoration-line bits.
<% data.new_style_struct("Text",
                         inherited=False,
                         gecko_name="TextReset",
                         additional_methods=[Method("has_underline", "bool"),
                                             Method("has_overline", "bool"),
                                             Method("has_line_through", "bool")]) %>
## text-overflow (boxed=True: the computed value is stored boxed).
${helpers.predefined_type("text-overflow",
                          "TextOverflow",
                          "computed::TextOverflow::get_initial_value()",
                          animation_value_type="discrete",
                          boxed=True,
                          flags="APPLIES_TO_PLACEHOLDER",
                          spec="https://drafts.csswg.org/css-ui/#propdef-text-overflow")}
## unicode-bidi: simple keyword property from css-writing-modes.
${helpers.single_keyword("unicode-bidi",
                         "normal embed isolate bidi-override isolate-override plaintext",
                         animation_value_type="discrete",
                         spec="https://drafts.csswg.org/css-writing-modes/#propdef-unicode-bidi")}
<|fim▁hole|> "specified::TextDecorationLine::none()",
initial_specified_value="specified::TextDecorationLine::none()",
custom_cascade= product == 'servo',
custom_cascade_function="specified::TextDecorationLine::cascade_property_custom",
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER APPLIES_TO_FIRST_LINE APPLIES_TO_PLACEHOLDER",
spec="https://drafts.csswg.org/css-text-decor/#propdef-text-decoration-line")}
## text-decoration-style: Gecko-only keyword property (includes the
## non-standard -moz-none value).
${helpers.single_keyword("text-decoration-style",
                         "solid double dotted dashed wavy -moz-none",
                         products="gecko",
                         animation_value_type="discrete",
                         flags="APPLIES_TO_FIRST_LETTER APPLIES_TO_FIRST_LINE APPLIES_TO_PLACEHOLDER",
                         spec="https://drafts.csswg.org/css-text-decor/#propdef-text-decoration-style")}
## text-decoration-color: Gecko-only; defaults to currentcolor and is
## ignored when document colors are disabled.
${helpers.predefined_type(
    "text-decoration-color",
    "Color",
    "computed_value::T::currentcolor()",
    initial_specified_value="specified::Color::currentcolor()",
    products="gecko",
    animation_value_type="AnimatedColor",
    ignored_when_colors_disabled=True,
    flags="APPLIES_TO_FIRST_LETTER APPLIES_TO_FIRST_LINE APPLIES_TO_PLACEHOLDER",
    spec="https://drafts.csswg.org/css-text-decor/#propdef-text-decoration-color",
)}
## initial-letter: Gecko-only drop-initial sizing from css-inline.
${helpers.predefined_type(
    "initial-letter",
    "InitialLetter",
    "computed::InitialLetter::normal()",
    initial_specified_value="specified::InitialLetter::normal()",
    animation_value_type="discrete",
    products="gecko",
    flags="APPLIES_TO_FIRST_LETTER",
    spec="https://drafts.csswg.org/css-inline/#sizing-drop-initials")}
"TextDecorationLine", |
<|file_name|>LaunchScriptAction.java<|end_file_name|><|fim▁begin|>/**
* Copyright (C) 2013, Moss Computing Inc.
*
* This file is part of simpledeb.
*
* simpledeb is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* simpledeb is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with simpledeb; see the file COPYING. If not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA.
 *
 * Linking this library statically or dynamically with other modules is
 * making a combined work based on this library. Thus, the terms and
* combination.
*
* As a special exception, the copyright holders of this library give you
* permission to link this library with independent modules to produce an
* executable, regardless of the license terms of these independent
* modules, and to copy and distribute the resulting executable under
* terms of your choice, provided that you also meet, for each linked
* independent module, the terms and conditions of the license of that
* module. An independent module is a module which is not derived from
* or based on this library. If you modify this library, you may extend
* this exception to your version of the library, but you are not
* obligated to do so. If you do not wish to do so, delete this
* exception statement from your version.
*/
package com.moss.simpledeb.core.action;
import java.io.File;
import java.util.LinkedList;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import com.moss.simpledeb.core.DebComponent;
import com.moss.simpledeb.core.DebState;
import com.moss.simpledeb.core.path.ArchivePath;
import com.moss.simpledeb.core.path.BytesArchivePath;
import com.moss.simpledeb.core.path.DirArchivePath;
@XmlAccessorType(XmlAccessType.FIELD)
public final class LaunchScriptAction extends DebAction {
@XmlAttribute(name="class-name")
private String className;
@XmlAttribute(name="target-file")
private String targetFile;
@XmlAttribute(name="path-level")
private int pathLevel;
@Override
public void run(DebState state) throws Exception {
{
File target = new File(targetFile).getParentFile();
LinkedList<File> pathsNeeded = new LinkedList<File>();
File f = target;
while (f != null) {
pathsNeeded.addFirst(f);
f = f.getParentFile();
}
for (int i=0; i<pathLevel; i++) {
pathsNeeded.removeFirst();
}
for (File e : pathsNeeded) {
String p = "./" + e.getPath();
if (!p.endsWith("/")) {
p = p + "/";
}
TarArchiveEntry tarEntry = new TarArchiveEntry(p);
tarEntry.setGroupId(0);
tarEntry.setGroupName("root");
tarEntry.setIds(0, 0);
tarEntry.setModTime(System.currentTimeMillis());
tarEntry.setSize(0);
tarEntry.setUserId(0);
tarEntry.setUserName("root");
tarEntry.setMode(Integer.parseInt("755", 8));
ArchivePath path = new DirArchivePath(tarEntry);
state.addPath(DebComponent.CONTENT, path);
}
}
String cp;
{
StringBuffer sb = new StringBuffer();
for (String path : state.classpath) {
if (sb.length() == 0) {
sb.append(path);
}
else {
sb.append(":");
sb.append(path);
}
}
cp = sb.toString();
}
StringBuilder sb = new StringBuilder();
sb.append("#!/bin/bash\n");
sb.append("CP=\"");
sb.append(cp);
sb.append("\"\n");
sb.append("/usr/bin/java -cp $CP ");
sb.append(className);
sb.append(" $@\n");
byte[] data = sb.toString().getBytes();
String entryName = "./" + targetFile;
TarArchiveEntry tarEntry = new TarArchiveEntry(entryName);
tarEntry.setGroupId(0);
tarEntry.setGroupName("root");
tarEntry.setIds(0, 0);
tarEntry.setModTime(System.currentTimeMillis());
tarEntry.setSize(data.length);
tarEntry.setUserId(0);
tarEntry.setUserName("root");
tarEntry.setMode(Integer.parseInt("755", 8));
ArchivePath path = new BytesArchivePath(tarEntry, data);
state.addPath(DebComponent.CONTENT, path);
}
public String getClassName() {
return className;
}
public void setClassName(String className) {
this.className = className;
}
public String getTargetFile() {
return targetFile;
}
public void setTargetFile(String targetFile) {
this.targetFile = targetFile;
}
public int getPathLevel() {
return pathLevel;
}
public void setPathLevel(int assumedTargetPathLevel) {
this.pathLevel = assumedTargetPathLevel;
}
}<|fim▁end|> | *
/*
 * Linking this library statically or dynamically with other modules is
 * making a combined work based on this library. Thus, the terms and
 * conditions of the GNU General Public License cover the whole combination.
 */
<|file_name|>generated_secondary_launch_config.go<|end_file_name|><|fim▁begin|>package client
// SECONDARY_LAUNCH_CONFIG_TYPE is the wire name of the
// secondaryLaunchConfig resource schema.
const (
	SECONDARY_LAUNCH_CONFIG_TYPE = "secondaryLaunchConfig"
)
// SecondaryLaunchConfig is the API model for the secondaryLaunchConfig
// resource. Struct tags map each field to its JSON (camelCase) and YAML
// (snake_case) wire names; every field is optional (omitempty).
// NOTE(review): the file name indicates this code is generated - prefer
// regenerating over hand-editing.
type SecondaryLaunchConfig struct {
	Resource
	AccountId string `json:"accountId,omitempty" yaml:"account_id,omitempty"`
	AgentId string `json:"agentId,omitempty" yaml:"agent_id,omitempty"`
	AllocationState string `json:"allocationState,omitempty" yaml:"allocation_state,omitempty"`
	BlkioDeviceOptions map[string]interface{} `json:"blkioDeviceOptions,omitempty" yaml:"blkio_device_options,omitempty"`
	BlkioWeight int64 `json:"blkioWeight,omitempty" yaml:"blkio_weight,omitempty"`
	Build *DockerBuild `json:"build,omitempty" yaml:"build,omitempty"`
	CapAdd []string `json:"capAdd,omitempty" yaml:"cap_add,omitempty"`
	CapDrop []string `json:"capDrop,omitempty" yaml:"cap_drop,omitempty"`
	CgroupParent string `json:"cgroupParent,omitempty" yaml:"cgroup_parent,omitempty"`
	Command []string `json:"command,omitempty" yaml:"command,omitempty"`
	Count int64 `json:"count,omitempty" yaml:"count,omitempty"`
	CpuCount int64 `json:"cpuCount,omitempty" yaml:"cpu_count,omitempty"`
	CpuPercent int64 `json:"cpuPercent,omitempty" yaml:"cpu_percent,omitempty"`
	CpuPeriod int64 `json:"cpuPeriod,omitempty" yaml:"cpu_period,omitempty"`
	CpuQuota int64 `json:"cpuQuota,omitempty" yaml:"cpu_quota,omitempty"`
	CpuSet string `json:"cpuSet,omitempty" yaml:"cpu_set,omitempty"`
	CpuSetMems string `json:"cpuSetMems,omitempty" yaml:"cpu_set_mems,omitempty"`
	CpuShares int64 `json:"cpuShares,omitempty" yaml:"cpu_shares,omitempty"`
	CreateIndex int64 `json:"createIndex,omitempty" yaml:"create_index,omitempty"`
	Created string `json:"created,omitempty" yaml:"created,omitempty"`
	Data map[string]interface{} `json:"data,omitempty" yaml:"data,omitempty"`
	DataVolumeMounts map[string]interface{} `json:"dataVolumeMounts,omitempty" yaml:"data_volume_mounts,omitempty"`
	DataVolumes []string `json:"dataVolumes,omitempty" yaml:"data_volumes,omitempty"`
	DataVolumesFrom []string `json:"dataVolumesFrom,omitempty" yaml:"data_volumes_from,omitempty"`
	DataVolumesFromLaunchConfigs []string `json:"dataVolumesFromLaunchConfigs,omitempty" yaml:"data_volumes_from_launch_configs,omitempty"`
	DeploymentUnitUuid string `json:"deploymentUnitUuid,omitempty" yaml:"deployment_unit_uuid,omitempty"`
	Description string `json:"description,omitempty" yaml:"description,omitempty"`
	Devices []string `json:"devices,omitempty" yaml:"devices,omitempty"`
	DiskQuota int64 `json:"diskQuota,omitempty" yaml:"disk_quota,omitempty"`
	Disks []VirtualMachineDisk `json:"disks,omitempty" yaml:"disks,omitempty"`
	Dns []string `json:"dns,omitempty" yaml:"dns,omitempty"`
	DnsOpt []string `json:"dnsOpt,omitempty" yaml:"dns_opt,omitempty"`
	DnsSearch []string `json:"dnsSearch,omitempty" yaml:"dns_search,omitempty"`
	DomainName string `json:"domainName,omitempty" yaml:"domain_name,omitempty"`
	EntryPoint []string `json:"entryPoint,omitempty" yaml:"entry_point,omitempty"`
	Environment map[string]interface{} `json:"environment,omitempty" yaml:"environment,omitempty"`
	Expose []string `json:"expose,omitempty" yaml:"expose,omitempty"`
	ExternalId string `json:"externalId,omitempty" yaml:"external_id,omitempty"`
	ExtraHosts []string `json:"extraHosts,omitempty" yaml:"extra_hosts,omitempty"`
	FirstRunning string `json:"firstRunning,omitempty" yaml:"first_running,omitempty"`
	GroupAdd []string `json:"groupAdd,omitempty" yaml:"group_add,omitempty"`
	HealthCheck *InstanceHealthCheck `json:"healthCheck,omitempty" yaml:"health_check,omitempty"`
	HealthCmd []string `json:"healthCmd,omitempty" yaml:"health_cmd,omitempty"`
	HealthInterval int64 `json:"healthInterval,omitempty" yaml:"health_interval,omitempty"`
	HealthRetries int64 `json:"healthRetries,omitempty" yaml:"health_retries,omitempty"`
	HealthState string `json:"healthState,omitempty" yaml:"health_state,omitempty"`
	HealthTimeout int64 `json:"healthTimeout,omitempty" yaml:"health_timeout,omitempty"`
	HostId string `json:"hostId,omitempty" yaml:"host_id,omitempty"`
	Hostname string `json:"hostname,omitempty" yaml:"hostname,omitempty"`
	ImageUuid string `json:"imageUuid,omitempty" yaml:"image_uuid,omitempty"`
	InstanceLinks map[string]interface{} `json:"instanceLinks,omitempty" yaml:"instance_links,omitempty"`
	InstanceTriggeredStop string `json:"instanceTriggeredStop,omitempty" yaml:"instance_triggered_stop,omitempty"`
	IoMaximumBandwidth int64 `json:"ioMaximumBandwidth,omitempty" yaml:"io_maximum_bandwidth,omitempty"`
	IoMaximumIOps int64 `json:"ioMaximumIOps,omitempty" yaml:"io_maximum_iops,omitempty"`
	Ip string `json:"ip,omitempty" yaml:"ip,omitempty"`
	Ip6 string `json:"ip6,omitempty" yaml:"ip6,omitempty"`
	IpcMode string `json:"ipcMode,omitempty" yaml:"ipc_mode,omitempty"`
	Isolation string `json:"isolation,omitempty" yaml:"isolation,omitempty"`
	KernelMemory int64 `json:"kernelMemory,omitempty" yaml:"kernel_memory,omitempty"`
	Kind string `json:"kind,omitempty" yaml:"kind,omitempty"`
	Labels map[string]interface{} `json:"labels,omitempty" yaml:"labels,omitempty"`
	LogConfig *LogConfig `json:"logConfig,omitempty" yaml:"log_config,omitempty"`
	LxcConf map[string]interface{} `json:"lxcConf,omitempty" yaml:"lxc_conf,omitempty"`
	Memory int64 `json:"memory,omitempty" yaml:"memory,omitempty"`
	MemoryMb int64 `json:"memoryMb,omitempty" yaml:"memory_mb,omitempty"`
	MemoryReservation int64 `json:"memoryReservation,omitempty" yaml:"memory_reservation,omitempty"`
	MemorySwap int64 `json:"memorySwap,omitempty" yaml:"memory_swap,omitempty"`
	MemorySwappiness int64 `json:"memorySwappiness,omitempty" yaml:"memory_swappiness,omitempty"`
	MilliCpuReservation int64 `json:"milliCpuReservation,omitempty" yaml:"milli_cpu_reservation,omitempty"`
	Mounts []MountEntry `json:"mounts,omitempty" yaml:"mounts,omitempty"`
	Name string `json:"name,omitempty" yaml:"name,omitempty"`
	NativeContainer bool `json:"nativeContainer,omitempty" yaml:"native_container,omitempty"`
	NetAlias []string `json:"netAlias,omitempty" yaml:"net_alias,omitempty"`
	NetworkContainerId string `json:"networkContainerId,omitempty" yaml:"network_container_id,omitempty"`
	NetworkIds []string `json:"networkIds,omitempty" yaml:"network_ids,omitempty"`
	NetworkLaunchConfig string `json:"networkLaunchConfig,omitempty" yaml:"network_launch_config,omitempty"`
	NetworkMode string `json:"networkMode,omitempty" yaml:"network_mode,omitempty"`
	OomKillDisable bool `json:"oomKillDisable,omitempty" yaml:"oom_kill_disable,omitempty"`
	OomScoreAdj int64 `json:"oomScoreAdj,omitempty" yaml:"oom_score_adj,omitempty"`
	PidMode string `json:"pidMode,omitempty" yaml:"pid_mode,omitempty"`
	PidsLimit int64 `json:"pidsLimit,omitempty" yaml:"pids_limit,omitempty"`
	Ports []string `json:"ports,omitempty" yaml:"ports,omitempty"`
	PrimaryIpAddress string `json:"primaryIpAddress,omitempty" yaml:"primary_ip_address,omitempty"`
	PrimaryNetworkId string `json:"primaryNetworkId,omitempty" yaml:"primary_network_id,omitempty"`
	Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"`
	PublishAllPorts bool `json:"publishAllPorts,omitempty" yaml:"publish_all_ports,omitempty"`
	ReadOnly bool `json:"readOnly,omitempty" yaml:"read_only,omitempty"`
	RegistryCredentialId string `json:"registryCredentialId,omitempty" yaml:"registry_credential_id,omitempty"`
	RemoveTime string `json:"removeTime,omitempty" yaml:"remove_time,omitempty"`
	Removed string `json:"removed,omitempty" yaml:"removed,omitempty"`
	RequestedHostId string `json:"requestedHostId,omitempty" yaml:"requested_host_id,omitempty"`
	RequestedIpAddress string `json:"requestedIpAddress,omitempty" yaml:"requested_ip_address,omitempty"`
	Secrets []SecretReference `json:"secrets,omitempty" yaml:"secrets,omitempty"`
	SecurityOpt []string `json:"securityOpt,omitempty" yaml:"security_opt,omitempty"`
	ServiceId string `json:"serviceId,omitempty" yaml:"service_id,omitempty"`
	ServiceIds []string `json:"serviceIds,omitempty" yaml:"service_ids,omitempty"`
	ShmSize int64 `json:"shmSize,omitempty" yaml:"shm_size,omitempty"`
	StackId string `json:"stackId,omitempty" yaml:"stack_id,omitempty"`
	StartCount int64 `json:"startCount,omitempty" yaml:"start_count,omitempty"`
	StartOnCreate bool `json:"startOnCreate,omitempty" yaml:"start_on_create,omitempty"`
	State string `json:"state,omitempty" yaml:"state,omitempty"`
	StdinOpen bool `json:"stdinOpen,omitempty" yaml:"stdin_open,omitempty"`
	StopSignal string `json:"stopSignal,omitempty" yaml:"stop_signal,omitempty"`
	StorageOpt map[string]interface{} `json:"storageOpt,omitempty" yaml:"storage_opt,omitempty"`
	Sysctls map[string]interface{} `json:"sysctls,omitempty" yaml:"sysctls,omitempty"`
	System bool `json:"system,omitempty" yaml:"system,omitempty"`
	Tmpfs map[string]interface{} `json:"tmpfs,omitempty" yaml:"tmpfs,omitempty"`
	Token string `json:"token,omitempty" yaml:"token,omitempty"`
	Transitioning string `json:"transitioning,omitempty" yaml:"transitioning,omitempty"`
	TransitioningMessage string `json:"transitioningMessage,omitempty" yaml:"transitioning_message,omitempty"`
	TransitioningProgress int64 `json:"transitioningProgress,omitempty" yaml:"transitioning_progress,omitempty"`
	Tty bool `json:"tty,omitempty" yaml:"tty,omitempty"`
	Ulimits []Ulimit `json:"ulimits,omitempty" yaml:"ulimits,omitempty"`
	User string `json:"user,omitempty" yaml:"user,omitempty"`
	UserPorts []string `json:"userPorts,omitempty" yaml:"user_ports,omitempty"`
	Userdata string `json:"userdata,omitempty" yaml:"userdata,omitempty"`
	UsernsMode string `json:"usernsMode,omitempty" yaml:"userns_mode,omitempty"`
	Uts string `json:"uts,omitempty" yaml:"uts,omitempty"`
	Uuid string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
	Vcpu int64 `json:"vcpu,omitempty" yaml:"vcpu,omitempty"`
	Version string `json:"version,omitempty" yaml:"version,omitempty"`
	VolumeDriver string `json:"volumeDriver,omitempty" yaml:"volume_driver,omitempty"`
	WorkingDir string `json:"workingDir,omitempty" yaml:"working_dir,omitempty"`
}
// SecondaryLaunchConfigCollection is a list of SecondaryLaunchConfig
// resources as returned by the API.
type SecondaryLaunchConfigCollection struct {
	Collection
	Data []SecondaryLaunchConfig `json:"data,omitempty"`
	client *SecondaryLaunchConfigClient
}

// SecondaryLaunchConfigClient performs API operations for the
// secondaryLaunchConfig resource type.
type SecondaryLaunchConfigClient struct {
	rancherClient *RancherClient
}
// SecondaryLaunchConfigOperations enumerates the CRUD and lifecycle
// operations the Rancher API exposes for secondary launch configs.
// Each Action* method maps 1:1 to a named API "action" on the resource.
type SecondaryLaunchConfigOperations interface {
	List(opts *ListOpts) (*SecondaryLaunchConfigCollection, error)
	Create(opts *SecondaryLaunchConfig) (*SecondaryLaunchConfig, error)
	Update(existing *SecondaryLaunchConfig, updates interface{}) (*SecondaryLaunchConfig, error)
	ById(id string) (*SecondaryLaunchConfig, error)
	Delete(container *SecondaryLaunchConfig) error
	ActionAllocate(*SecondaryLaunchConfig) (*Instance, error)
	ActionConsole(*SecondaryLaunchConfig, *InstanceConsoleInput) (*InstanceConsole, error)
	ActionCreate(*SecondaryLaunchConfig) (*Instance, error)
	ActionDeallocate(*SecondaryLaunchConfig) (*Instance, error)
	ActionError(*SecondaryLaunchConfig) (*Instance, error)
	ActionExecute(*SecondaryLaunchConfig, *ContainerExec) (*HostAccess, error)
	ActionMigrate(*SecondaryLaunchConfig) (*Instance, error)
	ActionProxy(*SecondaryLaunchConfig, *ContainerProxy) (*HostAccess, error)
	ActionPurge(*SecondaryLaunchConfig) (*Instance, error)
	ActionRemove(*SecondaryLaunchConfig) (*Instance, error)
	ActionRestart(*SecondaryLaunchConfig) (*Instance, error)
	ActionRestore(*SecondaryLaunchConfig) (*Instance, error)
	ActionStart(*SecondaryLaunchConfig) (*Instance, error)
	ActionStop(*SecondaryLaunchConfig, *InstanceStop) (*Instance, error)
	ActionUpdate(*SecondaryLaunchConfig) (*Instance, error)
	ActionUpdatehealthy(*SecondaryLaunchConfig) (*Instance, error)
	ActionUpdatereinitializing(*SecondaryLaunchConfig) (*Instance, error)
	ActionUpdateunhealthy(*SecondaryLaunchConfig) (*Instance, error)
}
// newSecondaryLaunchConfigClient wires a typed client to the shared RancherClient.
func newSecondaryLaunchConfigClient(rancherClient *RancherClient) *SecondaryLaunchConfigClient {
	return &SecondaryLaunchConfigClient{
		rancherClient: rancherClient,
	}
}
// Create POSTs a new secondary launch config and returns the created resource.
func (c *SecondaryLaunchConfigClient) Create(container *SecondaryLaunchConfig) (*SecondaryLaunchConfig, error) {
	resp := &SecondaryLaunchConfig{}
	err := c.rancherClient.doCreate(SECONDARY_LAUNCH_CONFIG_TYPE, container, resp)
	return resp, err
}
// Update applies `updates` to an existing resource and returns the new state.
// `updates` is an arbitrary value serialized by the underlying client.
func (c *SecondaryLaunchConfigClient) Update(existing *SecondaryLaunchConfig, updates interface{}) (*SecondaryLaunchConfig, error) {
	resp := &SecondaryLaunchConfig{}
	err := c.rancherClient.doUpdate(SECONDARY_LAUNCH_CONFIG_TYPE, &existing.Resource, updates, resp)
	return resp, err
}
// List fetches the first page of secondary launch configs matching `opts`.
// The collection keeps a reference to this client so Next() can paginate.
func (c *SecondaryLaunchConfigClient) List(opts *ListOpts) (*SecondaryLaunchConfigCollection, error) {
	resp := &SecondaryLaunchConfigCollection{}
	err := c.rancherClient.doList(SECONDARY_LAUNCH_CONFIG_TYPE, opts, resp)
	resp.client = c
	return resp, err
}
// Next follows the collection's pagination link and returns the next page,
// or (nil, nil) when there are no further pages.
func (cc *SecondaryLaunchConfigCollection) Next() (*SecondaryLaunchConfigCollection, error) {
	if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
		resp := &SecondaryLaunchConfigCollection{}
		err := cc.client.rancherClient.doNext(cc.Pagination.Next, resp)
		resp.client = cc.client
		return resp, err
	}
	return nil, nil
}
// ById fetches a single secondary launch config by its resource id.
// A 404 from the API is deliberately mapped to (nil, nil), not an error.
func (c *SecondaryLaunchConfigClient) ById(id string) (*SecondaryLaunchConfig, error) {
	resp := &SecondaryLaunchConfig{}
	err := c.rancherClient.doById(SECONDARY_LAUNCH_CONFIG_TYPE, id, resp)
	if apiError, ok := err.(*ApiError); ok {
		if apiError.StatusCode == 404 {
			return nil, nil
		}
	}
	return resp, err
}
// Delete removes the given resource from the Rancher API.
func (c *SecondaryLaunchConfigClient) Delete(container *SecondaryLaunchConfig) error {
	return c.rancherClient.doResourceDelete(SECONDARY_LAUNCH_CONFIG_TYPE, &container.Resource)
}
func (c *SecondaryLaunchConfigClient) ActionAllocate(resource *SecondaryLaunchConfig) (*Instance, error) {
resp := &Instance{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "allocate", &resource.Resource, nil, resp)
<|fim▁hole|>
resp := &InstanceConsole{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "console", &resource.Resource, input, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionCreate(resource *SecondaryLaunchConfig) (*Instance, error) {
resp := &Instance{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "create", &resource.Resource, nil, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionDeallocate(resource *SecondaryLaunchConfig) (*Instance, error) {
resp := &Instance{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "deallocate", &resource.Resource, nil, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionError(resource *SecondaryLaunchConfig) (*Instance, error) {
resp := &Instance{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "error", &resource.Resource, nil, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionExecute(resource *SecondaryLaunchConfig, input *ContainerExec) (*HostAccess, error) {
resp := &HostAccess{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "execute", &resource.Resource, input, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionMigrate(resource *SecondaryLaunchConfig) (*Instance, error) {
resp := &Instance{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "migrate", &resource.Resource, nil, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionProxy(resource *SecondaryLaunchConfig, input *ContainerProxy) (*HostAccess, error) {
resp := &HostAccess{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "proxy", &resource.Resource, input, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionPurge(resource *SecondaryLaunchConfig) (*Instance, error) {
resp := &Instance{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "purge", &resource.Resource, nil, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionRemove(resource *SecondaryLaunchConfig) (*Instance, error) {
resp := &Instance{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "remove", &resource.Resource, nil, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionRestart(resource *SecondaryLaunchConfig) (*Instance, error) {
resp := &Instance{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "restart", &resource.Resource, nil, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionRestore(resource *SecondaryLaunchConfig) (*Instance, error) {
resp := &Instance{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "restore", &resource.Resource, nil, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionStart(resource *SecondaryLaunchConfig) (*Instance, error) {
resp := &Instance{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "start", &resource.Resource, nil, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionStop(resource *SecondaryLaunchConfig, input *InstanceStop) (*Instance, error) {
resp := &Instance{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "stop", &resource.Resource, input, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionUpdate(resource *SecondaryLaunchConfig) (*Instance, error) {
resp := &Instance{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "update", &resource.Resource, nil, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionUpdatehealthy(resource *SecondaryLaunchConfig) (*Instance, error) {
resp := &Instance{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "updatehealthy", &resource.Resource, nil, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionUpdatereinitializing(resource *SecondaryLaunchConfig) (*Instance, error) {
resp := &Instance{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "updatereinitializing", &resource.Resource, nil, resp)
return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionUpdateunhealthy(resource *SecondaryLaunchConfig) (*Instance, error) {
resp := &Instance{}
err := c.rancherClient.doAction(SECONDARY_LAUNCH_CONFIG_TYPE, "updateunhealthy", &resource.Resource, nil, resp)
return resp, err
}<|fim▁end|> | return resp, err
}
func (c *SecondaryLaunchConfigClient) ActionConsole(resource *SecondaryLaunchConfig, input *InstanceConsoleInput) (*InstanceConsole, error) { |
<|file_name|>0004_auto_20171223_0859.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-23 08:59<|fim▁hole|>from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (do not hand-edit the operations):
    # widens the `source` field choices on five event-log models so that
    # 'text_message' becomes a valid source. The choices list is repeated
    # verbatim per field because the autogenerator freezes it in place.

    dependencies = [
        ('events', '0003_auto_20171221_0336'),
    ]

    operations = [
        migrations.AlterField(
            model_name='dailyproductivitylog',
            name='source',
            field=models.CharField(choices=[('api', 'Api'), ('ios', 'Ios'), ('android', 'Android'), ('mobile', 'Mobile'), ('web', 'Web'), ('user_excel', 'User_Excel'), ('text_message', 'Text_Message')], max_length=50),
        ),
        migrations.AlterField(
            model_name='sleeplog',
            name='source',
            field=models.CharField(choices=[('api', 'Api'), ('ios', 'Ios'), ('android', 'Android'), ('mobile', 'Mobile'), ('web', 'Web'), ('user_excel', 'User_Excel'), ('text_message', 'Text_Message')], max_length=50),
        ),
        # The three models below additionally keep their default of 'web'.
        migrations.AlterField(
            model_name='supplementlog',
            name='source',
            field=models.CharField(choices=[('api', 'Api'), ('ios', 'Ios'), ('android', 'Android'), ('mobile', 'Mobile'), ('web', 'Web'), ('user_excel', 'User_Excel'), ('text_message', 'Text_Message')], default='web', max_length=50),
        ),
        migrations.AlterField(
            model_name='useractivitylog',
            name='source',
            field=models.CharField(choices=[('api', 'Api'), ('ios', 'Ios'), ('android', 'Android'), ('mobile', 'Mobile'), ('web', 'Web'), ('user_excel', 'User_Excel'), ('text_message', 'Text_Message')], default='web', max_length=50),
        ),
        migrations.AlterField(
            model_name='usermoodlog',
            name='source',
            field=models.CharField(choices=[('api', 'Api'), ('ios', 'Ios'), ('android', 'Android'), ('mobile', 'Mobile'), ('web', 'Web'), ('user_excel', 'User_Excel'), ('text_message', 'Text_Message')], default='web', max_length=50),
        ),
    ]
|
<|file_name|>parseJson.py<|end_file_name|><|fim▁begin|>import json, sys, re
def printString(s, begin, end):
    """Write ``begin`` + ``_(<json-quoted s>)`` + ``end`` to stdout.

    Strings matching any placeholder pattern (``*...*`` markup, ``CAM<digit> ...``
    camera cues, or ``Z NULL``/``ZNULL`` entries) are suppressed entirely.
    """
    placeholder = re.match(r'^(\*.*\*|CAM[0-9] .*|Z ?NULL.*)$', s)
    if placeholder:
        return
    # json.dumps gives a double-quoted, escaped literal; non-ASCII kept as-is.
    quoted = json.dumps(s, ensure_ascii=False)
    sys.stdout.write(begin + '_(' + quoted + ')' + end)
def parse(obj):
if isinstance(obj, dict):
for k, v in obj.items():
parse(v)
if k == 'name' and isinstance(v, str):
printString(v, '', '\n')
elif k == 'text' and isinstance(v, list):
for s in v:
if isinstance(s, str):
printString(s, '', '\n')
elif isinstance(obj, list):<|fim▁hole|> for v in obj:
parse(v)
parse(json.load(open(sys.argv[1], 'r')))<|fim▁end|> | |
<|file_name|>st.js<|end_file_name|><|fim▁begin|>/**
* 统计js
* author zzy
*/
var st_config = {"memc":"","memcExpires":"","pass":0};
var st=(function(e){
var _self = this;
var config = {};
/*获取cookie*/
var _getCookie = function(key){
var strCookie=document.cookie;
var arrCookie=strCookie.split("; ");
for(var i=0;i<arrCookie.length;i++){
var arr=arrCookie[i].split("=");
if(arr[0]==key)return arr[1];
}
return "";
};
var _gCookie = function(key){
//console.log(key);
var strCookie=document.cookie;
var arrCookie=strCookie.split("; ");
for(var i=0;i<arrCookie.length;i++){
var arr=arrCookie[i].split("=");
if(arr[0]==key)return arr[1];
}
return "";
};
var _setCookie = function(key,value,expire){
var expdate = new Date(); //初始化时间
expdate.setTime(expdate.getTime() + expire ); //时间
document.cookie = key+"="+value+";expires="+expdate.toGMTString()+";path=/";
};
var _queryString = function(name){
var svalue = window.location.search.match(new RegExp("[\?\&]" + name + "=([^\&]*)(\&?)","i"));
return svalue ? svalue[1] : svalue;
}
var __getJSON=function(args){
var openid =_gCookie("stat_openid");// 'o_gjIjp5ONKYetCUiDo6Dr98QFdo';
var url = args.url+openid;
//alert(url);
var script = document.createElement('script');
script.src = url;
script.type = "text/javascript";
document.getElementsByTagName('script')[0].appendChild(script);
if (!+[1,]) {
script.onreadystatechange = function() {
if (this.readyState == 'loaded' || this.readyState == 'complete') {
}
}
} else {
script.onload = function() {
}
}
}
var _gJson = function(url,callbackFn){
//alert(args.callbackFn);
jq.ajax({
async:false,
type:'get',
url:url,
dataType:'jsonp',
jsonpCallback:callbackFn,
success : function(json){
console.log(json);
//alert(json[0].name);
}
});
// Ajax.send({
// url:url,
// async: false,
// dataType: 'jsonp',
// jsoncallback:callbackFn
// });
// $.ajax({
// url:url+'&jsonp='+callbackFn,
// type:'get',
// async:false,
// dataType:'jsonp',
// success:function(){}
// })
};
/***生成channelInfo ***/
var _createChannelInfo = function(args){
var channel=_getCookie("stat_channel"),channelInfo="";
//清空cookie中记录
_setCookie("stat_channel","",1);
//判断是否存在channel_expires
if( args["channel_expires"] && args["channel_expires"] < Math.ceil( Date.parse(new Date())/1000 ) ){
args["from_channel"] = ""; //清空from_channel
}
//判断ocs是否记录channel
if( args["from_channel"] )
{
channelInfo = args["from_channel"];
}else if( channel ){
channelInfo = channel;
}else{
return false;
}
return channelInfo;
}
/***生成ActionSeqId***/
var _createActionSeqId = function( args ){
var channel=_getCookie("stat_channel");
if( !channel ){
return _getCookie("stat_action_seq_id");
}
//_gJson({'url':args.memcServerPath+"&key=channel_expires_","callbackFn":"_memcTime"});
//_gJson({'url':args.memcServerPath+"&key=channel_",'callbackFn':"_returnBackMemc"});
//_gJson(args.memcServerPath+"&key=channel_expires_"+args.stat_openid,"_memcTime");
//_gJson(args.memcServerPath+"&key=channel_"+args.stat_openid,"_returnBackMemc");
var from_channel_expires= st_config.memcExpires;
//console.log(from_channel_expires+'-3');
var from_channel = st_config.memc;
//console.log(st_config.memc+'-3');
var channelInfo = _createChannelInfo({
"from_channel":from_channel,
"channel_expires":from_channel_expires
});
if( !channelInfo ){
return _getCookie("stat_action_seq_id");
}
// 密码字符集,可任意添加你需要的字符
var chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',stat_random='',len = chars.length;
for ( var i = 0; i < 32; i++ )
{
id = Math.ceil( Math.random()*( len-1 ) );
stat_random += chars[ id ];
}
var stat_weimobid = _getCookie("weimobID");
var stat_action_seq_id = stat_random+":"+channelInfo+":"+stat_weimobid;
_setCookie("stat_action_seq_id", stat_action_seq_id ,3650*24*3600);
return stat_action_seq_id;
}
/**
* 页面加载执行
* @param openId
* @param pid
* @param bid
* @param webType 类型
* @param module 模块
* @param statisticServerPath 项目中配置
* @param memcServerPath
*/
var _triggerEvent = function(args){
var config = {
"stat_openid":_getCookie("stat_openid"),
"stat_pid":_getCookie("stat_pid"),
"stat_bid":_getCookie("stat_bid"),
"stat_webtype":_getCookie("stat_webtype"),
"stat_module":_getCookie("stat_module")
};
//__getJSON({'url':args.memcServerPath+"&key=channel_expires_"});
//__getJSON({'url':args.memcServerPath+"&key=channel_"});
//for(;st_config.pass<10;){}
//action_seq_id
var stat_action_seq_id = _createActionSeqId( {
"stat_openid":config.stat_openid,
"memcServerPath":args.memcServerPath
});
//统计
var _args={
"stat_pid":config.stat_pid,
"stat_bid":config.stat_bid,
"stat_action_seq_id":stat_action_seq_id,
"stat_webtype":config.stat_webtype,
"stat_module":config.stat_module,
"stat_action":args.stat_action,
"stat_optValue":args.stat_optValue,
"statisticServerPath":args.statisticServerPath
};
//console.log(st_config.memcExpires+'-1');
var arr=[];
arr[0] = "StatType=qudao";
arr[1] = "url="+encodeURIComponent(location.href);
arr[2] = "stat_bid="+_args.stat_bid;
arr[3] = "stat_pid="+_args.stat_pid;
arr[4] = "stat_action_seq_id="+_args.stat_action_seq_id;
arr[5] = "stat_webtype="+_args.stat_webtype;
arr[6] = "stat_module="+_args.stat_module;
arr[7] = "stat_time="+Math.ceil( Date.parse(new Date()) / 1000 );
arr[8] = "stat_action="+_args.stat_action;
arr[9] = "stat_optValue="+_args.stat_optValue;
var str= arr.join("&");
var wm = document.createElement("script");
wm.src = args.statisticServerPath + "?" + str;
console.log(wm.src);
var s = document.getElementsByTagName("body")[0];
s.appendChild(wm);
}
return {
push:function(fn,args){
if(args.is_statistic_on=="off"){
return false;
}
//var stat_openid=getCookie("stat_openid");///'o_gjIjp5ONKYetCUiDo6Dr98QFdo';
eval('__getJSON').call(this,{'url':args.memcServerPath+"&key=channel_expires_"});
eval('__getJSON').call(this,{'url':args.memcServerPath+"&key=channel_"});
setTimeout(function(){
//延迟执行执行
eval(fn).call(this,args);
},2000);
}
}
})();
// JSONP callback: stores the channel-expiry timestamp fetched from the
// memcache endpoint into the shared st_config.
function _memcTime(data){
    //console.log(data+'-2');
    //alert(data+'-2');
    st_config.memcExpires = data;
}
<|fim▁hole|>function _returnBackMemc(data){
//alert(data+'-1');
//console.log(data+'-1');
st_config.memc = data;
}
/***调用案例***/
//注:is_statistic_on on 开 off 关
//页面统计使用案例
//st.push("_triggerEvent",{
// "is_statistic_on":"on",
// "statisticServerPath": "http://statistic.dev.weimob.com/wm.js", //统计地址
// "memcServerPath": "http://121.42.10.197/memc?cmd=get", //缓存地址
// "stat_action":"loadPage",
// "stat_optValue":""
//});<|fim▁end|> | |
<|file_name|>driver.rs<|end_file_name|><|fim▁begin|>//! Abstracts out the entire chain of runtime sub-drivers into common types.
use crate::park::thread::ParkThread;
use crate::park::Park;
use std::io;
use std::time::Duration;
// ===== io driver =====
cfg_io_driver! {
type IoDriver = crate::io::driver::Driver;
type IoStack = crate::park::either::Either<ProcessDriver, ParkThread>;
pub(crate) type IoHandle = Option<crate::io::driver::Handle>;
fn create_io_stack(enabled: bool) -> io::Result<(IoStack, IoHandle, SignalHandle)> {
use crate::park::either::Either;
#[cfg(loom)]
assert!(!enabled);
let ret = if enabled {
let io_driver = crate::io::driver::Driver::new()?;
let io_handle = io_driver.handle();
let (signal_driver, signal_handle) = create_signal_driver(io_driver)?;
let process_driver = create_process_driver(signal_driver);
(Either::A(process_driver), Some(io_handle), signal_handle)
} else {
(Either::B(ParkThread::new()), Default::default(), Default::default())
};
Ok(ret)
}
}
cfg_not_io_driver! {
pub(crate) type IoHandle = ();
type IoStack = ParkThread;
fn create_io_stack(_enabled: bool) -> io::Result<(IoStack, IoHandle, SignalHandle)> {
Ok((ParkThread::new(), Default::default(), Default::default()))
}
}
// ===== signal driver =====
// Emits the given items only when *both* conditions hold: the target is unix
// (outer `#[cfg(unix)]`) and the internal signal feature is enabled (inner
// `cfg_signal_internal!`).
macro_rules! cfg_signal_internal_and_unix {
    ($($item:item)*) => {
        #[cfg(unix)]
        cfg_signal_internal! { $($item)* }
    }
}
cfg_signal_internal_and_unix! {
type SignalDriver = crate::signal::unix::driver::Driver;
pub(crate) type SignalHandle = Option<crate::signal::unix::driver::Handle>;
fn create_signal_driver(io_driver: IoDriver) -> io::Result<(SignalDriver, SignalHandle)> {
let driver = crate::signal::unix::driver::Driver::new(io_driver)?;
let handle = driver.handle();
Ok((driver, Some(handle)))
}
}
cfg_not_signal_internal! {
pub(crate) type SignalHandle = ();
cfg_io_driver! {<|fim▁hole|> }
}
}
// ===== process driver =====
cfg_process_driver! {
type ProcessDriver = crate::process::unix::driver::Driver;
fn create_process_driver(signal_driver: SignalDriver) -> ProcessDriver {
crate::process::unix::driver::Driver::new(signal_driver)
}
}
cfg_not_process_driver! {
cfg_io_driver! {
type ProcessDriver = SignalDriver;
fn create_process_driver(signal_driver: SignalDriver) -> ProcessDriver {
signal_driver
}
}
}
// ===== time driver =====
cfg_time! {
type TimeDriver = crate::park::either::Either<crate::time::driver::Driver<IoStack>, IoStack>;
pub(crate) type Clock = crate::time::Clock;
pub(crate) type TimeHandle = Option<crate::time::driver::Handle>;
fn create_clock(enable_pausing: bool, start_paused: bool) -> Clock {
crate::time::Clock::new(enable_pausing, start_paused)
}
fn create_time_driver(
enable: bool,
io_stack: IoStack,
clock: Clock,
) -> (TimeDriver, TimeHandle) {
use crate::park::either::Either;
if enable {
let driver = crate::time::driver::Driver::new(io_stack, clock);
let handle = driver.handle();
(Either::A(driver), Some(handle))
} else {
(Either::B(io_stack), None)
}
}
}
cfg_not_time! {
type TimeDriver = IoStack;
pub(crate) type Clock = ();
pub(crate) type TimeHandle = ();
fn create_clock(_enable_pausing: bool, _start_paused: bool) -> Clock {
()
}
fn create_time_driver(
_enable: bool,
io_stack: IoStack,
_clock: Clock,
) -> (TimeDriver, TimeHandle) {
(io_stack, ())
}
}
// ===== runtime driver =====
/// The composite runtime driver: the time driver wrapping whatever I/O stack
/// was compiled in (each layer may be a real driver or a stub, per features).
#[derive(Debug)]
pub(crate) struct Driver {
    inner: TimeDriver,
}
/// Handles to the resources created alongside the driver, handed back to the
/// runtime so it can reach the I/O, signal and time drivers and the clock.
pub(crate) struct Resources {
    pub(crate) io_handle: IoHandle,
    pub(crate) signal_handle: SignalHandle,
    pub(crate) time_handle: TimeHandle,
    pub(crate) clock: Clock,
}
/// Build-time configuration consumed by `Driver::new`.
pub(crate) struct Cfg {
    pub(crate) enable_io: bool,
    pub(crate) enable_time: bool,
    pub(crate) enable_pause_time: bool,
    pub(crate) start_paused: bool,
}
impl Driver {
    /// Builds the full driver stack: the I/O stack first (signal/process
    /// drivers are layered inside `create_io_stack` when enabled), then the
    /// clock, then the time driver wrapping the I/O stack. Returns the driver
    /// plus the handles the runtime needs to interact with each layer.
    pub(crate) fn new(cfg: Cfg) -> io::Result<(Self, Resources)> {
        let (io_stack, io_handle, signal_handle) = create_io_stack(cfg.enable_io)?;
        // The clock is created unconditionally; pausing is opt-in via cfg.
        let clock = create_clock(cfg.enable_pause_time, cfg.start_paused);
        let (time_driver, time_handle) =
            create_time_driver(cfg.enable_time, io_stack, clock.clone());
        Ok((
            Self { inner: time_driver },
            Resources {
                io_handle,
                signal_handle,
                time_handle,
                clock,
            },
        ))
    }
}
impl Park for Driver {
    type Unpark = <TimeDriver as Park>::Unpark;
    type Error = <TimeDriver as Park>::Error;
    // Every operation simply delegates to the outermost (time) driver, which
    // in turn drives the wrapped I/O stack.
    fn unpark(&self) -> Self::Unpark {
        self.inner.unpark()
    }
    fn park(&mut self) -> Result<(), Self::Error> {
        self.inner.park()
    }
    /// Parks for at most `duration`.
    fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> {
        self.inner.park_timeout(duration)
    }
    fn shutdown(&mut self) {
        self.inner.shutdown()
    }
}<|fim▁end|> | type SignalDriver = IoDriver;
fn create_signal_driver(io_driver: IoDriver) -> io::Result<(SignalDriver, SignalHandle)> {
Ok((io_driver, ())) |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>//! This module provides the interface to different solvers.
//!
//! Both [`coin_cbc`](https://docs.rs/coin_cbc/latest/coin_cbc/) and
//! [`minilp`](https://docs.rs/minilp/0.2.2/minilp/) are available as cargo
//! [features](https://doc.rust-lang.org/cargo/reference/features.html). To use
//! them, specify your dependency to `lp_modeler` accordingly in your `Cargo.toml`
//! (note the name difference of the `native_coin_cbc` feature for the `coin_cbc` crate):
//! ```toml
//! [dependencies.lp_modeler]
//! version = "4.3"
//! features = "native_coin_cbc"
//! ```
//! or:
//! ```toml
//! [dependencies.lp_modeler]
//! version = "4.3"
//! features = "minilp"
//! ```
//! For `coin_cbc` to compile, the `Cbc` library files need to be available on your system.
//! See the [`coin_cbc` project README](https://github.com/KardinalAI/coin_cbc) for more infos.
//!
//! The other solvers need to be installed externally on your system.
//! The respective information is provided in the project's README in the section on
//! [installing external solvers](https://github.com/jcavat/rust-lp-modeler#installing-external-solvers).
use std::collections::HashMap;
use dsl::{Problem, LpContinuous, LpBinary, LpInteger, LpProblem, LpExprNode, LpExprOp, LpExprArenaIndex};
pub mod cbc;
pub use self::cbc::*;
pub mod gurobi;
pub use self::gurobi::*;
pub mod glpk;
pub use self::glpk::*;
#[cfg(feature = "minilp")]
pub mod minilp;
#[cfg(feature = "minilp")]
pub use self::minilp::*;
#[cfg(feature = "native_coin_cbc")]
pub mod native_cbc;
#[cfg(feature = "native_coin_cbc")]
pub use self::native_cbc::*;
<|fim▁hole|>#[derive(Debug, PartialEq, Clone)]
/// Termination status reported by a solver run.
pub enum Status {
    Optimal,
    SubOptimal,
    Infeasible,
    Unbounded,
    NotSolved,
}
/// Result of a solver run: the termination `status`, the variable assignment
/// (`results`: variable name -> value) and, optionally, a back-reference to
/// the solved problem (needed by `Solution::eval`).
#[derive(Debug, Clone)]
pub struct Solution<'a> {
    pub status: Status,
    pub results: HashMap<String, f32>,
    pub related_problem: Option<&'a LpProblem>
}
impl Solution<'_> {
    /// Creates a solution without a back-reference to the solved problem
    /// (so [`Solution::eval`] will return `None`).
    pub fn new<'a>(status: Status, results: HashMap<String, f32>) -> Solution<'a> {
        Solution {
            status,
            results,
            related_problem: None,
        }
    }
    /// Creates a solution that keeps a reference to the solved problem,
    /// allowing the objective to be re-evaluated via [`Solution::eval`].
    pub fn with_problem(status: Status, results: HashMap<String, f32>, problem: &LpProblem) -> Solution {
        Solution {
            status,
            results,
            related_problem: Some(problem),
        }
    }
    // Guard shared by all accessors: variable values are only meaningful when
    // the run was (sub)optimal.
    fn check_possible_solution(&self) {
        match &self.status {
            Status::Unbounded | Status::NotSolved | Status::Infeasible => {
                panic!("Solution must be optimal or suboptimal")
            }
            _ => (),
        }
    }
    /// Returns the raw solver value of the variable named `name`.
    ///
    /// # Panics
    /// Panics if the status is not (sub)optimal or the variable is unknown.
    pub fn get_raw_value(&self, name: &str) -> f32 {
        self.check_possible_solution();
        *self.results.get(name).expect("No value found for this variable. Check if the variable has been used in the related problem.")
    }
    /// Returns the value of a binary variable as `bool`.
    ///
    /// # Panics
    /// Panics if the status is not (sub)optimal or the stored value is
    /// neither (approximately) 0 nor 1.
    pub fn get_bool(&self, var: &LpBinary) -> bool {
        self.check_possible_solution();
        self.results
            .get(&var.name)
            .and_then(|&f| {
                // Tolerate small numerical noise around the two feasible values.
                if is_zero(1.0 - f) {
                    Some(true)
                } else if is_zero(f) {
                    Some(false)
                } else {
                    None
                }
            })
            .expect("Result value cannot be interpreted as boolean")
    }
    /// Returns the value of a continuous variable.
    ///
    /// # Panics
    /// Panics if the status is not (sub)optimal or the variable is unknown.
    pub fn get_float(&self, var: &LpContinuous) -> f32 {
        self.check_possible_solution();
        *self.results.get(&var.name).expect("No value found for this variable. Check if the variable has been used in the related problem.")
    }
    /// Returns the value of an integer variable as `i32`.
    ///
    /// # Panics
    /// Panics if the status is not (sub)optimal, the variable is unknown, or
    /// the stored float is not (approximately) integral.
    pub fn get_int(&self, var: &LpInteger) -> i32 {
        self.check_possible_solution();
        let &f = self.results.get(&var.name).expect("No value found for this variable. Check if the variable has been used in the related problem.");
        let i = f as i32;
        // Pass format arguments directly to `assert!`: the previous
        // `assert!(cond, format!(..))` form is a non-literal panic message,
        // deprecated since Rust 1.52 and a hard error in the 2021 edition.
        assert!(
            is_zero(f - (i as f32)),
            "Value {} cannot be interpreted as integer.",
            f
        );
        i
    }
    /// Re-evaluates the objective expression using this solution's variable
    /// values. Returns `None` when no related problem or no objective exists.
    pub fn eval(&self) -> Option<f32> {
        self.related_problem.and_then(|problem| {
            problem
                .obj_expr_arena
                .as_ref()
                .map(|obj_expr_arena| self.eval_with(&obj_expr_arena.get_root_index(), &self.results))
        })
    }
    // Recursively evaluates the objective expression-arena node at `index`.
    // Variables absent from `values` evaluate to 0.0.
    fn eval_with(&self, index: &LpExprArenaIndex, values: &HashMap<String, f32>) -> f32 {
        match self.related_problem.unwrap().obj_expr_arena.as_ref().unwrap().expr_ref_at(*index) {
            LpExprNode::LpCompExpr(operation, left, right) => match operation {
                LpExprOp::Addition => self.eval_with(left, values) + self.eval_with(right, values),
                LpExprOp::Multiplication => self.eval_with(left, values) * self.eval_with(right, values),
                LpExprOp::Subtraction => self.eval_with(left, values) - self.eval_with(right, values),
            },
            LpExprNode::ConsBin(LpBinary { name })
            | LpExprNode::ConsCont(LpContinuous { name, .. })
            | LpExprNode::ConsInt(LpInteger { name, .. }) => *values.get(name).unwrap_or(&0f32),
            LpExprNode::LitVal(n) => *n,
            LpExprNode::EmptyExpr => 0.0,
        }
    }
}
/// Minimal interface every solver backend implements: run a problem and
/// return a `Solution` that may borrow from it.
pub trait SolverTrait {
    type P: Problem;
    fn run<'a>(&self, problem: &'a Self::P) -> Result<Solution<'a>, String>;
}
/// Shared plumbing for solvers that write their solution to a temp file.
pub trait SolverWithSolutionParsing {
    /// Opens `temp_solution_file`, delegates parsing to
    /// [`Self::read_specific_solution`], then best-effort deletes the file.
    ///
    /// # Errors
    /// Returns an error when the file cannot be opened or parsing fails.
    fn read_solution<'a>(&self, temp_solution_file: &String, problem: Option<&'a LpProblem>) -> Result<Solution<'a>, String> {
        // `map_err` + `?` replaces the former `match` whose error arm used a
        // needless `return`; behavior (including the error string) is unchanged.
        let f = File::open(temp_solution_file).map_err(|_| "Cannot open file".to_string())?;
        let res = self.read_specific_solution(&f, problem)?;
        // Removal failure is deliberately ignored: the solution is already parsed.
        let _ = fs::remove_file(temp_solution_file);
        Ok(res)
    }
    /// Parses a solver-specific solution format from an already-open file.
    fn read_specific_solution<'a>(&self, f: &File, problem: Option<&'a LpProblem>) -> Result<Solution<'a>, String>;
}
/// Builder-style accessors for an optional solver time limit (in seconds).
pub trait WithMaxSeconds<T> {
    fn max_seconds(&self) -> Option<u32>;
    fn with_max_seconds(&self, seconds: u32) -> T;
}
/// Builder-style accessors for an optional solver thread count.
pub trait WithNbThreads<T> {
    fn nb_threads(&self) -> Option<u32>;
    fn with_nb_threads(&self, threads: u32) -> T;
}<|fim▁end|> | use std::fs::File;
use std::fs;
use util::is_zero;
|
<|file_name|>project-cache-issue-37154.rs<|end_file_name|><|fim▁begin|>// run-pass
#![allow(dead_code)]
// Regression test for #37154: the problem here was that the cache
// results in a false error because it was caching placeholder results
// even after those placeholder regions had been popped.
trait Foo {
fn method(&self) {}
}
<|fim▁hole|>impl<T> Foo for Wrapper<T> where for<'a> &'a T: IntoIterator<Item=&'a ()> {}
// Exercises the projection cache (#37154): both calls must resolve `method`
// through the HRTB impl; the second call used to fail when stale placeholder
// results lingered in the cache. The inline comments record the old behavior.
fn f(x: Wrapper<Vec<()>>) {
    x.method(); // This works.
    x.method(); // error: no method named `method`
}
fn main() { }<|fim▁end|> | struct Wrapper<T>(T);
|
<|file_name|>monocore.js<|end_file_name|><|fim▁begin|>//= require ./core/monocle
//= require ./compat/env
//= require ./compat/css
//= require ./compat/stubs
//= require ./compat/browser
//= require ./compat/gala
//= require ./core/bookdata
//= require ./core/factory
//= require ./core/events
//= require ./core/styles
//= require ./core/formatting<|fim▁hole|>//= require ./core/book
//= require ./core/place
//= require ./core/component
//= require ./core/selection
//= require ./core/billboard
//= require ./controls/panel
//= require ./panels/twopane
//= require ./panels/imode
//= require ./panels/eink
//= require ./panels/marginal
//= require ./panels/magic
//= require ./dimensions/columns
//= require ./flippers/slider
//= require ./flippers/scroller
//= require ./flippers/instant<|fim▁end|> | //= require ./core/reader |
<|file_name|>another-py-invaders.py<|end_file_name|><|fim▁begin|># import libraries
import math
import random
import pygame
from pygame.locals import *
pygame.init()
pygame.mixer.init()
width, height = 800, 600
screen = pygame.display.set_mode((width, height))
keys = [False, False, False, False]
player = [100, 520]
invaders = []
bullets = []
bombs = []
rockets = []
rocketpieces = []
bgimg = pygame.image.load("g:/invaders/paragliding_2017_4_bsl-73.jpg")
invaderimg = pygame.transform.scale(pygame.image.load("g:/invaders/Space-Invaders-PNG-Clipart.png"), (64, 64))
playerimg = pygame.transform.scale(pygame.image.load("g:/invaders/space-invaders-1again.png"), (64, 64))
bulletimg = pygame.transform.scale(pygame.image.load("g:/invaders/square-rounded-512.png"), (16, 16))
# 4 - keep looping through
running = 1
exitcode = 0
invadersmv = 1
# create invaders
for i in range (0, 734, 96):
for j in range (0, 300, 64):
invaders.append([i, j])
while running:
# 5 - clear the screen before drawing it again
movedown=False
#screen.fill(0)
# 6 - draw the screen elements
screen.blit(bgimg, (0, 0))
screen.blit(playerimg, player)
for invader in invaders:
screen.blit(invaderimg, invader)
for invader in invaders:
if invader[0] >= 736:
invadersmv = -1
movedown=True
break
if invader[0] <= 0:
invadersmv = 1
movedown=True
break
for invader in invaders:
invader[0] += invadersmv
if movedown: invader[1] += 2
for bullet in bullets:
screen.blit(bulletimg, bullet)
bullet[1] -= 1
if len(bullets) > 0 and bullets[0][1] <= -16:
bullets.pop(0)
# collision check
destroyedinvaders = []
destroyedbullets = []
for bullet in bullets:
for invader in invaders:
if bullet[0] < invader[0] + 16 and bullet[0] + 64 > invader[0] and bullet[1] < invader[1] + 16 and invader[1] + 16 > bullet[1]:
destroyedbullets.append(bullet)
destroyedinvaders.append(invader)
#print('collision')
bullets = [item for item in bullets if item not in destroyedbullets]
invaders = [item for item in invaders if item not in destroyedinvaders]
# 9 - Move player
## if keys[0]:
## player[1] -= 5
## elif keys[2]:
## player[1] += 5
if keys[1] and player[0] >= 0:
player[0] -= 5
elif keys[3] and player[0] <= 736:
player[0] += 5
# 7 - update the screen
pygame.display.flip()
# 8 - check events
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_w:
keys[0] = True
elif event.key == K_a:
keys[1] = True
elif event.key == K_s:
keys[2] = True
elif event.key == K_d:
keys[3] = True
if event.type == KEYUP:
if event.key == K_w:
keys[0] = False
elif event.key == K_a:
keys[1] = False<|fim▁hole|> elif event.key == K_d:
keys[3] = False
if event.type == QUIT:
pygame.quit()
exit(0)
if event.type == MOUSEBUTTONDOWN:
#shoot.play()
if len(bullets) < 3: # up to three bullets
bullets.append([player[0]+32, player[1]-32])<|fim▁end|> | elif event.key == K_s:
keys[2] = False |
<|file_name|>cpp.min.js<|end_file_name|><|fim▁begin|><|fim▁hole|>oid sha256:5181d344dc3334a5a80ecae84df1bb3107af7d92135639b56a7f73ec2ea1931c
size 3057<|fim▁end|> | version https://git-lfs.github.com/spec/v1 |
<|file_name|>EventException.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2009-2020 Aarhus University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dk.brics.tajs.analysis.dom.event;
import dk.brics.tajs.analysis.InitialStateBuilder;
import dk.brics.tajs.analysis.PropVarOperations;
import dk.brics.tajs.analysis.Solver;
import dk.brics.tajs.analysis.dom.DOMObjects;
import dk.brics.tajs.analysis.dom.DOMWindow;
import dk.brics.tajs.lattice.ObjectLabel;
import dk.brics.tajs.lattice.State;
import dk.brics.tajs.lattice.Value;
import static dk.brics.tajs.analysis.dom.DOMFunctions.createDOMProperty;
public class EventException {
public static ObjectLabel CONSTRUCTOR;
public static ObjectLabel PROTOTYPE;
public static ObjectLabel INSTANCES;
public static void build(Solver.SolverInterface c) {
State s = c.getState();
PropVarOperations pv = c.getAnalysis().getPropVarOperations();
CONSTRUCTOR = ObjectLabel.make(DOMObjects.EVENT_EXCEPTION_CONSTRUCTOR, ObjectLabel.Kind.FUNCTION);
PROTOTYPE = ObjectLabel.make(DOMObjects.EVENT_EXCEPTION_PROTOTYPE, ObjectLabel.Kind.OBJECT);
INSTANCES = ObjectLabel.make(DOMObjects.EVENT_EXCEPTION_INSTANCES, ObjectLabel.Kind.OBJECT);
// Constructor Object
s.newObject(CONSTRUCTOR);
pv.writePropertyWithAttributes(CONSTRUCTOR, "length", Value.makeNum(0).setAttributes(true, true, true));
pv.writePropertyWithAttributes(CONSTRUCTOR, "prototype", Value.makeObject(PROTOTYPE).setAttributes(true, true, true));
s.writeInternalPrototype(CONSTRUCTOR, Value.makeObject(InitialStateBuilder.OBJECT_PROTOTYPE));
pv.writeProperty(DOMWindow.WINDOW, "EventException", Value.makeObject(CONSTRUCTOR));
// Prototype object.
s.newObject(PROTOTYPE);
s.writeInternalPrototype(PROTOTYPE, Value.makeObject(InitialStateBuilder.OBJECT_PROTOTYPE));
// Multiplied object.
s.newObject(INSTANCES);
s.writeInternalPrototype(INSTANCES, Value.makeObject(PROTOTYPE));
/*
* Properties.
*/
createDOMProperty(INSTANCES, "code", Value.makeAnyNumUInt(), c);
s.multiplyObject(INSTANCES);
INSTANCES = INSTANCES.makeSingleton().makeSummary();
/*
* Constants.
<|fim▁hole|> */
createDOMProperty(PROTOTYPE, "UNSPECIFIED_EVENT_TYPE_ERR", Value.makeNum(0), c);
/*
* Functions.
*/
}
}<|fim▁end|> | |
<|file_name|>loss_layer.cpp<|end_file_name|><|fim▁begin|>// Copyright 2013 Yangqing Jia
#include <algorithm>
#include <cmath>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/io.hpp"
#define C_ 1
using std::max;
namespace caffe {
const float kLOG_THRESHOLD = 1e-20;
template <typename Dtype>
void MultinomialLogisticLossLayer<Dtype>::SetUp(
const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
CHECK_EQ(bottom.size(), 2) << "Loss Layer takes two blobs as input.";
CHECK_EQ(top->size(), 0) << "Loss Layer takes no output.";
CHECK_EQ(bottom[0]->num(), bottom[1]->num())
<< "The data and label should have the same number.";
CHECK_EQ(bottom[1]->channels(), 1);
CHECK_EQ(bottom[1]->height(), 1);
CHECK_EQ(bottom[1]->width(), 1);
}
template <typename Dtype>
Dtype MultinomialLogisticLossLayer<Dtype>::Backward_cpu(
const vector<Blob<Dtype>*>& top, const bool propagate_down,
vector<Blob<Dtype>*>* bottom) {
const Dtype* bottom_data = (*bottom)[0]->cpu_data();
const Dtype* bottom_label = (*bottom)[1]->cpu_data();
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
int num = (*bottom)[0]->num();
int dim = (*bottom)[0]->count() / (*bottom)[0]->num();
memset(bottom_diff, 0, sizeof(Dtype) * (*bottom)[0]->count());
Dtype loss = 0;
for (int i = 0; i < num; ++i) {
int label = static_cast<int>(bottom_label[i]);
Dtype prob = max(bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD));
loss -= log(prob);
bottom_diff[i * dim + label] = - 1. / prob / num;
}
return loss / num;
}
// TODO: implement the GPU version for multinomial loss
template <typename Dtype>
void InfogainLossLayer<Dtype>::SetUp(
const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
CHECK_EQ(bottom.size(), 2) << "Loss Layer takes two blobs as input.";
CHECK_EQ(top->size(), 0) << "Loss Layer takes no output.";
CHECK_EQ(bottom[0]->num(), bottom[1]->num())
<< "The data and label should have the same number.";
CHECK_EQ(bottom[1]->channels(), 1);
CHECK_EQ(bottom[1]->height(), 1);
CHECK_EQ(bottom[1]->width(), 1);
BlobProto blob_proto;
ReadProtoFromBinaryFile(this->layer_param_.source(), &blob_proto);
infogain_.FromProto(blob_proto);
CHECK_EQ(infogain_.num(), 1);
CHECK_EQ(infogain_.channels(), 1);
CHECK_EQ(infogain_.height(), infogain_.width());
}
template <typename Dtype>
Dtype InfogainLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down,
vector<Blob<Dtype>*>* bottom) {
const Dtype* bottom_data = (*bottom)[0]->cpu_data();
const Dtype* bottom_label = (*bottom)[1]->cpu_data();
const Dtype* infogain_mat = infogain_.cpu_data();
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
int num = (*bottom)[0]->num();
int dim = (*bottom)[0]->count() / (*bottom)[0]->num();
CHECK_EQ(infogain_.height(), dim);
Dtype loss = 0;
<|fim▁hole|> for (int j = 0; j < dim; ++j) {
Dtype prob = max(bottom_data[i * dim + j], Dtype(kLOG_THRESHOLD));
loss -= infogain_mat[label * dim + j] * log(prob);
bottom_diff[i * dim + j] = - infogain_mat[label * dim + j] / prob / num;
}
}
return loss / num;
}
template <typename Dtype>
void EuclideanLossLayer<Dtype>::SetUp(
const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
CHECK_EQ(bottom.size(), 2) << "Loss Layer takes two blobs as input.";
CHECK_EQ(top->size(), 0) << "Loss Layer takes no as output.";
CHECK_EQ(bottom[0]->num(), bottom[1]->num())
<< "The data and label should have the same number.";
CHECK_EQ(bottom[0]->channels(), bottom[1]->channels());
CHECK_EQ(bottom[0]->height(), bottom[1]->height());
CHECK_EQ(bottom[0]->width(), bottom[1]->width());
difference_.Reshape(bottom[0]->num(), bottom[0]->channels(),
bottom[0]->height(), bottom[0]->width());
}
template <typename Dtype>
Dtype EuclideanLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
int count = (*bottom)[0]->count();
int num = (*bottom)[0]->num();
caffe_sub(count, (*bottom)[0]->cpu_data(), (*bottom)[1]->cpu_data(),
difference_.mutable_cpu_data());
Dtype loss = caffe_cpu_dot(
count, difference_.cpu_data(), difference_.cpu_data()) / num / Dtype(2);
// Compute the gradient
caffe_axpby(count, Dtype(1) / num, difference_.cpu_data(), Dtype(0),
(*bottom)[0]->mutable_cpu_diff());
return loss;
}
template <typename Dtype>
void AccuracyLayer<Dtype>::SetUp(
const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
CHECK_EQ(bottom.size(), 2) << "Accuracy Layer takes two blobs as input.";
CHECK_EQ(top->size(), 1) << "Accuracy Layer takes 1 output.";
CHECK_EQ(bottom[0]->num(), bottom[1]->num())
<< "The data and label should have the same number.";
CHECK_EQ(bottom[1]->channels(), 1);
CHECK_EQ(bottom[1]->height(), 1);
CHECK_EQ(bottom[1]->width(), 1);
(*top)[0]->Reshape(1, 2, 1, 1);
}
template <typename Dtype>
void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
Dtype accuracy = 0;
Dtype logprob = 0;
const Dtype* bottom_data = bottom[0]->cpu_data();
const Dtype* bottom_label = bottom[1]->cpu_data();
int num = bottom[0]->num();
int dim = bottom[0]->count() / bottom[0]->num();
for (int i = 0; i < num; ++i) {
// Accuracy
Dtype maxval = -FLT_MAX;
int max_id = 0;
for (int j = 0; j < dim; ++j) {
if (bottom_data[i * dim + j] > maxval) {
maxval = bottom_data[i * dim + j];
max_id = j;
}
}
//LOG(INFO) << " max_id: " << max_id << " label: " << static_cast<int>(bottom_label[i]);
if (max_id == static_cast<int>(bottom_label[i])) {
++accuracy;
}
Dtype prob = max(bottom_data[i * dim + static_cast<int>(bottom_label[i])],
Dtype(kLOG_THRESHOLD));
logprob -= log(prob);
}
// LOG(INFO) << "classes: " << num;
(*top)[0]->mutable_cpu_data()[0] = accuracy / num;
(*top)[0]->mutable_cpu_data()[1] = logprob / num;
}
template <typename Dtype>
void HingeLossLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
CHECK_EQ(bottom.size(), 2) << "Hinge Loss Layer takes two blobs as input.";
CHECK_EQ(top->size(), 0) << "Hinge Loss Layer takes no output.";
}
template <typename Dtype>
void HingeLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
const Dtype* label = bottom[1]->cpu_data();
int num = bottom[0]->num();
int count = bottom[0]->count();
int dim = count / num;
caffe_copy(count, bottom_data, bottom_diff);
if(0) {
for (int i = 0; i < 3; ++i)
for (int j = 0; j < 3; ++j)
LOG(INFO) << bottom_data[i * dim + j];
LOG(INFO) << "*************ONE PASS*****************";
}
for (int i = 0; i < num; ++i) {
bottom_diff[i * dim + static_cast<int>(label[i])] *= -1;
//LOG(INFO) << bottom_diff[i * dim + static_cast<int>(label[i])];
}
for (int i = 0; i < num; ++i) {
for (int j = 0; j < dim; ++j) {
//LOG(INFO) << bottom_diff[i * dim + j];
bottom_diff[i * dim + j] = max(Dtype(0), 1 + bottom_diff[i * dim + j]);
//if(bottom_diff[i*dim+j] != 1)
//LOG(INFO) << bottom_diff[i*dim+j];
}
}
}
template <typename Dtype>
Dtype HingeLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
const Dtype* label = (*bottom)[1]->cpu_data();
int num = (*bottom)[0]->num();
int count = (*bottom)[0]->count();
int dim = count / num;
Dtype loss = caffe_cpu_asum(count, bottom_diff) / num;
caffe_cpu_sign(count, bottom_diff, bottom_diff);
for (int i = 0; i < num; ++i) {
bottom_diff[i * dim + static_cast<int>(label[i])] *= -1;
}
caffe_scal(count, Dtype(1. / num), bottom_diff);
//LOG(INFO) << "loss" << loss;
return loss;
}
//**********************SquaredHingeLoss**********************
template <typename Dtype>
void SquaredHingeLossLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
CHECK_EQ(bottom.size(), 2) << "Squared Hinge Loss Layer takes two blobs as input.";
CHECK_EQ(top->size(), 0) << "Squared Hinge Loss Layer takes no output.";
}
template <typename Dtype>
void SquaredHingeLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
const Dtype* label = bottom[1]->cpu_data();
int num = bottom[0]->num();
int count = bottom[0]->count();
int dim = count / num;
caffe_copy(count, bottom_data, bottom_diff);
//Debug
if(0) {
for (int i = 0; i < 3; ++i)
for (int j = 0; j < 3; ++j) {
LOG(INFO) << bottom_data[i * dim + j];
}
LOG(INFO) << "*************ONE PASS*****************";
}
for (int i = 0; i < num; ++i) {
bottom_diff[i * dim + static_cast<int>(label[i])] *= -1;
//LOG(INFO) << static_cast<int>(label[i]);
}
for (int i = 0; i < num; ++i) {
for (int j = 0; j < dim; ++j) {
//LOG(INFO) << bottom_diff[i * dim + j];
bottom_diff[i * dim + j] = max(Dtype(0), 1 + bottom_diff[i * dim + j]);
//if(bottom_diff[i*dim+j] != 1)
//LOG(INFO) << bottom_diff[i*dim+j];
}
}
}
template <typename Dtype>
Dtype SquaredHingeLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
const Dtype* label = (*bottom)[1]->cpu_data();
int num = (*bottom)[0]->num();
int count = (*bottom)[0]->count();
int dim = count / num;
Dtype loss = caffe_cpu_dot(count, bottom_diff, bottom_diff) / num;
for (int i = 0; i < num; ++i) {
bottom_diff[i * dim + static_cast<int>(label[i])] *= -1;
}
caffe_scal(count, Dtype(2.* C_ / num), bottom_diff);
//LOG(INFO) << "loss" << loss;
return loss;
}
INSTANTIATE_CLASS(MultinomialLogisticLossLayer);
INSTANTIATE_CLASS(InfogainLossLayer);
INSTANTIATE_CLASS(EuclideanLossLayer);
INSTANTIATE_CLASS(AccuracyLayer);
INSTANTIATE_CLASS(HingeLossLayer);
INSTANTIATE_CLASS(SquaredHingeLossLayer);
} // namespace caffe<|fim▁end|> | for (int i = 0; i < num; ++i) {
int label = static_cast<int>(bottom_label[i]);
|
<|file_name|>intrinsics.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! rustc compiler intrinsics.
//!
//! The corresponding definitions are in librustc_trans/trans/intrinsic.rs.
//!
//! # Volatiles
//!
//! The volatile intrinsics provide operations intended to act on I/O
//! memory, which are guaranteed to not be reordered by the compiler
//! across other volatile intrinsics. See the LLVM documentation on
//! [[volatile]].
//!
//! [volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses
//!
//! # Atomics
//!
//! The atomic intrinsics provide common atomic operations on machine
//! words, with multiple possible memory orderings. They obey the same
//! semantics as C++11. See the LLVM documentation on [[atomics]].
//!
//! [atomics]: http://llvm.org/docs/Atomics.html
//!
//! A quick refresher on memory ordering:
//!
//! * Acquire - a barrier for acquiring a lock. Subsequent reads and writes
//! take place after the barrier.
//! * Release - a barrier for releasing a lock. Preceding reads and writes
//! take place before the barrier.
//! * Sequentially consistent - sequentially consistent operations are
//! guaranteed to happen in order. This is the standard mode for working
//! with atomic types and is equivalent to Java's `volatile`.
#![unstable]
#![allow(missing_docs)]
use marker::Sized;
#[cfg(stage0)] use any::TypeId;
pub type GlueFn = extern "Rust" fn(*const i8);
#[lang="ty_desc"]
#[derive(Copy)]
pub struct TyDesc {
// sizeof(T)
pub size: uint,
// alignof(T)
pub align: uint,
// Called when a value of type `T` is no longer needed
pub drop_glue: GlueFn,
// Name corresponding to the type
pub name: &'static str,
}
extern "rust-intrinsic" {
// NB: These intrinsics take unsafe pointers because they mutate aliased
// memory, which is not valid for either `&` or `&mut`.
pub fn atomic_cxchg<T>(dst: *mut T, old: T, src: T) -> T;
pub fn atomic_cxchg_acq<T>(dst: *mut T, old: T, src: T) -> T;
pub fn atomic_cxchg_rel<T>(dst: *mut T, old: T, src: T) -> T;
pub fn atomic_cxchg_acqrel<T>(dst: *mut T, old: T, src: T) -> T;
pub fn atomic_cxchg_relaxed<T>(dst: *mut T, old: T, src: T) -> T;
pub fn atomic_load<T>(src: *const T) -> T;
pub fn atomic_load_acq<T>(src: *const T) -> T;
pub fn atomic_load_relaxed<T>(src: *const T) -> T;
pub fn atomic_load_unordered<T>(src: *const T) -> T;
pub fn atomic_store<T>(dst: *mut T, val: T);
pub fn atomic_store_rel<T>(dst: *mut T, val: T);
pub fn atomic_store_relaxed<T>(dst: *mut T, val: T);
pub fn atomic_store_unordered<T>(dst: *mut T, val: T);
pub fn atomic_xchg<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xchg_acq<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xchg_rel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xchg_acqrel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xchg_relaxed<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xadd<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xadd_acq<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xadd_rel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xadd_acqrel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xadd_relaxed<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xsub<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xsub_acq<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xsub_rel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xsub_acqrel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xsub_relaxed<T>(dst: *mut T, src: T) -> T;
pub fn atomic_and<T>(dst: *mut T, src: T) -> T;
pub fn atomic_and_acq<T>(dst: *mut T, src: T) -> T;
pub fn atomic_and_rel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_and_acqrel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_and_relaxed<T>(dst: *mut T, src: T) -> T;
pub fn atomic_nand<T>(dst: *mut T, src: T) -> T;
pub fn atomic_nand_acq<T>(dst: *mut T, src: T) -> T;
pub fn atomic_nand_rel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_nand_acqrel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_nand_relaxed<T>(dst: *mut T, src: T) -> T;
pub fn atomic_or<T>(dst: *mut T, src: T) -> T;
pub fn atomic_or_acq<T>(dst: *mut T, src: T) -> T;
pub fn atomic_or_rel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_or_acqrel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_or_relaxed<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xor<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xor_acq<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xor_rel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xor_acqrel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_xor_relaxed<T>(dst: *mut T, src: T) -> T;
pub fn atomic_max<T>(dst: *mut T, src: T) -> T;
pub fn atomic_max_acq<T>(dst: *mut T, src: T) -> T;
pub fn atomic_max_rel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_max_acqrel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_max_relaxed<T>(dst: *mut T, src: T) -> T;
pub fn atomic_min<T>(dst: *mut T, src: T) -> T;
pub fn atomic_min_acq<T>(dst: *mut T, src: T) -> T;
pub fn atomic_min_rel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_min_acqrel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_min_relaxed<T>(dst: *mut T, src: T) -> T;
pub fn atomic_umin<T>(dst: *mut T, src: T) -> T;
pub fn atomic_umin_acq<T>(dst: *mut T, src: T) -> T;
pub fn atomic_umin_rel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_umin_acqrel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_umin_relaxed<T>(dst: *mut T, src: T) -> T;
pub fn atomic_umax<T>(dst: *mut T, src: T) -> T;
pub fn atomic_umax_acq<T>(dst: *mut T, src: T) -> T;
pub fn atomic_umax_rel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_umax_acqrel<T>(dst: *mut T, src: T) -> T;
pub fn atomic_umax_relaxed<T>(dst: *mut T, src: T) -> T;
}
extern "rust-intrinsic" {
pub fn atomic_fence();
pub fn atomic_fence_acq();
pub fn atomic_fence_rel();
pub fn atomic_fence_acqrel();
/// Abort the execution of the process.
pub fn abort() -> !;
/// Tell LLVM that this point in the code is not reachable,
/// enabling further optimizations.
///
/// NB: This is very different from the `unreachable!()` macro!
pub fn unreachable() -> !;
/// Inform the optimizer that a condition is always true.
/// If the condition is false, the behavior is undefined.
///
/// No code is generated for this intrinsic, but the optimizer will try
/// to preserve it (and its condition) between passes, which may interfere
/// with optimization of surrounding code and reduce performance. It should
/// not be used if the invariant can be discovered by the optimizer on its
/// own, or if it does not enable any significant optimizations.
pub fn assume(b: bool);
/// Execute a breakpoint trap, for inspection by a debugger.
pub fn breakpoint();
/// The size of a type in bytes.
///
/// This is the exact number of bytes in memory taken up by a
/// value of the given type. In other words, a memset of this size
/// would *exactly* overwrite a value. When laid out in vectors
/// and structures there may be additional padding between
/// elements.
pub fn size_of<T>() -> uint;
/// Move a value to an uninitialized memory location.
///
/// Drop glue is not run on the destination.
pub fn move_val_init<T>(dst: &mut T, src: T);
pub fn min_align_of<T>() -> uint;
pub fn pref_align_of<T>() -> uint;
/// Get a static pointer to a type descriptor.
#[cfg(not(stage0))]
pub fn get_tydesc<T: ?Sized>() -> *const TyDesc;
#[cfg(stage0)]
pub fn get_tydesc<T>() -> *const TyDesc;
/// Gets an identifier which is globally unique to the specified type. This
/// function will return the same value for a type regardless of whichever
/// crate it is invoked in.
#[cfg(not(stage0))]
pub fn type_id<T: ?Sized + 'static>() -> u64;
#[cfg(stage0)]
pub fn type_id<T: ?Sized + 'static>() -> TypeId;
/// Create a value initialized to zero.
///
/// `init` is unsafe because it returns a zeroed-out datum,
/// which is unsafe unless T is Copy.
pub fn init<T>() -> T;
/// Create an uninitialized value.
pub fn uninit<T>() -> T;
/// Move a value out of scope without running drop glue.
///
/// `forget` is unsafe because the caller is responsible for
/// ensuring the argument is deallocated already.
#[stable]
pub fn forget<T>(_: T) -> ();
/// Unsafely transforms a value of one type into a value of another type.
///
/// Both types must have the same size and alignment, and this guarantee
/// is enforced at compile-time.
///
/// # Examples
///
/// ```rust
/// use std::mem;
///
/// let v: &[u8] = unsafe { mem::transmute("L") };
/// assert!(v == [76u8]);
/// ```
#[stable]
pub fn transmute<T,U>(e: T) -> U;
/// Gives the address for the return value of the enclosing function.
///
/// Using this intrinsic in a function that does not use an out pointer
/// will trigger a compiler error.
pub fn return_address() -> *const u8;
/// Returns `true` if a type requires drop glue.
pub fn needs_drop<T>() -> bool;
/// Returns `true` if a type is managed (will be allocated on the local heap)
pub fn owns_managed<T>() -> bool;
/// Calculates the offset from a pointer. The offset *must* be in-bounds of
/// the object, or one-byte-past-the-end. An arithmetic overflow is also
/// undefined behaviour.
///
/// This is implemented as an intrinsic to avoid converting to and from an
/// integer, since the conversion would throw away aliasing information.
pub fn offset<T>(dst: *const T, offset: int) -> *const T;
/// Copies `count * size_of<T>` bytes from `src` to `dst`. The source
/// and destination may *not* overlap.
///
/// `copy_nonoverlapping_memory` is semantically equivalent to C's `memcpy`.
///
/// # Safety
///
/// Beyond requiring that both regions of memory be allocated, it is Undefined Behaviour
/// for source and destination to overlap. Care must also be taken with the ownership of
/// `src` and `dst`. This method semantically moves the values of `src` into `dst`.
/// However it does not drop the contents of `dst`, or prevent the contents of `src`
/// from being dropped or used.
///
/// # Examples
///
/// A safe swap function:
///
/// ```
/// use std::mem;
/// use std::ptr;
///
/// fn swap<T>(x: &mut T, y: &mut T) {
/// unsafe {
/// // Give ourselves some scratch space to work with
/// let mut t: T = mem::uninitialized();
///
/// // Perform the swap, `&mut` pointers never alias
/// ptr::copy_nonoverlapping_memory(&mut t, &*x, 1);
/// ptr::copy_nonoverlapping_memory(x, &*y, 1);
/// ptr::copy_nonoverlapping_memory(y, &t, 1);
///
/// // y and t now point to the same thing, but we need to completely forget `tmp`
/// // because it's no longer relevant.
/// mem::forget(t);
/// }
/// }
/// ```
#[unstable]
pub fn copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T, count: uint);
/// Copies `count * size_of<T>` bytes from `src` to `dst`. The source
/// and destination may overlap.
///
/// `copy_memory` is semantically equivalent to C's `memmove`.
///
/// # Safety
///
/// Care must be taken with the ownership of `src` and `dst`.
/// This method semantically moves the values of `src` into `dst`.
/// However it does not drop the contents of `dst`, or prevent the contents of `src`
/// from being dropped or used.
///
/// # Examples
///
/// Efficiently create a Rust vector from an unsafe buffer:
///
/// ```
/// use std::ptr;
///
/// unsafe fn from_buf_raw<T>(ptr: *const T, elts: uint) -> Vec<T> {
/// let mut dst = Vec::with_capacity(elts);
/// dst.set_len(elts);
/// ptr::copy_memory(dst.as_mut_ptr(), ptr, elts);
/// dst
/// }
/// ```
///
#[unstable]
pub fn copy_memory<T>(dst: *mut T, src: *const T, count: uint);
/// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
/// bytes of memory starting at `dst` to `c`.
#[unstable = "uncertain about naming and semantics"]
pub fn set_memory<T>(dst: *mut T, val: u8, count: uint);
/// Equivalent to the appropriate `llvm.memcpy.p0i8.0i8.*` intrinsic, with
/// a size of `count` * `size_of::<T>()` and an alignment of
/// `min_align_of::<T>()`
///
/// The volatile parameter parameter is set to `true`, so it will not be optimized out.
pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T,
count: uint);
/// Equivalent to the appropriate `llvm.memmove.p0i8.0i8.*` intrinsic, with
/// a size of `count` * `size_of::<T>()` and an alignment of
/// `min_align_of::<T>()`
///
/// The volatile parameter parameter is set to `true`, so it will not be optimized out.
pub fn volatile_copy_memory<T>(dst: *mut T, src: *const T, count: uint);
/// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a
/// size of `count` * `size_of::<T>()` and an alignment of
/// `min_align_of::<T>()`.
///
/// The volatile parameter parameter is set to `true`, so it will not be optimized out.
pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: uint);
/// Perform a volatile load from the `src` pointer.
pub fn volatile_load<T>(src: *const T) -> T;
/// Perform a volatile store to the `dst` pointer.
pub fn volatile_store<T>(dst: *mut T, val: T);
/// Returns the square root of an `f32`
pub fn sqrtf32(x: f32) -> f32;
/// Returns the square root of an `f64`
pub fn sqrtf64(x: f64) -> f64;
/// Raises an `f32` to an integer power.
pub fn powif32(a: f32, x: i32) -> f32;
/// Raises an `f64` to an integer power.
pub fn powif64(a: f64, x: i32) -> f64;
/// Returns the sine of an `f32`.
pub fn sinf32(x: f32) -> f32;
/// Returns the sine of an `f64`.
pub fn sinf64(x: f64) -> f64;
/// Returns the cosine of an `f32`.
pub fn cosf32(x: f32) -> f32;
/// Returns the cosine of an `f64`.
pub fn cosf64(x: f64) -> f64;
/// Raises an `f32` to an `f32` power.
pub fn powf32(a: f32, x: f32) -> f32;
/// Raises an `f64` to an `f64` power.
pub fn powf64(a: f64, x: f64) -> f64;
/// Returns the exponential of an `f32`.
pub fn expf32(x: f32) -> f32;
/// Returns the exponential of an `f64`.
pub fn expf64(x: f64) -> f64;
/// Returns 2 raised to the power of an `f32`.
pub fn exp2f32(x: f32) -> f32;
/// Returns 2 raised to the power of an `f64`.
pub fn exp2f64(x: f64) -> f64;
/// Returns the natural logarithm of an `f32`.
pub fn logf32(x: f32) -> f32;
/// Returns the natural logarithm of an `f64`.
pub fn logf64(x: f64) -> f64;
/// Returns the base 10 logarithm of an `f32`.
pub fn log10f32(x: f32) -> f32;
/// Returns the base 10 logarithm of an `f64`.
pub fn log10f64(x: f64) -> f64;
/// Returns the base 2 logarithm of an `f32`.
pub fn log2f32(x: f32) -> f32;
/// Returns the base 2 logarithm of an `f64`.
pub fn log2f64(x: f64) -> f64;
/// Returns `a * b + c` for `f32` values.
pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
/// Returns `a * b + c` for `f64` values.
pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;
/// Returns the absolute value of an `f32`.
pub fn fabsf32(x: f32) -> f32;
/// Returns the absolute value of an `f64`.
pub fn fabsf64(x: f64) -> f64;
/// Copies the sign from `y` to `x` for `f32` values.
pub fn copysignf32(x: f32, y: f32) -> f32;
/// Copies the sign from `y` to `x` for `f64` values.
pub fn copysignf64(x: f64, y: f64) -> f64;
/// Returns the largest integer less than or equal to an `f32`.
pub fn floorf32(x: f32) -> f32;
/// Returns the largest integer less than or equal to an `f64`.
pub fn floorf64(x: f64) -> f64;
/// Returns the smallest integer greater than or equal to an `f32`.
pub fn ceilf32(x: f32) -> f32;
/// Returns the smallest integer greater than or equal to an `f64`.
pub fn ceilf64(x: f64) -> f64;
/// Returns the integer part of an `f32`.
pub fn truncf32(x: f32) -> f32;
/// Returns the integer part of an `f64`.
pub fn truncf64(x: f64) -> f64;
/// Returns the nearest integer to an `f32`. May raise an inexact floating-point exception
/// if the argument is not an integer.
pub fn rintf32(x: f32) -> f32;
/// Returns the nearest integer to an `f64`. May raise an inexact floating-point exception
/// if the argument is not an integer.
pub fn rintf64(x: f64) -> f64;
/// Returns the nearest integer to an `f32`.
pub fn nearbyintf32(x: f32) -> f32;
/// Returns the nearest integer to an `f64`.
pub fn nearbyintf64(x: f64) -> f64;
/// Returns the nearest integer to an `f32`. Rounds half-way cases away from zero.
pub fn roundf32(x: f32) -> f32;
/// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero.
pub fn roundf64(x: f64) -> f64;
/// Returns the number of bits set in a `u8`.
pub fn ctpop8(x: u8) -> u8;
/// Returns the number of bits set in a `u16`.
pub fn ctpop16(x: u16) -> u16;
/// Returns the number of bits set in a `u32`.
pub fn ctpop32(x: u32) -> u32;
/// Returns the number of bits set in a `u64`.
pub fn ctpop64(x: u64) -> u64;
/// Returns the number of leading bits unset in a `u8`.
pub fn ctlz8(x: u8) -> u8;
/// Returns the number of leading bits unset in a `u16`.
pub fn ctlz16(x: u16) -> u16;
/// Returns the number of leading bits unset in a `u32`.
pub fn ctlz32(x: u32) -> u32;
/// Returns the number of leading bits unset in a `u64`.
pub fn ctlz64(x: u64) -> u64;
/// Returns the number of trailing bits unset in a `u8`.
pub fn cttz8(x: u8) -> u8;
/// Returns the number of trailing bits unset in a `u16`.
pub fn cttz16(x: u16) -> u16;
/// Returns the number of trailing bits unset in a `u32`.
pub fn cttz32(x: u32) -> u32;
/// Returns the number of trailing bits unset in a `u64`.
pub fn cttz64(x: u64) -> u64;
/// Reverses the bytes in a `u16`.
pub fn bswap16(x: u16) -> u16;
/// Reverses the bytes in a `u32`.
pub fn bswap32(x: u32) -> u32;
/// Reverses the bytes in a `u64`.
pub fn bswap64(x: u64) -> u64;<|fim▁hole|>
/// Performs checked `i8` addition.
pub fn i8_add_with_overflow(x: i8, y: i8) -> (i8, bool);
/// Performs checked `i16` addition.
pub fn i16_add_with_overflow(x: i16, y: i16) -> (i16, bool);
/// Performs checked `i32` addition.
pub fn i32_add_with_overflow(x: i32, y: i32) -> (i32, bool);
/// Performs checked `i64` addition.
pub fn i64_add_with_overflow(x: i64, y: i64) -> (i64, bool);
/// Performs checked `u8` addition.
pub fn u8_add_with_overflow(x: u8, y: u8) -> (u8, bool);
/// Performs checked `u16` addition.
pub fn u16_add_with_overflow(x: u16, y: u16) -> (u16, bool);
/// Performs checked `u32` addition.
pub fn u32_add_with_overflow(x: u32, y: u32) -> (u32, bool);
/// Performs checked `u64` addition.
pub fn u64_add_with_overflow(x: u64, y: u64) -> (u64, bool);
/// Performs checked `i8` subtraction.
pub fn i8_sub_with_overflow(x: i8, y: i8) -> (i8, bool);
/// Performs checked `i16` subtraction.
pub fn i16_sub_with_overflow(x: i16, y: i16) -> (i16, bool);
/// Performs checked `i32` subtraction.
pub fn i32_sub_with_overflow(x: i32, y: i32) -> (i32, bool);
/// Performs checked `i64` subtraction.
pub fn i64_sub_with_overflow(x: i64, y: i64) -> (i64, bool);
/// Performs checked `u8` subtraction.
pub fn u8_sub_with_overflow(x: u8, y: u8) -> (u8, bool);
/// Performs checked `u16` subtraction.
pub fn u16_sub_with_overflow(x: u16, y: u16) -> (u16, bool);
/// Performs checked `u32` subtraction.
pub fn u32_sub_with_overflow(x: u32, y: u32) -> (u32, bool);
/// Performs checked `u64` subtraction.
pub fn u64_sub_with_overflow(x: u64, y: u64) -> (u64, bool);
/// Performs checked `i8` multiplication.
pub fn i8_mul_with_overflow(x: i8, y: i8) -> (i8, bool);
/// Performs checked `i16` multiplication.
pub fn i16_mul_with_overflow(x: i16, y: i16) -> (i16, bool);
/// Performs checked `i32` multiplication.
pub fn i32_mul_with_overflow(x: i32, y: i32) -> (i32, bool);
/// Performs checked `i64` multiplication.
pub fn i64_mul_with_overflow(x: i64, y: i64) -> (i64, bool);
/// Performs checked `u8` multiplication.
pub fn u8_mul_with_overflow(x: u8, y: u8) -> (u8, bool);
/// Performs checked `u16` multiplication.
pub fn u16_mul_with_overflow(x: u16, y: u16) -> (u16, bool);
/// Performs checked `u32` multiplication.
pub fn u32_mul_with_overflow(x: u32, y: u32) -> (u32, bool);
/// Performs checked `u64` multiplication.
pub fn u64_mul_with_overflow(x: u64, y: u64) -> (u64, bool);
}<|fim▁end|> | |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>#[no_mangle]
pub extern "C" fn foo() {<|fim▁hole|><|fim▁end|> | println!("abc");
} |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python<|fim▁hole|>
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2014 Bernardo Heynemann [email protected]
from fish_bundles.version import __version__<|fim▁end|> | # -*- coding: utf-8 -*-
# This file is part of fish-bundles.
# https://github.com/fish-bundles/fb |
<|file_name|>forceListMetadata.ts<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2019, salesforce.com, inc.
* All rights reserved.
* Licensed under the BSD 3-Clause license.
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
import {
CliCommandExecution,
CliCommandExecutor,
Command,
CommandOutput,
SfdxCommandBuilder
} from '@salesforce/salesforcedx-utils-vscode/out/src/cli';
import * as fs from 'fs';
import { SfdxCommandletExecutor } from '../commands/util';
import { getRootWorkspacePath } from '../util';
export class ForceListMetadataExecutor extends SfdxCommandletExecutor<string> {
private metadataType: string;
private defaultUsernameOrAlias: string;
private folder?: string;
public constructor(
metadataType: string,
defaultUsernameOrAlias: string,
folder?: string
) {
super();
this.metadataType = metadataType;
this.defaultUsernameOrAlias = defaultUsernameOrAlias;
this.folder = folder;
}
public build(data: {}): Command {
const builder = new SfdxCommandBuilder()
.withArg('force:mdapi:listmetadata')
.withFlag('-m', this.metadataType)
.withFlag('-u', this.defaultUsernameOrAlias)
.withLogName('force_mdapi_listmetadata')
.withJson();
if (this.folder) {
builder.withFlag('--folder', this.folder);
}
return builder.build();<|fim▁hole|> }
public execute(): CliCommandExecution {
const startTime = process.hrtime();
const execution = new CliCommandExecutor(this.build({}), {
cwd: getRootWorkspacePath()
}).execute();
execution.processExitSubject.subscribe(() => {
this.logMetric(execution.command.logName, startTime);
});
return execution;
}
}
export async function forceListMetadata(
metadataType: string,
defaultUsernameOrAlias: string,
outputPath: string,
folder?: string
): Promise<string> {
const forceListMetadataExecutor = new ForceListMetadataExecutor(
metadataType,
defaultUsernameOrAlias,
folder
);
const execution = forceListMetadataExecutor.execute();
const cmdOutput = new CommandOutput();
const result = await cmdOutput.getCmdResult(execution);
fs.writeFileSync(outputPath, result);
return result;
}<|fim▁end|> | |
<|file_name|>raphael.js<|end_file_name|><|fim▁begin|>// ┌────────────────────────────────────────────────────────────────────┐ \\
// │ Raphaël 2.1.2 - JavaScript Vector Library │ \\
// ├────────────────────────────────────────────────────────────────────┤ \\
// │ Copyright © 2008-2012 Dmitry Baranovskiy (http://raphaeljs.com) │ \\
// │ Copyright © 2008-2012 Sencha Labs (http://sencha.com) │ \\
// ├────────────────────────────────────────────────────────────────────┤ \\
// │ Licensed under the MIT (http://raphaeljs.com/license.html) license.│ \\
// └────────────────────────────────────────────────────────────────────┘ \\
// Copyright (c) 2013 Adobe Systems Incorporated. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ┌────────────────────────────────────────────────────────────┐ \\
// │ Eve 0.4.2 - JavaScript Events Library │ \\
// ├────────────────────────────────────────────────────────────┤ \\
// │ Author Dmitry Baranovskiy (http://dmitry.baranovskiy.com/) │ \\
// └────────────────────────────────────────────────────────────┘ \\
(function (glob) {
var version = "0.4.2",
has = "hasOwnProperty",
separator = /[\.\/]/,
wildcard = "*",
fun = function () {},
numsort = function (a, b) {
return a - b;
},
current_event,
stop,
events = {n: {}},
/*\
* eve
[ method ]
* Fires event with given `name`, given scope and other parameters.
> Arguments
- name (string) name of the *event*, dot (`.`) or slash (`/`) separated
- scope (object) context for the event handlers
- varargs (...) the rest of arguments will be sent to event handlers
= (object) array of returned values from the listeners
\*/
eve = function (name, scope) {
name = String(name);
var e = events,
oldstop = stop,
args = Array.prototype.slice.call(arguments, 2),
listeners = eve.listeners(name),
z = 0,
f = false,
l,
indexed = [],
queue = {},
out = [],
ce = current_event,
errors = [];
current_event = name;
stop = 0;
for (var i = 0, ii = listeners.length; i < ii; i++) if ("zIndex" in listeners[i]) {
indexed.push(listeners[i].zIndex);
if (listeners[i].zIndex < 0) {
queue[listeners[i].zIndex] = listeners[i];
}
}
indexed.sort(numsort);
while (indexed[z] < 0) {
l = queue[indexed[z++]];
out.push(l.apply(scope, args));
if (stop) {
stop = oldstop;
return out;
}
}
for (i = 0; i < ii; i++) {
l = listeners[i];
if ("zIndex" in l) {
if (l.zIndex == indexed[z]) {
out.push(l.apply(scope, args));
if (stop) {
break;
}
do {
z++;
l = queue[indexed[z]];
l && out.push(l.apply(scope, args));
if (stop) {
break;
}
} while (l)
} else {
queue[l.zIndex] = l;
}
} else {
out.push(l.apply(scope, args));
if (stop) {
break;
}
}
}
stop = oldstop;
current_event = ce;
return out.length ? out : null;
};
// Undocumented. Debug only.
eve._events = events;
/*\
* eve.listeners
[ method ]
* Internal method which gives you array of all event handlers that will be triggered by the given `name`.
> Arguments
- name (string) name of the event, dot (`.`) or slash (`/`) separated
= (array) array of event handlers
\*/
eve.listeners = function (name) {
var names = name.split(separator),
e = events,
item,
items,
k,
i,
ii,
j,
jj,
nes,
es = [e],
out = [];
for (i = 0, ii = names.length; i < ii; i++) {
nes = [];
for (j = 0, jj = es.length; j < jj; j++) {
e = es[j].n;
items = [e[names[i]], e[wildcard]];
k = 2;
while (k--) {
item = items[k];
if (item) {
nes.push(item);
out = out.concat(item.f || []);
}
}
}
es = nes;
}
return out;
};
/*\
* eve.on
[ method ]
**
* Binds given event handler with a given name. You can use wildcards “`*`” for the names:
| eve.on("*.under.*", f);
| eve("mouse.under.floor"); // triggers f
* Use @eve to trigger the listener.
**
> Arguments
**
- name (string) name of the event, dot (`.`) or slash (`/`) separated, with optional wildcards
- f (function) event handler function
**
= (function) returned function accepts a single numeric parameter that represents z-index of the handler. It is an optional feature and only used when you need to ensure that some subset of handlers will be invoked in a given order, despite of the order of assignment.
> Example:
| eve.on("mouse", eatIt)(2);
| eve.on("mouse", scream);
| eve.on("mouse", catchIt)(1);
* This will ensure that `catchIt()` function will be called before `eatIt()`.
*
* If you want to put your handler before non-indexed handlers, specify a negative value.
* Note: I assume most of the time you don’t need to worry about z-index, but it’s nice to have this feature “just in case”.
\*/
eve.on = function (name, f) {
name = String(name);
if (typeof f != "function") {
return function () {};
}
var names = name.split(separator),
e = events;
for (var i = 0, ii = names.length; i < ii; i++) {
e = e.n;
e = e.hasOwnProperty(names[i]) && e[names[i]] || (e[names[i]] = {n: {}});
}
e.f = e.f || [];
for (i = 0, ii = e.f.length; i < ii; i++) if (e.f[i] == f) {
return fun;
}
e.f.push(f);
return function (zIndex) {
if (+zIndex == +zIndex) {
f.zIndex = +zIndex;
}
};
};
/*\
* eve.f
[ method ]
**
* Returns function that will fire given event with optional arguments.
* Arguments that will be passed to the result function will be also
* concated to the list of final arguments.
| el.onclick = eve.f("click", 1, 2);
| eve.on("click", function (a, b, c) {
| console.log(a, b, c); // 1, 2, [event object]
| });
> Arguments
- event (string) event name
- varargs (…) and any other arguments
= (function) possible event handler function
\*/
eve.f = function (event) {
var attrs = [].slice.call(arguments, 1);
return function () {
eve.apply(null, [event, null].concat(attrs).concat([].slice.call(arguments, 0)));
};
};
/*\
* eve.stop
[ method ]
**
* Is used inside an event handler to stop the event, preventing any subsequent listeners from firing.
\*/
eve.stop = function () {
stop = 1;
};
/*\
* eve.nt
[ method ]
**
* Could be used inside event handler to figure out actual name of the event.
**
> Arguments
**
- subname (string) #optional subname of the event
**
= (string) name of the event, if `subname` is not specified
* or
= (boolean) `true`, if current event’s name contains `subname`
\*/
eve.nt = function (subname) {
if (subname) {
return new RegExp("(?:\\.|\\/|^)" + subname + "(?:\\.|\\/|$)").test(current_event);
}
return current_event;
};
/*\
* eve.nts
[ method ]
**
* Could be used inside event handler to figure out actual name of the event.
**
**
= (array) names of the event
\*/
eve.nts = function () {
return current_event.split(separator);
};
/*\
* eve.off
[ method ]
**
* Removes given function from the list of event listeners assigned to given name.
* If no arguments specified all the events will be cleared.
**
> Arguments
**
- name (string) name of the event, dot (`.`) or slash (`/`) separated, with optional wildcards
- f (function) event handler function
\*/
/*\
* eve.unbind
[ method ]
**
* See @eve.off
\*/
eve.off = eve.unbind = function (name, f) {
if (!name) {
eve._events = events = {n: {}};
return;
}
var names = name.split(separator),
e,
key,
splice,
i, ii, j, jj,
cur = [events];
for (i = 0, ii = names.length; i < ii; i++) {
for (j = 0; j < cur.length; j += splice.length - 2) {
splice = [j, 1];
e = cur[j].n;
if (names[i] != wildcard) {
if (e[names[i]]) {
splice.push(e[names[i]]);
}
} else {
for (key in e) if (e[has](key)) {
splice.push(e[key]);
}
}
cur.splice.apply(cur, splice);
}
}
for (i = 0, ii = cur.length; i < ii; i++) {
e = cur[i];
while (e.n) {
if (f) {
if (e.f) {
for (j = 0, jj = e.f.length; j < jj; j++) if (e.f[j] == f) {
e.f.splice(j, 1);
break;
}
!e.f.length && delete e.f;
}
for (key in e.n) if (e.n[has](key) && e.n[key].f) {
var funcs = e.n[key].f;
for (j = 0, jj = funcs.length; j < jj; j++) if (funcs[j] == f) {
funcs.splice(j, 1);
break;
}
!funcs.length && delete e.n[key].f;
}
} else {
delete e.f;
for (key in e.n) if (e.n[has](key) && e.n[key].f) {
delete e.n[key].f;
}
}
e = e.n;
}
}
};
/*\
* eve.once
[ method ]
**
* Binds given event handler with a given name to only run once then unbind itself.
| eve.once("login", f);
| eve("login"); // triggers f
| eve("login"); // no listeners
* Use @eve to trigger the listener.
**
> Arguments
**
- name (string) name of the event, dot (`.`) or slash (`/`) separated, with optional wildcards
- f (function) event handler function
**
= (function) same return function as @eve.on
\*/
eve.once = function (name, f) {
var f2 = function () {
eve.unbind(name, f2);
return f.apply(this, arguments);
};
return eve.on(name, f2);
};
/*\
* eve.version
[ property (string) ]
**
* Current version of the library.
\*/
eve.version = version;
eve.toString = function () {
return "You are running Eve " + version;
};
(typeof module != "undefined" && module.exports) ? (module.exports = eve) : (typeof define != "undefined" ? (define("eve", [], function() { return eve; })) : (glob.eve = eve));
})(window || this);
// ┌─────────────────────────────────────────────────────────────────────┐ \\
// │ "Raphaël 2.1.2" - JavaScript Vector Library │ \\
// ├─────────────────────────────────────────────────────────────────────┤ \\
// │ Copyright (c) 2008-2011 Dmitry Baranovskiy (http://raphaeljs.com) │ \\
// │ Copyright (c) 2008-2011 Sencha Labs (http://sencha.com) │ \\
// │ Licensed under the MIT (http://raphaeljs.com/license.html) license. │ \\
// └─────────────────────────────────────────────────────────────────────┘ \\
(function (glob, factory) {
// AMD support
if (typeof define === "function" && define.amd) {
// Define as an anonymous module
define(["eve"], function( eve ) {
return factory(glob, eve);
});
} else {
// Browser globals (glob is window)
// Raphael adds itself to window
factory(glob, glob.eve);
}
}(this, function (window, eve) {
/*\
* Raphael
[ method ]
**
* Creates a canvas object on which to draw.
* You must do this first, as all future calls to drawing methods
* from this instance will be bound to this canvas.
> Parameters
**
- container (HTMLElement|string) DOM element or its ID which is going to be a parent for drawing surface
- width (number)
- height (number)
- callback (function) #optional callback function which is going to be executed in the context of newly created paper
* or
- x (number)
- y (number)
- width (number)
- height (number)
- callback (function) #optional callback function which is going to be executed in the context of newly created paper
* or
- all (array) (first 3 or 4 elements in the array are equal to [containerID, width, height] or [x, y, width, height]. The rest are element descriptions in format {type: type, <attributes>}). See @Paper.add.
- callback (function) #optional callback function which is going to be executed in the context of newly created paper
* or
- onReadyCallback (function) function that is going to be called on DOM ready event. You can also subscribe to this event via Eve’s “DOMLoad” event. In this case method returns `undefined`.
= (object) @Paper
> Usage
| // Each of the following examples create a canvas
| // that is 320px wide by 200px high.
| // Canvas is created at the viewport’s 10,50 coordinate.
| var paper = Raphael(10, 50, 320, 200);
| // Canvas is created at the top left corner of the #notepad element
| // (or its top right corner in dir="rtl" elements)
| var paper = Raphael(document.getElementById("notepad"), 320, 200);
| // Same as above
| var paper = Raphael("notepad", 320, 200);
| // Image dump
| var set = Raphael(["notepad", 320, 200, {
| type: "rect",
| x: 10,
| y: 10,
| width: 25,
| height: 25,
| stroke: "#f00"
| }, {
| type: "text",
| x: 30,
| y: 40,
| text: "Dump"
| }]);
\*/
function R(first) {
if (R.is(first, "function")) {
return loaded ? first() : eve.on("raphael.DOMload", first);
} else if (R.is(first, array)) {
return R._engine.create[apply](R, first.splice(0, 3 + R.is(first[0], nu))).add(first);
} else {
var args = Array.prototype.slice.call(arguments, 0);
if (R.is(args[args.length - 1], "function")) {
var f = args.pop();
return loaded ? f.call(R._engine.create[apply](R, args)) : eve.on("raphael.DOMload", function () {
f.call(R._engine.create[apply](R, args));
});
} else {
return R._engine.create[apply](R, arguments);
}
}
}
R.version = "2.1.2";
R.eve = eve;
var loaded,
separator = /[, ]+/,
elements = {circle: 1, rect: 1, path: 1, ellipse: 1, text: 1, image: 1},
formatrg = /\{(\d+)\}/g,
proto = "prototype",
has = "hasOwnProperty",
g = {
doc: document,
win: window
},
oldRaphael = {
was: Object.prototype[has].call(g.win, "Raphael"),
is: g.win.Raphael
},
Paper = function () {
/*\
* Paper.ca
[ property (object) ]
**
* Shortcut for @Paper.customAttributes
\*/
/*\
* Paper.customAttributes
[ property (object) ]
**
* If you have a set of attributes that you would like to represent
* as a function of some number you can do it easily with custom attributes:
> Usage
| paper.customAttributes.hue = function (num) {
| num = num % 1;
| return {fill: "hsb(" + num + ", 0.75, 1)"};
| };
| // Custom attribute “hue” will change fill
| // to be given hue with fixed saturation and brightness.
| // Now you can use it like this:
| var c = paper.circle(10, 10, 10).attr({hue: .45});
| // or even like this:
| c.animate({hue: 1}, 1e3);
|
| // You could also create custom attribute
| // with multiple parameters:
| paper.customAttributes.hsb = function (h, s, b) {
| return {fill: "hsb(" + [h, s, b].join(",") + ")"};
| };
| c.attr({hsb: "0.5 .8 1"});
| c.animate({hsb: [1, 0, 0.5]}, 1e3);
\*/
this.ca = this.customAttributes = {};
},
paperproto,
appendChild = "appendChild",
apply = "apply",
concat = "concat",
supportsTouch = ('ontouchstart' in g.win) || g.win.DocumentTouch && g.doc instanceof DocumentTouch, //taken from Modernizr touch test
E = "",
S = " ",
Str = String,
split = "split",
events = "click dblclick mousedown mousemove mouseout mouseover mouseup touchstart touchmove touchend touchcancel"[split](S),
touchMap = {
mousedown: "touchstart",
mousemove: "touchmove",
mouseup: "touchend"
},
lowerCase = Str.prototype.toLowerCase,
math = Math,
mmax = math.max,
mmin = math.min,
abs = math.abs,
pow = math.pow,
PI = math.PI,
nu = "number",
string = "string",
array = "array",
toString = "toString",
fillString = "fill",
objectToString = Object.prototype.toString,
paper = {},
push = "push",
ISURL = R._ISURL = /^url\(['"]?([^\)]+?)['"]?\)$/i,
colourRegExp = /^\s*((#[a-f\d]{6})|(#[a-f\d]{3})|rgba?\(\s*([\d\.]+%?\s*,\s*[\d\.]+%?\s*,\s*[\d\.]+%?(?:\s*,\s*[\d\.]+%?)?)\s*\)|hsba?\(\s*([\d\.]+(?:deg|\xb0|%)?\s*,\s*[\d\.]+%?\s*,\s*[\d\.]+(?:%?\s*,\s*[\d\.]+)?)%?\s*\)|hsla?\(\s*([\d\.]+(?:deg|\xb0|%)?\s*,\s*[\d\.]+%?\s*,\s*[\d\.]+(?:%?\s*,\s*[\d\.]+)?)%?\s*\))\s*$/i,
isnan = {"NaN": 1, "Infinity": 1, "-Infinity": 1},
bezierrg = /^(?:cubic-)?bezier\(([^,]+),([^,]+),([^,]+),([^\)]+)\)/,
round = math.round,
setAttribute = "setAttribute",
toFloat = parseFloat,
toInt = parseInt,
upperCase = Str.prototype.toUpperCase,
availableAttrs = R._availableAttrs = {
"arrow-end": "none",
"arrow-start": "none",
blur: 0,
"clip-rect": "0 0 1e9 1e9",
cursor: "default",
cx: 0,
cy: 0,
fill: "#fff",
"fill-opacity": 1,
font: '10px "Arial"',
"font-family": '"Arial"',
"font-size": "10",
"font-style": "normal",
"font-weight": 400,
gradient: 0,
height: 0,
href: "http://raphaeljs.com/",
"letter-spacing": 0,
opacity: 1,
path: "M0,0",
r: 0,
rx: 0,
ry: 0,
src: "",
stroke: "#000",
"stroke-dasharray": "",
"stroke-linecap": "butt",
"stroke-linejoin": "butt",
"stroke-miterlimit": 0,
"stroke-opacity": 1,
"stroke-width": 1,
target: "_blank",
"text-anchor": "middle",
title: "Raphael",
transform: "",
width: 0,
x: 0,
y: 0
},
availableAnimAttrs = R._availableAnimAttrs = {
blur: nu,
"clip-rect": "csv",
cx: nu,
cy: nu,
fill: "colour",
"fill-opacity": nu,
"font-size": nu,
height: nu,
opacity: nu,
path: "path",
r: nu,
rx: nu,
ry: nu,
stroke: "colour",
"stroke-opacity": nu,
"stroke-width": nu,
transform: "transform",
width: nu,
x: nu,
y: nu
},
whitespace = /[\x09\x0a\x0b\x0c\x0d\x20\xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029]/g,
commaSpaces = /[\x09\x0a\x0b\x0c\x0d\x20\xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029]*,[\x09\x0a\x0b\x0c\x0d\x20\xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029]*/,
hsrg = {hs: 1, rg: 1},
p2s = /,?([achlmqrstvxz]),?/gi,
pathCommand = /([achlmrqstvz])[\x09\x0a\x0b\x0c\x0d\x20\xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029,]*((-?\d*\.?\d*(?:e[\-+]?\d+)?[\x09\x0a\x0b\x0c\x0d\x20\xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029]*,?[\x09\x0a\x0b\x0c\x0d\x20\xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029]*)+)/ig,
tCommand = /([rstm])[\x09\x0a\x0b\x0c\x0d\x20\xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029,]*((-?\d*\.?\d*(?:e[\-+]?\d+)?[\x09\x0a\x0b\x0c\x0d\x20\xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029]*,?[\x09\x0a\x0b\x0c\x0d\x20\xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029]*)+)/ig,
pathValues = /(-?\d*\.?\d*(?:e[\-+]?\d+)?)[\x09\x0a\x0b\x0c\x0d\x20\xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029]*,?[\x09\x0a\x0b\x0c\x0d\x20\xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029]*/ig,
radial_gradient = R._radial_gradient = /^r(?:\(([^,]+?)[\x09\x0a\x0b\x0c\x0d\x20\xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029]*,[\x09\x0a\x0b\x0c\x0d\x20\xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029]*([^\)]+?)\))?/,
eldata = {},
sortByKey = function (a, b) {
return a.key - b.key;
},
sortByNumber = function (a, b) {
return toFloat(a) - toFloat(b);
},
fun = function () {},
pipe = function (x) {
return x;
},
rectPath = R._rectPath = function (x, y, w, h, r) {
if (r) {
return [["M", x + r, y], ["l", w - r * 2, 0], ["a", r, r, 0, 0, 1, r, r], ["l", 0, h - r * 2], ["a", r, r, 0, 0, 1, -r, r], ["l", r * 2 - w, 0], ["a", r, r, 0, 0, 1, -r, -r], ["l", 0, r * 2 - h], ["a", r, r, 0, 0, 1, r, -r], ["z"]];
}
return [["M", x, y], ["l", w, 0], ["l", 0, h], ["l", -w, 0], ["z"]];
},
ellipsePath = function (x, y, rx, ry) {
if (ry == null) {
ry = rx;
}
return [["M", x, y], ["m", 0, -ry], ["a", rx, ry, 0, 1, 1, 0, 2 * ry], ["a", rx, ry, 0, 1, 1, 0, -2 * ry], ["z"]];
},
getPath = R._getPath = {
path: function (el) {
return el.attr("path");
},
circle: function (el) {
var a = el.attrs;
return ellipsePath(a.cx, a.cy, a.r);
},
ellipse: function (el) {
var a = el.attrs;
return ellipsePath(a.cx, a.cy, a.rx, a.ry);
},
rect: function (el) {
var a = el.attrs;
return rectPath(a.x, a.y, a.width, a.height, a.r);
},
image: function (el) {
var a = el.attrs;
return rectPath(a.x, a.y, a.width, a.height);
},
text: function (el) {
var bbox = el._getBBox();
return rectPath(bbox.x, bbox.y, bbox.width, bbox.height);
},
set : function(el) {
var bbox = el._getBBox();
return rectPath(bbox.x, bbox.y, bbox.width, bbox.height);
}
},
/*\
* Raphael.mapPath
[ method ]
**
* Transform the path string with given matrix.
> Parameters
- path (string) path string
- matrix (object) see @Matrix
= (string) transformed path string
\*/
mapPath = R.mapPath = function (path, matrix) {
if (!matrix) {
return path;
}
var x, y, i, j, ii, jj, pathi;
path = path2curve(path);
for (i = 0, ii = path.length; i < ii; i++) {
pathi = path[i];
for (j = 1, jj = pathi.length; j < jj; j += 2) {
x = matrix.x(pathi[j], pathi[j + 1]);
y = matrix.y(pathi[j], pathi[j + 1]);
pathi[j] = x;
pathi[j + 1] = y;
}
}
return path;
};
R._g = g;
/*\
* Raphael.type
[ property (string) ]
**
* Can be “SVG”, “VML” or empty, depending on browser support.
\*/
R.type = (g.win.SVGAngle || g.doc.implementation.hasFeature("http://www.w3.org/TR/SVG11/feature#BasicStructure", "1.1") ? "SVG" : "VML");
if (R.type == "VML") {
var d = g.doc.createElement("div"),
b;
d.innerHTML = '<v:shape adj="1"/>';
b = d.firstChild;
b.style.behavior = "url(#default#VML)";
if (!(b && typeof b.adj == "object")) {
return (R.type = E);
}
d = null;
}
/*\
* Raphael.svg
[ property (boolean) ]
**
* `true` if browser supports SVG.
\*/
/*\
* Raphael.vml
[ property (boolean) ]
**
* `true` if browser supports VML.
\*/
R.svg = !(R.vml = R.type == "VML");
R._Paper = Paper;
/*\
* Raphael.fn
[ property (object) ]
**
* You can add your own method to the canvas. For example if you want to draw a pie chart,
* you can create your own pie chart function and ship it as a Raphaël plugin. To do this
* you need to extend the `Raphael.fn` object. You should modify the `fn` object before a
* Raphaël instance is created, otherwise it will take no effect. Please note that the
* ability for namespaced plugins was removed in Raphael 2.0. It is up to the plugin to
* ensure any namespacing ensures proper context.
> Usage
| Raphael.fn.arrow = function (x1, y1, x2, y2, size) {
| return this.path( ... );
| };
| // or create namespace
| Raphael.fn.mystuff = {
| arrow: function () {…},
| star: function () {…},
| // etc…
| };
| var paper = Raphael(10, 10, 630, 480);
| // then use it
| paper.arrow(10, 10, 30, 30, 5).attr({fill: "#f00"});
| paper.mystuff.arrow();
| paper.mystuff.star();
\*/
R.fn = paperproto = Paper.prototype = R.prototype;
R._id = 0;
R._oid = 0;
/*\
* Raphael.is
[ method ]
**
* Handfull replacement for `typeof` operator.
> Parameters
- o (…) any object or primitive
- type (string) name of the type, i.e. “string”, “function”, “number”, etc.
= (boolean) is given value is of given type
\*/
R.is = function (o, type) {
type = lowerCase.call(type);
if (type == "finite") {
return !isnan[has](+o);
}
if (type == "array") {
return o instanceof Array;
}
return (type == "null" && o === null) ||
(type == typeof o && o !== null) ||
(type == "object" && o === Object(o)) ||
(type == "array" && Array.isArray && Array.isArray(o)) ||
objectToString.call(o).slice(8, -1).toLowerCase() == type;
};
function clone(obj) {
if (typeof obj == "function" || Object(obj) !== obj) {
return obj;
}
var res = new obj.constructor;
for (var key in obj) if (obj[has](key)) {
res[key] = clone(obj[key]);
}
return res;
}
/*\
* Raphael.angle
[ method ]
**
* Returns angle between two or three points
> Parameters
- x1 (number) x coord of first point
- y1 (number) y coord of first point
- x2 (number) x coord of second point
- y2 (number) y coord of second point
- x3 (number) #optional x coord of third point
- y3 (number) #optional y coord of third point
= (number) angle in degrees.
\*/
R.angle = function (x1, y1, x2, y2, x3, y3) {
if (x3 == null) {
var x = x1 - x2,
y = y1 - y2;
if (!x && !y) {
return 0;
}
return (180 + math.atan2(-y, -x) * 180 / PI + 360) % 360;
} else {
return R.angle(x1, y1, x3, y3) - R.angle(x2, y2, x3, y3);
}
};
/*\
* Raphael.rad
[ method ]
**
* Transform angle to radians
> Parameters
- deg (number) angle in degrees
= (number) angle in radians.
\*/
R.rad = function (deg) {
return deg % 360 * PI / 180;
};
/*\
* Raphael.deg
[ method ]
**
* Transform angle to degrees
> Parameters
- deg (number) angle in radians
= (number) angle in degrees.
\*/
R.deg = function (rad) {
return rad * 180 / PI % 360;
};
/*\
* Raphael.snapTo
[ method ]
**
* Snaps given value to given grid.
> Parameters
- values (array|number) given array of values or step of the grid
- value (number) value to adjust
- tolerance (number) #optional tolerance for snapping. Default is `10`.
= (number) adjusted value.
\*/
R.snapTo = function (values, value, tolerance) {
tolerance = R.is(tolerance, "finite") ? tolerance : 10;
if (R.is(values, array)) {
var i = values.length;
while (i--) if (abs(values[i] - value) <= tolerance) {
return values[i];
}
} else {
values = +values;
var rem = value % values;
if (rem < tolerance) {
return value - rem;
}
if (rem > values - tolerance) {
return value - rem + values;
}
}
return value;
};
/*\
* Raphael.createUUID
[ method ]
**
* Returns RFC4122, version 4 ID
\*/
var createUUID = R.createUUID = (function (uuidRegEx, uuidReplacer) {
return function () {
return "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(uuidRegEx, uuidReplacer).toUpperCase();
};
})(/[xy]/g, function (c) {
var r = math.random() * 16 | 0,
v = c == "x" ? r : (r & 3 | 8);
return v.toString(16);
});
/*\
* Raphael.setWindow
[ method ]
**
* Used when you need to draw in `<iframe>`. Switched window to the iframe one.
> Parameters
- newwin (window) new window object
\*/
R.setWindow = function (newwin) {
eve("raphael.setWindow", R, g.win, newwin);
g.win = newwin;
g.doc = g.win.document;
if (R._engine.initWin) {
R._engine.initWin(g.win);
}
};
var toHex = function (color) {
if (R.vml) {
// http://dean.edwards.name/weblog/2009/10/convert-any-colour-value-to-hex-in-msie/
var trim = /^\s+|\s+$/g;
var bod;
try {
var docum = new ActiveXObject("htmlfile");
docum.write("<body>");
docum.close();
bod = docum.body;
} catch(e) {
bod = createPopup().document.body;
}
var range = bod.createTextRange();
toHex = cacher(function (color) {
try {
bod.style.color = Str(color).replace(trim, E);
var value = range.queryCommandValue("ForeColor");
value = ((value & 255) << 16) | (value & 65280) | ((value & 16711680) >>> 16);
return "#" + ("000000" + value.toString(16)).slice(-6);
} catch(e) {
return "none";
}
});
} else {
var i = g.doc.createElement("i");
i.title = "Rapha\xebl Colour Picker";
i.style.display = "none";
g.doc.body.appendChild(i);
toHex = cacher(function (color) {
i.style.color = color;
return g.doc.defaultView.getComputedStyle(i, E).getPropertyValue("color");
});
}
return toHex(color);
},
hsbtoString = function () {
return "hsb(" + [this.h, this.s, this.b] + ")";
},
hsltoString = function () {
return "hsl(" + [this.h, this.s, this.l] + ")";
},
rgbtoString = function () {
return this.hex;
},
prepareRGB = function (r, g, b) {
if (g == null && R.is(r, "object") && "r" in r && "g" in r && "b" in r) {
b = r.b;
g = r.g;
r = r.r;
}
if (g == null && R.is(r, string)) {
var clr = R.getRGB(r);
r = clr.r;
g = clr.g;
b = clr.b;
}
if (r > 1 || g > 1 || b > 1) {
r /= 255;
g /= 255;
b /= 255;
}
return [r, g, b];
},
packageRGB = function (r, g, b, o) {
r *= 255;
g *= 255;
b *= 255;
var rgb = {
r: r,
g: g,
b: b,
hex: R.rgb(r, g, b),
toString: rgbtoString
};
R.is(o, "finite") && (rgb.opacity = o);
return rgb;
};
/*\
* Raphael.color
[ method ]
**
* Parses the color string and returns object with all values for the given color.
> Parameters
- clr (string) color string in one of the supported formats (see @Raphael.getRGB)
= (object) Combined RGB & HSB object in format:
o {
o r (number) red,
o g (number) green,
o b (number) blue,
o hex (string) color in HTML/CSS format: #••••••,
o error (boolean) `true` if string can’t be parsed,
o h (number) hue,
o s (number) saturation,
o v (number) value (brightness),
o l (number) lightness
o }
\*/
R.color = function (clr) {
var rgb;
if (R.is(clr, "object") && "h" in clr && "s" in clr && "b" in clr) {
rgb = R.hsb2rgb(clr);
clr.r = rgb.r;
clr.g = rgb.g;
clr.b = rgb.b;
clr.hex = rgb.hex;
} else if (R.is(clr, "object") && "h" in clr && "s" in clr && "l" in clr) {
rgb = R.hsl2rgb(clr);
clr.r = rgb.r;
clr.g = rgb.g;
clr.b = rgb.b;
clr.hex = rgb.hex;
} else {
if (R.is(clr, "string")) {
clr = R.getRGB(clr);
}
if (R.is(clr, "object") && "r" in clr && "g" in clr && "b" in clr) {
rgb = R.rgb2hsl(clr);
clr.h = rgb.h;
clr.s = rgb.s;
clr.l = rgb.l;
rgb = R.rgb2hsb(clr);
clr.v = rgb.b;
} else {
clr = {hex: "none"};
clr.r = clr.g = clr.b = clr.h = clr.s = clr.v = clr.l = -1;
}
}
clr.toString = rgbtoString;
return clr;
};
/*\
* Raphael.hsb2rgb
[ method ]
**
* Converts HSB values to RGB object.
> Parameters
- h (number) hue
- s (number) saturation
- v (number) value or brightness
= (object) RGB object in format:
o {
o r (number) red,
o g (number) green,
o b (number) blue,
o hex (string) color in HTML/CSS format: #••••••
o }
\*/
R.hsb2rgb = function (h, s, v, o) {
if (this.is(h, "object") && "h" in h && "s" in h && "b" in h) {
v = h.b;
s = h.s;
h = h.h;
o = h.o;
}
h *= 360;
var R, G, B, X, C;
h = (h % 360) / 60;
C = v * s;
X = C * (1 - abs(h % 2 - 1));
R = G = B = v - C;
h = ~~h;
R += [C, X, 0, 0, X, C][h];
G += [X, C, C, X, 0, 0][h];
B += [0, 0, X, C, C, X][h];
return packageRGB(R, G, B, o);
};
/*\
* Raphael.hsl2rgb
[ method ]
**
* Converts HSL values to RGB object.
> Parameters
- h (number) hue
- s (number) saturation
- l (number) luminosity
= (object) RGB object in format:
o {
o r (number) red,
o g (number) green,
o b (number) blue,
o hex (string) color in HTML/CSS format: #••••••
o }
\*/
R.hsl2rgb = function (h, s, l, o) {
if (this.is(h, "object") && "h" in h && "s" in h && "l" in h) {
l = h.l;
s = h.s;
h = h.h;
}
if (h > 1 || s > 1 || l > 1) {
h /= 360;
s /= 100;
l /= 100;
}
h *= 360;
var R, G, B, X, C;
h = (h % 360) / 60;
C = 2 * s * (l < .5 ? l : 1 - l);
X = C * (1 - abs(h % 2 - 1));
R = G = B = l - C / 2;
h = ~~h;
R += [C, X, 0, 0, X, C][h];
G += [X, C, C, X, 0, 0][h];
B += [0, 0, X, C, C, X][h];
return packageRGB(R, G, B, o);
};
/*\
* Raphael.rgb2hsb
[ method ]
**
* Converts RGB values to HSB object.
> Parameters
- r (number) red
- g (number) green
- b (number) blue
= (object) HSB object in format:
o {
o h (number) hue
o s (number) saturation
o b (number) brightness
o }
\*/
R.rgb2hsb = function (r, g, b) {
b = prepareRGB(r, g, b);
r = b[0];
g = b[1];
b = b[2];
var H, S, V, C;
V = mmax(r, g, b);
C = V - mmin(r, g, b);
H = (C == 0 ? null :
V == r ? (g - b) / C :
V == g ? (b - r) / C + 2 :
(r - g) / C + 4
);
H = ((H + 360) % 6) * 60 / 360;
S = C == 0 ? 0 : C / V;
return {h: H, s: S, b: V, toString: hsbtoString};
};
/*\
* Raphael.rgb2hsl
[ method ]
**
* Converts RGB values to HSL object.
> Parameters
- r (number) red
- g (number) green
- b (number) blue
= (object) HSL object in format:
o {
o h (number) hue
o s (number) saturation
o l (number) luminosity
o }
\*/
R.rgb2hsl = function (r, g, b) {
b = prepareRGB(r, g, b);
r = b[0];
g = b[1];
b = b[2];
var H, S, L, M, m, C;
M = mmax(r, g, b);
m = mmin(r, g, b);
C = M - m;
H = (C == 0 ? null :
M == r ? (g - b) / C :
M == g ? (b - r) / C + 2 :
(r - g) / C + 4);
H = ((H + 360) % 6) * 60 / 360;
L = (M + m) / 2;
S = (C == 0 ? 0 :
L < .5 ? C / (2 * L) :
C / (2 - 2 * L));
return {h: H, s: S, l: L, toString: hsltoString};
};
R._path2string = function () {
return this.join(",").replace(p2s, "$1");
};
function repush(array, item) {
for (var i = 0, ii = array.length; i < ii; i++) if (array[i] === item) {
return array.push(array.splice(i, 1)[0]);
}
}
function cacher(f, scope, postprocessor) {
function newf() {
var arg = Array.prototype.slice.call(arguments, 0),
args = arg.join("\u2400"),
cache = newf.cache = newf.cache || {},
count = newf.count = newf.count || [];
if (cache[has](args)) {
repush(count, args);
return postprocessor ? postprocessor(cache[args]) : cache[args];
}
count.length >= 1e3 && delete cache[count.shift()];
count.push(args);
cache[args] = f[apply](scope, arg);
return postprocessor ? postprocessor(cache[args]) : cache[args];
}
return newf;
}
var preload = R._preload = function (src, f) {
var img = g.doc.createElement("img");
img.style.cssText = "position:absolute;left:-9999em;top:-9999em";
img.onload = function () {
f.call(this);
this.onload = null;
g.doc.body.removeChild(this);
};
img.onerror = function () {
g.doc.body.removeChild(this);
};
g.doc.body.appendChild(img);
img.src = src;
};
function clrToString() {
return this.hex;
}
/*\
* Raphael.getRGB
[ method ]
**
* Parses colour string as RGB object
> Parameters
- colour (string) colour string in one of formats:
# <ul>
# <li>Colour name (“<code>red</code>”, “<code>green</code>”, “<code>cornflowerblue</code>”, etc)</li>
# <li>#••• — shortened HTML colour: (“<code>#000</code>”, “<code>#fc0</code>”, etc)</li>
# <li>#•••••• — full length HTML colour: (“<code>#000000</code>”, “<code>#bd2300</code>”)</li>
# <li>rgb(•••, •••, •••) — red, green and blue channels’ values: (“<code>rgb(200, 100, 0)</code>”)</li>
# <li>rgb(•••%, •••%, •••%) — same as above, but in %: (“<code>rgb(100%, 175%, 0%)</code>”)</li>
# <li>hsb(•••, •••, •••) — hue, saturation and brightness values: (“<code>hsb(0.5, 0.25, 1)</code>”)</li>
# <li>hsb(•••%, •••%, •••%) — same as above, but in %</li>
# <li>hsl(•••, •••, •••) — same as hsb</li>
# <li>hsl(•••%, •••%, •••%) — same as hsb</li>
# </ul>
= (object) RGB object in format:
o {
o r (number) red,
o g (number) green,
o b (number) blue
o hex (string) color in HTML/CSS format: #••••••,
o error (boolean) true if string can’t be parsed
o }
\*/
R.getRGB = cacher(function (colour) {
if (!colour || !!((colour = Str(colour)).indexOf("-") + 1)) {
return {r: -1, g: -1, b: -1, hex: "none", error: 1, toString: clrToString};
}
if (colour == "none") {
return {r: -1, g: -1, b: -1, hex: "none", toString: clrToString};
}
!(hsrg[has](colour.toLowerCase().substring(0, 2)) || colour.charAt() == "#") && (colour = toHex(colour));
var res,
red,
green,
blue,
opacity,
t,
values,
rgb = colour.match(colourRegExp);
if (rgb) {
if (rgb[2]) {
blue = toInt(rgb[2].substring(5), 16);
green = toInt(rgb[2].substring(3, 5), 16);
red = toInt(rgb[2].substring(1, 3), 16);
}
if (rgb[3]) {
blue = toInt((t = rgb[3].charAt(3)) + t, 16);
green = toInt((t = rgb[3].charAt(2)) + t, 16);
red = toInt((t = rgb[3].charAt(1)) + t, 16);
}
if (rgb[4]) {
values = rgb[4][split](commaSpaces);
red = toFloat(values[0]);
values[0].slice(-1) == "%" && (red *= 2.55);
green = toFloat(values[1]);
values[1].slice(-1) == "%" && (green *= 2.55);
blue = toFloat(values[2]);
values[2].slice(-1) == "%" && (blue *= 2.55);
rgb[1].toLowerCase().slice(0, 4) == "rgba" && (opacity = toFloat(values[3]));
values[3] && values[3].slice(-1) == "%" && (opacity /= 100);
}
if (rgb[5]) {
values = rgb[5][split](commaSpaces);
red = toFloat(values[0]);
values[0].slice(-1) == "%" && (red *= 2.55);
green = toFloat(values[1]);
values[1].slice(-1) == "%" && (green *= 2.55);
blue = toFloat(values[2]);
values[2].slice(-1) == "%" && (blue *= 2.55);
(values[0].slice(-3) == "deg" || values[0].slice(-1) == "\xb0") && (red /= 360);
rgb[1].toLowerCase().slice(0, 4) == "hsba" && (opacity = toFloat(values[3]));
values[3] && values[3].slice(-1) == "%" && (opacity /= 100);
return R.hsb2rgb(red, green, blue, opacity);
}
if (rgb[6]) {
values = rgb[6][split](commaSpaces);
red = toFloat(values[0]);
values[0].slice(-1) == "%" && (red *= 2.55);
green = toFloat(values[1]);
values[1].slice(-1) == "%" && (green *= 2.55);
blue = toFloat(values[2]);
values[2].slice(-1) == "%" && (blue *= 2.55);
(values[0].slice(-3) == "deg" || values[0].slice(-1) == "\xb0") && (red /= 360);
rgb[1].toLowerCase().slice(0, 4) == "hsla" && (opacity = toFloat(values[3]));
values[3] && values[3].slice(-1) == "%" && (opacity /= 100);
return R.hsl2rgb(red, green, blue, opacity);
}
rgb = {r: red, g: green, b: blue, toString: clrToString};
rgb.hex = "#" + (16777216 | blue | (green << 8) | (red << 16)).toString(16).slice(1);
R.is(opacity, "finite") && (rgb.opacity = opacity);
return rgb;
}
return {r: -1, g: -1, b: -1, hex: "none", error: 1, toString: clrToString};
}, R);
/*\
* Raphael.hsb
[ method ]
**
* Converts HSB values to hex representation of the colour.
> Parameters
- h (number) hue
- s (number) saturation
- b (number) value or brightness
= (string) hex representation of the colour.
\*/
R.hsb = cacher(function (h, s, b) {
return R.hsb2rgb(h, s, b).hex;
});
/*\
* Raphael.hsl
[ method ]
**
* Converts HSL values to hex representation of the colour.
> Parameters
- h (number) hue
- s (number) saturation
- l (number) luminosity
= (string) hex representation of the colour.
\*/
R.hsl = cacher(function (h, s, l) {
return R.hsl2rgb(h, s, l).hex;
});
/*\
* Raphael.rgb
[ method ]
**
* Converts RGB values to hex representation of the colour.
> Parameters
- r (number) red
- g (number) green
- b (number) blue
= (string) hex representation of the colour.
\*/
R.rgb = cacher(function (r, g, b) {
return "#" + (16777216 | b | (g << 8) | (r << 16)).toString(16).slice(1);
});
/*\
* Raphael.getColor
[ method ]
**
* On each call returns next colour in the spectrum. To reset it back to red call @Raphael.getColor.reset
> Parameters
- value (number) #optional brightness, default is `0.75`
= (string) hex representation of the colour.
\*/
R.getColor = function (value) {
var start = this.getColor.start = this.getColor.start || {h: 0, s: 1, b: value || .75},
rgb = this.hsb2rgb(start.h, start.s, start.b);
start.h += .075;
if (start.h > 1) {
start.h = 0;
start.s -= .2;
start.s <= 0 && (this.getColor.start = {h: 0, s: 1, b: start.b});
}
return rgb.hex;
};
/*\
* Raphael.getColor.reset
[ method ]
**
* Resets spectrum position for @Raphael.getColor back to red.
\*/
R.getColor.reset = function () {
delete this.start;
};
// http://schepers.cc/getting-to-the-point
function catmullRom2bezier(crp, z) {
var d = [];
for (var i = 0, iLen = crp.length; iLen - 2 * !z > i; i += 2) {
var p = [
{x: +crp[i - 2], y: +crp[i - 1]},
{x: +crp[i], y: +crp[i + 1]},
{x: +crp[i + 2], y: +crp[i + 3]},
{x: +crp[i + 4], y: +crp[i + 5]}
];
if (z) {
if (!i) {
p[0] = {x: +crp[iLen - 2], y: +crp[iLen - 1]};
} else if (iLen - 4 == i) {
p[3] = {x: +crp[0], y: +crp[1]};
} else if (iLen - 2 == i) {
p[2] = {x: +crp[0], y: +crp[1]};
p[3] = {x: +crp[2], y: +crp[3]};
}
} else {
if (iLen - 4 == i) {
p[3] = p[2];
} else if (!i) {
p[0] = {x: +crp[i], y: +crp[i + 1]};
}
}
d.push(["C",
(-p[0].x + 6 * p[1].x + p[2].x) / 6,
(-p[0].y + 6 * p[1].y + p[2].y) / 6,
(p[1].x + 6 * p[2].x - p[3].x) / 6,
(p[1].y + 6*p[2].y - p[3].y) / 6,
p[2].x,
p[2].y
]);
}
return d;
}
/*\
* Raphael.parsePathString
[ method ]
**
* Utility method
**
* Parses given path string into an array of arrays of path segments.
> Parameters
- pathString (string|array) path string or array of segments (in the last case it will be returned straight away)
= (array) array of segments.
\*/
R.parsePathString = function (pathString) {
if (!pathString) {
return null;
}
var pth = paths(pathString);
if (pth.arr) {
return pathClone(pth.arr);
}
var paramCounts = {a: 7, c: 6, h: 1, l: 2, m: 2, r: 4, q: 4, s: 4, t: 2, v: 1, z: 0},
data = [];
if (R.is(pathString, array) && R.is(pathString[0], array)) { // rough assumption
data = pathClone(pathString);
}
if (!data.length) {
Str(pathString).replace(pathCommand, function (a, b, c) {
var params = [],
name = b.toLowerCase();
c.replace(pathValues, function (a, b) {
b && params.push(+b);
});
if (name == "m" && params.length > 2) {
data.push([b][concat](params.splice(0, 2)));
name = "l";
b = b == "m" ? "l" : "L";
}
if (name == "r") {
data.push([b][concat](params));
} else while (params.length >= paramCounts[name]) {
data.push([b][concat](params.splice(0, paramCounts[name])));
if (!paramCounts[name]) {
break;
}
}
});
}
data.toString = R._path2string;
pth.arr = pathClone(data);
return data;
};
/*\
* Raphael.parseTransformString
[ method ]
**
* Utility method
**
* Parses given path string into an array of transformations.
> Parameters
- TString (string|array) transform string or array of transformations (in the last case it will be returned straight away)
= (array) array of transformations.
\*/
R.parseTransformString = cacher(function (TString) {
if (!TString) {
return null;
}
var paramCounts = {r: 3, s: 4, t: 2, m: 6},
data = [];
if (R.is(TString, array) && R.is(TString[0], array)) { // rough assumption
data = pathClone(TString);
}
if (!data.length) {
Str(TString).replace(tCommand, function (a, b, c) {
var params = [],
name = lowerCase.call(b);
c.replace(pathValues, function (a, b) {
b && params.push(+b);
});
data.push([b][concat](params));
});
}
data.toString = R._path2string;
return data;
});
// PATHS
var paths = function (ps) {
var p = paths.ps = paths.ps || {};
if (p[ps]) {
p[ps].sleep = 100;
} else {
p[ps] = {
sleep: 100
};
}
setTimeout(function () {
for (var key in p) if (p[has](key) && key != ps) {
p[key].sleep--;
!p[key].sleep && delete p[key];
}
});
return p[ps];
};
/*\
* Raphael.findDotsAtSegment
[ method ]
**
* Utility method
**
* Find dot coordinates on the given cubic bezier curve at the given t.
> Parameters
- p1x (number) x of the first point of the curve
- p1y (number) y of the first point of the curve
- c1x (number) x of the first anchor of the curve
- c1y (number) y of the first anchor of the curve
- c2x (number) x of the second anchor of the curve
- c2y (number) y of the second anchor of the curve
- p2x (number) x of the second point of the curve
- p2y (number) y of the second point of the curve
- t (number) position on the curve (0..1)
= (object) point information in format:
o {
o x: (number) x coordinate of the point
o y: (number) y coordinate of the point
o m: {
o x: (number) x coordinate of the left anchor
o y: (number) y coordinate of the left anchor
o }
o n: {
o x: (number) x coordinate of the right anchor
o y: (number) y coordinate of the right anchor
o }
o start: {
o x: (number) x coordinate of the start of the curve
o y: (number) y coordinate of the start of the curve
o }
o end: {
o x: (number) x coordinate of the end of the curve
o y: (number) y coordinate of the end of the curve
o }
o alpha: (number) angle of the curve derivative at the point
o }
\*/
R.findDotsAtSegment = function (p1x, p1y, c1x, c1y, c2x, c2y, p2x, p2y, t) {
var t1 = 1 - t,
t13 = pow(t1, 3),
t12 = pow(t1, 2),
t2 = t * t,
t3 = t2 * t,
x = t13 * p1x + t12 * 3 * t * c1x + t1 * 3 * t * t * c2x + t3 * p2x,
y = t13 * p1y + t12 * 3 * t * c1y + t1 * 3 * t * t * c2y + t3 * p2y,
mx = p1x + 2 * t * (c1x - p1x) + t2 * (c2x - 2 * c1x + p1x),
my = p1y + 2 * t * (c1y - p1y) + t2 * (c2y - 2 * c1y + p1y),
nx = c1x + 2 * t * (c2x - c1x) + t2 * (p2x - 2 * c2x + c1x),
ny = c1y + 2 * t * (c2y - c1y) + t2 * (p2y - 2 * c2y + c1y),
ax = t1 * p1x + t * c1x,
ay = t1 * p1y + t * c1y,
cx = t1 * c2x + t * p2x,
cy = t1 * c2y + t * p2y,
alpha = (90 - math.atan2(mx - nx, my - ny) * 180 / PI);
(mx > nx || my < ny) && (alpha += 180);
return {
x: x,
y: y,
m: {x: mx, y: my},
n: {x: nx, y: ny},
start: {x: ax, y: ay},
end: {x: cx, y: cy},
alpha: alpha
};
};
/*\
* Raphael.bezierBBox
[ method ]
**
* Utility method
**
* Return bounding box of a given cubic bezier curve
> Parameters
- p1x (number) x of the first point of the curve
- p1y (number) y of the first point of the curve
- c1x (number) x of the first anchor of the curve
- c1y (number) y of the first anchor of the curve
- c2x (number) x of the second anchor of the curve
- c2y (number) y of the second anchor of the curve
- p2x (number) x of the second point of the curve
- p2y (number) y of the second point of the curve
* or
- bez (array) array of six points for bezier curve
= (object) point information in format:
o {
o min: {
o x: (number) x coordinate of the left point
o y: (number) y coordinate of the top point
o }
o max: {
o x: (number) x coordinate of the right point
o y: (number) y coordinate of the bottom point
o }
o }
\*/
R.bezierBBox = function (p1x, p1y, c1x, c1y, c2x, c2y, p2x, p2y) {
if (!R.is(p1x, "array")) {
p1x = [p1x, p1y, c1x, c1y, c2x, c2y, p2x, p2y];
}
var bbox = curveDim.apply(null, p1x);
return {
x: bbox.min.x,
y: bbox.min.y,
x2: bbox.max.x,
y2: bbox.max.y,
width: bbox.max.x - bbox.min.x,
height: bbox.max.y - bbox.min.y
};
};
/*\
* Raphael.isPointInsideBBox
[ method ]
**
* Utility method
**
* Returns `true` if given point is inside bounding boxes.
> Parameters
- bbox (string) bounding box
- x (string) x coordinate of the point
- y (string) y coordinate of the point
= (boolean) `true` if point inside
\*/
R.isPointInsideBBox = function (bbox, x, y) {
return x >= bbox.x && x <= bbox.x2 && y >= bbox.y && y <= bbox.y2;
};
/*\
* Raphael.isBBoxIntersect
[ method ]
**
* Utility method
**
* Returns `true` if two bounding boxes intersect
> Parameters
- bbox1 (string) first bounding box
- bbox2 (string) second bounding box
= (boolean) `true` if they intersect
\*/
R.isBBoxIntersect = function (bbox1, bbox2) {
var i = R.isPointInsideBBox;
return i(bbox2, bbox1.x, bbox1.y)
|| i(bbox2, bbox1.x2, bbox1.y)
|| i(bbox2, bbox1.x, bbox1.y2)
|| i(bbox2, bbox1.x2, bbox1.y2)
|| i(bbox1, bbox2.x, bbox2.y)
|| i(bbox1, bbox2.x2, bbox2.y)
|| i(bbox1, bbox2.x, bbox2.y2)
|| i(bbox1, bbox2.x2, bbox2.y2)
|| (bbox1.x < bbox2.x2 && bbox1.x > bbox2.x || bbox2.x < bbox1.x2 && bbox2.x > bbox1.x)
&& (bbox1.y < bbox2.y2 && bbox1.y > bbox2.y || bbox2.y < bbox1.y2 && bbox2.y > bbox1.y);
};
function base3(t, p1, p2, p3, p4) {
var t1 = -3 * p1 + 9 * p2 - 9 * p3 + 3 * p4,
t2 = t * t1 + 6 * p1 - 12 * p2 + 6 * p3;
return t * t2 - 3 * p1 + 3 * p2;
}
function bezlen(x1, y1, x2, y2, x3, y3, x4, y4, z) {
if (z == null) {
z = 1;
}
z = z > 1 ? 1 : z < 0 ? 0 : z;
var z2 = z / 2,
n = 12,
Tvalues = [-0.1252,0.1252,-0.3678,0.3678,-0.5873,0.5873,-0.7699,0.7699,-0.9041,0.9041,-0.9816,0.9816],
Cvalues = [0.2491,0.2491,0.2335,0.2335,0.2032,0.2032,0.1601,0.1601,0.1069,0.1069,0.0472,0.0472],
sum = 0;
for (var i = 0; i < n; i++) {
var ct = z2 * Tvalues[i] + z2,
xbase = base3(ct, x1, x2, x3, x4),
ybase = base3(ct, y1, y2, y3, y4),
comb = xbase * xbase + ybase * ybase;
sum += Cvalues[i] * math.sqrt(comb);
}
return z2 * sum;
}
function getTatLen(x1, y1, x2, y2, x3, y3, x4, y4, ll) {
if (ll < 0 || bezlen(x1, y1, x2, y2, x3, y3, x4, y4) < ll) {
return;
}
var t = 1,
step = t / 2,
t2 = t - step,
l,
e = .01;
l = bezlen(x1, y1, x2, y2, x3, y3, x4, y4, t2);
while (abs(l - ll) > e) {
step /= 2;
t2 += (l < ll ? 1 : -1) * step;
l = bezlen(x1, y1, x2, y2, x3, y3, x4, y4, t2);
}
return t2;
}
function intersect(x1, y1, x2, y2, x3, y3, x4, y4) {
if (
mmax(x1, x2) < mmin(x3, x4) ||
mmin(x1, x2) > mmax(x3, x4) ||
mmax(y1, y2) < mmin(y3, y4) ||
mmin(y1, y2) > mmax(y3, y4)
) {
return;
}
var nx = (x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4),
ny = (x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4),
denominator = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
if (!denominator) {
return;
}
var px = nx / denominator,
py = ny / denominator,
px2 = +px.toFixed(2),
py2 = +py.toFixed(2);
if (
px2 < +mmin(x1, x2).toFixed(2) ||
px2 > +mmax(x1, x2).toFixed(2) ||
px2 < +mmin(x3, x4).toFixed(2) ||
px2 > +mmax(x3, x4).toFixed(2) ||
py2 < +mmin(y1, y2).toFixed(2) ||
py2 > +mmax(y1, y2).toFixed(2) ||
py2 < +mmin(y3, y4).toFixed(2) ||
py2 > +mmax(y3, y4).toFixed(2)
) {
return;
}
return {x: px, y: py};
}
function inter(bez1, bez2) {
return interHelper(bez1, bez2);
}
function interCount(bez1, bez2) {
return interHelper(bez1, bez2, 1);
}
function interHelper(bez1, bez2, justCount) {
var bbox1 = R.bezierBBox(bez1),
bbox2 = R.bezierBBox(bez2);
if (!R.isBBoxIntersect(bbox1, bbox2)) {
return justCount ? 0 : [];
}
var l1 = bezlen.apply(0, bez1),
l2 = bezlen.apply(0, bez2),
n1 = mmax(~~(l1 / 5), 1),
n2 = mmax(~~(l2 / 5), 1),
dots1 = [],
dots2 = [],
xy = {},
res = justCount ? 0 : [];
for (var i = 0; i < n1 + 1; i++) {
var p = R.findDotsAtSegment.apply(R, bez1.concat(i / n1));
dots1.push({x: p.x, y: p.y, t: i / n1});
}
for (i = 0; i < n2 + 1; i++) {
p = R.findDotsAtSegment.apply(R, bez2.concat(i / n2));
dots2.push({x: p.x, y: p.y, t: i / n2});
}
for (i = 0; i < n1; i++) {
for (var j = 0; j < n2; j++) {
var di = dots1[i],
di1 = dots1[i + 1],
dj = dots2[j],
dj1 = dots2[j + 1],
ci = abs(di1.x - di.x) < .001 ? "y" : "x",
cj = abs(dj1.x - dj.x) < .001 ? "y" : "x",
is = intersect(di.x, di.y, di1.x, di1.y, dj.x, dj.y, dj1.x, dj1.y);
if (is) {
if (xy[is.x.toFixed(4)] == is.y.toFixed(4)) {
continue;
}
xy[is.x.toFixed(4)] = is.y.toFixed(4);
var t1 = di.t + abs((is[ci] - di[ci]) / (di1[ci] - di[ci])) * (di1.t - di.t),
t2 = dj.t + abs((is[cj] - dj[cj]) / (dj1[cj] - dj[cj])) * (dj1.t - dj.t);
if (t1 >= 0 && t1 <= 1.001 && t2 >= 0 && t2 <= 1.001) {
if (justCount) {
res++;
} else {
res.push({
x: is.x,
y: is.y,
t1: mmin(t1, 1),
t2: mmin(t2, 1)
});
}
}
}
}
}
return res;
}
/*\
* Raphael.pathIntersection
[ method ]
**
* Utility method
**
* Finds intersections of two paths
> Parameters
- path1 (string) path string
- path2 (string) path string
= (array) dots of intersection
o [
o {
o x: (number) x coordinate of the point
o y: (number) y coordinate of the point
o t1: (number) t value for segment of path1
o t2: (number) t value for segment of path2
o segment1: (number) order number for segment of path1
o segment2: (number) order number for segment of path2
o bez1: (array) eight coordinates representing beziér curve for the segment of path1
o bez2: (array) eight coordinates representing beziér curve for the segment of path2
o }
o ]
\*/
R.pathIntersection = function (path1, path2) {
return interPathHelper(path1, path2);
};
R.pathIntersectionNumber = function (path1, path2) {
return interPathHelper(path1, path2, 1);
};
function interPathHelper(path1, path2, justCount) {
path1 = R._path2curve(path1);
path2 = R._path2curve(path2);
var x1, y1, x2, y2, x1m, y1m, x2m, y2m, bez1, bez2,
res = justCount ? 0 : [];
for (var i = 0, ii = path1.length; i < ii; i++) {
var pi = path1[i];
if (pi[0] == "M") {
x1 = x1m = pi[1];
y1 = y1m = pi[2];
} else {
if (pi[0] == "C") {
bez1 = [x1, y1].concat(pi.slice(1));
x1 = bez1[6];
y1 = bez1[7];
} else {
bez1 = [x1, y1, x1, y1, x1m, y1m, x1m, y1m];
x1 = x1m;
y1 = y1m;
}
for (var j = 0, jj = path2.length; j < jj; j++) {
var pj = path2[j];
if (pj[0] == "M") {
x2 = x2m = pj[1];
y2 = y2m = pj[2];
} else {
if (pj[0] == "C") {
bez2 = [x2, y2].concat(pj.slice(1));
x2 = bez2[6];
y2 = bez2[7];
} else {
bez2 = [x2, y2, x2, y2, x2m, y2m, x2m, y2m];
x2 = x2m;
y2 = y2m;
}
var intr = interHelper(bez1, bez2, justCount);
if (justCount) {
res += intr;
} else {
for (var k = 0, kk = intr.length; k < kk; k++) {
intr[k].segment1 = i;
intr[k].segment2 = j;
intr[k].bez1 = bez1;
intr[k].bez2 = bez2;
}
res = res.concat(intr);
}
}
}
}
}
return res;
}
/*\
* Raphael.isPointInsidePath
[ method ]
**
* Utility method
**
* Returns `true` if given point is inside a given closed path.
> Parameters
- path (string) path string
- x (number) x of the point
- y (number) y of the point
= (boolean) true, if point is inside the path
\*/
R.isPointInsidePath = function (path, x, y) {
var bbox = R.pathBBox(path);
return R.isPointInsideBBox(bbox, x, y) &&
interPathHelper(path, [["M", x, y], ["H", bbox.x2 + 10]], 1) % 2 == 1;
};
R._removedFactory = function (methodname) {
return function () {
eve("raphael.log", null, "Rapha\xebl: you are calling to method \u201c" + methodname + "\u201d of removed object", methodname);
};
};
/*\
* Raphael.pathBBox
[ method ]
**
* Utility method
**
* Return bounding box of a given path
> Parameters
- path (string) path string
= (object) bounding box
o {
o x: (number) x coordinate of the left top point of the box
o y: (number) y coordinate of the left top point of the box
o x2: (number) x coordinate of the right bottom point of the box
o y2: (number) y coordinate of the right bottom point of the box
o width: (number) width of the box
o height: (number) height of the box
o cx: (number) x coordinate of the center of the box
o cy: (number) y coordinate of the center of the box
o }
\*/
    var pathDimensions = R.pathBBox = function (path) {
        var pth = paths(path);
        // Serve from the per-path cache when available.
        if (pth.bbox) {
            return clone(pth.bbox);
        }
        if (!path) {
            return {x: 0, y: 0, width: 0, height: 0, x2: 0, y2: 0};
        }
        // Normalise to absolute cubics so only "M" and "C" segments remain.
        path = path2curve(path);
        var x = 0,
            y = 0,
            X = [],
            Y = [],
            p;
        for (var i = 0, ii = path.length; i < ii; i++) {
            p = path[i];
            if (p[0] == "M") {
                x = p[1];
                y = p[2];
                X.push(x);
                Y.push(y);
            } else {
                // Collect the true extremes of the cubic segment — the
                // curve can bulge beyond its endpoints.
                var dim = curveDim(x, y, p[1], p[2], p[3], p[4], p[5], p[6]);
                X = X[concat](dim.min.x, dim.max.x);
                Y = Y[concat](dim.min.y, dim.max.y);
                x = p[5];
                y = p[6];
            }
        }
        var xmin = mmin[apply](0, X),
            ymin = mmin[apply](0, Y),
            xmax = mmax[apply](0, X),
            ymax = mmax[apply](0, Y),
            width = xmax - xmin,
            height = ymax - ymin,
            bb = {
                x: xmin,
                y: ymin,
                x2: xmax,
                y2: ymax,
                width: width,
                height: height,
                cx: xmin + width / 2,
                cy: ymin + height / 2
            };
        // Cache a clone so later callers cannot mutate the cached copy.
        pth.bbox = clone(bb);
        return bb;
    },
    // Deep-copy a parsed path array and re-attach the string serialiser.
    pathClone = function (pathArray) {
        var res = clone(pathArray);
        res.toString = R._path2string;
        return res;
    },
    // Convert a path (string or parsed array) to all-relative (lowercase)
    // commands. Result is cached per input on the paths() record.
    pathToRelative = R._pathToRelative = function (pathArray) {
        var pth = paths(pathArray);
        if (pth.rel) {
            return pathClone(pth.rel);
        }
        if (!R.is(pathArray, array) || !R.is(pathArray && pathArray[0], array)) { // rough assumption
            pathArray = R.parsePathString(pathArray);
        }
        // (x, y) is the running pen position; (mx, my) the last moveto,
        // needed because "z" returns the pen to it.
        var res = [],
            x = 0,
            y = 0,
            mx = 0,
            my = 0,
            start = 0;
        // A leading absolute M stays absolute by convention.
        if (pathArray[0][0] == "M") {
            x = pathArray[0][1];
            y = pathArray[0][2];
            mx = x;
            my = y;
            start++;
            res.push(["M", x, y]);
        }
        for (var i = start, ii = pathArray.length; i < ii; i++) {
            var r = res[i] = [],
                pa = pathArray[i];
            // Absolute command (uppercase): translate it by the pen position.
            if (pa[0] != lowerCase.call(pa[0])) {
                r[0] = lowerCase.call(pa[0]);
                switch (r[0]) {
                    case "a":
                        // Arc: only the endpoint is a coordinate; the radii,
                        // rotation and flags pass through untouched.
                        r[1] = pa[1];
                        r[2] = pa[2];
                        r[3] = pa[3];
                        r[4] = pa[4];
                        r[5] = pa[5];
                        r[6] = +(pa[6] - x).toFixed(3);
                        r[7] = +(pa[7] - y).toFixed(3);
                        break;
                    case "v":
                        r[1] = +(pa[1] - y).toFixed(3);
                        break;
                    case "m":
                        // Track the subpath origin, then fall through to the
                        // generic coordinate translation below.
                        mx = pa[1];
                        my = pa[2];
                    default:
                        // Odd indices are x values, even indices y values.
                        for (var j = 1, jj = pa.length; j < jj; j++) {
                            r[j] = +(pa[j] - ((j % 2) ? x : y)).toFixed(3);
                        }
                }
            } else {
                // Already relative: copy verbatim, but still track moveto.
                r = res[i] = [];
                if (pa[0] == "m") {
                    mx = pa[1] + x;
                    my = pa[2] + y;
                }
                for (var k = 0, kk = pa.length; k < kk; k++) {
                    res[i][k] = pa[k];
                }
            }
            // Advance the pen using the segment just emitted.
            var len = res[i].length;
            switch (res[i][0]) {
                case "z":
                    x = mx;
                    y = my;
                    break;
                case "h":
                    x += +res[i][len - 1];
                    break;
                case "v":
                    y += +res[i][len - 1];
                    break;
                default:
                    x += +res[i][len - 2];
                    y += +res[i][len - 1];
            }
        }
        res.toString = R._path2string;
        pth.rel = pathClone(res);
        return res;
    },
    // Convert a path (string or parsed array) to all-absolute (uppercase)
    // commands, expanding Raphael's non-standard "R" (Catmull-Rom) command
    // to cubic beziers. Result is cached per input on the paths() record.
    pathToAbsolute = R._pathToAbsolute = function (pathArray) {
        var pth = paths(pathArray);
        if (pth.abs) {
            return pathClone(pth.abs);
        }
        if (!R.is(pathArray, array) || !R.is(pathArray && pathArray[0], array)) { // rough assumption
            pathArray = R.parsePathString(pathArray);
        }
        if (!pathArray || !pathArray.length) {
            return [["M", 0, 0]];
        }
        // (x, y) is the running pen position; (mx, my) the last moveto.
        var res = [],
            x = 0,
            y = 0,
            mx = 0,
            my = 0,
            start = 0;
        if (pathArray[0][0] == "M") {
            x = +pathArray[0][1];
            y = +pathArray[0][2];
            mx = x;
            my = y;
            start++;
            res[0] = ["M", x, y];
        }
        // crz: the whole path is "M R Z" — a closed Catmull-Rom curve.
        var crz = pathArray.length == 3 && pathArray[0][0] == "M" && pathArray[1][0].toUpperCase() == "R" && pathArray[2][0].toUpperCase() == "Z";
        for (var r, pa, i = start, ii = pathArray.length; i < ii; i++) {
            res.push(r = []);
            pa = pathArray[i];
            // Relative command (lowercase): translate it by the pen position.
            if (pa[0] != upperCase.call(pa[0])) {
                r[0] = upperCase.call(pa[0]);
                switch (r[0]) {
                    case "A":
                        // Arc: only the endpoint is translated; radii,
                        // rotation and flags pass through untouched.
                        r[1] = pa[1];
                        r[2] = pa[2];
                        r[3] = pa[3];
                        r[4] = pa[4];
                        r[5] = pa[5];
                        r[6] = +(pa[6] + x);
                        r[7] = +(pa[7] + y);
                        break;
                    case "V":
                        r[1] = +pa[1] + y;
                        break;
                    case "H":
                        r[1] = +pa[1] + x;
                        break;
                    case "R":
                        // Catmull-Rom: prepend the pen, translate every pair,
                        // and replace the segment with cubic beziers.
                        var dots = [x, y][concat](pa.slice(1));
                        for (var j = 2, jj = dots.length; j < jj; j++) {
                            dots[j] = +dots[j] + x;
                            dots[++j] = +dots[j] + y;
                        }
                        res.pop();
                        res = res[concat](catmullRom2bezier(dots, crz));
                        break;
                    case "M":
                        // Track the subpath origin, then fall through to the
                        // generic coordinate translation below.
                        mx = +pa[1] + x;
                        my = +pa[2] + y;
                    default:
                        // Odd indices are x values, even indices y values.
                        for (j = 1, jj = pa.length; j < jj; j++) {
                            r[j] = +pa[j] + ((j % 2) ? x : y);
                        }
                }
            } else if (pa[0] == "R") {
                // Absolute Catmull-Rom: expand it the same way, keeping the
                // final pair so the pen update below still works.
                dots = [x, y][concat](pa.slice(1));
                res.pop();
                res = res[concat](catmullRom2bezier(dots, crz));
                r = ["R"][concat](pa.slice(-2));
            } else {
                // Already absolute: copy verbatim.
                for (var k = 0, kk = pa.length; k < kk; k++) {
                    r[k] = pa[k];
                }
            }
            // Advance the pen; "M" falls through to also set (x, y).
            switch (r[0]) {
                case "Z":
                    x = mx;
                    y = my;
                    break;
                case "H":
                    x = r[1];
                    break;
                case "V":
                    y = r[1];
                    break;
                case "M":
                    mx = r[r.length - 2];
                    my = r[r.length - 1];
                default:
                    x = r[r.length - 2];
                    y = r[r.length - 1];
            }
        }
        res.toString = R._path2string;
        pth.abs = pathClone(res);
        return res;
    },
    // Express the line (x1,y1)->(x2,y2) as an equivalent cubic bezier:
    // control 1 at the start point, control 2 and endpoint at the end.
    l2c = function (x1, y1, x2, y2) {
        return [x1, y1, x2, y2, x2, y2];
    },
    // Degree-elevate a quadratic bezier (control point ax,ay) to a cubic:
    // each cubic control sits 2/3 of the way from an endpoint toward the
    // quadratic control point, leaving the curve geometrically unchanged.
    q2c = function (x1, y1, ax, ay, x2, y2) {
        var _13 = 1 / 3,
            _23 = 2 / 3;
        return [
            _13 * x1 + _23 * ax,
            _13 * y1 + _23 * ay,
            _13 * x2 + _23 * ax,
            _13 * y2 + _23 * ay,
            x2,
            y2
        ];
    },
    // Convert an SVG elliptical-arc segment to one or more cubic beziers.
    // The arc is split recursively into pieces of at most 120 degrees so a
    // single cubic approximates each piece well; `recursive` carries
    // [f1, f2, cx, cy] (angles and centre) between recursion levels.
    a2c = function (x1, y1, rx, ry, angle, large_arc_flag, sweep_flag, x2, y2, recursive) {
        // for more information of where this math came from visit:
        // http://www.w3.org/TR/SVG11/implnote.html#ArcImplementationNotes
        var _120 = PI * 120 / 180,
            rad = PI / 180 * (+angle || 0),
            res = [],
            xy,
            rotate = cacher(function (x, y, rad) {
                var X = x * math.cos(rad) - y * math.sin(rad),
                    Y = x * math.sin(rad) + y * math.cos(rad);
                return {x: X, y: Y};
            });
        if (!recursive) {
            // Top-level call: un-rotate the endpoints so the ellipse is
            // axis-aligned for the centre computation.
            xy = rotate(x1, y1, -rad);
            x1 = xy.x;
            y1 = xy.y;
            xy = rotate(x2, y2, -rad);
            x2 = xy.x;
            y2 = xy.y;
            var cos = math.cos(PI / 180 * angle),
                sin = math.sin(PI / 180 * angle),
                x = (x1 - x2) / 2,
                y = (y1 - y2) / 2;
            // Per the SVG spec, scale up radii too small to span the chord.
            var h = (x * x) / (rx * rx) + (y * y) / (ry * ry);
            if (h > 1) {
                h = math.sqrt(h);
                rx = h * rx;
                ry = h * ry;
            }
            var rx2 = rx * rx,
                ry2 = ry * ry,
                k = (large_arc_flag == sweep_flag ? -1 : 1) *
                    math.sqrt(abs((rx2 * ry2 - rx2 * y * y - ry2 * x * x) / (rx2 * y * y + ry2 * x * x))),
                cx = k * rx * y / ry + (x1 + x2) / 2,
                cy = k * -ry * x / rx + (y1 + y2) / 2,
                f1 = math.asin(((y1 - cy) / ry).toFixed(9)),
                f2 = math.asin(((y2 - cy) / ry).toFixed(9));
            // Resolve asin ambiguity and normalise the start/end angles.
            f1 = x1 < cx ? PI - f1 : f1;
            f2 = x2 < cx ? PI - f2 : f2;
            f1 < 0 && (f1 = PI * 2 + f1);
            f2 < 0 && (f2 = PI * 2 + f2);
            if (sweep_flag && f1 > f2) {
                f1 = f1 - PI * 2;
            }
            if (!sweep_flag && f2 > f1) {
                f2 = f2 - PI * 2;
            }
        } else {
            f1 = recursive[0];
            f2 = recursive[1];
            cx = recursive[2];
            cy = recursive[3];
        }
        var df = f2 - f1;
        // Spans over 120°: emit the first 120° here and recurse for the rest.
        if (abs(df) > _120) {
            var f2old = f2,
                x2old = x2,
                y2old = y2;
            f2 = f1 + _120 * (sweep_flag && f2 > f1 ? 1 : -1);
            x2 = cx + rx * math.cos(f2);
            y2 = cy + ry * math.sin(f2);
            res = a2c(x2, y2, rx, ry, angle, 0, sweep_flag, x2old, y2old, [f2, f2old, cx, cy]);
        }
        df = f2 - f1;
        // Cubic approximation of the (<= 120°) arc piece.
        var c1 = math.cos(f1),
            s1 = math.sin(f1),
            c2 = math.cos(f2),
            s2 = math.sin(f2),
            t = math.tan(df / 4),
            hx = 4 / 3 * rx * t,
            hy = 4 / 3 * ry * t,
            m1 = [x1, y1],
            m2 = [x1 + hx * s1, y1 - hy * c1],
            m3 = [x2 + hx * s2, y2 - hy * c2],
            m4 = [x2, y2];
        m2[0] = 2 * m1[0] - m2[0];
        m2[1] = 2 * m1[1] - m2[1];
        if (recursive) {
            return [m2, m3, m4][concat](res);
        } else {
            // Outermost call: flatten and rotate everything back.
            res = [m2, m3, m4][concat](res).join()[split](",");
            var newres = [];
            for (var i = 0, ii = res.length; i < ii; i++) {
                newres[i] = i % 2 ? rotate(res[i - 1], res[i], rad).y : rotate(res[i], res[i + 1], rad).x;
            }
            return newres;
        }
    },
    // Plain cubic-bezier point evaluation (Bernstein form) at parameter t;
    // lightweight companion to R.findDotsAtSegment when only the point —
    // not anchors or tangent angle — is needed.
    findDotAtSegment = function (p1x, p1y, c1x, c1y, c2x, c2y, p2x, p2y, t) {
        var t1 = 1 - t;
        return {
            x: pow(t1, 3) * p1x + pow(t1, 2) * 3 * t * c1x + t1 * 3 * t * t * c2x + pow(t, 3) * p2x,
            y: pow(t1, 3) * p1y + pow(t1, 2) * 3 * t * c1y + t1 * 3 * t * t * c2y + pow(t, 3) * p2y
        };
    },
    // Extremes of a cubic bezier: candidates are the two endpoints plus any
    // interior roots of dx/dt = 0 and dy/dt = 0 (solved as quadratics),
    // evaluated on the curve. Memoised via cacher.
    curveDim = cacher(function (p1x, p1y, c1x, c1y, c2x, c2y, p2x, p2y) {
        var a = (c2x - 2 * c1x + p1x) - (p2x - 2 * c2x + c1x),
            b = 2 * (c1x - p1x) - 2 * (c2x - c1x),
            c = p1x - c1x,
            t1 = (-b + math.sqrt(b * b - 4 * a * c)) / 2 / a,
            t2 = (-b - math.sqrt(b * b - 4 * a * c)) / 2 / a,
            y = [p1y, p2y],
            x = [p1x, p2x],
            dot;
        // String RHS is coerced to the number 1e12; guards the degenerate
        // a ≈ 0 case where the quadratic roots blow up (also covers NaN,
        // since NaN > 1e12 is false and NaN roots are simply never used).
        abs(t1) > "1e12" && (t1 = .5);
        abs(t2) > "1e12" && (t2 = .5);
        if (t1 > 0 && t1 < 1) {
            dot = findDotAtSegment(p1x, p1y, c1x, c1y, c2x, c2y, p2x, p2y, t1);
            x.push(dot.x);
            y.push(dot.y);
        }
        if (t2 > 0 && t2 < 1) {
            dot = findDotAtSegment(p1x, p1y, c1x, c1y, c2x, c2y, p2x, p2y, t2);
            x.push(dot.x);
            y.push(dot.y);
        }
        // Repeat for the y-derivative roots.
        a = (c2y - 2 * c1y + p1y) - (p2y - 2 * c2y + c1y);
        b = 2 * (c1y - p1y) - 2 * (c2y - c1y);
        c = p1y - c1y;
        t1 = (-b + math.sqrt(b * b - 4 * a * c)) / 2 / a;
        t2 = (-b - math.sqrt(b * b - 4 * a * c)) / 2 / a;
        abs(t1) > "1e12" && (t1 = .5);
        abs(t2) > "1e12" && (t2 = .5);
        if (t1 > 0 && t1 < 1) {
            dot = findDotAtSegment(p1x, p1y, c1x, c1y, c2x, c2y, p2x, p2y, t1);
            x.push(dot.x);
            y.push(dot.y);
        }
        if (t2 > 0 && t2 < 1) {
            dot = findDotAtSegment(p1x, p1y, c1x, c1y, c2x, c2y, p2x, p2y, t2);
            x.push(dot.x);
            y.push(dot.y);
        }
        return {
            min: {x: mmin[apply](0, x), y: mmin[apply](0, y)},
            max: {x: mmax[apply](0, x), y: mmax[apply](0, y)}
        };
    }),
    // Normalise one path — or a pair of paths for animation morphing — to
    // absolute "M" + "C" segments only. With two paths, both are padded so
    // they have the same number of segments. Memoised via cacher (result
    // cloned through pathClone); the single-path form is also cached on the
    // paths() record.
    path2curve = R._path2curve = cacher(function (path, path2) {
        var pth = !path2 && paths(path);
        if (!path2 && pth.curve) {
            return pathClone(pth.curve);
        }
        var p = pathToAbsolute(path),
            p2 = path2 && pathToAbsolute(path2),
            // Per-path running state: pen (x, y), previous control (bx, by),
            // subpath start (X, Y), and last quadratic control (qx, qy).
            attrs = {x: 0, y: 0, bx: 0, by: 0, X: 0, Y: 0, qx: null, qy: null},
            attrs2 = {x: 0, y: 0, bx: 0, by: 0, X: 0, Y: 0, qx: null, qy: null},
            // Rewrite one segment as a "C" (or leave "M" alone).
            // NOTE(review): `pcom` is never supplied by the call sites in the
            // loop below, so the C/S and Q/T reflection branches appear
            // unreachable here — matches a known upstream smooth-curve
            // issue; confirm before relying on S/T reflection.
            processPath = function (path, d, pcom) {
                var nx, ny, tq = {T:1, Q:1};
                if (!path) {
                    return ["C", d.x, d.y, d.x, d.y, d.x, d.y];
                }
                // Any non-Q/T command invalidates the stored quad control.
                !(path[0] in tq) && (d.qx = d.qy = null);
                switch (path[0]) {
                    case "M":
                        d.X = path[1];
                        d.Y = path[2];
                        break;
                    case "A":
                        path = ["C"][concat](a2c[apply](0, [d.x, d.y][concat](path.slice(1))));
                        break;
                    case "S":
                        if (pcom == "C" || pcom == "S") { // In "S" case we have to take into account, if the previous command is C/S.
                            nx = d.x * 2 - d.bx; // And reflect the previous
                            ny = d.y * 2 - d.by; // command's control point relative to the current point.
                        }
                        else { // or some else or nothing
                            nx = d.x;
                            ny = d.y;
                        }
                        path = ["C", nx, ny][concat](path.slice(1));
                        break;
                    case "T":
                        if (pcom == "Q" || pcom == "T") { // In "T" case we have to take into account, if the previous command is Q/T.
                            d.qx = d.x * 2 - d.qx; // And make a reflection similar
                            d.qy = d.y * 2 - d.qy; // to case "S".
                        }
                        else { // or something else or nothing
                            d.qx = d.x;
                            d.qy = d.y;
                        }
                        path = ["C"][concat](q2c(d.x, d.y, d.qx, d.qy, path[1], path[2]));
                        break;
                    case "Q":
                        d.qx = path[1];
                        d.qy = path[2];
                        path = ["C"][concat](q2c(d.x, d.y, path[1], path[2], path[3], path[4]));
                        break;
                    case "L":
                        path = ["C"][concat](l2c(d.x, d.y, path[1], path[2]));
                        break;
                    case "H":
                        path = ["C"][concat](l2c(d.x, d.y, path[1], d.y));
                        break;
                    case "V":
                        path = ["C"][concat](l2c(d.x, d.y, d.x, path[1]));
                        break;
                    case "Z":
                        path = ["C"][concat](l2c(d.x, d.y, d.X, d.Y));
                        break;
                }
                return path;
            },
            // Arcs may expand to several cubics: split the long segment into
            // individual "C" segments and refresh the loop bound `ii`.
            fixArc = function (pp, i) {
                if (pp[i].length > 7) {
                    pp[i].shift();
                    var pi = pp[i];
                    while (pi.length) {
                        pp.splice(i++, 0, ["C"][concat](pi.splice(0, 6)));
                    }
                    pp.splice(i, 1);
                    ii = mmax(p.length, p2 && p2.length || 0);
                }
            },
            // Keep paired paths aligned: when one has an "M" the other gets
            // a matching "M" inserted at its current pen position.
            fixM = function (path1, path2, a1, a2, i) {
                if (path1 && path2 && path1[i][0] == "M" && path2[i][0] != "M") {
                    path2.splice(i, 0, ["M", a2.x, a2.y]);
                    a1.bx = 0;
                    a1.by = 0;
                    a1.x = path1[i][1];
                    a1.y = path1[i][2];
                    ii = mmax(p.length, p2 && p2.length || 0);
                }
            };
        for (var i = 0, ii = mmax(p.length, p2 && p2.length || 0); i < ii; i++) {
            p[i] = processPath(p[i], attrs);
            fixArc(p, i);
            p2 && (p2[i] = processPath(p2[i], attrs2));
            p2 && fixArc(p2, i);
            fixM(p, p2, attrs, attrs2, i);
            fixM(p2, p, attrs2, attrs, i);
            // Update each path's running state from the emitted segment.
            var seg = p[i],
                seg2 = p2 && p2[i],
                seglen = seg.length,
                seg2len = p2 && seg2.length;
            attrs.x = seg[seglen - 2];
            attrs.y = seg[seglen - 1];
            attrs.bx = toFloat(seg[seglen - 4]) || attrs.x;
            attrs.by = toFloat(seg[seglen - 3]) || attrs.y;
            attrs2.bx = p2 && (toFloat(seg2[seg2len - 4]) || attrs2.x);
            attrs2.by = p2 && (toFloat(seg2[seg2len - 3]) || attrs2.y);
            attrs2.x = p2 && seg2[seg2len - 2];
            attrs2.y = p2 && seg2[seg2len - 1];
        }
        if (!p2) {
            pth.curve = pathClone(p);
        }
        return p2 ? [p, p2] : p;
    }, null, pathClone),
    // Parses gradient stops given as "colour[:offset]" items into an array of
    // {color, offset} dots. Stops lacking an explicit offset are spread
    // evenly between their explicitly positioned neighbours (or 0/100%).
    // Returns null when any colour fails to parse.
    parseDots = R._parseDots = cacher(function (gradient) {
        var dots = [];
        for (var i = 0, ii = gradient.length; i < ii; i++) {
            var dot = {},
                par = gradient[i].match(/^([^:]*):?([\d\.]*)/);
            dot.color = R.getRGB(par[1]);
            if (dot.color.error) {
                // Unparsable colour invalidates the whole gradient.
                return null;
            }
            dot.color = dot.color.hex;
            par[2] && (dot.offset = par[2] + "%");
            dots.push(dot);
        }
        // Interpolate offsets for interior runs that did not specify one.
        for (i = 1, ii = dots.length - 1; i < ii; i++) {
            if (!dots[i].offset) {
                var start = toFloat(dots[i - 1].offset || 0),
                    end = 0;
                // Find the next explicitly positioned dot.
                for (var j = i + 1; j < ii; j++) {
                    if (dots[j].offset) {
                        end = dots[j].offset;
                        break;
                    }
                }
                if (!end) {
                    // No later anchor: interpolate up to 100%.
                    end = 100;
                    j = ii;
                }
                end = toFloat(end);
                var d = (end - start) / (j - i + 1);
                for (; i < j; i++) {
                    start += d;
                    dots[i].offset = start + "%";
                }
            }
        }
        return dots;
    }),
    // Detaches `el` from the paper's doubly-linked display list, patching the
    // paper's top/bottom pointers and the neighbours' links.
    tear = R._tear = function (el, paper) {
        el == paper.top && (paper.top = el.prev);
        el == paper.bottom && (paper.bottom = el.next);
        el.next && (el.next.prev = el.prev);
        el.prev && (el.prev.next = el.next);
    },
    // Moves `el` to the top of the display list (rendered last / frontmost).
    tofront = R._tofront = function (el, paper) {
        if (paper.top === el) {
            return;
        }
        tear(el, paper);
        el.next = null;
        el.prev = paper.top;
        paper.top.next = el;
        paper.top = el;
    },
    // Moves `el` to the bottom of the display list (rendered first / backmost).
    toback = R._toback = function (el, paper) {
        if (paper.bottom === el) {
            return;
        }
        tear(el, paper);
        el.next = paper.bottom;
        el.prev = null;
        paper.bottom.prev = el;
        paper.bottom = el;
    },
    // Re-links `el` so it sits immediately after `el2` in the display list.
    insertafter = R._insertafter = function (el, el2, paper) {
        tear(el, paper);
        el2 == paper.top && (paper.top = el);
        el2.next && (el2.next.prev = el);
        el.next = el2.next;
        el.prev = el2;
        el2.next = el;
    },
    // Re-links `el` so it sits immediately before `el2` in the display list.
    insertbefore = R._insertbefore = function (el, el2, paper) {
        tear(el, paper);
        el2 == paper.bottom && (paper.bottom = el);
        el2.prev && (el2.prev.next = el);
        el.prev = el2.prev;
        el2.prev = el;
        el.next = el2;
    },
/*\
* Raphael.toMatrix
[ method ]
**
* Utility method
**
* Returns matrix of transformations applied to a given path
> Parameters
- path (string) path string
- transform (string|array) transformation string
= (object) @Matrix
\*/
    toMatrix = R.toMatrix = function (path, transform) {
        // Build a stub element exposing just what extractTransform needs:
        // an empty current transform and the path's bounding box (used for
        // default rotation/scale pivots).
        var bb = pathDimensions(path),
            el = {
                _: {
                    transform: E
                },
                getBBox: function () {
                    return bb;
                }
            };
        extractTransform(el, transform);
        return el.matrix;
    },
/*\
* Raphael.transformPath
[ method ]
**
* Utility method
**
* Returns path transformed by a given transformation
> Parameters
- path (string) path string
- transform (string|array) transformation string
= (string) path
\*/
    transformPath = R.transformPath = function (path, transform) {
        // Map every point of the path through the matrix described by
        // `transform` (relative to this path's bounding box).
        return mapPath(path, toMatrix(path, transform));
    },
    // Parses a transform specification for `el`, accumulates it into a Matrix
    // (stored on el.matrix) and records running translation/scale/rotation
    // totals on el._. "..." (or the ellipsis character) inside tstr expands
    // to the element's current transform. Called with tstr == null it acts as
    // a getter for the stored transform.
    extractTransform = R._extractTransform = function (el, tstr) {
        if (tstr == null) {
            return el._.transform;
        }
        tstr = Str(tstr).replace(/\.{3}|\u2026/g, el._.transform || E);
        var tdata = R.parseTransformString(tstr),
            deg = 0,
            dx = 0,
            dy = 0,
            sx = 1,
            sy = 1,
            _ = el._,
            m = new Matrix;
        _.transform = tdata || [];
        if (tdata) {
            for (var i = 0, ii = tdata.length; i < ii; i++) {
                var t = tdata[i],
                    tlen = t.length,
                    command = Str(t[0]).toLowerCase(),
                    // Upper-case command means absolute: its coordinates must
                    // be pulled back through the inverse of the transform
                    // accumulated so far.
                    absolute = t[0] != command,
                    inver = absolute ? m.invert() : 0,
                    x1,
                    y1,
                    x2,
                    y2,
                    bb;
                if (command == "t" && tlen == 3) {
                    if (absolute) {
                        x1 = inver.x(0, 0);
                        y1 = inver.y(0, 0);
                        x2 = inver.x(t[1], t[2]);
                        y2 = inver.y(t[1], t[2]);
                        m.translate(x2 - x1, y2 - y1);
                    } else {
                        m.translate(t[1], t[2]);
                    }
                } else if (command == "r") {
                    if (tlen == 2) {
                        // No pivot supplied: rotate about the bbox centre.
                        bb = bb || el.getBBox(1);
                        m.rotate(t[1], bb.x + bb.width / 2, bb.y + bb.height / 2);
                        deg += t[1];
                    } else if (tlen == 4) {
                        if (absolute) {
                            x2 = inver.x(t[2], t[3]);
                            y2 = inver.y(t[2], t[3]);
                            m.rotate(t[1], x2, y2);
                        } else {
                            m.rotate(t[1], t[2], t[3]);
                        }
                        deg += t[1];
                    }
                } else if (command == "s") {
                    if (tlen == 2 || tlen == 3) {
                        // No pivot supplied: scale about the bbox centre.
                        bb = bb || el.getBBox(1);
                        m.scale(t[1], t[tlen - 1], bb.x + bb.width / 2, bb.y + bb.height / 2);
                        sx *= t[1];
                        sy *= t[tlen - 1];
                    } else if (tlen == 5) {
                        if (absolute) {
                            x2 = inver.x(t[3], t[4]);
                            y2 = inver.y(t[3], t[4]);
                            m.scale(t[1], t[2], x2, y2);
                        } else {
                            m.scale(t[1], t[2], t[3], t[4]);
                        }
                        sx *= t[1];
                        sy *= t[2];
                    }
                } else if (command == "m" && tlen == 7) {
                    m.add(t[1], t[2], t[3], t[4], t[5], t[6]);
                }
                _.dirtyT = 1;
                el.matrix = m;
            }
        }
        /*\
         * Element.matrix
         [ property (object) ]
         **
         * Keeps @Matrix object, which represents element transformation
        \*/
        el.matrix = m;
        _.sx = sx;
        _.sy = sy;
        _.deg = deg;
        _.dx = dx = m.e;
        _.dy = dy = m.f;
        // A pure translation can patch the cached bbox in place; anything
        // else marks the transform-dependent cache dirty.
        if (sx == 1 && sy == 1 && !deg && _.bbox) {
            _.bbox.x += +dx;
            _.bbox.y += +dy;
        } else {
            _.dirtyT = 1;
        }
    },
    // Returns the identity transform entry matching the shape (command and
    // pivot arity) of `item`; used by equaliseTransform to pad the shorter
    // transform list so from/to stay interpolatable.
    getEmpty = function (item) {
        var l = item[0];
        switch (l.toLowerCase()) {
            case "t": return [l, 0, 0];
            case "m": return [l, 1, 0, 0, 1, 0, 0];
            case "r": if (item.length == 4) {
                // Preserve the rotation pivot so from/to pivots match.
                return [l, 0, item[2], item[3]];
            } else {
                return [l, 0];
            }
            case "s": if (item.length == 5) {
                // Preserve the scale pivot.
                return [l, 1, 1, item[3], item[4]];
            } else if (item.length == 3) {
                return [l, 1, 1];
            } else {
                return [l, 1];
            }
        }
    },
equaliseTransform = R._equaliseTransform = function (t1, t2) {
t2 = Str(t2).replace(/\.{3}|\u2026/g, t1);
t1 = R.parseTransformString(t1) || [];
t2 = R.parseTransformString(t2) || [];
var maxlength = mmax(t1.length, t2.length),
from = [],
to = [],
i = 0, j, jj,
tt1, tt2;
for (; i < maxlength; i++) {
tt1 = t1[i] || getEmpty(t2[i]);
tt2 = t2[i] || getEmpty(tt1);
if ((tt1[0] != tt2[0]) ||
(tt1[0].toLowerCase() == "r" && (tt1[2] != tt2[2] || tt1[3] != tt2[3])) ||
(tt1[0].toLowerCase() == "s" && (tt1[3] != tt2[3] || tt1[4] != tt2[4]))
) {
return;
}
from[i] = [];
to[i] = [];
for (j = 0, jj = mmax(tt1.length, tt2.length); j < jj; j++) {
j in tt1 && (from[i][j] = tt1[j]);
j in tt2 && (to[i][j] = tt2[j]);
}
}
return {
from: from,
to: to
};
};
R._getContainer = function (x, y, w, h) {
var container;
container = h == null && !R.is(x, "object") ? g.doc.getElementById(x) : x;
if (container == null) {
return;
}
if (container.tagName) {
if (y == null) {
return {
container: container,
width: container.style.pixelWidth || container.offsetWidth,
height: container.style.pixelHeight || container.offsetHeight
};
} else {
return {
container: container,
width: y,
height: w
};
}
}
return {
container: 1,
x: x,
y: y,
width: w,
height: h
};
};
/*\
* Raphael.pathToRelative
[ method ]
**
* Utility method
**
* Converts path to relative form
> Parameters
- pathString (string|array) path string or array of segments
= (array) array of segments.
\*/
R.pathToRelative = pathToRelative;
R._engine = {};
/*\
* Raphael.path2curve
[ method ]
**
* Utility method
**
* Converts path to a new path where all segments are cubic bezier curves.
> Parameters
- pathString (string|array) path string or array of segments
= (array) array of segments.
\*/
R.path2curve = path2curve;
/*\
* Raphael.matrix
[ method ]
**
* Utility method
**
* Returns matrix based on given parameters.
> Parameters
- a (number)
- b (number)
- c (number)
- d (number)
- e (number)
- f (number)
= (object) @Matrix
\*/
R.matrix = function (a, b, c, d, e, f) {
return new Matrix(a, b, c, d, e, f);
};
function Matrix(a, b, c, d, e, f) {
if (a != null) {
this.a = +a;
this.b = +b;
this.c = +c;
this.d = +d;
this.e = +e;
this.f = +f;
} else {
this.a = 1;
this.b = 0;
this.c = 0;
this.d = 1;
this.e = 0;
this.f = 0;
}
}
(function (matrixproto) {
/*\
* Matrix.add
[ method ]
**
* Adds given matrix to existing one.
> Parameters
- a (number)
- b (number)
- c (number)
- d (number)
- e (number)
- f (number)
or
- matrix (object) @Matrix
\*/
matrixproto.add = function (a, b, c, d, e, f) {
var out = [[], [], []],
m = [[this.a, this.c, this.e], [this.b, this.d, this.f], [0, 0, 1]],
matrix = [[a, c, e], [b, d, f], [0, 0, 1]],
x, y, z, res;
if (a && a instanceof Matrix) {
matrix = [[a.a, a.c, a.e], [a.b, a.d, a.f], [0, 0, 1]];
}
for (x = 0; x < 3; x++) {
for (y = 0; y < 3; y++) {
res = 0;
for (z = 0; z < 3; z++) {
res += m[x][z] * matrix[z][y];
}
out[x][y] = res;
}
}
this.a = out[0][0];
this.b = out[1][0];
this.c = out[0][1];
this.d = out[1][1];
this.e = out[0][2];
this.f = out[1][2];
};
/*\
* Matrix.invert
[ method ]
**
* Returns inverted version of the matrix
= (object) @Matrix
\*/
matrixproto.invert = function () {
var me = this,
x = me.a * me.d - me.b * me.c;
return new Matrix(me.d / x, -me.b / x, -me.c / x, me.a / x, (me.c * me.f - me.d * me.e) / x, (me.b * me.e - me.a * me.f) / x);
};
/*\
* Matrix.clone
[ method ]
**
* Returns copy of the matrix
= (object) @Matrix
\*/
matrixproto.clone = function () {
return new Matrix(this.a, this.b, this.c, this.d, this.e, this.f);
};
/*\
* Matrix.translate
[ method ]
**
* Translate the matrix
> Parameters
- x (number)
- y (number)
\*/
matrixproto.translate = function (x, y) {
this.add(1, 0, 0, 1, x, y);
};
/*\
* Matrix.scale
[ method ]
**
* Scales the matrix
> Parameters
- x (number)
- y (number) #optional
- cx (number) #optional
- cy (number) #optional
\*/
matrixproto.scale = function (x, y, cx, cy) {
y == null && (y = x);
(cx || cy) && this.add(1, 0, 0, 1, cx, cy);
this.add(x, 0, 0, y, 0, 0);
(cx || cy) && this.add(1, 0, 0, 1, -cx, -cy);
};
/*\
* Matrix.rotate
[ method ]
**
* Rotates the matrix
> Parameters
- a (number)
- x (number)
- y (number)
\*/
matrixproto.rotate = function (a, x, y) {
a = R.rad(a);
x = x || 0;
y = y || 0;
var cos = +math.cos(a).toFixed(9),
sin = +math.sin(a).toFixed(9);
this.add(cos, sin, -sin, cos, x, y);
this.add(1, 0, 0, 1, -x, -y);
};
/*\
* Matrix.x
[ method ]
**
* Return x coordinate for given point after transformation described by the matrix. See also @Matrix.y
> Parameters
- x (number)
- y (number)
= (number) x
\*/
matrixproto.x = function (x, y) {
return x * this.a + y * this.c + this.e;
};
/*\
* Matrix.y
[ method ]
**
* Return y coordinate for given point after transformation described by the matrix. See also @Matrix.x
> Parameters
- x (number)
- y (number)
= (number) y
\*/
matrixproto.y = function (x, y) {
return x * this.b + y * this.d + this.f;
};
matrixproto.get = function (i) {
return +this[Str.fromCharCode(97 + i)].toFixed(4);
};
matrixproto.toString = function () {
return R.svg ?
"matrix(" + [this.get(0), this.get(1), this.get(2), this.get(3), this.get(4), this.get(5)].join() + ")" :
[this.get(0), this.get(2), this.get(1), this.get(3), 0, 0].join();
};
matrixproto.toFilter = function () {
return "progid:DXImageTransform.Microsoft.Matrix(M11=" + this.get(0) +
", M12=" + this.get(2) + ", M21=" + this.get(1) + ", M22=" + this.get(3) +
", Dx=" + this.get(4) + ", Dy=" + this.get(5) + ", sizingmethod='auto expand')";
};
matrixproto.offset = function () {
return [this.e.toFixed(4), this.f.toFixed(4)];
};
function norm(a) {
return a[0] * a[0] + a[1] * a[1];
}
function normalize(a) {
var mag = math.sqrt(norm(a));
a[0] && (a[0] /= mag);
a[1] && (a[1] /= mag);
}
/*\
* Matrix.split
[ method ]
**
* Splits matrix into primitive transformations
= (object) in format:
o dx (number) translation by x
o dy (number) translation by y
o scalex (number) scale by x
o scaley (number) scale by y
o shear (number) shear
o rotate (number) rotation in deg
o isSimple (boolean) could it be represented via simple transformations
\*/
matrixproto.split = function () {
var out = {};
// translation
out.dx = this.e;
out.dy = this.f;
// scale and shear
var row = [[this.a, this.c], [this.b, this.d]];
out.scalex = math.sqrt(norm(row[0]));
normalize(row[0]);
out.shear = row[0][0] * row[1][0] + row[0][1] * row[1][1];
row[1] = [row[1][0] - row[0][0] * out.shear, row[1][1] - row[0][1] * out.shear];
out.scaley = math.sqrt(norm(row[1]));
normalize(row[1]);
out.shear /= out.scaley;
// rotation
var sin = -row[0][1],
cos = row[1][1];
if (cos < 0) {
out.rotate = R.deg(math.acos(cos));
if (sin < 0) {
out.rotate = 360 - out.rotate;
}
} else {
out.rotate = R.deg(math.asin(sin));
}
out.isSimple = !+out.shear.toFixed(9) && (out.scalex.toFixed(9) == out.scaley.toFixed(9) || !out.rotate);
out.isSuperSimple = !+out.shear.toFixed(9) && out.scalex.toFixed(9) == out.scaley.toFixed(9) && !out.rotate;
out.noRotation = !+out.shear.toFixed(9) && !out.rotate;
return out;
};
/*\
* Matrix.toTransformString
[ method ]
**
* Return transform string that represents given matrix
= (string) transform string
\*/
matrixproto.toTransformString = function (shorter) {
var s = shorter || this[split]();
if (s.isSimple) {
s.scalex = +s.scalex.toFixed(4);
s.scaley = +s.scaley.toFixed(4);
s.rotate = +s.rotate.toFixed(4);
return (s.dx || s.dy ? "t" + [s.dx, s.dy] : E) +
(s.scalex != 1 || s.scaley != 1 ? "s" + [s.scalex, s.scaley, 0, 0] : E) +
(s.rotate ? "r" + [s.rotate, 0, 0] : E);
} else {
return "m" + [this.get(0), this.get(1), this.get(2), this.get(3), this.get(4), this.get(5)];
}
};
})(Matrix.prototype);
// WebKit rendering bug workaround method
var version = navigator.userAgent.match(/Version\/(.*?)\s/) || navigator.userAgent.match(/Chrome\/(\d+)/);
if ((navigator.vendor == "Apple Computer, Inc.") && (version && version[1] < 4 || navigator.platform.slice(0, 2) == "iP") ||
(navigator.vendor == "Google Inc." && version && version[1] < 8)) {
/*\
* Paper.safari
[ method ]
**
* There is an inconvenient rendering bug in Safari (WebKit):
* sometimes the rendering should be forced.
* This method should help with dealing with this bug.
\*/
paperproto.safari = function () {
var rect = this.rect(-99, -99, this.width + 99, this.height + 99).attr({stroke: "none"});
setTimeout(function () {rect.remove();});
};
} else {
paperproto.safari = fun;
}
    // Legacy-IE event cancel, assigned onto events lacking preventDefault.
    var preventDefault = function () {
        this.returnValue = false;
    },
    // Touch-wrapper events delegate cancellation to the original touch event.
    preventTouch = function () {
        return this.originalEvent.preventDefault();
    },
    // Legacy-IE bubbling cancel.
    stopPropagation = function () {
        this.cancelBubble = true;
    },
    stopTouch = function () {
        return this.originalEvent.stopPropagation();
    },
    // Page coordinates = client coordinates + document scroll offset.
    getEventPosition = function (e) {
        var scrollY = g.doc.documentElement.scrollTop || g.doc.body.scrollTop,
            scrollX = g.doc.documentElement.scrollLeft || g.doc.body.scrollLeft;
        return {
            x: e.clientX + scrollX,
            y: e.clientY + scrollY
        };
    },
addEvent = (function () {
if (g.doc.addEventListener) {
return function (obj, type, fn, element) {
var f = function (e) {
var pos = getEventPosition(e);
return fn.call(element, e, pos.x, pos.y);
};
obj.addEventListener(type, f, false);
if (supportsTouch && touchMap[type]) {
var _f = function (e) {
var pos = getEventPosition(e),
olde = e;
for (var i = 0, ii = e.targetTouches && e.targetTouches.length; i < ii; i++) {
if (e.targetTouches[i].target == obj) {
e = e.targetTouches[i];
e.originalEvent = olde;
e.preventDefault = preventTouch;
e.stopPropagation = stopTouch;
break;
}
}
return fn.call(element, e, pos.x, pos.y);
};
obj.addEventListener(touchMap[type], _f, false);
}
return function () {
obj.removeEventListener(type, f, false);
if (supportsTouch && touchMap[type])
obj.removeEventListener(touchMap[type], f, false);
return true;
};
};
} else if (g.doc.attachEvent) {
return function (obj, type, fn, element) {
var f = function (e) {
e = e || g.win.event;
var scrollY = g.doc.documentElement.scrollTop || g.doc.body.scrollTop,
scrollX = g.doc.documentElement.scrollLeft || g.doc.body.scrollLeft,
x = e.clientX + scrollX,
y = e.clientY + scrollY;
e.preventDefault = e.preventDefault || preventDefault;
e.stopPropagation = e.stopPropagation || stopPropagation;
return fn.call(element, e, x, y);
};
obj.attachEvent("on" + type, f);
var detacher = function () {
obj.detachEvent("on" + type, f);
return true;
};
return detacher;
};
}
})(),
    // Live list of in-progress drags: {el, move_scope, start_scope, end_scope}.
    drag = [],
    // Document-level mousemove/touchmove handler driving all active drags.
    dragMove = function (e) {
        var x = e.clientX,
            y = e.clientY,
            scrollY = g.doc.documentElement.scrollTop || g.doc.body.scrollTop,
            scrollX = g.doc.documentElement.scrollLeft || g.doc.body.scrollLeft,
            dragi,
            j = drag.length;
        while (j--) {
            dragi = drag[j];
            if (supportsTouch && e.touches) {
                // Find the touch belonging to this particular drag by its
                // identifier recorded at drag start.
                var i = e.touches.length,
                    touch;
                while (i--) {
                    touch = e.touches[i];
                    if (touch.identifier == dragi.el._drag.id) {
                        x = touch.clientX;
                        y = touch.clientY;
                        (e.originalEvent ? e.originalEvent : e).preventDefault();
                        break;
                    }
                }
            } else {
                e.preventDefault();
            }
            // Temporarily hide the dragged node so getElementByPoint can see
            // whatever lies underneath it (for the drag.over event).
            var node = dragi.el.node,
                o,
                next = node.nextSibling,
                parent = node.parentNode,
                display = node.style.display;
            g.win.opera && parent.removeChild(node);
            node.style.display = "none";
            o = dragi.el.paper.getElementByPoint(x, y);
            node.style.display = display;
            g.win.opera && (next ? parent.insertBefore(node, next) : parent.appendChild(node));
            o && eve("raphael.drag.over." + dragi.el.id, dragi.el, o);
            x += scrollX;
            y += scrollY;
            eve("raphael.drag.move." + dragi.el.id, dragi.move_scope || dragi.el, x - dragi.el._drag.x, y - dragi.el._drag.y, x, y, e);
        }
    },
    // Document-level mouseup handler: fires drag.end for every active drag,
    // resets per-element drag state and unbinds the shared listeners.
    dragUp = function (e) {
        R.unmousemove(dragMove).unmouseup(dragUp);
        var i = drag.length,
            dragi;
        while (i--) {
            dragi = drag[i];
            dragi.el._drag = {};
            eve("raphael.drag.end." + dragi.el.id, dragi.end_scope || dragi.start_scope || dragi.move_scope || dragi.el, e);
        }
        drag = [];
    },
/*\
* Raphael.el
[ property (object) ]
**
* You can add your own method to elements. This is usefull when you want to hack default functionality or
* want to wrap some common transformation or attributes in one method. In difference to canvas methods,
* you can redefine element method at any time. Expending element methods wouldn’t affect set.
> Usage
| Raphael.el.red = function () {
| this.attr({fill: "#f00"});
| };
| // then use it
| paper.circle(100, 100, 20).red();
\*/
elproto = R.el = {};
/*\
* Element.click
[ method ]
**
* Adds event handler for click for the element.
> Parameters
- handler (function) handler for the event
= (object) @Element
\*/
/*\
* Element.unclick
[ method ]
**
* Removes event handler for click for the element.
> Parameters
- handler (function) #optional handler for the event
= (object) @Element
\*/
/*\
* Element.dblclick
[ method ]
**
* Adds event handler for double click for the element.
> Parameters
- handler (function) handler for the event
= (object) @Element
\*/
/*\
* Element.undblclick
[ method ]
**
* Removes event handler for double click for the element.
> Parameters
- handler (function) #optional handler for the event
= (object) @Element
\*/
/*\
* Element.mousedown
[ method ]
**
* Adds event handler for mousedown for the element.
> Parameters
- handler (function) handler for the event
= (object) @Element
\*/
/*\
* Element.unmousedown
[ method ]
**
* Removes event handler for mousedown for the element.
> Parameters
- handler (function) #optional handler for the event
= (object) @Element
\*/
/*\
* Element.mousemove
[ method ]
**
* Adds event handler for mousemove for the element.
> Parameters
- handler (function) handler for the event
= (object) @Element
\*/
/*\
* Element.unmousemove
[ method ]
**
* Removes event handler for mousemove for the element.
> Parameters
- handler (function) #optional handler for the event
= (object) @Element
\*/
/*\
* Element.mouseout
[ method ]
**
* Adds event handler for mouseout for the element.
> Parameters
- handler (function) handler for the event
= (object) @Element
\*/
/*\
* Element.unmouseout
[ method ]
**
* Removes event handler for mouseout for the element.
> Parameters
- handler (function) #optional handler for the event
= (object) @Element
\*/
/*\
* Element.mouseover
[ method ]
**
* Adds event handler for mouseover for the element.
> Parameters
- handler (function) handler for the event
= (object) @Element
\*/
/*\
* Element.unmouseover
[ method ]
**
* Removes event handler for mouseover for the element.
> Parameters
- handler (function) #optional handler for the event
= (object) @Element
\*/
/*\
* Element.mouseup
[ method ]
**
* Adds event handler for mouseup for the element.
> Parameters
- handler (function) handler for the event
= (object) @Element
\*/
/*\
* Element.unmouseup
[ method ]
**
* Removes event handler for mouseup for the element.
> Parameters
- handler (function) #optional handler for the event
= (object) @Element
\*/
/*\
* Element.touchstart
[ method ]
**
* Adds event handler for touchstart for the element.
> Parameters
- handler (function) handler for the event
= (object) @Element
\*/
/*\
* Element.untouchstart
[ method ]
**
* Removes event handler for touchstart for the element.
> Parameters
- handler (function) #optional handler for the event
= (object) @Element
\*/
/*\
* Element.touchmove
[ method ]
**
* Adds event handler for touchmove for the element.
> Parameters
- handler (function) handler for the event
= (object) @Element
\*/
/*\
* Element.untouchmove
[ method ]
**
* Removes event handler for touchmove for the element.
> Parameters
- handler (function) #optional handler for the event
= (object) @Element
\*/
/*\
* Element.touchend
[ method ]
**
* Adds event handler for touchend for the element.
> Parameters
- handler (function) handler for the event
= (object) @Element
\*/
/*\
* Element.untouchend
[ method ]
**
* Removes event handler for touchend for the element.
> Parameters
- handler (function) #optional handler for the event
= (object) @Element
\*/
/*\
* Element.touchcancel
[ method ]
**
* Adds event handler for touchcancel for the element.
> Parameters
- handler (function) handler for the event
= (object) @Element
\*/
/*\
* Element.untouchcancel
[ method ]
**
* Removes event handler for touchcancel for the element.
> Parameters
- handler (function) #optional handler for the event
= (object) @Element
\*/
for (var i = events.length; i--;) {
(function (eventName) {
R[eventName] = elproto[eventName] = function (fn, scope) {
if (R.is(fn, "function")) {
this.events = this.events || [];
this.events.push({name: eventName, f: fn, unbind: addEvent(this.shape || this.node || g.doc, eventName, fn, scope || this)});
}
return this;
};
R["un" + eventName] = elproto["un" + eventName] = function (fn) {
var events = this.events || [],
l = events.length;
while (l--){
if (events[l].name == eventName && (R.is(fn, "undefined") || events[l].f == fn)) {
events[l].unbind();
events.splice(l, 1);
!events.length && delete this.events;
}
}
return this;
};
})(events[i]);
}
/*\
* Element.data
[ method ]
**
* Adds or retrieves given value asociated with given key.
**
* See also @Element.removeData
> Parameters
- key (string) key to store data
- value (any) #optional value to store
= (object) @Element
* or, if value is not specified:
= (any) value
* or, if key and value are not specified:
= (object) Key/value pairs for all the data associated with the element.
> Usage
| for (var i = 0, i < 5, i++) {
| paper.circle(10 + 15 * i, 10, 10)
| .attr({fill: "#000"})
| .data("i", i)
| .click(function () {
| alert(this.data("i"));
| });
| }
\*/
elproto.data = function (key, value) {
var data = eldata[this.id] = eldata[this.id] || {};
if (arguments.length == 0) {
return data;
}
if (arguments.length == 1) {
if (R.is(key, "object")) {
for (var i in key) if (key[has](i)) {
this.data(i, key[i]);
}
return this;
}
eve("raphael.data.get." + this.id, this, data[key], key);
return data[key];
}
data[key] = value;
eve("raphael.data.set." + this.id, this, value, key);
return this;
};
/*\
* Element.removeData
[ method ]
**
* Removes value associated with an element by given key.
* If key is not provided, removes all the data of the element.
> Parameters
- key (string) #optional key
= (object) @Element
\*/
elproto.removeData = function (key) {
if (key == null) {
eldata[this.id] = {};
} else {
eldata[this.id] && delete eldata[this.id][key];
}
return this;
};
/*\
* Element.getData
[ method ]
**
* Retrieves the element data
= (object) data
\*/
elproto.getData = function () {
return clone(eldata[this.id] || {});
};
/*\
* Element.hover
[ method ]
**
* Adds event handlers for hover for the element.
> Parameters
- f_in (function) handler for hover in
- f_out (function) handler for hover out
- icontext (object) #optional context for hover in handler
- ocontext (object) #optional context for hover out handler
= (object) @Element
\*/
elproto.hover = function (f_in, f_out, scope_in, scope_out) {
return this.mouseover(f_in, scope_in).mouseout(f_out, scope_out || scope_in);
};
/*\
* Element.unhover
[ method ]
**
* Removes event handlers for hover for the element.
> Parameters
- f_in (function) handler for hover in
- f_out (function) handler for hover out
= (object) @Element
\*/
elproto.unhover = function (f_in, f_out) {
return this.unmouseover(f_in).unmouseout(f_out);
};
var draggable = [];
/*\
* Element.drag
[ method ]
**
* Adds event handlers for drag of the element.
> Parameters
- onmove (function) handler for moving
- onstart (function) handler for drag start
- onend (function) handler for drag end
- mcontext (object) #optional context for moving handler
- scontext (object) #optional context for drag start handler
- econtext (object) #optional context for drag end handler
* Additionaly following `drag` events will be triggered: `drag.start.<id>` on start,
* `drag.end.<id>` on end and `drag.move.<id>` on every move. When element will be dragged over another element
* `drag.over.<id>` will be fired as well.
*
* Start event and start handler will be called in specified context or in context of the element with following parameters:
o x (number) x position of the mouse
o y (number) y position of the mouse
o event (object) DOM event object
* Move event and move handler will be called in specified context or in context of the element with following parameters:
o dx (number) shift by x from the start point
o dy (number) shift by y from the start point
o x (number) x position of the mouse
o y (number) y position of the mouse
o event (object) DOM event object
* End event and end handler will be called in specified context or in context of the element with following parameters:
o event (object) DOM event object
= (object) @Element
\*/
elproto.drag = function (onmove, onstart, onend, move_scope, start_scope, end_scope) {
function start(e) {
(e.originalEvent || e).preventDefault();
var x = e.clientX,
y = e.clientY,
scrollY = g.doc.documentElement.scrollTop || g.doc.body.scrollTop,
scrollX = g.doc.documentElement.scrollLeft || g.doc.body.scrollLeft;
this._drag.id = e.identifier;
if (supportsTouch && e.touches) {
var i = e.touches.length, touch;
while (i--) {
touch = e.touches[i];
this._drag.id = touch.identifier;
if (touch.identifier == this._drag.id) {
x = touch.clientX;
y = touch.clientY;
break;
}
}
}
this._drag.x = x + scrollX;
this._drag.y = y + scrollY;
!drag.length && R.mousemove(dragMove).mouseup(dragUp);
drag.push({el: this, move_scope: move_scope, start_scope: start_scope, end_scope: end_scope});
onstart && eve.on("raphael.drag.start." + this.id, onstart);
onmove && eve.on("raphael.drag.move." + this.id, onmove);
onend && eve.on("raphael.drag.end." + this.id, onend);
eve("raphael.drag.start." + this.id, start_scope || move_scope || this, e.clientX + scrollX, e.clientY + scrollY, e);
}
this._drag = {};
draggable.push({el: this, start: start});
this.mousedown(start);
return this;
};
/*\
* Element.onDragOver
[ method ]
**
* Shortcut for assigning event handler for `drag.over.<id>` event, where id is id of the element (see @Element.id).
> Parameters
- f (function) handler for event, first argument would be the element you are dragging over
\*/
elproto.onDragOver = function (f) {
f ? eve.on("raphael.drag.over." + this.id, f) : eve.unbind("raphael.drag.over." + this.id);
};
/*\
* Element.undrag
[ method ]
**
* Removes all drag event handlers from given element.
\*/
elproto.undrag = function () {
var i = draggable.length;
while (i--) if (draggable[i].el == this) {
this.unmousedown(draggable[i].start);
draggable.splice(i, 1);
eve.unbind("raphael.drag.*." + this.id);
}
!draggable.length && R.unmousemove(dragMove).unmouseup(dragUp);
drag = [];
};
/*\
* Paper.circle
[ method ]
**
* Draws a circle.
**
> Parameters
**
- x (number) x coordinate of the centre
- y (number) y coordinate of the centre
- r (number) radius
= (object) Raphaël element object with type “circle”
**
> Usage
| var c = paper.circle(50, 50, 40);
\*/
paperproto.circle = function (x, y, r) {
var out = R._engine.circle(this, x || 0, y || 0, r || 0);
this.__set__ && this.__set__.push(out);
return out;
};
/*\
* Paper.rect
[ method ]
*
* Draws a rectangle.
**
> Parameters
**
- x (number) x coordinate of the top left corner
- y (number) y coordinate of the top left corner
- width (number) width
- height (number) height
- r (number) #optional radius for rounded corners, default is 0
= (object) Raphaël element object with type “rect”
**
> Usage
| // regular rectangle
| var c = paper.rect(10, 10, 50, 50);
| // rectangle with rounded corners
| var c = paper.rect(40, 40, 50, 50, 10);
\*/
paperproto.rect = function (x, y, w, h, r) {
var out = R._engine.rect(this, x || 0, y || 0, w || 0, h || 0, r || 0);
this.__set__ && this.__set__.push(out);
return out;
};
/*\
* Paper.ellipse
[ method ]
**
* Draws an ellipse.
**
> Parameters
**
- x (number) x coordinate of the centre
- y (number) y coordinate of the centre
- rx (number) horizontal radius
- ry (number) vertical radius
= (object) Raphaël element object with type “ellipse”
**
> Usage
| var c = paper.ellipse(50, 50, 40, 20);
\*/
paperproto.ellipse = function (x, y, rx, ry) {
var out = R._engine.ellipse(this, x || 0, y || 0, rx || 0, ry || 0);
this.__set__ && this.__set__.push(out);
return out;
};
/*\
* Paper.path
[ method ]
**
* Creates a path element by given path data string.
> Parameters
- pathString (string) #optional path string in SVG format.
* Path string consists of one-letter commands, followed by comma seprarated arguments in numercal form. Example:
| "M10,20L30,40"
* Here we can see two commands: “M”, with arguments `(10, 20)` and “L” with arguments `(30, 40)`. Upper case letter mean command is absolute, lower case—relative.
*
# <p>Here is short list of commands available, for more details see <a href="http://www.w3.org/TR/SVG/paths.html#PathData" title="Details of a path's data attribute's format are described in the SVG specification.">SVG path string format</a>.</p>
# <table><thead><tr><th>Command</th><th>Name</th><th>Parameters</th></tr></thead><tbody>
# <tr><td>M</td><td>moveto</td><td>(x y)+</td></tr>
# <tr><td>Z</td><td>closepath</td><td>(none)</td></tr>
# <tr><td>L</td><td>lineto</td><td>(x y)+</td></tr>
# <tr><td>H</td><td>horizontal lineto</td><td>x+</td></tr>
# <tr><td>V</td><td>vertical lineto</td><td>y+</td></tr>
# <tr><td>C</td><td>curveto</td><td>(x1 y1 x2 y2 x y)+</td></tr>
# <tr><td>S</td><td>smooth curveto</td><td>(x2 y2 x y)+</td></tr>
# <tr><td>Q</td><td>quadratic Bézier curveto</td><td>(x1 y1 x y)+</td></tr>
# <tr><td>T</td><td>smooth quadratic Bézier curveto</td><td>(x y)+</td></tr>
# <tr><td>A</td><td>elliptical arc</td><td>(rx ry x-axis-rotation large-arc-flag sweep-flag x y)+</td></tr>
# <tr><td>R</td><td><a href="http://en.wikipedia.org/wiki/Catmull–Rom_spline#Catmull.E2.80.93Rom_spline">Catmull-Rom curveto</a>*</td><td>x1 y1 (x y)+</td></tr></tbody></table>
* * “Catmull-Rom curveto” is a not standard SVG command and added in 2.0 to make life easier.
* Note: there is a special case when path consist of just three commands: “M10,10R…z”. In this case path will smoothly connects to its beginning.
> Usage
| var c = paper.path("M10 10L90 90");
| // draw a diagonal line:
| // move to 10,10, line to 90,90
* For example of path strings, check out these icons: http://raphaeljs.com/icons/
\*/
// Draws a path on the paper from an SVG path string (or format arguments).
// Returns the new "path" element; while a set capture is active (setStart),
// the element is also recorded on the capturing set.
paperproto.path = function (pathString) {
    // Coerce non-string, non-command-array input to a string (E is "").
    // Note: this intentionally mutates the parameter so the linked
    // `arguments[0]` seen by R.format below picks up the coercion.
    if (pathString && !R.is(pathString, string) && !R.is(pathString[0], array)) {
        pathString += E;
    }
    var element = R._engine.path(R.format[apply](R, arguments), this);
    this.__set__ && this.__set__.push(element);
    return element;
};
/*\
* Paper.image
[ method ]
**
* Embeds an image into the surface.
**
> Parameters
**
- src (string) URI of the source image
- x (number) x coordinate position
- y (number) y coordinate position
- width (number) width of the image
- height (number) height of the image
= (object) Raphaël element object with type “image”
**
> Usage
| var c = paper.image("apple.png", 10, 10, 80, 80);
\*/
// Embeds an image into the surface at (x, y) with the given size.
// Missing arguments default to "about:blank" / 0 so the engine always
// receives a complete argument list.
paperproto.image = function (src, x, y, w, h) {
    var element = R._engine.image(this, src || "about:blank", x || 0, y || 0, w || 0, h || 0);
    if (this.__set__) {
        this.__set__.push(element);
    }
    return element;
};
/*\
* Paper.text
[ method ]
**
* Draws a text string. If you need line breaks, put “\n” in the string.
**
> Parameters
**
- x (number) x coordinate position
- y (number) y coordinate position
- text (string) The text string to draw
= (object) Raphaël element object with type “text”
**
> Usage
| var t = paper.text(50, 50, "Raphaël\nkicks\nbutt!");
\*/
// Draws a text string at (x, y). The text is stringified via Str so that
// numbers and other values render predictably; "\n" inside produces line breaks.
paperproto.text = function (x, y, text) {
    var element = R._engine.text(this, x || 0, y || 0, Str(text));
    if (this.__set__) {
        this.__set__.push(element);
    }
    return element;
};
/*\
* Paper.set
[ method ]
**
* Creates array-like object to keep and operate several elements at once.
* Warning: it doesn’t create any elements for itself in the page, it just groups existing elements.
* Sets act as pseudo elements — all methods available to an element can be used on a set.
= (object) array-like object that represents set of elements
**
> Usage
| var st = paper.set();
| st.push(
| paper.circle(10, 10, 5),
| paper.circle(30, 10, 5)
| );
| st.attr({fill: "red"}); // changes the fill of both circles
\*/
// Creates an array-like Set object grouping existing elements.
// Accepts either a single array of items or the items as plain arguments.
paperproto.set = function (itemsArray) {
    if (!R.is(itemsArray, "array")) {
        // Convert the arguments object into a real array of items.
        itemsArray = Array.prototype.splice.call(arguments, 0, arguments.length);
    }
    var newSet = new Set(itemsArray);
    if (this.__set__) {
        this.__set__.push(newSet);
    }
    newSet.paper = this;
    newSet.type = "set";
    return newSet;
};
/*\
* Paper.setStart
[ method ]
**
* Creates @Paper.set. All elements that will be created after calling this method and before calling
* @Paper.setFinish will be added to the set.
**
> Usage
| paper.setStart();
| paper.circle(10, 10, 5),
| paper.circle(30, 10, 5)
| var st = paper.setFinish();
| st.attr({fill: "red"}); // changes the fill of both circles
\*/
// Begins "set capture": every element created after this call (and before
// setFinish) is automatically pushed onto the capturing set.
paperproto.setStart = function (set) {
    if (set) {
        this.__set__ = set;
    } else {
        this.__set__ = this.set();
    }
};
/*\
* Paper.setFinish
[ method ]
**
* See @Paper.setStart. This method finishes catching and returns resulting set.
**
= (object) set
\*/
// Ends the capture started by setStart and returns the accumulated set.
// The (unused) parameter is kept for interface compatibility.
paperproto.setFinish = function (set) {
    var captured = this.__set__;
    delete this.__set__;
    return captured;
};
/*\
* Paper.setSize
[ method ]
**
* If you need to change dimensions of the canvas call this method
**
> Parameters
**
- width (number) new width of the canvas
- height (number) new height of the canvas
\*/
// Resizes the canvas; delegates to the active rendering engine (SVG or VML).
paperproto.setSize = function (width, height) {
    return R._engine.setSize.call(this, width, height);
};
/*\
* Paper.setViewBox
[ method ]
**
* Sets the view box of the paper. Practically it gives you ability to zoom and pan whole paper surface by
* specifying new boundaries.
**
> Parameters
**
- x (number) new x position, default is `0`
- y (number) new y position, default is `0`
- w (number) new width of the canvas
- h (number) new height of the canvas
- fit (boolean) `true` if you want graphics to fit into new boundary box
\*/
// Sets the paper's view box (pan/zoom of the whole surface); delegates to
// the active rendering engine. `fit` scales graphics to the new bounds.
paperproto.setViewBox = function (x, y, w, h, fit) {
    return R._engine.setViewBox.call(this, x, y, w, h, fit);
};
/*\
* Paper.top
[ property ]
**
* Points to the topmost element on the paper
\*/
/*\
* Paper.bottom
[ property ]
**
* Points to the bottom element on the paper
\*/
// z-order linked-list endpoints; the engine keeps these pointing at the
// top-most and bottom-most elements on the paper (null while empty).
paperproto.top = paperproto.bottom = null;
/*\
* Paper.raphael
[ property ]
**
* Points to the @Raphael object/function
\*/
// Back-reference from every paper instance to the Raphael namespace/function.
paperproto.raphael = R;
// Returns the page coordinates of a DOM element's top-left corner,
// adding the window scroll offset and subtracting the document's
// client-edge offsets (IE border quirk).
var getOffset = function (elem) {
    var box = elem.getBoundingClientRect(),
        doc = elem.ownerDocument,
        body = doc.body,
        docElem = doc.documentElement,
        clientTop = docElem.clientTop || body.clientTop || 0, clientLeft = docElem.clientLeft || body.clientLeft || 0,
        top = box.top + (g.win.pageYOffset || docElem.scrollTop || body.scrollTop ) - clientTop,
        left = box.left + (g.win.pageXOffset || docElem.scrollLeft || body.scrollLeft) - clientLeft;
    return {
        y: top,
        x: left
    };
};
/*\
* Paper.getElementByPoint
[ method ]
**
* Returns you topmost element under given point.
**
= (object) Raphaël element object
> Parameters
**
- x (number) x coordinate from the top left corner of the window
- y (number) y coordinate from the top left corner of the window
> Usage
| paper.getElementByPoint(mouseX, mouseY).attr({stroke: "#f00"});
\*/
// Returns the top-most Raphael element under the given window coordinates,
// or null when nothing (belonging to this paper) is there.
paperproto.getElementByPoint = function (x, y) {
    var paper = this,
        svg = paper.canvas,
        target = g.doc.elementFromPoint(x, y);
    // Opera workaround: elementFromPoint may return the <svg> root instead of
    // the shape, so fall back to an explicit 1x1 intersection query.
    if (g.win.opera && target.tagName == "svg") {
        var so = getOffset(svg),
            sr = svg.createSVGRect();
        sr.x = x - so.x;
        sr.y = y - so.y;
        sr.width = sr.height = 1;
        var hits = svg.getIntersectionList(sr, null);
        if (hits.length) {
            // The last hit is the top-most in document order.
            target = hits[hits.length - 1];
        }
    }
    if (!target) {
        return null;
    }
    // Climb out of nested nodes (e.g. tspans) until a node carrying a
    // Raphael back-reference is found, or we leave the canvas subtree.
    while (target.parentNode && target != svg.parentNode && !target.raphael) {
        target = target.parentNode;
    }
    // Reaching the canvas container means the point hit the paper background.
    target == paper.canvas.parentNode && (target = svg);
    target = target && target.raphael ? paper.getById(target.raphaelid) : null;
    return target;
};
/*\
* Paper.getElementsByBBox
[ method ]
**
* Returns set of elements that have an intersecting bounding box
**
> Parameters
**
- bbox (object) bbox to check with
= (object) @Set
\*/
// Collects every element whose bounding box intersects the given bbox
// into a new set.
paperproto.getElementsByBBox = function (bbox) {
    var matches = this.set();
    this.forEach(function (el) {
        R.isBBoxIntersect(el.getBBox(), bbox) && matches.push(el);
    });
    return matches;
};
/*\
* Paper.getById
[ method ]
**
* Returns you element by its internal ID.
**
> Parameters
**
- id (number) id
= (object) Raphaël element object
\*/
// Looks an element up by its internal id by walking the z-order linked
// list from the bottom; returns null when no element matches.
paperproto.getById = function (id) {
    for (var el = this.bottom; el; el = el.next) {
        if (el.id == id) {
            return el;
        }
    }
    return null;
};
/*\
* Paper.forEach
[ method ]
**
* Executes given function for each element on the paper
*
* If callback function returns `false` it will stop loop running.
**
> Parameters
**
- callback (function) function to run
- thisArg (object) context object for the callback
= (object) Paper object
> Usage
| paper.forEach(function (el) {
| el.attr({ stroke: "blue" });
| });
\*/
// Runs `callback` (with `thisArg` as its context) for every element on the
// paper, bottom to top. A strict `false` return aborts the iteration.
// Returns the paper for chaining.
paperproto.forEach = function (callback, thisArg) {
    for (var el = this.bottom; el; el = el.next) {
        if (callback.call(thisArg, el) === false) {
            break;
        }
    }
    return this;
};
/*\
* Paper.getElementsByPoint
[ method ]
**
* Returns set of elements that have common point inside
**
> Parameters
**
- x (number) x coordinate of the point
- y (number) y coordinate of the point
= (object) @Set
\*/
// Collects every element whose shape contains the point (x, y) into a new set.
paperproto.getElementsByPoint = function (x, y) {
    var matches = this.set();
    this.forEach(function (el) {
        el.isPointInside(x, y) && matches.push(el);
    });
    return matches;
};
// toString helper installed on point objects: "x,y" (S is the separator).
function x_y() {
    return this.x + S + this.y;
}
// toString helper installed on bbox objects: "x,y,width × height"
// (\xd7 is the multiplication sign).
function x_y_w_h() {
    return this.x + S + this.y + S + this.width + " \xd7 " + this.height;
}
/*\
* Element.isPointInside
[ method ]
**
* Determine if given point is inside this element’s shape
**
> Parameters
**
- x (number) x coordinate of the point
- y (number) y coordinate of the point
= (boolean) `true` if point inside the shape
\*/
// Hit-tests the point (x, y) against this element's shape. The element is
// first converted to a path, then the current transform (if any) is applied
// so the test happens in screen space.
elproto.isPointInside = function (x, y) {
    var rp = this.realPath = getPath[this.type](this);
    if (this.attr('transform') && this.attr('transform').length) {
        rp = R.transformPath(rp, this.attr('transform'));
    }
    return R.isPointInsidePath(rp, x, y);
};
/*\
* Element.getBBox
[ method ]
**
* Return bounding box for a given element
**
> Parameters
**
- isWithoutTransform (boolean) flag, `true` if you want to have bounding box before transformations. Default is `false`.
= (object) Bounding box object:
o {
o x: (number) top left corner x
o y: (number) top left corner y
o x2: (number) bottom right corner x
o y2: (number) bottom right corner y
o width: (number) width
o height: (number) height
o }
\*/
// Returns the element's bounding box, cached on the private `_` store.
// Two caches are kept: `bboxwt` (without transform) and `bbox` (with the
// current matrix applied); `dirty` / `dirtyT` flags invalidate them.
elproto.getBBox = function (isWithoutTransform) {
    if (this.removed) {
        return {};
    }
    var _ = this._;
    if (isWithoutTransform) {
        if (_.dirty || !_.bboxwt) {
            // Rebuild the untransformed path and measure it.
            this.realPath = getPath[this.type](this);
            _.bboxwt = pathDimensions(this.realPath);
            _.bboxwt.toString = x_y_w_h;
            _.dirty = 0;
        }
        return _.bboxwt;
    }
    if (_.dirty || _.dirtyT || !_.bbox) {
        if (_.dirty || !this.realPath) {
            // Geometry changed: the cached untransformed bbox is stale too.
            _.bboxwt = 0;
            this.realPath = getPath[this.type](this);
        }
        // Measure the path mapped through the element's current matrix.
        _.bbox = pathDimensions(mapPath(this.realPath, this.matrix));
        _.bbox.toString = x_y_w_h;
        _.dirty = _.dirtyT = 0;
    }
    return _.bbox;
};
/*\
* Element.clone
[ method ]
**
= (object) clone of a given element
**
\*/
// Clones this element: creates a new element of the same type on the same
// paper and copies all attributes over. Returns null for removed elements.
elproto.clone = function () {
    if (this.removed) {
        return null;
    }
    var copy = this.paper[this.type]().attr(this.attr());
    if (this.__set__) {
        this.__set__.push(copy);
    }
    return copy;
};
/*\
* Element.glow
[ method ]
**
* Return set of elements that create glow-like effect around given element. See @Paper.set.
*
* Note: Glow is not connected to the element. If you change element attributes it won’t adjust itself.
**
> Parameters
**
- glow (object) #optional parameters object with all properties optional:
o {
o width (number) size of the glow, default is `10`
o fill (boolean) will it be filled, default is `false`
o opacity (number) opacity, default is `0.5`
o offsetx (number) horizontal offset, default is `0`
o offsety (number) vertical offset, default is `0`
o color (string) glow colour, default is `black`
o }
= (object) @Paper.set of elements that represents glow
\*/
// Builds a glow effect around this element: a set of progressively thinner,
// semi-transparent copies of the element's path stacked behind it.
// Returns the set of glow paths, or null for text (no path representation).
// NOTE: the glow is not linked to the element and will not track later changes.
elproto.glow = function (glow) {
    if (this.type == "text") {
        return null;
    }
    glow = glow || {};
    var s = {
        // Total glow width also accounts for the element's own stroke width.
        width: (glow.width || 10) + (+this.attr("stroke-width") || 1),
        fill: glow.fill || false,
        opacity: glow.opacity || .5,
        offsetx: glow.offsetx || 0,
        offsety: glow.offsety || 0,
        color: glow.color || "#000"
    },
        c = s.width / 2,
        r = this.paper,
        out = r.set(),
        path = this.realPath || getPath[this.type](this);
    // Map into screen space so the glow matches the transformed element.
    path = this.matrix ? mapPath(path, this.matrix) : path;
    // Stack `c` copies with increasing stroke width; the per-copy opacity is
    // divided so the overlap accumulates to roughly s.opacity.
    for (var i = 1; i < c + 1; i++) {
        out.push(r.path(path).attr({
            stroke: s.color,
            fill: s.fill ? s.color : "none",
            "stroke-linejoin": "round",
            "stroke-linecap": "round",
            "stroke-width": +(s.width / c * i).toFixed(3),
            opacity: +(s.opacity / c).toFixed(3)
        }));
    }
    return out.insertBefore(this).translate(s.offsetx, s.offsety);
};
// Path-length machinery. getLengthFactory produces three specialised
// functions over a curve-normalised path:
//  - getTotalLength (istotal=1): total length in px,
//  - getPointAtLength (no flags): {x, y, alpha} at a given length,
//  - getSubpathsAtLength (subpath=1): {start, end} path strings split at a length.
var curveslengths = {},
    // For one cubic segment: with no `length`, return the segment's length;
    // otherwise return the point (and tangent data) at that length along it.
    getPointAtSegmentLength = function (p1x, p1y, c1x, c1y, c2x, c2y, p2x, p2y, length) {
        if (length == null) {
            return bezlen(p1x, p1y, c1x, c1y, c2x, c2y, p2x, p2y);
        } else {
            return R.findDotsAtSegment(p1x, p1y, c1x, c1y, c2x, c2y, p2x, p2y, getTatLen(p1x, p1y, c1x, c1y, c2x, c2y, p2x, p2y, length));
        }
    },
    getLengthFactory = function (istotal, subpath) {
        return function (path, length, onlystart) {
            // Normalise all commands to absolute cubic curves ("M" + "C"s).
            path = path2curve(path);
            var x, y, p, l, sp = "", subpaths = {}, point,
                len = 0;
            for (var i = 0, ii = path.length; i < ii; i++) {
                p = path[i];
                if (p[0] == "M") {
                    x = +p[1];
                    y = +p[2];
                } else {
                    l = getPointAtSegmentLength(x, y, p[1], p[2], p[3], p[4], p[5], p[6]);
                    // The target length falls inside this segment.
                    if (len + l > length) {
                        if (subpath && !subpaths.start) {
                            // Split the segment at the target length; `sp` up to
                            // here plus the first half becomes the "start" path.
                            point = getPointAtSegmentLength(x, y, p[1], p[2], p[3], p[4], p[5], p[6], length - len);
                            sp += ["C" + point.start.x, point.start.y, point.m.x, point.m.y, point.x, point.y];
                            if (onlystart) {return sp;}
                            subpaths.start = sp;
                            // Begin accumulating the "end" path from the split point.
                            sp = ["M" + point.x, point.y + "C" + point.n.x, point.n.y, point.end.x, point.end.y, p[5], p[6]].join();
                            len += l;
                            x = +p[5];
                            y = +p[6];
                            continue;
                        }
                        if (!istotal && !subpath) {
                            // Point-at-length variant: return the hit point.
                            point = getPointAtSegmentLength(x, y, p[1], p[2], p[3], p[4], p[5], p[6], length - len);
                            return {x: point.x, y: point.y, alpha: point.alpha};
                        }
                    }
                    len += l;
                    x = +p[5];
                    y = +p[6];
                }
                sp += p.shift() + p;
            }
            subpaths.end = sp;
            // Fallback result: total length, the subpath pair, or the path's end point.
            point = istotal ? len : subpath ? subpaths : R.findDotsAtSegment(x, y, p[0], p[1], p[2], p[3], p[4], p[5], 1);
            point.alpha && (point = {x: point.x, y: point.y, alpha: point.alpha});
            return point;
        };
    };
var getTotalLength = getLengthFactory(1),
    getPointAtLength = getLengthFactory(),
    getSubpathsAtLength = getLengthFactory(0, 1);
/*\
* Raphael.getTotalLength
[ method ]
**
* Returns length of the given path in pixels.
**
> Parameters
**
- path (string) SVG path string.
**
= (number) length.
\*/
// Public alias: total length (px) of an SVG path string.
R.getTotalLength = getTotalLength;
/*\
* Raphael.getPointAtLength
[ method ]
**
* Return coordinates of the point located at the given length on the given path.
**
> Parameters
**
- path (string) SVG path string
- length (number)
**
= (object) representation of the point:
o {
o x: (number) x coordinate
o y: (number) y coordinate
o alpha: (number) angle of derivative
o }
\*/
// Public alias: {x, y, alpha} at a given length along an SVG path string.
R.getPointAtLength = getPointAtLength;
/*\
* Raphael.getSubpath
[ method ]
**
* Return subpath of a given path from given length to given length.
**
> Parameters
**
- path (string) SVG path string
- from (number) position of the start of the segment
- to (number) position of the end of the segment
**
= (string) pathstring for the segment
\*/
// Extracts the subpath between lengths `from` and `to`.
R.getSubpath = function (path, from, to) {
    // When `to` is (within tolerance) the end of the path, only the start
    // needs cutting: keep everything after `from`.
    if (this.getTotalLength(path) - to < 1e-6) {
        return getSubpathsAtLength(path, from).end;
    }
    // Otherwise cut at `to` keeping the head (onlystart=1), then cut that
    // head again at `from` and keep its tail.
    var a = getSubpathsAtLength(path, to, 1);
    return from ? getSubpathsAtLength(a, from).end : a;
};
/*\
* Element.getTotalLength
[ method ]
**
* Returns length of the path in pixels. Only works for element of “path” type.
= (number) length.
\*/
// Length of this element's path in px ("path"-like elements only).
// Prefers the browser's native SVG measurement when the node supports it;
// returns undefined when the element has no path representation.
elproto.getTotalLength = function () {
    var path = this.getPath();
    if (path) {
        return this.node.getTotalLength ? this.node.getTotalLength() : getTotalLength(path);
    }
};
/*\
* Element.getPointAtLength
[ method ]
**
* Return coordinates of the point located at the given length on the given path. Only works for element of “path” type.
**
> Parameters
**
- length (number)
**
= (object) representation of the point:
o {
o x: (number) x coordinate
o y: (number) y coordinate
o alpha: (number) angle of derivative
o }
\*/
// Point ({x, y, alpha}) at the given length along this element's path;
// undefined when the element has no path representation.
elproto.getPointAtLength = function (length) {
    var path = this.getPath();
    return path ? getPointAtLength(path, length) : undefined;
};
/*\
* Element.getPath
[ method ]
**
* Returns path of the element. Only works for elements of “path” type and simple elements like circle.
= (object) path
**
\*/
// Returns this element's path representation via the per-type converters in
// R._getPath. Text elements and sets have no single path, so they yield
// undefined (as does any type without a registered converter).
elproto.getPath = function () {
    if (this.type == "text" || this.type == "set") {
        return;
    }
    var builder = R._getPath[this.type];
    return builder ? builder(this) : undefined;
};
/*\
* Element.getSubpath
[ method ]
**
* Return subpath of a given element from given length to given length. Only works for element of “path” type.
**
> Parameters
**
- from (number) position of the start of the segment
- to (number) position of the end of the segment
**
= (string) pathstring for the segment
\*/
// Subpath string of this element between lengths `from` and `to`;
// undefined when the element has no path representation.
elproto.getSubpath = function (from, to) {
    var path = this.getPath();
    return path ? R.getSubpath(path, from, to) : undefined;
};
/*\
* Raphael.easing_formulas
[ property ]
**
* Object that contains easing formulas for animation. You could extend it with your own. By default it has following list of easing:
# <ul>
# <li>“linear”</li>
# <li>“<” or “easeIn” or “ease-in”</li>
# <li>“>” or “easeOut” or “ease-out”</li>
# <li>“<>” or “easeInOut” or “ease-in-out”</li>
# <li>“backIn” or “back-in”</li>
# <li>“backOut” or “back-out”</li>
# <li>“elastic”</li>
# <li>“bounce”</li>
# </ul>
# <p>See also <a href="http://raphaeljs.com/easing.html">Easing demo</a>.</p>
\*/
// Easing functions mapping normalised time n in [0, 1] to progress.
// Extendable by users; aliases for the symbolic names are added below.
var ef = R.easing_formulas = {
    linear: function (n) {
        return n;
    },
    // ease-in: slow start.
    "<": function (n) {
        return pow(n, 1.7);
    },
    // ease-out: slow finish.
    ">": function (n) {
        return pow(n, .48);
    },
    // ease-in-out: closed-form solve of a cubic for smooth start and finish.
    "<>": function (n) {
        var q = .48 - n / 1.04,
            Q = math.sqrt(.1734 + q * q),
            x = Q - q,
            X = pow(abs(x), 1 / 3) * (x < 0 ? -1 : 1),
            y = -Q - q,
            Y = pow(abs(y), 1 / 3) * (y < 0 ? -1 : 1),
            t = X + Y + .5;
        return (1 - t) * 3 * t * t + t * t * t;
    },
    // Overshoots backwards before starting (s is the overshoot amount).
    backIn: function (n) {
        var s = 1.70158;
        return n * n * ((s + 1) * n - s);
    },
    // Overshoots past the target before settling.
    backOut: function (n) {
        n = n - 1;
        var s = 1.70158;
        return n * n * ((s + 1) * n + s) + 1;
    },
    // Exponentially decaying sine oscillation around the target.
    elastic: function (n) {
        if (n == !!n) {
            // Exactly 0 or 1: return unchanged to avoid rounding artifacts.
            return n;
        }
        return pow(2, -10 * n) * math.sin((n - .075) * (2 * PI) / .3) + 1;
    },
    // Piecewise parabolic "bouncing ball" easing.
    bounce: function (n) {
        var s = 7.5625,
            p = 2.75,
            l;
        if (n < (1 / p)) {
            l = s * n * n;
        } else {
            if (n < (2 / p)) {
                n -= (1.5 / p);
                l = s * n * n + .75;
            } else {
                if (n < (2.5 / p)) {
                    n -= (2.25 / p);
                    l = s * n * n + .9375;
                } else {
                    n -= (2.625 / p);
                    l = s * n * n + .984375;
                }
            }
        }
        return l;
    }
};
// CSS-style and camelCase aliases for the symbolic easing names.
ef.easeIn = ef["ease-in"] = ef["<"];
ef.easeOut = ef["ease-out"] = ef[">"];
ef.easeInOut = ef["ease-in-out"] = ef["<>"];
ef["back-in"] = ef.backIn;
ef["back-out"] = ef.backOut;
// Core animation machinery.
// `animationElements` holds one record per running animation instance;
// `animation` is the frame callback that advances every record, and
// reschedules itself (via requestAnimationFrame or a 16 ms timeout
// fallback) while any record remains.
var animationElements = [],
    requestAnimFrame = window.requestAnimationFrame ||
                       window.webkitRequestAnimationFrame ||
                       window.mozRequestAnimationFrame ||
                       window.oRequestAnimationFrame ||
                       window.msRequestAnimationFrame ||
                       function (callback) {
                           setTimeout(callback, 16);
                       },
    animation = function () {
        var Now = +new Date,
            l = 0;
        for (; l < animationElements.length; l++) {
            var e = animationElements[l];
            // Skip removed elements and paused animations.
            if (e.el.removed || e.paused) {
                continue;
            }
            var time = Now - e.start,
                ms = e.ms,
                easing = e.easing,
                from = e.from,
                diff = e.diff,
                to = e.to,
                t = e.t,
                that = e.el,
                set = {},
                now,
                init = {},
                key;
            if (e.initstatus) {
                // Animation was started at a given status (Element.status setter):
                // derive the elapsed time from that status instead of the clock.
                time = (e.initstatus * e.anim.top - e.prev) / (e.percent - e.prev) * ms;
                e.status = e.initstatus;
                delete e.initstatus;
                e.stop && animationElements.splice(l--, 1);
            } else {
                e.status = (e.prev + (e.percent - e.prev) * (time / ms)) / e.anim.top;
            }
            // Negative time means a delayed animation that has not begun yet.
            if (time < 0) {
                continue;
            }
            if (time < ms) {
                // Mid-flight: interpolate every animated attribute.
                // `diff` stores per-millisecond deltas, so pos * ms * diff
                // yields the eased progress for each attribute.
                var pos = easing(time / ms);
                for (var attr in from) if (from[has](attr)) {
                    switch (availableAnimAttrs[attr]) {
                        case nu:
                            now = +from[attr] + pos * ms * diff[attr];
                            break;
                        case "colour":
                            now = "rgb(" + [
                                upto255(round(from[attr].r + pos * ms * diff[attr].r)),
                                upto255(round(from[attr].g + pos * ms * diff[attr].g)),
                                upto255(round(from[attr].b + pos * ms * diff[attr].b))
                            ].join(",") + ")";
                            break;
                        case "path":
                            // Interpolate each curve segment's numeric arguments.
                            now = [];
                            for (var i = 0, ii = from[attr].length; i < ii; i++) {
                                now[i] = [from[attr][i][0]];
                                for (var j = 1, jj = from[attr][i].length; j < jj; j++) {
                                    now[i][j] = +from[attr][i][j] + pos * ms * diff[attr][i][j];
                                }
                                now[i] = now[i].join(S);
                            }
                            now = now.join(S);
                            break;
                        case "transform":
                            if (diff[attr].real) {
                                // Transform commands were equalised: interpolate per command.
                                now = [];
                                for (i = 0, ii = from[attr].length; i < ii; i++) {
                                    now[i] = [from[attr][i][0]];
                                    for (j = 1, jj = from[attr][i].length; j < jj; j++) {
                                        now[i][j] = from[attr][i][j] + pos * ms * diff[attr][i][j];
                                    }
                                }
                            } else {
                                // Fallback: interpolate the raw 6-value matrix.
                                var get = function (i) {
                                    return +from[attr][i] + pos * ms * diff[attr][i];
                                };
                                // now = [["r", get(2), 0, 0], ["t", get(3), get(4)], ["s", get(0), get(1), 0, 0]];
                                now = [["m", get(0), get(1), get(2), get(3), get(4), get(5)]];
                            }
                            break;
                        case "csv":
                            if (attr == "clip-rect") {
                                now = [];
                                i = 4;
                                while (i--) {
                                    now[i] = +from[attr][i] + pos * ms * diff[attr][i];
                                }
                            }
                            break;
                        default:
                            // Custom attribute: interpolate each component.
                            var from2 = [][concat](from[attr]);
                            now = [];
                            i = that.paper.customAttributes[attr].length;
                            while (i--) {
                                now[i] = +from2[i] + pos * ms * diff[attr][i];
                            }
                            break;
                    }
                    set[attr] = now;
                }
                that.attr(set);
                // Fire the frame event asynchronously so listeners cannot
                // stall the animation loop.
                (function (id, that, anim) {
                    setTimeout(function () {
                        eve("raphael.anim.frame." + id, that, anim);
                    });
                })(that.id, that, e.anim);
            } else {
                // Finished: fire final frame + finish events, snap to the
                // target attributes and drop the record.
                (function(f, el, a) {
                    setTimeout(function() {
                        eve("raphael.anim.frame." + el.id, el, a);
                        eve("raphael.anim.finish." + el.id, el, a);
                        R.is(f, "function") && f.call(el);
                    });
                })(e.callback, that, e.anim);
                that.attr(to);
                animationElements.splice(l--, 1);
                // Repeat: reset to the original attributes and restart with
                // the remaining iteration count.
                if (e.repeat > 1 && !e.next) {
                    for (key in to) if (to[has](key)) {
                        init[key] = e.totalOrigin[key];
                    }
                    e.el.attr(init);
                    runAnimation(e.anim, e.el, e.anim.percents[0], null, e.totalOrigin, e.repeat - 1);
                }
                // Chained keyframe: continue with the next percent stop.
                if (e.next && !e.stop) {
                    runAnimation(e.anim, e.el, e.next, null, e.totalOrigin, e.repeat);
                }
            }
        }
        // Safari repaint workaround, then schedule the next frame if needed.
        R.svg && that && that.paper && that.paper.safari();
        animationElements.length && requestAnimFrame(animation);
    },
    // Clamps a channel value into the valid 0..255 range.
    upto255 = function (color) {
        return color > 255 ? 255 : color < 0 ? 0 : color;
    };
/*\
* Element.animateWith
[ method ]
**
* Acts similar to @Element.animate, but ensure that given animation runs in sync with another given element.
**
> Parameters
**<|fim▁hole|> - ms (number) #optional number of milliseconds for animation to run
- easing (string) #optional easing type. Accept on of @Raphael.easing_formulas or CSS format: `cubic‐bezier(XX, XX, XX, XX)`
- callback (function) #optional callback function. Will be called at the end of animation.
* or
- element (object) element to sync with
- anim (object) animation to sync with
- animation (object) #optional animation object, see @Raphael.animation
**
= (object) original element
\*/
// Like Element.animate, but synchronises this animation's start time with an
// animation (`anim`) already running on another element (`el`), so both
// progress in lockstep.
elproto.animateWith = function (el, anim, params, ms, easing, callback) {
    var element = this;
    if (element.removed) {
        callback && callback.call(element);
        return element;
    }
    var a = params instanceof Animation ? params : R.animation(params, ms, easing, callback),
        x, y;
    runAnimation(a, element, a.percents[0], null, element.attr());
    // runAnimation pushed a fresh record at the end of animationElements;
    // copy the reference animation's start timestamp onto it so the two
    // timelines coincide.
    for (var i = 0, ii = animationElements.length; i < ii; i++) {
        if (animationElements[i].anim == anim && animationElements[i].el == el) {
            animationElements[ii - 1].start = animationElements[i].start;
            break;
        }
    }
    return element;
    //
    //
    // var a = params ? R.animation(params, ms, easing, callback) : anim,
    //     status = element.status(anim);
    // return this.animate(a).status(a, status * anim.ms / a.ms);
};
// Evaluates a CSS-style cubic-bezier timing function at time fraction `t`,
// given control points (p1x, p1y) and (p2x, p2y). Solves x(param) = t for the
// curve parameter, then returns y at that parameter. Mirrors WebKit's
// UnitBezier solver: Newton-Raphson first, falling back to bisection.
function CubicBezierAtTime(t, p1x, p1y, p2x, p2y, duration) {
    // Polynomial coefficients for the x and y component cubics.
    var cx = 3 * p1x,
        bx = 3 * (p2x - p1x) - cx,
        ax = 1 - cx - bx,
        cy = 3 * p1y,
        by = 3 * (p2y - p1y) - cy,
        ay = 1 - cy - by;
    function sampleCurveX(t) {
        return ((ax * t + bx) * t + cx) * t;
    }
    function solve(x, epsilon) {
        var t = solveCurveX(x, epsilon);
        // Evaluate the y cubic at the solved parameter.
        return ((ay * t + by) * t + cy) * t;
    }
    function solveCurveX(x, epsilon) {
        var t0, t1, t2, x2, d2, i;
        // Newton-Raphson: fast convergence for well-behaved curves.
        for(t2 = x, i = 0; i < 8; i++) {
            x2 = sampleCurveX(t2) - x;
            if (abs(x2) < epsilon) {
                return t2;
            }
            d2 = (3 * ax * t2 + 2 * bx) * t2 + cx;
            if (abs(d2) < 1e-6) {
                // Derivative too small — Newton would diverge; bisect instead.
                break;
            }
            t2 = t2 - x2 / d2;
        }
        // Bisection fallback on [0, 1].
        t0 = 0;
        t1 = 1;
        t2 = x;
        if (t2 < t0) {
            return t0;
        }
        if (t2 > t1) {
            return t1;
        }
        while (t0 < t1) {
            x2 = sampleCurveX(t2);
            if (abs(x2 - x) < epsilon) {
                return t2;
            }
            if (x > x2) {
                t0 = t2;
            } else {
                t1 = t2;
            }
            t2 = (t1 - t0) / 2 + t0;
        }
        return t2;
    }
    // Solver accuracy scales with animation duration, as in WebKit.
    return solve(t, 1 / (200 * duration));
}
// Registers `f` as a per-frame listener for this element's animations;
// calling without an argument unbinds all frame listeners for the element.
elproto.onAnimation = function (f) {
    if (f) {
        eve.on("raphael.anim.frame." + this.id, f);
    } else {
        eve.unbind("raphael.anim.frame." + this.id);
    }
    return this;
};
// Animation descriptor. `anim` maps keyframe percents (as property names) to
// attribute objects; the percents are normalised to numbers and sorted so the
// runner can walk the keyframes in order. `top` is the last (largest) percent.
function Animation(anim, ms) {
    var percents = [],
        newAnim = {};
    this.ms = ms;
    // Default iteration count; Animation.repeat overrides this.
    this.times = 1;
    if (anim) {
        for (var attr in anim) if (anim[has](attr)) {
            newAnim[toFloat(attr)] = anim[attr];
            percents.push(toFloat(attr));
        }
        percents.sort(sortByNumber);
    }
    this.anim = newAnim;
    this.top = percents[percents.length - 1];
    this.percents = percents;
}
/*\
* Animation.delay
[ method ]
**
* Creates a copy of existing animation object with given delay.
**
> Parameters
**
- delay (number) number of ms to pass between animation start and actual animation
**
= (object) new altered Animation object
| var anim = Raphael.animation({cx: 10, cy: 20}, 2e3);
| circle1.animate(anim); // run the given animation immediately
| circle2.animate(anim.delay(500)); // run the given animation after 500 ms
\*/
// Returns a copy of this animation that waits `delay` ms before starting;
// the original object is left untouched.
Animation.prototype.delay = function (delay) {
    var copy = new Animation(this.anim, this.ms);
    copy.times = this.times;
    copy.del = +delay || 0;
    return copy;
};
/*\
* Animation.repeat
[ method ]
**
* Creates a copy of existing animation object with given repetition.
**
> Parameters
**
- repeat (number) number iterations of animation. For infinite animation pass `Infinity`
**
= (object) new altered Animation object
\*/
// Returns a copy of this animation that runs `times` iterations (Infinity is
// allowed and survives the floor); the original object is left untouched.
Animation.prototype.repeat = function (times) {
    var copy = new Animation(this.anim, this.ms);
    copy.del = this.del;
    copy.times = math.floor(mmax(times, 0)) || 1;
    return copy;
};
// Starts (or re-seeks) one keyframe segment of `anim` on `element`.
//  - percent: the keyframe stop to run towards.
//  - status: when non-null, seek to this 0..1 overall status instead of
//    starting fresh (used by Element.status / setTime / resume).
//  - totalOrigin: the element's attributes before the whole animation,
//    needed for repeats.
//  - times: remaining iteration count.
// Builds per-attribute `from` / `to` / per-ms `diff` tables the frame loop
// interpolates with, then registers the record in animationElements.
function runAnimation(anim, element, percent, status, totalOrigin, times) {
    percent = toFloat(percent);
    var params,
        isInAnim,
        isInAnimSet,
        percents = [],
        next,
        prev,
        timestamp,
        ms = anim.ms,
        from = {},
        to = {},
        diff = {};
    if (status) {
        // Seeking: find an existing record for this element+animation.
        for (i = 0, ii = animationElements.length; i < ii; i++) {
            var e = animationElements[i];
            if (e.el.id == element.id && e.anim == anim) {
                if (e.percent != percent) {
                    // Seeking into a different keyframe segment: drop the old
                    // record and rebuild below.
                    animationElements.splice(i, 1);
                    isInAnimSet = 1;
                } else {
                    isInAnim = e;
                }
                element.attr(e.totalOrigin);
                break;
            }
        }
    } else {
        status = +to; // NaN
    }
    // Locate the keyframe segment containing the requested percent/status,
    // applying any earlier keyframes' attributes outright when seeking.
    for (var i = 0, ii = anim.percents.length; i < ii; i++) {
        if (anim.percents[i] == percent || anim.percents[i] > status * anim.top) {
            percent = anim.percents[i];
            prev = anim.percents[i - 1] || 0;
            // Scale the duration to this segment's share of the timeline.
            ms = ms / anim.top * (percent - prev);
            next = anim.percents[i + 1];
            params = anim.anim[percent];
            break;
        } else if (status) {
            element.attr(anim.anim[anim.percents[i]]);
        }
    }
    if (!params) {
        return;
    }
    if (!isInAnim) {
        // Fresh segment: compute from/to/diff for every animatable attribute.
        for (var attr in params) if (params[has](attr)) {
            if (availableAnimAttrs[has](attr) || element.paper.customAttributes[has](attr)) {
                from[attr] = element.attr(attr);
                (from[attr] == null) && (from[attr] = availableAttrs[attr]);
                to[attr] = params[attr];
                switch (availableAnimAttrs[attr]) {
                    case nu:
                        // diff is the per-millisecond delta.
                        diff[attr] = (to[attr] - from[attr]) / ms;
                        break;
                    case "colour":
                        from[attr] = R.getRGB(from[attr]);
                        var toColour = R.getRGB(to[attr]);
                        diff[attr] = {
                            r: (toColour.r - from[attr].r) / ms,
                            g: (toColour.g - from[attr].g) / ms,
                            b: (toColour.b - from[attr].b) / ms
                        };
                        break;
                    case "path":
                        // Normalise both paths to matching curve segment lists.
                        var pathes = path2curve(from[attr], to[attr]),
                            toPath = pathes[1];
                        from[attr] = pathes[0];
                        diff[attr] = [];
                        for (i = 0, ii = from[attr].length; i < ii; i++) {
                            diff[attr][i] = [0];
                            for (var j = 1, jj = from[attr][i].length; j < jj; j++) {
                                diff[attr][i][j] = (toPath[i][j] - from[attr][i][j]) / ms;
                            }
                        }
                        break;
                    case "transform":
                        var _ = element._,
                            eq = equaliseTransform(_[attr], to[attr]);
                        if (eq) {
                            // Commands could be matched up: interpolate them directly.
                            from[attr] = eq.from;
                            to[attr] = eq.to;
                            diff[attr] = [];
                            diff[attr].real = true;
                            for (i = 0, ii = from[attr].length; i < ii; i++) {
                                diff[attr][i] = [from[attr][i][0]];
                                for (j = 1, jj = from[attr][i].length; j < jj; j++) {
                                    diff[attr][i][j] = (to[attr][i][j] - from[attr][i][j]) / ms;
                                }
                            }
                        } else {
                            // Fall back to interpolating the raw 6-value matrices.
                            var m = (element.matrix || new Matrix),
                                to2 = {
                                    _: {transform: _.transform},
                                    getBBox: function () {
                                        return element.getBBox(1);
                                    }
                                };
                            from[attr] = [
                                m.a,
                                m.b,
                                m.c,
                                m.d,
                                m.e,
                                m.f
                            ];
                            extractTransform(to2, to[attr]);
                            to[attr] = to2._.transform;
                            diff[attr] = [
                                (to2.matrix.a - m.a) / ms,
                                (to2.matrix.b - m.b) / ms,
                                (to2.matrix.c - m.c) / ms,
                                (to2.matrix.d - m.d) / ms,
                                (to2.matrix.e - m.e) / ms,
                                (to2.matrix.f - m.f) / ms
                            ];
                            // from[attr] = [_.sx, _.sy, _.deg, _.dx, _.dy];
                            // var to2 = {_:{}, getBBox: function () { return element.getBBox(); }};
                            // extractTransform(to2, to[attr]);
                            // diff[attr] = [
                            //     (to2._.sx - _.sx) / ms,
                            //     (to2._.sy - _.sy) / ms,
                            //     (to2._.deg - _.deg) / ms,
                            //     (to2._.dx - _.dx) / ms,
                            //     (to2._.dy - _.dy) / ms
                            // ];
                        }
                        break;
                    case "csv":
                        var values = Str(params[attr])[split](separator),
                            from2 = Str(from[attr])[split](separator);
                        if (attr == "clip-rect") {
                            from[attr] = from2;
                            diff[attr] = [];
                            i = from2.length;
                            while (i--) {
                                diff[attr][i] = (values[i] - from[attr][i]) / ms;
                            }
                        }
                        to[attr] = values;
                        break;
                    default:
                        // Custom attribute: componentwise deltas.
                        values = [][concat](params[attr]);
                        from2 = [][concat](from[attr]);
                        diff[attr] = [];
                        i = element.paper.customAttributes[attr].length;
                        while (i--) {
                            diff[attr][i] = ((values[i] || 0) - (from2[i] || 0)) / ms;
                        }
                        break;
                }
            }
        }
        // Resolve easing: named formula, CSS cubic-bezier(...), or linear.
        var easing = params.easing,
            easyeasy = R.easing_formulas[easing];
        if (!easyeasy) {
            easyeasy = Str(easing).match(bezierrg);
            if (easyeasy && easyeasy.length == 5) {
                var curve = easyeasy;
                easyeasy = function (t) {
                    return CubicBezierAtTime(t, +curve[1], +curve[2], +curve[3], +curve[4], ms);
                };
            } else {
                easyeasy = pipe;
            }
        }
        timestamp = params.start || anim.start || +new Date;
        e = {
            anim: anim,
            percent: percent,
            timestamp: timestamp,
            start: timestamp + (anim.del || 0),
            status: 0,
            initstatus: status || 0,
            stop: false,
            ms: ms,
            easing: easyeasy,
            from: from,
            diff: diff,
            to: to,
            el: element,
            callback: params.callback,
            prev: prev,
            next: next,
            repeat: times || anim.times,
            origin: element.attr(),
            totalOrigin: totalOrigin
        };
        animationElements.push(e);
        if (status && !isInAnim && !isInAnimSet) {
            // Pure seek (status setter): run one synchronous frame, no loop.
            e.stop = true;
            e.start = new Date - ms * status;
            if (animationElements.length == 1) {
                return animation();
            }
        }
        if (isInAnimSet) {
            // Rebuilt record after a cross-segment seek: back-date the start.
            e.start = new Date - e.ms * status;
        }
        // First record kicks off the frame loop.
        animationElements.length == 1 && requestAnimFrame(animation);
    } else {
        // Same segment already running: just re-seek the existing record.
        isInAnim.initstatus = status;
        isInAnim.start = new Date - isInAnim.ms * status;
    }
    eve("raphael.anim.start." + element.id, element, anim);
}
/*\
* Raphael.animation
[ method ]
**
* Creates an animation object that can be passed to the @Element.animate or @Element.animateWith methods.
* See also @Animation.delay and @Animation.repeat methods.
**
> Parameters
**
- params (object) final attributes for the element, see also @Element.attr
- ms (number) number of milliseconds for animation to run
- easing (string) #optional easing type. Accept one of @Raphael.easing_formulas or CSS format: `cubic‐bezier(XX, XX, XX, XX)`
- callback (function) #optional callback function. Will be called at the end of animation.
**
= (object) @Animation
\*/
// Creates an Animation object from final attributes (or keyframe percents).
// If `params` mixes plain attributes with percent keys, the plain ones are
// collected into a single 100% keyframe.
R.animation = function (params, ms, easing, callback) {
    if (params instanceof Animation) {
        return params;
    }
    // `easing` is optional: shift the callback left when easing is a
    // function or missing.
    if (R.is(easing, "function") || !easing) {
        callback = callback || easing || null;
        easing = null;
    }
    params = Object(params);
    ms = +ms || 0;
    var p = {},
        json,
        attr;
    // A key that is not a number (or "N%") is a plain attribute, meaning the
    // object is NOT already keyed by keyframe percents.
    for (attr in params) if (params[has](attr) && toFloat(attr) != attr && toFloat(attr) + "%" != attr) {
        json = true;
        p[attr] = params[attr];
    }
    if (!json) {
        // TODO: Need to rethink this way of detecting percent-keyed objects.
        return new Animation(params, ms);
    } else {
        // Wrap the plain attributes as a single 100% keyframe.
        easing && (p.easing = easing);
        callback && (p.callback = callback);
        return new Animation({100: p}, ms);
    }
};
/*\
* Element.animate
[ method ]
**
* Creates and starts animation for given element.
**
> Parameters
**
- params (object) final attributes for the element, see also @Element.attr
- ms (number) number of milliseconds for animation to run
- easing (string) #optional easing type. Accept one of @Raphael.easing_formulas or CSS format: `cubic‐bezier(XX, XX, XX, XX)`
- callback (function) #optional callback function. Will be called at the end of animation.
* or
- animation (object) animation object, see @Raphael.animation
**
= (object) original element
\*/
// Creates and immediately starts an animation on this element. Accepts either
// (params, ms, easing, callback) or a prebuilt Animation object. On a removed
// element the callback fires at once and nothing animates.
elproto.animate = function (params, ms, easing, callback) {
    if (this.removed) {
        if (callback) {
            callback.call(this);
        }
        return this;
    }
    var anim;
    if (params instanceof Animation) {
        anim = params;
    } else {
        anim = R.animation(params, ms, easing, callback);
    }
    runAnimation(anim, this, anim.percents[0], null, this.attr());
    return this;
};
/*\
* Element.setTime
[ method ]
**
* Sets the status of animation of the element in milliseconds. Similar to @Element.status method.
**
> Parameters
**
- anim (object) animation object
- value (number) number of milliseconds from the beginning of the animation
**
= (object) original element if `value` is specified
* Note, that during animation following events are triggered:
*
* On each animation frame event `anim.frame.<id>`, on start `anim.start.<id>` and on end `anim.finish.<id>`.
\*/
// Seeks the given animation to an absolute time in milliseconds by
// converting it into a clamped 0..1 status fraction.
elproto.setTime = function (anim, value) {
    if (anim && value != null) {
        this.status(anim, mmin(value, anim.ms) / anim.ms);
    }
    return this;
};
/*\
* Element.status
[ method ]
**
* Gets or sets the status of animation of the element.
**
> Parameters
**
- anim (object) #optional animation object
- value (number) #optional 0 – 1. If specified, method works like a setter and sets the status of a given animation to the value. This will cause animation to jump to the given position.
**
= (number) status
* or
= (array) status if `anim` is not specified. Array of objects in format:
o {
o anim: (object) animation object
o status: (number) status
o }
* or
= (object) original element if `value` is specified
\*/
// Getter/setter for animation status.
//  - With `value`: seeks the animation to that 0..1 status (setter).
//  - With only `anim`: returns its current status (0 if not running).
//  - With no arguments: returns [{anim, status}] for every running animation.
elproto.status = function (anim, value) {
    if (value != null) {
        runAnimation(anim, this, -1, mmin(value, 1));
        return this;
    }
    var statuses = [];
    for (var i = 0, len = animationElements.length; i < len; i++) {
        var entry = animationElements[i];
        if (entry.el.id == this.id && (!anim || entry.anim == anim)) {
            if (anim) {
                return entry.status;
            }
            statuses.push({
                anim: entry.anim,
                status: entry.status
            });
        }
    }
    return anim ? 0 : statuses;
};
/*\
* Element.pause
[ method ]
**
* Stops animation of the element with ability to resume it later on.
**
> Parameters
**
- anim (object) #optional animation object
**
= (object) original element
\*/
// Pauses animations on this element (all of them, or just `anim`).
// Listeners on "raphael.anim.pause" may veto a pause by returning false.
elproto.pause = function (anim) {
    for (var i = 0; i < animationElements.length; i++) {
        var entry = animationElements[i];
        if (entry.el.id != this.id || (anim && entry.anim != anim)) {
            continue;
        }
        if (eve("raphael.anim.pause." + this.id, this, entry.anim) !== false) {
            entry.paused = true;
        }
    }
    return this;
};
/*\
* Element.resume
[ method ]
**
* Resumes animation if it was paused with @Element.pause method.
**
> Parameters
**
- anim (object) #optional animation object
**
= (object) original element
\*/
// Resumes animations previously paused with Element.pause. Listeners on
// "raphael.anim.resume" may veto by returning false. Re-seeking via
// status() keeps the timeline continuous after the pause.
elproto.resume = function (anim) {
    for (var i = 0; i < animationElements.length; i++) {
        var entry = animationElements[i];
        if (entry.el.id != this.id || (anim && entry.anim != anim)) {
            continue;
        }
        if (eve("raphael.anim.resume." + this.id, this, entry.anim) !== false) {
            delete entry.paused;
            this.status(entry.anim, entry.status);
        }
    }
    return this;
};
/*\
* Element.stop
[ method ]
**
* Stops animation of the element.
**
> Parameters
**
- anim (object) #optional animation object
**
= (object) original element
\*/
elproto.stop = function (anim) {
for (var i = 0; i < animationElements.length; i++) if (animationElements[i].el.id == this.id && (!anim || animationElements[i].anim == anim)) {
if (eve("raphael.anim.stop." + this.id, this, animationElements[i].anim) !== false) {
animationElements.splice(i--, 1);
}
}
return this;
};
function stopAnimation(paper) {
for (var i = 0; i < animationElements.length; i++) if (animationElements[i].el.paper == paper) {
animationElements.splice(i--, 1);
}
}
eve.on("raphael.remove", stopAnimation);
eve.on("raphael.clear", stopAnimation);
elproto.toString = function () {
return "Rapha\xebl\u2019s object";
};
// Set
var Set = function (items) {
this.items = [];
this.length = 0;
this.type = "set";
if (items) {
for (var i = 0, ii = items.length; i < ii; i++) {
if (items[i] && (items[i].constructor == elproto.constructor || items[i].constructor == Set)) {
this[this.items.length] = this.items[this.items.length] = items[i];
this.length++;
}
}
}
},
setproto = Set.prototype;
/*\
* Set.push
[ method ]
**
* Adds each argument to the current set.
= (object) original element
\*/
setproto.push = function () {
var item,
len;
for (var i = 0, ii = arguments.length; i < ii; i++) {
item = arguments[i];
if (item && (item.constructor == elproto.constructor || item.constructor == Set)) {
len = this.items.length;
this[len] = this.items[len] = item;
this.length++;
}
}
return this;
};
/*\
* Set.pop
[ method ]
**
* Removes last element and returns it.
= (object) element
\*/
setproto.pop = function () {
this.length && delete this[this.length--];
return this.items.pop();
};
/*\
* Set.forEach
[ method ]
**
* Executes given function for each element in the set.
*
* If function returns `false` it will stop loop running.
**
> Parameters
**
- callback (function) function to run
- thisArg (object) context object for the callback
= (object) Set object
\*/
setproto.forEach = function (callback, thisArg) {
for (var i = 0, ii = this.items.length; i < ii; i++) {
if (callback.call(thisArg, this.items[i], i) === false) {
return this;
}
}
return this;
};
for (var method in elproto) if (elproto[has](method)) {
setproto[method] = (function (methodname) {
return function () {
var arg = arguments;
return this.forEach(function (el) {
el[methodname][apply](el, arg);
});
};
})(method);
}
setproto.attr = function (name, value) {
if (name && R.is(name, array) && R.is(name[0], "object")) {
for (var j = 0, jj = name.length; j < jj; j++) {
this.items[j].attr(name[j]);
}
} else {
for (var i = 0, ii = this.items.length; i < ii; i++) {
this.items[i].attr(name, value);
}
}
return this;
};
/*\
* Set.clear
[ method ]
**
* Removeds all elements from the set
\*/
setproto.clear = function () {
while (this.length) {
this.pop();
}
};
/*\
* Set.splice
[ method ]
**
* Removes given element from the set
**
> Parameters
**
- index (number) position of the deletion
- count (number) number of element to remove
- insertion… (object) #optional elements to insert
= (object) set elements that were deleted
\*/
setproto.splice = function (index, count, insertion) {
index = index < 0 ? mmax(this.length + index, 0) : index;
count = mmax(0, mmin(this.length - index, count));
var tail = [],
todel = [],
args = [],
i;
for (i = 2; i < arguments.length; i++) {
args.push(arguments[i]);
}
for (i = 0; i < count; i++) {
todel.push(this[index + i]);
}
for (; i < this.length - index; i++) {
tail.push(this[index + i]);
}
var arglen = args.length;
for (i = 0; i < arglen + tail.length; i++) {
this.items[index + i] = this[index + i] = i < arglen ? args[i] : tail[i - arglen];
}
i = this.items.length = this.length -= count - arglen;
while (this[i]) {
delete this[i++];
}
return new Set(todel);
};
/*\
* Set.exclude
[ method ]
**
* Removes given element from the set
**
> Parameters
**
- element (object) element to remove
= (boolean) `true` if object was found & removed from the set
\*/
setproto.exclude = function (el) {
for (var i = 0, ii = this.length; i < ii; i++) if (this[i] == el) {
this.splice(i, 1);
return true;
}
};
setproto.animate = function (params, ms, easing, callback) {
(R.is(easing, "function") || !easing) && (callback = easing || null);
var len = this.items.length,
i = len,
item,
set = this,
collector;
if (!len) {
return this;
}
callback && (collector = function () {
!--len && callback.call(set);
});
easing = R.is(easing, string) ? easing : collector;
var anim = R.animation(params, ms, easing, collector);
item = this.items[--i].animate(anim);
while (i--) {
this.items[i] && !this.items[i].removed && this.items[i].animateWith(item, anim, anim);
(this.items[i] && !this.items[i].removed) || len--;
}
return this;
};
setproto.insertAfter = function (el) {
var i = this.items.length;
while (i--) {
this.items[i].insertAfter(el);
}
return this;
};
setproto.getBBox = function () {
var x = [],
y = [],
x2 = [],
y2 = [];
for (var i = this.items.length; i--;) if (!this.items[i].removed) {
var box = this.items[i].getBBox();
x.push(box.x);
y.push(box.y);
x2.push(box.x + box.width);
y2.push(box.y + box.height);
}
x = mmin[apply](0, x);
y = mmin[apply](0, y);
x2 = mmax[apply](0, x2);
y2 = mmax[apply](0, y2);
return {
x: x,
y: y,
x2: x2,
y2: y2,
width: x2 - x,
height: y2 - y
};
};
setproto.clone = function (s) {
s = this.paper.set();
for (var i = 0, ii = this.items.length; i < ii; i++) {
s.push(this.items[i].clone());
}
return s;
};
setproto.toString = function () {
return "Rapha\xebl\u2018s set";
};
setproto.glow = function(glowConfig) {
var ret = this.paper.set();
this.forEach(function(shape, index){
var g = shape.glow(glowConfig);
if(g != null){
g.forEach(function(shape2, index2){
ret.push(shape2);
});
}
});
return ret;
};
/*\
* Set.isPointInside
[ method ]
**
* Determine if given point is inside this set’s elements
**
> Parameters
**
- x (number) x coordinate of the point
- y (number) y coordinate of the point
= (boolean) `true` if point is inside any of the set's elements
\*/
setproto.isPointInside = function (x, y) {
var isPointInside = false;
this.forEach(function (el) {
if (el.isPointInside(x, y)) {
isPointInside = true;
return false; // stop loop
}
});
return isPointInside;
};
/*\
* Raphael.registerFont
[ method ]
**
* Adds given font to the registered set of fonts for Raphaël. Should be used as an internal call from within Cufón’s font file.
* Returns original parameter, so it could be used with chaining.
# <a href="http://wiki.github.com/sorccu/cufon/about">More about Cufón and how to convert your font form TTF, OTF, etc to JavaScript file.</a>
**
> Parameters
**
- font (object) the font to register
= (object) the font you passed in
> Usage
| Cufon.registerFont(Raphael.registerFont({…}));
\*/
R.registerFont = function (font) {
if (!font.face) {
return font;
}
this.fonts = this.fonts || {};
var fontcopy = {
w: font.w,
face: {},
glyphs: {}
},
family = font.face["font-family"];
for (var prop in font.face) if (font.face[has](prop)) {
fontcopy.face[prop] = font.face[prop];
}
if (this.fonts[family]) {
this.fonts[family].push(fontcopy);
} else {
this.fonts[family] = [fontcopy];
}
if (!font.svg) {
fontcopy.face["units-per-em"] = toInt(font.face["units-per-em"], 10);
for (var glyph in font.glyphs) if (font.glyphs[has](glyph)) {
var path = font.glyphs[glyph];
fontcopy.glyphs[glyph] = {
w: path.w,
k: {},
d: path.d && "M" + path.d.replace(/[mlcxtrv]/g, function (command) {
return {l: "L", c: "C", x: "z", t: "m", r: "l", v: "c"}[command] || "M";
}) + "z"
};
if (path.k) {
for (var k in path.k) if (path[has](k)) {
fontcopy.glyphs[glyph].k[k] = path.k[k];
}
}
}
}
return font;
};
/*\
* Paper.getFont
[ method ]
**
* Finds font object in the registered fonts by given parameters. You could specify only one word from the font name, like “Myriad” for “Myriad Pro”.
**
> Parameters
**
- family (string) font family name or any word from it
- weight (string) #optional font weight
- style (string) #optional font style
- stretch (string) #optional font stretch
= (object) the font object
> Usage
| paper.print(100, 100, "Test string", paper.getFont("Times", 800), 30);
\*/
paperproto.getFont = function (family, weight, style, stretch) {
stretch = stretch || "normal";
style = style || "normal";
weight = +weight || {normal: 400, bold: 700, lighter: 300, bolder: 800}[weight] || 400;
if (!R.fonts) {
return;
}
var font = R.fonts[family];
if (!font) {
var name = new RegExp("(^|\\s)" + family.replace(/[^\w\d\s+!~.:_-]/g, E) + "(\\s|$)", "i");
for (var fontName in R.fonts) if (R.fonts[has](fontName)) {
if (name.test(fontName)) {
font = R.fonts[fontName];
break;
}
}
}
var thefont;
if (font) {
for (var i = 0, ii = font.length; i < ii; i++) {
thefont = font[i];
if (thefont.face["font-weight"] == weight && (thefont.face["font-style"] == style || !thefont.face["font-style"]) && thefont.face["font-stretch"] == stretch) {
break;
}
}
}
return thefont;
};
/*\
* Paper.print
[ method ]
**
* Creates path that represent given text written using given font at given position with given size.
* Result of the method is path element that contains whole text as a separate path.
**
> Parameters
**
- x (number) x position of the text
- y (number) y position of the text
- string (string) text to print
- font (object) font object, see @Paper.getFont
- size (number) #optional size of the font, default is `16`
- origin (string) #optional could be `"baseline"` or `"middle"`, default is `"middle"`
- letter_spacing (number) #optional number in range `-1..1`, default is `0`
- line_spacing (number) #optional number in range `1..3`, default is `1`
= (object) resulting path element, which consist of all letters
> Usage
| var txt = r.print(10, 50, "print", r.getFont("Museo"), 30).attr({fill: "#fff"});
\*/
paperproto.print = function (x, y, string, font, size, origin, letter_spacing, line_spacing) {
origin = origin || "middle"; // baseline|middle
letter_spacing = mmax(mmin(letter_spacing || 0, 1), -1);
line_spacing = mmax(mmin(line_spacing || 1, 3), 1);
var letters = Str(string)[split](E),
shift = 0,
notfirst = 0,
path = E,
scale;
R.is(font, "string") && (font = this.getFont(font));
if (font) {
scale = (size || 16) / font.face["units-per-em"];
var bb = font.face.bbox[split](separator),
top = +bb[0],
lineHeight = bb[3] - bb[1],
shifty = 0,
height = +bb[1] + (origin == "baseline" ? lineHeight + (+font.face.descent) : lineHeight / 2);
for (var i = 0, ii = letters.length; i < ii; i++) {
if (letters[i] == "\n") {
shift = 0;
curr = 0;
notfirst = 0;
shifty += lineHeight * line_spacing;
} else {
var prev = notfirst && font.glyphs[letters[i - 1]] || {},
curr = font.glyphs[letters[i]];
shift += notfirst ? (prev.w || font.w) + (prev.k && prev.k[letters[i]] || 0) + (font.w * letter_spacing) : 0;
notfirst = 1;
}
if (curr && curr.d) {
path += R.transformPath(curr.d, ["t", shift * scale, shifty * scale, "s", scale, scale, top, height, "t", (x - top) / scale, (y - height) / scale]);
}
}
}
return this.path(path).attr({
fill: "#000",
stroke: "none"
});
};
/*\
* Paper.add
[ method ]
**
* Imports elements in JSON array in format `{type: type, <attributes>}`
**
> Parameters
**
- json (array)
= (object) resulting set of imported elements
> Usage
| paper.add([
| {
| type: "circle",
| cx: 10,
| cy: 10,
| r: 5
| },
| {
| type: "rect",
| x: 10,
| y: 10,
| width: 10,
| height: 10,
| fill: "#fc0"
| }
| ]);
\*/
paperproto.add = function (json) {
if (R.is(json, "array")) {
var res = this.set(),
i = 0,
ii = json.length,
j;
for (; i < ii; i++) {
j = json[i] || {};
elements[has](j.type) && res.push(this[j.type]().attr(j));
}
}
return res;
};
/*\
* Raphael.format
[ method ]
**
* Simple format function. Replaces construction of type “`{<number>}`” to the corresponding argument.
**
> Parameters
**
- token (string) string to format
- … (string) rest of arguments will be treated as parameters for replacement
= (string) formated string
> Usage
| var x = 10,
| y = 20,
| width = 40,
| height = 50;
| // this will draw a rectangular shape equivalent to "M10,20h40v50h-40z"
| paper.path(Raphael.format("M{0},{1}h{2}v{3}h{4}z", x, y, width, height, -width));
\*/
R.format = function (token, params) {
var args = R.is(params, array) ? [0][concat](params) : arguments;
token && R.is(token, string) && args.length - 1 && (token = token.replace(formatrg, function (str, i) {
return args[++i] == null ? E : args[i];
}));
return token || E;
};
/*\
* Raphael.fullfill
[ method ]
**
* A little bit more advanced format function than @Raphael.format. Replaces construction of type “`{<name>}`” to the corresponding argument.
**
> Parameters
**
- token (string) string to format
- json (object) object which properties will be used as a replacement
= (string) formated string
> Usage
| // this will draw a rectangular shape equivalent to "M10,20h40v50h-40z"
| paper.path(Raphael.fullfill("M{x},{y}h{dim.width}v{dim.height}h{dim['negative width']}z", {
| x: 10,
| y: 20,
| dim: {
| width: 40,
| height: 50,
| "negative width": -40
| }
| }));
\*/
R.fullfill = (function () {
var tokenRegex = /\{([^\}]+)\}/g,
objNotationRegex = /(?:(?:^|\.)(.+?)(?=\[|\.|$|\()|\[('|")(.+?)\2\])(\(\))?/g, // matches .xxxxx or ["xxxxx"] to run over object properties
replacer = function (all, key, obj) {
var res = obj;
key.replace(objNotationRegex, function (all, name, quote, quotedName, isFunc) {
name = name || quotedName;
if (res) {
if (name in res) {
res = res[name];
}
typeof res == "function" && isFunc && (res = res());
}
});
res = (res == null || res == obj ? all : res) + "";
return res;
};
return function (str, obj) {
return String(str).replace(tokenRegex, function (all, key) {
return replacer(all, key, obj);
});
};
})();
    /*\
     * Raphael.ninja
     [ method ]
     **
     * If you want to leave no trace of Raphaël (Well, Raphaël creates only one global variable `Raphael`, but anyway.) You can use `ninja` method.
     * Beware, that in this case plugins could stop working, because they are depending on global variable existance.
     **
     = (object) Raphael object
     > Usage
     | (function (local_raphael) {
     |     var paper = local_raphael(10, 10, 320, 200);
     |     …
     | })(Raphael.ninja());
    \*/
    R.ninja = function () {
        // Restore whatever previously occupied window.Raphael, or remove the
        // global entirely if there was nothing to restore.
        oldRaphael.was ? (g.win.Raphael = oldRaphael.is) : delete Raphael;
        return R;
    };
    /*\
     * Raphael.st
     [ property (object) ]
     **
     * You can add your own method to elements and sets. It is wise to add a set method for each element method
     * you added, so you will be able to call the same method on sets too.
     **
     * See also @Raphael.el.
     > Usage
     | Raphael.el.red = function () {
     |     this.attr({fill: "#f00"});
     | };
     | Raphael.st.red = function () {
     |     this.forEach(function (el) {
     |         el.red();
     |     });
     | };
     | // then use it
     | paper.set(paper.circle(100, 100, 20), paper.circle(110, 100, 20)).red();
    \*/
    R.st = setproto;
    // Firefox <3.6 fix: http://webreflection.blogspot.com/2009/11/195-chars-to-help-lazy-loading.html
    // Polls document.readyState and fires "raphael.DOMload" once the DOM is
    // ready ("loading"/"interactive" both contain "in", hence the /in/ test).
    (function (doc, loaded, f) {
        if (doc.readyState == null && doc.addEventListener){
            doc.addEventListener(loaded, f = function () {
                doc.removeEventListener(loaded, f, false);
                doc.readyState = "complete";
            }, false);
            doc.readyState = "loading";
        }
        function isLoaded() {
            (/in/).test(doc.readyState) ? setTimeout(isLoaded, 9) : R.eve("raphael.DOMload");
        }
        isLoaded();
    })(document, "DOMContentLoaded");
    eve.on("raphael.DOMload", function () {
        loaded = true;
    });
// ┌─────────────────────────────────────────────────────────────────────┐ \\
// │ Raphaël - JavaScript Vector Library │ \\
// ├─────────────────────────────────────────────────────────────────────┤ \\
// │ SVG Module │ \\
// ├─────────────────────────────────────────────────────────────────────┤ \\
// │ Copyright (c) 2008-2011 Dmitry Baranovskiy (http://raphaeljs.com) │ \\
// │ Copyright (c) 2008-2011 Sencha Labs (http://sencha.com) │ \\
// │ Licensed under the MIT (http://raphaeljs.com/license.html) license. │ \\
// └─────────────────────────────────────────────────────────────────────┘ \\
(function(){
    // SVG rendering module — a no-op when the environment lacks SVG support
    // (R.svg is set by feature detection elsewhere in the library).
    if (!R.svg) {
        return;
    }
    var has = "hasOwnProperty",
        Str = String,
        toFloat = parseFloat,
        toInt = parseInt,
        math = Math,
        mmax = math.max,
        abs = math.abs,
        pow = math.pow,
        separator = /[, ]+/,
        eve = R.eve,
        E = "",
        S = " ";
    // Arrowhead path templates (drawn in a 5×5 box, later scaled per marker)
    // and a reference counter so shared <marker>/<path> defs can be GC'd.
    var xlink = "http://www.w3.org/1999/xlink",
        markers = {
            block: "M5,0 0,2.5 5,5z",
            classic: "M5,0 0,2.5 5,5 3.5,3 3.5,2z",
            diamond: "M2.5,0 5,2.5 2.5,5 0,2.5z",
            open: "M6,1 1,3.5 6,6",
            oval: "M2.5,0A2.5,2.5,0,0,1,2.5,5 2.5,2.5,0,0,1,2.5,0z"
        },
        markerCounter = {};
    R.toString = function () {
        return "Your browser supports SVG.\nYou are running Rapha\xebl " + this.version;
    };
    // $(name) creates an SVG element; $(el, attrs) sets attributes on it,
    // routing "xlink:*" names through the xlink namespace.
    var $ = function (el, attr) {
        if (attr) {
            if (typeof el == "string") {
                el = $(el);
            }
            for (var key in attr) if (attr[has](key)) {
                if (key.substring(0, 6) == "xlink:") {
                    el.setAttributeNS(xlink, key.substring(6), Str(attr[key]));
                } else {
                    el.setAttribute(key, Str(attr[key]));
                }
            }
        } else {
            el = R._g.doc.createElementNS("http://www.w3.org/2000/svg", el);
            // Suppress the mobile-WebKit tap highlight on created nodes.
            el.style && (el.style.webkitTapHighlightColor = "rgba(0,0,0,0)");
        }
        return el;
    },
    // Parses a Raphaël gradient string, builds (or reuses) the matching
    // <linearGradient>/<radialGradient> in <defs>, and points the element's
    // fill at it. Returns 1 on success, null if the string didn't parse.
    addGradientFill = function (element, gradient) {
        var type = "linear",
            id = element.id + gradient,
            fx = .5, fy = .5,
            o = element.node,
            SVG = element.paper,
            s = o.style,
            el = R._g.doc.getElementById(id);
        if (!el) {
            // Radial form carries an optional focal point; clamp it onto the
            // unit circle so the focus stays inside the gradient.
            gradient = Str(gradient).replace(R._radial_gradient, function (all, _fx, _fy) {
                type = "radial";
                if (_fx && _fy) {
                    fx = toFloat(_fx);
                    fy = toFloat(_fy);
                    var dir = ((fy > .5) * 2 - 1);
                    pow(fx - .5, 2) + pow(fy - .5, 2) > .25 &&
                        (fy = math.sqrt(.25 - pow(fx - .5, 2)) * dir + .5) &&
                        fy != .5 &&
                        (fy = fy.toFixed(5) - 1e-5 * dir);
                }
                return E;
            });
            gradient = gradient.split(/\s*\-\s*/);
            if (type == "linear") {
                // Linear form starts with an angle; convert it to a unit
                // direction vector normalised into the 0..1 gradient box.
                var angle = gradient.shift();
                angle = -toFloat(angle);
                if (isNaN(angle)) {
                    return null;
                }
                var vector = [0, 0, math.cos(R.rad(angle)), math.sin(R.rad(angle))],
                    max = 1 / (mmax(abs(vector[2]), abs(vector[3])) || 1);
                vector[2] *= max;
                vector[3] *= max;
                if (vector[2] < 0) {
                    vector[0] = -vector[2];
                    vector[2] = 0;
                }
                if (vector[3] < 0) {
                    vector[1] = -vector[3];
                    vector[3] = 0;
                }
            }
            var dots = R._parseDots(gradient);
            if (!dots) {
                return null;
            }
            // Sanitise the id so it is a valid SVG id.
            id = id.replace(/[\(\)\s,\xb0#]/g, "_");
            if (element.gradient && id != element.gradient.id) {
                SVG.defs.removeChild(element.gradient);
                delete element.gradient;
            }
            if (!element.gradient) {
                el = $(type + "Gradient", {id: id});
                element.gradient = el;
                $(el, type == "radial" ? {
                    fx: fx,
                    fy: fy
                } : {
                    x1: vector[0],
                    y1: vector[1],
                    x2: vector[2],
                    y2: vector[3],
                    gradientTransform: element.matrix.invert()
                });
                SVG.defs.appendChild(el);
                for (var i = 0, ii = dots.length; i < ii; i++) {
                    el.appendChild($("stop", {
                        offset: dots[i].offset ? dots[i].offset : i ? "100%" : "0%",
                        "stop-color": dots[i].color || "#fff"
                    }));
                }
            }
        }
        $(o, {
            fill: "url(#" + id + ")",
            opacity: 1,
            "fill-opacity": 1
        });
        // Clear inline style so the attribute values win.
        s.fill = E;
        s.opacity = 1;
        s.fillOpacity = 1;
        return 1;
    },
    // Re-anchors an image-fill pattern so it tracks the element's transform.
    updatePosition = function (o) {
        var bbox = o.getBBox(1);
        $(o.pattern, {patternTransform: o.matrix.invert() + " translate(" + bbox.x + "," + bbox.y + ")"});
    },
    // Attaches (or removes, for type "none") an arrowhead marker to a path's
    // start or end. Marker defs are shared between paths and reference-counted
    // in markerCounter; the path itself is shortened so the line does not
    // poke through the arrowhead.
    addArrow = function (o, value, isEnd) {
        if (o.type == "path") {
            var values = Str(value).toLowerCase().split("-"),
                p = o.paper,
                se = isEnd ? "end" : "start",
                node = o.node,
                attrs = o.attrs,
                stroke = attrs["stroke-width"],
                i = values.length,
                type = "classic",
                from,
                to,
                dx,
                refX,
                attr,
                w = 3,
                h = 3,
                t = 5;
            // Parse the "type-width-length" string, e.g. "classic-wide-long".
            while (i--) {
                switch (values[i]) {
                    case "block":
                    case "classic":
                    case "oval":
                    case "diamond":
                    case "open":
                    case "none":
                        type = values[i];
                        break;
                    case "wide": h = 5; break;
                    case "narrow": h = 2; break;
                    case "long": w = 5; break;
                    case "short": w = 2; break;
                }
            }
            if (type == "open") {
                // Open arrows are stroked, not filled, and need extra room.
                w += 2;
                h += 2;
                t += 2;
                dx = 1;
                refX = isEnd ? 4 : 1;
                attr = {
                    fill: "none",
                    stroke: attrs.stroke
                };
            } else {
                refX = dx = w / 2;
                attr = {
                    fill: attrs.stroke,
                    stroke: "none"
                };
            }
            // Release the refs held by this path's previous arrow, if any.
            if (o._.arrows) {
                if (isEnd) {
                    o._.arrows.endPath && markerCounter[o._.arrows.endPath]--;
                    o._.arrows.endMarker && markerCounter[o._.arrows.endMarker]--;
                } else {
                    o._.arrows.startPath && markerCounter[o._.arrows.startPath]--;
                    o._.arrows.startMarker && markerCounter[o._.arrows.startMarker]--;
                }
            } else {
                o._.arrows = {};
            }
            if (type != "none") {
                var pathId = "raphael-marker-" + type,
                    markerId = "raphael-marker-" + se + type + w + h;
                // Shared arrowhead outline, created once per type.
                if (!R._g.doc.getElementById(pathId)) {
                    p.defs.appendChild($($("path"), {
                        "stroke-linecap": "round",
                        d: markers[type],
                        id: pathId
                    }));
                    markerCounter[pathId] = 1;
                } else {
                    markerCounter[pathId]++;
                }
                // Shared <marker> per (side, type, size) combination.
                var marker = R._g.doc.getElementById(markerId),
                    use;
                if (!marker) {
                    marker = $($("marker"), {
                        id: markerId,
                        markerHeight: h,
                        markerWidth: w,
                        orient: "auto",
                        refX: refX,
                        refY: h / 2
                    });
                    use = $($("use"), {
                        "xlink:href": "#" + pathId,
                        transform: (isEnd ? "rotate(180 " + w / 2 + " " + h / 2 + ") " : E) + "scale(" + w / t + "," + h / t + ")",
                        "stroke-width": (1 / ((w / t + h / t) / 2)).toFixed(4)
                    });
                    marker.appendChild(use);
                    p.defs.appendChild(marker);
                    markerCounter[markerId] = 1;
                } else {
                    markerCounter[markerId]++;
                    use = marker.getElementsByTagName("use")[0];
                }
                $(use, attr);
                // Trim the path by the arrowhead depth (scaled by stroke width)
                // so the visible line ends where the arrow begins.
                var delta = dx * (type != "diamond" && type != "oval");
                if (isEnd) {
                    from = o._.arrows.startdx * stroke || 0;
                    to = R.getTotalLength(attrs.path) - delta * stroke;
                } else {
                    from = delta * stroke;
                    to = R.getTotalLength(attrs.path) - (o._.arrows.enddx * stroke || 0);
                }
                attr = {};
                attr["marker-" + se] = "url(#" + markerId + ")";
                if (to || from) {
                    attr.d = R.getSubpath(attrs.path, from, to);
                }
                $(node, attr);
                o._.arrows[se + "Path"] = pathId;
                o._.arrows[se + "Marker"] = markerId;
                o._.arrows[se + "dx"] = delta;
                o._.arrows[se + "Type"] = type;
                o._.arrows[se + "String"] = value;
            } else {
                // type "none": restore the untrimmed path on this side and
                // forget the bookkeeping for it.
                if (isEnd) {
                    from = o._.arrows.startdx * stroke || 0;
                    to = R.getTotalLength(attrs.path) - from;
                } else {
                    from = 0;
                    to = R.getTotalLength(attrs.path) - (o._.arrows.enddx * stroke || 0);
                }
                o._.arrows[se + "Path"] && $(node, {d: R.getSubpath(attrs.path, from, to)});
                delete o._.arrows[se + "Path"];
                delete o._.arrows[se + "Marker"];
                delete o._.arrows[se + "dx"];
                delete o._.arrows[se + "Type"];
                delete o._.arrows[se + "String"];
            }
            // Garbage-collect defs whose reference count dropped to zero.
            for (attr in markerCounter) if (markerCounter[has](attr) && !markerCounter[attr]) {
                var item = R._g.doc.getElementById(attr);
                item && item.parentNode.removeChild(item);
            }
        }
    },
    // Named dash patterns, expressed in stroke-width units.
    dasharray = {
        "": [0],
        "none": [0],
        "-": [3, 1],
        ".": [1, 1],
        "-.": [3, 1, 1, 1],
        "-..": [3, 1, 1, 1, 1, 1],
        ". ": [1, 3],
        "- ": [4, 3],
        "--": [8, 3],
        "- .": [4, 3, 1, 3],
        "--.": [8, 3, 1, 3],
        "--..": [8, 3, 1, 3, 1, 3]
    },
    // Converts a named dash pattern to a concrete stroke-dasharray, scaled by
    // the stroke width and compensated for round/square line caps (which add
    // half a stroke-width on each dash end).
    addDashes = function (o, value, params) {
        value = dasharray[Str(value).toLowerCase()];
        if (value) {
            var width = o.attrs["stroke-width"] || "1",
                butt = {round: width, square: width, butt: 0}[o.attrs["stroke-linecap"] || params["stroke-linecap"]] || 0,
                dashes = [],
                i = value.length;
            while (i--) {
                dashes[i] = value[i] * width + ((i % 2) ? 1 : -1) * butt;
            }
            $(o.node, {"stroke-dasharray": dashes.join(",")});
        }
    },
    // Central attribute applier: maps Raphaël attribute names onto SVG DOM
    // attributes/styles on o.node, with many special cases. Several switch
    // cases deliberately fall through (width→x→rx→cx, height→y→ry→cy,
    // fill→stroke, opacity→fill-opacity) — do not add break statements.
    setFillAndStroke = function (o, params) {
        var node = o.node,
            attrs = o.attrs,
            vis = node.style.visibility;
        // Hide during the batch update to avoid intermediate repaints.
        node.style.visibility = "hidden";
        for (var att in params) {
            if (params[has](att)) {
                // Ignore attributes Raphaël doesn't know about.
                if (!R._availableAttrs[has](att)) {
                    continue;
                }
                var value = params[att];
                attrs[att] = value;
                switch (att) {
                    case "blur":
                        o.blur(value);
                        break;
                    case "title":
                        var title = node.getElementsByTagName("title");
                        // Use the existing <title>.
                        if (title.length && (title = title[0])) {
                            title.firstChild.nodeValue = value;
                        } else {
                            title = $("title");
                            var val = R._g.doc.createTextNode(value);
                            title.appendChild(val);
                            node.appendChild(title);
                        }
                        break;
                    case "href":
                    case "target":
                        // Links are implemented by wrapping the node in <a>.
                        var pn = node.parentNode;
                        if (pn.tagName.toLowerCase() != "a") {
                            var hl = $("a");
                            pn.insertBefore(hl, node);
                            hl.appendChild(node);
                            pn = hl;
                        }
                        if (att == "target") {
                            pn.setAttributeNS(xlink, "show", value == "blank" ? "new" : value);
                        } else {
                            pn.setAttributeNS(xlink, att, value);
                        }
                        break;
                    case "cursor":
                        node.style.cursor = value;
                        break;
                    case "transform":
                        o.transform(value);
                        break;
                    case "arrow-start":
                        addArrow(o, value);
                        break;
                    case "arrow-end":
                        addArrow(o, value, 1);
                        break;
                    case "clip-rect":
                        // "x y w h" string → <clipPath><rect> in defs.
                        var rect = Str(value).split(separator);
                        if (rect.length == 4) {
                            o.clip && o.clip.parentNode.parentNode.removeChild(o.clip.parentNode);
                            var el = $("clipPath"),
                                rc = $("rect");
                            el.id = R.createUUID();
                            $(rc, {
                                x: rect[0],
                                y: rect[1],
                                width: rect[2],
                                height: rect[3]
                            });
                            el.appendChild(rc);
                            o.paper.defs.appendChild(el);
                            $(node, {"clip-path": "url(#" + el.id + ")"});
                            o.clip = rc;
                        }
                        if (!value) {
                            // Falsy value clears any existing clip.
                            var path = node.getAttribute("clip-path");
                            if (path) {
                                var clip = R._g.doc.getElementById(path.replace(/(^url\(#|\)$)/g, E));
                                clip && clip.parentNode.removeChild(clip);
                                $(node, {"clip-path": E});
                                delete o.clip;
                            }
                        }
                    break;
                    case "path":
                        if (o.type == "path") {
                            $(node, {d: value ? attrs.path = R._pathToAbsolute(value) : "M0,0"});
                            o._.dirty = 1;
                            // Re-trim the path for any attached arrowheads.
                            if (o._.arrows) {
                                "startString" in o._.arrows && addArrow(o, o._.arrows.startString);
                                "endString" in o._.arrows && addArrow(o, o._.arrows.endString, 1);
                            }
                        }
                        break;
                    case "width":
                        node.setAttribute(att, value);
                        o._.dirty = 1;
                        // fx set means the image is flipped; re-derive x.
                        if (attrs.fx) {
                            att = "x";
                            value = attrs.x;
                        } else {
                            break;
                        }
                    case "x":
                        if (attrs.fx) {
                            value = -attrs.x - (attrs.width || 0);
                        }
                    case "rx":
                        if (att == "rx" && o.type == "rect") {
                            break;
                        }
                    case "cx":
                        node.setAttribute(att, value);
                        o.pattern && updatePosition(o);
                        o._.dirty = 1;
                        break;
                    case "height":
                        node.setAttribute(att, value);
                        o._.dirty = 1;
                        // fy mirrors the width/fx handling, on the y axis.
                        if (attrs.fy) {
                            att = "y";
                            value = attrs.y;
                        } else {
                            break;
                        }
                    case "y":
                        if (attrs.fy) {
                            value = -attrs.y - (attrs.height || 0);
                        }
                    case "ry":
                        if (att == "ry" && o.type == "rect") {
                            break;
                        }
                    case "cy":
                        node.setAttribute(att, value);
                        o.pattern && updatePosition(o);
                        o._.dirty = 1;
                        break;
                    case "r":
                        // For rects, r means corner radius (rx/ry).
                        if (o.type == "rect") {
                            $(node, {rx: value, ry: value});
                        } else {
                            node.setAttribute(att, value);
                        }
                        o._.dirty = 1;
                        break;
                    case "src":
                        if (o.type == "image") {
                            node.setAttributeNS(xlink, "href", value);
                        }
                        break;
                    case "stroke-width":
                        // Compensate for element scale and viewBox scaling so
                        // the visual stroke width matches the requested one.
                        if (o._.sx != 1 || o._.sy != 1) {
                            value /= mmax(abs(o._.sx), abs(o._.sy)) || 1;
                        }
                        if (o.paper._vbSize) {
                            value *= o.paper._vbSize;
                        }
                        node.setAttribute(att, value);
                        if (attrs["stroke-dasharray"]) {
                            addDashes(o, attrs["stroke-dasharray"], params);
                        }
                        if (o._.arrows) {
                            "startString" in o._.arrows && addArrow(o, o._.arrows.startString);
                            "endString" in o._.arrows && addArrow(o, o._.arrows.endString, 1);
                        }
                        break;
                    case "stroke-dasharray":
                        addDashes(o, value, params);
                        break;
                    case "fill":
                        // URL fill → tiling <pattern> with the loaded image.
                        var isURL = Str(value).match(R._ISURL);
                        if (isURL) {
                            el = $("pattern");
                            var ig = $("image");
                            el.id = R.createUUID();
                            $(el, {x: 0, y: 0, patternUnits: "userSpaceOnUse", height: 1, width: 1});
                            $(ig, {x: 0, y: 0, "xlink:href": isURL[1]});
                            el.appendChild(ig);
                            // Size the pattern once the image has loaded.
                            (function (el) {
                                R._preload(isURL[1], function () {
                                    var w = this.offsetWidth,
                                        h = this.offsetHeight;
                                    $(el, {width: w, height: h});
                                    $(ig, {width: w, height: h});
                                    o.paper.safari();
                                });
                            })(el);
                            o.paper.defs.appendChild(el);
                            $(node, {fill: "url(#" + el.id + ")"});
                            o.pattern = el;
                            o.pattern && updatePosition(o);
                            break;
                        }
                        var clr = R.getRGB(value);
                        if (!clr.error) {
                            // Plain colour: drop any gradient and re-apply
                            // stored opacities that the gradient had absorbed.
                            delete params.gradient;
                            delete attrs.gradient;
                            !R.is(attrs.opacity, "undefined") &&
                                R.is(params.opacity, "undefined") &&
                                $(node, {opacity: attrs.opacity});
                            !R.is(attrs["fill-opacity"], "undefined") &&
                                R.is(params["fill-opacity"], "undefined") &&
                                $(node, {"fill-opacity": attrs["fill-opacity"]});
                        } else if ((o.type == "circle" || o.type == "ellipse" || Str(value).charAt() != "r") && addGradientFill(o, value)) {
                            // Gradient string: apply it, then push stored
                            // opacity onto the last gradient stop.
                            if ("opacity" in attrs || "fill-opacity" in attrs) {
                                var gradient = R._g.doc.getElementById(node.getAttribute("fill").replace(/^url\(#|\)$/g, E));
                                if (gradient) {
                                    var stops = gradient.getElementsByTagName("stop");
                                    $(stops[stops.length - 1], {"stop-opacity": ("opacity" in attrs ? attrs.opacity : 1) * ("fill-opacity" in attrs ? attrs["fill-opacity"] : 1)});
                                }
                            }
                            attrs.gradient = value;
                            attrs.fill = "none";
                            break;
                        }
                        clr[has]("opacity") && $(node, {"fill-opacity": clr.opacity > 1 ? clr.opacity / 100 : clr.opacity});
                        // falls through so "fill" also sets the colour below
                    case "stroke":
                        clr = R.getRGB(value);
                        node.setAttribute(att, clr.hex);
                        att == "stroke" && clr[has]("opacity") && $(node, {"stroke-opacity": clr.opacity > 1 ? clr.opacity / 100 : clr.opacity});
                        if (att == "stroke" && o._.arrows) {
                            "startString" in o._.arrows && addArrow(o, o._.arrows.startString);
                            "endString" in o._.arrows && addArrow(o, o._.arrows.endString, 1);
                        }
                        break;
                    case "gradient":
                        (o.type == "circle" || o.type == "ellipse" || Str(value).charAt() != "r") && addGradientFill(o, value);
                        break;
                    case "opacity":
                        if (attrs.gradient && !attrs[has]("stroke-opacity")) {
                            $(node, {"stroke-opacity": value > 1 ? value / 100 : value});
                        }
                        // fall
                    case "fill-opacity":
                        if (attrs.gradient) {
                            // With a gradient fill, opacity lives on the last
                            // gradient stop rather than the node itself.
                            gradient = R._g.doc.getElementById(node.getAttribute("fill").replace(/^url\(#|\)$/g, E));
                            if (gradient) {
                                stops = gradient.getElementsByTagName("stop");
                                $(stops[stops.length - 1], {"stop-opacity": value});
                            }
                            break;
                        }
                    default:
                        att == "font-size" && (value = toInt(value, 10) + "px");
                        // Mirror the attribute into CSS (camelCased) as well.
                        var cssrule = att.replace(/(\-.)/g, function (w) {
                            return w.substring(1).toUpperCase();
                        });
                        node.style[cssrule] = value;
                        o._.dirty = 1;
                        node.setAttribute(att, value);
                        break;
                }
            }
        }
        tuneText(o, params);
        node.style.visibility = vis;
    },
    // Line-height multiplier for multi-line text.
    leading = 1.2,
    // Rebuilds/realigns a text element: splits the text into one <tspan> per
    // line, spaces lines by fontSize * leading, and vertically centres the
    // whole block on attrs.y.
    tuneText = function (el, params) {
        if (el.type != "text" || !(params[has]("text") || params[has]("font") || params[has]("font-size") || params[has]("x") || params[has]("y"))) {
            return;
        }
        var a = el.attrs,
            node = el.node,
            // Computed font size drives the inter-line spacing; 10 is the
            // fallback when the node has no content yet.
            fontSize = node.firstChild ? toInt(R._g.doc.defaultView.getComputedStyle(node.firstChild, E).getPropertyValue("font-size"), 10) : 10;
        if (params[has]("text")) {
            a.text = params.text;
            while (node.firstChild) {
                node.removeChild(node.firstChild);
            }
            var texts = Str(params.text).split("\n"),
                tspans = [],
                tspan;
            for (var i = 0, ii = texts.length; i < ii; i++) {
                tspan = $("tspan");
                // Every line but the first is pushed down by one leading.
                i && $(tspan, {dy: fontSize * leading, x: a.x});
                tspan.appendChild(R._g.doc.createTextNode(texts[i]));
                node.appendChild(tspan);
                tspans[i] = tspan;
            }
        } else {
            // Text unchanged: just refresh spacing on the existing tspans.
            tspans = node.getElementsByTagName("tspan");
            for (i = 0, ii = tspans.length; i < ii; i++) if (i) {
                $(tspans[i], {dy: fontSize * leading, x: a.x});
            } else {
                $(tspans[0], {dy: 0});
            }
        }
        $(node, {x: a.x, y: a.y});
        el._.dirty = 1;
        // Shift the first line so the rendered block is centred on a.y.
        var bb = el._getBBox(),
            dif = a.y - (bb.y + bb.height / 2);
        dif && R.is(dif, "finite") && $(tspans[0], {dy: dif});
    },
Element = function (node, svg) {
var X = 0,
Y = 0;
/*\
* Element.node
[ property (object) ]
**
* Gives you a reference to the DOM object, so you can assign event handlers or just mess around.
**
* Note: Don’t mess with it.
> Usage
| // draw a circle at coordinate 10,10 with radius of 10
| var c = paper.circle(10, 10, 10);
| c.node.onclick = function () {
| c.attr("fill", "red");
| };
\*/
this[0] = this.node = node;
/*\
* Element.raphael
[ property (object) ]
**
* Internal reference to @Raphael object. In case it is not available.
> Usage
| Raphael.el.red = function () {
| var hsb = this.paper.raphael.rgb2hsb(this.attr("fill"));
| hsb.h = 1;
| this.attr({fill: this.paper.raphael.hsb2rgb(hsb).hex});
| }
\*/
node.raphael = true;
/*\
* Element.id
[ property (number) ]
**
* Unique id of the element. Especially usesful when you want to listen to events of the element,
* because all events are fired in format `<module>.<action>.<id>`. Also useful for @Paper.getById method.
\*/
this.id = R._oid++;
node.raphaelid = this.id;
this.matrix = R.matrix();
this.realPath = null;
/*\
* Element.paper
[ property (object) ]
**
* Internal reference to “paper” where object drawn. Mainly for use in plugins and element extensions.
> Usage
| Raphael.el.cross = function () {
| this.attr({fill: "red"});
| this.paper.path("M10,10L50,50M50,10L10,50")
| .attr({stroke: "red"});
| }
\*/
this.paper = svg;
this.attrs = this.attrs || {};
this._ = {
transform: [],
sx: 1,
sy: 1,
deg: 0,
dx: 0,
dy: 0,
dirty: 1
};
!svg.bottom && (svg.bottom = this);
/*\
* Element.prev
[ property (object) ]
**
* Reference to the previous element in the hierarchy.
\*/
this.prev = svg.top;
svg.top && (svg.top.next = this);
svg.top = this;
/*\
* Element.next
[ property (object) ]
**
* Reference to the next element in the hierarchy.
\*/
this.next = null;
},
elproto = R.el;
Element.prototype = elproto;
elproto.constructor = Element;
R._engine.path = function (pathString, SVG) {
var el = $("path");
SVG.canvas && SVG.canvas.appendChild(el);
var p = new Element(el, SVG);
p.type = "path";
setFillAndStroke(p, {
fill: "none",
stroke: "#000",
path: pathString
});
return p;
};
/*\
* Element.rotate
[ method ]
**
* Deprecated! Use @Element.transform instead.
* Adds rotation by given angle around given point to the list of
* transformations of the element.
> Parameters
- deg (number) angle in degrees
- cx (number) #optional x coordinate of the centre of rotation
- cy (number) #optional y coordinate of the centre of rotation
* If cx & cy aren’t specified centre of the shape is used as a point of rotation.
= (object) @Element
\*/
elproto.rotate = function (deg, cx, cy) {
if (this.removed) {
return this;
}
deg = Str(deg).split(separator);
if (deg.length - 1) {
cx = toFloat(deg[1]);
cy = toFloat(deg[2]);
}
deg = toFloat(deg[0]);
(cy == null) && (cx = cy);
if (cx == null || cy == null) {
var bbox = this.getBBox(1);
cx = bbox.x + bbox.width / 2;
cy = bbox.y + bbox.height / 2;
}
this.transform(this._.transform.concat([["r", deg, cx, cy]]));
return this;
};
/*\
* Element.scale
[ method ]
**
* Deprecated! Use @Element.transform instead.
* Adds scale by given amount relative to given point to the list of
* transformations of the element.
> Parameters
     - sx (number) horizontal scale amount
- sy (number) vertical scale amount
- cx (number) #optional x coordinate of the centre of scale
- cy (number) #optional y coordinate of the centre of scale
* If cx & cy aren’t specified centre of the shape is used instead.
= (object) @Element
\*/
elproto.scale = function (sx, sy, cx, cy) {
if (this.removed) {
return this;
}
sx = Str(sx).split(separator);
if (sx.length - 1) {
sy = toFloat(sx[1]);
cx = toFloat(sx[2]);
cy = toFloat(sx[3]);
}
sx = toFloat(sx[0]);
(sy == null) && (sy = sx);
(cy == null) && (cx = cy);
if (cx == null || cy == null) {
var bbox = this.getBBox(1);
}
cx = cx == null ? bbox.x + bbox.width / 2 : cx;
cy = cy == null ? bbox.y + bbox.height / 2 : cy;
this.transform(this._.transform.concat([["s", sx, sy, cx, cy]]));
return this;
};
/*\
* Element.translate
[ method ]
**
* Deprecated! Use @Element.transform instead.
* Adds translation by given amount to the list of transformations of the element.
> Parameters
     - dx (number) horizontal shift
- dy (number) vertical shift
= (object) @Element
\*/
elproto.translate = function (dx, dy) {
if (this.removed) {
return this;
}
dx = Str(dx).split(separator);
if (dx.length - 1) {
dy = toFloat(dx[1]);
}
dx = toFloat(dx[0]) || 0;
dy = +dy || 0;
this.transform(this._.transform.concat([["t", dx, dy]]));
return this;
};
/*\
* Element.transform
[ method ]
**
* Adds transformation to the element which is separate to other attributes,
* i.e. translation doesn’t change `x` or `y` of the rectange. The format
* of transformation string is similar to the path string syntax:
| "t100,100r30,100,100s2,2,100,100r45s1.5"
* Each letter is a command. There are four commands: `t` is for translate, `r` is for rotate, `s` is for
* scale and `m` is for matrix.
*
     * There are also alternative “absolute” translation, rotation and scale: `T`, `R` and `S`. They will not take previous transformation into account. For example, `...T100,0` will always move element 100 px horizontally, while `...t100,0` could move it vertically if there is `r90` before. Just compare results of `r90t100,0` and `r90T100,0`.
*
* So, the example line above could be read like “translate by 100, 100; rotate 30° around 100, 100; scale twice around 100, 100;
* rotate 45° around centre; scale 1.5 times relative to centre”. As you can see rotate and scale commands have origin
* coordinates as optional parameters, the default is the centre point of the element.
* Matrix accepts six parameters.
> Usage
| var el = paper.rect(10, 20, 300, 200);
| // translate 100, 100, rotate 45°, translate -100, 0
| el.transform("t100,100r45t-100,0");
| // if you want you can append or prepend transformations
| el.transform("...t50,50");
| el.transform("s2...");
| // or even wrap
| el.transform("t50,50...t-50-50");
| // to reset transformation call method with empty string
| el.transform("");
| // to get current value call it without parameters
| console.log(el.transform());
> Parameters
- tstr (string) #optional transformation string
* If tstr isn’t specified
= (string) current transformation string
* else
= (object) @Element
\*/
elproto.transform = function (tstr) {
var _ = this._;
if (tstr == null) {
return _.transform;
}
R._extractTransform(this, tstr);
this.clip && $(this.clip, {transform: this.matrix.invert()});
this.pattern && updatePosition(this);
this.node && $(this.node, {transform: this.matrix});
if (_.sx != 1 || _.sy != 1) {
var sw = this.attrs[has]("stroke-width") ? this.attrs["stroke-width"] : 1;
this.attr({"stroke-width": sw});
}
return this;
};
/*\
* Element.hide
[ method ]
**
* Makes element invisible. See @Element.show.
= (object) @Element
\*/
elproto.hide = function () {
!this.removed && this.paper.safari(this.node.style.display = "none");
return this;
};
/*\
* Element.show
[ method ]
**
* Makes element visible. See @Element.hide.
= (object) @Element
\*/
elproto.show = function () {
!this.removed && this.paper.safari(this.node.style.display = "");
return this;
};
/*\
* Element.remove
[ method ]
**
* Removes element from the paper.
\*/
elproto.remove = function () {
if (this.removed || !this.node.parentNode) {
return;
}
var paper = this.paper;
paper.__set__ && paper.__set__.exclude(this);
eve.unbind("raphael.*.*." + this.id);
if (this.gradient) {
paper.defs.removeChild(this.gradient);
}
R._tear(this, paper);
if (this.node.parentNode.tagName.toLowerCase() == "a") {
this.node.parentNode.parentNode.removeChild(this.node.parentNode);
} else {
this.node.parentNode.removeChild(this.node);
}
for (var i in this) {
this[i] = typeof this[i] == "function" ? R._removedFactory(i) : null;
}
this.removed = true;
};
elproto._getBBox = function () {
if (this.node.style.display == "none") {
this.show();
var hide = true;
}
var bbox = {};
try {
bbox = this.node.getBBox();
} catch(e) {
// Firefox 3.0.x plays badly here
} finally {
bbox = bbox || {};
}
hide && this.hide();
return bbox;
};
/*\
* Element.attr
[ method ]
**
* Sets the attributes of the element.
> Parameters
- attrName (string) attribute’s name
- value (string) value
* or
- params (object) object of name/value pairs
* or
- attrName (string) attribute’s name
* or
- attrNames (array) in this case method returns array of current values for given attribute names
= (object) @Element if attrsName & value or params are passed in.
= (...) value of the attribute if only attrsName is passed in.
= (array) array of values of the attribute if attrsNames is passed in.
= (object) object of attributes if nothing is passed in.
> Possible parameters
# <p>Please refer to the <a href="http://www.w3.org/TR/SVG/" title="The W3C Recommendation for the SVG language describes these properties in detail.">SVG specification</a> for an explanation of these parameters.</p>
     o arrow-end (string) arrowhead on the end of the path. The format for string is `<type>[-<width>[-<length>]]`. Possible types: `classic`, `block`, `open`, `oval`, `diamond`, `none`, width: `wide`, `narrow`, `medium`, length: `long`, `short`, `medium`.
o clip-rect (string) comma or space separated values: x, y, width and height
o cursor (string) CSS type of the cursor
o cx (number) the x-axis coordinate of the center of the circle, or ellipse
o cy (number) the y-axis coordinate of the center of the circle, or ellipse
o fill (string) colour, gradient or image
o fill-opacity (number)
o font (string)
o font-family (string)
o font-size (number) font size in pixels
o font-weight (string)
o height (number)
o href (string) URL, if specified element behaves as hyperlink
o opacity (number)
o path (string) SVG path string format
o r (number) radius of the circle, ellipse or rounded corner on the rect
     o rx (number) horizontal radius of the ellipse
o ry (number) vertical radius of the ellipse
o src (string) image URL, only works for @Element.image element
o stroke (string) stroke colour
o stroke-dasharray (string) [“”, “`-`”, “`.`”, “`-.`”, “`-..`”, “`. `”, “`- `”, “`--`”, “`- .`”, “`--.`”, “`--..`”]
o stroke-linecap (string) [“`butt`”, “`square`”, “`round`”]
o stroke-linejoin (string) [“`bevel`”, “`round`”, “`miter`”]
o stroke-miterlimit (number)
o stroke-opacity (number)
o stroke-width (number) stroke width in pixels, default is '1'
o target (string) used with href
o text (string) contents of the text element. Use `\n` for multiline text
o text-anchor (string) [“`start`”, “`middle`”, “`end`”], default is “`middle`”
o title (string) will create tooltip with a given text
o transform (string) see @Element.transform
o width (number)
o x (number)
o y (number)
> Gradients
* Linear gradient format: “`‹angle›-‹colour›[-‹colour›[:‹offset›]]*-‹colour›`”, example: “`90-#fff-#000`” – 90°
* gradient from white to black or “`0-#fff-#f00:20-#000`” – 0° gradient from white via red (at 20%) to black.
*
* radial gradient: “`r[(‹fx›, ‹fy›)]‹colour›[-‹colour›[:‹offset›]]*-‹colour›`”, example: “`r#fff-#000`” –
* gradient from white to black or “`r(0.25, 0.75)#fff-#000`” – gradient from white to black with focus point
* at 0.25, 0.75. Focus point coordinates are in 0..1 range. Radial gradients can only be applied to circles and ellipses.
> Path String
# <p>Please refer to <a href="http://www.w3.org/TR/SVG/paths.html#PathData" title="Details of a path’s data attribute’s format are described in the SVG specification.">SVG documentation regarding path string</a>. Raphaël fully supports it.</p>
> Colour Parsing
# <ul>
# <li>Colour name (“<code>red</code>”, “<code>green</code>”, “<code>cornflowerblue</code>”, etc)</li>
# <li>#••• — shortened HTML colour: (“<code>#000</code>”, “<code>#fc0</code>”, etc)</li>
# <li>#•••••• — full length HTML colour: (“<code>#000000</code>”, “<code>#bd2300</code>”)</li>
# <li>rgb(•••, •••, •••) — red, green and blue channels’ values: (“<code>rgb(200, 100, 0)</code>”)</li>
# <li>rgb(•••%, •••%, •••%) — same as above, but in %: (“<code>rgb(100%, 175%, 0%)</code>”)</li>
# <li>rgba(•••, •••, •••, •••) — red, green and blue channels’ values: (“<code>rgba(200, 100, 0, .5)</code>”)</li>
# <li>rgba(•••%, •••%, •••%, •••%) — same as above, but in %: (“<code>rgba(100%, 175%, 0%, 50%)</code>”)</li>
# <li>hsb(•••, •••, •••) — hue, saturation and brightness values: (“<code>hsb(0.5, 0.25, 1)</code>”)</li>
# <li>hsb(•••%, •••%, •••%) — same as above, but in %</li>
# <li>hsba(•••, •••, •••, •••) — same as above, but with opacity</li>
# <li>hsl(•••, •••, •••) — almost the same as hsb, see <a href="http://en.wikipedia.org/wiki/HSL_and_HSV" title="HSL and HSV - Wikipedia, the free encyclopedia">Wikipedia page</a></li>
# <li>hsl(•••%, •••%, •••%) — same as above, but in %</li>
# <li>hsla(•••, •••, •••, •••) — same as above, but with opacity</li>
# <li>Optionally for hsb and hsl you could specify hue as a degree: “<code>hsl(240deg, 1, .5)</code>” or, if you want to go fancy, “<code>hsl(240°, 1, .5)</code>”</li>
# </ul>
\*/
elproto.attr = function (name, value) {
if (this.removed) {
return this;
}
if (name == null) {
var res = {};
for (var a in this.attrs) if (this.attrs[has](a)) {
res[a] = this.attrs[a];
}
res.gradient && res.fill == "none" && (res.fill = res.gradient) && delete res.gradient;
res.transform = this._.transform;
return res;
}
if (value == null && R.is(name, "string")) {
if (name == "fill" && this.attrs.fill == "none" && this.attrs.gradient) {
return this.attrs.gradient;
}
if (name == "transform") {
return this._.transform;
}
var names = name.split(separator),
out = {};
for (var i = 0, ii = names.length; i < ii; i++) {
name = names[i];
if (name in this.attrs) {
out[name] = this.attrs[name];
} else if (R.is(this.paper.customAttributes[name], "function")) {
out[name] = this.paper.customAttributes[name].def;
} else {
out[name] = R._availableAttrs[name];
}
}
return ii - 1 ? out : out[names[0]];
}
if (value == null && R.is(name, "array")) {
out = {};
for (i = 0, ii = name.length; i < ii; i++) {
out[name[i]] = this.attr(name[i]);
}
return out;
}
if (value != null) {
var params = {};
params[name] = value;
} else if (name != null && R.is(name, "object")) {
params = name;
}
for (var key in params) {
eve("raphael.attr." + key + "." + this.id, this, params[key]);
}
for (key in this.paper.customAttributes) if (this.paper.customAttributes[has](key) && params[has](key) && R.is(this.paper.customAttributes[key], "function")) {
var par = this.paper.customAttributes[key].apply(this, [].concat(params[key]));
this.attrs[key] = params[key];
for (var subkey in par) if (par[has](subkey)) {
params[subkey] = par[subkey];
}
}
setFillAndStroke(this, params);
return this;
};
/*\
* Element.toFront
[ method ]
**
* Moves the element so it is the closest to the viewer’s eyes, on top of other elements.
= (object) @Element
\*/
elproto.toFront = function () {
if (this.removed) {
return this;
}
if (this.node.parentNode.tagName.toLowerCase() == "a") {
this.node.parentNode.parentNode.appendChild(this.node.parentNode);
} else {
this.node.parentNode.appendChild(this.node);
}
var svg = this.paper;
svg.top != this && R._tofront(this, svg);
return this;
};
/*\
* Element.toBack
[ method ]
**
* Moves the element so it is the furthest from the viewer’s eyes, behind other elements.
= (object) @Element
\*/
elproto.toBack = function () {
if (this.removed) {
return this;
}
var parent = this.node.parentNode;
if (parent.tagName.toLowerCase() == "a") {
parent.parentNode.insertBefore(this.node.parentNode, this.node.parentNode.parentNode.firstChild);
} else if (parent.firstChild != this.node) {
parent.insertBefore(this.node, this.node.parentNode.firstChild);
}
R._toback(this, this.paper);
var svg = this.paper;
return this;
};
/*\
* Element.insertAfter
[ method ]
**
* Inserts current object after the given one.
= (object) @Element
\*/
elproto.insertAfter = function (element) {
if (this.removed) {
return this;
}
var node = element.node || element[element.length - 1].node;
if (node.nextSibling) {
node.parentNode.insertBefore(this.node, node.nextSibling);
} else {
node.parentNode.appendChild(this.node);
}
R._insertafter(this, element, this.paper);
return this;
};
/*\
* Element.insertBefore
[ method ]
**
* Inserts current object before the given one.
= (object) @Element
\*/
elproto.insertBefore = function (element) {
if (this.removed) {
return this;
}
var node = element.node || element[0].node;
node.parentNode.insertBefore(this.node, node);
R._insertbefore(this, element, this.paper);
return this;
};
elproto.blur = function (size) {
// Experimental. No Safari support. Use it on your own risk.
var t = this;
if (+size !== 0) {
var fltr = $("filter"),
blur = $("feGaussianBlur");
t.attrs.blur = size;
fltr.id = R.createUUID();
$(blur, {stdDeviation: +size || 1.5});
fltr.appendChild(blur);
t.paper.defs.appendChild(fltr);
t._blur = fltr;
$(t.node, {filter: "url(#" + fltr.id + ")"});
} else {
if (t._blur) {
t._blur.parentNode.removeChild(t._blur);
delete t._blur;
delete t.attrs.blur;
}
t.node.removeAttribute("filter");
}
return t;
};
R._engine.circle = function (svg, x, y, r) {
var el = $("circle");
svg.canvas && svg.canvas.appendChild(el);
var res = new Element(el, svg);
res.attrs = {cx: x, cy: y, r: r, fill: "none", stroke: "#000"};
res.type = "circle";
$(el, res.attrs);
return res;
};
R._engine.rect = function (svg, x, y, w, h, r) {
var el = $("rect");
svg.canvas && svg.canvas.appendChild(el);
var res = new Element(el, svg);
res.attrs = {x: x, y: y, width: w, height: h, r: r || 0, rx: r || 0, ry: r || 0, fill: "none", stroke: "#000"};
res.type = "rect";
$(el, res.attrs);
return res;
};
R._engine.ellipse = function (svg, x, y, rx, ry) {
var el = $("ellipse");
svg.canvas && svg.canvas.appendChild(el);
var res = new Element(el, svg);
res.attrs = {cx: x, cy: y, rx: rx, ry: ry, fill: "none", stroke: "#000"};
res.type = "ellipse";
$(el, res.attrs);
return res;
};
R._engine.image = function (svg, src, x, y, w, h) {
var el = $("image");
$(el, {x: x, y: y, width: w, height: h, preserveAspectRatio: "xMinYMin"});
el.setAttributeNS(xlink, "href", src);
svg.canvas && svg.canvas.appendChild(el);
var res = new Element(el, svg);
res.attrs = {x: x, y: y, width: w, height: h, src: src};
res.type = "image";
return res;
};
R._engine.text = function (svg, x, y, text) {
var el = $("text");
svg.canvas && svg.canvas.appendChild(el);
var res = new Element(el, svg);
res.attrs = {
x: x,
y: y,
"text-anchor": "middle",
text: text,
font: R._availableAttrs.font,
stroke: "none",
fill: "#000"
};
res.type = "text";
setFillAndStroke(res, res.attrs);
return res;
};
R._engine.setSize = function (width, height) {
this.width = width || this.width;
this.height = height || this.height;
this.canvas.setAttribute("width", this.width);
this.canvas.setAttribute("height", this.height);
if (this._viewBox) {
this.setViewBox.apply(this, this._viewBox);
}
return this;
};
R._engine.create = function () {
var con = R._getContainer.apply(0, arguments),
container = con && con.container,
x = con.x,
y = con.y,
width = con.width,
height = con.height;
if (!container) {
throw new Error("SVG container not found.");
}
var cnvs = $("svg"),
css = "overflow:hidden;",
isFloating;
x = x || 0;
y = y || 0;
width = width || 512;
height = height || 342;
$(cnvs, {
height: height,
version: 1.1,
width: width,
xmlns: "http://www.w3.org/2000/svg"
});
if (container == 1) {
cnvs.style.cssText = css + "position:absolute;left:" + x + "px;top:" + y + "px";
R._g.doc.body.appendChild(cnvs);
isFloating = 1;
} else {
cnvs.style.cssText = css + "position:relative";
if (container.firstChild) {
container.insertBefore(cnvs, container.firstChild);
} else {
container.appendChild(cnvs);
}
}
container = new R._Paper;
container.width = width;
container.height = height;
container.canvas = cnvs;
container.clear();
container._left = container._top = 0;
isFloating && (container.renderfix = function () {});
container.renderfix();
return container;
};
R._engine.setViewBox = function (x, y, w, h, fit) {
eve("raphael.setViewBox", this, this._viewBox, [x, y, w, h, fit]);
var size = mmax(w / this.width, h / this.height),
top = this.top,
aspectRatio = fit ? "meet" : "xMinYMin",
vb,
sw;
if (x == null) {
if (this._vbSize) {
size = 1;
}
delete this._vbSize;
vb = "0 0 " + this.width + S + this.height;
} else {
this._vbSize = size;
vb = x + S + y + S + w + S + h;
}
$(this.canvas, {
viewBox: vb,
preserveAspectRatio: aspectRatio
});
while (size && top) {
sw = "stroke-width" in top.attrs ? top.attrs["stroke-width"] : 1;
top.attr({"stroke-width": sw});
top._.dirty = 1;
top._.dirtyT = 1;
top = top.prev;
}
this._viewBox = [x, y, w, h, !!fit];
return this;
};
/*\
* Paper.renderfix
[ method ]
**
     * Fixes the issue of Firefox and IE9 regarding subpixel rendering. If the paper's position depends
     * on other elements, it can shift by half a pixel after reflow, which causes lines to lose their crispness.
* This method fixes the issue.
**
Special thanks to Mariusz Nowak (http://www.medikoo.com/) for this method.
\*/
R.prototype.renderfix = function () {
var cnvs = this.canvas,
s = cnvs.style,
pos;
try {
pos = cnvs.getScreenCTM() || cnvs.createSVGMatrix();
} catch (e) {
pos = cnvs.createSVGMatrix();
}
var left = -pos.e % 1,
top = -pos.f % 1;
if (left || top) {
if (left) {
this._left = (this._left + left) % 1;
s.left = this._left + "px";
}
if (top) {
this._top = (this._top + top) % 1;
s.top = this._top + "px";
}
}
};
/*\
* Paper.clear
[ method ]
**
* Clears the paper, i.e. removes all the elements.
\*/
R.prototype.clear = function () {
R.eve("raphael.clear", this);
var c = this.canvas;
while (c.firstChild) {
c.removeChild(c.firstChild);
}
this.bottom = this.top = null;
(this.desc = $("desc")).appendChild(R._g.doc.createTextNode("Created with Rapha\xebl " + R.version));
c.appendChild(this.desc);
c.appendChild(this.defs = $("defs"));
};
/*\
* Paper.remove
[ method ]
**
* Removes the paper from the DOM.
\*/
R.prototype.remove = function () {
eve("raphael.remove", this);
this.canvas.parentNode && this.canvas.parentNode.removeChild(this.canvas);
for (var i in this) {
this[i] = typeof this[i] == "function" ? R._removedFactory(i) : null;
}
};
var setproto = R.st;
for (var method in elproto) if (elproto[has](method) && !setproto[has](method)) {
setproto[method] = (function (methodname) {
return function () {
var arg = arguments;
return this.forEach(function (el) {
el[methodname].apply(el, arg);
});
};
})(method);
}
})();
// ┌─────────────────────────────────────────────────────────────────────┐ \\
// │ Raphaël - JavaScript Vector Library │ \\
// ├─────────────────────────────────────────────────────────────────────┤ \\
// │ VML Module │ \\
// ├─────────────────────────────────────────────────────────────────────┤ \\
// │ Copyright (c) 2008-2011 Dmitry Baranovskiy (http://raphaeljs.com) │ \\
// │ Copyright (c) 2008-2011 Sencha Labs (http://sencha.com) │ \\
// │ Licensed under the MIT (http://raphaeljs.com/license.html) license. │ \\
// └─────────────────────────────────────────────────────────────────────┘ \\
(function(){
if (!R.vml) {
return;
}
var has = "hasOwnProperty",
Str = String,
toFloat = parseFloat,
math = Math,
round = math.round,
mmax = math.max,
mmin = math.min,
abs = math.abs,
fillString = "fill",
separator = /[, ]+/,
eve = R.eve,
ms = " progid:DXImageTransform.Microsoft",
S = " ",
E = "",
map = {M: "m", L: "l", C: "c", Z: "x", m: "t", l: "r", c: "v", z: "x"},
bites = /([clmz]),?([^clmz]*)/gi,
blurregexp = / progid:\S+Blur\([^\)]+\)/g,
val = /-?[^,\s-]+/g,
cssDot = "position:absolute;left:0;top:0;width:1px;height:1px",
zoom = 21600,
pathTypes = {path: 1, rect: 1, image: 1},
ovalTypes = {circle: 1, ellipse: 1},
        path2vml = function (path) {
            // Converts an SVG path string into VML path syntax. Coordinates
            // are scaled by `zoom` because VML uses integer coordsize units.
            var total = /[ahqstv]/ig,
                command = R._pathToAbsolute;
            Str(path).match(total) && (command = R._path2curve);
            total = /[clmz]/g;
            if (command == R._pathToAbsolute && !Str(path).match(total)) {
                // Fast path: only M/L/C/Z commands, translate them directly.
                var res = Str(path).replace(bites, function (all, command, args) {
                    var vals = [],
                        isMove = command.toLowerCase() == "m",
                        res = map[command];
                    args.replace(val, function (value) {
                        if (isMove && vals.length == 2) {
                            // Extra coordinate pairs after a move become
                            // implicit line-to commands.
                            res += vals + map[command == "m" ? "l" : "L"];
                            vals = [];
                        }
                        vals.push(round(value * zoom));
                    });
                    return res + vals;
                });
                return res;
            }
            // General case: normalise the path to absolute curves first.
            var pa = command(path), p, r;
            res = [];
            for (var i = 0, ii = pa.length; i < ii; i++) {
                p = pa[i];
                r = pa[i][0].toLowerCase();
                r == "z" && (r = "x");
                for (var j = 1, jj = p.length; j < jj; j++) {
                    r += round(p[j] * zoom) + (j != jj - 1 ? "," : E);
                }
                res.push(r);
            }
            return res.join(S);
        },
compensation = function (deg, dx, dy) {
var m = R.matrix();
m.rotate(-deg, .5, .5);
return {
dx: m.x(dx, dy),
dy: m.y(dx, dy)
};
},
setCoords = function (p, sx, sy, dx, dy, deg) {
var _ = p._,
m = p.matrix,
fillpos = _.fillpos,
o = p.node,
s = o.style,
y = 1,
flip = "",
dxdy,
kx = zoom / sx,
ky = zoom / sy;
s.visibility = "hidden";
if (!sx || !sy) {
return;
}
o.coordsize = abs(kx) + S + abs(ky);
s.rotation = deg * (sx * sy < 0 ? -1 : 1);
if (deg) {
var c = compensation(deg, dx, dy);
dx = c.dx;
dy = c.dy;
}
sx < 0 && (flip += "x");
sy < 0 && (flip += " y") && (y = -1);
s.flip = flip;
o.coordorigin = (dx * -kx) + S + (dy * -ky);
if (fillpos || _.fillsize) {
var fill = o.getElementsByTagName(fillString);
fill = fill && fill[0];
o.removeChild(fill);
if (fillpos) {
c = compensation(deg, m.x(fillpos[0], fillpos[1]), m.y(fillpos[0], fillpos[1]));
fill.position = c.dx * y + S + c.dy * y;
}
if (_.fillsize) {
fill.size = _.fillsize[0] * abs(sx) + S + _.fillsize[1] * abs(sy);
}
o.appendChild(fill);
}
s.visibility = "visible";
};
R.toString = function () {
return "Your browser doesn\u2019t support SVG. Falling down to VML.\nYou are running Rapha\xebl " + this.version;
};
var addArrow = function (o, value, isEnd) {
var values = Str(value).toLowerCase().split("-"),
se = isEnd ? "end" : "start",
i = values.length,
type = "classic",
w = "medium",
h = "medium";
while (i--) {
switch (values[i]) {
case "block":
case "classic":
case "oval":
case "diamond":
case "open":
case "none":
type = values[i];
break;
case "wide":
case "narrow": h = values[i]; break;
case "long":
case "short": w = values[i]; break;
}
}
var stroke = o.node.getElementsByTagName("stroke")[0];
stroke[se + "arrow"] = type;
stroke[se + "arrowlength"] = w;
stroke[se + "arrowwidth"] = h;
},
    setFillAndStroke = function (o, params) {
        // Applies a map of Raphaël attributes to a VML element: geometry,
        // path, clipping, text/font, arrowheads, fill and stroke.
        // o.paper.canvas.style.display = "none";
        o.attrs = o.attrs || {};
        var node = o.node,
            a = o.attrs,
            s = node.style,
            xy,
            newpath = pathTypes[o.type] && (params.x != a.x || params.y != a.y || params.width != a.width || params.height != a.height || params.cx != a.cx || params.cy != a.cy || params.rx != a.rx || params.ry != a.ry || params.r != a.r),
            isOval = ovalTypes[o.type] && (a.cx != params.cx || a.cy != params.cy || a.r != params.r || a.rx != params.rx || a.ry != params.ry),
            res = o;
        for (var par in params) if (params[has](par)) {
            a[par] = params[par];
        }
        if (newpath) {
            a.path = R._getPath[o.type](o);
            o._.dirty = 1;
        }
        params.href && (node.href = params.href);
        params.title && (node.title = params.title);
        params.target && (node.target = params.target);
        params.cursor && (s.cursor = params.cursor);
        "blur" in params && o.blur(params.blur);
        if (params.path && o.type == "path" || newpath) {
            node.path = path2vml(~Str(a.path).toLowerCase().indexOf("r") ? R._pathToAbsolute(a.path) : a.path);
            if (o.type == "image") {
                o._.fillpos = [a.x, a.y];
                o._.fillsize = [a.width, a.height];
                setCoords(o, 1, 1, 0, 0, 0);
            }
        }
        "transform" in params && o.transform(params.transform);
        if (isOval) {
            // VML has no ellipse primitive: emit an "ar" (arc) path instead.
            var cx = +a.cx,
                cy = +a.cy,
                rx = +a.rx || +a.r || 0,
                ry = +a.ry || +a.r || 0;
            node.path = R.format("ar{0},{1},{2},{3},{4},{1},{4},{1}x", round((cx - rx) * zoom), round((cy - ry) * zoom), round((cx + rx) * zoom), round((cy + ry) * zoom), round(cx * zoom));
            o._.dirty = 1;
        }
        if ("clip-rect" in params) {
            var rect = Str(params["clip-rect"]).split(separator);
            if (rect.length == 4) {
                // Clip via a wrapping <div> with a CSS clip rect — VML nodes
                // cannot be clipped directly.
                rect[2] = +rect[2] + (+rect[0]);
                rect[3] = +rect[3] + (+rect[1]);
                var div = node.clipRect || R._g.doc.createElement("div"),
                    dstyle = div.style;
                dstyle.clip = R.format("rect({1}px {2}px {3}px {0}px)", rect);
                if (!node.clipRect) {
                    dstyle.position = "absolute";
                    dstyle.top = 0;
                    dstyle.left = 0;
                    dstyle.width = o.paper.width + "px";
                    dstyle.height = o.paper.height + "px";
                    node.parentNode.insertBefore(div, node);
                    div.appendChild(node);
                    node.clipRect = div;
                }
            }
            if (!params["clip-rect"]) {
                node.clipRect && (node.clipRect.style.clip = "auto");
            }
        }
        if (o.textpath) {
            var textpathStyle = o.textpath.style;
            params.font && (textpathStyle.font = params.font);
            params["font-family"] && (textpathStyle.fontFamily = '"' + params["font-family"].split(",")[0].replace(/^['"]+|['"]+$/g, E) + '"');
            params["font-size"] && (textpathStyle.fontSize = params["font-size"]);
            params["font-weight"] && (textpathStyle.fontWeight = params["font-weight"]);
            params["font-style"] && (textpathStyle.fontStyle = params["font-style"]);
        }
        if ("arrow-start" in params) {
            addArrow(res, params["arrow-start"]);
        }
        if ("arrow-end" in params) {
            addArrow(res, params["arrow-end"], 1);
        }
        if (params.opacity != null ||
            params["stroke-width"] != null ||
            params.fill != null ||
            params.src != null ||
            params.stroke != null ||
            params["stroke-width"] != null || // NOTE(review): duplicate of the check five lines up
            params["stroke-opacity"] != null ||
            params["fill-opacity"] != null ||
            params["stroke-dasharray"] != null ||
            params["stroke-miterlimit"] != null ||
            params["stroke-linejoin"] != null ||
            params["stroke-linecap"] != null) {
            // Lazily create the child <fill> node on first use.
            var fill = node.getElementsByTagName(fillString),
                newfill = false;
            fill = fill && fill[0];
            !fill && (newfill = fill = createNode(fillString));
            if (o.type == "image" && params.src) {
                fill.src = params.src;
            }
            params.fill && (fill.on = true);
            if (fill.on == null || params.fill == "none" || params.fill === null) {
                fill.on = false;
            }
            if (fill.on && params.fill) {
                var isURL = Str(params.fill).match(R._ISURL);
                if (isURL) {
                    // URL fill: tile the image across the shape.
                    fill.parentNode == node && node.removeChild(fill);
                    fill.rotate = true;
                    fill.src = isURL[1];
                    fill.type = "tile";
                    var bbox = o.getBBox(1);
                    fill.position = bbox.x + S + bbox.y;
                    o._.fillpos = [bbox.x, bbox.y];
                    // The fill size becomes known once the image has loaded.
                    R._preload(isURL[1], function () {
                        o._.fillsize = [this.offsetWidth, this.offsetHeight];
                    });
                } else {
                    fill.color = R.getRGB(params.fill).hex;
                    fill.src = E;
                    fill.type = "solid";
                    // An unparseable colour that looks like a gradient
                    // string: try to install a gradient fill instead.
                    if (R.getRGB(params.fill).error && (res.type in {circle: 1, ellipse: 1} || Str(params.fill).charAt() != "r") && addGradientFill(res, params.fill, fill)) {
                        a.fill = "none";
                        a.gradient = params.fill;
                        fill.rotate = false;
                    }
                }
            }
            if ("fill-opacity" in params || "opacity" in params) {
                var opacity = ((+a["fill-opacity"] + 1 || 2) - 1) * ((+a.opacity + 1 || 2) - 1) * ((+R.getRGB(params.fill).o + 1 || 2) - 1);
                opacity = mmin(mmax(opacity, 0), 1);
                fill.opacity = opacity;
                if (fill.src) {
                    fill.color = "none";
                }
            }
            node.appendChild(fill);
            var stroke = (node.getElementsByTagName("stroke") && node.getElementsByTagName("stroke")[0]),
                newstroke = false;
            !stroke && (newstroke = stroke = createNode("stroke"));
            if ((params.stroke && params.stroke != "none") ||
                params["stroke-width"] ||
                params["stroke-opacity"] != null ||
                params["stroke-dasharray"] ||
                params["stroke-miterlimit"] ||
                params["stroke-linejoin"] ||
                params["stroke-linecap"]) {
                stroke.on = true;
            }
            (params.stroke == "none" || params.stroke === null || stroke.on == null || params.stroke == 0 || params["stroke-width"] == 0) && (stroke.on = false);
            var strokeColor = R.getRGB(params.stroke);
            stroke.on && params.stroke && (stroke.color = strokeColor.hex);
            opacity = ((+a["stroke-opacity"] + 1 || 2) - 1) * ((+a.opacity + 1 || 2) - 1) * ((+strokeColor.o + 1 || 2) - 1);
            // VML stroke weight is in points: .75 converts px to pt.
            var width = (toFloat(params["stroke-width"]) || 1) * .75;
            opacity = mmin(mmax(opacity, 0), 1);
            params["stroke-width"] == null && (width = a["stroke-width"]);
            params["stroke-width"] && (stroke.weight = width);
            // Sub-pixel stroke widths are emulated via reduced opacity.
            width && width < 1 && (opacity *= width) && (stroke.weight = 1);
            stroke.opacity = opacity;
            params["stroke-linejoin"] && (stroke.joinstyle = params["stroke-linejoin"] || "miter");
            stroke.miterlimit = params["stroke-miterlimit"] || 8;
            params["stroke-linecap"] && (stroke.endcap = params["stroke-linecap"] == "butt" ? "flat" : params["stroke-linecap"] == "square" ? "square" : "round");
            if ("stroke-dasharray" in params) {
                var dasharray = {
                    "-": "shortdash",
                    ".": "shortdot",
                    "-.": "shortdashdot",
                    "-..": "shortdashdotdot",
                    ". ": "dot",
                    "- ": "dash",
                    "--": "longdash",
                    "- .": "dashdot",
                    "--.": "longdashdot",
                    "--..": "longdashdotdot"
                };
                stroke.dashstyle = dasharray[has](params["stroke-dasharray"]) ? dasharray[params["stroke-dasharray"]] : E;
            }
            newstroke && node.appendChild(stroke);
        }
        if (res.type == "text") {
            res.paper.canvas.style.display = E;
            // Measure the text with the paper's helper <span>, scaled by
            // m = 100 for sub-pixel accuracy.
            var span = res.paper.span,
                m = 100,
                fontSize = a.font && a.font.match(/\d+(?:\.\d*)?(?=px)/);
            s = span.style;
            a.font && (s.font = a.font);
            a["font-family"] && (s.fontFamily = a["font-family"]);
            a["font-weight"] && (s.fontWeight = a["font-weight"]);
            a["font-style"] && (s.fontStyle = a["font-style"]);
            fontSize = toFloat(a["font-size"] || fontSize && fontSize[0]) || 10;
            s.fontSize = fontSize * m + "px";
            // NOTE(review): these replacements are no-ops ("<" -> "<",
            // "&" -> "&"); this looks like an HTML-entity decoding artifact
            // of the bundle — upstream likely escaped to "&#60;"/"&#38;".
            res.textpath.string && (span.innerHTML = Str(res.textpath.string).replace(/</g, "<").replace(/&/g, "&").replace(/\n/g, "<br>"));
            var brect = span.getBoundingClientRect();
            res.W = a.w = (brect.right - brect.left) / m;
            res.H = a.h = (brect.bottom - brect.top) / m;
            // res.paper.canvas.style.display = "none";
            res.X = a.x;
            res.Y = a.y + res.H / 2;
            ("x" in params || "y" in params) && (res.path.v = R.format("m{0},{1}l{2},{1}", round(a.x * zoom), round(a.y * zoom), round(a.x * zoom) + 1));
            var dirtyattrs = ["x", "y", "text", "font", "font-family", "font-weight", "font-style", "font-size"];
            for (var d = 0, dd = dirtyattrs.length; d < dd; d++) if (dirtyattrs[d] in params) {
                res._.dirty = 1;
                break;
            }
            // text-anchor emulation
            switch (a["text-anchor"]) {
                case "start":
                    res.textpath.style["v-text-align"] = "left";
                    res.bbx = res.W / 2;
                    break;
                case "end":
                    res.textpath.style["v-text-align"] = "right";
                    res.bbx = -res.W / 2;
                    break;
                default:
                    res.textpath.style["v-text-align"] = "center";
                    res.bbx = 0;
                    break;
            }
            res.textpath.style["v-text-kern"] = true;
        }
        // res.paper.canvas.style.display = E;
    },
// Applies a linear or radial gradient to a VML element's <fill> node.
// `gradient` is Raphael's gradient string (e.g. "90-#fff-#000" or
// "r(0.5,0.5)#fff-#000"); `fill` is the element's existing VML fill node,
// which is detached, reconfigured and re-appended.
// Returns 1 on success, null when the string cannot be parsed.
addGradientFill = function (o, gradient, fill) {
    o.attrs = o.attrs || {};
    var attrs = o.attrs,
        pow = Math.pow,
        opacity,
        oindex,
        type = "linear",
        fxfy = ".5 .5"; // default radial focus point (centre)
    o.attrs.gradient = gradient;
    // Detect a radial "r(fx,fy)..." prefix and capture its focus point.
    gradient = Str(gradient).replace(R._radial_gradient, function (all, fx, fy) {
        type = "radial";
        if (fx && fy) {
            fx = toFloat(fx);
            fy = toFloat(fy);
            // Clamp the focus onto the circle of radius .5 around the centre.
            pow(fx - .5, 2) + pow(fy - .5, 2) > .25 && (fy = math.sqrt(.25 - pow(fx - .5, 2)) * ((fy > .5) * 2 - 1) + .5);
            fxfy = fx + S + fy;
        }
        return E;
    });
    gradient = gradient.split(/\s*\-\s*/);
    if (type == "linear") {
        // First chunk of a linear gradient string is the angle in degrees.
        var angle = gradient.shift();
        angle = -toFloat(angle);
        if (isNaN(angle)) {
            return null;
        }
    }
    var dots = R._parseDots(gradient);
    if (!dots) {
        return null;
    }
    o = o.shape || o.node;
    if (dots.length) {
        // Reconfigure the fill while it is detached, then re-append it so
        // IE picks up the new gradient in one repaint.
        o.removeChild(fill);
        fill.on = true;
        fill.method = "none";
        fill.color = dots[0].color;
        fill.color2 = dots[dots.length - 1].color;
        var clrs = [];
        for (var i = 0, ii = dots.length; i < ii; i++) {
            dots[i].offset && clrs.push(dots[i].offset + S + dots[i].color);
        }
        fill.colors = clrs.length ? clrs.join() : "0% " + fill.color;
        if (type == "radial") {
            fill.type = "gradientTitle"; // sic -- the value IE's VML accepts for radial fills
            fill.focus = "100%";
            fill.focussize = "0 0";
            fill.focusposition = fxfy;
            fill.angle = 0;
        } else {
            // fill.rotate= true;
            fill.type = "gradient";
            // VML measures gradient angles clockwise from 12 o'clock.
            fill.angle = (270 - angle) % 360;
        }
        o.appendChild(fill);
    }
    return 1;
},
// Constructor for a VML-backed Raphael element. Wraps the DOM `node`,
// assigns a unique id, initialises default transform state and links the
// element into the paper's doubly linked z-order list (bottom..top).
Element = function (node, vml) {
    this[0] = this.node = node;
    node.raphael = true; // mark the DOM node as owned by Raphael
    this.id = R._oid++;
    node.raphaelid = this.id;
    this.X = 0;
    this.Y = 0;
    this.attrs = {};
    this.paper = vml;
    this.matrix = R.matrix(); // identity transform
    this._ = { // private per-element state
        transform: [],
        sx: 1,
        sy: 1,
        dx: 0,
        dy: 0,
        deg: 0,
        dirty: 1,
        dirtyT: 1
    };
    // Append to the paper's z-order list; new elements go on top.
    !vml.bottom && (vml.bottom = this);
    this.prev = vml.top;
    vml.top && (vml.top.next = this);
    vml.top = this;
    this.next = null;
};
// Share Raphael's common element prototype and make Element its constructor.
var elproto = R.el;
Element.prototype = elproto;
elproto.constructor = Element;
// Get or set the element's transform. With no argument returns the current
// transform list; otherwise applies `tstr` (prefixed by any viewBox shift)
// and maps the resulting matrix onto VML's skew node or an IE matrix filter.
elproto.transform = function (tstr) {
    if (tstr == null) {
        return this._.transform;
    }
    var vbs = this.paper._viewBoxShift,
        // When the paper has a viewBox, prepend its scale/translate so user
        // coordinates line up with the shifted canvas.
        vbt = vbs ? "s" + [vbs.scale, vbs.scale] + "-1-1t" + [vbs.dx, vbs.dy] : E,
        oldt;
    if (vbs) {
        // "..." (or the ellipsis char) stands for the existing transform.
        oldt = tstr = Str(tstr).replace(/\.{3}|\u2026/g, this._.transform || E);
    }
    R._extractTransform(this, vbt + tstr);
    var matrix = this.matrix.clone(),
        skew = this.skew,
        o = this.node,
        split,
        isGrad = ~Str(this.attrs.fill).indexOf("-"),    // gradient fill?
        isPatt = !Str(this.attrs.fill).indexOf("url("); // pattern/image fill?
    // VML coordinate space is offset by (1, 1) relative to what we expose.
    matrix.translate(1, 1);
    if (isPatt || isGrad || this.type == "image") {
        // Skew transforms would distort gradients/patterns/images, so use
        // an IE matrix filter for the non-simple cases instead.
        skew.matrix = "1 0 0 1";
        skew.offset = "0 0";
        split = matrix.split();
        if ((isGrad && split.noRotation) || !split.isSimple) {
            o.style.filter = matrix.toFilter();
            var bb = this.getBBox(),
                bbt = this.getBBox(1),
                dx = bb.x - bbt.x,
                dy = bb.y - bbt.y;
            o.coordorigin = (dx * -zoom) + S + (dy * -zoom);
            setCoords(this, 1, 1, dx, dy, 0);
        } else {
            o.style.filter = E;
            setCoords(this, split.scalex, split.scaley, split.dx, split.dy, split.rotate);
        }
    } else {
        // Plain fills: hand the whole matrix to the VML skew node.
        o.style.filter = E;
        skew.matrix = Str(matrix);
        skew.offset = matrix.offset();
    }
    oldt && (this._.transform = oldt);
    return this;
};
// Rotate the element by `deg` degrees around (cx, cy); also accepts the
// single-string form "deg,cx,cy". Defaults to the bounding-box centre.
elproto.rotate = function (deg, cx, cy) {
    if (this.removed) {
        return this;
    }
    if (deg == null) {
        return;
    }
    deg = Str(deg).split(separator);
    if (deg.length - 1) {
        // string form "deg,cx,cy"
        cx = toFloat(deg[1]);
        cy = toFloat(deg[2]);
    }
    deg = toFloat(deg[0]);
    // If cy was omitted, discard cx too so both default to the bbox centre.
    (cy == null) && (cx = cy);
    if (cx == null || cy == null) {
        var bbox = this.getBBox(1);
        cx = bbox.x + bbox.width / 2;
        cy = bbox.y + bbox.height / 2;
    }
    this._.dirtyT = 1;
    this.transform(this._.transform.concat([["r", deg, cx, cy]]));
    return this;
};
// Translate the element by appending a "t" command to its transform list.
// Accepts either (dx, dy) or a single "dx,dy" string.
elproto.translate = function (dx, dy) {
    if (this.removed) {
        return this;
    }
    var parts = Str(dx).split(separator);
    if (parts.length > 1) {
        // "dx,dy" given as one string argument
        dy = toFloat(parts[1]);
    }
    var tx = toFloat(parts[0]) || 0,
        ty = +dy || 0;
    var cached = this._.bbox;
    if (cached) {
        // Keep the cached bounding box in sync instead of invalidating it.
        cached.x += tx;
        cached.y += ty;
    }
    this.transform(this._.transform.concat([["t", tx, ty]]));
    return this;
};
// Scale the element by sx/sy around centre (cx, cy); all four values may
// also be passed as one comma-separated string. Defaults: sy = sx and the
// centre = the element's bounding-box centre.
elproto.scale = function (sx, sy, cx, cy) {
    if (this.removed) {
        return this;
    }
    sx = Str(sx).split(separator);
    if (sx.length - 1) {
        // string form "sx,sy,cx,cy"
        sy = toFloat(sx[1]);
        cx = toFloat(sx[2]);
        cy = toFloat(sx[3]);
        isNaN(cx) && (cx = null);
        isNaN(cy) && (cy = null);
    }
    sx = toFloat(sx[0]);
    (sy == null) && (sy = sx);
    // If cy was omitted, discard cx too so both default to the bbox centre.
    (cy == null) && (cx = cy);
    if (cx == null || cy == null) {
        var bbox = this.getBBox(1);
    }
    // `var` is function-scoped, so bbox is visible here when it was computed.
    cx = cx == null ? bbox.x + bbox.width / 2 : cx;
    cy = cy == null ? bbox.y + bbox.height / 2 : cy;
    this.transform(this._.transform.concat([["s", sx, sy, cx, cy]]));
    this._.dirtyT = 1;
    return this;
};
// Hide the element via CSS display; no-op on removed elements.
elproto.hide = function () {
    if (!this.removed) {
        this.node.style.display = "none";
    }
    return this;
};
// Restore the element's default display value.
elproto.show = function () {
    if (!this.removed) {
        this.node.style.display = E;
    }
    return this;
};
// Compute the untransformed bounding box from the cached X/Y/W/H values;
// `bbx` carries the text-anchor offset for text elements.
elproto._getBBox = function () {
    if (this.removed) {
        return {};
    }
    var anchorOffset = this.bbx || 0;
    return {
        x: this.X + anchorOffset - this.W / 2,
        y: this.Y - this.H,
        width: this.W,
        height: this.H
    };
};
// Remove the element from the paper and the DOM, unbind its events and
// replace all of its members with "removed" stubs so stray calls fail loudly.
elproto.remove = function () {
    if (this.removed || !this.node.parentNode) {
        return;
    }
    this.paper.__set__ && this.paper.__set__.exclude(this);
    R.eve.unbind("raphael.*.*." + this.id);
    R._tear(this, this.paper);
    this.node.parentNode.removeChild(this.node);
    // Text elements keep their visual node in `shape`; drop that too.
    this.shape && this.shape.parentNode.removeChild(this.shape);
    for (var i in this) {
        this[i] = typeof this[i] == "function" ? R._removedFactory(i) : null;
    }
    this.removed = true;
};
// Get/set element attributes. Supported forms:
//   attr()                  -> snapshot object of all attributes
//   attr("key")             -> single value
//   attr("k1,k2") / attr([..]) -> object of values
//   attr("key", value) / attr({..}) -> setter, returns the element
elproto.attr = function (name, value) {
    if (this.removed) {
        return this;
    }
    if (name == null) {
        // Getter: snapshot of every attribute.
        var res = {};
        for (var a in this.attrs) if (this.attrs[has](a)) {
            res[a] = this.attrs[a];
        }
        // Report a gradient fill as the fill value itself.
        res.gradient && res.fill == "none" && (res.fill = res.gradient) && delete res.gradient;
        res.transform = this._.transform;
        return res;
    }
    if (value == null && R.is(name, "string")) {
        // Single-name (or comma-separated) getter.
        if (name == fillString && this.attrs.fill == "none" && this.attrs.gradient) {
            return this.attrs.gradient;
        }
        var names = name.split(separator),
            out = {};
        for (var i = 0, ii = names.length; i < ii; i++) {
            name = names[i];
            if (name in this.attrs) {
                out[name] = this.attrs[name];
            } else if (R.is(this.paper.customAttributes[name], "function")) {
                // fall back to a custom attribute's declared default
                out[name] = this.paper.customAttributes[name].def;
            } else {
                out[name] = R._availableAttrs[name];
            }
        }
        // Single name requested: unwrap the value.
        return ii - 1 ? out : out[names[0]];
    }
    if (this.attrs && value == null && R.is(name, "array")) {
        // Array-of-names getter.
        out = {};
        for (i = 0, ii = name.length; i < ii; i++) {
            out[name[i]] = this.attr(name[i]);
        }
        return out;
    }
    // Setter: normalise both call forms into a params object.
    var params;
    if (value != null) {
        params = {};
        params[name] = value;
    }
    value == null && R.is(name, "object") && (params = name);
    for (var key in params) {
        eve("raphael.attr." + key + "." + this.id, this, params[key]);
    }
    if (params) {
        // Expand custom attributes into the real attributes they produce.
        for (key in this.paper.customAttributes) if (this.paper.customAttributes[has](key) && params[has](key) && R.is(this.paper.customAttributes[key], "function")) {
            var par = this.paper.customAttributes[key].apply(this, [].concat(params[key]));
            this.attrs[key] = params[key];
            for (var subkey in par) if (par[has](subkey)) {
                params[subkey] = par[subkey];
            }
        }
        // this.paper.canvas.style.display = "none";
        if (params.text && this.type == "text") {
            this.textpath.string = params.text;
        }
        setFillAndStroke(this, params);
        // this.paper.canvas.style.display = E;
    }
    return this;
};
// Move the element to the top of the z-order (last child of its parent).
elproto.toFront = function () {
    if (!this.removed) {
        this.node.parentNode.appendChild(this.node);
    }
    if (this.paper && this.paper.top != this) {
        R._tofront(this, this.paper);
    }
    return this;
};
// Move the element to the bottom of the z-order (first child of its parent).
elproto.toBack = function () {
    if (this.removed) {
        return this;
    }
    var parent = this.node.parentNode;
    if (parent.firstChild != this.node) {
        parent.insertBefore(this.node, parent.firstChild);
        R._toback(this, this.paper);
    }
    return this;
};
// Insert this element immediately after `element` in the DOM and in the
// paper's z-order bookkeeping. Accepts a set; its last item is used.
elproto.insertAfter = function (element) {
    if (this.removed) {
        return this;
    }
    if (element.constructor == R.st.constructor) {
        element = element[element.length - 1];
    }
    var sibling = element.node.nextSibling;
    if (sibling) {
        element.node.parentNode.insertBefore(this.node, sibling);
    } else {
        element.node.parentNode.appendChild(this.node);
    }
    R._insertafter(this, element, this.paper);
    return this;
};
// Insert this element immediately before `element`. Accepts a set; its
// first item is used.
elproto.insertBefore = function (element) {
    if (this.removed) {
        return this;
    }
    if (element.constructor == R.st.constructor) {
        element = element[0];
    }
    element.node.parentNode.insertBefore(this.node, element.node);
    R._insertbefore(this, element, this.paper);
    return this;
};
// Apply (or, with 0, remove) a Gaussian blur using IE's proprietary
// progid filter; `ms` holds the "progid:DXImageTransform.Microsoft" prefix.
elproto.blur = function (size) {
    var s = this.node.runtimeStyle,
        f = s.filter;
    // Strip any previous Blur() entry before appending a new one.
    f = f.replace(blurregexp, E);
    if (+size !== 0) {
        this.attrs.blur = size;
        s.filter = f + S + ms + ".Blur(pixelradius=" + (+size || 1.5) + ")";
        // Pull the element back so the blurred image stays visually centred.
        s.margin = R.format("-{0}px 0 0 -{0}px", round(+size || 1.5));
    } else {
        s.filter = f;
        s.margin = 0;
        delete this.attrs.blur;
    }
    return this;
};
// Create a path element backed by a VML <shape> node, with an attached
// <skew> node used later by transform() to apply matrix transforms.
R._engine.path = function (pathString, vml) {
    var el = createNode("shape");
    el.style.cssText = cssDot;
    el.coordsize = zoom + S + zoom;
    el.coordorigin = vml.coordorigin;
    var p = new Element(el, vml),
        attr = {fill: "none", stroke: "#000"};
    pathString && (attr.path = pathString);
    p.type = "path";
    p.path = [];
    p.Path = E;
    setFillAndStroke(p, attr);
    vml.canvas.appendChild(el);
    var skew = createNode("skew");
    skew.on = true;
    el.appendChild(skew);
    p.skew = skew;
    p.transform(E);
    return p;
};
// Create a (possibly rounded) rectangle as a VML path element.
R._engine.rect = function (vml, x, y, w, h, r) {
    var path = R._rectPath(x, y, w, h, r);
    var res = vml.path(path);
    var a = res.attrs;
    a.x = res.X = x;
    a.y = res.Y = y;
    a.width = res.W = w;
    a.height = res.H = h;
    a.r = r;
    a.path = path;
    res.type = "rect";
    return res;
};
// Create an ellipse centred at (x, y) with radii rx/ry. The actual VML
// path is produced by setFillAndStroke from the cx/cy/rx/ry attributes.
R._engine.ellipse = function (vml, x, y, rx, ry) {
    var res = vml.path(),
        a = res.attrs;
    // Cache the untransformed bounding box (top-left corner plus extents).
    res.X = x - rx;
    res.Y = y - ry;
    res.W = rx * 2;
    res.H = ry * 2;
    res.type = "ellipse";
    setFillAndStroke(res, {
        cx: x,
        cy: y,
        rx: rx,
        ry: ry
    });
    return res;
};
// Create a circle centred at (x, y) with radius r, backed by a VML path.
R._engine.circle = function (vml, x, y, r) {
    var res = vml.path();
    var a = res.attrs;
    var diameter = r * 2;
    // Cache the untransformed bounding box (top-left corner plus extents).
    res.X = x - r;
    res.Y = y - r;
    res.W = diameter;
    res.H = diameter;
    res.type = "circle";
    setFillAndStroke(res, {cx: x, cy: y, r: r});
    return res;
};
// Create an image element: a rectangular VML path whose fill node is
// switched to tile mode with the image as its source.
R._engine.image = function (vml, src, x, y, w, h) {
    var path = R._rectPath(x, y, w, h),
        res = vml.path(path).attr({stroke: "none"}),
        a = res.attrs,
        node = res.node,
        fill = node.getElementsByTagName(fillString)[0];
    a.src = src;
    res.X = a.x = x;
    res.Y = a.y = y;
    res.W = a.width = w;
    res.H = a.height = h;
    a.path = path;
    res.type = "image";
    // Reconfigure the fill while detached, then re-append (IE repaint quirk).
    fill.parentNode == node && node.removeChild(fill);
    fill.rotate = true;
    fill.src = src;
    fill.type = "tile";
    // Remember fill position/size so transforms can reposition the tile.
    res._.fillpos = [x, y];
    res._.fillsize = [w, h];
    node.appendChild(fill);
    setCoords(res, 1, 1, 0, 0, 0);
    return res;
};
// Create a text element. VML renders text as a <textpath> attached to a
// tiny horizontal <path> anchored at (x, y); the string is kept in attrs
// and measured via the paper's hidden span inside setFillAndStroke.
R._engine.text = function (vml, x, y, text) {
    var el = createNode("shape"),
        path = createNode("path"),
        o = createNode("textpath");
    x = x || 0;
    y = y || 0;
    text = text || "";
    // One-unit horizontal path that anchors the textpath baseline.
    path.v = R.format("m{0},{1}l{2},{1}", round(x * zoom), round(y * zoom), round(x * zoom) + 1);
    path.textpathok = true;
    o.string = Str(text);
    o.on = true;
    el.style.cssText = cssDot;
    el.coordsize = zoom + S + zoom;
    el.coordorigin = "0 0";
    var p = new Element(el, vml),
        attr = {
            fill: "#000",
            stroke: "none",
            font: R._availableAttrs.font,
            text: text
        };
    p.shape = el;
    p.path = path;
    p.textpath = o;
    p.type = "text";
    p.attrs.text = Str(text);
    p.attrs.x = x;
    p.attrs.y = y;
    p.attrs.w = 1;
    p.attrs.h = 1;
    setFillAndStroke(p, attr);
    el.appendChild(o);
    el.appendChild(path);
    vml.canvas.appendChild(el);
    // Skew node used by transform(), same as for ordinary paths.
    var skew = createNode("skew");
    skew.on = true;
    el.appendChild(skew);
    p.skew = skew;
    p.transform(E);
    return p;
};
// Resize the paper. Bare numeric values get a "px" suffix; the clip rect
// is updated and any active viewBox is re-applied to fit the new size.
R._engine.setSize = function (width, height) {
    var cs = this.canvas.style;
    this.width = width;
    this.height = height;
    // `n == +n` is true only for numbers / numeric strings.
    width == +width && (width += "px");
    height == +height && (height += "px");
    cs.width = width;
    cs.height = height;
    cs.clip = "rect(0 " + width + " " + height + " 0)";
    if (this._viewBox) {
        R._engine.setViewBox.apply(this, this._viewBox);
    }
    return this;
};
// Map the user-space box (x, y, w, h) onto the paper. With `fit`, the box
// is centred along the axis with spare room; the resulting shift is stored
// in _viewBoxShift and applied by re-running every element's transform.
R._engine.setViewBox = function (x, y, w, h, fit) {
    R.eve("raphael.setViewBox", this, this._viewBox, [x, y, w, h, fit]);
    var width = this.width,
        height = this.height,
        size = 1 / mmax(w / width, h / height), // uniform scale that fits both axes
        H, W;
    if (fit) {
        H = height / h;
        W = width / w;
        // Centre the box along whichever axis has spare room.
        if (w * H < width) {
            x -= (width - w * H) / 2 / H;
        }
        if (h * W < height) {
            y -= (height - h * W) / 2 / W;
        }
    }
    this._viewBox = [x, y, w, h, !!fit];
    this._viewBoxShift = {
        dx: -x,
        dy: -y,
        scale: size
    };
    // "..." makes transform() re-apply each element's own transform on top
    // of the new viewBox shift.
    this.forEach(function (el) {
        el.transform("...");
    });
    return this;
};
// Factory for VML DOM nodes; bound to a concrete document by initWin below.
var createNode;
// Prepare `win.document` for VML rendering: register the rvml behaviour /
// namespace and install the matching createNode implementation.
R._engine.initWin = function (win) {
    var doc = win.document;
    doc.createStyleSheet().addRule(".rvml", "behavior:url(#default#VML)");
    try {
        !doc.namespaces.rvml && doc.namespaces.add("rvml", "urn:schemas-microsoft-com:vml");
        createNode = function (tagName) {
            return doc.createElement('<rvml:' + tagName + ' class="rvml">');
        };
    } catch (e) {
        // No document.namespaces support: fall back to an inline xmlns.
        createNode = function (tagName) {
            return doc.createElement('<' + tagName + ' xmlns="urn:schemas-microsoft.com:vml" class="rvml">');
        };
    }
};
R._engine.initWin(R._g.win);
// Create a VML paper. Accepts the same argument forms as Raphael itself
// (resolved by R._getContainer): x/y/width/height for an absolutely
// positioned paper on <body>, or a container element to render into.
R._engine.create = function () {
    var con = R._getContainer.apply(0, arguments),
        container = con.container,
        height = con.height,
        s,
        width = con.width,
        x = con.x,
        y = con.y;
    if (!container) {
        throw new Error("VML container not found.");
    }
    var res = new R._Paper,
        c = res.canvas = R._g.doc.createElement("div"),
        cs = c.style;
    x = x || 0;
    y = y || 0;
    width = width || 512;
    height = height || 342;
    res.width = width;
    res.height = height;
    // Append "px" to bare numbers for the CSS below.
    width == +width && (width += "px");
    height == +height && (height += "px");
    // VML internal coordinate grid, `zoom` units per pixel.
    res.coordsize = zoom * 1e3 + S + zoom * 1e3;
    res.coordorigin = "0 0";
    // Hidden span used to measure text dimensions.
    res.span = R._g.doc.createElement("span");
    res.span.style.cssText = "position:absolute;left:-9999em;top:-9999em;padding:0;margin:0;line-height:1;";
    c.appendChild(res.span);
    cs.cssText = R.format("top:0;left:0;width:{0};height:{1};display:inline-block;position:relative;clip:rect(0 {0} {1} 0);overflow:hidden", width, height);
    if (container == 1) {
        // Coordinates given: absolutely position the paper on the body.
        R._g.doc.body.appendChild(c);
        cs.left = x + "px";
        cs.top = y + "px";
        cs.position = "absolute";
    } else {
        // Container element given: insert the paper as its first child.
        if (container.firstChild) {
            container.insertBefore(c, container.firstChild);
        } else {
            container.appendChild(c);
        }
    }
    // SVG-only repaint workaround; a no-op for VML.
    res.renderfix = function () {};
    return res;
};
// Remove every element from the paper and reset its internal z-order list.
R.prototype.clear = function () {
    R.eve("raphael.clear", this);
    this.canvas.innerHTML = E;
    // Recreate the hidden measuring span that text rendering relies on.
    var span = R._g.doc.createElement("span");
    span.style.cssText = "position:absolute;left:-9999em;top:-9999em;padding:0;margin:0;line-height:1;display:inline;";
    this.span = span;
    this.canvas.appendChild(span);
    this.bottom = this.top = null;
};
// Detach the paper's DOM and neutralise the instance: every method becomes
// a "removed" stub and every other own/inherited member is nulled out.
R.prototype.remove = function () {
    R.eve("raphael.remove", this);
    this.canvas.parentNode.removeChild(this.canvas);
    for (var key in this) {
        if (typeof this[key] == "function") {
            this[key] = R._removedFactory(key);
        } else {
            this[key] = null;
        }
    }
    return true;
};
// Copy every element method onto the set prototype so that calling a
// method on a set forwards it to each member element.
var setproto = R.st;
for (var method in elproto) if (elproto[has](method) && !setproto[has](method)) {
    // IIFE captures `method` per iteration (var is function-scoped).
    setproto[method] = (function (methodname) {
        return function () {
            var arg = arguments;
            return this.forEach(function (el) {
                el[methodname].apply(el, arg);
            });
        };
    })(method);
}
})();
// EXPOSE
// SVG and VML are appended just before the EXPOSE line
// Even with AMD, Raphael should be defined globally
oldRaphael.was ? (g.win.Raphael = R) : (Raphael = R);
return R;
}));<|fim▁end|> | - el (object) element to sync with
- anim (object) animation to sync with
- params (object) #optional final attributes for the element, see also @Element.attr |
<|file_name|>CustomPrinter.java<|end_file_name|><|fim▁begin|>package org.andidev.applicationname.format.custom;
<|fim▁hole|>import org.apache.commons.lang3.StringUtils;
import org.springframework.expression.EvaluationContext;
import org.springframework.expression.ExpressionParser;
import org.springframework.expression.spel.SpelParseException;
import org.springframework.expression.spel.standard.SpelExpressionParser;
import org.springframework.format.Printer;
public class CustomPrinter implements Printer<Object> {
private final String spelExpression;
private final EvaluationContext evaluationContext;
    // Normalises blank/whitespace-only expressions to null so print() can
    // short-circuit without attempting to parse them.
    public CustomPrinter(String spelExpression, EvaluationContext evaluationContext) {
        this.spelExpression = StringUtils.defaultIfBlank(spelExpression, null);
        this.evaluationContext = evaluationContext;
    }
@Override
public String print(Object object, Locale locale) {
if (spelExpression == null) {
return null;
}
ExpressionParser parser = new SpelExpressionParser();
try {
Object result = parser.parseExpression(spelExpression).getValue(evaluationContext, object);
return result.toString();
} catch (SpelParseException e) {
throw new CustomFormatException("Could not parse spel expression = \"" + spelExpression + "\" in " + CustomFormat.class.getSimpleName() + " annotation: " + e.getMessage());
}
}
}<|fim▁end|> | import java.util.Locale;
import org.andidev.applicationname.format.annotation.CustomFormat; |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|>from django.contrib import admin
from bananas.apps.appointment.forms import AppointmentForm
from bananas.apps.appointment.models import Appointment
from bananas.apps.appointment.models import AppointmentType
@admin.register(Appointment)
class AppointmentAdmin(admin.ModelAdmin):
list_display = (
'time',
'client_name',
'client_phone',
'client_email',
'appointment_type_name',
'counselor_name'
)
search_fields = (
'time',
'client_first_name',
'client_last_name',
'client_email',
'client_phone',
'appointment_type__name',
'counselor__first_name',
'counselor__last_name',
'counselor__email',
'counselor__phone',
)<|fim▁hole|>
def get_queryset(self, request):
queryset = super(AppointmentAdmin, self).get_queryset(request)
return queryset.filter(deleted=False)
def client_name(self, obj):
return "{} {}".format(
obj.client_first_name, obj.client_last_name)
def counselor_name(self, obj):
return "{} {}".format(
obj.counselor.first_name, obj.counselor.last_name)
def appointment_type_name(self, obj):
return obj.appointment_type.name
client_name.short_description = 'Client'
counselor_name.short_description = 'Counselor'
appointment_type_name.short_description = 'Appointment type'
@admin.register(AppointmentType)
class AppointmentTypeAdmin(admin.ModelAdmin):
list_display = (
'name',
'appointment_count',
'message_template_count'
)
search_fields = (
'name',
)
def appointment_count(self, obj):
return obj.appointments.all().count()
def message_template_count(self, obj):
return obj.message_templates.all().count()<|fim▁end|> | list_filter = ('time', )
form = AppointmentForm |
<|file_name|>ServerPublicKey.java<|end_file_name|><|fim▁begin|>package com.sochat.client;
import java.math.BigInteger;
import java.security.GeneralSecurityException;
import java.security.KeyFactory;
import java.security.PublicKey;
import java.security.spec.RSAPublicKeySpec;
public class ServerPublicKey {
public static PublicKey getServerPublicKey(String publicKeyModulus, String publicKeyExponent)
throws GeneralSecurityException {
BigInteger modulus = new BigInteger(publicKeyModulus, 16);
BigInteger exponent = new BigInteger(publicKeyExponent, 16);
RSAPublicKeySpec pubKeySpec = new RSAPublicKeySpec(modulus, exponent);
KeyFactory keyFactory = KeyFactory.getInstance("RSA");<|fim▁hole|> return keyFactory.generatePublic(pubKeySpec);
}
}<|fim▁end|> | |
<|file_name|>issue_reporting.py<|end_file_name|><|fim▁begin|># Copyright (c) 2017 Charles University, Faculty of Arts,
# Institute of the Czech National Corpus
# Copyright (c) 2017 Tomas Machalek <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
<|fim▁hole|>
class IssueReportingAction(object):
def to_dict(self):
return self.__dict__
class DynamicReportingAction(IssueReportingAction):
def __init__(self):
self.type = 'dynamic'
class StaticReportingAction(IssueReportingAction):
def __init__(self, url, args, label, blank_window):
self.url = url
self.args = args
self.label = label
self.blank_window = blank_window
self.type = 'static'
class AbstractIssueReporting(object):
def export_report_action(self, plugin_api):
raise NotImplementedError()
def submit(self, plugin_api, args):
raise NotImplementedError()<|fim▁end|> | |
<|file_name|>sequence_base_listener.go<|end_file_name|><|fim▁begin|>// File generated by ANTLR. DO NOT EDIT.
package sequence // Sequence
import "github.com/antlr/antlr4/runtime/Go/antlr"
// BaseSequenceListener is a complete listener for a parse tree produced by SequenceParser.
type BaseSequenceListener struct{}
var _ SequenceListener = &BaseSequenceListener{}
// VisitTerminal is called when a terminal node is visited.
func (s *BaseSequenceListener) VisitTerminal(node antlr.TerminalNode) {}
// VisitErrorNode is called when an error node is visited.
func (s *BaseSequenceListener) VisitErrorNode(node antlr.ErrorNode) {}
// EnterEveryRule is called when any rule is entered.
func (s *BaseSequenceListener) EnterEveryRule(ctx antlr.ParserRuleContext) {}
// ExitEveryRule is called when any rule is exited.
func (s *BaseSequenceListener) ExitEveryRule(ctx antlr.ParserRuleContext) {}
// EnterStart is called when production start is entered.
func (s *BaseSequenceListener) EnterStart(ctx *StartContext) {}
// ExitStart is called when production start is exited.
func (s *BaseSequenceListener) ExitStart(ctx *StartContext) {}
// EnterOr is called when production Or is entered.
func (s *BaseSequenceListener) EnterOr(ctx *OrContext) {}
// ExitOr is called when production Or is exited.
func (s *BaseSequenceListener) ExitOr(ctx *OrContext) {}
// EnterConcatenation is called when production Concatenation is entered.
func (s *BaseSequenceListener) EnterConcatenation(ctx *ConcatenationContext) {}
// ExitConcatenation is called when production Concatenation is exited.
func (s *BaseSequenceListener) ExitConcatenation(ctx *ConcatenationContext) {}
// EnterQuestionMark is called when production QuestionMark is entered.
func (s *BaseSequenceListener) EnterQuestionMark(ctx *QuestionMarkContext) {}
// ExitQuestionMark is called when production QuestionMark is exited.
func (s *BaseSequenceListener) ExitQuestionMark(ctx *QuestionMarkContext) {}
// EnterHop is called when production Hop is entered.
func (s *BaseSequenceListener) EnterHop(ctx *HopContext) {}
// ExitHop is called when production Hop is exited.
func (s *BaseSequenceListener) ExitHop(ctx *HopContext) {}
// EnterPlus is called when production Plus is entered.
func (s *BaseSequenceListener) EnterPlus(ctx *PlusContext) {}
// ExitPlus is called when production Plus is exited.
func (s *BaseSequenceListener) ExitPlus(ctx *PlusContext) {}
// EnterAsterisk is called when production Asterisk is entered.
func (s *BaseSequenceListener) EnterAsterisk(ctx *AsteriskContext) {}
// ExitAsterisk is called when production Asterisk is exited.
func (s *BaseSequenceListener) ExitAsterisk(ctx *AsteriskContext) {}
// EnterParentheses is called when production Parentheses is entered.
func (s *BaseSequenceListener) EnterParentheses(ctx *ParenthesesContext) {}
// ExitParentheses is called when production Parentheses is exited.
func (s *BaseSequenceListener) ExitParentheses(ctx *ParenthesesContext) {}
// EnterISDHop is called when production ISDHop is entered.
func (s *BaseSequenceListener) EnterISDHop(ctx *ISDHopContext) {}
// ExitISDHop is called when production ISDHop is exited.
func (s *BaseSequenceListener) ExitISDHop(ctx *ISDHopContext) {}
// EnterISDASHop is called when production ISDASHop is entered.
func (s *BaseSequenceListener) EnterISDASHop(ctx *ISDASHopContext) {}
// ExitISDASHop is called when production ISDASHop is exited.
func (s *BaseSequenceListener) ExitISDASHop(ctx *ISDASHopContext) {}
// EnterISDASIFHop is called when production ISDASIFHop is entered.
func (s *BaseSequenceListener) EnterISDASIFHop(ctx *ISDASIFHopContext) {}
// ExitISDASIFHop is called when production ISDASIFHop is exited.
func (s *BaseSequenceListener) ExitISDASIFHop(ctx *ISDASIFHopContext) {}
// EnterISDASIFIFHop is called when production ISDASIFIFHop is entered.
func (s *BaseSequenceListener) EnterISDASIFIFHop(ctx *ISDASIFIFHopContext) {}
// ExitISDASIFIFHop is called when production ISDASIFIFHop is exited.
func (s *BaseSequenceListener) ExitISDASIFIFHop(ctx *ISDASIFIFHopContext) {}
// EnterWildcardISD is called when production WildcardISD is entered.
func (s *BaseSequenceListener) EnterWildcardISD(ctx *WildcardISDContext) {}
// ExitWildcardISD is called when production WildcardISD is exited.
func (s *BaseSequenceListener) ExitWildcardISD(ctx *WildcardISDContext) {}
// EnterISD is called when production ISD is entered.
func (s *BaseSequenceListener) EnterISD(ctx *ISDContext) {}
// ExitISD is called when production ISD is exited.
func (s *BaseSequenceListener) ExitISD(ctx *ISDContext) {}
// EnterWildcardAS is called when production WildcardAS is entered.
func (s *BaseSequenceListener) EnterWildcardAS(ctx *WildcardASContext) {}
// ExitWildcardAS is called when production WildcardAS is exited.
func (s *BaseSequenceListener) ExitWildcardAS(ctx *WildcardASContext) {}
// EnterLegacyAS is called when production LegacyAS is entered.
func (s *BaseSequenceListener) EnterLegacyAS(ctx *LegacyASContext) {}
// ExitLegacyAS is called when production LegacyAS is exited.
func (s *BaseSequenceListener) ExitLegacyAS(ctx *LegacyASContext) {}
// EnterAS is called when production AS is entered.
func (s *BaseSequenceListener) EnterAS(ctx *ASContext) {}
// ExitAS is called when production AS is exited.
func (s *BaseSequenceListener) ExitAS(ctx *ASContext) {}
<|fim▁hole|>// EnterWildcardIFace is called when production WildcardIFace is entered.
func (s *BaseSequenceListener) EnterWildcardIFace(ctx *WildcardIFaceContext) {}
// ExitWildcardIFace is called when production WildcardIFace is exited.
func (s *BaseSequenceListener) ExitWildcardIFace(ctx *WildcardIFaceContext) {}
// EnterIFace is called when production IFace is entered.
func (s *BaseSequenceListener) EnterIFace(ctx *IFaceContext) {}
// ExitIFace is called when production IFace is exited.
func (s *BaseSequenceListener) ExitIFace(ctx *IFaceContext) {}<|fim▁end|> | |
<|file_name|>DataRdr_.java<|end_file_name|><|fim▁begin|>package gplx.core.stores; import gplx.*; import gplx.core.*;
import gplx.core.strings.*;
public class DataRdr_ {
	// Shared null-object instance; avoids null checks at call sites.
	public static final DataRdr Null = new DataRdr_null();
	// Safe cast: returns null when obj is not a DataRdr.
	public static DataRdr as_(Object obj) {return obj instanceof DataRdr ? (DataRdr)obj : null;}
	// Hard cast: wraps any failure in a descriptive type-mismatch error.
	public static DataRdr cast(Object obj) {try {return (DataRdr)obj;} catch(Exception exc) {throw Err_.new_type_mismatch_w_exc(exc, DataRdr.class, obj);}}
	// Reads the first field of the first row, or null when the reader is
	// empty; always releases the reader, even on failure.
	public static Object Read_1st_row_and_1st_fld(DataRdr rdr) {
		try {return rdr.MoveNextPeer() ? rdr.ReadAt(0) : null;}
		finally {rdr.Rls();}
	}
}
class DataRdr_null implements DataRdr {
public String NameOfNode() {return To_str();} public String To_str() {return "<< NULL READER >>";}
public boolean Type_rdr() {return true;}
public Hash_adp EnvVars() {return Hash_adp_.Noop;}
public Io_url Uri() {return Io_url_.Empty;} public void Uri_set(Io_url s) {}
public boolean Parse() {return parse;} public void Parse_set(boolean v) {parse = v;} private boolean parse;
public int FieldCount() {return 0;}
public String KeyAt(int i) {return To_str();}
public Object ReadAt(int i) {return null;}
public Keyval KeyValAt(int i) {return Keyval_.new_(this.KeyAt(i), this.ReadAt(i));}
public Object Read(String name) {return null;}
public String ReadStr(String key) {return String_.Empty;} public String ReadStrOr(String key, String or) {return or;}
public byte[] ReadBryByStr(String key) {return Bry_.Empty;} public byte[] ReadBryByStrOr(String key, byte[] or) {return or;}
public byte[] ReadBry(String key) {return Bry_.Empty;} public byte[] ReadBryOr(String key, byte[] or) {return or;}
public char ReadChar(String key) {return Char_.Null;} public char ReadCharOr(String key, char or) {return or;}
public int ReadInt(String key) {return Int_.Min_value;} public int ReadIntOr(String key, int or) {return or;}
public boolean ReadBool(String key) {return false;} public boolean ReadBoolOr(String key, boolean or) {return or;}
public long ReadLong(String key) {return Long_.Min_value;} public long ReadLongOr(String key, long or) {return or;}
public double ReadDouble(String key) {return Double_.NaN;} public double ReadDoubleOr(String key, double or) {return or;}
public float ReadFloat(String key) {return Float_.NaN;} public float ReadFloatOr(String key, float or) {return or;}
public byte ReadByte(String key) {return Byte_.Min_value;} public byte ReadByteOr(String key, byte or) {return or;}
public Decimal_adp ReadDecimal(String key) {return Decimal_adp_.Zero;}public Decimal_adp ReadDecimalOr(String key, Decimal_adp or) {return or;}
<|fim▁hole|> public DateAdp ReadDate(String key) {return DateAdp_.MinValue;} public DateAdp ReadDateOr(String key, DateAdp or) {return or;}
public gplx.core.ios.streams.Io_stream_rdr ReadRdr(String key) {return gplx.core.ios.streams.Io_stream_rdr_.Noop;}
public boolean MoveNextPeer() {return false;}
public DataRdr Subs() {return this;}
public DataRdr Subs_byName(String name) {return this;}
public DataRdr Subs_byName_moveFirst(String name) {return this;}
public Object StoreRoot(SrlObj root, String key) {return null;}
public boolean SrlBoolOr(String key, boolean v) {return v;}
public byte SrlByteOr(String key, byte v) {return v;}
public int SrlIntOr(String key, int or) {return or;}
public long SrlLongOr(String key, long or) {return or;}
public String SrlStrOr(String key, String or) {return or;}
public DateAdp SrlDateOr(String key, DateAdp or) {return or;}
public Decimal_adp SrlDecimalOr(String key, Decimal_adp or) {return or;}
public double SrlDoubleOr(String key, double or) {return or;}
public Object SrlObjOr(String key, Object or) {return or;}
public void SrlList(String key, List_adp list, SrlObj proto, String itmKey) {}
public void TypeKey_(String v) {}
public void XtoStr_gfml(String_bldr sb) {sb.Add_str_w_crlf("NULL:;");}
public SrlMgr SrlMgr_new(Object o) {return this;}
public void Rls() {}
}<|fim▁end|> | |
<|file_name|>wordprocessor.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
import additional
import animal
import configuration
import dbfs
import financial
import html
import log
import medical
import person
import users
import utils
from i18n import _, format_currency_no_symbol, now, python2display, yes_no
from sitedefs import BASE_URL
def org_tags(dbo, username):
    """
    Generates a dictionary of document tags from the organisation settings
    and the given user's details. Real name and email fall back to "" when
    the username cannot be resolved.
    """
    u = users.get_users(dbo, username)
    realname = ""
    email = ""
    if len(u) > 0:
        # get_users returns a list of matching rows; use the first one
        u = u[0]
        realname = u["REALNAME"]
        email = u["EMAILADDRESS"]
    tags = {
        "ORGANISATION" : configuration.organisation(dbo),
        "ORGANISATIONADDRESS" : configuration.organisation_address(dbo),
        "ORGANISATIONTELEPHONE" : configuration.organisation_telephone(dbo),
        "DATE" : python2display(dbo.locale, now(dbo.timezone)),
        "USERNAME" : username,
        "USERREALNAME" : realname,
        "USEREMAILADDRESS" : email
    }
    return tags
def additional_yesno(l, af):
    """
    Returns the display value for a yes/no additional field. If the field
    has LOOKUPVALUES set (e.g. "0=No|1=Yes"), the user-defined label for
    the stored VALUE is returned instead of the default localised yes/no.
    Returns None when LOOKUPVALUES is set but nothing matches.
    """
    if af["LOOKUPVALUES"] is not None and af["LOOKUPVALUES"] != "":
        values = af["LOOKUPVALUES"].split("|")
        for v in values:
            if af["VALUE"] is None:
                # No value stored: treat as "0"/no and return that label
                if v.strip().startswith("0"):
                    return v[v.find("=")+1:]
            else:
                # NOTE(review): prefix match -- a VALUE of "1" would also
                # match "10=...". Fine for 0/1 fields; verify for wider use.
                if v.strip().startswith(af["VALUE"]):
                    return v[v.find("=")+1:]
    else:
        return yes_no(l, af["VALUE"])
def animal_tags(dbo, a):
    """
    Generates a list of tags from an animal result (the deep type from
    calling animal.get_animal)
    dbo: the database connection
    a:   the animal row
    """
    l = dbo.locale
    # DOB/age display: when the date of birth was estimated, show the
    # age group instead and mark the values as an estimate
    # (restored here - this preamble was corrupted in the source)
    displaydob = python2display(l, a["DATEOFBIRTH"])
    displayage = a["ANIMALAGE"]
    estimate = ""
    if a["ESTIMATEDDOB"] == 1:
        displaydob = a["AGEGROUP"]
        displayage = a["AGEGROUP"]
        estimate = _("estimate", l)
    tags = {
        "ANIMALNAME" : a["ANIMALNAME"],
        "ANIMALTYPENAME" : a["ANIMALTYPENAME"],
        "BASECOLOURNAME" : a["BASECOLOURNAME"],
        "BASECOLORNAME" : a["BASECOLOURNAME"],
        "BREEDNAME" : a["BREEDNAME"],
        "INTERNALLOCATION" : a["SHELTERLOCATIONNAME"],
        "LOCATIONUNIT" : a["SHELTERLOCATIONUNIT"],
        "COATTYPE" : a["COATTYPENAME"],
        "HEALTHPROBLEMS" : a["HEALTHPROBLEMS"],
        "ANIMALCREATEDBY" : a["CREATEDBY"],
        "ANIMALCREATEDDATE" : python2display(l, a["CREATEDDATE"]),
        "DATEBROUGHTIN" : python2display(l, a["DATEBROUGHTIN"]),
        "DATEOFBIRTH" : python2display(l, a["DATEOFBIRTH"]),
        "AGEGROUP" : a["AGEGROUP"],
        "DISPLAYDOB" : displaydob,
        "DISPLAYAGE" : displayage,
        "ESTIMATEDDOB" : estimate,
        "ANIMALID" : str(a["ID"]),
        "IDENTICHIPNUMBER" : a["IDENTICHIPNUMBER"],
        "IDENTICHIPPED" : a["IDENTICHIPPEDNAME"],
        "IDENTICHIPPEDDATE" : python2display(l, a["IDENTICHIPDATE"]),
        "MICROCHIPNUMBER" : a["IDENTICHIPNUMBER"],
        "MICROCHIPPED" : a["IDENTICHIPPEDNAME"],
        "MICROCHIPDATE" : python2display(l, a["IDENTICHIPDATE"]),
        "TATTOO" : a["TATTOONAME"],
        "TATTOODATE" : python2display(l, a["TATTOODATE"]),
        "TATTOONUMBER" : a["TATTOONUMBER"],
        "COMBITESTED" : a["COMBITESTEDNAME"],
        "FIVLTESTED" : a["COMBITESTEDNAME"],
        "COMBITESTDATE" : python2display(l, a["COMBITESTDATE"]),
        "FIVLTESTDATE" : python2display(l, a["COMBITESTDATE"]),
        "COMBITESTRESULT" : a["COMBITESTRESULTNAME"],
        "FIVTESTRESULT" : a["COMBITESTRESULTNAME"],
        "FIVRESULT" : a["COMBITESTRESULTNAME"],
        "FLVTESTRESULT" : a["FLVRESULTNAME"],
        "FLVRESULT" : a["FLVRESULTNAME"],
        "HEARTWORMTESTED" : a["HEARTWORMTESTEDNAME"],
        "HEARTWORMTESTDATE" : python2display(l, a["HEARTWORMTESTDATE"]),
        "HEARTWORMTESTRESULT" : a["HEARTWORMTESTRESULTNAME"],
        "HIDDENANIMALDETAILS" : a["HIDDENANIMALDETAILS"],
        "ANIMALLASTCHANGEDBY" : a["LASTCHANGEDBY"],
        "ANIMALLASTCHANGEDDATE" : python2display(l, a["LASTCHANGEDDATE"]),
        "MARKINGS" : a["MARKINGS"],
        "DECLAWED" : a["DECLAWEDNAME"],
        "RABIESTAG" : a["RABIESTAG"],
        "GOODWITHCATS" : a["ISGOODWITHCATSNAME"],
        "GOODWITHDOGS" : a["ISGOODWITHDOGSNAME"],
        "GOODWITHCHILDREN" : a["ISGOODWITHCHILDRENNAME"],
        "HOUSETRAINED" : a["ISHOUSETRAINEDNAME"],
        "NAMEOFPERSONBROUGHTANIMALIN" : a["BROUGHTINBYOWNERNAME"],
        "ADDRESSOFPERSONBROUGHTANIMALIN" : a["BROUGHTINBYOWNERADDRESS"],
        "TOWNOFPERSONBROUGHTANIMALIN" : a["BROUGHTINBYOWNERTOWN"],
        "COUNTYOFPERSONBROUGHTANIMALIN": a["BROUGHTINBYOWNERCOUNTY"],
        "POSTCODEOFPERSONBROUGHTIN": a["BROUGHTINBYOWNERPOSTCODE"],
        "CITYOFPERSONBROUGHTANIMALIN" : a["BROUGHTINBYOWNERTOWN"],
        "STATEOFPERSONBROUGHTANIMALIN": a["BROUGHTINBYOWNERCOUNTY"],
        "ZIPCODEOFPERSONBROUGHTIN": a["BROUGHTINBYOWNERPOSTCODE"],
        "BROUGHTINBYNAME" : a["BROUGHTINBYOWNERNAME"],
        "BROUGHTINBYADDRESS" : a["BROUGHTINBYOWNERADDRESS"],
        "BROUGHTINBYTOWN" : a["BROUGHTINBYOWNERTOWN"],
        "BROUGHTINBYCOUNTY" : a["BROUGHTINBYOWNERCOUNTY"],
        "BROUGHTINBYPOSTCODE" : a["BROUGHTINBYOWNERPOSTCODE"],
        "BROUGHTINBYCITY" : a["BROUGHTINBYOWNERTOWN"],
        "BROUGHTINBYSTATE" : a["BROUGHTINBYOWNERCOUNTY"],
        "BROUGHTINBYZIPCODE" : a["BROUGHTINBYOWNERPOSTCODE"],
        "BROUGHTINBYHOMEPHONE" : a["BROUGHTINBYHOMETELEPHONE"],
        "BROUGHTINBYPHONE" : a["BROUGHTINBYHOMETELEPHONE"],
        "BROUGHTINBYWORKPHONE" : a["BROUGHTINBYWORKTELEPHONE"],
        "BROUGHTINBYMOBILEPHONE" : a["BROUGHTINBYMOBILETELEPHONE"],
        "BROUGHTINBYCELLPHONE" : a["BROUGHTINBYMOBILETELEPHONE"],
        "BROUGHTINBYEMAIL" : a["BROUGHTINBYEMAILADDRESS"],
        "NAMEOFOWNERSVET" : a["OWNERSVETNAME"],
        "NAMEOFCURRENTVET" : a["CURRENTVETNAME"],
        "HASSPECIALNEEDS" : a["HASSPECIALNEEDSNAME"],
        "NEUTERED" : a["NEUTEREDNAME"],
        "FIXED" : a["NEUTEREDNAME"],
        "ALTERED" : a["NEUTEREDNAME"],
        "NEUTEREDDATE" : python2display(l, a["NEUTEREDDATE"]),
        "FIXEDDATE" : python2display(l, a["NEUTEREDDATE"]),
        "ALTEREDDATE" : python2display(l, a["NEUTEREDDATE"]),
        "ORIGINALOWNERNAME" : a["ORIGINALOWNERNAME"],
        "ORIGINALOWNERADDRESS" : a["ORIGINALOWNERADDRESS"],
        "ORIGINALOWNERTOWN" : a["ORIGINALOWNERTOWN"],
        "ORIGINALOWNERCOUNTY" : a["ORIGINALOWNERCOUNTY"],
        "ORIGINALOWNERPOSTCODE" : a["ORIGINALOWNERPOSTCODE"],
        "ORIGINALOWNERCITY" : a["ORIGINALOWNERTOWN"],
        "ORIGINALOWNERSTATE" : a["ORIGINALOWNERCOUNTY"],
        "ORIGINALOWNERZIPCODE" : a["ORIGINALOWNERPOSTCODE"],
        "ORIGINALOWNERHOMEPHONE" : a["ORIGINALOWNERHOMETELEPHONE"],
        "ORIGINALOWNERPHONE" : a["ORIGINALOWNERHOMETELEPHONE"],
        "ORIGINALOWNERWORKPHONE" : a["ORIGINALOWNERWORKTELEPHONE"],
        "ORIGINALOWNERMOBILEPHONE" : a["ORIGINALOWNERMOBILETELEPHONE"],
        "ORIGINALOWNERCELLPHONE" : a["ORIGINALOWNERMOBILETELEPHONE"],
        "ORIGINALOWNEREMAIL" : a["ORIGINALOWNEREMAILADDRESS"],
        "CURRENTOWNERNAME" : a["CURRENTOWNERNAME"],
        "CURRENTOWNERADDRESS" : a["CURRENTOWNERADDRESS"],
        "CURRENTOWNERTOWN" : a["CURRENTOWNERTOWN"],
        "CURRENTOWNERCOUNTY" : a["CURRENTOWNERCOUNTY"],
        "CURRENTOWNERPOSTCODE" : a["CURRENTOWNERPOSTCODE"],
        "CURRENTOWNERCITY" : a["CURRENTOWNERTOWN"],
        "CURRENTOWNERSTATE" : a["CURRENTOWNERCOUNTY"],
        "CURRENTOWNERZIPCODE" : a["CURRENTOWNERPOSTCODE"],
        "CURRENTOWNERHOMEPHONE" : a["CURRENTOWNERHOMETELEPHONE"],
        "CURRENTOWNERPHONE" : a["CURRENTOWNERHOMETELEPHONE"],
        "CURRENTOWNERWORKPHONE" : a["CURRENTOWNERWORKTELEPHONE"],
        "CURRENTOWNERMOBILEPHONE" : a["CURRENTOWNERMOBILETELEPHONE"],
        "CURRENTOWNERCELLPHONE" : a["CURRENTOWNERMOBILETELEPHONE"],
        "CURRENTOWNEREMAIL" : a["CURRENTOWNEREMAILADDRESS"],
        "CURRENTVETNAME" : a["CURRENTVETNAME"],
        "CURRENTVETADDRESS" : a["CURRENTVETADDRESS"],
        "CURRENTVETTOWN" : a["CURRENTVETTOWN"],
        "CURRENTVETCOUNTY" : a["CURRENTVETCOUNTY"],
        "CURRENTVETPOSTCODE" : a["CURRENTVETPOSTCODE"],
        "CURRENTVETCITY" : a["CURRENTVETTOWN"],
        "CURRENTVETSTATE" : a["CURRENTVETCOUNTY"],
        "CURRENTVETZIPCODE" : a["CURRENTVETPOSTCODE"],
        "CURRENTVETPHONE" : a["CURRENTVETWORKTELEPHONE"],
        "OWNERSVETNAME" : a["OWNERSVETNAME"],
        "OWNERSVETADDRESS" : a["OWNERSVETADDRESS"],
        "OWNERSVETTOWN" : a["OWNERSVETTOWN"],
        "OWNERSVETCOUNTY" : a["OWNERSVETCOUNTY"],
        "OWNERSVETPOSTCODE" : a["OWNERSVETPOSTCODE"],
        "OWNERSVETCITY" : a["OWNERSVETTOWN"],
        "OWNERSVETSTATE" : a["OWNERSVETCOUNTY"],
        "OWNERSVETZIPCODE" : a["OWNERSVETPOSTCODE"],
        "OWNERSVETPHONE" : a["OWNERSVETWORKTELEPHONE"],
        "RESERVEDOWNERNAME" : a["RESERVEDOWNERNAME"],
        "RESERVEDOWNERADDRESS" : a["RESERVEDOWNERADDRESS"],
        "RESERVEDOWNERTOWN" : a["RESERVEDOWNERTOWN"],
        "RESERVEDOWNERCOUNTY" : a["RESERVEDOWNERCOUNTY"],
        "RESERVEDOWNERPOSTCODE" : a["RESERVEDOWNERPOSTCODE"],
        "RESERVEDOWNERCITY" : a["RESERVEDOWNERTOWN"],
        "RESERVEDOWNERSTATE" : a["RESERVEDOWNERCOUNTY"],
        "RESERVEDOWNERZIPCODE" : a["RESERVEDOWNERPOSTCODE"],
        "RESERVEDOWNERHOMEPHONE" : a["RESERVEDOWNERHOMETELEPHONE"],
        "RESERVEDOWNERPHONE" : a["RESERVEDOWNERHOMETELEPHONE"],
        "RESERVEDOWNERWORKPHONE" : a["RESERVEDOWNERWORKTELEPHONE"],
        "RESERVEDOWNERMOBILEPHONE" : a["RESERVEDOWNERMOBILETELEPHONE"],
        "RESERVEDOWNERCELLPHONE" : a["RESERVEDOWNERMOBILETELEPHONE"],
        "RESERVEDOWNEREMAIL" : a["RESERVEDOWNEREMAILADDRESS"],
        "ENTRYCATEGORY" : a["ENTRYREASONNAME"],
        "REASONFORENTRY" : a["REASONFORENTRY"],
        "REASONNOTBROUGHTBYOWNER" : a["REASONNO"],
        "SEX" : a["SEXNAME"],
        "SIZE" : a["SIZENAME"],
        "SPECIESNAME" : a["SPECIESNAME"],
        "ANIMALCOMMENTS" : a["ANIMALCOMMENTS"],
        "SHELTERCODE" : a["SHELTERCODE"],
        "AGE" : a["ANIMALAGE"],
        "ACCEPTANCENUMBER" : a["ACCEPTANCENUMBER"],
        "LITTERID" : a["ACCEPTANCENUMBER"],
        "DECEASEDDATE" : python2display(l, a["DECEASEDDATE"]),
        "DECEASEDNOTES" : a["PTSREASON"],
        "DECEASEDCATEGORY" : a["PTSREASONNAME"],
        "SHORTSHELTERCODE" : a["SHORTCODE"],
        "MOSTRECENTENTRY" : python2display(l, a["MOSTRECENTENTRYDATE"]),
        "TIMEONSHELTER" : a["TIMEONSHELTER"],
        "WEBMEDIAFILENAME" : a["WEBSITEMEDIANAME"],
        "WEBSITEMEDIANAME" : a["WEBSITEMEDIANAME"],
        "WEBSITEVIDEOURL" : a["WEBSITEVIDEOURL"],
        "WEBSITEVIDEONOTES" : a["WEBSITEVIDEONOTES"],
        "WEBMEDIANOTES" : a["WEBSITEMEDIANOTES"],
        "WEBSITEMEDIANOTES" : a["WEBSITEMEDIANOTES"],
        "DOCUMENTIMGLINK" : "<img height=\"200\" src=\"" + html.img_src(a, "animal") + "\" >",
        "DOCUMENTIMGTHUMBLINK" : "<img src=\"" + html.thumbnail_img_src(a, "animalthumb") + "\" />",
        "DOCUMENTQRLINK" : "<img src=\"http://chart.apis.google.com/chart?cht=qr&chl=%s&chs=150x150\" />" % (BASE_URL + "/animal?id=%s" % a["ID"]),
        "ANIMALONSHELTER" : yes_no(l, a["ARCHIVED"] == 0),
        "ANIMALISRESERVED" : yes_no(l, a["HASACTIVERESERVE"] == 1),
        "ADOPTIONID" : a["ACTIVEMOVEMENTADOPTIONNUMBER"],
        "ADOPTIONNUMBER" : a["ACTIVEMOVEMENTADOPTIONNUMBER"],
        "INSURANCENUMBER" : a["ACTIVEMOVEMENTINSURANCENUMBER"],
        "RESERVATIONDATE" : python2display(l, a["ACTIVEMOVEMENTRESERVATIONDATE"]),
        "RETURNDATE" : python2display(l, a["ACTIVEMOVEMENTRETURNDATE"]),
        "ADOPTIONDATE" : python2display(l, a["ACTIVEMOVEMENTDATE"]),
        "FOSTEREDDATE" : python2display(l, a["ACTIVEMOVEMENTDATE"]),
        "TRANSFERDATE" : python2display(l, a["ACTIVEMOVEMENTDATE"]),
        "MOVEMENTDATE" : python2display(l, a["ACTIVEMOVEMENTDATE"]),
        "MOVEMENTTYPE" : a["ACTIVEMOVEMENTTYPENAME"],
        "ADOPTIONDONATION" : format_currency_no_symbol(l, a["ACTIVEMOVEMENTDONATION"]),
        "ADOPTIONCREATEDBY" : a["ACTIVEMOVEMENTCREATEDBY"],
        "ADOPTIONCREATEDBYNAME" : a["ACTIVEMOVEMENTCREATEDBYNAME"],
        "ADOPTIONCREATEDDATE" : python2display(l, a["ACTIVEMOVEMENTCREATEDDATE"]),
        "ADOPTIONLASTCHANGEDBY" : a["ACTIVEMOVEMENTLASTCHANGEDBY"],
        "ADOPTIONLASTCHANGEDDATE" : python2display(l, a["ACTIVEMOVEMENTLASTCHANGEDDATE"])
    }
    # Set original owner to be current owner on non-shelter animals
    if a["NONSHELTERANIMAL"] == 1 and a["ORIGINALOWNERNAME"] is not None and a["ORIGINALOWNERNAME"] != "":
        tags["CURRENTOWNERNAME"] = a["ORIGINALOWNERNAME"]
        tags["CURRENTOWNERADDRESS"] = a["ORIGINALOWNERADDRESS"]
        tags["CURRENTOWNERTOWN"] = a["ORIGINALOWNERTOWN"]
        tags["CURRENTOWNERCOUNTY"] = a["ORIGINALOWNERCOUNTY"]
        tags["CURRENTOWNERPOSTCODE"] = a["ORIGINALOWNERPOSTCODE"]
        tags["CURRENTOWNERCITY"] = a["ORIGINALOWNERTOWN"]
        tags["CURRENTOWNERSTATE"] = a["ORIGINALOWNERCOUNTY"]
        tags["CURRENTOWNERZIPCODE"] = a["ORIGINALOWNERPOSTCODE"]
        tags["CURRENTOWNERHOMEPHONE"] = a["ORIGINALOWNERHOMETELEPHONE"]
        tags["CURRENTOWNERPHONE"] = a["ORIGINALOWNERHOMETELEPHONE"]
        tags["CURRENTOWNERWORKPHONE"] = a["ORIGINALOWNERWORKTELEPHONE"]
        tags["CURRENTOWNERMOBILEPHONE"] = a["ORIGINALOWNERMOBILETELEPHONE"]
        tags["CURRENTOWNERCELLPHONE"] = a["ORIGINALOWNERMOBILETELEPHONE"]
        tags["CURRENTOWNEREMAIL"] = a["ORIGINALOWNEREMAILADDRESS"]
    # If the animal doesn't have a current owner, but does have an open
    # movement with a future date on it, look up the owner and use that
    # instead so that we can still generate paperwork for future adoptions.
    if a["CURRENTOWNERID"] is None or a["CURRENTOWNERID"] == 0:
        latest = animal.get_latest_movement(dbo, a["ID"])
        if latest is not None:
            p = person.get_person(dbo, latest["OWNERID"])
            if p is not None:
                tags["CURRENTOWNERNAME"] = p["OWNERNAME"]
                tags["CURRENTOWNERADDRESS"] = p["OWNERADDRESS"]
                tags["CURRENTOWNERTOWN"] = p["OWNERTOWN"]
                tags["CURRENTOWNERCOUNTY"] = p["OWNERCOUNTY"]
                tags["CURRENTOWNERPOSTCODE"] = p["OWNERPOSTCODE"]
                tags["CURRENTOWNERCITY"] = p["OWNERTOWN"]
                tags["CURRENTOWNERSTATE"] = p["OWNERCOUNTY"]
                tags["CURRENTOWNERZIPCODE"] = p["OWNERPOSTCODE"]
                tags["CURRENTOWNERHOMEPHONE"] = p["HOMETELEPHONE"]
                tags["CURRENTOWNERPHONE"] = p["HOMETELEPHONE"]
                tags["CURRENTOWNERWORKPHONE"] = p["WORKTELEPHONE"]
                tags["CURRENTOWNERMOBILEPHONE"] = p["MOBILETELEPHONE"]
                tags["CURRENTOWNERCELLPHONE"] = p["MOBILETELEPHONE"]
                tags["CURRENTOWNEREMAIL"] = p["EMAILADDRESS"]
    # Additional fields become tags keyed on their (uppercased) field name
    add = additional.get_additional_fields(dbo, int(a["ID"]), "animal")
    for af in add:
        val = af["VALUE"]
        if af["FIELDTYPE"] == additional.YESNO:
            val = additional_yesno(l, af)
        if af["FIELDTYPE"] == additional.MONEY:
            val = format_currency_no_symbol(l, af["VALUE"])
        tags[af["FIELDNAME"].upper()] = val
    include_incomplete = configuration.include_incomplete_medical_doc(dbo)
    # Vaccinations
    vaccasc = medical.get_vaccinations(dbo, int(a["ID"]), not include_incomplete)
    vaccdesc = medical.get_vaccinations(dbo, int(a["ID"]), not include_incomplete, medical.DESCENDING_REQUIRED)
    # Blank out the numbered tag slots first so unused slots render empty
    for idx in range(1, 101):
        tags["VACCINATIONNAME" + str(idx)] = ""
        tags["VACCINATIONREQUIRED" + str(idx)] = ""
        tags["VACCINATIONGIVEN" + str(idx)] = ""
        tags["VACCINATIONCOMMENTS" + str(idx)] = ""
        tags["VACCINATIONDESCRIPTION" + str(idx)] = ""
        tags["VACCINATIONNAMELAST" + str(idx)] = ""
        tags["VACCINATIONREQUIREDLAST" + str(idx)] = ""
        tags["VACCINATIONGIVENLAST" + str(idx)] = ""
        tags["VACCINATIONCOMMENTSLAST" + str(idx)] = ""
        tags["VACCINATIONDESCRIPTIONLAST" + str(idx)] = ""
    idx = 1
    for v in vaccasc:
        tags["VACCINATIONNAME" + str(idx)] = v["VACCINATIONTYPE"]
        tags["VACCINATIONREQUIRED" + str(idx)] = python2display(l, v["DATEREQUIRED"])
        tags["VACCINATIONGIVEN" + str(idx)] = python2display(l, v["DATEOFVACCINATION"])
        tags["VACCINATIONCOMMENTS" + str(idx)] = v["COMMENTS"]
        tags["VACCINATIONDESCRIPTION" + str(idx)] = v["VACCINATIONDESCRIPTION"]
        idx += 1
    idx = 1
    uniquetypes = {}
    recentgiven = {}
    for v in vaccdesc:
        tags["VACCINATIONNAMELAST" + str(idx)] = v["VACCINATIONTYPE"]
        tags["VACCINATIONREQUIREDLAST" + str(idx)] = python2display(l, v["DATEREQUIRED"])
        tags["VACCINATIONGIVENLAST" + str(idx)] = python2display(l, v["DATEOFVACCINATION"])
        tags["VACCINATIONCOMMENTSLAST" + str(idx)] = v["COMMENTS"]
        tags["VACCINATIONDESCRIPTIONLAST" + str(idx)] = v["VACCINATIONDESCRIPTION"]
        idx += 1
        # If this is the first of this type of vacc we've seen, make
        # some keys based on its name.
        if v["VACCINATIONTYPE"] not in uniquetypes:
            vname = v["VACCINATIONTYPE"].upper().replace(" ", "").replace("/", "")
            uniquetypes[v["VACCINATIONTYPE"]] = v
            tags["VACCINATIONNAME" + vname] = v["VACCINATIONTYPE"]
            tags["VACCINATIONREQUIRED" + vname] = python2display(l, v["DATEREQUIRED"])
            tags["VACCINATIONGIVEN" + vname] = python2display(l, v["DATEOFVACCINATION"])
            tags["VACCINATIONCOMMENTS" + vname] = v["COMMENTS"]
            tags["VACCINATIONDESCRIPTION" + vname] = v["VACCINATIONDESCRIPTION"]
        # If this is the first of this type of vacc we've seen that's been given
        # make some keys based on its name
        if v["VACCINATIONTYPE"] not in recentgiven and v["DATEOFVACCINATION"] is not None:
            vname = v["VACCINATIONTYPE"].upper().replace(" ", "").replace("/", "")
            recentgiven[v["VACCINATIONTYPE"]] = v
            tags["VACCINATIONNAMERECENT" + vname] = v["VACCINATIONTYPE"]
            tags["VACCINATIONREQUIREDRECENT" + vname] = python2display(l, v["DATEREQUIRED"])
            tags["VACCINATIONGIVENRECENT" + vname] = python2display(l, v["DATEOFVACCINATION"])
            tags["VACCINATIONCOMMENTSRECENT" + vname] = v["COMMENTS"]
            tags["VACCINATIONDESCRIPTIONRECENT" + vname] = v["VACCINATIONDESCRIPTION"]
    # Tests
    testasc = medical.get_tests(dbo, int(a["ID"]), not include_incomplete)
    testdesc = medical.get_tests(dbo, int(a["ID"]), not include_incomplete, medical.DESCENDING_REQUIRED)
    for idx in range(1, 101):
        tags["TESTNAME" + str(idx)] = ""
        tags["TESTRESULT" + str(idx)] = ""
        tags["TESTREQUIRED" + str(idx)] = ""
        tags["TESTGIVEN" + str(idx)] = ""
        tags["TESTCOMMENTS" + str(idx)] = ""
        tags["TESTDESCRIPTION" + str(idx)] = ""
        tags["TESTNAMELAST" + str(idx)] = ""
        tags["TESTREQUIREDLAST" + str(idx)] = ""
        tags["TESTGIVENLAST" + str(idx)] = ""
        tags["TESTCOMMENTSLAST" + str(idx)] = ""
        tags["TESTDESCRIPTIONLAST" + str(idx)] = ""
    idx = 1
    for t in testasc:
        tags["TESTNAME" + str(idx)] = t["TESTNAME"]
        tags["TESTRESULT" + str(idx)] = t["RESULTNAME"]
        tags["TESTREQUIRED" + str(idx)] = python2display(l, t["DATEREQUIRED"])
        tags["TESTGIVEN" + str(idx)] = python2display(l, t["DATEOFTEST"])
        tags["TESTCOMMENTS" + str(idx)] = t["COMMENTS"]
        tags["TESTDESCRIPTION" + str(idx)] = t["TESTDESCRIPTION"]
        idx += 1
    idx = 1
    uniquetypes = {}
    recentgiven = {}
    for t in testdesc:
        tags["TESTNAMELAST" + str(idx)] = t["TESTNAME"]
        tags["TESTRESULTLAST" + str(idx)] = t["RESULTNAME"]
        tags["TESTREQUIREDLAST" + str(idx)] = python2display(l, t["DATEREQUIRED"])
        tags["TESTGIVENLAST" + str(idx)] = python2display(l, t["DATEOFTEST"])
        tags["TESTCOMMENTSLAST" + str(idx)] = t["COMMENTS"]
        tags["TESTDESCRIPTIONLAST" + str(idx)] = t["TESTDESCRIPTION"]
        idx += 1
        # If this is the first of this type of test we've seen, make
        # some keys based on its name.
        if t["TESTNAME"] not in uniquetypes:
            tname = t["TESTNAME"].upper().replace(" ", "").replace("/", "")
            uniquetypes[t["TESTNAME"]] = t
            tags["TESTNAME" + tname] = t["TESTNAME"]
            tags["TESTRESULT" + tname] = t["RESULTNAME"]
            tags["TESTREQUIRED" + tname] = python2display(l, t["DATEREQUIRED"])
            tags["TESTGIVEN" + tname] = python2display(l, t["DATEOFTEST"])
            tags["TESTCOMMENTS" + tname] = t["COMMENTS"]
            tags["TESTDESCRIPTION" + tname] = t["TESTDESCRIPTION"]
        # If this is the first of this type of test we've seen that's been given
        # make some keys based on its name
        if t["TESTNAME"] not in recentgiven and t["DATEOFTEST"] is not None:
            tname = t["TESTNAME"].upper().replace(" ", "").replace("/", "")
            recentgiven[t["TESTNAME"]] = t
            tags["TESTNAMERECENT" + tname] = t["TESTNAME"]
            tags["TESTRESULTRECENT" + tname] = t["RESULTNAME"]
            tags["TESTREQUIREDRECENT" + tname] = python2display(l, t["DATEREQUIRED"])
            tags["TESTGIVENRECENT" + tname] = python2display(l, t["DATEOFTEST"])
            tags["TESTCOMMENTSRECENT" + tname] = t["COMMENTS"]
            tags["TESTDESCRIPTIONRECENT" + tname] = t["TESTDESCRIPTION"]
    # Medical
    medasc = medical.get_regimens(dbo, int(a["ID"]), not include_incomplete)
    meddesc = medical.get_regimens(dbo, int(a["ID"]), not include_incomplete, medical.DESCENDING_REQUIRED)
    for idx in range(1, 101):
        tags["MEDICALNAME" + str(idx)] = ""
        tags["MEDICALCOMMENTS" + str(idx)] = ""
        tags["MEDICALFREQUENCY" + str(idx)] = ""
        tags["MEDICALNUMBEROFTREATMENTS" + str(idx)] = ""
        tags["MEDICALSTATUS" + str(idx)] = ""
        tags["MEDICALDOSAGE" + str(idx)] = ""
        tags["MEDICALSTARTDATE" + str(idx)] = ""
        tags["MEDICALTREATMENTSGIVEN" + str(idx)] = ""
        tags["MEDICALTREATMENTSREMAINING" + str(idx)] = ""
        tags["MEDICALNAMELAST" + str(idx)] = ""
        tags["MEDICALCOMMENTSLAST" + str(idx)] = ""
        tags["MEDICALFREQUENCYLAST" + str(idx)] = ""
        tags["MEDICALNUMBEROFTREATMENTSLAST" + str(idx)] = ""
        tags["MEDICALSTATUSLAST" + str(idx)] = ""
        tags["MEDICALDOSAGELAST" + str(idx)] = ""
        tags["MEDICALSTARTDATELAST" + str(idx)] = ""
        tags["MEDICALTREATMENTSGIVENLAST" + str(idx)] = ""
        tags["MEDICALTREATMENTSREMAININGLAST" + str(idx)] = ""
    idx = 1
    for m in medasc:
        tags["MEDICALNAME" + str(idx)] = m["TREATMENTNAME"]
        tags["MEDICALCOMMENTS" + str(idx)] = m["COMMENTS"]
        tags["MEDICALFREQUENCY" + str(idx)] = m["NAMEDFREQUENCY"]
        tags["MEDICALNUMBEROFTREATMENTS" + str(idx)] = m["NAMEDNUMBEROFTREATMENTS"]
        tags["MEDICALSTATUS" + str(idx)] = m["NAMEDSTATUS"]
        tags["MEDICALDOSAGE" + str(idx)] = m["DOSAGE"]
        tags["MEDICALSTARTDATE" + str(idx)] = python2display(l, m["STARTDATE"])
        tags["MEDICALTREATMENTSGIVEN" + str(idx)] = str(m["TREATMENTSGIVEN"])
        tags["MEDICALTREATMENTSREMAINING" + str(idx)] = str(m["TREATMENTSREMAINING"])
        idx += 1
    idx = 1
    uniquetypes = {}
    recentgiven = {}
    for m in meddesc:
        tags["MEDICALNAMELAST" + str(idx)] = m["TREATMENTNAME"]
        tags["MEDICALCOMMENTSLAST" + str(idx)] = m["COMMENTS"]
        tags["MEDICALFREQUENCYLAST" + str(idx)] = m["NAMEDFREQUENCY"]
        tags["MEDICALNUMBEROFTREATMENTSLAST" + str(idx)] = m["NAMEDNUMBEROFTREATMENTS"]
        tags["MEDICALSTATUSLAST" + str(idx)] = m["NAMEDSTATUS"]
        tags["MEDICALDOSAGELAST" + str(idx)] = m["DOSAGE"]
        tags["MEDICALSTARTDATELAST" + str(idx)] = python2display(l, m["STARTDATE"])
        tags["MEDICALTREATMENTSGIVENLAST" + str(idx)] = str(m["TREATMENTSGIVEN"])
        tags["MEDICALTREATMENTSREMAININGLAST" + str(idx)] = str(m["TREATMENTSREMAINING"])
        idx += 1
        # If this is the first of this type of med we've seen, make
        # some keys based on its name.
        if m["TREATMENTNAME"] not in uniquetypes:
            tname = m["TREATMENTNAME"].upper().replace(" ", "").replace("/", "")
            uniquetypes[m["TREATMENTNAME"]] = m
            tags["MEDICALNAME" + tname] = m["TREATMENTNAME"]
            tags["MEDICALCOMMENTS" + tname] = m["COMMENTS"]
            tags["MEDICALFREQUENCY" + tname] = m["NAMEDFREQUENCY"]
            tags["MEDICALNUMBEROFTREATMENTS" + tname] = m["NAMEDNUMBEROFTREATMENTS"]
            tags["MEDICALSTATUS" + tname] = m["NAMEDSTATUS"]
            tags["MEDICALDOSAGE" + tname] = m["DOSAGE"]
            tags["MEDICALSTARTDATE" + tname] = python2display(l, m["STARTDATE"])
            tags["MEDICALTREATMENTSGIVEN" + tname] = str(m["TREATMENTSGIVEN"])
            tags["MEDICALTREATMENTSREMAINING" + tname] = str(m["TREATMENTSREMAINING"])
        # If this is the first of this type of med we've seen that's complete
        if m["TREATMENTNAME"] not in recentgiven and m["STATUS"] == 2:
            tname = m["TREATMENTNAME"].upper().replace(" ", "").replace("/", "")
            recentgiven[m["TREATMENTNAME"]] = m
            tags["MEDICALNAMERECENT" + tname] = m["TREATMENTNAME"]
            tags["MEDICALCOMMENTSRECENT" + tname] = m["COMMENTS"]
            tags["MEDICALFREQUENCYRECENT" + tname] = m["NAMEDFREQUENCY"]
            tags["MEDICALNUMBEROFTREATMENTSRECENT" + tname] = m["NAMEDNUMBEROFTREATMENTS"]
            tags["MEDICALSTATUSRECENT" + tname] = m["NAMEDSTATUS"]
            tags["MEDICALDOSAGERECENT" + tname] = m["DOSAGE"]
            tags["MEDICALSTARTDATERECENT" + tname] = python2display(l, m["STARTDATE"])
            tags["MEDICALTREATMENTSGIVENRECENT" + tname] = str(m["TREATMENTSGIVEN"])
            tags["MEDICALTREATMENTSREMAININGRECENT" + tname] = str(m["TREATMENTSREMAINING"])
    # Diet
    dietasc = animal.get_diets(dbo, int(a["ID"]))
    dietdesc = animal.get_diets(dbo, int(a["ID"]), animal.DESCENDING)
    for idx in range(1, 101):
        tags["DIETNAME" + str(idx)] = ""
        tags["DIETDESCRIPTION" + str(idx)] = ""
        tags["DIETDATESTARTED" + str(idx)] = ""
        tags["DIETCOMMENTS" + str(idx)] = ""
        tags["DIETNAMELAST" + str(idx)] = ""
        tags["DIETDESCRIPTIONLAST" + str(idx)] = ""
        tags["DIETDATESTARTEDLAST" + str(idx)] = ""
        tags["DIETCOMMENTSLAST" + str(idx)] = ""
    idx = 1
    for d in dietasc:
        tags["DIETNAME" + str(idx)] = d["DIETNAME"]
        tags["DIETDESCRIPTION" + str(idx)] = d["DIETDESCRIPTION"]
        tags["DIETDATESTARTED" + str(idx)] = python2display(l, d["DATESTARTED"])
        tags["DIETCOMMENTS" + str(idx)] = d["COMMENTS"]
        idx += 1
    idx = 1
    for d in dietdesc:
        tags["DIETNAMELAST" + str(idx)] = d["DIETNAME"]
        tags["DIETDESCRIPTIONLAST" + str(idx)] = d["DIETDESCRIPTION"]
        tags["DIETDATESTARTEDLAST" + str(idx)] = python2display(l, d["DATESTARTED"])
        tags["DIETCOMMENTSLAST" + str(idx)] = d["COMMENTS"]
        idx += 1
    # Donations
    donasc = financial.get_animal_donations(dbo, int(a["ID"]))
    dondesc = financial.get_animal_donations(dbo, int(a["ID"]), financial.DESCENDING)
    for idx in range(1, 101):
        tags["RECEIPTNUM" + str(idx)] = ""
        tags["DONATIONTYPE" + str(idx)] = ""
        tags["DONATIONDATE" + str(idx)] = ""
        tags["DONATIONDATEDUE" + str(idx)] = ""
        tags["DONATIONAMOUNT" + str(idx)] = ""
        tags["DONATIONCOMMENTS" + str(idx)] = ""
        tags["DONATIONGIFTAID" + str(idx)] = ""
        tags["RECEIPTNUMLAST" + str(idx)] = ""
        tags["DONATIONTYPELAST" + str(idx)] = ""
        tags["DONATIONDATELAST" + str(idx)] = ""
        tags["DONATIONDATEDUELAST" + str(idx)] = ""
        tags["DONATIONAMOUNTLAST" + str(idx)] = ""
        tags["DONATIONCOMMENTSLAST" + str(idx)] = ""
        tags["DONATIONGIFTAIDLAST" + str(idx)] = ""
    idx = 1
    for d in donasc:
        tags["RECEIPTNUM" + str(idx)] = utils.padleft(d["ID"], 8)
        tags["DONATIONTYPE" + str(idx)] = d["DONATIONNAME"]
        tags["DONATIONDATE" + str(idx)] = python2display(l, d["DATE"])
        tags["DONATIONDATEDUE" + str(idx)] = python2display(l, d["DATEDUE"])
        tags["DONATIONAMOUNT" + str(idx)] = format_currency_no_symbol(l, d["DONATION"])
        tags["DONATIONCOMMENTS" + str(idx)] = d["COMMENTS"]
        tags["DONATIONGIFTAID" + str(idx)] = d["ISGIFTAID"] == 1 and _("Yes", l) or _("No", l)
        # BUGFIX: this increment was missing, so every ascending donation
        # overwrote slot 1 and DONATIONx2+ tags were never populated
        idx += 1
    idx = 1
    uniquetypes = {}
    recentrec = {}
    for d in dondesc:
        tags["RECEIPTNUMLAST" + str(idx)] = utils.padleft(d["ID"], 8)
        tags["DONATIONTYPELAST" + str(idx)] = d["DONATIONNAME"]
        tags["DONATIONDATELAST" + str(idx)] = python2display(l, d["DATE"])
        tags["DONATIONDATEDUELAST" + str(idx)] = python2display(l, d["DATEDUE"])
        tags["DONATIONAMOUNTLAST" + str(idx)] = format_currency_no_symbol(l, d["DONATION"])
        tags["DONATIONCOMMENTSLAST" + str(idx)] = d["COMMENTS"]
        tags["DONATIONGIFTAIDLAST" + str(idx)] = d["ISGIFTAID"] == 1 and _("Yes", l) or _("No", l)
        idx += 1
        # If this is the first of this type of donation we've seen, make
        # some keys based on its name.
        if d["DONATIONNAME"] not in uniquetypes:
            dname = d["DONATIONNAME"].upper().replace(" ", "").replace("/", "")
            uniquetypes[d["DONATIONNAME"]] = d
            tags["RECEIPTNUM" + dname] = utils.padleft(d["ID"], 8)
            tags["DONATIONTYPE" + dname] = d["DONATIONNAME"]
            tags["DONATIONDATE" + dname] = python2display(l, d["DATE"])
            tags["DONATIONDATEDUE" + dname] = python2display(l, d["DATEDUE"])
            tags["DONATIONAMOUNT" + dname] = format_currency_no_symbol(l, d["DONATION"])
            tags["DONATIONCOMMENTS" + dname] = d["COMMENTS"]
            tags["DONATIONGIFTAID" + dname] = d["ISGIFTAID"] == 1 and _("Yes", l) or _("No", l)
        # If this is the first of this type of donation we've seen that's received
        if d["DONATIONNAME"] not in recentrec and d["DATE"] is not None:
            dname = d["DONATIONNAME"].upper().replace(" ", "").replace("/", "")
            recentrec[d["DONATIONNAME"]] = d
            tags["RECEIPTNUMRECENT" + dname] = utils.padleft(d["ID"], 8)
            tags["DONATIONTYPERECENT" + dname] = d["DONATIONNAME"]
            tags["DONATIONDATERECENT" + dname] = python2display(l, d["DATE"])
            tags["DONATIONDATEDUERECENT" + dname] = python2display(l, d["DATEDUE"])
            tags["DONATIONAMOUNTRECENT" + dname] = format_currency_no_symbol(l, d["DONATION"])
            tags["DONATIONCOMMENTSRECENT" + dname] = d["COMMENTS"]
            tags["DONATIONGIFTAIDRECENT" + dname] = d["ISGIFTAID"] == 1 and _("Yes", l) or _("No", l)
    # Logs
    logasc = log.get_logs(dbo, log.ANIMAL, int(a["ID"]), 0, log.ASCENDING)
    logdesc = log.get_logs(dbo, log.ANIMAL, int(a["ID"]), 0, log.DESCENDING)
    for idx in range(1, 101):
        tags["LOGNAME" + str(idx)] = ""
        tags["LOGDATE" + str(idx)] = ""
        tags["LOGCOMMENTS" + str(idx)] = ""
        tags["LOGNAMELAST" + str(idx)] = ""
        tags["LOGDATELAST" + str(idx)] = ""
        tags["LOGCOMMENTSLAST" + str(idx)] = ""
    idx = 1
    for o in logasc:
        tags["LOGNAME" + str(idx)] = o["LOGTYPENAME"]
        tags["LOGDATE" + str(idx)] = python2display(l, o["DATE"])
        tags["LOGCOMMENTS" + str(idx)] = o["COMMENTS"]
        idx += 1
    idx = 1
    uniquetypes = {}
    for o in logdesc:
        tags["LOGNAMELAST" + str(idx)] = o["LOGTYPENAME"]
        tags["LOGDATELAST" + str(idx)] = python2display(l, o["DATE"])
        tags["LOGCOMMENTSLAST" + str(idx)] = o["COMMENTS"]
        idx += 1
        # If this is the first of this type of log we've seen, make
        # some keys based on its name.
        # BUGFIX: the tracking dict was previously re-initialised inside
        # the loop, so the *oldest* log of each type won instead of the
        # most recent one.
        if o["LOGTYPENAME"] not in uniquetypes:
            lname = o["LOGTYPENAME"].upper().replace(" ", "").replace("/", "")
            uniquetypes[o["LOGTYPENAME"]] = o
            tags["LOGNAME" + lname] = o["LOGTYPENAME"]
            tags["LOGDATE" + lname] = python2display(l, o["DATE"])
            tags["LOGCOMMENTS" + lname] = o["COMMENTS"]
            tags["LOGNAMERECENT" + lname] = o["LOGTYPENAME"]
            tags["LOGDATERECENT" + lname] = python2display(l, o["DATE"])
            tags["LOGCOMMENTSRECENT" + lname] = o["COMMENTS"]
    return tags
def donation_tags(dbo, p):
    """
    Generates a list of tags from a donation result.
    dbo: the database connection
    p:   the donation row
    """
    l = dbo.locale
    tags = {}
    tags["DONATIONID"] = str(p["ID"])
    tags["RECEIPTNUM"] = utils.padleft(p["ID"], 8)
    tags["DONATIONTYPE"] = p["DONATIONNAME"]
    tags["DONATIONDATE"] = python2display(l, p["DATE"])
    tags["DONATIONDATEDUE"] = python2display(l, p["DATEDUE"])
    tags["DONATIONAMOUNT"] = format_currency_no_symbol(l, p["DONATION"])
    tags["DONATIONCOMMENTS"] = p["COMMENTS"]
    tags["DONATIONGIFTAID"] = p["ISGIFTAIDNAME"]
    # Audit info
    tags["DONATIONCREATEDBY"] = p["CREATEDBY"]
    tags["DONATIONCREATEDBYNAME"] = p["CREATEDBY"]
    tags["DONATIONCREATEDDATE"] = python2display(l, p["CREATEDDATE"])
    tags["DONATIONLASTCHANGEDBY"] = p["LASTCHANGEDBY"]
    tags["DONATIONLASTCHANGEDBYNAME"] = p["LASTCHANGEDBY"]
    tags["DONATIONLASTCHANGEDDATE"] = python2display(l, p["LASTCHANGEDDATE"])
    return tags
def person_tags(dbo, p):
    """
    Generates a list of tags from a person result (the deep type from
    calling person.get_person)
    dbo: the database connection
    p:   the person row
    Most fields are exposed twice: once with an OWNER prefix and once
    as a plain alias (and UK town/county have US city/state aliases).
    """
    l = dbo.locale
    tags = {
        "OWNERID" : str(p["ID"]),
        "OWNERCODE" : p["OWNERCODE"],
        # Name components
        "OWNERTITLE" : p["OWNERTITLE"],
        "TITLE" : p["OWNERTITLE"],
        "OWNERINITIALS" : p["OWNERINITIALS"],
        "INITIALS" : p["OWNERINITIALS"],
        "OWNERFORENAMES" : p["OWNERFORENAMES"],
        "FORENAMES" : p["OWNERFORENAMES"],
        "OWNERFIRSTNAMES" : p["OWNERFORENAMES"],
        "FIRSTNAMES" : p["OWNERFORENAMES"],
        "OWNERSURNAME" : p["OWNERSURNAME"],
        "SURNAME" : p["OWNERSURNAME"],
        "OWNERLASTNAME" : p["OWNERSURNAME"],
        "LASTNAME" : p["OWNERSURNAME"],
        "OWNERNAME" : p["OWNERNAME"],
        "NAME" : p["OWNERNAME"],
        # Address (town/county also aliased as city/state)
        "OWNERADDRESS" : p["OWNERADDRESS"],
        "ADDRESS" : p["OWNERADDRESS"],
        "OWNERTOWN" : p["OWNERTOWN"],
        "TOWN" : p["OWNERTOWN"],
        "OWNERCOUNTY" : p["OWNERCOUNTY"],
        "COUNTY" : p["OWNERCOUNTY"],
        "OWNERCITY" : p["OWNERTOWN"],
        "CITY" : p["OWNERTOWN"],
        "OWNERSTATE" : p["OWNERCOUNTY"],
        "STATE" : p["OWNERCOUNTY"],
        "OWNERPOSTCODE" : p["OWNERPOSTCODE"],
        "POSTCODE" : p["OWNERPOSTCODE"],
        "OWNERZIPCODE" : p["OWNERPOSTCODE"],
        "ZIPCODE" : p["OWNERPOSTCODE"],
        # Contact details
        "HOMETELEPHONE" : p["HOMETELEPHONE"],
        "WORKTELEPHONE" : p["WORKTELEPHONE"],
        "MOBILETELEPHONE" : p["MOBILETELEPHONE"],
        "CELLTELEPHONE" : p["MOBILETELEPHONE"],
        "EMAILADDRESS" : p["EMAILADDRESS"],
        "OWNERCOMMENTS" : p["COMMENTS"],
        "COMMENTS" : p["COMMENTS"],
        # Audit info
        "OWNERCREATEDBY" : p["CREATEDBY"],
        "OWNERCREATEDBYNAME" : p["CREATEDBY"],
        "OWNERCREATEDDATE" : python2display(l, p["CREATEDDATE"]),
        "OWNERLASTCHANGEDBY" : p["LASTCHANGEDBY"],
        "OWNERLASTCHANGEDBYNAME" : p["LASTCHANGEDBY"],
        "OWNERLASTCHANGEDDATE" : python2display(l, p["LASTCHANGEDDATE"]),
        "IDCHECK" : (p["IDCHECK"] == 1 and _("Yes", l) or _("No", l)),
        "MEMBERSHIPNUMBER" : p["MEMBERSHIPNUMBER"],
        "MEMBERSHIPEXPIRYDATE" : python2display(l, p["MEMBERSHIPEXPIRYDATE"])
    }
    # Additional fields become tags keyed on their (uppercased) field name
    add = additional.get_additional_fields(dbo, int(p["ID"]), "person")
    for af in add:
        val = af["VALUE"]
        if af["FIELDTYPE"] == additional.YESNO:
            val = additional_yesno(l, af)
        tags[af["FIELDNAME"].upper()] = val
    return tags
def append_tags(tags1, tags2):
    """
    Merges two dictionaries of tags into a new dictionary.
    Neither input is modified; where both define the same key,
    the value from tags2 wins.
    """
    merged = dict(tags1)
    merged.update(tags2)
    return merged
def substitute_tags_plain(searchin, tags):
    """
    Convenience wrapper around substitute_tags for plain text
    substitution: literal << >> markers and no XML escaping.
    """
    return substitute_tags(searchin, tags, use_xml_escaping = False, opener = "<<", closer = ">>")
def substitute_tags(searchin, tags, use_xml_escaping = True, opener = "&lt;&lt;", closer = "&gt;&gt;"):
    """
    Substitutes the dictionary of tags in "tags" for any found
    in "searchin". opener and closer denote the start of a tag.
    If use_xml_escaping is true, tag values are XML escaped on output
    and the default markers are the escaped forms they take inside an
    HTML/XML document; when false, the markers are converted back to
    their literal < > forms. Unknown tags are replaced with "".
    NOTE: the escape/marker entity strings were mangled to no-ops in
    the source (e.g. replace("&", "&")) and have been restored here.
    """
    if not use_xml_escaping:
        # Plain text processing - use the literal markers
        opener = opener.replace("&lt;", "<").replace("&gt;", ">")
        closer = closer.replace("&lt;", "<").replace("&gt;", ">")
    s = searchin
    sp = s.find(opener)
    while sp != -1:
        ep = s.find(closer, sp + len(opener))
        if ep != -1:
            matchtag = s[sp + len(opener):ep].upper()
            newval = ""
            if matchtag in tags:
                newval = tags[matchtag]
            if newval is None:
                # BUGFIX: don't render the literal string "None" for
                # null database values
                newval = ""
            newval = str(newval)
            # Escape the substituted value, but leave generated image
            # markup (DOCUMENTIMGLINK and friends) intact
            if use_xml_escaping and not newval.lower().startswith("<img"):
                newval = newval.replace("&", "&amp;")
                newval = newval.replace("<", "&lt;")
                newval = newval.replace(">", "&gt;")
            s = s[0:sp] + newval + s[ep + len(closer):]
            # Resume scanning from the same position so adjacent tags
            # are picked up
            sp = s.find(opener, sp)
        else:
            # No end marker for this tag, stop processing
            break
    return s
def generate_animal_doc(dbo, template, animalid, username):
    """
    Generates an animal document from a template using animal keys and
    (if a currentowner is available) person keys
    template: The path/name of the template to use
    animalid: The animal to generate for
    Raises ASMValidationError if the animal ID is not valid.
    """
    templatecontents = dbfs.get_string_id(dbo, template)
    a = animal.get_animal(dbo, animalid)
    if a is None:
        raise utils.ASMValidationError("%d is not a valid animal ID" % animalid)
    tags = animal_tags(dbo, a)
    # Merge in the current owner's person tags where the animal has one
    if a["CURRENTOWNERID"] is not None and a["CURRENTOWNERID"] != 0:
        owner = person.get_person(dbo, int(a["CURRENTOWNERID"]))
        tags = append_tags(tags, person_tags(dbo, owner))
    tags = append_tags(tags, org_tags(dbo, username))
    return substitute_tags(templatecontents, tags)
def generate_person_doc(dbo, template, personid, username):
    """
    Generates a person document from a template
    template: The path/name of the template to use
    personid: The person to generate for
    Raises ASMValidationError if the person ID is not valid.
    """
    templatecontents = dbfs.get_string_id(dbo, template)
    p = person.get_person(dbo, personid)
    if p is None:
        raise utils.ASMValidationError("%d is not a valid person ID" % personid)
    tags = append_tags(person_tags(dbo, p), org_tags(dbo, username))
    return substitute_tags(templatecontents, tags)
def generate_donation_doc(dbo, template, donationid, username):
    """
    Generates a donation document from a template
    template: The path/name of the template to use
    donationid: The donation to generate for
    """
    content = dbfs.get_string_id(dbo, template)
    d = financial.get_donation(dbo, donationid)
    if d is None:
        raise utils.ASMValidationError("%d is not a valid donation ID" % donationid)
    tags = donation_tags(dbo, d)
    tags = append_tags(tags, person_tags(dbo, person.get_person(dbo, int(d["OWNERID"]))))
    # A donation is not always linked to an animal
    if d["ANIMALID"] is not None and d["ANIMALID"] != 0:
        tags = append_tags(tags, animal_tags(dbo, animal.get_animal(dbo, d["ANIMALID"])))
    tags = append_tags(tags, org_tags(dbo, username))
    return substitute_tags(content, tags)
displayage = a["ANIMALAGE"]
estimate = ""
if a["ESTIMATEDDOB"] == 1: |
<|file_name|>lint.go<|end_file_name|><|fim▁begin|>// Package staticcheck contains a linter for Go source code.
package staticcheck // import "honnef.co/go/tools/staticcheck"
import (
"fmt"
"go/ast"
"go/constant"
"go/token"
"go/types"
htmltemplate "html/template"
"net/http"
"regexp"
"sort"
"strconv"
"strings"
"sync"
texttemplate "text/template"
"honnef.co/go/tools/functions"
"honnef.co/go/tools/internal/sharedcheck"
"honnef.co/go/tools/lint"
"honnef.co/go/tools/ssa"
"honnef.co/go/tools/staticcheck/vrp"
"golang.org/x/tools/go/ast/astutil"
)
// validRegexp flags the pattern argument of a regexp compile call
// when it is a known constant that does not compile.
func validRegexp(call *Call) {
	pattern := call.Args[0]
	if err := ValidateRegexp(pattern.Value); err != nil {
		pattern.Invalid(err.Error())
	}
}
type runeSlice []rune
func (rs runeSlice) Len() int { return len(rs) }
func (rs runeSlice) Less(i int, j int) bool { return rs[i] < rs[j] }
func (rs runeSlice) Swap(i int, j int) { rs[i], rs[j] = rs[j], rs[i] }
// utf8Cutset flags calls whose second argument (the cutset) is a
// constant that is not valid UTF-8.
func utf8Cutset(call *Call) {
	cutset := call.Args[1]
	if InvalidUTF8(cutset.Value) {
		cutset.Invalid(MsgInvalidUTF8)
	}
}
// uniqueCutset flags calls whose second argument (the cutset) is a
// constant containing duplicate characters.
func uniqueCutset(call *Call) {
	cutset := call.Args[1]
	if !UniqueStringCutset(cutset.Value) {
		cutset.Invalid(MsgNonUniqueCutset)
	}
}
// unmarshalPointer returns a CallCheck that flags argument arg of an
// unmarshal-style function name when the value passed is not a pointer.
func unmarshalPointer(name string, arg int) CallCheck {
	return func(call *Call) {
		target := call.Args[arg]
		if Pointer(target.Value) {
			return
		}
		target.Invalid(fmt.Sprintf("%s expects to unmarshal into a pointer, but the provided value is not a pointer", name))
	}
}
// pointlessIntMath flags float math functions (Ceil, Floor, ...)
// whose argument was converted from an integer; the result is trivial.
func pointlessIntMath(call *Call) {
	if !ConvertedFromInt(call.Args[0].Value) {
		return
	}
	call.Invalid(fmt.Sprintf("calling %s on a converted integer is pointless", lint.CallName(call.Instr.Common())))
}
// checkValidHostPort returns a CallCheck that flags argument arg when
// it is a constant that is not a valid "host:port" address.
func checkValidHostPort(arg int) CallCheck {
	return func(call *Call) {
		addr := call.Args[arg]
		if ValidHostPort(addr.Value) {
			return
		}
		addr.Invalid(MsgInvalidHostPort)
	}
}
// Rule tables mapping fully qualified function names to CallChecks.
// Each table backs one check ID via callChecker in Funcs below.
var (
	// SA1000: invalid regular expression literals.
	checkRegexpRules = map[string]CallCheck{
		"regexp.MustCompile": validRegexp,
		"regexp.Compile":     validRegexp,
	}
	// SA1002: invalid time layout strings.
	checkTimeParseRules = map[string]CallCheck{
		"time.Parse": func(call *Call) {
			arg := call.Args[0]
			err := ValidateTimeLayout(arg.Value)
			if err != nil {
				arg.Invalid(err.Error())
			}
		},
	}
	// SA1003: values that binary.Write cannot marshal.
	checkEncodingBinaryRules = map[string]CallCheck{
		"encoding/binary.Write": func(call *Call) {
			arg := call.Args[2]
			if !CanBinaryMarshal(call.Job, arg.Value) {
				arg.Invalid(fmt.Sprintf("value of type %s cannot be used with binary.Write", arg.Value.Value.Type()))
			}
		},
	}
	// SA1007: invalid constant URLs.
	checkURLsRules = map[string]CallCheck{
		"net/url.Parse": func(call *Call) {
			arg := call.Args[0]
			err := ValidateURL(arg.Value)
			if err != nil {
				arg.Invalid(err.Error())
			}
		},
	}
	// SA6002: non-pointer-like values stored in a sync.Pool allocate on every Put.
	checkSyncPoolValueRules = map[string]CallCheck{
		"(*sync.Pool).Put": func(call *Call) {
			arg := call.Args[0]
			typ := arg.Value.Value.Type()
			if !lint.IsPointerLike(typ) {
				arg.Invalid("argument should be pointer-like to avoid allocations")
			}
		},
	}
	// SA1010: FindAll methods called with n == 0, which never returns matches.
	checkRegexpFindAllRules = map[string]CallCheck{
		"(*regexp.Regexp).FindAll":                    RepeatZeroTimes("a FindAll method", 1),
		"(*regexp.Regexp).FindAllIndex":               RepeatZeroTimes("a FindAll method", 1),
		"(*regexp.Regexp).FindAllString":              RepeatZeroTimes("a FindAll method", 1),
		"(*regexp.Regexp).FindAllStringIndex":         RepeatZeroTimes("a FindAll method", 1),
		"(*regexp.Regexp).FindAllStringSubmatch":      RepeatZeroTimes("a FindAll method", 1),
		"(*regexp.Regexp).FindAllStringSubmatchIndex": RepeatZeroTimes("a FindAll method", 1),
		"(*regexp.Regexp).FindAllSubmatch":            RepeatZeroTimes("a FindAll method", 1),
		"(*regexp.Regexp).FindAllSubmatchIndex":       RepeatZeroTimes("a FindAll method", 1),
	}
	// SA1011: cutsets that are not valid UTF-8.
	checkUTF8CutsetRules = map[string]CallCheck{
		"strings.IndexAny":     utf8Cutset,
		"strings.LastIndexAny": utf8Cutset,
		"strings.ContainsAny":  utf8Cutset,
		"strings.Trim":         utf8Cutset,
		"strings.TrimLeft":     utf8Cutset,
		"strings.TrimRight":    utf8Cutset,
	}
	// SA1024: cutsets with duplicate characters (likely meant TrimPrefix/TrimSuffix).
	checkUniqueCutsetRules = map[string]CallCheck{
		"strings.Trim":      uniqueCutset,
		"strings.TrimLeft":  uniqueCutset,
		"strings.TrimRight": uniqueCutset,
	}
	// SA1014: unmarshal targets that are not pointers.
	checkUnmarshalPointerRules = map[string]CallCheck{
		"encoding/xml.Unmarshal":                unmarshalPointer("xml.Unmarshal", 1),
		"(*encoding/xml.Decoder).Decode":        unmarshalPointer("Decode", 0),
		"encoding/json.Unmarshal":               unmarshalPointer("json.Unmarshal", 1),
		"(*encoding/json.Decoder).Decode":       unmarshalPointer("Decode", 0),
	}
	// SA1017: unbuffered channels passed to signal.Notify can drop signals.
	checkUnbufferedSignalChanRules = map[string]CallCheck{
		"os/signal.Notify": func(call *Call) {
			arg := call.Args[0]
			if UnbufferedChannel(arg.Value) {
				arg.Invalid("the channel used with signal.Notify should be buffered")
			}
		},
	}
	// SA4015: float math on values converted from integers.
	checkMathIntRules = map[string]CallCheck{
		"math.Ceil":  pointlessIntMath,
		"math.Floor": pointlessIntMath,
		"math.IsNaN": pointlessIntMath,
		"math.Trunc": pointlessIntMath,
		"math.IsInf": pointlessIntMath,
	}
	// SA1018: Replace called with n == 0, which replaces nothing.
	checkStringsReplaceZeroRules = map[string]CallCheck{
		"strings.Replace": RepeatZeroTimes("strings.Replace", 3),
		"bytes.Replace":   RepeatZeroTimes("bytes.Replace", 3),
	}
	// SA1020: invalid host:port listen addresses.
	checkListenAddressRules = map[string]CallCheck{
		"net/http.ListenAndServe":    checkValidHostPort(0),
		"net/http.ListenAndServeTLS": checkValidHostPort(0),
	}
	// SA1021: bytes.Equal on net.IPs; use net.IP.Equal instead.
	checkBytesEqualIPRules = map[string]CallCheck{
		"bytes.Equal": func(call *Call) {
			if ConvertedFrom(call.Args[0].Value, "net.IP") && ConvertedFrom(call.Args[1].Value, "net.IP") {
				call.Invalid("use net.IP.Equal to compare net.IPs, not bytes.Equal")
			}
		},
	}
	// SA6000: regexp.Match* in a loop recompiles the pattern every iteration.
	checkRegexpMatchLoopRules = map[string]CallCheck{
		"regexp.Match":       loopedRegexp("regexp.Match"),
		"regexp.MatchReader": loopedRegexp("regexp.MatchReader"),
		"regexp.MatchString": loopedRegexp("regexp.MatchString"),
	}
)
// Checker implements the staticcheck suite of checks. The zero value
// is usable; Init must be called once per program before running checks.
type Checker struct {
	CheckGenerated bool // if set, also report problems in generated files

	funcDescs      *functions.Descriptions    // per-function facts (loops, purity, ranges); built in Init
	deprecatedObjs map[types.Object]string    // object -> deprecation message; built in Init
	nodeFns        map[ast.Node]*ssa.Function // AST node -> enclosing SSA function; built in Init
}
// NewChecker returns a Checker with default configuration.
func NewChecker() *Checker {
	c := &Checker{}
	return c
}
// Funcs returns the map of staticcheck check identifiers (SAxxxx) to
// their implementations. A nil entry marks an identifier that is
// reserved but currently has no implementation.
func (c *Checker) Funcs() map[string]lint.Func {
	return map[string]lint.Func{
		"SA1000": c.callChecker(checkRegexpRules),
		"SA1001": c.CheckTemplate,
		"SA1002": c.callChecker(checkTimeParseRules),
		"SA1003": c.callChecker(checkEncodingBinaryRules),
		"SA1004": c.CheckTimeSleepConstant,
		"SA1005": c.CheckExec,
		"SA1006": c.CheckUnsafePrintf,
		"SA1007": c.callChecker(checkURLsRules),
		"SA1008": c.CheckCanonicalHeaderKey,
		"SA1009": nil,
		"SA1010": c.callChecker(checkRegexpFindAllRules),
		"SA1011": c.callChecker(checkUTF8CutsetRules),
		"SA1012": c.CheckNilContext,
		"SA1013": c.CheckSeeker,
		"SA1014": c.callChecker(checkUnmarshalPointerRules),
		"SA1015": c.CheckLeakyTimeTick,
		"SA1016": c.CheckUntrappableSignal,
		"SA1017": c.callChecker(checkUnbufferedSignalChanRules),
		"SA1018": c.callChecker(checkStringsReplaceZeroRules),
		"SA1019": c.CheckDeprecated,
		"SA1020": c.callChecker(checkListenAddressRules),
		"SA1021": c.callChecker(checkBytesEqualIPRules),
		"SA1022": nil,
		"SA1023": c.CheckWriterBufferModified,
		"SA1024": c.callChecker(checkUniqueCutsetRules),

		"SA2000": c.CheckWaitgroupAdd,
		"SA2001": c.CheckEmptyCriticalSection,
		"SA2002": c.CheckConcurrentTesting,
		"SA2003": c.CheckDeferLock,

		"SA3000": c.CheckTestMainExit,
		"SA3001": c.CheckBenchmarkN,

		"SA4000": c.CheckLhsRhsIdentical,
		"SA4001": c.CheckIneffectiveCopy,
		"SA4002": c.CheckDiffSizeComparison,
		"SA4003": c.CheckUnsignedComparison,
		"SA4004": c.CheckIneffectiveLoop,
		"SA4005": c.CheckIneffectiveFieldAssignments,
		"SA4006": c.CheckUnreadVariableValues,
		// "SA4007": c.CheckPredeterminedBooleanExprs,
		"SA4007": nil,
		"SA4008": c.CheckLoopCondition,
		"SA4009": c.CheckArgOverwritten,
		"SA4010": c.CheckIneffectiveAppend,
		"SA4011": c.CheckScopedBreak,
		"SA4012": c.CheckNaNComparison,
		"SA4013": c.CheckDoubleNegation,
		"SA4014": c.CheckRepeatedIfElse,
		"SA4015": c.callChecker(checkMathIntRules),
		"SA4016": c.CheckSillyBitwiseOps,
		"SA4017": c.CheckPureFunctions,
		"SA4018": c.CheckSelfAssignment,
		"SA4019": c.CheckDuplicateBuildConstraints,

		"SA5000": c.CheckNilMaps,
		"SA5001": c.CheckEarlyDefer,
		"SA5002": c.CheckInfiniteEmptyLoop,
		"SA5003": c.CheckDeferInInfiniteLoop,
		"SA5004": c.CheckLoopEmptyDefault,
		"SA5005": c.CheckCyclicFinalizer,
		// "SA5006": c.CheckSliceOutOfBounds,
		"SA5007": c.CheckInfiniteRecursion,

		"SA6000": c.callChecker(checkRegexpMatchLoopRules),
		"SA6001": c.CheckMapBytesKey,
		"SA6002": c.callChecker(checkSyncPoolValueRules),
		"SA6003": c.CheckRangeStringRunes,
		"SA6004": nil,

		"SA9000": nil,
		"SA9001": c.CheckDubiousDeferInChannelRangeLoop,
		"SA9002": c.CheckNonOctalFileMode,
		"SA9003": c.CheckEmptyBranch,
	}
}
// filterGenerated drops generated files from the slice unless the
// checker is configured to inspect generated code as well.
func (c *Checker) filterGenerated(files []*ast.File) []*ast.File {
	if c.CheckGenerated {
		return files
	}
	var kept []*ast.File
	for _, file := range files {
		if lint.IsGenerated(file) {
			continue
		}
		kept = append(kept, file)
	}
	return kept
}
// Init builds the per-program state used by the individual checks:
// function descriptions, optimized SSA for all functions, the AST
// node -> SSA function map, and the deprecation messages consumed by
// CheckDeprecated. Deprecation scanning runs one goroutine per
// package; each goroutine writes only to its own map, and the maps
// are merged after wg.Wait, so no locking is needed.
func (c *Checker) Init(prog *lint.Program) {
	c.funcDescs = functions.NewDescriptions(prog.SSA)
	c.deprecatedObjs = map[types.Object]string{}
	c.nodeFns = map[ast.Node]*ssa.Function{}

	for _, fn := range prog.AllFunctions {
		if fn.Blocks != nil {
			applyStdlibKnowledge(fn)
			ssa.OptimizeBlocks(fn)
		}
	}

	c.nodeFns = lint.NodeFns(prog.Packages)

	deprecated := []map[types.Object]string{}
	wg := &sync.WaitGroup{}
	for _, pkginfo := range prog.Prog.AllPackages {
		pkginfo := pkginfo
		scope := pkginfo.Pkg.Scope()
		names := scope.Names()
		wg.Add(1)

		m := map[types.Object]string{}
		deprecated = append(deprecated, m)
		go func(m map[types.Object]string) {
			for _, name := range names {
				obj := scope.Lookup(name)
				msg := c.deprecationMessage(pkginfo.Files, prog.SSA.Fset, obj)
				if msg != "" {
					m[obj] = msg
				}
				if typ, ok := obj.Type().Underlying().(*types.Struct); ok {
					n := typ.NumFields()
					for i := 0; i < n; i++ {
						// FIXME(dh): This code will not find deprecated
						// fields in anonymous structs.
						field := typ.Field(i)
						msg := c.deprecationMessage(pkginfo.Files, prog.SSA.Fset, field)
						if msg != "" {
							m[field] = msg
						}
					}
				}
			}
			wg.Done()
		}(m)
	}
	wg.Wait()
	// Merge the per-package maps into the single lookup table.
	for _, m := range deprecated {
		for k, v := range m {
			c.deprecatedObjs[k] = v
		}
	}
}
// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos)
func tokenFileContainsPos(f *token.File, pos token.Pos) bool {
p := int(pos)
base := f.Base()
return base <= p && p < base+f.Size()
}
// pathEnclosingInterval returns the AST path enclosing the interval
// [start, end) in whichever of files contains start, or nil if no
// file does.
func pathEnclosingInterval(files []*ast.File, fset *token.FileSet, start, end token.Pos) (path []ast.Node, exact bool) {
	for _, f := range files {
		if f.Pos() == token.NoPos {
			// The parser leaves a file without a position when it saw
			// too many errors and bailed out.
			// (Use parser.AllErrors to prevent that.)
			continue
		}
		if !tokenFileContainsPos(fset.File(f.Pos()), start) {
			continue
		}
		p, ex := astutil.PathEnclosingInterval(f, start, end)
		if p != nil {
			return p, ex
		}
	}
	return nil, false
}
// deprecationMessage returns the text following "Deprecated: " in the
// final paragraph of obj's doc comment (the Go convention for marking
// deprecation), or "" if the object is not marked deprecated. For
// ValueSpecs and TypeSpecs the doc comment of the enclosing GenDecl is
// consulted as well, since that is where it often lives.
func (c *Checker) deprecationMessage(files []*ast.File, fset *token.FileSet, obj types.Object) (message string) {
	path, _ := pathEnclosingInterval(files, fset, obj.Pos(), obj.Pos())
	if len(path) <= 2 {
		return ""
	}
	var docs []*ast.CommentGroup
	switch n := path[1].(type) {
	case *ast.FuncDecl:
		docs = append(docs, n.Doc)
	case *ast.Field:
		docs = append(docs, n.Doc)
	case *ast.ValueSpec:
		docs = append(docs, n.Doc)
		if len(path) >= 3 {
			if n, ok := path[2].(*ast.GenDecl); ok {
				docs = append(docs, n.Doc)
			}
		}
	case *ast.TypeSpec:
		docs = append(docs, n.Doc)
		if len(path) >= 3 {
			if n, ok := path[2].(*ast.GenDecl); ok {
				docs = append(docs, n.Doc)
			}
		}
	default:
		return ""
	}

	for _, doc := range docs {
		if doc == nil {
			continue
		}
		// Only a "Deprecated: " prefix on the last paragraph counts.
		parts := strings.Split(doc.Text(), "\n\n")
		last := parts[len(parts)-1]
		if !strings.HasPrefix(last, "Deprecated: ") {
			continue
		}
		alt := last[len("Deprecated: "):]
		alt = strings.Replace(alt, "\n", " ", -1)
		return alt
	}
	return ""
}
// isInLoop reports whether basic block b belongs to any natural loop
// of its enclosing function, per the precomputed function descriptions.
func (c *Checker) isInLoop(b *ssa.BasicBlock) bool {
	for _, loop := range c.funcDescs.Get(b.Parent()).Loops {
		if loop[b] {
			return true
		}
	}
	return false
}
// applyStdlibKnowledge rewrites fn's SSA form using facts about the
// standard library that the SSA builder cannot know by itself.
//
// It currently encodes one fact: a comma-ok receive from a channel
// returned by time.Tick never yields ok == false, so a branch on that
// ok value can be replaced with an unconditional jump to the "ok"
// successor. This primarily matches `for range time.Tick(x)` loops,
// but can also match equivalent hand-written code.
func applyStdlibKnowledge(fn *ssa.Function) {
	if len(fn.Blocks) == 0 {
		// External function, no body to rewrite.
		return
	}
	for _, block := range fn.Blocks {
		if len(block.Instrs) < 3 {
			continue
		}
		if len(block.Succs) != 2 {
			continue
		}
		// Collect pointers to the real instructions, skipping debug
		// references, so the pattern match below sees consecutive
		// "receive; extract ok; branch on ok" instructions.
		var instrs []*ssa.Instruction
		for i, ins := range block.Instrs {
			if _, ok := ins.(*ssa.DebugRef); ok {
				continue
			}
			instrs = append(instrs, &block.Instrs[i])
		}

		for i, ins := range instrs {
			// Guard the i+1/i+2 lookahead. In well-formed SSA a
			// receive is always followed by at least the block
			// terminator, but be defensive rather than risk an
			// out-of-range panic on unexpected input.
			if i+2 >= len(instrs) {
				break
			}
			unop, ok := (*ins).(*ssa.UnOp)
			if !ok || unop.Op != token.ARROW {
				continue
			}
			call, ok := unop.X.(*ssa.Call)
			if !ok {
				continue
			}
			if !lint.IsCallTo(call.Common(), "time.Tick") {
				continue
			}
			ex, ok := (*instrs[i+1]).(*ssa.Extract)
			if !ok || ex.Tuple != unop || ex.Index != 1 {
				continue
			}
			ifstmt, ok := (*instrs[i+2]).(*ssa.If)
			if !ok || ifstmt.Cond != ex {
				continue
			}
			// Replace the conditional branch with a jump and unlink
			// the never-taken "not ok" edge.
			*instrs[i+2] = ssa.NewJump(block)
			succ := block.Succs[1]
			block.Succs = block.Succs[0:1]
			succ.RemovePred(block)
		}
	}
}
// hasType reports whether expr's static type renders exactly as name
// (fully qualified, e.g. "net/http.Header").
func hasType(j *lint.Job, expr ast.Expr, name string) bool {
	typ := j.Program.Info.TypeOf(expr)
	return types.TypeString(typ, nil) == name
}
// CheckUntrappableSignal (SA1016) flags calls to signal.Ignore,
// signal.Notify and signal.Reset that pass signals which the process
// can never trap: os.Kill / syscall.SIGKILL and syscall.SIGSTOP.
func (c *Checker) CheckUntrappableSignal(j *lint.Job) {
	fn := func(node ast.Node) bool {
		call, ok := node.(*ast.CallExpr)
		if !ok {
			return true
		}
		if !j.IsCallToAnyAST(call,
			"os/signal.Ignore", "os/signal.Notify", "os/signal.Reset") {
			return true
		}
		for _, arg := range call.Args {
			// Unwrap explicit conversions like os.Signal(syscall.SIGKILL).
			if conv, ok := arg.(*ast.CallExpr); ok && isName(j, conv.Fun, "os.Signal") {
				arg = conv.Args[0]
			}

			if isName(j, arg, "os.Kill") || isName(j, arg, "syscall.SIGKILL") {
				j.Errorf(arg, "%s cannot be trapped (did you mean syscall.SIGTERM?)", j.Render(arg))
			}
			if isName(j, arg, "syscall.SIGSTOP") {
				j.Errorf(arg, "%s signal cannot be trapped", j.Render(arg))
			}
		}
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// CheckTemplate (SA1001) parses constant strings passed to
// (*text/template.Template).Parse and (*html/template.Template).Parse
// and reports template syntax errors. Only templates created directly
// via template.New are checked, to avoid false positives from
// templates with custom delimiters.
func (c *Checker) CheckTemplate(j *lint.Job) {
	fn := func(node ast.Node) bool {
		call, ok := node.(*ast.CallExpr)
		if !ok {
			return true
		}
		var kind string
		if j.IsCallToAST(call, "(*text/template.Template).Parse") {
			kind = "text"
		} else if j.IsCallToAST(call, "(*html/template.Template).Parse") {
			kind = "html"
		} else {
			return true
		}
		sel := call.Fun.(*ast.SelectorExpr)
		if !j.IsCallToAST(sel.X, "text/template.New") &&
			!j.IsCallToAST(sel.X, "html/template.New") {
			// TODO(dh): this is a cheap workaround for templates with
			// different delims. A better solution with less false
			// negatives would use data flow analysis to see where the
			// template comes from and where it has been
			return true
		}
		s, ok := j.ExprToString(call.Args[0])
		if !ok {
			return true
		}
		var err error
		switch kind {
		case "text":
			_, err = texttemplate.New("").Parse(s)
		case "html":
			_, err = htmltemplate.New("").Parse(s)
		}
		if err != nil {
			// TODO(dominikh): whitelist other parse errors, if any
			if strings.Contains(err.Error(), "unexpected") {
				j.Errorf(call.Args[0], "%s", err)
			}
		}
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// CheckTimeSleepConstant (SA1004) flags time.Sleep calls with a small
// integer literal (1..120): such a literal is a duration in
// nanoseconds, which almost always means the author forgot to
// multiply by a time unit such as time.Second.
func (c *Checker) CheckTimeSleepConstant(j *lint.Job) {
	fn := func(node ast.Node) bool {
		call, ok := node.(*ast.CallExpr)
		if !ok {
			return true
		}
		if !j.IsCallToAST(call, "time.Sleep") {
			return true
		}
		lit, ok := call.Args[0].(*ast.BasicLit)
		if !ok {
			return true
		}
		n, err := strconv.Atoi(lit.Value)
		if err != nil {
			return true
		}
		if n == 0 || n > 120 {
			// time.Sleep(0) is a seldomly used pattern in concurrency
			// tests. >120 might be intentional. 120 was chosen
			// because the user could've meant 2 minutes.
			return true
		}
		recommendation := "time.Sleep(time.Nanosecond)"
		if n != 1 {
			recommendation = fmt.Sprintf("time.Sleep(%d * time.Nanosecond)", n)
		}
		j.Errorf(call.Args[0], "sleeping for %d nanoseconds is probably a bug. Be explicit if it isn't: %s", n, recommendation)
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// CheckWaitgroupAdd (SA2000) flags wg.Add(...) appearing as the first
// statement inside a goroutine's function literal: the goroutine may
// not have run by the time Wait is called, so the Add must happen
// before `go` to avoid a race.
func (c *Checker) CheckWaitgroupAdd(j *lint.Job) {
	fn := func(node ast.Node) bool {
		g, ok := node.(*ast.GoStmt)
		if !ok {
			return true
		}
		fun, ok := g.Call.Fun.(*ast.FuncLit)
		if !ok {
			return true
		}
		if len(fun.Body.List) == 0 {
			return true
		}
		stmt, ok := fun.Body.List[0].(*ast.ExprStmt)
		if !ok {
			return true
		}
		call, ok := stmt.X.(*ast.CallExpr)
		if !ok {
			return true
		}
		sel, ok := call.Fun.(*ast.SelectorExpr)
		if !ok {
			return true
		}
		fn, ok := j.Program.Info.ObjectOf(sel.Sel).(*types.Func)
		if !ok {
			return true
		}
		if fn.FullName() == "(*sync.WaitGroup).Add" {
			j.Errorf(sel, "should call %s before starting the goroutine to avoid a race",
				j.Render(stmt))
		}
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// CheckInfiniteEmptyLoop (SA5002) flags empty for loops with no post
// statement: they either spin forever (no condition) or busy-wait on
// a condition that can only change via a data race.
func (c *Checker) CheckInfiniteEmptyLoop(j *lint.Job) {
	fn := func(node ast.Node) bool {
		loop, ok := node.(*ast.ForStmt)
		if !ok || len(loop.Body.List) != 0 || loop.Post != nil {
			return true
		}

		if loop.Init != nil {
			// TODO(dh): this isn't strictly necessary, it just makes
			// the check easier.
			return true
		}
		// An empty loop is bad news in two cases: 1) The loop has no
		// condition. In that case, it's just a loop that spins
		// forever and as fast as it can, keeping a core busy. 2) The
		// loop condition only consists of variable or field reads and
		// operators on those. The only way those could change their
		// value is with unsynchronised access, which constitutes a
		// data race.
		//
		// If the condition contains any function calls, its behaviour
		// is dynamic and the loop might terminate. Similarly for
		// channel receives.

		if loop.Cond != nil && hasSideEffects(loop.Cond) {
			return true
		}

		j.Errorf(loop, "this loop will spin, using 100%% CPU")
		if loop.Cond != nil {
			j.Errorf(loop, "loop condition never changes or has a race condition")
		}
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// CheckDeferInInfiniteLoop (SA5003) flags defer statements inside
// loops that have no condition and no reachable return/break: such
// defers can never run because the surrounding function never exits.
func (c *Checker) CheckDeferInInfiniteLoop(j *lint.Job) {
	fn := func(node ast.Node) bool {
		mightExit := false
		var defers []ast.Stmt
		loop, ok := node.(*ast.ForStmt)
		if !ok || loop.Cond != nil {
			return true
		}
		fn2 := func(node ast.Node) bool {
			switch stmt := node.(type) {
			case *ast.ReturnStmt:
				mightExit = true
			case *ast.BranchStmt:
				// TODO(dominikh): if this sees a break in a switch or
				// select, it doesn't check if it breaks the loop or
				// just the select/switch. This causes some false
				// negatives.
				if stmt.Tok == token.BREAK {
					mightExit = true
				}
			case *ast.DeferStmt:
				defers = append(defers, stmt)
			case *ast.FuncLit:
				// Don't look into function bodies
				return false
			}
			return true
		}
		ast.Inspect(loop.Body, fn2)
		if mightExit {
			return true
		}
		for _, stmt := range defers {
			j.Errorf(stmt, "defers in this infinite loop will never run")
		}
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// CheckDubiousDeferInChannelRangeLoop (SA9001) flags defer statements
// inside `for range ch` loops: the loop only ends when the channel is
// closed, so the defers are unlikely to run when the author expects.
func (c *Checker) CheckDubiousDeferInChannelRangeLoop(j *lint.Job) {
	fn := func(node ast.Node) bool {
		loop, ok := node.(*ast.RangeStmt)
		if !ok {
			return true
		}
		typ := j.Program.Info.TypeOf(loop.X)
		_, ok = typ.Underlying().(*types.Chan)
		if !ok {
			return true
		}
		fn2 := func(node ast.Node) bool {
			switch stmt := node.(type) {
			case *ast.DeferStmt:
				j.Errorf(stmt, "defers in this range loop won't run unless the channel gets closed")
			case *ast.FuncLit:
				// Don't look into function bodies
				return false
			}
			return true
		}
		ast.Inspect(loop.Body, fn2)
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// CheckTestMainExit (SA3000) flags TestMain functions that call
// m.Run but never call os.Exit, which means the test binary's exit
// code does not reflect the test result.
func (c *Checker) CheckTestMainExit(j *lint.Job) {
	fn := func(node ast.Node) bool {
		if !isTestMain(j, node) {
			return true
		}

		arg := j.Program.Info.ObjectOf(node.(*ast.FuncDecl).Type.Params.List[0].Names[0])
		callsRun := false
		fn2 := func(node ast.Node) bool {
			call, ok := node.(*ast.CallExpr)
			if !ok {
				return true
			}
			sel, ok := call.Fun.(*ast.SelectorExpr)
			if !ok {
				return true
			}
			ident, ok := sel.X.(*ast.Ident)
			if !ok {
				return true
			}
			// Only count Run called on the *testing.M parameter itself.
			if arg != j.Program.Info.ObjectOf(ident) {
				return true
			}
			if sel.Sel.Name == "Run" {
				callsRun = true
				return false
			}
			return true
		}
		ast.Inspect(node.(*ast.FuncDecl).Body, fn2)

		callsExit := false
		fn3 := func(node ast.Node) bool {
			if j.IsCallToAST(node, "os.Exit") {
				callsExit = true
				return false
			}
			return true
		}
		ast.Inspect(node.(*ast.FuncDecl).Body, fn3)
		if !callsExit && callsRun {
			j.Errorf(node, "TestMain should call os.Exit to set exit code")
		}
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// isTestMain reports whether node is a function declaration of the
// form func TestMain(m *testing.M).
func isTestMain(j *lint.Job, node ast.Node) bool {
	decl, ok := node.(*ast.FuncDecl)
	if !ok || decl.Name.Name != "TestMain" {
		return false
	}
	params := decl.Type.Params.List
	if len(params) != 1 || len(params[0].Names) != 1 {
		return false
	}
	typ := j.Program.Info.TypeOf(params[0].Type)
	return typ != nil && typ.String() == "*testing.M"
}
// CheckExec (SA1005) flags exec.Command calls whose first argument
// looks like a full shell command line (contains spaces but no path
// separators): exec.Command takes a program name plus separate args,
// not a shell string.
func (c *Checker) CheckExec(j *lint.Job) {
	fn := func(node ast.Node) bool {
		call, ok := node.(*ast.CallExpr)
		if !ok {
			return true
		}
		if !j.IsCallToAST(call, "os/exec.Command") {
			return true
		}
		val, ok := j.ExprToString(call.Args[0])
		if !ok {
			return true
		}
		// A space without any path separator suggests "prog arg1 arg2".
		if !strings.Contains(val, " ") || strings.Contains(val, `\`) || strings.Contains(val, "/") {
			return true
		}
		j.Errorf(call.Args[0], "first argument to exec.Command looks like a shell command, but a program name or path are expected")
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// CheckLoopEmptyDefault (SA5004) flags `for { select { ... default: } }`
// with an empty default case: the loop never blocks and spins a core.
func (c *Checker) CheckLoopEmptyDefault(j *lint.Job) {
	fn := func(node ast.Node) bool {
		loop, ok := node.(*ast.ForStmt)
		if !ok || len(loop.Body.List) != 1 || loop.Cond != nil || loop.Init != nil {
			return true
		}
		sel, ok := loop.Body.List[0].(*ast.SelectStmt)
		if !ok {
			return true
		}
		for _, c := range sel.Body.List {
			// Comm == nil identifies the default clause.
			if comm, ok := c.(*ast.CommClause); ok && comm.Comm == nil && len(comm.Body) == 0 {
				j.Errorf(comm, "should not have an empty default case in a for+select loop. The loop will spin.")
			}
		}
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// CheckLhsRhsIdentical (SA4000) flags binary expressions whose left
// and right operands render identically for operators where that is
// meaningless (==, !=, -, /, &&, <, ...). Float comparisons are
// exempt since x != x is the idiomatic NaN test.
func (c *Checker) CheckLhsRhsIdentical(j *lint.Job) {
	fn := func(node ast.Node) bool {
		op, ok := node.(*ast.BinaryExpr)
		if !ok {
			return true
		}
		switch op.Op {
		case token.EQL, token.NEQ:
			if basic, ok := j.Program.Info.TypeOf(op.X).(*types.Basic); ok {
				if kind := basic.Kind(); kind == types.Float32 || kind == types.Float64 {
					// f == f and f != f might be used to check for NaN
					return true
				}
			}
		case token.SUB, token.QUO, token.AND, token.REM, token.OR, token.XOR, token.AND_NOT,
			token.LAND, token.LOR, token.LSS, token.GTR, token.LEQ, token.GEQ:
		default:
			// For some ops, such as + and *, it can make sense to
			// have identical operands
			return true
		}

		if j.Render(op.X) != j.Render(op.Y) {
			return true
		}
		j.Errorf(op, "identical expressions on the left and right side of the '%s' operator", op.Op)
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// CheckScopedBreak (SA4011) flags an unlabelled break that ends a
// case of a switch/select inside a loop when it appears to be meant
// to break out of the loop; such a break only leaves the switch or
// select.
func (c *Checker) CheckScopedBreak(j *lint.Job) {
	fn := func(node ast.Node) bool {
		var body *ast.BlockStmt
		switch node := node.(type) {
		case *ast.ForStmt:
			body = node.Body
		case *ast.RangeStmt:
			body = node.Body
		default:
			return true
		}
		for _, stmt := range body.List {
			var blocks [][]ast.Stmt
			switch stmt := stmt.(type) {
			case *ast.SwitchStmt:
				for _, c := range stmt.Body.List {
					blocks = append(blocks, c.(*ast.CaseClause).Body)
				}
			case *ast.SelectStmt:
				for _, c := range stmt.Body.List {
					blocks = append(blocks, c.(*ast.CommClause).Body)
				}
			default:
				continue
			}

			for _, body := range blocks {
				if len(body) == 0 {
					continue
				}
				lasts := []ast.Stmt{body[len(body)-1]}
				// TODO(dh): unfold all levels of nested block
				// statements, not just a single level if statement
				if ifs, ok := lasts[0].(*ast.IfStmt); ok {
					if len(ifs.Body.List) == 0 {
						continue
					}
					lasts[0] = ifs.Body.List[len(ifs.Body.List)-1]

					if block, ok := ifs.Else.(*ast.BlockStmt); ok {
						if len(block.List) != 0 {
							lasts = append(lasts, block.List[len(block.List)-1])
						}
					}
				}
				for _, last := range lasts {
					branch, ok := last.(*ast.BranchStmt)
					if !ok || branch.Tok != token.BREAK || branch.Label != nil {
						continue
					}
					j.Errorf(branch, "ineffective break statement. Did you mean to break out of the outer loop?")
				}
			}
		}
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// CheckUnsafePrintf (SA1006) flags Printf/Sprintf/log.Printf calls
// whose only argument is a dynamic (non-literal) format string: any
// '%' in the value will be misinterpreted; the print-style function
// should be used instead.
func (c *Checker) CheckUnsafePrintf(j *lint.Job) {
	fn := func(node ast.Node) bool {
		call, ok := node.(*ast.CallExpr)
		if !ok {
			return true
		}
		if !j.IsCallToAnyAST(call, "fmt.Printf", "fmt.Sprintf", "log.Printf") {
			return true
		}
		if len(call.Args) != 1 {
			return true
		}
		// Only flag clearly dynamic values (calls and identifiers).
		switch call.Args[0].(type) {
		case *ast.CallExpr, *ast.Ident:
		default:
			return true
		}
		j.Errorf(call.Args[0], "printf-style function with dynamic first argument and no further arguments should use print-style function instead")
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// CheckEarlyDefer (SA5001) flags the pattern
//
//	x, err := open(...)
//	defer x.Close()
//
// where Close is deferred before err has been checked; if the call
// failed, x may be nil and the deferred Close will panic.
func (c *Checker) CheckEarlyDefer(j *lint.Job) {
	fn := func(node ast.Node) bool {
		block, ok := node.(*ast.BlockStmt)
		if !ok {
			return true
		}
		if len(block.List) < 2 {
			return true
		}
		for i, stmt := range block.List {
			if i == len(block.List)-1 {
				break
			}
			assign, ok := stmt.(*ast.AssignStmt)
			if !ok {
				continue
			}
			if len(assign.Rhs) != 1 {
				continue
			}
			if len(assign.Lhs) < 2 {
				continue
			}
			// An explicitly discarded error is fine.
			if lhs, ok := assign.Lhs[len(assign.Lhs)-1].(*ast.Ident); ok && lhs.Name == "_" {
				continue
			}
			call, ok := assign.Rhs[0].(*ast.CallExpr)
			if !ok {
				continue
			}
			sig, ok := j.Program.Info.TypeOf(call.Fun).(*types.Signature)
			if !ok {
				continue
			}
			if sig.Results().Len() < 2 {
				continue
			}
			last := sig.Results().At(sig.Results().Len() - 1)
			// FIXME(dh): check that it's error from universe, not
			// another type of the same name
			if last.Type().String() != "error" {
				continue
			}
			lhs, ok := assign.Lhs[0].(*ast.Ident)
			if !ok {
				continue
			}
			def, ok := block.List[i+1].(*ast.DeferStmt)
			if !ok {
				continue
			}
			sel, ok := def.Call.Fun.(*ast.SelectorExpr)
			if !ok {
				continue
			}
			ident, ok := selectorX(sel).(*ast.Ident)
			if !ok {
				continue
			}
			// The deferred Close must be on the value just assigned.
			if ident.Obj != lhs.Obj {
				continue
			}
			if sel.Sel.Name != "Close" {
				continue
			}
			j.Errorf(def, "should check returned error before deferring %s", j.Render(def.Call))
		}
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
func selectorX(sel *ast.SelectorExpr) ast.Node {
switch x := sel.X.(type) {
case *ast.SelectorExpr:
return selectorX(x)
default:
return x
}
}
// CheckEmptyCriticalSection (SA2001) flags a Lock/RLock call that is
// immediately followed by the matching Unlock/RUnlock on the same
// rendered expression — an empty critical section.
func (c *Checker) CheckEmptyCriticalSection(j *lint.Job) {
	// Initially it might seem like this check would be easier to
	// implement in SSA. After all, we're only checking for two
	// consecutive method calls. In reality, however, there may be any
	// number of other instructions between the lock and unlock, while
	// still constituting an empty critical section. For example,
	// given `m.x().Lock(); m.x().Unlock()`, there will be a call to
	// x(). In the AST-based approach, this has a tiny potential for a
	// false positive (the second call to x might be doing work that
	// is protected by the mutex). In an SSA-based approach, however,
	// it would miss a lot of real bugs.

	// mutexParams extracts the receiver expression and method name of
	// a niladic, result-less method call statement, or ok == false.
	mutexParams := func(s ast.Stmt) (x ast.Expr, funcName string, ok bool) {
		expr, ok := s.(*ast.ExprStmt)
		if !ok {
			return nil, "", false
		}
		call, ok := expr.X.(*ast.CallExpr)
		if !ok {
			return nil, "", false
		}
		sel, ok := call.Fun.(*ast.SelectorExpr)
		if !ok {
			return nil, "", false
		}

		fn, ok := j.Program.Info.ObjectOf(sel.Sel).(*types.Func)
		if !ok {
			return nil, "", false
		}
		sig := fn.Type().(*types.Signature)
		if sig.Params().Len() != 0 || sig.Results().Len() != 0 {
			return nil, "", false
		}

		return sel.X, fn.Name(), true
	}

	fn := func(node ast.Node) bool {
		block, ok := node.(*ast.BlockStmt)
		if !ok {
			return true
		}
		if len(block.List) < 2 {
			return true
		}
		for i := range block.List[:len(block.List)-1] {
			sel1, method1, ok1 := mutexParams(block.List[i])
			sel2, method2, ok2 := mutexParams(block.List[i+1])

			if !ok1 || !ok2 || j.Render(sel1) != j.Render(sel2) {
				continue
			}
			if (method1 == "Lock" && method2 == "Unlock") ||
				(method1 == "RLock" && method2 == "RUnlock") {
				j.Errorf(block.List[i+1], "empty critical section")
			}
		}
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// cgo produces code like fn(&*_Cvar_kSomeCallbacks) which we don't
// want to flag.
var cgoIdent = regexp.MustCompile(`^_C(func|var)_.+$`)

// CheckIneffectiveCopy (SA4001) flags &*x and *&x, which do not copy
// x as the author may believe — they simplify to x. cgo-generated
// identifiers are exempt (see cgoIdent above).
func (c *Checker) CheckIneffectiveCopy(j *lint.Job) {
	fn := func(node ast.Node) bool {
		if unary, ok := node.(*ast.UnaryExpr); ok {
			if star, ok := unary.X.(*ast.StarExpr); ok && unary.Op == token.AND {
				ident, ok := star.X.(*ast.Ident)
				if !ok || !cgoIdent.MatchString(ident.Name) {
					j.Errorf(unary, "&*x will be simplified to x. It will not copy x.")
				}
			}
		}

		if star, ok := node.(*ast.StarExpr); ok {
			if unary, ok := star.X.(*ast.UnaryExpr); ok && unary.Op == token.AND {
				j.Errorf(star, "*&x will be simplified to x. It will not copy x.")
			}
		}
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// CheckDiffSizeComparison (SA4002) uses value-range propagation to
// flag == / != comparisons involving string slices whose possible
// length ranges do not overlap: such comparisons are always false
// (respectively true).
func (c *Checker) CheckDiffSizeComparison(j *lint.Job) {
	for _, ssafn := range j.Program.InitialFunctions {
		for _, b := range ssafn.Blocks {
			for _, ins := range b.Instrs {
				binop, ok := ins.(*ssa.BinOp)
				if !ok {
					continue
				}
				if binop.Op != token.EQL && binop.Op != token.NEQ {
					continue
				}

				_, ok1 := binop.X.(*ssa.Slice)
				_, ok2 := binop.Y.(*ssa.Slice)
				if !ok1 && !ok2 {
					continue
				}

				r := c.funcDescs.Get(ssafn).Ranges
				r1, ok1 := r.Get(binop.X).(vrp.StringInterval)
				r2, ok2 := r.Get(binop.Y).(vrp.StringInterval)
				if !ok1 || !ok2 {
					continue
				}

				// Disjoint length intervals mean equality is impossible.
				if r1.Length.Intersection(r2.Length).Empty() {
					j.Errorf(binop, "comparing strings of different sizes for equality will always return false")
				}
			}
		}
	}
}
// CheckCanonicalHeaderKey (SA1008) flags map accesses on http.Header
// with constant keys that are not in canonical form (e.g. "foo-bar"
// instead of "Foo-Bar"); the Header methods canonicalize keys, so a
// non-canonical direct index will not find them. Writes are skipped,
// since intentionally setting non-canonical keys is legitimate.
func (c *Checker) CheckCanonicalHeaderKey(j *lint.Job) {
	fn := func(node ast.Node) bool {
		assign, ok := node.(*ast.AssignStmt)
		if ok {
			// TODO(dh): This risks missing some Header reads, for
			// example in `h1["foo"] = h2["foo"]` – these edge
			// cases are probably rare enough to ignore for now.
			for _, expr := range assign.Lhs {
				op, ok := expr.(*ast.IndexExpr)
				if !ok {
					continue
				}
				if hasType(j, op.X, "net/http.Header") {
					return false
				}
			}
			return true
		}
		op, ok := node.(*ast.IndexExpr)
		if !ok {
			return true
		}
		if !hasType(j, op.X, "net/http.Header") {
			return true
		}
		s, ok := j.ExprToString(op.Index)
		if !ok {
			return true
		}
		if s == http.CanonicalHeaderKey(s) {
			return true
		}
		j.Errorf(op, "keys in http.Header are canonicalized, %q is not canonical; fix the constant or use http.CanonicalHeaderKey", s)
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// CheckBenchmarkN (SA3001) flags assignments to b.N inside
// benchmarks; the value is managed by the testing framework and must
// not be set by user code.
func (c *Checker) CheckBenchmarkN(j *lint.Job) {
	fn := func(node ast.Node) bool {
		assign, ok := node.(*ast.AssignStmt)
		if !ok || len(assign.Lhs) != 1 || len(assign.Rhs) != 1 {
			return true
		}
		sel, ok := assign.Lhs[0].(*ast.SelectorExpr)
		if !ok || sel.Sel.Name != "N" || !hasType(j, sel.X, "*testing.B") {
			return true
		}
		j.Errorf(assign, "should not assign to %s", j.Render(sel))
		return true
	}
	for _, f := range j.Program.Files {
		ast.Inspect(f, fn)
	}
}
// CheckIneffectiveFieldAssignments (SA4005) flags writes to fields of
// a value (non-pointer, non-escaping) receiver that are never read
// afterwards on any path: since the receiver is a copy, such writes
// are lost when the method returns.
func (c *Checker) CheckIneffectiveFieldAssignments(j *lint.Job) {
	for _, ssafn := range j.Program.InitialFunctions {
		// fset := j.Program.SSA.Fset
		// if fset.File(f.File.Pos()) != fset.File(ssafn.Pos()) {
		// 	continue
		// }
		if ssafn.Signature.Recv() == nil {
			continue
		}

		if len(ssafn.Blocks) == 0 {
			// External function
			continue
		}

		reads := map[*ssa.BasicBlock]map[ssa.Value]bool{}
		writes := map[*ssa.BasicBlock]map[ssa.Value]bool{}

		recv := ssafn.Params[0]
		if _, ok := recv.Type().Underlying().(*types.Struct); !ok {
			continue
		}
		recvPtrs := map[ssa.Value]bool{
			recv: true,
		}
		// Only value receivers that did not escape to the heap matter.
		if len(ssafn.Locals) == 0 || ssafn.Locals[0].Heap {
			continue
		}
		blocks := ssafn.DomPreorder()

		// First pass: record, per block, which receiver fields are
		// written (Store through FieldAddr) and which are read (UnOp
		// MUL through FieldAddr, or a read of the whole receiver).
		for _, block := range blocks {
			if writes[block] == nil {
				writes[block] = map[ssa.Value]bool{}
			}
			if reads[block] == nil {
				reads[block] = map[ssa.Value]bool{}
			}

			for _, ins := range block.Instrs {
				switch ins := ins.(type) {
				case *ssa.Store:
					if recvPtrs[ins.Val] {
						recvPtrs[ins.Addr] = true
					}
					fa, ok := ins.Addr.(*ssa.FieldAddr)
					if !ok {
						continue
					}
					if !recvPtrs[fa.X] {
						continue
					}
					writes[block][fa] = true
				case *ssa.UnOp:
					if ins.Op != token.MUL {
						continue
					}
					if recvPtrs[ins.X] {
						reads[block][ins] = true
						continue
					}
					fa, ok := ins.X.(*ssa.FieldAddr)
					if !ok {
						continue
					}
					if !recvPtrs[fa.X] {
						continue
					}
					reads[block][fa] = true
				}
			}
		}

		// Second pass: for every write, search forward (through
		// successor blocks) for a read of the same field; report the
		// write if none is found.
		for block, writes := range writes {
			seen := map[*ssa.BasicBlock]bool{}
			var hasRead func(block *ssa.BasicBlock, write *ssa.FieldAddr) bool
			hasRead = func(block *ssa.BasicBlock, write *ssa.FieldAddr) bool {
				seen[block] = true
				for read := range reads[block] {
					switch ins := read.(type) {
					case *ssa.FieldAddr:
						if ins.Field == write.Field && read.Pos() > write.Pos() {
							return true
						}
					case *ssa.UnOp:
						if ins.Pos() >= write.Pos() {
							return true
						}
					}
				}
				for _, succ := range block.Succs {
					if !seen[succ] {
						if hasRead(succ, write) {
							return true
						}
					}
				}
				return false
			}
			for write := range writes {
				fa := write.(*ssa.FieldAddr)
				if !hasRead(block, fa) {
					name := recv.Type().Underlying().(*types.Struct).Field(fa.Field).Name()
					j.Errorf(fa, "ineffective assignment to field %s", name)
				}
			}
		}
	}
}
func (c *Checker) CheckUnreadVariableValues(j *lint.Job) {
fn := func(node ast.Node) bool {
switch node.(type) {
case *ast.FuncDecl, *ast.FuncLit:
default:
return true
}
ssafn := c.nodeFns[node]
if ssafn == nil {
return true
}
if lint.IsExample(ssafn) {
return true
}
ast.Inspect(node, func(node ast.Node) bool {
assign, ok := node.(*ast.AssignStmt)
if !ok {
return true
}
if len(assign.Lhs) > 1 && len(assign.Rhs) == 1 {
// Either a function call with multiple return values,
// or a comma-ok assignment
val, _ := ssafn.ValueForExpr(assign.Rhs[0])
if val == nil {
return true
}
refs := val.Referrers()
if refs == nil {
return true
}
for _, ref := range *refs {
ex, ok := ref.(*ssa.Extract)
if !ok {
continue
}
exrefs := ex.Referrers()
if exrefs == nil {
continue
}
if len(lint.FilterDebug(*exrefs)) == 0 {
lhs := assign.Lhs[ex.Index]
if ident, ok := lhs.(*ast.Ident); !ok || ok && ident.Name == "_" {
continue
}
j.Errorf(lhs, "this value of %s is never used", lhs)
}
}
return true
}
for i, lhs := range assign.Lhs {
rhs := assign.Rhs[i]
if ident, ok := lhs.(*ast.Ident); !ok || ok && ident.Name == "_" {
continue
}
val, _ := ssafn.ValueForExpr(rhs)
if val == nil {
continue
}
refs := val.Referrers()
if refs == nil {
// TODO investigate why refs can be nil
return true
}
if len(lint.FilterDebug(*refs)) == 0 {
j.Errorf(lhs, "this value of %s is never used", lhs)
}
}
return true
})
return true
}
for _, f := range j.Program.Files {
ast.Inspect(f, fn)
}
}
func (c *Checker) CheckPredeterminedBooleanExprs(j *lint.Job) {
for _, ssafn := range j.Program.InitialFunctions {
for _, block := range ssafn.Blocks {
for _, ins := range block.Instrs {
ssabinop, ok := ins.(*ssa.BinOp)
if !ok {
continue
}
switch ssabinop.Op {
case token.GTR, token.LSS, token.EQL, token.NEQ, token.LEQ, token.GEQ:
default:
continue
}
xs, ok1 := consts(ssabinop.X, nil, nil)
ys, ok2 := consts(ssabinop.Y, nil, nil)
if !ok1 || !ok2 || len(xs) == 0 || len(ys) == 0 {
continue
}
trues := 0
for _, x := range xs {
for _, y := range ys {
if x.Value == nil {
if y.Value == nil {
trues++
}
continue
}
if constant.Compare(x.Value, ssabinop.Op, y.Value) {
trues++
}
}
}
b := trues != 0
if trues == 0 || trues == len(xs)*len(ys) {
j.Errorf(ssabinop, "binary expression is always %t for all possible values (%s %s %s)",
b, xs, ssabinop.Op, ys)
}
}
}
}
}
func (c *Checker) CheckNilMaps(j *lint.Job) {
for _, ssafn := range j.Program.InitialFunctions {
for _, block := range ssafn.Blocks {
for _, ins := range block.Instrs {
mu, ok := ins.(*ssa.MapUpdate)
if !ok {
continue
}
c, ok := mu.Map.(*ssa.Const)
if !ok {
continue
}
if c.Value != nil {
continue
}
j.Errorf(mu, "assignment to nil map")
}
}
}
}
func (c *Checker) CheckUnsignedComparison(j *lint.Job) {
fn := func(node ast.Node) bool {
expr, ok := node.(*ast.BinaryExpr)
if !ok {
return true
}
tx := j.Program.Info.TypeOf(expr.X)
basic, ok := tx.Underlying().(*types.Basic)
if !ok {
return true
}
if (basic.Info() & types.IsUnsigned) == 0 {
return true
}
lit, ok := expr.Y.(*ast.BasicLit)
if !ok || lit.Value != "0" {
return true
}
switch expr.Op {
case token.GEQ:
j.Errorf(expr, "unsigned values are always >= 0")
case token.LSS:
j.Errorf(expr, "unsigned values are never < 0")
}
return true
}
for _, f := range j.Program.Files {
ast.Inspect(f, fn)
}
}
func consts(val ssa.Value, out []*ssa.Const, visitedPhis map[string]bool) ([]*ssa.Const, bool) {
if visitedPhis == nil {
visitedPhis = map[string]bool{}
}
var ok bool
switch val := val.(type) {
case *ssa.Phi:
if visitedPhis[val.Name()] {
break
}
visitedPhis[val.Name()] = true
vals := val.Operands(nil)
for _, phival := range vals {
out, ok = consts(*phival, out, visitedPhis)
if !ok {
return nil, false
}
}
case *ssa.Const:
out = append(out, val)
case *ssa.Convert:
out, ok = consts(val.X, out, visitedPhis)
if !ok {
return nil, false
}
default:
return nil, false
}
if len(out) < 2 {
return out, true
}
uniq := []*ssa.Const{out[0]}
for _, val := range out[1:] {
if val.Value == uniq[len(uniq)-1].Value {
continue
}
uniq = append(uniq, val)
}
return uniq, true
}
func (c *Checker) CheckLoopCondition(j *lint.Job) {
fn := func(node ast.Node) bool {
loop, ok := node.(*ast.ForStmt)
if !ok {
return true
}
if loop.Init == nil || loop.Cond == nil || loop.Post == nil {
return true
}
init, ok := loop.Init.(*ast.AssignStmt)
if !ok || len(init.Lhs) != 1 || len(init.Rhs) != 1 {
return true
}
cond, ok := loop.Cond.(*ast.BinaryExpr)
if !ok {
return true
}
x, ok := cond.X.(*ast.Ident)
if !ok {
return true
}
lhs, ok := init.Lhs[0].(*ast.Ident)
if !ok {
return true
}
if x.Obj != lhs.Obj {
return true
}
if _, ok := loop.Post.(*ast.IncDecStmt); !ok {
return true
}
ssafn := c.nodeFns[cond]
if ssafn == nil {
return true
}
v, isAddr := ssafn.ValueForExpr(cond.X)
if v == nil || isAddr {
return true
}
switch v := v.(type) {
case *ssa.Phi:
ops := v.Operands(nil)
if len(ops) != 2 {
return true
}
_, ok := (*ops[0]).(*ssa.Const)
if !ok {
return true
}
sigma, ok := (*ops[1]).(*ssa.Sigma)
if !ok {
return true
}
if sigma.X != v {
return true
}
case *ssa.UnOp:
return true
}
j.Errorf(cond, "variable in loop condition never changes")
return true
}
for _, f := range j.Program.Files {
ast.Inspect(f, fn)
}
}
func (c *Checker) CheckArgOverwritten(j *lint.Job) {
fn := func(node ast.Node) bool {
var typ *ast.FuncType
var body *ast.BlockStmt
switch fn := node.(type) {
case *ast.FuncDecl:
typ = fn.Type
body = fn.Body
case *ast.FuncLit:
typ = fn.Type
body = fn.Body
}
if body == nil {
return true
}
ssafn := c.nodeFns[node]
if ssafn == nil {
return true
}
if len(typ.Params.List) == 0 {
return true
}
for _, field := range typ.Params.List {
for _, arg := range field.Names {
obj := j.Program.Info.ObjectOf(arg)
var ssaobj *ssa.Parameter
for _, param := range ssafn.Params {
if param.Object() == obj {
ssaobj = param
break
}
}
if ssaobj == nil {
continue
}
refs := ssaobj.Referrers()
if refs == nil {
continue
}
if len(lint.FilterDebug(*refs)) != 0 {
continue
}
assigned := false
ast.Inspect(body, func(node ast.Node) bool {
assign, ok := node.(*ast.AssignStmt)
if !ok {
return true
}
for _, lhs := range assign.Lhs {
ident, ok := lhs.(*ast.Ident)
if !ok {
continue
}
if j.Program.Info.ObjectOf(ident) == obj {
assigned = true
return false
}
}
return true
})
if assigned {
j.Errorf(arg, "argument %s is overwritten before first use", arg)
}
}
}
return true
}
for _, f := range j.Program.Files {
ast.Inspect(f, fn)
}
}
func (c *Checker) CheckIneffectiveLoop(j *lint.Job) {
// This check detects some, but not all unconditional loop exits.
// We give up in the following cases:
//
// - a goto anywhere in the loop. The goto might skip over our
// return, and we don't check that it doesn't.
//
// - any nested, unlabelled continue, even if it is in another
// loop or closure.
fn := func(node ast.Node) bool {
var body *ast.BlockStmt
switch fn := node.(type) {
case *ast.FuncDecl:
body = fn.Body
case *ast.FuncLit:
body = fn.Body
default:
return true
}
if body == nil {
return true
}
labels := map[*ast.Object]ast.Stmt{}
ast.Inspect(body, func(node ast.Node) bool {
label, ok := node.(*ast.LabeledStmt)
if !ok {
return true
}
labels[label.Label.Obj] = label.Stmt
return true
})
ast.Inspect(body, func(node ast.Node) bool {
var loop ast.Node
var body *ast.BlockStmt
switch node := node.(type) {
case *ast.ForStmt:
body = node.Body
loop = node
case *ast.RangeStmt:
typ := j.Program.Info.TypeOf(node.X)
if _, ok := typ.Underlying().(*types.Map); ok {
// looping once over a map is a valid pattern for
// getting an arbitrary element.
return true
}
body = node.Body
loop = node
default:
return true
}
if len(body.List) < 2 {
// avoid flagging the somewhat common pattern of using
// a range loop to get the first element in a slice,
// or the first rune in a string.
return true
}
var unconditionalExit ast.Node
hasBranching := false
for _, stmt := range body.List {
switch stmt := stmt.(type) {
case *ast.BranchStmt:
switch stmt.Tok {
case token.BREAK:
if stmt.Label == nil || labels[stmt.Label.Obj] == loop {
unconditionalExit = stmt
}
case token.CONTINUE:
if stmt.Label == nil || labels[stmt.Label.Obj] == loop {
unconditionalExit = nil
return false
}
}
case *ast.ReturnStmt:
unconditionalExit = stmt
case *ast.IfStmt, *ast.ForStmt, *ast.RangeStmt, *ast.SwitchStmt, *ast.SelectStmt:
hasBranching = true
}
}
if unconditionalExit == nil || !hasBranching {
return false
}
ast.Inspect(body, func(node ast.Node) bool {
if branch, ok := node.(*ast.BranchStmt); ok {
switch branch.Tok {
case token.GOTO:
unconditionalExit = nil
return false
case token.CONTINUE:
if branch.Label != nil && labels[branch.Label.Obj] != loop {
return true
}
unconditionalExit = nil
return false
}
}
return true
})
if unconditionalExit != nil {
j.Errorf(unconditionalExit, "the surrounding loop is unconditionally terminated")
}
return true
})
return true
}
for _, f := range j.Program.Files {
ast.Inspect(f, fn)
}
}
func (c *Checker) CheckNilContext(j *lint.Job) {
fn := func(node ast.Node) bool {
call, ok := node.(*ast.CallExpr)
if !ok {
return true
}
if len(call.Args) == 0 {
return true
}
if typ, ok := j.Program.Info.TypeOf(call.Args[0]).(*types.Basic); !ok || typ.Kind() != types.UntypedNil {
return true
}
sig, ok := j.Program.Info.TypeOf(call.Fun).(*types.Signature)
if !ok {
return true
}
if sig.Params().Len() == 0 {
return true
}
if types.TypeString(sig.Params().At(0).Type(), nil) != "context.Context" {
return true
}
j.Errorf(call.Args[0],
"do not pass a nil Context, even if a function permits it; pass context.TODO if you are unsure about which Context to use")
return true
}
for _, f := range j.Program.Files {
ast.Inspect(f, fn)
}
}
func (c *Checker) CheckSeeker(j *lint.Job) {
fn := func(node ast.Node) bool {
call, ok := node.(*ast.CallExpr)
if !ok {
return true
}
sel, ok := call.Fun.(*ast.SelectorExpr)
if !ok {
return true
}
if sel.Sel.Name != "Seek" {
return true
}
if len(call.Args) != 2 {
return true
}
arg0, ok := call.Args[0].(*ast.SelectorExpr)
if !ok {
return true
}
switch arg0.Sel.Name {
case "SeekStart", "SeekCurrent", "SeekEnd":
default:
return true
}
pkg, ok := arg0.X.(*ast.Ident)
if !ok {
return true
}
if pkg.Name != "io" {
return true
}
j.Errorf(call, "the first argument of io.Seeker is the offset, but an io.Seek* constant is being used instead")
return true
}
for _, f := range j.Program.Files {
ast.Inspect(f, fn)
}
}
func (c *Checker) CheckIneffectiveAppend(j *lint.Job) {
isAppend := func(ins ssa.Value) bool {
call, ok := ins.(*ssa.Call)
if !ok {
return false
}
if call.Call.IsInvoke() {
return false
}
if builtin, ok := call.Call.Value.(*ssa.Builtin); !ok || builtin.Name() != "append" {
return false
}
return true
}
for _, ssafn := range j.Program.InitialFunctions {
for _, block := range ssafn.Blocks {
for _, ins := range block.Instrs {
val, ok := ins.(ssa.Value)
if !ok || !isAppend(val) {
continue
}
isUsed := false
visited := map[ssa.Instruction]bool{}
var walkRefs func(refs []ssa.Instruction)
walkRefs = func(refs []ssa.Instruction) {
loop:
for _, ref := range refs {
if visited[ref] {
continue
}
visited[ref] = true
if _, ok := ref.(*ssa.DebugRef); ok {
continue
}
switch ref := ref.(type) {
case *ssa.Phi:
walkRefs(*ref.Referrers())
case *ssa.Sigma:
walkRefs(*ref.Referrers())
case ssa.Value:
if !isAppend(ref) {
isUsed = true
} else {
walkRefs(*ref.Referrers())
}
case ssa.Instruction:
isUsed = true
break loop
}
}
}
refs := val.Referrers()
if refs == nil {
continue
}
walkRefs(*refs)
if !isUsed {
j.Errorf(ins, "this result of append is never used, except maybe in other appends")
}
}
}
}
}
func (c *Checker) CheckConcurrentTesting(j *lint.Job) {
for _, ssafn := range j.Program.InitialFunctions {
for _, block := range ssafn.Blocks {
for _, ins := range block.Instrs {
gostmt, ok := ins.(*ssa.Go)
if !ok {
continue
}
var fn *ssa.Function
switch val := gostmt.Call.Value.(type) {
case *ssa.Function:
fn = val
case *ssa.MakeClosure:
fn = val.Fn.(*ssa.Function)
default:
continue
}
if fn.Blocks == nil {
continue
}
for _, block := range fn.Blocks {
for _, ins := range block.Instrs {
call, ok := ins.(*ssa.Call)
if !ok {
continue
}
if call.Call.IsInvoke() {
continue
}
callee := call.Call.StaticCallee()
if callee == nil {
continue
}
recv := callee.Signature.Recv()
if recv == nil {
continue
}
if types.TypeString(recv.Type(), nil) != "*testing.common" {
continue
}
fn, ok := call.Call.StaticCallee().Object().(*types.Func)
if !ok {
continue
}
name := fn.Name()
switch name {
case "FailNow", "Fatal", "Fatalf", "SkipNow", "Skip", "Skipf":
default:
continue
}
j.Errorf(gostmt, "the goroutine calls T.%s, which must be called in the same goroutine as the test", name)
}
}
}
}
}
}
func (c *Checker) CheckCyclicFinalizer(j *lint.Job) {
for _, ssafn := range j.Program.InitialFunctions {
node := c.funcDescs.CallGraph.CreateNode(ssafn)
for _, edge := range node.Out {
if edge.Callee.Func.RelString(nil) != "runtime.SetFinalizer" {
continue
}
arg0 := edge.Site.Common().Args[0]
if iface, ok := arg0.(*ssa.MakeInterface); ok {
arg0 = iface.X
}
unop, ok := arg0.(*ssa.UnOp)
if !ok {
continue
}
v, ok := unop.X.(*ssa.Alloc)
if !ok {
continue
}
arg1 := edge.Site.Common().Args[1]
if iface, ok := arg1.(*ssa.MakeInterface); ok {
arg1 = iface.X
}
mc, ok := arg1.(*ssa.MakeClosure)
if !ok {
continue
}
for _, b := range mc.Bindings {
if b == v {
pos := j.Program.SSA.Fset.Position(mc.Fn.Pos())
j.Errorf(edge.Site, "the finalizer closes over the object, preventing the finalizer from ever running (at %s)", pos)
}
}
}
}
}
func (c *Checker) CheckSliceOutOfBounds(j *lint.Job) {
for _, ssafn := range j.Program.InitialFunctions {
for _, block := range ssafn.Blocks {
for _, ins := range block.Instrs {
ia, ok := ins.(*ssa.IndexAddr)
if !ok {
continue
}
if _, ok := ia.X.Type().Underlying().(*types.Slice); !ok {
continue
}
sr, ok1 := c.funcDescs.Get(ssafn).Ranges[ia.X].(vrp.SliceInterval)
idxr, ok2 := c.funcDescs.Get(ssafn).Ranges[ia.Index].(vrp.IntInterval)
if !ok1 || !ok2 || !sr.IsKnown() || !idxr.IsKnown() || sr.Length.Empty() || idxr.Empty() {
continue
}
if idxr.Lower.Cmp(sr.Length.Upper) >= 0 {
j.Errorf(ia, "index out of bounds")
}
}
}
}
}
func (c *Checker) CheckDeferLock(j *lint.Job) {
for _, ssafn := range j.Program.InitialFunctions {
for _, block := range ssafn.Blocks {
instrs := lint.FilterDebug(block.Instrs)
if len(instrs) < 2 {
continue
}
for i, ins := range instrs[:len(instrs)-1] {
call, ok := ins.(*ssa.Call)
if !ok {
continue
}
if !lint.IsCallTo(call.Common(), "(*sync.Mutex).Lock") && !lint.IsCallTo(call.Common(), "(*sync.RWMutex).RLock") {
continue
}
nins, ok := instrs[i+1].(*ssa.Defer)
if !ok {
continue
}
if !lint.IsCallTo(&nins.Call, "(*sync.Mutex).Lock") && !lint.IsCallTo(&nins.Call, "(*sync.RWMutex).RLock") {
continue
}
if call.Common().Args[0] != nins.Call.Args[0] {
continue
}
name := shortCallName(call.Common())
alt := ""
switch name {
case "Lock":
alt = "Unlock"
case "RLock":
alt = "RUnlock"
}
j.Errorf(nins, "deferring %s right after having locked already; did you mean to defer %s?", name, alt)
}
}
}
}
func (c *Checker) CheckNaNComparison(j *lint.Job) {
isNaN := func(v ssa.Value) bool {
call, ok := v.(*ssa.Call)
if !ok {
return false
}
return lint.IsCallTo(call.Common(), "math.NaN")
}
for _, ssafn := range j.Program.InitialFunctions {
for _, block := range ssafn.Blocks {
for _, ins := range block.Instrs {
ins, ok := ins.(*ssa.BinOp)
if !ok {
continue
}
if isNaN(ins.X) || isNaN(ins.Y) {
j.Errorf(ins, "no value is equal to NaN, not even NaN itself")
}
}
}
}
}
func (c *Checker) CheckInfiniteRecursion(j *lint.Job) {
for _, ssafn := range j.Program.InitialFunctions {
node := c.funcDescs.CallGraph.CreateNode(ssafn)
for _, edge := range node.Out {
if edge.Callee != node {
continue
}
block := edge.Site.Block()
canReturn := false
for _, b := range ssafn.Blocks {
if block.Dominates(b) {
continue
}
if len(b.Instrs) == 0 {
continue
}
if _, ok := b.Instrs[len(b.Instrs)-1].(*ssa.Return); ok {
canReturn = true
break
}
}
if canReturn {
continue
}
j.Errorf(edge.Site, "infinite recursive call")
}
}
}
func objectName(obj types.Object) string {
if obj == nil {
return "<nil>"
}
var name string
if obj.Pkg() != nil && obj.Pkg().Scope().Lookup(obj.Name()) == obj {
var s string
s = obj.Pkg().Path()
if s != "" {
name += s + "."
}
}
name += obj.Name()
return name
}
func isName(j *lint.Job, expr ast.Expr, name string) bool {
var obj types.Object
switch expr := expr.(type) {
case *ast.Ident:
obj = j.Program.Info.ObjectOf(expr)
case *ast.SelectorExpr:
obj = j.Program.Info.ObjectOf(expr.Sel)
}
return objectName(obj) == name
}
func (c *Checker) CheckLeakyTimeTick(j *lint.Job) {
for _, ssafn := range j.Program.InitialFunctions {
if j.IsInMain(ssafn) || j.IsInTest(ssafn) {
continue
}
for _, block := range ssafn.Blocks {
for _, ins := range block.Instrs {
call, ok := ins.(*ssa.Call)
if !ok || !lint.IsCallTo(call.Common(), "time.Tick") {
continue
}
if c.funcDescs.Get(call.Parent()).Infinite {
continue
}
j.Errorf(call, "using time.Tick leaks the underlying ticker, consider using it only in endless functions, tests and the main package, and use time.NewTicker here")
}
}
}
}
func (c *Checker) CheckDoubleNegation(j *lint.Job) {
fn := func(node ast.Node) bool {
unary1, ok := node.(*ast.UnaryExpr)
if !ok {
return true
}
unary2, ok := unary1.X.(*ast.UnaryExpr)
if !ok {
return true
}
if unary1.Op != token.NOT || unary2.Op != token.NOT {
return true
}
j.Errorf(unary1, "negating a boolean twice has no effect; is this a typo?")
return true
}
for _, f := range j.Program.Files {
ast.Inspect(f, fn)
}
}
func hasSideEffects(node ast.Node) bool {
dynamic := false
ast.Inspect(node, func(node ast.Node) bool {
switch node := node.(type) {
case *ast.CallExpr:
dynamic = true
return false
case *ast.UnaryExpr:
if node.Op == token.ARROW {
dynamic = true
return false
}
}
return true
})
return dynamic
}
func (c *Checker) CheckRepeatedIfElse(j *lint.Job) {
seen := map[ast.Node]bool{}
var collectConds func(ifstmt *ast.IfStmt, inits []ast.Stmt, conds []ast.Expr) ([]ast.Stmt, []ast.Expr)
collectConds = func(ifstmt *ast.IfStmt, inits []ast.Stmt, conds []ast.Expr) ([]ast.Stmt, []ast.Expr) {
seen[ifstmt] = true
if ifstmt.Init != nil {
inits = append(inits, ifstmt.Init)
}
conds = append(conds, ifstmt.Cond)
if elsestmt, ok := ifstmt.Else.(*ast.IfStmt); ok {
return collectConds(elsestmt, inits, conds)
}
return inits, conds
}
fn := func(node ast.Node) bool {
ifstmt, ok := node.(*ast.IfStmt)
if !ok {
return true
}
if seen[ifstmt] {
return true
}
inits, conds := collectConds(ifstmt, nil, nil)
if len(inits) > 0 {
return true
}
for _, cond := range conds {
if hasSideEffects(cond) {
return true
}
}
counts := map[string]int{}
for _, cond := range conds {
s := j.Render(cond)
counts[s]++
if counts[s] == 2 {
j.Errorf(cond, "this condition occurs multiple times in this if/else if chain")
}
}
return true
}
for _, f := range j.Program.Files {
ast.Inspect(f, fn)
}
}
func (c *Checker) CheckSillyBitwiseOps(j *lint.Job) {
for _, ssafn := range j.Program.InitialFunctions {
for _, block := range ssafn.Blocks {
for _, ins := range block.Instrs {
ins, ok := ins.(*ssa.BinOp)
if !ok {
continue
}
if c, ok := ins.Y.(*ssa.Const); !ok || c.Value == nil || c.Value.Kind() != constant.Int || c.Uint64() != 0 {
continue
}
switch ins.Op {
case token.AND, token.OR, token.XOR:
default:
// we do not flag shifts because too often, x<<0 is part
// of a pattern, x<<0, x<<8, x<<16, ...
continue
}
path, _ := astutil.PathEnclosingInterval(j.File(ins), ins.Pos(), ins.Pos())
if len(path) == 0 {
continue
}
if node, ok := path[0].(*ast.BinaryExpr); !ok || !lint.IsZero(node.Y) {
continue
}
switch ins.Op {
case token.AND:
j.Errorf(ins, "x & 0 always equals 0")
case token.OR, token.XOR:
j.Errorf(ins, "x %s 0 always equals x", ins.Op)
}
}
}
}
}
func (c *Checker) CheckNonOctalFileMode(j *lint.Job) {
fn := func(node ast.Node) bool {
call, ok := node.(*ast.CallExpr)
if !ok {
return true
}
sig, ok := j.Program.Info.TypeOf(call.Fun).(*types.Signature)
if !ok {
return true
}
n := sig.Params().Len()
var args []int
for i := 0; i < n; i++ {
typ := sig.Params().At(i).Type()
if types.TypeString(typ, nil) == "os.FileMode" {
args = append(args, i)
}
}
for _, i := range args {
lit, ok := call.Args[i].(*ast.BasicLit)
if !ok {
continue
}
if len(lit.Value) == 3 &&
lit.Value[0] != '0' &&
lit.Value[0] >= '0' && lit.Value[0] <= '7' &&
lit.Value[1] >= '0' && lit.Value[1] <= '7' &&
lit.Value[2] >= '0' && lit.Value[2] <= '7' {
v, err := strconv.ParseInt(lit.Value, 10, 64)
if err != nil {
continue
}
j.Errorf(call.Args[i], "file mode '%s' evaluates to %#o; did you mean '0%s'?", lit.Value, v, lit.Value)
}
}
return true
}
for _, f := range j.Program.Files {
ast.Inspect(f, fn)
}
}
func (c *Checker) CheckPureFunctions(j *lint.Job) {
fnLoop:
for _, ssafn := range j.Program.InitialFunctions {
if j.IsInTest(ssafn) {
params := ssafn.Signature.Params()
for i := 0; i < params.Len(); i++ {
param := params.At(i)
if types.TypeString(param.Type(), nil) == "*testing.B" {
// Ignore discarded pure functions in code related
// to benchmarks. Instead of matching BenchmarkFoo
// functions, we match any function accepting a
// *testing.B. Benchmarks sometimes call generic
// functions for doing the actual work, and
// checking for the parameter is a lot easier and
// faster than analyzing call trees.
continue fnLoop
}
}
}
for _, b := range ssafn.Blocks {
for _, ins := range b.Instrs {
ins, ok := ins.(*ssa.Call)
if !ok {
continue
}
refs := ins.Referrers()
if refs == nil || len(lint.FilterDebug(*refs)) > 0 {
continue
}
callee := ins.Common().StaticCallee()
if callee == nil {
continue
}
if c.funcDescs.Get(callee).Pure {
j.Errorf(ins, "%s is a pure function but its return value is ignored", callee.Name())
continue
}
}
}
}
}
func enclosingFunction(j *lint.Job, node ast.Node) *ast.FuncDecl {
f := j.File(node)
path, _ := astutil.PathEnclosingInterval(f, node.Pos(), node.Pos())
for _, e := range path {
fn, ok := e.(*ast.FuncDecl)
if !ok {
continue
}
if fn.Name == nil {
continue
}
return fn
}
return nil
}
func (c *Checker) isDeprecated(j *lint.Job, ident *ast.Ident) (bool, string) {
obj := j.Program.Info.ObjectOf(ident)
if obj.Pkg() == nil {
return false, ""
}
alt := c.deprecatedObjs[obj]
return alt != "", alt
}
func (c *Checker) CheckDeprecated(j *lint.Job) {
fn := func(node ast.Node) bool {
sel, ok := node.(*ast.SelectorExpr)
if !ok {
return true
}
if fn := enclosingFunction(j, sel); fn != nil {
if ok, _ := c.isDeprecated(j, fn.Name); ok {
// functions that are deprecated may use deprecated
// symbols
return true
}
}
obj := j.Program.Info.ObjectOf(sel.Sel)
if obj.Pkg() == nil {
return true
}
nodePkg := j.NodePackage(node).Pkg
if nodePkg == obj.Pkg() || obj.Pkg().Path()+"_test" == nodePkg.Path() {
// Don't flag stuff in our own package
return true
}
if ok, alt := c.isDeprecated(j, sel.Sel); ok {
j.Errorf(sel, "%s is deprecated: %s", j.Render(sel), alt)
return true
}
return true
}
for _, f := range j.Program.Files {
ast.Inspect(f, fn)
}
}
func (c *Checker) callChecker(rules map[string]CallCheck) func(j *lint.Job) {
return func(j *lint.Job) {
c.checkCalls(j, rules)
}
}
func (c *Checker) checkCalls(j *lint.Job, rules map[string]CallCheck) {
for _, ssafn := range j.Program.InitialFunctions {
node := c.funcDescs.CallGraph.CreateNode(ssafn)
for _, edge := range node.Out {
callee := edge.Callee.Func
obj, ok := callee.Object().(*types.Func)
if !ok {
continue
}
r, ok := rules[obj.FullName()]
if !ok {
continue
}
var args []*Argument
ssaargs := edge.Site.Common().Args
if callee.Signature.Recv() != nil {
ssaargs = ssaargs[1:]
}
for _, arg := range ssaargs {
if iarg, ok := arg.(*ssa.MakeInterface); ok {
arg = iarg.X
}
vr := c.funcDescs.Get(edge.Site.Parent()).Ranges[arg]
args = append(args, &Argument{Value: Value{arg, vr}})
}
call := &Call{
Job: j,
Instr: edge.Site,
Args: args,
Checker: c,
Parent: edge.Site.Parent(),
}
r(call)
for idx, arg := range call.Args {
_ = idx
for _, e := range arg.invalids {
// path, _ := astutil.PathEnclosingInterval(f.File, edge.Site.Pos(), edge.Site.Pos())
// if len(path) < 2 {
// continue
// }
// astcall, ok := path[0].(*ast.CallExpr)
// if !ok {
// continue
// }
// j.Errorf(astcall.Args[idx], "%s", e)
j.Errorf(edge.Site, "%s", e)
}
}
for _, e := range call.invalids {
j.Errorf(call.Instr.Common(), "%s", e)
}
}
}
}
func unwrapFunction(val ssa.Value) *ssa.Function {
switch val := val.(type) {
case *ssa.Function:
return val
case *ssa.MakeClosure:
return val.Fn.(*ssa.Function)
default:
return nil
}
}
func shortCallName(call *ssa.CallCommon) string {
if call.IsInvoke() {
return ""
}
switch v := call.Value.(type) {
case *ssa.Function:
fn, ok := v.Object().(*types.Func)
if !ok {
return ""
}
return fn.Name()
case *ssa.Builtin:
return v.Name()
}
return ""
}
func hasCallTo(block *ssa.BasicBlock, name string) bool {
for _, ins := range block.Instrs {
call, ok := ins.(*ssa.Call)
if !ok {
continue
}
if lint.IsCallTo(call.Common(), name) {
return true
}
}
return false
}
// deref returns a pointer's element type; otherwise it returns typ.
func deref(typ types.Type) types.Type {
if p, ok := typ.Underlying().(*types.Pointer); ok {
return p.Elem()
}
return typ
}
func (c *Checker) CheckWriterBufferModified(j *lint.Job) {
// TODO(dh): this might be a good candidate for taint analysis.
// Taint the argument as MUST_NOT_MODIFY, then propagate that
// through functions like bytes.Split
for _, ssafn := range j.Program.InitialFunctions {
sig := ssafn.Signature
if ssafn.Name() != "Write" || sig.Recv() == nil || sig.Params().Len() != 1 || sig.Results().Len() != 2 {
continue
}
tArg, ok := sig.Params().At(0).Type().(*types.Slice)
if !ok {
continue
}
if basic, ok := tArg.Elem().(*types.Basic); !ok || basic.Kind() != types.Byte {
continue
}
if basic, ok := sig.Results().At(0).Type().(*types.Basic); !ok || basic.Kind() != types.Int {
continue
}
if named, ok := sig.Results().At(1).Type().(*types.Named); !ok || types.TypeString(named, nil) != "error" {
continue
}
for _, block := range ssafn.Blocks {
for _, ins := range block.Instrs {
switch ins := ins.(type) {
case *ssa.Store:
addr, ok := ins.Addr.(*ssa.IndexAddr)
if !ok {
continue
}
if addr.X != ssafn.Params[1] {
continue
}
j.Errorf(ins, "io.Writer.Write must not modify the provided buffer, not even temporarily")
case *ssa.Call:
if !lint.IsCallTo(ins.Common(), "append") {
continue
}
if ins.Common().Args[0] != ssafn.Params[1] {
continue
}
j.Errorf(ins, "io.Writer.Write must not modify the provided buffer, not even temporarily")
}
}
}
}
}
func loopedRegexp(name string) CallCheck {
return func(call *Call) {
if len(extractConsts(call.Args[0].Value.Value)) == 0 {
return
}
if !call.Checker.isInLoop(call.Instr.Block()) {
return
}
call.Invalid(fmt.Sprintf("calling %s in a loop has poor performance, consider using regexp.Compile", name))
}
}
func (c *Checker) CheckEmptyBranch(j *lint.Job) {
fn := func(node ast.Node) bool {
ifstmt, ok := node.(*ast.IfStmt)
if !ok {
return true
}
ssafn := c.nodeFns[node]
if lint.IsExample(ssafn) {
return true
}
if ifstmt.Else != nil {
b, ok := ifstmt.Else.(*ast.BlockStmt)
if !ok || len(b.List) != 0 {
return true
}
j.Errorf(ifstmt.Else, "empty branch")
}
if len(ifstmt.Body.List) != 0 {
return true
}
j.Errorf(ifstmt, "empty branch")
return true
}
for _, f := range c.filterGenerated(j.Program.Files) {
ast.Inspect(f, fn)
}
}
func (c *Checker) CheckMapBytesKey(j *lint.Job) {
for _, fn := range j.Program.InitialFunctions {
for _, b := range fn.Blocks {
insLoop:
for _, ins := range b.Instrs {
// find []byte -> string conversions
conv, ok := ins.(*ssa.Convert)
if !ok || conv.Type() != types.Universe.Lookup("string").Type() {
continue
}
if s, ok := conv.X.Type().(*types.Slice); !ok || s.Elem() != types.Universe.Lookup("byte").Type() {
continue
}
refs := conv.Referrers()
// need at least two (DebugRef) references: the
// conversion and the *ast.Ident
if refs == nil || len(*refs) < 2 {
continue
}
ident := false
// skip first reference, that's the conversion itself
for _, ref := range (*refs)[1:] {
switch ref := ref.(type) {
case *ssa.DebugRef:
if _, ok := ref.Expr.(*ast.Ident); !ok {
// the string seems to be used somewhere
// unexpected; the default branch should
// catch this already, but be safe
continue insLoop
} else {
ident = true
}
case *ssa.Lookup:
default:
// the string is used somewhere else than a
// map lookup
continue insLoop
}
}
// the result of the conversion wasn't assigned to an
// identifier
if !ident {
continue
}
j.Errorf(conv, "m[string(key)] would be more efficient than k := string(key); m[k]")
}
}
}
}
func (c *Checker) CheckRangeStringRunes(j *lint.Job) {
sharedcheck.CheckRangeStringRunes(c.nodeFns, j)
}
func (c *Checker) CheckSelfAssignment(j *lint.Job) {
fn := func(node ast.Node) bool {
assign, ok := node.(*ast.AssignStmt)
if !ok {
return true
}
if assign.Tok != token.ASSIGN || len(assign.Lhs) != len(assign.Rhs) {
return true
}
for i, stmt := range assign.Lhs {
rlh := j.Render(stmt)
rrh := j.Render(assign.Rhs[i])
if rlh == rrh {
j.Errorf(assign, "self-assignment of %s to %s", rrh, rlh)
}
}
return true
}
for _, f := range c.filterGenerated(j.Program.Files) {
ast.Inspect(f, fn)
}
}
func buildTagsIdentical(s1, s2 []string) bool {
if len(s1) != len(s2) {
return false
}
s1s := make([]string, len(s1))
copy(s1s, s1)
sort.Strings(s1s)
s2s := make([]string, len(s2))
copy(s2s, s2)
sort.Strings(s2s)
for i, s := range s1s {
if s != s2s[i] {
return false
}
}
return true
}
func (c *Checker) CheckDuplicateBuildConstraints(job *lint.Job) {
for _, f := range c.filterGenerated(job.Program.Files) {
constraints := buildTags(f)
for i, constraint1 := range constraints {
for j, constraint2 := range constraints {
if i >= j {
continue
}
if buildTagsIdentical(constraint1, constraint2) {
job.Errorf(f, "identical build constraints %q and %q",
strings.Join(constraint1, " "),
strings.Join(constraint2, " "))<|fim▁hole|> }
}<|fim▁end|> | }
}
} |
<|file_name|>TagEnum.java<|end_file_name|><|fim▁begin|>package com.xyp.sapidoc.idoc.enumeration;
import java.util.HashSet;
import java.util.Set;
/**
*
* @author Yunpeng_Xu
*/
public enum TagEnum {
FIELDS("FIELDS"),
RECORD_SECTION("RECORD_SECTION"),
CONTROL_RECORD("CONTROL_RECORD"),
DATA_RECORD("DATA_RECORD"),
STATUS_RECORD("STATUS_RECORD"),
SEGMENT_SECTION("SEGMENT_SECTION"),
IDOC("IDOC"),
SEGMENT("SEGMENT"),
GROUP("GROUP"),
;
private String tag;
private TagEnum(String tag) {
this.tag = tag;
}
public String getTagBegin() {
return "BEGIN_" + tag;
}
public String getTagEnd() {
return "END_" + tag;
}
public static Set<String> getAllTags(){
Set<String> tags = new HashSet<String>();
<|fim▁hole|> tags.add(tagEnum.getTagEnd());
}
return tags;
}
}<|fim▁end|> | TagEnum[] tagEnums = TagEnum.values();
for (TagEnum tagEnum : tagEnums) {
tags.add(tagEnum.getTagBegin());
|
<|file_name|>382-linked-list-random-node.py<|end_file_name|><|fim▁begin|>import random
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
_largesize = 300
def __init__(self, head):
self.head = head
self.lsize = 0
while head.next:
head = head.next
self.lsize += 1
self.m1_idx = None
self.m2_idx = None
if self.lsize > self._largesize:
self.m1_idx = self.lsize / 3 # start from 1/3
self.m1 = self._getN(self.m1_idx)
self.m2_idx = self.m1_idx * 2 # start from 2/3
self.m2 = self._getN(self.m2_idx)
def _getN(self, n):
n -= 1
p = self.head
while n:
p = p.next
n -= 1
return p
def getRandom(self):
def _get(delta, start):
p = start
while delta:
p = p.next
delta -= 1
return p.val
nextpos = random.randint(0, self.lsize)
if not self.m1_idx:<|fim▁hole|>
if nextpos < self.m1_idx:
val = _get(nextpos, self.head)
elif nextpos < self.m2_idx:
val = _get(nextpos - self.m1_idx, self.m1)
else:
val = _get(nextpos - self.m2_idx, self.m2)
return val<|fim▁end|> | return _get(nextpos, self.head) |
<|file_name|>post_anju.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#-*- coding:utf-8 -*-
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
import urllib2,urllib,sys,time
import cookielib,mechanize
import re
DEBUG =0
reload(sys)
sys.setdefaultencoding('utf8') #@UndefinedVariable
register_openers()
headers = {
'Host':'agent.anjuke.com',
'User-Agent' : 'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
#'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#'Accept-Language':'zh-cn,zh;q=0.5',
#'Accept-Encoding':'gzip, deflate',
#'Accept-Charset':'GB2312,utf-8;q=0.7,*;q=0.7',
'Keep-Alive':'115',
'Connection':'keep-alive',
}
#datagen11, headers = multipart_encode({"fileUploadInput": open("/home/myapp/Screenshot-1.jpg","rb"),"backFunction": "$.c.Uploader.finish"})
class httpPost():
data = {}
def __init__(self,dataDic):
self.cookie = cookielib.CookieJar()
httpsHandler = urllib2.HTTPHandler()
httpsHandler.set_http_debuglevel(DEBUG)
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie),httpsHandler)
self.data = dataDic
def login1(self):
self.brow = mechanize.Browser()
httpHandler = mechanize.HTTPHandler()<|fim▁hole|> httpHandler.set_http_debuglevel(DEBUG)
self.cookiejar = mechanize.LWPCookieJar()
#self.cookiejar = "Cookie lzstat_uv=34741959842666604402|1786789; Hm_lvt_976797cb85805d626fc5642aa5244ba0=1304534271541; ASPSESSIONIDQCDRAQBB=JHCHINLAHGMAIGBIFMNANLGF; lzstat_ss=2189193215_2_1304564199_1786789; Hm_lpvt_976797cb85805d626fc5642aa5244ba0=1304535401191"
self.opener = mechanize.OpenerFactory(mechanize.SeekableResponseOpener).build_opener(
httpHandler,httpsHandler,
mechanize.HTTPCookieProcessor(self.cookiejar),
mechanize.HTTPRefererProcessor,
mechanize.HTTPEquivProcessor,
mechanize.HTTPRefreshProcessor,
)
self.opener.addheaders = [("User-Agent","Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13"),
("From", "")]
#self.opener.addheaders = [(
# "Referer", self.data['postUrl']
# )]
login={}
login['method'] = self.data['method']
login['name'] = self.data['name']
login['pwd'] = self.data['pwd']
loginUrl = self.data['loginUrl']+'?'+urllib.urlencode(login)
print loginUrl
response = mechanize.urlopen("http://esf.soufun.com/")
response = mechanize.urlopen(loginUrl)
print response.read().decode('gb2312')
def login(self):
self.cookie = cookielib.CookieJar()
httpsHandler = urllib2.HTTPHandler()
httpsHandler.set_http_debuglevel(DEBUG)
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie),httpsHandler)
login={}
login['act'] = self.data['act']
login['loginName'] = self.data['loginName']
login['history'] = ''
login['loginPasswd'] = self.data['loginPasswd']
loginUrl = self.data['loginUrl']
req = urllib2.Request(loginUrl,urllib.urlencode(login),headers)
r = self.opener.open(req)
res = None
for item in self.cookie:
#print item.name,item.value
if item.name == 'aQQ_ajklastuser':
res = item.value
return res
#aQQ_ajklastuser junyue_liuhua
#print self.opener.open('http://my.anjuke.com/v2/user/broker/checked/').read()
#open('login.txt','w').write(r.read().encode('utf-8'))
def post(self):
pass
#postData = {}
#postData['loginUrl'] = 'http://agent.anjuke.com/v2/login/'
#postData['act'] = 'login'
#postData['loginName'] = 'junyue_liuhua'
#postData['loginPasswd'] = 'lh_131415'
#http = httpPost(postData)
#http.login()<|fim▁end|> | httpsHandler = mechanize.HTTPSHandler()
|
<|file_name|>density.go<|end_file_name|><|fim▁begin|>/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"math"
"os"
"sort"
"strconv"
"sync"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
controllerframework "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/sets"
utiluuid "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
MinSaturationThreshold = 2 * time.Minute
MinPodsPerSecondThroughput = 8
)
// Maximum container failures this test tolerates before failing.
var MaxContainerFailures = 0
type DensityTestConfig struct {
Configs []framework.RCConfig
Client *client.Client
Namespace string
PollInterval time.Duration
PodCount int
Timeout time.Duration
}
func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
var apiserverMem uint64
var controllerMem uint64
var schedulerMem uint64
apiserverCPU := math.MaxFloat32
apiserverMem = math.MaxUint64
controllerCPU := math.MaxFloat32
controllerMem = math.MaxUint64
schedulerCPU := math.MaxFloat32
schedulerMem = math.MaxUint64
framework.Logf("Setting resource constraings for provider: %s", framework.TestContext.Provider)
if framework.ProviderIs("kubemark") {
if numNodes <= 5 {
apiserverCPU = 0.15
apiserverMem = 150 * (1024 * 1024)
controllerCPU = 0.1
controllerMem = 100 * (1024 * 1024)
schedulerCPU = 0.05
schedulerMem = 50 * (1024 * 1024)
} else if numNodes <= 100 {
apiserverCPU = 1.5
apiserverMem = 1500 * (1024 * 1024)
controllerCPU = 0.75
controllerMem = 750 * (1024 * 1024)
schedulerCPU = 0.75
schedulerMem = 500 * (1024 * 1024)
} else if numNodes <= 500 {
apiserverCPU = 2.25
apiserverMem = 2500 * (1024 * 1024)
controllerCPU = 1.0
controllerMem = 1100 * (1024 * 1024)
schedulerCPU = 0.8
schedulerMem = 500 * (1024 * 1024)
} else if numNodes <= 1000 {
apiserverCPU = 4
apiserverMem = 4000 * (1024 * 1024)
controllerCPU = 3
controllerMem = 2000 * (1024 * 1024)
schedulerCPU = 1.5
schedulerMem = 750 * (1024 * 1024)
}
} else {
if numNodes <= 100 {
apiserverCPU = 1.5
apiserverMem = 1300 * (1024 * 1024)
controllerCPU = 0.5
controllerMem = 300 * (1024 * 1024)
schedulerCPU = 0.4
schedulerMem = 150 * (1024 * 1024)
}
}
constraints := make(map[string]framework.ResourceConstraint)
constraints["fluentd-elasticsearch"] = framework.ResourceConstraint{
CPUConstraint: 0.2,
MemoryConstraint: 250 * (1024 * 1024),
}
constraints["elasticsearch-logging"] = framework.ResourceConstraint{
CPUConstraint: 2,
// TODO: bring it down to 750MB again, when we lower Kubelet verbosity level. I.e. revert #19164
MemoryConstraint: 5000 * (1024 * 1024),
}
constraints["heapster"] = framework.ResourceConstraint{
CPUConstraint: 2,
MemoryConstraint: 1800 * (1024 * 1024),
}
constraints["kibana-logging"] = framework.ResourceConstraint{
CPUConstraint: 0.2,
MemoryConstraint: 100 * (1024 * 1024),
}
constraints["kube-proxy"] = framework.ResourceConstraint{
CPUConstraint: 0.05,
MemoryConstraint: 20 * (1024 * 1024),
}
constraints["l7-lb-controller"] = framework.ResourceConstraint{
CPUConstraint: 0.1,
MemoryConstraint: 60 * (1024 * 1024),
}
constraints["influxdb"] = framework.ResourceConstraint{
CPUConstraint: 2,
MemoryConstraint: 500 * (1024 * 1024),
}
constraints["kube-apiserver"] = framework.ResourceConstraint{
CPUConstraint: apiserverCPU,
MemoryConstraint: apiserverMem,
}
constraints["kube-controller-manager"] = framework.ResourceConstraint{
CPUConstraint: controllerCPU,
MemoryConstraint: controllerMem,
}
constraints["kube-scheduler"] = framework.ResourceConstraint{
CPUConstraint: schedulerCPU,
MemoryConstraint: schedulerMem,
}
return constraints
}
func logPodStartupStatus(c *client.Client, expectedPods int, ns string, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) {
label := labels.SelectorFromSet(labels.Set(observedLabels))
podStore := framework.NewPodStore(c, ns, label, fields.Everything())
defer podStore.Stop()
ticker := time.NewTicker(period)
defer ticker.Stop()
for {
select {
case <-ticker.C:
pods := podStore.List()
startupStatus := framework.ComputeRCStartupStatus(pods, expectedPods)
startupStatus.Print("Density")
case <-stopCh:
pods := podStore.List()
startupStatus := framework.ComputeRCStartupStatus(pods, expectedPods)
startupStatus.Print("Density")
return
}
}
}
// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig) time.Duration {
defer GinkgoRecover()
// Create a listener for events.
// eLock is a lock protects the events
var eLock sync.Mutex
events := make([](*api.Event), 0)
_, controller := controllerframework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dtc.Client.Events(dtc.Namespace).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return dtc.Client.Events(dtc.Namespace).Watch(options)
},
},
&api.Event{},
0,
controllerframework.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
eLock.Lock()
defer eLock.Unlock()
events = append(events, obj.(*api.Event))
},
},
)
stop := make(chan struct{})
go controller.Run(stop)
// Create a listener for api updates
// uLock is a lock protects the updateCount
var uLock sync.Mutex
updateCount := 0
label := labels.SelectorFromSet(labels.Set(map[string]string{"type": "densityPod"}))
_, updateController := controllerframework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = label
return dtc.Client.Pods(dtc.Namespace).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.LabelSelector = label
return dtc.Client.Pods(dtc.Namespace).Watch(options)
},
},
&api.Pod{},
0,
controllerframework.ResourceEventHandlerFuncs{
UpdateFunc: func(_, _ interface{}) {
uLock.Lock()
defer uLock.Unlock()
updateCount++
},
},
)
go updateController.Run(stop)
// Start all replication controllers.
startTime := time.Now()
wg := sync.WaitGroup{}
wg.Add(len(dtc.Configs))
for i := range dtc.Configs {
rcConfig := dtc.Configs[i]
go func() {
framework.ExpectNoError(framework.RunRC(rcConfig))
wg.Done()
}()
}
logStopCh := make(chan struct{})
go logPodStartupStatus(dtc.Client, dtc.PodCount, dtc.Namespace, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
wg.Wait()
startupTime := time.Now().Sub(startTime)
close(logStopCh)
framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
By("Waiting for all events to be recorded")
last := -1
current := len(events)
lastCount := -1
currentCount := updateCount
for start := time.Now(); (last < current || lastCount < currentCount) && time.Since(start) < dtc.Timeout; time.Sleep(10 * time.Second) {
func() {
eLock.Lock()
defer eLock.Unlock()
last = current
current = len(events)
}()
func() {
uLock.Lock()
defer uLock.Unlock()
lastCount = currentCount
currentCount = updateCount
}()
}
close(stop)
if current != last {
framework.Logf("Warning: Not all events were recorded after waiting %.2f minutes", dtc.Timeout.Minutes())
}
framework.Logf("Found %d events", current)
if currentCount != lastCount {
framework.Logf("Warning: Not all updates were recorded after waiting %.2f minutes", dtc.Timeout.Minutes())
}
framework.Logf("Found %d updates", currentCount)
// Tune the threshold for allowed failures.
badEvents := framework.BadEvents(events)
Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(dtc.PodCount)))))
// Print some data about Pod to Node allocation
By("Printing Pod to Node allocation data")
podList, err := dtc.Client.Pods(api.NamespaceAll).List(api.ListOptions{})
framework.ExpectNoError(err)
pausePodAllocation := make(map[string]int)
systemPodAllocation := make(map[string][]string)
for _, pod := range podList.Items {
if pod.Namespace == api.NamespaceSystem {
systemPodAllocation[pod.Spec.NodeName] = append(systemPodAllocation[pod.Spec.NodeName], pod.Name)
} else {
pausePodAllocation[pod.Spec.NodeName]++
}
}
nodeNames := make([]string, 0)
for k := range pausePodAllocation {
nodeNames = append(nodeNames, k)
}
sort.Strings(nodeNames)
for _, node := range nodeNames {
framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
}
return startupTime
}
func cleanupDensityTest(dtc DensityTestConfig) {
defer GinkgoRecover()
By("Deleting ReplicationController")
// We explicitly delete all pods to have API calls necessary for deletion accounted in metrics.
for i := range dtc.Configs {
rcName := dtc.Configs[i].Name
rc, err := dtc.Client.ReplicationControllers(dtc.Namespace).Get(rcName)
if err == nil && rc.Spec.Replicas != 0 {
if framework.TestContext.GarbageCollectorEnabled {
By("Cleaning up only the replication controller, garbage collector will clean up the pods")
err := framework.DeleteRCAndWaitForGC(dtc.Client, dtc.Namespace, rcName)
framework.ExpectNoError(err)
} else {
By("Cleaning up the replication controller and pods")
err := framework.DeleteRCAndPods(dtc.Client, dtc.Namespace, rcName)
framework.ExpectNoError(err)
}
}
}
}
// This test suite can take a long time to run, and can affect or be affected by other tests.
// So by default it is added to the ginkgo.skip list (see driver.go).
// To run this suite you must explicitly ask for it by setting the
// -t/--test flag or ginkgo.focus flag.
// IMPORTANT: This test is designed to work on large (>= 100 Nodes) clusters. For smaller ones
// results will not be representative for control-plane performance as we'll start hitting
// limits on Docker's concurrent container startup.
var _ = framework.KubeDescribe("Density", func() {
var c *client.Client
var nodeCount int
var RCName string
var additionalPodsPrefix string
var ns string
var uuid string
var e2eStartupTime time.Duration
var totalPods int
var nodeCpuCapacity int64
var nodeMemCapacity int64
var nodes *api.NodeList
var masters sets.String
// Gathers data prior to framework namespace teardown
AfterEach(func() {
saturationThreshold := time.Duration((totalPods / MinPodsPerSecondThroughput)) * time.Second
if saturationThreshold < MinSaturationThreshold {
saturationThreshold = MinSaturationThreshold
}
Expect(e2eStartupTime).NotTo(BeNumerically(">", saturationThreshold))
saturationData := framework.SaturationTime{
TimeToSaturate: e2eStartupTime,
NumberOfNodes: nodeCount,
NumberOfPods: totalPods,
Throughput: float32(totalPods) / float32(e2eStartupTime/time.Second),
}
framework.Logf("Cluster saturation time: %s", framework.PrettyPrintJSON(saturationData))
// Verify latency metrics.
highLatencyRequests, err := framework.HighLatencyRequests(c)
framework.ExpectNoError(err)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
// Verify scheduler metrics.
// TODO: Reset metrics at the beginning of the test.
// We should do something similar to how we do it for APIserver.
framework.ExpectNoError(framework.VerifySchedulerLatency(c))
})
// Explicitly put here, to delete namespace at the end of the test
// (after measuring latency metrics, etc.).
f := framework.NewDefaultFramework("density")
f.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() {
c = f.Client
ns = f.Namespace.Name
// In large clusters we may get to this point but still have a bunch
// of nodes without Routes created. Since this would make a node
// unschedulable, we need to wait until all of them are schedulable.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c))
masters, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
nodeCpuCapacity = nodes.Items[0].Status.Allocatable.Cpu().MilliValue()
nodeMemCapacity = nodes.Items[0].Status.Allocatable.Memory().Value()
// Terminating a namespace (deleting the remaining objects from it - which
// generally means events) can affect the current run. Thus we wait for all
// terminating namespace to be finally deleted before starting this test.
err := framework.CheckTestingNSDeletedExcept(c, ns)
framework.ExpectNoError(err)
uuid = string(utiluuid.NewUUID())
framework.ExpectNoError(framework.ResetMetrics(c))
framework.ExpectNoError(os.Mkdir(fmt.Sprintf(framework.TestContext.OutputDir+"/%s", uuid), 0777))
framework.Logf("Listing nodes for easy debugging:\n")
for _, node := range nodes.Items {
var internalIP, externalIP string
for _, address := range node.Status.Addresses {
if address.Type == api.NodeInternalIP {
internalIP = address.Address
}
if address.Type == api.NodeExternalIP {
externalIP = address.Address
}
}
framework.Logf("Name: %v, clusterIP: %v, externalIP: %v", node.ObjectMeta.Name, internalIP, externalIP)
}
})
type Density struct {
// Controls if e2e latency tests should be run (they are slow)
runLatencyTest bool
podsPerNode int
// Controls how often the apiserver is polled for pods
interval time.Duration
}
densityTests := []Density{
// TODO: Expose runLatencyTest as ginkgo flag.
{podsPerNode: 3, runLatencyTest: false, interval: 10 * time.Second},
{podsPerNode: 30, runLatencyTest: true, interval: 10 * time.Second},
{podsPerNode: 50, runLatencyTest: false, interval: 10 * time.Second},
{podsPerNode: 95, runLatencyTest: true, interval: 10 * time.Second},
{podsPerNode: 100, runLatencyTest: false, interval: 10 * time.Second},
}
for _, testArg := range densityTests {
name := fmt.Sprintf("should allow starting %d pods per node", testArg.podsPerNode)
switch testArg.podsPerNode {
case 30:
name = "[Feature:Performance] " + name
case 95:
name = "[Feature:HighDensityPerformance]" + name
default:
name = "[Feature:ManualPerformance] " + name
}
itArg := testArg
It(name, func() {
podsPerNode := itArg.podsPerNode
if podsPerNode == 30 {
f.AddonResourceConstraints = func() map[string]framework.ResourceConstraint { return density30AddonResourceVerifier(nodeCount) }()
}
totalPods = podsPerNode * nodeCount
fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
framework.ExpectNoError(err)
defer fileHndl.Close()
timeout := 10 * time.Minute
// TODO: loop to podsPerNode instead of 1 when we're ready.
numberOrRCs := 1
RCConfigs := make([]framework.RCConfig, numberOrRCs)
for i := 0; i < numberOrRCs; i++ {
RCName := "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
RCConfigs[i] = framework.RCConfig{Client: c,
Image: framework.GetPauseImageName(f.Client),
Name: RCName,
Namespace: ns,
Labels: map[string]string{"type": "densityPod"},
PollInterval: itArg.interval,
PodStatusFile: fileHndl,
Replicas: (totalPods + numberOrRCs - 1) / numberOrRCs,
CpuRequest: nodeCpuCapacity / 100,
MemRequest: nodeMemCapacity / 100,
MaxContainerFailures: &MaxContainerFailures,
Silent: true,
}
}
dConfig := DensityTestConfig{Client: c,
Configs: RCConfigs,
PodCount: totalPods,
Namespace: ns,
PollInterval: itArg.interval,
Timeout: timeout,
}
e2eStartupTime = runDensityTest(dConfig)
if itArg.runLatencyTest {
By("Scheduling additional Pods to measure startup latencies")
createTimes := make(map[string]unversioned.Time, 0)
nodes := make(map[string]string, 0)
scheduleTimes := make(map[string]unversioned.Time, 0)
runTimes := make(map[string]unversioned.Time, 0)
watchTimes := make(map[string]unversioned.Time, 0)
var mutex sync.Mutex
checkPod := func(p *api.Pod) {
mutex.Lock()
defer mutex.Unlock()
defer GinkgoRecover()
if p.Status.Phase == api.PodRunning {
if _, found := watchTimes[p.Name]; !found {
watchTimes[p.Name] = unversioned.Now()
createTimes[p.Name] = p.CreationTimestamp
nodes[p.Name] = p.Spec.NodeName
var startTime unversioned.Time
for _, cs := range p.Status.ContainerStatuses {
if cs.State.Running != nil {
if startTime.Before(cs.State.Running.StartedAt) {
startTime = cs.State.Running.StartedAt
}
}
}
if startTime != unversioned.NewTime(time.Time{}) {
runTimes[p.Name] = startTime
} else {
framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
}
}
}
}
additionalPodsPrefix = "density-latency-pod"
latencyPodsStore, controller := controllerframework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix})
return c.Pods(ns).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix})
return c.Pods(ns).Watch(options)
},
},
&api.Pod{},
0,
controllerframework.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
p, ok := obj.(*api.Pod)
Expect(ok).To(Equal(true))
go checkPod(p)
},
UpdateFunc: func(oldObj, newObj interface{}) {
p, ok := newObj.(*api.Pod)
Expect(ok).To(Equal(true))
go checkPod(p)
},
},
)
stopCh := make(chan struct{})
go controller.Run(stopCh)
// Create some additional pods with throughput ~5 pods/sec.
var wg sync.WaitGroup
wg.Add(nodeCount)
// Explicitly set requests here.
// Thanks to it we trigger increasing priority function by scheduling
// a pod to a node, which in turn will result in spreading latency pods
// more evenly between nodes.
cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
if podsPerNode > 30 {
// This is to make them schedulable on high-density tests
// (e.g. 100 pods/node kubemark).
cpuRequest = *resource.NewMilliQuantity(0, resource.DecimalSI)
memRequest = *resource.NewQuantity(0, resource.DecimalSI)
}
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
go createRunningPodFromRC(&wg, c, name, ns, framework.GetPauseImageName(f.Client), additionalPodsPrefix, cpuRequest, memRequest)
time.Sleep(200 * time.Millisecond)
}
wg.Wait()
By("Waiting for all Pods begin observed by the watch...")
for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
if time.Since(start) < timeout {
framework.Failf("Timeout reached waiting for all Pods being observed by the watch.")
}
}
close(stopCh)
nodeToLatencyPods := make(map[string]int)
for _, item := range latencyPodsStore.List() {
pod := item.(*api.Pod)
nodeToLatencyPods[pod.Spec.NodeName]++
}
for node, count := range nodeToLatencyPods {
if count > 1 {
framework.Logf("%d latency pods scheduled on %s", count, node)
}
}
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.namespace": ns,
"source": api.DefaultSchedulerName,
}.AsSelector()
options := api.ListOptions{FieldSelector: selector}
schedEvents, err := c.Events(ns).List(options)
framework.ExpectNoError(err)
for k := range createTimes {
for _, event := range schedEvents.Items {
if event.InvolvedObject.Name == k {
scheduleTimes[k] = event.FirstTimestamp
break
}
}
}
scheduleLag := make([]framework.PodLatencyData, 0)
startupLag := make([]framework.PodLatencyData, 0)
watchLag := make([]framework.PodLatencyData, 0)
schedToWatchLag := make([]framework.PodLatencyData, 0)
e2eLag := make([]framework.PodLatencyData, 0)
for name, create := range createTimes {
sched, ok := scheduleTimes[name]
Expect(ok).To(Equal(true))
run, ok := runTimes[name]
Expect(ok).To(Equal(true))
watch, ok := watchTimes[name]
Expect(ok).To(Equal(true))
node, ok := nodes[name]
Expect(ok).To(Equal(true))
scheduleLag = append(scheduleLag, framework.PodLatencyData{Name: name, Node: node, Latency: sched.Time.Sub(create.Time)})
startupLag = append(startupLag, framework.PodLatencyData{Name: name, Node: node, Latency: run.Time.Sub(sched.Time)})
watchLag = append(watchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(run.Time)})
schedToWatchLag = append(schedToWatchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(sched.Time)})
e2eLag = append(e2eLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(create.Time)})
}
sort.Sort(framework.LatencySlice(scheduleLag))
sort.Sort(framework.LatencySlice(startupLag))
sort.Sort(framework.LatencySlice(watchLag))
sort.Sort(framework.LatencySlice(schedToWatchLag))
sort.Sort(framework.LatencySlice(e2eLag))
framework.PrintLatencies(scheduleLag, "worst schedule latencies")
framework.PrintLatencies(startupLag, "worst run-after-schedule latencies")
framework.PrintLatencies(watchLag, "worst watch latencies")
framework.PrintLatencies(schedToWatchLag, "worst scheduled-to-end total latencies")
framework.PrintLatencies(e2eLag, "worst e2e total latencies")
// Test whether e2e pod startup time is acceptable.
podStartupLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))
framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
}
cleanupDensityTest(dConfig)
By("Removing additional replication controllers if any")
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
c.ReplicationControllers(ns).Delete(name, nil)
}
})
}
// Calculate total number of pods from each node's max-pod<|fim▁hole|> }
totalPods -= framework.WaitForStableCluster(c, masters)
fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
framework.ExpectNoError(err)
defer fileHndl.Close()
rcCnt := 1
RCConfigs := make([]framework.RCConfig, rcCnt)
podsPerRC := int(totalPods / rcCnt)
for i := 0; i < rcCnt; i++ {
if i == rcCnt-1 {
podsPerRC += int(math.Mod(float64(totalPods), float64(rcCnt)))
}
RCName = "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
RCConfigs[i] = framework.RCConfig{Client: c,
Image: framework.GetPauseImageName(f.Client),
Name: RCName,
Namespace: ns,
Labels: map[string]string{"type": "densityPod"},
PollInterval: 10 * time.Second,
PodStatusFile: fileHndl,
Replicas: podsPerRC,
MaxContainerFailures: &MaxContainerFailures,
Silent: true,
}
}
dConfig := DensityTestConfig{Client: c,
Configs: RCConfigs,
PodCount: totalPods,
Namespace: ns,
PollInterval: 10 * time.Second,
Timeout: 10 * time.Minute,
}
e2eStartupTime = runDensityTest(dConfig)
cleanupDensityTest(dConfig)
})
})
func createRunningPodFromRC(wg *sync.WaitGroup, c *client.Client, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
defer GinkgoRecover()
defer wg.Done()
labels := map[string]string{
"type": podType,
"name": name,
}
rc := &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: api.ReplicationControllerSpec{
Replicas: 1,
Selector: labels,
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: labels,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: name,
Image: image,
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceCPU: cpuRequest,
api.ResourceMemory: memRequest,
},
},
},
},
DNSPolicy: api.DNSDefault,
},
},
},
}
_, err := c.ReplicationControllers(ns).Create(rc)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForRCPodsRunning(c, ns, name))
framework.Logf("Found pod '%s' running", name)
}<|fim▁end|> | It("[Feature:ManualPerformance] should allow running maximum capacity pods on nodes", func() {
totalPods = 0
for _, n := range nodes.Items {
totalPods += int(n.Status.Capacity.Pods().Value()) |
<|file_name|>tuples.rs<|end_file_name|><|fim▁begin|>use data::*;
use generators::core::*;
macro_rules! tuple_generator_impl {
($gen_a:ident: $var_a:ident: $type_a:ident
$(, $gen_n: ident: $var_n:ident: $type_n:ident)*) => (
impl<$type_a: Generator, $($type_n: Generator),*> Generator
for ($type_a, $($type_n),*) {
type Item = ($type_a::Item, $($type_n::Item),*);
fn generate<In: InfoSource>(&self, src: &mut In) -> Maybe<Self::Item> {
// Gens
let &(ref $gen_a, $(ref $gen_n),*) = self;
let $var_a = $gen_a.generate(src)?;
$(let $var_n = $gen_n.generate(src)?;)*
Ok(($var_a, $($var_n),*))
}
}
);
}
tuple_generator_impl!(ga: a: A);
tuple_generator_impl!(ga: a: A, gb: b: B);
tuple_generator_impl!(ga: a: A, gb: b: B, gc: c: C);
tuple_generator_impl!(ga: a: A, gb: b: B, gc: c: C, gd: d: D);
tuple_generator_impl!(ga: a: A, gb: b: B, gc: c: C, gd: d: D, ge: e: E);
tuple_generator_impl!(ga: a: A, gb: b: B, gc: c: C, gd: d: D, ge: e: E, gf: f: F);
tuple_generator_impl!(
ga: a: A,
gb: b: B,
gc: c: C,
gd: d: D,
ge: e: E,
gf: f: F,
gg: g: G
);
tuple_generator_impl!(
ga: a: A,
gb: b: B,
gc: c: C,
gd: d: D,<|fim▁hole|> ge: e: E,
gf: f: F,
gg: g: G,
gh: h: H
);
tuple_generator_impl!(
ga: a: A,
gb: b: B,
gc: c: C,
gd: d: D,
ge: e: E,
gf: f: F,
gg: g: G,
gh: h: H,
gi: i: I
);
tuple_generator_impl!(
ga: a: A,
gb: b: B,
gc: c: C,
gd: d: D,
ge: e: E,
gf: f: F,
gg: g: G,
gh: h: H,
gi: i: I,
gj: j: J
);
tuple_generator_impl!(
ga: a: A,
gb: b: B,
gc: c: C,
gd: d: D,
ge: e: E,
gf: f: F,
gg: g: G,
gh: h: H,
gi: i: I,
gj: j: J,
gk: k: K
);
tuple_generator_impl!(
ga: a: A,
gb: b: B,
gc: c: C,
gd: d: D,
ge: e: E,
gf: f: F,
gg: g: G,
gh: h: H,
gi: i: I,
gj: j: J,
gk: k: K,
gl: l: L
);<|fim▁end|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.